// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/firmware.h>
12 #include <linux/interrupt.h>
13 #include <linux/list.h>
14 #include <linux/mhi.h>
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/slab.h>
18 #include <linux/wait.h>
21 /* Setup RDDM vector table for RDDM transfer and program RXVEC */
22 void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
23 struct image_info *img_info)
25 struct mhi_buf *mhi_buf = img_info->mhi_buf;
26 struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
27 void __iomem *base = mhi_cntrl->bhie;
28 struct device *dev = &mhi_cntrl->mhi_dev->dev;
32 for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
33 bhi_vec->dma_addr = mhi_buf->dma_addr;
34 bhi_vec->size = mhi_buf->len;
37 dev_dbg(dev, "BHIe programming for RDDM\n");
39 mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
40 upper_32_bits(mhi_buf->dma_addr));
42 mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
43 lower_32_bits(mhi_buf->dma_addr));
45 mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
46 sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
48 if (unlikely(!sequence_id))
51 mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
52 BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
55 dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
56 &mhi_buf->dma_addr, mhi_buf->len, sequence_id);
59 /* Collect RDDM buffer during kernel panic */
60 static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
65 const u32 delayus = 2000;
66 u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
67 const u32 rddm_timeout_us = 200000;
68 int rddm_retry = rddm_timeout_us / delayus;
69 void __iomem *base = mhi_cntrl->bhie;
70 struct device *dev = &mhi_cntrl->mhi_dev->dev;
72 dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
73 to_mhi_pm_state_str(mhi_cntrl->pm_state),
74 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
75 TO_MHI_EXEC_STR(mhi_cntrl->ee));
78 * This should only be executing during a kernel panic, we expect all
79 * other cores to shutdown while we're collecting RDDM buffer. After
80 * returning from this function, we expect the device to reset.
82 * Normaly, we read/write pm_state only after grabbing the
83 * pm_lock, since we're in a panic, skipping it. Also there is no
84 * gurantee that this state change would take effect since
85 * we're setting it w/o grabbing pm_lock
87 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
88 /* update should take the effect immediately */
92 * Make sure device is not already in RDDM. In case the device asserts
93 * and a kernel panic follows, device will already be in RDDM.
94 * Do not trigger SYS ERR again and proceed with waiting for
95 * image download completion.
97 ee = mhi_get_exec_env(mhi_cntrl);
98 if (ee != MHI_EE_RDDM) {
99 dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
100 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
102 dev_dbg(dev, "Waiting for device to enter RDDM\n");
103 while (rddm_retry--) {
104 ee = mhi_get_exec_env(mhi_cntrl);
105 if (ee == MHI_EE_RDDM)
111 if (rddm_retry <= 0) {
112 /* Hardware reset so force device to enter RDDM */
114 "Did not enter RDDM, do a host req reset\n");
115 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
116 MHI_SOC_RESET_REQ_OFFSET,
121 ee = mhi_get_exec_env(mhi_cntrl);
124 dev_dbg(dev, "Waiting for image download completion, current EE: %s\n",
125 TO_MHI_EXEC_STR(ee));
128 ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
129 BHIE_RXVECSTATUS_STATUS_BMSK,
130 BHIE_RXVECSTATUS_STATUS_SHFT,
135 if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
141 ee = mhi_get_exec_env(mhi_cntrl);
142 ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
144 dev_err(dev, "Did not complete RDDM transfer\n");
145 dev_err(dev, "Current EE: %s\n", TO_MHI_EXEC_STR(ee));
146 dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
151 /* Download RDDM image from device */
152 int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
154 void __iomem *base = mhi_cntrl->bhie;
158 return __mhi_download_rddm_in_panic(mhi_cntrl);
160 /* Wait for the image download to complete */
161 wait_event_timeout(mhi_cntrl->state_event,
162 mhi_read_reg_field(mhi_cntrl, base,
163 BHIE_RXVECSTATUS_OFFS,
164 BHIE_RXVECSTATUS_STATUS_BMSK,
165 BHIE_RXVECSTATUS_STATUS_SHFT,
166 &rx_status) || rx_status,
167 msecs_to_jiffies(mhi_cntrl->timeout_ms));
169 return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
171 EXPORT_SYMBOL_GPL(mhi_download_rddm_img);
173 static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
174 const struct mhi_buf *mhi_buf)
176 void __iomem *base = mhi_cntrl->bhie;
177 rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
178 u32 tx_status, sequence_id;
180 read_lock_bh(pm_lock);
181 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
182 read_unlock_bh(pm_lock);
186 mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
187 upper_32_bits(mhi_buf->dma_addr));
189 mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
190 lower_32_bits(mhi_buf->dma_addr));
192 mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
194 sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
195 mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
196 BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
198 read_unlock_bh(pm_lock);
200 /* Wait for the image download to complete */
201 wait_event_timeout(mhi_cntrl->state_event,
202 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
203 mhi_read_reg_field(mhi_cntrl, base,
204 BHIE_TXVECSTATUS_OFFS,
205 BHIE_TXVECSTATUS_STATUS_BMSK,
206 BHIE_TXVECSTATUS_STATUS_SHFT,
207 &tx_status) || tx_status,
208 msecs_to_jiffies(mhi_cntrl->timeout_ms));
210 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
213 return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
216 static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
220 u32 tx_status, val, session_id;
222 void __iomem *base = mhi_cntrl->bhi;
223 rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
224 struct device *dev = &mhi_cntrl->mhi_dev->dev;
229 { "ERROR_CODE", BHI_ERRCODE },
230 { "ERROR_DBG1", BHI_ERRDBG1 },
231 { "ERROR_DBG2", BHI_ERRDBG2 },
232 { "ERROR_DBG3", BHI_ERRDBG3 },
236 read_lock_bh(pm_lock);
237 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
238 read_unlock_bh(pm_lock);
239 goto invalid_pm_state;
242 dev_dbg(dev, "Starting SBL download via BHI\n");
243 mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
244 mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
245 upper_32_bits(dma_addr));
246 mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
247 lower_32_bits(dma_addr));
248 mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
249 session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
250 mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
251 read_unlock_bh(pm_lock);
253 /* Wait for the image download to complete */
254 ret = wait_event_timeout(mhi_cntrl->state_event,
255 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
256 mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
257 BHI_STATUS_MASK, BHI_STATUS_SHIFT,
258 &tx_status) || tx_status,
259 msecs_to_jiffies(mhi_cntrl->timeout_ms));
260 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
261 goto invalid_pm_state;
263 if (tx_status == BHI_STATUS_ERROR) {
264 dev_err(dev, "Image transfer failed\n");
265 read_lock_bh(pm_lock);
266 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
267 for (i = 0; error_reg[i].name; i++) {
268 ret = mhi_read_reg(mhi_cntrl, base,
269 error_reg[i].offset, &val);
272 dev_err(dev, "Reg: %s value: 0x%x\n",
273 error_reg[i].name, val);
276 read_unlock_bh(pm_lock);
277 goto invalid_pm_state;
280 return (!ret) ? -ETIMEDOUT : 0;
287 void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
288 struct image_info *image_info)
291 struct mhi_buf *mhi_buf = image_info->mhi_buf;
293 for (i = 0; i < image_info->entries; i++, mhi_buf++)
294 mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
297 kfree(image_info->mhi_buf);
301 int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
302 struct image_info **image_info,
305 size_t seg_size = mhi_cntrl->seg_len;
306 int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
308 struct image_info *img_info;
309 struct mhi_buf *mhi_buf;
311 img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
315 /* Allocate memory for entries */
316 img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
318 if (!img_info->mhi_buf)
319 goto error_alloc_mhi_buf;
321 /* Allocate and populate vector table */
322 mhi_buf = img_info->mhi_buf;
323 for (i = 0; i < segments; i++, mhi_buf++) {
324 size_t vec_size = seg_size;
326 /* Vector table is the last entry */
327 if (i == segments - 1)
328 vec_size = sizeof(struct bhi_vec_entry) * i;
330 mhi_buf->len = vec_size;
331 mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
335 goto error_alloc_segment;
338 img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
339 img_info->entries = segments;
340 *image_info = img_info;
345 for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
346 mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
355 static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
356 const struct firmware *firmware,
357 struct image_info *img_info)
359 size_t remainder = firmware->size;
361 const u8 *buf = firmware->data;
363 struct mhi_buf *mhi_buf = img_info->mhi_buf;
364 struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
367 to_cpy = min(remainder, mhi_buf->len);
368 memcpy(mhi_buf->buf, buf, to_cpy);
369 bhi_vec->dma_addr = mhi_buf->dma_addr;
370 bhi_vec->size = to_cpy;
380 void mhi_fw_load_worker(struct work_struct *work)
382 struct mhi_controller *mhi_cntrl;
383 const struct firmware *firmware = NULL;
384 struct image_info *image_info;
392 mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
393 dev = &mhi_cntrl->mhi_dev->dev;
395 dev_dbg(dev, "Waiting for device to enter PBL from: %s\n",
396 TO_MHI_EXEC_STR(mhi_cntrl->ee));
398 ret = wait_event_timeout(mhi_cntrl->state_event,
399 MHI_IN_PBL(mhi_cntrl->ee) ||
400 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
401 msecs_to_jiffies(mhi_cntrl->timeout_ms));
403 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
404 dev_err(dev, "Device MHI is not in valid state\n");
408 /* If device is in pass through, do reset to ready state transition */
409 if (mhi_cntrl->ee == MHI_EE_PTHRU)
410 goto fw_load_ee_pthru;
412 fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
413 mhi_cntrl->edl_image : mhi_cntrl->fw_image;
415 if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
416 !mhi_cntrl->seg_len))) {
418 "No firmware image defined or !sbl_size || !seg_len\n");
422 ret = reject_firmware(&firmware, fw_name, dev);
424 dev_err(dev, "Error loading firmware: %d\n", ret);
428 size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
430 /* SBL size provided is maximum size, not necessarily the image size */
431 if (size > firmware->size)
432 size = firmware->size;
434 buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
436 release_firmware(firmware);
440 /* Download SBL image */
441 memcpy(buf, firmware->data, size);
442 ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
443 mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
445 if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
446 release_firmware(firmware);
448 /* Error or in EDL mode, we're done */
449 if (ret || mhi_cntrl->ee == MHI_EE_EDL)
452 write_lock_irq(&mhi_cntrl->pm_lock);
453 mhi_cntrl->dev_state = MHI_STATE_RESET;
454 write_unlock_irq(&mhi_cntrl->pm_lock);
457 * If we're doing fbc, populate vector tables while
458 * device transitioning into MHI READY state
460 if (mhi_cntrl->fbc_download) {
461 ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
464 goto error_alloc_fw_table;
466 /* Load the firmware into BHIE vec table */
467 mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
471 /* Transitioning into MHI RESET->READY state */
472 ret = mhi_ready_state_transition(mhi_cntrl);
474 if (!mhi_cntrl->fbc_download)
480 /* Wait for the SBL event */
481 ret = wait_event_timeout(mhi_cntrl->state_event,
482 mhi_cntrl->ee == MHI_EE_SBL ||
483 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
484 msecs_to_jiffies(mhi_cntrl->timeout_ms));
486 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
487 dev_err(dev, "MHI did not enter SBL\n");
491 /* Start full firmware image download */
492 image_info = mhi_cntrl->fbc_image;
493 ret = mhi_fw_load_amss(mhi_cntrl,
494 /* Vector table is the last entry */
495 &image_info->mhi_buf[image_info->entries - 1]);
497 release_firmware(firmware);
502 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
503 mhi_cntrl->fbc_image = NULL;
505 error_alloc_fw_table:
506 release_firmware(firmware);