Linux-libre 5.7.5-gnu
[librecmc/linux-libre.git] / drivers / bus / mhi / core / boot.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4  *
5  */
6
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/firmware.h>
12 #include <linux/interrupt.h>
13 #include <linux/list.h>
14 #include <linux/mhi.h>
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/slab.h>
18 #include <linux/wait.h>
19 #include "internal.h"
20
/* Setup RDDM vector table for RDDM transfer and program RXVEC */
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
                      struct image_info *img_info)
{
        struct mhi_buf *mhi_buf = img_info->mhi_buf;
        struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
        void __iomem *base = mhi_cntrl->bhie;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 sequence_id;
        unsigned int i;

        /*
         * Fill the vector table with the DMA address/length of every data
         * segment. The last entry is skipped: it is the buffer holding the
         * vector table itself.
         */
        for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
                bhi_vec->dma_addr = mhi_buf->dma_addr;
                bhi_vec->size = mhi_buf->len;
        }

        dev_dbg(dev, "BHIe programming for RDDM\n");

        /*
         * After the loop, mhi_buf points at the final entry (the vector
         * table buffer); program its address/size into the RXVEC registers.
         */
        mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
                      upper_32_bits(mhi_buf->dma_addr));

        mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
                      lower_32_bits(mhi_buf->dma_addr));

        mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
        sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;

        /* A sequence ID of zero is never used; substitute 1 */
        if (unlikely(!sequence_id))
                sequence_id = 1;

        /* Ring the RX vector doorbell with the chosen sequence ID */
        mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
                            BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
                            sequence_id);

        dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
                &mhi_buf->dma_addr, mhi_buf->len, sequence_id);
}
58
59 /* Collect RDDM buffer during kernel panic */
60 static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
61 {
62         int ret;
63         u32 rx_status;
64         enum mhi_ee_type ee;
65         const u32 delayus = 2000;
66         u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
67         const u32 rddm_timeout_us = 200000;
68         int rddm_retry = rddm_timeout_us / delayus;
69         void __iomem *base = mhi_cntrl->bhie;
70         struct device *dev = &mhi_cntrl->mhi_dev->dev;
71
72         dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
73                 to_mhi_pm_state_str(mhi_cntrl->pm_state),
74                 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
75                 TO_MHI_EXEC_STR(mhi_cntrl->ee));
76
77         /*
78          * This should only be executing during a kernel panic, we expect all
79          * other cores to shutdown while we're collecting RDDM buffer. After
80          * returning from this function, we expect the device to reset.
81          *
82          * Normaly, we read/write pm_state only after grabbing the
83          * pm_lock, since we're in a panic, skipping it. Also there is no
84          * gurantee that this state change would take effect since
85          * we're setting it w/o grabbing pm_lock
86          */
87         mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
88         /* update should take the effect immediately */
89         smp_wmb();
90
91         /*
92          * Make sure device is not already in RDDM. In case the device asserts
93          * and a kernel panic follows, device will already be in RDDM.
94          * Do not trigger SYS ERR again and proceed with waiting for
95          * image download completion.
96          */
97         ee = mhi_get_exec_env(mhi_cntrl);
98         if (ee != MHI_EE_RDDM) {
99                 dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
100                 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
101
102                 dev_dbg(dev, "Waiting for device to enter RDDM\n");
103                 while (rddm_retry--) {
104                         ee = mhi_get_exec_env(mhi_cntrl);
105                         if (ee == MHI_EE_RDDM)
106                                 break;
107
108                         udelay(delayus);
109                 }
110
111                 if (rddm_retry <= 0) {
112                         /* Hardware reset so force device to enter RDDM */
113                         dev_dbg(dev,
114                                 "Did not enter RDDM, do a host req reset\n");
115                         mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
116                                       MHI_SOC_RESET_REQ_OFFSET,
117                                       MHI_SOC_RESET_REQ);
118                         udelay(delayus);
119                 }
120
121                 ee = mhi_get_exec_env(mhi_cntrl);
122         }
123
124         dev_dbg(dev, "Waiting for image download completion, current EE: %s\n",
125                 TO_MHI_EXEC_STR(ee));
126
127         while (retry--) {
128                 ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
129                                          BHIE_RXVECSTATUS_STATUS_BMSK,
130                                          BHIE_RXVECSTATUS_STATUS_SHFT,
131                                          &rx_status);
132                 if (ret)
133                         return -EIO;
134
135                 if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
136                         return 0;
137
138                 udelay(delayus);
139         }
140
141         ee = mhi_get_exec_env(mhi_cntrl);
142         ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
143
144         dev_err(dev, "Did not complete RDDM transfer\n");
145         dev_err(dev, "Current EE: %s\n", TO_MHI_EXEC_STR(ee));
146         dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
147
148         return -EIO;
149 }
150
151 /* Download RDDM image from device */
152 int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
153 {
154         void __iomem *base = mhi_cntrl->bhie;
155         u32 rx_status;
156
157         if (in_panic)
158                 return __mhi_download_rddm_in_panic(mhi_cntrl);
159
160         /* Wait for the image download to complete */
161         wait_event_timeout(mhi_cntrl->state_event,
162                            mhi_read_reg_field(mhi_cntrl, base,
163                                               BHIE_RXVECSTATUS_OFFS,
164                                               BHIE_RXVECSTATUS_STATUS_BMSK,
165                                               BHIE_RXVECSTATUS_STATUS_SHFT,
166                                               &rx_status) || rx_status,
167                            msecs_to_jiffies(mhi_cntrl->timeout_ms));
168
169         return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
170 }
171 EXPORT_SYMBOL_GPL(mhi_download_rddm_img);
172
173 static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
174                             const struct mhi_buf *mhi_buf)
175 {
176         void __iomem *base = mhi_cntrl->bhie;
177         rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
178         u32 tx_status, sequence_id;
179
180         read_lock_bh(pm_lock);
181         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
182                 read_unlock_bh(pm_lock);
183                 return -EIO;
184         }
185
186         mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
187                       upper_32_bits(mhi_buf->dma_addr));
188
189         mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
190                       lower_32_bits(mhi_buf->dma_addr));
191
192         mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
193
194         sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
195         mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
196                             BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
197                             sequence_id);
198         read_unlock_bh(pm_lock);
199
200         /* Wait for the image download to complete */
201         wait_event_timeout(mhi_cntrl->state_event,
202                            MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
203                            mhi_read_reg_field(mhi_cntrl, base,
204                                               BHIE_TXVECSTATUS_OFFS,
205                                               BHIE_TXVECSTATUS_STATUS_BMSK,
206                                               BHIE_TXVECSTATUS_STATUS_SHFT,
207                                               &tx_status) || tx_status,
208                            msecs_to_jiffies(mhi_cntrl->timeout_ms));
209
210         if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
211                 return -EIO;
212
213         return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
214 }
215
216 static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
217                            dma_addr_t dma_addr,
218                            size_t size)
219 {
220         u32 tx_status, val, session_id;
221         int i, ret;
222         void __iomem *base = mhi_cntrl->bhi;
223         rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
224         struct device *dev = &mhi_cntrl->mhi_dev->dev;
225         struct {
226                 char *name;
227                 u32 offset;
228         } error_reg[] = {
229                 { "ERROR_CODE", BHI_ERRCODE },
230                 { "ERROR_DBG1", BHI_ERRDBG1 },
231                 { "ERROR_DBG2", BHI_ERRDBG2 },
232                 { "ERROR_DBG3", BHI_ERRDBG3 },
233                 { NULL },
234         };
235
236         read_lock_bh(pm_lock);
237         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
238                 read_unlock_bh(pm_lock);
239                 goto invalid_pm_state;
240         }
241
242         dev_dbg(dev, "Starting SBL download via BHI\n");
243         mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
244         mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
245                       upper_32_bits(dma_addr));
246         mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
247                       lower_32_bits(dma_addr));
248         mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
249         session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
250         mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
251         read_unlock_bh(pm_lock);
252
253         /* Wait for the image download to complete */
254         ret = wait_event_timeout(mhi_cntrl->state_event,
255                            MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
256                            mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
257                                               BHI_STATUS_MASK, BHI_STATUS_SHIFT,
258                                               &tx_status) || tx_status,
259                            msecs_to_jiffies(mhi_cntrl->timeout_ms));
260         if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
261                 goto invalid_pm_state;
262
263         if (tx_status == BHI_STATUS_ERROR) {
264                 dev_err(dev, "Image transfer failed\n");
265                 read_lock_bh(pm_lock);
266                 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
267                         for (i = 0; error_reg[i].name; i++) {
268                                 ret = mhi_read_reg(mhi_cntrl, base,
269                                                    error_reg[i].offset, &val);
270                                 if (ret)
271                                         break;
272                                 dev_err(dev, "Reg: %s value: 0x%x\n",
273                                         error_reg[i].name, val);
274                         }
275                 }
276                 read_unlock_bh(pm_lock);
277                 goto invalid_pm_state;
278         }
279
280         return (!ret) ? -ETIMEDOUT : 0;
281
282 invalid_pm_state:
283
284         return -EIO;
285 }
286
287 void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
288                          struct image_info *image_info)
289 {
290         int i;
291         struct mhi_buf *mhi_buf = image_info->mhi_buf;
292
293         for (i = 0; i < image_info->entries; i++, mhi_buf++)
294                 mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
295                                   mhi_buf->dma_addr);
296
297         kfree(image_info->mhi_buf);
298         kfree(image_info);
299 }
300
/*
 * Allocate a segmented BHIe image table: enough seg_len-sized coherent DMA
 * buffers to hold alloc_size bytes, plus one extra entry that will hold the
 * BHI vector table describing the other segments.
 *
 * On success, *image_info is set and 0 is returned; on failure, everything
 * allocated so far is released and -ENOMEM is returned.
 */
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
                         struct image_info **image_info,
                         size_t alloc_size)
{
        size_t seg_size = mhi_cntrl->seg_len;
        /* +1: the last segment holds the vector table itself */
        int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
        int i;
        struct image_info *img_info;
        struct mhi_buf *mhi_buf;

        img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
        if (!img_info)
                return -ENOMEM;

        /* Allocate memory for entries */
        img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
                                    GFP_KERNEL);
        if (!img_info->mhi_buf)
                goto error_alloc_mhi_buf;

        /* Allocate and populate vector table */
        mhi_buf = img_info->mhi_buf;
        for (i = 0; i < segments; i++, mhi_buf++) {
                size_t vec_size = seg_size;

                /* Vector table is the last entry; it needs one
                 * bhi_vec_entry per data segment (i == segments - 1 here) */
                if (i == segments - 1)
                        vec_size = sizeof(struct bhi_vec_entry) * i;

                mhi_buf->len = vec_size;
                mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
                                                  &mhi_buf->dma_addr,
                                                  GFP_KERNEL);
                if (!mhi_buf->buf)
                        goto error_alloc_segment;
        }

        img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
        img_info->entries = segments;
        *image_info = img_info;

        return 0;

error_alloc_segment:
        /* Unwind only the segments allocated before the failure; i/mhi_buf
         * point at the entry that failed, so step back first */
        for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
                mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
                                  mhi_buf->dma_addr);

error_alloc_mhi_buf:
        kfree(img_info);

        return -ENOMEM;
}
354
355 static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
356                               const struct firmware *firmware,
357                               struct image_info *img_info)
358 {
359         size_t remainder = firmware->size;
360         size_t to_cpy;
361         const u8 *buf = firmware->data;
362         int i = 0;
363         struct mhi_buf *mhi_buf = img_info->mhi_buf;
364         struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
365
366         while (remainder) {
367                 to_cpy = min(remainder, mhi_buf->len);
368                 memcpy(mhi_buf->buf, buf, to_cpy);
369                 bhi_vec->dma_addr = mhi_buf->dma_addr;
370                 bhi_vec->size = to_cpy;
371
372                 buf += to_cpy;
373                 remainder -= to_cpy;
374                 i++;
375                 bhi_vec++;
376                 mhi_buf++;
377         }
378 }
379
/*
 * Firmware loading work item: waits for the device to reach PBL, downloads
 * the SBL (or EDL) image over BHI, then — for full-boot-chain (fbc)
 * controllers — builds the BHIe table and downloads the full image once the
 * device reaches SBL.
 */
void mhi_fw_load_worker(struct work_struct *work)
{
        struct mhi_controller *mhi_cntrl;
        const struct firmware *firmware = NULL;
        struct image_info *image_info;
        struct device *dev;
        const char *fw_name;
        void *buf;
        dma_addr_t dma_addr;
        size_t size;
        int ret;

        mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
        dev = &mhi_cntrl->mhi_dev->dev;

        dev_dbg(dev, "Waiting for device to enter PBL from: %s\n",
                TO_MHI_EXEC_STR(mhi_cntrl->ee));

        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 MHI_IN_PBL(mhi_cntrl->ee) ||
                                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));

        if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                dev_err(dev, "Device MHI is not in valid state\n");
                return;
        }

        /* If device is in pass through, do reset to ready state transition */
        if (mhi_cntrl->ee == MHI_EE_PTHRU)
                goto fw_load_ee_pthru;

        /* In EDL mode load the emergency-download image instead */
        fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
                mhi_cntrl->edl_image : mhi_cntrl->fw_image;

        /* fbc download requires both sbl_size and seg_len to be set */
        if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
                                                     !mhi_cntrl->seg_len))) {
                dev_err(dev,
                        "No firmware image defined or !sbl_size || !seg_len\n");
                return;
        }

        /*
         * NOTE(review): reject_firmware() is presumably the Linux-libre
         * substitution for request_firmware() — confirm against the
         * deblobbing scripts.
         */
        ret = reject_firmware(&firmware, fw_name, dev);
        if (ret) {
                dev_err(dev, "Error loading firmware: %d\n", ret);
                return;
        }

        size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;

        /* SBL size provided is maximum size, not necessarily the image size */
        if (size > firmware->size)
                size = firmware->size;

        buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
        if (!buf) {
                release_firmware(firmware);
                return;
        }

        /* Download SBL image */
        memcpy(buf, firmware->data, size);
        ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
        mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);

        /*
         * Release the firmware now unless the fbc path still needs its data
         * to populate the BHIe table below.
         */
        if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
                release_firmware(firmware);

        /* Error or in EDL mode, we're done */
        if (ret || mhi_cntrl->ee == MHI_EE_EDL)
                return;

        write_lock_irq(&mhi_cntrl->pm_lock);
        mhi_cntrl->dev_state = MHI_STATE_RESET;
        write_unlock_irq(&mhi_cntrl->pm_lock);

        /*
         * If we're doing fbc, populate vector tables while
         * device transitioning into MHI READY state
         */
        if (mhi_cntrl->fbc_download) {
                ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
                                           firmware->size);
                if (ret)
                        goto error_alloc_fw_table;

                /* Load the firmware into BHIE vec table */
                mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
        }

fw_load_ee_pthru:
        /* Transitioning into MHI RESET->READY state */
        ret = mhi_ready_state_transition(mhi_cntrl);

        if (!mhi_cntrl->fbc_download)
                return;

        if (ret)
                goto error_read;

        /* Wait for the SBL event */
        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->ee == MHI_EE_SBL ||
                                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));

        if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                dev_err(dev, "MHI did not enter SBL\n");
                goto error_read;
        }

        /* Start full firmware image download */
        image_info = mhi_cntrl->fbc_image;
        ret = mhi_fw_load_amss(mhi_cntrl,
                               /* Vector table is the last entry */
                               &image_info->mhi_buf[image_info->entries - 1]);

        release_firmware(firmware);

        return;

error_read:
        mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
        mhi_cntrl->fbc_image = NULL;

error_alloc_fw_table:
        release_firmware(firmware);
}