common: Drop linux/delay.h from common header
[oweals/u-boot.git] / drivers / ufs / ufs.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /**
3  * ufs.c - Universal Flash Subsystem (UFS) driver
4  *
5  * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported
6  * to u-boot.
7  *
8  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
9  */
10
11 #include <charset.h>
12 #include <common.h>
13 #include <dm.h>
14 #include <log.h>
15 #include <dm/device_compat.h>
16 #include <dm/devres.h>
17 #include <dm/lists.h>
18 #include <dm/device-internal.h>
19 #include <malloc.h>
20 #include <hexdump.h>
21 #include <scsi.h>
22 #include <linux/delay.h>
23
24 #include <linux/dma-mapping.h>
25
26 #include "ufs.h"
27
/* Interrupt sources enabled for normal operation: transfer-request and
 * task-request completion, plus all error interrupts.
 */
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of retries for a general UIC command  */
#define UFS_UIC_COMMAND_RETRIES 3

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* maximum timeout in ms for a general UIC command */
#define UFS_UIC_CMD_TIMEOUT	1000
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Only use one Task Tag for all requests (single outstanding request) */
#define TASK_TAG	0

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Maximum number of bytes one PRDT entry may describe */
#define MAX_PRDT_ENTRY	262144

/* maximum bytes per request */
#define UFS_MAX_BYTES	(128 * 256 * 1024)

/* Forward declarations for helpers referenced before their definitions */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba);
static inline void ufshcd_hba_stop(struct ufs_hba *hba);
static int ufshcd_hba_enable(struct ufs_hba *hba);
63
64 /*
65  * ufshcd_wait_for_register - wait for register value to change
66  */
67 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
68                                     u32 val, unsigned long timeout_ms)
69 {
70         int err = 0;
71         unsigned long start = get_timer(0);
72
73         /* ignore bits that we don't intend to wait on */
74         val = val & mask;
75
76         while ((ufshcd_readl(hba, reg) & mask) != val) {
77                 if (get_timer(start) > timeout_ms) {
78                         if ((ufshcd_readl(hba, reg) & mask) != val)
79                                 err = -ETIMEDOUT;
80                         break;
81                 }
82         }
83
84         return err;
85 }
86
87 /**
88  * ufshcd_init_pwr_info - setting the POR (power on reset)
89  * values in hba power info
90  */
91 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
92 {
93         hba->pwr_info.gear_rx = UFS_PWM_G1;
94         hba->pwr_info.gear_tx = UFS_PWM_G1;
95         hba->pwr_info.lane_rx = 1;
96         hba->pwr_info.lane_tx = 1;
97         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
98         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
99         hba->pwr_info.hs_rate = 0;
100 }
101
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 *
 * NOTE(review): names[] is indexed directly with pwr_rx/pwr_tx, so the
 * table order must match the power-mode enum values — confirm against
 * the mode definitions in ufs.h.
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}
125
126 /**
127  * ufshcd_ready_for_uic_cmd - Check if controller is ready
128  *                            to accept UIC commands
129  */
130 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
131 {
132         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
133                 return true;
134         else
135                 return false;
136 }
137
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 *
 * Reads the result code from the low bits of UIC command argument-2
 * register after command completion.
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}
146
/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 *
 * The attribute value is returned in the UIC command argument-3 register.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
154
155 /**
156  * ufshcd_is_device_present - Check if any device connected to
157  *                            the host controller
158  */
159 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
160 {
161         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
162                                                 DEVICE_PRESENT) ? true : false;
163 }
164
/**
 * ufshcd_send_uic_cmd - UFS Interconnect layer command API
 *
 * Writes the three argument registers and the command opcode, then polls
 * the interrupt status register until UIC completion, an error interrupt,
 * or UFS_UIC_CMD_TIMEOUT ms elapse. On success the result code and the
 * returned attribute value are stored back into @uic_cmd (argument2/3).
 *
 * Return: 0 on success, -EIO if the controller is not ready to accept
 * UIC commands, -ETIMEDOUT on timeout, -1 if an error interrupt fired.
 */
static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	unsigned long start = 0;
	u32 intr_status;
	u32 enabled_intr_status;

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	debug("sending uic command:%d\n", uic_cmd->command);

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		/* only consider interrupt sources we have enabled */
		enabled_intr_status = intr_status & hba->intr_mask;
		/* ack the sampled status bits (presumably write-1-to-clear) */
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"Timedout waiting for UIC response\n");

			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);

			return -1;
		}
	} while (!(enabled_intr_status & UFSHCD_UIC_MASK));

	/* command completed: read back result code and attribute value */
	uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba);
	uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba);

	debug("Sent successfully\n");

	return 0;
}
220
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 *
 * @attr_sel: attribute selector (MIB attribute id, see UIC_GET_ATTR_ID)
 * @attr_set: attribute set type, encoded into argument-2
 * @mib_val: value to write to the attribute
 * @peer: non-zero to address the peer device via DME_PEER_SET
 *
 * Peer accesses are retried up to UFS_UIC_COMMAND_RETRIES times on
 * failure; local accesses are attempted only once.
 *
 * Return: 0 on success, negative error/UIC failure code otherwise.
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
			u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
258
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 *
 * @attr_sel: attribute selector (MIB attribute id, see UIC_GET_ATTR_ID)
 * @mib_val: out parameter for the attribute value; only written on success
 *           and may be NULL if the caller does not need the value
 * @peer: non-zero to address the peer device via DME_PEER_GET
 *
 * Peer accesses are retried up to UFS_UIC_COMMAND_RETRIES times on
 * failure; local accesses are attempted only once.
 *
 * Return: 0 on success, negative error/UIC failure code otherwise.
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	/* the attribute value comes back in argument-3 of the UIC command */
	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	return ret;
}
297
298 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
299 {
300         u32 tx_lanes, i, err = 0;
301
302         if (!peer)
303                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
304                                &tx_lanes);
305         else
306                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
307                                     &tx_lanes);
308         for (i = 0; i < tx_lanes; i++) {
309                 if (!peer)
310                         err = ufshcd_dme_set(hba,
311                                              UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
312                                              UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
313                                              0);
314                 else
315                         err = ufshcd_dme_peer_set(hba,
316                                         UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
317                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
318                                         0);
319                 if (err) {
320                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
321                                 __func__, peer, i, err);
322                         break;
323                 }
324         }
325
326         return err;
327 }
328
/* Convenience wrapper: disable TX LCC on the peer (device) side */
static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
333
334 /**
335  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
336  *
337  */
338 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
339 {
340         struct uic_command uic_cmd = {0};
341         int ret;
342
343         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
344
345         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
346         if (ret)
347                 dev_dbg(hba->dev,
348                         "dme-link-startup: error code %d\n", ret);
349         return ret;
350 }
351
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 *
 * Writing 0 to the aggregation control register turns the feature off;
 * each request then raises its own completion interrupt.
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
360
361 /**
362  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
363  */
364 static inline int ufshcd_get_lists_status(u32 reg)
365 {
366         return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
367 }
368
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *                      When run-stop registers are set to 1, it indicates the
 *                      host controller that it can process the requests
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	/* allow the controller to process task management requests */
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	/* allow the controller to process transfer requests */
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
381
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: UFS host controller instance
 * @intrs: bitmask of interrupt sources to enable
 *
 * Updates the interrupt enable register and caches the resulting mask in
 * hba->intr_mask so polling loops can filter raw status against it.
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
	u32 rw;

	if (hba->version == UFSHCI_VERSION_10) {
		/*
		 * UFSHCI 1.0: keep the RW bits defined for that version and
		 * OR in only the newly requested interrupt bits.
		 */
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);

	/* remember what is enabled for later status filtering */
	hba->intr_mask = set;
}
401
402 /**
403  * ufshcd_make_hba_operational - Make UFS controller operational
404  *
405  * To bring UFS host controller to operational state,
406  * 1. Enable required interrupts
407  * 2. Configure interrupt aggregation
408  * 3. Program UTRL and UTMRL base address
409  * 4. Configure run-stop-registers
410  *
411  */
412 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
413 {
414         int err = 0;
415         u32 reg;
416
417         /* Enable required interrupts */
418         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
419
420         /* Disable interrupt aggregation */
421         ufshcd_disable_intr_aggr(hba);
422
423         /* Configure UTRL and UTMRL base address registers */
424         ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl),
425                       REG_UTP_TRANSFER_REQ_LIST_BASE_L);
426         ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl),
427                       REG_UTP_TRANSFER_REQ_LIST_BASE_H);
428         ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl),
429                       REG_UTP_TASK_REQ_LIST_BASE_L);
430         ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl),
431                       REG_UTP_TASK_REQ_LIST_BASE_H);
432
433         /*
434          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
435          */
436         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
437         if (!(ufshcd_get_lists_status(reg))) {
438                 ufshcd_enable_run_stop_reg(hba);
439         } else {
440                 dev_err(hba->dev,
441                         "Host controller not ready to process requests");
442                 err = -EIO;
443                 goto out;
444         }
445
446 out:
447         return err;
448 }
449
/**
 * ufshcd_link_startup - Initialize unipro link startup
 *
 * Runs the DME link-startup handshake with vendor pre/post notifications,
 * retrying up to DME_LINKSTARTUP_RETRIES times and resetting the local
 * Uni-Pro stack between attempts. On success it records the POR power
 * settings, applies the broken-LCC quirk if set, and brings the host
 * controller to the operational state.
 *
 * Return: 0 on success, -ENXIO if no device is detected, or another
 * negative error code on failure.
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = true;

link_startup:
	do {
		ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... retire */
		goto out;

	/*
	 * NOTE(review): the entire startup sequence is deliberately run a
	 * second time; presumably a device compatibility workaround carried
	 * over from the Linux driver — confirm before changing.
	 */
	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);

	return ret;
}
512
/**
 * ufshcd_hba_stop - Send controller to reset state
 *
 * Clears CONTROLLER_ENABLE and busy-waits up to 10 ms for the controller
 * to confirm the disabled state; logs an error on timeout.
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
				       10);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
527
528 /**
529  * ufshcd_is_hba_active - Get controller state
530  */
531 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
532 {
533         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
534                 ? false : true;
535 }
536
/**
 * ufshcd_hba_start - Start controller initialization sequence
 *
 * Setting the HCE bit kicks off the controller's internal init sequence.
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
544
/**
 * ufshcd_hba_enable - initialize the controller
 *
 * Resets the controller if it is currently enabled, starts the HCE
 * initialization sequence, and polls (with 5 ms steps, up to 10 retries)
 * until the controller reports itself active. Vendor hooks are notified
 * before and after the enable.
 *
 * Return: 0 on success, -EIO if the controller never became active.
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	mdelay(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev, "Controller enable failed\n");
			return -EIO;
		}
		mdelay(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
592
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *                              memory offsets
 *
 * Points the single transfer request descriptor at the command
 * descriptor, records the response-UPIU and PRDT offsets/length (the
 * >> 2 converts byte offsets to 32-bit dword units), and caches raw
 * pointers to the request, response and PRDT regions in @hba.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	u16 response_offset;
	u16 prdt_offset;

	utrdlp = hba->utrdl;
	cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;

	/* 64-bit command descriptor base address, split low/high */
	utrdlp->command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
	utrdlp->command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));

	response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
	utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
	utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

	/* CPU-side views of the same command descriptor regions */
	hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
	hba->ucd_rsp_ptr =
		(struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
	hba->ucd_prdt_ptr =
		(struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
}
625
626 /**
627  * ufshcd_memory_alloc - allocate memory for host memory space data structures
628  */
629 static int ufshcd_memory_alloc(struct ufs_hba *hba)
630 {
631         /* Allocate one Transfer Request Descriptor
632          * Should be aligned to 1k boundary.
633          */
634         hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
635         if (!hba->utrdl) {
636                 dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
637                 return -ENOMEM;
638         }
639
640         /* Allocate one Command Descriptor
641          * Should be aligned to 1k boundary.
642          */
643         hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
644         if (!hba->ucdl) {
645                 dev_err(hba->dev, "Command descriptor memory allocation failed\n");
646                 return -ENOMEM;
647         }
648
649         return 0;
650 }
651
652 /**
653  * ufshcd_get_intr_mask - Get the interrupt bit mask
654  */
655 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
656 {
657         u32 intr_mask = 0;
658
659         switch (hba->version) {
660         case UFSHCI_VERSION_10:
661                 intr_mask = INTERRUPT_MASK_ALL_VER_10;
662                 break;
663         case UFSHCI_VERSION_11:
664         case UFSHCI_VERSION_20:
665                 intr_mask = INTERRUPT_MASK_ALL_VER_11;
666                 break;
667         case UFSHCI_VERSION_21:
668         default:
669                 intr_mask = INTERRUPT_MASK_ALL_VER_21;
670                 break;
671         }
672
673         return intr_mask;
674 }
675
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 *
 * Reads the UFSHCI version register directly.
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}
683
/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 *
 * Extracts the 3-bit UPMCRS field starting at bit 8 of the controller
 * status register.
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
691
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 * descriptor according to request
 * @req_desc: transfer request descriptor to fill in
 * @upiu_flags: out parameter, receives the UPIU command flags matching
 *              the data direction
 * @cmd_dir: DMA_FROM_DEVICE (read), DMA_TO_DEVICE (write) or DMA_NONE
 */
static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
					u32 *upiu_flags,
					enum dma_data_direction cmd_dir)
{
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	/* NOTE(review): 0x1 is the command type field value — confirm its
	 * meaning against the UFSHCI spec before changing.
	 */
	dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);

	/* Enable Interrupt for command */
	dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	/* no scatter/gather entries until a data command fills the PRDT */
	req_desc->prd_table_length = 0;
}
735
/*
 * ufshcd_prepare_utp_query_req_upiu - build a Query Request UPIU in the
 * command descriptor from hba->dev_cmd.query, and zero the response slot.
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
					      u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header.dword_0 =
				UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
						  upiu_flags, 0, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
				UPIU_HEADER_DWORD(0, query->request.query_func,
						  0, 0);

	/* Data segment length only need for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
				UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);

	/* Copy the Descriptor: it sits immediately after the request UPIU */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	/* clear the response slot before the controller fills it */
	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
767
/*
 * ufshcd_prepare_utp_nop_upiu - build a NOP OUT UPIU in the command
 * descriptor and zero the response slot.
 */
static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	/* NOTE(review): NOP OUT hard-codes task tag 0x1f rather than the
	 * TASK_TAG constant used elsewhere — confirm this is intentional.
	 */
	ucd_req_ptr->header.dword_0 =
			UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 0x1f);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
783
784 /**
785  * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
786  *                           for Device Management Purposes
787  */
788 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
789                                    enum dev_cmd_type cmd_type)
790 {
791         u32 upiu_flags;
792         int ret = 0;
793         struct utp_transfer_req_desc *req_desc = hba->utrdl;
794
795         hba->dev_cmd.type = cmd_type;
796
797         ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
798         switch (cmd_type) {
799         case DEV_CMD_TYPE_QUERY:
800                 ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
801                 break;
802         case DEV_CMD_TYPE_NOP:
803                 ufshcd_prepare_utp_nop_upiu(hba);
804                 break;
805         default:
806                 ret = -EINVAL;
807         }
808
809         return ret;
810 }
811
/*
 * ufshcd_send_command - ring the doorbell for @task_tag and poll until the
 * transfer request completes, errors out, or QUERY_REQ_TIMEOUT ms elapse.
 *
 * Return: 0 on completion, -ETIMEDOUT on timeout, -1 on error interrupt.
 */
static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	unsigned long start;
	u32 intr_status;
	u32 enabled_intr_status;

	/* setting the doorbell bit starts execution of the request */
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		/* only consider interrupt sources we have enabled */
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > QUERY_REQ_TIMEOUT) {
			dev_err(hba->dev,
				"Timedout waiting for UTP response\n");

			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);

			return -1;
		}
	} while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));

	return 0;
}
843
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 *
 * The transaction type is the most significant byte of the big-endian
 * response UPIU header dword 0, hence the shift by 24.
 */
static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
851
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 *
 * The controller writes the OCS into dword 2 of the transfer request
 * descriptor header on completion.
 */
static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
{
	return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
}
860
/* Extract the result field from dword 1 of a response UPIU header */
static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}
865
866 static int ufshcd_check_query_response(struct ufs_hba *hba)
867 {
868         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
869
870         /* Get the UPIU response */
871         query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
872                                 UPIU_RSP_CODE_OFFSET;
873         return query_res->response;
874 }
875
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 *
 * Copies the query response OSF block into hba->dev_cmd.query.response,
 * and for READ_DESC requests also copies the returned descriptor (which
 * follows the response UPIU in memory) into the caller's buffer.
 *
 * Return: 0 on success, -EINVAL if the response would overflow the
 * caller's descriptor buffer.
 */
static int ufshcd_copy_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		/* descriptor payload starts right after the response UPIU */
		u8 *descp = (u8 *)hba->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len =
			be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer",
				 __func__);
			return -EINVAL;
		}
	}

	return 0;
}
911
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba: UFS host controller instance
 * @cmd_type: DEV_CMD_TYPE_NOP or DEV_CMD_TYPE_QUERY
 * @timeout: timeout in ms (currently not used by this implementation)
 *
 * Composes the device-management UPIU, issues it on the single shared
 * task tag and decodes the transaction-specific response.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type,
			       int timeout)
{
	int err;
	int resp;

	/* Build the device-management request in the command descriptor */
	err = ufshcd_comp_devman_upiu(hba, cmd_type);
	if (err)
		return err;

	err = ufshcd_send_command(hba, TASK_TAG);
	if (err)
		return err;

	/* Overall Command Status must be success before parsing the UPIU */
	err = ufshcd_get_tr_ocs(hba);
	if (err) {
		dev_err(hba->dev, "Error in OCS:%d\n", err);
		return -EINVAL;
	}

	/* Dispatch on the response transaction type */
	resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba);
		if (!err)
			err = ufshcd_copy_query_response(hba);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
			__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
			__func__, resp);
	}

	return err;
}
958
959 /**
960  * ufshcd_init_query() - init the query response and request parameters
961  */
962 static inline void ufshcd_init_query(struct ufs_hba *hba,
963                                      struct ufs_query_req **request,
964                                      struct ufs_query_res **response,
965                                      enum query_opcode opcode,
966                                      u8 idn, u8 index, u8 selector)
967 {
968         *request = &hba->dev_cmd.query.request;
969         *response = &hba->dev_cmd.query.response;
970         memset(*request, 0, sizeof(struct ufs_query_req));
971         memset(*response, 0, sizeof(struct ufs_query_res));
972         (*request)->upiu_req.opcode = opcode;
973         (*request)->upiu_req.idn = idn;
974         (*request)->upiu_req.index = index;
975         (*request)->upiu_req.selector = selector;
976 }
977
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: UFS host controller instance
 * @opcode: flag operation (set/clear/toggle/read)
 * @idn: flag IDN to operate on
 * @flag_res: where to store the flag value; required for read requests,
 *	      may be NULL for write-type requests
 *
 * Return: 0 on success, negative errno otherwise.
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);

	/* Pick read vs write query function from the opcode */
	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
				__func__);
			err = -EINVAL;
			goto out;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out;
	}

	/* Flag value is bit 0 of the low byte of the response value field */
	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out:
	return err;
}
1032
1033 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
1034                                    enum query_opcode opcode,
1035                                    enum flag_idn idn, bool *flag_res)
1036 {
1037         int ret;
1038         int retries;
1039
1040         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
1041                 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
1042                 if (ret)
1043                         dev_dbg(hba->dev,
1044                                 "%s: failed with error %d, retries %d\n",
1045                                 __func__, ret, retries);
1046                 else
1047                         break;
1048         }
1049
1050         if (ret)
1051                 dev_err(hba->dev,
1052                         "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
1053                         __func__, opcode, idn, ret, retries);
1054         return ret;
1055 }
1056
1057 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
1058                                      enum query_opcode opcode,
1059                                      enum desc_idn idn, u8 index, u8 selector,
1060                                      u8 *desc_buf, int *buf_len)
1061 {
1062         struct ufs_query_req *request = NULL;
1063         struct ufs_query_res *response = NULL;
1064         int err;
1065
1066         if (!desc_buf) {
1067                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1068                         __func__, opcode);
1069                 err = -EINVAL;
1070                 goto out;
1071         }
1072
1073         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1074                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1075                         __func__, *buf_len);
1076                 err = -EINVAL;
1077                 goto out;
1078         }
1079
1080         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1081                           selector);
1082         hba->dev_cmd.query.descriptor = desc_buf;
1083         request->upiu_req.length = cpu_to_be16(*buf_len);
1084
1085         switch (opcode) {
1086         case UPIU_QUERY_OPCODE_WRITE_DESC:
1087                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1088                 break;
1089         case UPIU_QUERY_OPCODE_READ_DESC:
1090                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1091                 break;
1092         default:
1093                 dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1094                         __func__, opcode);
1095                 err = -EINVAL;
1096                 goto out;
1097         }
1098
1099         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1100
1101         if (err) {
1102                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
1103                         __func__, opcode, idn, index, err);
1104                 goto out;
1105         }
1106
1107         hba->dev_cmd.query.descriptor = NULL;
1108         *buf_len = be16_to_cpu(response->upiu_res.length);
1109
1110 out:
1111         return err;
1112 }
1113
1114 /**
1115  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
1116  */
1117 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
1118                                   enum desc_idn idn, u8 index, u8 selector,
1119                                   u8 *desc_buf, int *buf_len)
1120 {
1121         int err;
1122         int retries;
1123
1124         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1125                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
1126                                                 selector, desc_buf, buf_len);
1127                 if (!err || err == -EINVAL)
1128                         break;
1129         }
1130
1131         return err;
1132 }
1133
/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 * @hba: UFS host controller instance
 * @desc_id: descriptor IDN
 * @desc_index: descriptor index
 * @desc_length: out parameter, filled from the header's length byte
 *
 * Reads just the QUERY_DESC_HDR_SIZE header bytes of the descriptor and
 * extracts its self-reported total length.
 *
 * Return: 0 on success, negative errno otherwise. Note: *desc_length is
 * still written from the header when the type-mismatch check fails.
 */
static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, header,
					    &header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		/* Device returned a different descriptor type than requested */
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			 desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];

	return ret;
}
1166
1167 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
1168 {
1169         int err;
1170
1171         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
1172                                       &hba->desc_size.dev_desc);
1173         if (err)
1174                 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
1175
1176         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
1177                                       &hba->desc_size.pwr_desc);
1178         if (err)
1179                 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
1180
1181         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
1182                                       &hba->desc_size.interc_desc);
1183         if (err)
1184                 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
1185
1186         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
1187                                       &hba->desc_size.conf_desc);
1188         if (err)
1189                 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
1190
1191         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
1192                                       &hba->desc_size.unit_desc);
1193         if (err)
1194                 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
1195
1196         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
1197                                       &hba->desc_size.geom_desc);
1198         if (err)
1199                 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
1200
1201         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
1202                                       &hba->desc_size.hlth_desc);
1203         if (err)
1204                 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
1205 }
1206
/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
 * @hba: UFS host controller instance
 * @desc_id: descriptor IDN
 * @desc_len: out parameter, set to the probed length for the IDN
 *
 * Lengths come from hba->desc_size (filled by ufshcd_init_desc_sizes());
 * string descriptors always report QUERY_DESC_MAX_SIZE, reserved IDNs
 * report 0.
 *
 * Return: 0 on success, -EINVAL for an unknown IDN (with *desc_len = 0).
 */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				 int *desc_len)
{
	switch (desc_id) {
	case QUERY_DESC_IDN_DEVICE:
		*desc_len = hba->desc_size.dev_desc;
		break;
	case QUERY_DESC_IDN_POWER:
		*desc_len = hba->desc_size.pwr_desc;
		break;
	case QUERY_DESC_IDN_GEOMETRY:
		*desc_len = hba->desc_size.geom_desc;
		break;
	case QUERY_DESC_IDN_CONFIGURATION:
		*desc_len = hba->desc_size.conf_desc;
		break;
	case QUERY_DESC_IDN_UNIT:
		*desc_len = hba->desc_size.unit_desc;
		break;
	case QUERY_DESC_IDN_INTERCONNECT:
		*desc_len = hba->desc_size.interc_desc;
		break;
	case QUERY_DESC_IDN_STRING:
		*desc_len = QUERY_DESC_MAX_SIZE;
		break;
	case QUERY_DESC_IDN_HEALTH:
		*desc_len = hba->desc_size.hlth_desc;
		break;
	case QUERY_DESC_IDN_RFU_0:
	case QUERY_DESC_IDN_RFU_1:
		*desc_len = 0;
		break;
	default:
		*desc_len = 0;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
1250
1251 /**
1252  * ufshcd_read_desc_param - read the specified descriptor parameter
1253  *
1254  */
1255 int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
1256                            int desc_index, u8 param_offset, u8 *param_read_buf,
1257                            u8 param_size)
1258 {
1259         int ret;
1260         u8 *desc_buf;
1261         int buff_len;
1262         bool is_kmalloc = true;
1263
1264         /* Safety check */
1265         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
1266                 return -EINVAL;
1267
1268         /* Get the max length of descriptor from structure filled up at probe
1269          * time.
1270          */
1271         ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
1272
1273         /* Sanity checks */
1274         if (ret || !buff_len) {
1275                 dev_err(hba->dev, "%s: Failed to get full descriptor length",
1276                         __func__);
1277                 return ret;
1278         }
1279
1280         /* Check whether we need temp memory */
1281         if (param_offset != 0 || param_size < buff_len) {
1282                 desc_buf = kmalloc(buff_len, GFP_KERNEL);
1283                 if (!desc_buf)
1284                         return -ENOMEM;
1285         } else {
1286                 desc_buf = param_read_buf;
1287                 is_kmalloc = false;
1288         }
1289
1290         /* Request for full descriptor */
1291         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
1292                                             desc_id, desc_index, 0, desc_buf,
1293                                             &buff_len);
1294
1295         if (ret) {
1296                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
1297                         __func__, desc_id, desc_index, param_offset, ret);
1298                 goto out;
1299         }
1300
1301         /* Sanity check */
1302         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
1303                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
1304                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
1305                 ret = -EINVAL;
1306                 goto out;
1307         }
1308
1309         /* Check wherher we will not copy more data, than available */
1310         if (is_kmalloc && param_size > buff_len)
1311                 param_size = buff_len;
1312
1313         if (is_kmalloc)
1314                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
1315 out:
1316         if (is_kmalloc)
1317                 kfree(desc_buf);
1318         return ret;
1319 }
1320
/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(uint8_t *val)
{
	if (!val)
		return;

	/* printable ASCII is the inclusive range 0x20 (' ') .. 0x7e ('~') */
	if (*val >= 0x20 && *val <= 0x7e)
		return;

	*val = ' ';
}
1330
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 * @hba: UFS host controller instance
 * @cmd: UIC command to issue
 *
 * After sending the command, polls the host's UPMCRS field until it
 * reports PWR_LOCAL, giving up after UFS_UIC_CMD_TIMEOUT ms.
 *
 * Return: 0 on success; negative errno if sending failed; on timeout,
 * the non-OK UPMCRS status value, or -1 if the status still reads PWR_OK.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	unsigned long start = 0;
	u8 status;
	int ret;

	ret = ufshcd_send_uic_cmd(hba, cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);

		return ret;
	}

	/* Busy-poll the power mode change request status */
	start = get_timer(0);
	do {
		status = ufshcd_get_upmcrs(hba);
		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
				cmd->command, status);
			ret = (status != PWR_OK) ? status : -1;
			break;
		}
	} while (status != PWR_LOCAL);

	return ret;
}
1365
1366 /**
1367  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
1368  *                              using DME_SET primitives.
1369  */
1370 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
1371 {
1372         struct uic_command uic_cmd = {0};
1373         int ret;
1374
1375         uic_cmd.command = UIC_CMD_DME_SET;
1376         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
1377         uic_cmd.argument3 = mode;
1378         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
1379
1380         return ret;
1381 }
1382
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu - build the command UPIU for a SCSI request
 * @hba: UFS host controller instance
 * @pccb: SCSI command block being translated
 * @upiu_flags: flags for the UPIU header (data direction etc.)
 *
 * Fills hba->ucd_req_ptr with a SCSI command UPIU (CDB zero-padded to
 * UFS_CDB_SIZE and truncated if longer) and clears the response UPIU area.
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
				      struct scsi_cmd *pccb, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	unsigned int cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
			UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
					  pccb->lun, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
			UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);

	/* Copy the CDB, clamped to the UPIU's fixed CDB field size */
	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);

	/* Start from a clean response UPIU for this transfer */
	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
1408
/*
 * Fill one PRDT scatter entry for @len bytes starting at @buf. Callers
 * pass the byte count minus one; the low two bits of the size field are
 * forced to 1 via GENMASK(1, 0) — presumably the UFSHCI dword-granularity
 * encoding of the data byte count; TODO(review): confirm against the
 * UFSHCI spec.
 */
static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
				     unsigned char *buf, ulong len)
{
	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
	/* 64-bit buffer address split across two little-endian words */
	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
}
1416
/*
 * Build the PRDT for @pccb's data buffer. The transfer is split into
 * MAX_PRDT_ENTRY-byte chunks: every entry except the last covers a full
 * chunk, the last covers the remainder. prepare_prdt_desc() takes the
 * byte count minus one, hence the "- 1" on each length below.
 */
static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
	ulong datalen = pccb->datalen;
	int table_length;
	u8 *buf;
	int i;

	/* Zero-length transfer needs no PRDT entries */
	if (!datalen) {
		req_desc->prd_table_length = 0;
		return;
	}

	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
	buf = pccb->pdata;
	i = table_length;
	/* Fill the first table_length - 1 entries with full-size chunks */
	while (--i) {
		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
				  MAX_PRDT_ENTRY - 1);
		buf += MAX_PRDT_ENTRY;
		datalen -= MAX_PRDT_ENTRY;
	}

	/* i == 0 here: last entry holds whatever is left over */
	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);

	req_desc->prd_table_length = table_length;
}
1445
1446 static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
1447 {
1448         struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
1449         struct utp_transfer_req_desc *req_desc = hba->utrdl;
1450         u32 upiu_flags;
1451         int ocs, result = 0;
1452         u8 scsi_status;
1453
1454         ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
1455         ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
1456         prepare_prdt_table(hba, pccb);
1457
1458         ufshcd_send_command(hba, TASK_TAG);
1459
1460         ocs = ufshcd_get_tr_ocs(hba);
1461         switch (ocs) {
1462         case OCS_SUCCESS:
1463                 result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
1464                 switch (result) {
1465                 case UPIU_TRANSACTION_RESPONSE:
1466                         result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);
1467
1468                         scsi_status = result & MASK_SCSI_STATUS;
1469                         if (scsi_status)
1470                                 return -EINVAL;
1471
1472                         break;
1473                 case UPIU_TRANSACTION_REJECT_UPIU:
1474                         /* TODO: handle Reject UPIU Response */
1475                         dev_err(hba->dev,
1476                                 "Reject UPIU not fully implemented\n");
1477                         return -EINVAL;
1478                 default:
1479                         dev_err(hba->dev,
1480                                 "Unexpected request response code = %x\n",
1481                                 result);
1482                         return -EINVAL;
1483                 }
1484                 break;
1485         default:
1486                 dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
1487                 return -EINVAL;
1488         }
1489
1490         return 0;
1491 }
1492
1493 static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
1494                                    int desc_index, u8 *buf, u32 size)
1495 {
1496         return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
1497 }
1498
1499 static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
1500 {
1501         return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
1502 }
1503
/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: UFS host controller instance
 * @desc_index: string descriptor index
 * @buf: buffer receiving the descriptor (and, if @ascii, the converted text)
 * @size: size of @buf in bytes
 * @ascii: if true, convert the UTF-16 payload to UTF-8 in place after the
 *	   QUERY_DESC_HDR_SIZE header and update the length byte
 *
 * Return: 0 on success, negative errno otherwise.
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
			    u8 *buf, u32 size, bool ascii)
{
	int err = 0;

	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
			       size);

	if (err) {
		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
			__func__, QUERY_REQ_RETRIES, err);
		goto out;
	}

	if (ascii) {
		int desc_len;
		int ascii_len;
		int i;
		u8 *buff_ascii;

		/* First byte of the descriptor is its total length */
		desc_len = buf[0];
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}

		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
		if (!buff_ascii) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		utf16_to_utf8(buff_ascii,
			      (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ascii_len; i++)
			ufshcd_remove_non_printable(&buff_ascii[i]);

		/* Rewrite the payload in @buf with the converted string */
		memset(buf + QUERY_DESC_HDR_SIZE, 0,
		       size - QUERY_DESC_HDR_SIZE);
		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
		kfree(buff_ascii);
	}
out:
	return err;
}
1564
/**
 * ufs_get_device_desc - read manufacturer ID and model name from the device
 * @hba: UFS host controller instance
 * @dev_desc: out parameter, wmanufacturerid and model are filled in
 *
 * Reads the Device descriptor for the manufacturer ID and the product
 * name string index, then reads and converts the product name string.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int ufs_get_device_desc(struct ufs_hba *hba,
			       struct ufs_dev_desc *dev_desc)
{
	int err;
	size_t buff_len;
	u8 model_index;
	u8 *desc_buf;

	/* +1 leaves room for the explicit NUL terminator written below */
	buff_len = max_t(size_t, hba->desc_size.dev_desc,
			 QUERY_DESC_MAX_SIZE + 1);
	desc_buf = kmalloc(buff_len, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	/* Zero-pad entire buffer for string termination. */
	memset(desc_buf, 0, buff_len);

	/* Reuse the buffer for the product name string descriptor */
	err = ufshcd_read_string_desc(hba, model_index, desc_buf,
				      QUERY_DESC_MAX_SIZE, true/*ASCII*/);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
	strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/* Null terminate the model string */
	dev_desc->model[MAX_MODEL_LEN] = '\0';

out:
	kfree(desc_buf);
	return err;
}
1620
/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: UFS host controller instance
 *
 * Queries connected lane counts and maximum gears (HS first, then PWM as
 * fallback) and caches the result in hba->max_pwr_info. A cached valid
 * result is returned immediately.
 *
 * Return: 0 on success, -EINVAL if lanes or gears read as zero.
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	/* Optimistic defaults, downgraded below if HS gears are absent */
	pwr_info->pwr_tx = FAST_MODE;
	pwr_info->pwr_rx = FAST_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
		       &pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
		       &pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
			       &pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	/*
	 * TX gears are read from the peer's RX attributes: the peer's RX
	 * capability is the limit for our TX direction.
	 */
	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			    &pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}
1680
/**
 * ufshcd_change_power_mode - program a new link power mode
 * @hba: UFS host controller instance
 * @pwr_mode: requested gears/lanes/power modes/HS series
 *
 * Skips the change if @pwr_mode matches the currently configured
 * hba->pwr_info. Otherwise programs the PA-layer attributes via DME_SET
 * and triggers the UIC power mode change; on success the new mode is
 * cached in hba->pwr_info.
 *
 * Return: 0 on success (or no-op), negative errno / UIC status otherwise.
 */
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
		       pwr_mode->lane_rx);
	/* Termination is required only for the HS (FAST) modes */
	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
		       pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	/* HS series only applies when either direction runs in HS */
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       pwr_mode->hs_rate);

	/* RX mode in the high nibble, TX mode in the low nibble */
	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
					 pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);

		return ret;
	}

	/* Copy new Power Mode to power info */
	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));

	return ret;
}
1742
1743 /**
1744  * ufshcd_verify_dev_init() - Verify device initialization
1745  *
1746  */
1747 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
1748 {
1749         int retries;
1750         int err;
1751
1752         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
1753                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1754                                           NOP_OUT_TIMEOUT);
1755                 if (!err || err == -ETIMEDOUT)
1756                         break;
1757
1758                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1759         }
1760
1761         if (err)
1762                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
1763
1764         return err;
1765 }
1766
1767 /**
1768  * ufshcd_complete_dev_init() - checks device readiness
1769  */
1770 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
1771 {
1772         int i;
1773         int err;
1774         bool flag_res = 1;
1775
1776         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1777                                       QUERY_FLAG_IDN_FDEVICEINIT, NULL);
1778         if (err) {
1779                 dev_err(hba->dev,
1780                         "%s setting fDeviceInit flag failed with error %d\n",
1781                         __func__, err);
1782                 goto out;
1783         }
1784
1785         /* poll for max. 1000 iterations for fDeviceInit flag to clear */
1786         for (i = 0; i < 1000 && !err && flag_res; i++)
1787                 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
1788                                               QUERY_FLAG_IDN_FDEVICEINIT,
1789                                               &flag_res);
1790
1791         if (err)
1792                 dev_err(hba->dev,
1793                         "%s reading fDeviceInit flag failed with error %d\n",
1794                         __func__, err);
1795         else if (flag_res)
1796                 dev_err(hba->dev,
1797                         "%s fDeviceInit was not cleared by the device\n",
1798                         __func__);
1799
1800 out:
1801         return err;
1802 }
1803
1804 static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
1805 {
1806         hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
1807         hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
1808         hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
1809         hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
1810         hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
1811         hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
1812         hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
1813 }
1814
1815 int ufs_start(struct ufs_hba *hba)
1816 {
1817         struct ufs_dev_desc card = {0};
1818         int ret;
1819
1820         ret = ufshcd_link_startup(hba);
1821         if (ret)
1822                 return ret;
1823
1824         ret = ufshcd_verify_dev_init(hba);
1825         if (ret)
1826                 return ret;
1827
1828         ret = ufshcd_complete_dev_init(hba);
1829         if (ret)
1830                 return ret;
1831
1832         /* Init check for device descriptor sizes */
1833         ufshcd_init_desc_sizes(hba);
1834
1835         ret = ufs_get_device_desc(hba, &card);
1836         if (ret) {
1837                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
1838                         __func__, ret);
1839
1840                 return ret;
1841         }
1842
1843         if (ufshcd_get_max_pwr_mode(hba)) {
1844                 dev_err(hba->dev,
1845                         "%s: Failed getting max supported power mode\n",
1846                         __func__);
1847         } else {
1848                 ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info);
1849                 if (ret) {
1850                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
1851                                 __func__, ret);
1852
1853                         return ret;
1854                 }
1855
1856                 printf("Device at %s up at:", hba->dev->name);
1857                 ufshcd_print_pwr_info(hba);
1858         }
1859
1860         return 0;
1861 }
1862
/**
 * ufshcd_probe() - common probe routine for UFS host controllers
 * @ufs_dev: UFS controller udevice
 * @hba_ops: platform-specific host controller callbacks
 *
 * Configures the child SCSI device limits, maps the controller
 * registers, allocates host memory structures, enables the controller
 * and starts the UFS device.  The init order below (clear interrupts
 * before enabling the HBA, enable before ufs_start) must be preserved.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
{
	struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev);
	struct scsi_platdata *scsi_plat;
	struct udevice *scsi_dev;
	int err;

	/* the bound "ufs_scsi" child carries the SCSI uclass platdata */
	device_find_first_child(ufs_dev, &scsi_dev);
	if (!scsi_dev)
		return -ENODEV;

	scsi_plat = dev_get_uclass_platdata(scsi_dev);
	scsi_plat->max_id = UFSHCD_MAX_ID;
	scsi_plat->max_lun = UFS_MAX_LUNS;
	scsi_plat->max_bytes_per_req = UFS_MAX_BYTES;

	hba->dev = ufs_dev;
	hba->ops = hba_ops;
	hba->mmio_base = (void *)dev_read_addr(ufs_dev);

	/* Set descriptor lengths to specification defaults */
	ufshcd_def_desc_sizes(hba);

	ufshcd_ops_init(hba);

	/* Read capabilities registers */
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* Get UFS version supported by the controller */
	hba->version = ufshcd_get_ufs_version(hba);
	if (hba->version != UFSHCI_VERSION_10 &&
	    hba->version != UFSHCI_VERSION_11 &&
	    hba->version != UFSHCI_VERSION_20 &&
	    hba->version != UFSHCI_VERSION_21)
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->version);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		return err;
	}

	/* Configure Local data structures */
	ufshcd_host_memory_configure(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);

	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		return err;
	}

	err = ufs_start(hba);
	if (err)
		return err;

	return 0;
}
1934
/**
 * ufs_scsi_bind() - bind a "ufs_scsi" child device under a UFS controller
 * @ufs_dev: parent UFS controller udevice
 * @scsi_devp: returns the newly bound SCSI udevice
 *
 * Return: 0 on success, negative error code from device_bind_driver().
 */
int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
{
	return device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
				  scsi_devp);
}
1942
/* SCSI uclass operations for UFS: only command execution is provided */
static struct scsi_ops ufs_ops = {
	.exec		= ufs_scsi_exec,
};
1946
1947 int ufs_probe_dev(int index)
1948 {
1949         struct udevice *dev;
1950
1951         return uclass_get_device(UCLASS_UFS, index, &dev);
1952 }
1953
1954 int ufs_probe(void)
1955 {
1956         struct udevice *dev;
1957         int ret, i;
1958
1959         for (i = 0;; i++) {
1960                 ret = uclass_get_device(UCLASS_UFS, i, &dev);
1961                 if (ret == -ENODEV)
1962                         break;
1963         }
1964
1965         return 0;
1966 }
1967
/* Generic SCSI driver bound as a child of every UFS controller */
U_BOOT_DRIVER(ufs_scsi) = {
	.id = UCLASS_SCSI,
	.name = "ufs_scsi",
	.ops = &ufs_ops,
};