Linux-libre 5.7.5-gnu
librecmc/linux-libre.git: drivers/mmc/core/mmc.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/drivers/mmc/core/mmc.c
4  *
5  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
6  *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
7  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8  */
9
10 #include <linux/err.h>
11 #include <linux/of.h>
12 #include <linux/slab.h>
13 #include <linux/stat.h>
14 #include <linux/pm_runtime.h>
15
16 #include <linux/mmc/host.h>
17 #include <linux/mmc/card.h>
18 #include <linux/mmc/mmc.h>
19
20 #include "core.h"
21 #include "card.h"
22 #include "host.h"
23 #include "bus.h"
24 #include "mmc_ops.h"
25 #include "quirks.h"
26 #include "sd_ops.h"
27 #include "pwrseq.h"
28
29 #define DEFAULT_CMD6_TIMEOUT_MS 500
30 #define MIN_CACHE_EN_TIMEOUT_MS 1600
31
32 static const unsigned int tran_exp[] = {
33         10000,          100000,         1000000,        10000000,
34         0,              0,              0,              0
35 };
36
37 static const unsigned char tran_mant[] = {
38         0,      10,     12,     13,     15,     20,     25,     30,
39         35,     40,     45,     50,     55,     60,     70,     80,
40 };
41
42 static const unsigned int taac_exp[] = {
43         1,      10,     100,    1000,   10000,  100000, 1000000, 10000000,
44 };
45
46 static const unsigned int taac_mant[] = {
47         0,      10,     12,     13,     15,     20,     25,     30,
48         35,     40,     45,     50,     55,     60,     70,     80,
49 };
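/*
 * For example, TRAN_SPEED 0x32 encodes e = 2 and m = 6 in its low seven
 * bits, so mmc_decode_csd() computes tran_exp[2] * tran_mant[6] =
 * 1000000 * 25 = 25 MHz.  The mantissa tables are stored multiplied by
 * ten, which is why the TAAC path divides by ten again: TAAC 0x26
 * (e = 6, m = 4) gives (1000000 * 15 + 9) / 10 = 1.5 ms.
 */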
50
51 #define UNSTUFF_BITS(resp,start,size)                                   \
52         ({                                                              \
53                 const int __size = size;                                \
54                 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
55                 const int __off = 3 - ((start) / 32);                   \
56                 const int __shft = (start) & 31;                        \
57                 u32 __res;                                              \
58                                                                         \
59                 __res = resp[__off] >> __shft;                          \
60                 if (__size + __shft > 32)                               \
61                         __res |= resp[__off-1] << ((32 - __shft) % 32); \
62                 __res & __mask;                                         \
63         })
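/*
 * For example, UNSTUFF_BITS(resp, 104, 24) resolves to __off = 0 and
 * __shft = 8, i.e. (resp[0] >> 8) & 0xffffff, since bits 127:104 of the
 * response live in resp[0].  A field crossing a word boundary, such as
 * UNSTUFF_BITS(resp, 62, 12), additionally ORs in the high part from
 * resp[__off - 1].
 */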
64
65 /*
66  * Given the decoded CSD structure, decode the raw CID to our CID structure.
67  */
68 static int mmc_decode_cid(struct mmc_card *card)
69 {
70         u32 *resp = card->raw_cid;
71
72         /*
73          * The selection of the format here is based upon published
74          * specs from SanDisk and from what people have reported.
75          */
76         switch (card->csd.mmca_vsn) {
77         case 0: /* MMC v1.0 - v1.2 */
78         case 1: /* MMC v1.4 */
79                 card->cid.manfid        = UNSTUFF_BITS(resp, 104, 24);
80                 card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
81                 card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
82                 card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
83                 card->cid.prod_name[3]  = UNSTUFF_BITS(resp, 72, 8);
84                 card->cid.prod_name[4]  = UNSTUFF_BITS(resp, 64, 8);
85                 card->cid.prod_name[5]  = UNSTUFF_BITS(resp, 56, 8);
86                 card->cid.prod_name[6]  = UNSTUFF_BITS(resp, 48, 8);
87                 card->cid.hwrev         = UNSTUFF_BITS(resp, 44, 4);
88                 card->cid.fwrev         = UNSTUFF_BITS(resp, 40, 4);
89                 card->cid.serial        = UNSTUFF_BITS(resp, 16, 24);
90                 card->cid.month         = UNSTUFF_BITS(resp, 12, 4);
91                 card->cid.year          = UNSTUFF_BITS(resp, 8, 4) + 1997;
92                 break;
93
94         case 2: /* MMC v2.0 - v2.2 */
95         case 3: /* MMC v3.1 - v3.3 */
96         case 4: /* MMC v4 */
97                 card->cid.manfid        = UNSTUFF_BITS(resp, 120, 8);
98                 card->cid.oemid         = UNSTUFF_BITS(resp, 104, 16);
99                 card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
100                 card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
101                 card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
102                 card->cid.prod_name[3]  = UNSTUFF_BITS(resp, 72, 8);
103                 card->cid.prod_name[4]  = UNSTUFF_BITS(resp, 64, 8);
104                 card->cid.prod_name[5]  = UNSTUFF_BITS(resp, 56, 8);
105                 card->cid.prv           = UNSTUFF_BITS(resp, 48, 8);
106                 card->cid.serial        = UNSTUFF_BITS(resp, 16, 32);
107                 card->cid.month         = UNSTUFF_BITS(resp, 12, 4);
108                 card->cid.year          = UNSTUFF_BITS(resp, 8, 4) + 1997;
109                 break;
110
111         default:
112                 pr_err("%s: card has unknown MMCA version %d\n",
113                         mmc_hostname(card->host), card->csd.mmca_vsn);
114                 return -EINVAL;
115         }
116
117         return 0;
118 }
119
120 static void mmc_set_erase_size(struct mmc_card *card)
121 {
122         if (card->ext_csd.erase_group_def & 1)
123                 card->erase_size = card->ext_csd.hc_erase_size;
124         else
125                 card->erase_size = card->csd.erase_size;
126
127         mmc_init_erase(card);
128 }
129
130 /*
131  * Given a 128-bit response, decode to our card CSD structure.
132  */
133 static int mmc_decode_csd(struct mmc_card *card)
134 {
135         struct mmc_csd *csd = &card->csd;
136         unsigned int e, m, a, b;
137         u32 *resp = card->raw_csd;
138
139         /*
140          * We only understand CSD structure v1.1 and v1.2.
141          * v1.2 has extra information in bits 15, 11 and 10.
142          * We also support eMMC v4.4 & v4.41.
143          */
144         csd->structure = UNSTUFF_BITS(resp, 126, 2);
145         if (csd->structure == 0) {
146                 pr_err("%s: unrecognised CSD structure version %d\n",
147                         mmc_hostname(card->host), csd->structure);
148                 return -EINVAL;
149         }
150
151         csd->mmca_vsn    = UNSTUFF_BITS(resp, 122, 4);
152         m = UNSTUFF_BITS(resp, 115, 4);
153         e = UNSTUFF_BITS(resp, 112, 3);
154         csd->taac_ns     = (taac_exp[e] * taac_mant[m] + 9) / 10;
155         csd->taac_clks   = UNSTUFF_BITS(resp, 104, 8) * 100;
156
157         m = UNSTUFF_BITS(resp, 99, 4);
158         e = UNSTUFF_BITS(resp, 96, 3);
159         csd->max_dtr      = tran_exp[e] * tran_mant[m];
160         csd->cmdclass     = UNSTUFF_BITS(resp, 84, 12);
161
162         e = UNSTUFF_BITS(resp, 47, 3);
163         m = UNSTUFF_BITS(resp, 62, 12);
164         csd->capacity     = (1 + m) << (e + 2);
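        /*
         * For example, the maximal encoding C_SIZE = 0xfff with
         * C_SIZE_MULT = 7 gives (1 + 4095) << (7 + 2) = 2097152 blocks;
         * devices larger than 2GB report this value and advertise their
         * real size via the EXT_CSD sector count (see mmc_read_ext_csd()).
         */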
165
166         csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
167         csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
168         csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
169         csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
170         csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
171         csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
172         csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
173         csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
174
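        /*
         * The erase group is (ERASE_GRP_SIZE + 1) * (ERASE_GRP_MULT + 1)
         * write blocks; e.g. 31 and 31 with 512-byte write blocks give
         * 32 * 32 = 1024 sectors, i.e. a 512 KiB erase size.
         */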
175         if (csd->write_blkbits >= 9) {
176                 a = UNSTUFF_BITS(resp, 42, 5);
177                 b = UNSTUFF_BITS(resp, 37, 5);
178                 csd->erase_size = (a + 1) * (b + 1);
179                 csd->erase_size <<= csd->write_blkbits - 9;
180         }
181
182         return 0;
183 }
184
185 static void mmc_select_card_type(struct mmc_card *card)
186 {
187         struct mmc_host *host = card->host;
188         u8 card_type = card->ext_csd.raw_card_type;
189         u32 caps = host->caps, caps2 = host->caps2;
190         unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
191         unsigned int avail_type = 0;
192
193         if (caps & MMC_CAP_MMC_HIGHSPEED &&
194             card_type & EXT_CSD_CARD_TYPE_HS_26) {
195                 hs_max_dtr = MMC_HIGH_26_MAX_DTR;
196                 avail_type |= EXT_CSD_CARD_TYPE_HS_26;
197         }
198
199         if (caps & MMC_CAP_MMC_HIGHSPEED &&
200             card_type & EXT_CSD_CARD_TYPE_HS_52) {
201                 hs_max_dtr = MMC_HIGH_52_MAX_DTR;
202                 avail_type |= EXT_CSD_CARD_TYPE_HS_52;
203         }
204
205         if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
206             card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
207                 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
208                 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
209         }
210
211         if (caps & MMC_CAP_1_2V_DDR &&
212             card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
213                 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
214                 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
215         }
216
217         if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
218             card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
219                 hs200_max_dtr = MMC_HS200_MAX_DTR;
220                 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
221         }
222
223         if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
224             card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
225                 hs200_max_dtr = MMC_HS200_MAX_DTR;
226                 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
227         }
228
229         if (caps2 & MMC_CAP2_HS400_1_8V &&
230             card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
231                 hs200_max_dtr = MMC_HS200_MAX_DTR;
232                 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
233         }
234
235         if (caps2 & MMC_CAP2_HS400_1_2V &&
236             card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
237                 hs200_max_dtr = MMC_HS200_MAX_DTR;
238                 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
239         }
240
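        /*
         * HS400 enhanced strobe additionally needs the host to advertise
         * MMC_CAP2_HS400_ES, the card to report a non-zero
         * EXT_CSD_STROBE_SUPPORT, and one of the HS400 card type bits to
         * have been selected above.
         */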
241         if ((caps2 & MMC_CAP2_HS400_ES) &&
242             card->ext_csd.strobe_support &&
243             (avail_type & EXT_CSD_CARD_TYPE_HS400))
244                 avail_type |= EXT_CSD_CARD_TYPE_HS400ES;
245
246         card->ext_csd.hs_max_dtr = hs_max_dtr;
247         card->ext_csd.hs200_max_dtr = hs200_max_dtr;
248         card->mmc_avail_type = avail_type;
249 }
250
251 static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
252 {
253         u8 hc_erase_grp_sz, hc_wp_grp_sz;
254
255         /*
256          * Disable these attributes by default
257          */
258         card->ext_csd.enhanced_area_offset = -EINVAL;
259         card->ext_csd.enhanced_area_size = -EINVAL;
260
261         /*
262          * Enhanced area feature support -- check whether the eMMC
263          * card has the Enhanced area enabled.  If so, export enhanced
264          * card has the Enhanced area enabled.  If so, export the enhanced
265          * area offset and size to user space via a sysfs interface.
266         if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
267             (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
268                 if (card->ext_csd.partition_setting_completed) {
269                         hc_erase_grp_sz =
270                                 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
271                         hc_wp_grp_sz =
272                                 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
273
274                         /*
275                          * calculate the enhanced data area offset, in bytes
276                          */
277                         card->ext_csd.enhanced_area_offset =
278                                 (((unsigned long long)ext_csd[139]) << 24) +
279                                 (((unsigned long long)ext_csd[138]) << 16) +
280                                 (((unsigned long long)ext_csd[137]) << 8) +
281                                 (((unsigned long long)ext_csd[136]));
282                         if (mmc_card_blockaddr(card))
283                                 card->ext_csd.enhanced_area_offset <<= 9;
284                         /*
285                          * calculate the enhanced data area size, in kilobytes
286                          */
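                        /*
                         * Each MAX_ENH_SIZE_MULT unit covers
                         * hc_erase_grp_sz * hc_wp_grp_sz * 512 KiB, hence
                         * the << 9 below; e.g. a multiplier of 16 with
                         * 4 * 4 group sizing gives 128 MiB (131072 KiB).
                         */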
287                         card->ext_csd.enhanced_area_size =
288                                 (ext_csd[142] << 16) + (ext_csd[141] << 8) +
289                                 ext_csd[140];
290                         card->ext_csd.enhanced_area_size *=
291                                 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
292                         card->ext_csd.enhanced_area_size <<= 9;
293                 } else {
294                         pr_warn("%s: defines enhanced area without partition setting complete\n",
295                                 mmc_hostname(card->host));
296                 }
297         }
298 }
299
300 static void mmc_part_add(struct mmc_card *card, u64 size,
301                          unsigned int part_cfg, char *name, int idx, bool ro,
302                          int area_type)
303 {
304         card->part[card->nr_parts].size = size;
305         card->part[card->nr_parts].part_cfg = part_cfg;
306         sprintf(card->part[card->nr_parts].name, name, idx);
307         card->part[card->nr_parts].force_ro = ro;
308         card->part[card->nr_parts].area_type = area_type;
309         card->nr_parts++;
310 }
311
312 static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
313 {
314         int idx;
315         u8 hc_erase_grp_sz, hc_wp_grp_sz;
316         u64 part_size;
317
318         /*
319          * General purpose partition feature support --
320          * If ext_csd has the size of general purpose partitions,
321          * If ext_csd reports a size for the general purpose partitions,
322          * record the size, part_cfg and partition name in mmc_part.
323         if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
324             EXT_CSD_PART_SUPPORT_PART_EN) {
325                 hc_erase_grp_sz =
326                         ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
327                 hc_wp_grp_sz =
328                         ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
329
330                 for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
331                         if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
332                             !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
333                             !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
334                                 continue;
335                         if (card->ext_csd.partition_setting_completed == 0) {
336                                 pr_warn("%s: has partition size defined without partition complete\n",
337                                         mmc_hostname(card->host));
338                                 break;
339                         }
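                        /*
                         * GP_SIZE_MULT counts units of
                         * hc_wp_grp_sz * hc_erase_grp_sz * 512 KiB, hence
                         * the << 19 below; e.g. a multiplier of 2 with
                         * 4 * 4 group sizing gives a 16 MiB partition.
                         */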
340                         part_size =
341                                 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
342                                 << 16) +
343                                 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
344                                 << 8) +
345                                 ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
346                         part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
347                         mmc_part_add(card, part_size << 19,
348                                 EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
349                                 "gp%d", idx, false,
350                                 MMC_BLK_DATA_AREA_GP);
351                 }
352         }
353 }
354
355 /* Minimum partition switch timeout in milliseconds */
356 #define MMC_MIN_PART_SWITCH_TIME        300
357
358 /*
359  * Decode extended CSD.
360  */
361 static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
362 {
363         int err = 0, idx;
364         u64 part_size;
365         struct device_node *np;
366         bool broken_hpi = false;
367
368         /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
369         card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
370         if (card->csd.structure == 3) {
371                 if (card->ext_csd.raw_ext_csd_structure > 2) {
372                         pr_err("%s: unrecognised EXT_CSD structure version %d\n",
373                                 mmc_hostname(card->host),
374                                 card->ext_csd.raw_ext_csd_structure);
375                         err = -EINVAL;
376                         goto out;
377                 }
378         }
379
380         np = mmc_of_find_child_device(card->host, 0);
381         if (np && of_device_is_compatible(np, "mmc-card"))
382                 broken_hpi = of_property_read_bool(np, "broken-hpi");
383         of_node_put(np);
384
385         /*
386          * The EXT_CSD format is meant to be forward compatible. As long
387          * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
388          * are authorized, see JEDEC JESD84-B50 section B.8.
389          */
390         card->ext_csd.rev = ext_csd[EXT_CSD_REV];
391
392         /* fixup device after ext_csd revision field is updated */
393         mmc_fixup_device(card, mmc_ext_csd_fixups);
394
395         card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
396         card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
397         card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
398         card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
399         if (card->ext_csd.rev >= 2) {
400                 card->ext_csd.sectors =
401                         ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
402                         ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
403                         ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
404                         ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
405
406                 /* Cards with density > 2GiB are sector addressed */
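                /* i.e. more than 4194304 sectors of 512 bytes each */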
407                 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
408                         mmc_card_set_blockaddr(card);
409         }
410
411         card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
412         card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
413         mmc_select_card_type(card);
414
415         card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
416         card->ext_csd.raw_erase_timeout_mult =
417                 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
418         card->ext_csd.raw_hc_erase_grp_size =
419                 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
420         if (card->ext_csd.rev >= 3) {
421                 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
422                 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
423
424                 /* EXT_CSD value is in units of 10ms, but we store in ms */
425                 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
426                 /* Some eMMC set the value too low so set a minimum */
427                 if (card->ext_csd.part_time &&
428                     card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
429                         card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
430
431                 /* Sleep / awake timeout in 100ns units */
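                /* e.g. a raw S_A_TIMEOUT of 0x11 gives 2^17 * 100 ns ~= 13 ms */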
432                 if (sa_shift > 0 && sa_shift <= 0x17)
433                         card->ext_csd.sa_timeout =
434                                         1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
435                 card->ext_csd.erase_group_def =
436                         ext_csd[EXT_CSD_ERASE_GROUP_DEF];
437                 card->ext_csd.hc_erase_timeout = 300 *
438                         ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
439                 card->ext_csd.hc_erase_size =
440                         ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
441
442                 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
443
444                 /*
445                  * There are two boot regions of equal size, defined in
446                  * multiples of 128K.
447                  */
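                /* e.g. BOOT_SIZE_MULT = 32 gives two 4 MiB boot partitions */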
448                 if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
449                         for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
450                                 part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
451                                 mmc_part_add(card, part_size,
452                                         EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
453                                         "boot%d", idx, true,
454                                         MMC_BLK_DATA_AREA_BOOT);
455                         }
456                 }
457         }
458
459         card->ext_csd.raw_hc_erase_gap_size =
460                 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
461         card->ext_csd.raw_sec_trim_mult =
462                 ext_csd[EXT_CSD_SEC_TRIM_MULT];
463         card->ext_csd.raw_sec_erase_mult =
464                 ext_csd[EXT_CSD_SEC_ERASE_MULT];
465         card->ext_csd.raw_sec_feature_support =
466                 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
467         card->ext_csd.raw_trim_mult =
468                 ext_csd[EXT_CSD_TRIM_MULT];
469         card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
470         card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
471         if (card->ext_csd.rev >= 4) {
472                 if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
473                     EXT_CSD_PART_SETTING_COMPLETED)
474                         card->ext_csd.partition_setting_completed = 1;
475                 else
476                         card->ext_csd.partition_setting_completed = 0;
477
478                 mmc_manage_enhanced_area(card, ext_csd);
479
480                 mmc_manage_gp_partitions(card, ext_csd);
481
482                 card->ext_csd.sec_trim_mult =
483                         ext_csd[EXT_CSD_SEC_TRIM_MULT];
484                 card->ext_csd.sec_erase_mult =
485                         ext_csd[EXT_CSD_SEC_ERASE_MULT];
486                 card->ext_csd.sec_feature_support =
487                         ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
488                 card->ext_csd.trim_timeout = 300 *
489                         ext_csd[EXT_CSD_TRIM_MULT];
490
491                 /*
492                  * Note that the call to mmc_part_add above defaults to read
493                  * only. If this default assumption is changed, the call must
494                  * take into account the value of boot_locked below.
495                  */
496                 card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
497                 card->ext_csd.boot_ro_lockable = true;
498
499                 /* Save power class values */
500                 card->ext_csd.raw_pwr_cl_52_195 =
501                         ext_csd[EXT_CSD_PWR_CL_52_195];
502                 card->ext_csd.raw_pwr_cl_26_195 =
503                         ext_csd[EXT_CSD_PWR_CL_26_195];
504                 card->ext_csd.raw_pwr_cl_52_360 =
505                         ext_csd[EXT_CSD_PWR_CL_52_360];
506                 card->ext_csd.raw_pwr_cl_26_360 =
507                         ext_csd[EXT_CSD_PWR_CL_26_360];
508                 card->ext_csd.raw_pwr_cl_200_195 =
509                         ext_csd[EXT_CSD_PWR_CL_200_195];
510                 card->ext_csd.raw_pwr_cl_200_360 =
511                         ext_csd[EXT_CSD_PWR_CL_200_360];
512                 card->ext_csd.raw_pwr_cl_ddr_52_195 =
513                         ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
514                 card->ext_csd.raw_pwr_cl_ddr_52_360 =
515                         ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
516                 card->ext_csd.raw_pwr_cl_ddr_200_360 =
517                         ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
518         }
519
520         if (card->ext_csd.rev >= 5) {
521                 /* Adjust production date as per JEDEC JESD84-B451 */
522                 if (card->cid.year < 2010)
523                         card->cid.year += 16;
524
525                 /* check whether the eMMC card supports BKOPS */
526                 if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
527                         card->ext_csd.bkops = 1;
528                         card->ext_csd.man_bkops_en =
529                                         (ext_csd[EXT_CSD_BKOPS_EN] &
530                                                 EXT_CSD_MANUAL_BKOPS_MASK);
531                         card->ext_csd.raw_bkops_status =
532                                 ext_csd[EXT_CSD_BKOPS_STATUS];
533                         if (card->ext_csd.man_bkops_en)
534                                 pr_debug("%s: MAN_BKOPS_EN bit is set\n",
535                                         mmc_hostname(card->host));
536                         card->ext_csd.auto_bkops_en =
537                                         (ext_csd[EXT_CSD_BKOPS_EN] &
538                                                 EXT_CSD_AUTO_BKOPS_MASK);
539                         if (card->ext_csd.auto_bkops_en)
540                                 pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
541                                         mmc_hostname(card->host));
542                 }
543
544                 /* check whether the eMMC card supports HPI */
545                 if (!mmc_card_broken_hpi(card) &&
546                     !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
547                         card->ext_csd.hpi = 1;
548                         if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
549                                 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
550                         else
551                                 card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
552                         /*
553                          * Indicate the maximum timeout to close
554                          * a command interrupted by HPI
555                          */
556                         card->ext_csd.out_of_int_time =
557                                 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
558                 }
559
560                 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
561                 card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
562
563                 /*
564                  * RPMB regions are defined in multiples of 128K.
565                  */
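                /* e.g. RPMB_SIZE_MULT = 32 gives a 4 MiB RPMB partition */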
566                 card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
567                 if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
568                         mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
569                                 EXT_CSD_PART_CONFIG_ACC_RPMB,
570                                 "rpmb", 0, false,
571                                 MMC_BLK_DATA_AREA_RPMB);
572                 }
573         }
574
575         card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
576         if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
577                 card->erased_byte = 0xFF;
578         else
579                 card->erased_byte = 0x0;
580
581         /* eMMC v4.5 or later */
582         card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
583         if (card->ext_csd.rev >= 6) {
584                 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
585
586                 card->ext_csd.generic_cmd6_time = 10 *
587                         ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
588                 card->ext_csd.power_off_longtime = 10 *
589                         ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
590
591                 card->ext_csd.cache_size =
592                         ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
593                         ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
594                         ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
595                         ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
596
597                 if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
598                         card->ext_csd.data_sector_size = 4096;
599                 else
600                         card->ext_csd.data_sector_size = 512;
601
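                /*
                 * e.g. TAG_UNIT_SIZE = 2 with 512-byte sectors gives
                 * 4 * 512 = 2048-byte data tag units.
                 */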
602                 if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
603                     (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
604                         card->ext_csd.data_tag_unit_size =
605                         ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
606                         (card->ext_csd.data_sector_size);
607                 } else {
608                         card->ext_csd.data_tag_unit_size = 0;
609                 }
610
611                 card->ext_csd.max_packed_writes =
612                         ext_csd[EXT_CSD_MAX_PACKED_WRITES];
613                 card->ext_csd.max_packed_reads =
614                         ext_csd[EXT_CSD_MAX_PACKED_READS];
615         } else {
616                 card->ext_csd.data_sector_size = 512;
617         }
618
619         /* eMMC v5 or later */
620         if (card->ext_csd.rev >= 7) {
621                 memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
622                        MMC_FIRMWARE_LEN);
623                 card->ext_csd.ffu_capable =
624                         (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
625                         !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
626
627                 card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
628                 card->ext_csd.device_life_time_est_typ_a =
629                         ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
630                 card->ext_csd.device_life_time_est_typ_b =
631                         ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
632         }
633
634         /* eMMC v5.1 or later */
635         if (card->ext_csd.rev >= 8) {
636                 card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
637                                              EXT_CSD_CMDQ_SUPPORTED;
638                 card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
639                                             EXT_CSD_CMDQ_DEPTH_MASK) + 1;
640                 /* Exclude inefficiently small queue depths */
641                 if (card->ext_csd.cmdq_depth <= 2) {
642                         card->ext_csd.cmdq_support = false;
643                         card->ext_csd.cmdq_depth = 0;
644                 }
645                 if (card->ext_csd.cmdq_support) {
646                         pr_debug("%s: Command Queue supported depth %u\n",
647                                  mmc_hostname(card->host),
648                                  card->ext_csd.cmdq_depth);
649                 }
650         }
651 out:
652         return err;
653 }
654
655 static int mmc_read_ext_csd(struct mmc_card *card)
656 {
657         u8 *ext_csd;
658         int err;
659
660         if (!mmc_can_ext_csd(card))
661                 return 0;
662
663         err = mmc_get_ext_csd(card, &ext_csd);
664         if (err) {
665                 /* If the host or the card can't do the switch,
666                  * fail more gracefully. */
667                 if ((err != -EINVAL)
668                  && (err != -ENOSYS)
669                  && (err != -EFAULT))
670                         return err;
671
672                 /*
673                  * High capacity cards should have this "magic" size
674                  * stored in their CSD.
675                  */
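                /*
                 * 4096 * 512 is the block count that devices larger than
                 * 2GB report in their CSD, see mmc_decode_csd().
                 */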
676                 if (card->csd.capacity == (4096 * 512)) {
677                         pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
678                                 mmc_hostname(card->host));
679                 } else {
680                         pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
681                                 mmc_hostname(card->host));
682                         err = 0;
683                 }
684
685                 return err;
686         }
687
688         err = mmc_decode_ext_csd(card, ext_csd);
689         kfree(ext_csd);
690         return err;
691 }
692
693 static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
694 {
695         u8 *bw_ext_csd;
696         int err;
697
698         if (bus_width == MMC_BUS_WIDTH_1)
699                 return 0;
700
701         err = mmc_get_ext_csd(card, &bw_ext_csd);
702         if (err)
703                 return err;
704
705         /* only compare read-only fields */
706         err = !((card->ext_csd.raw_partition_support ==
707                         bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
708                 (card->ext_csd.raw_erased_mem_count ==
709                         bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
710                 (card->ext_csd.rev ==
711                         bw_ext_csd[EXT_CSD_REV]) &&
712                 (card->ext_csd.raw_ext_csd_structure ==
713                         bw_ext_csd[EXT_CSD_STRUCTURE]) &&
714                 (card->ext_csd.raw_card_type ==
715                         bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
716                 (card->ext_csd.raw_s_a_timeout ==
717                         bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
718                 (card->ext_csd.raw_hc_erase_gap_size ==
719                         bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
720                 (card->ext_csd.raw_erase_timeout_mult ==
721                         bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
722                 (card->ext_csd.raw_hc_erase_grp_size ==
723                         bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
724                 (card->ext_csd.raw_sec_trim_mult ==
725                         bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
726                 (card->ext_csd.raw_sec_erase_mult ==
727                         bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
728                 (card->ext_csd.raw_sec_feature_support ==
729                         bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
730                 (card->ext_csd.raw_trim_mult ==
731                         bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
732                 (card->ext_csd.raw_sectors[0] ==
733                         bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
734                 (card->ext_csd.raw_sectors[1] ==
735                         bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
736                 (card->ext_csd.raw_sectors[2] ==
737                         bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
738                 (card->ext_csd.raw_sectors[3] ==
739                         bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
740                 (card->ext_csd.raw_pwr_cl_52_195 ==
741                         bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
742                 (card->ext_csd.raw_pwr_cl_26_195 ==
743                         bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
744                 (card->ext_csd.raw_pwr_cl_52_360 ==
745                         bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
746                 (card->ext_csd.raw_pwr_cl_26_360 ==
747                         bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
748                 (card->ext_csd.raw_pwr_cl_200_195 ==
749                         bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
750                 (card->ext_csd.raw_pwr_cl_200_360 ==
751                         bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
752                 (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
753                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
754                 (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
755                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
756                 (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
757                         bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
758
759         if (err)
760                 err = -EINVAL;
761
762         kfree(bw_ext_csd);
763         return err;
764 }
765
766 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
767         card->raw_cid[2], card->raw_cid[3]);
768 MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
769         card->raw_csd[2], card->raw_csd[3]);
770 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
771 MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
772 MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
773 MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
774 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
775 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
776 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
777 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
778 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
779 MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
780 MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
781 MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
782         card->ext_csd.device_life_time_est_typ_a,
783         card->ext_csd.device_life_time_est_typ_b);
784 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
785 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
786                 card->ext_csd.enhanced_area_offset);
787 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
788 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
789 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
790 MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
791 MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
792 MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
793
794 static ssize_t mmc_fwrev_show(struct device *dev,
795                               struct device_attribute *attr,
796                               char *buf)
797 {
798         struct mmc_card *card = mmc_dev_to_card(dev);
799
800         if (card->ext_csd.rev < 7) {
801                 return sprintf(buf, "0x%x\n", card->cid.fwrev);
802         } else {
803                 return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
804                                card->ext_csd.fwrev);
805         }
806 }
807
808 static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
809
810 static ssize_t mmc_dsr_show(struct device *dev,
811                             struct device_attribute *attr,
812                             char *buf)
813 {
814         struct mmc_card *card = mmc_dev_to_card(dev);
815         struct mmc_host *host = card->host;
816
817         if (card->csd.dsr_imp && host->dsr_req)
818                 return sprintf(buf, "0x%x\n", host->dsr);
819         else
820                 /* return default DSR value */
821                 return sprintf(buf, "0x%x\n", 0x404);
822 }
823
824 static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
825
826 static struct attribute *mmc_std_attrs[] = {
827         &dev_attr_cid.attr,
828         &dev_attr_csd.attr,
829         &dev_attr_date.attr,
830         &dev_attr_erase_size.attr,
831         &dev_attr_preferred_erase_size.attr,
832         &dev_attr_fwrev.attr,
833         &dev_attr_ffu_capable.attr,
834         &dev_attr_hwrev.attr,
835         &dev_attr_manfid.attr,
836         &dev_attr_name.attr,
837         &dev_attr_oemid.attr,
838         &dev_attr_prv.attr,
839         &dev_attr_rev.attr,
840         &dev_attr_pre_eol_info.attr,
841         &dev_attr_life_time.attr,
842         &dev_attr_serial.attr,
843         &dev_attr_enhanced_area_offset.attr,
844         &dev_attr_enhanced_area_size.attr,
845         &dev_attr_raw_rpmb_size_mult.attr,
846         &dev_attr_rel_sectors.attr,
847         &dev_attr_ocr.attr,
848         &dev_attr_rca.attr,
849         &dev_attr_dsr.attr,
850         &dev_attr_cmdq_en.attr,
851         NULL,
852 };
853 ATTRIBUTE_GROUPS(mmc_std);
854
855 static struct device_type mmc_type = {
856         .groups = mmc_std_groups,
857 };
858
859 /*
860  * Select the PowerClass for the current bus width.
861  * If a power class is defined for the 4/8 bit bus in the
862  * extended CSD register, select it by executing the
863  * mmc_switch command.
864  */
865 static int __mmc_select_powerclass(struct mmc_card *card,
866                                    unsigned int bus_width)
867 {
868         struct mmc_host *host = card->host;
869         struct mmc_ext_csd *ext_csd = &card->ext_csd;
870         unsigned int pwrclass_val = 0;
871         int err = 0;
872
873         switch (1 << host->ios.vdd) {
874         case MMC_VDD_165_195:
875                 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
876                         pwrclass_val = ext_csd->raw_pwr_cl_26_195;
877                 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
878                         pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
879                                 ext_csd->raw_pwr_cl_52_195 :
880                                 ext_csd->raw_pwr_cl_ddr_52_195;
881                 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
882                         pwrclass_val = ext_csd->raw_pwr_cl_200_195;
883                 break;
884         case MMC_VDD_27_28:
885         case MMC_VDD_28_29:
886         case MMC_VDD_29_30:
887         case MMC_VDD_30_31:
888         case MMC_VDD_31_32:
889         case MMC_VDD_32_33:
890         case MMC_VDD_33_34:
891         case MMC_VDD_34_35:
892         case MMC_VDD_35_36:
893                 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
894                         pwrclass_val = ext_csd->raw_pwr_cl_26_360;
895                 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
896                         pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
897                                 ext_csd->raw_pwr_cl_52_360 :
898                                 ext_csd->raw_pwr_cl_ddr_52_360;
899                 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
900                         pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
901                                 ext_csd->raw_pwr_cl_ddr_200_360 :
902                                 ext_csd->raw_pwr_cl_200_360;
903                 break;
904         default:
905                 pr_warn("%s: Voltage range not supported for power class\n",
906                         mmc_hostname(host));
907                 return -EINVAL;
908         }
909
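        /*
         * Each PWR_CL byte packs two power classes: the high nibble is for
         * the 8-bit bus, the low nibble for the 4-bit bus.  E.g. a raw
         * value of 0x42 selects class 4 for 8-bit and class 2 for 4-bit
         * operation.
         */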
910         if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
911                 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
912                                 EXT_CSD_PWR_CL_8BIT_SHIFT;
913         else
914                 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
915                                 EXT_CSD_PWR_CL_4BIT_SHIFT;
916
917         /* If the power class is different from the default value */
918         if (pwrclass_val > 0) {
919                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
920                                  EXT_CSD_POWER_CLASS,
921                                  pwrclass_val,
922                                  card->ext_csd.generic_cmd6_time);
923         }
924
925         return err;
926 }
927
928 static int mmc_select_powerclass(struct mmc_card *card)
929 {
930         struct mmc_host *host = card->host;
931         u32 bus_width, ext_csd_bits;
932         int err, ddr;
933
934         /* Power class selection is supported for versions >= 4.0 */
935         if (!mmc_can_ext_csd(card))
936                 return 0;
937
938         bus_width = host->ios.bus_width;
939         /* Power class values are defined only for 4/8 bit bus */
940         if (bus_width == MMC_BUS_WIDTH_1)
941                 return 0;
942
943         ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
944         if (ddr)
945                 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
946                         EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
947         else
948                 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
949                         EXT_CSD_BUS_WIDTH_8 :  EXT_CSD_BUS_WIDTH_4;
950
951         err = __mmc_select_powerclass(card, ext_csd_bits);
952         if (err)
953                 pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
954                         mmc_hostname(host), 1 << bus_width, ddr);
955
956         return err;
957 }
958
959 /*
960  * Set the bus speed for the selected speed mode.
961  */
962 static void mmc_set_bus_speed(struct mmc_card *card)
963 {
964         unsigned int max_dtr = (unsigned int)-1;
965
966         if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
967              max_dtr > card->ext_csd.hs200_max_dtr)
968                 max_dtr = card->ext_csd.hs200_max_dtr;
969         else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
970                 max_dtr = card->ext_csd.hs_max_dtr;
971         else if (max_dtr > card->csd.max_dtr)
972                 max_dtr = card->csd.max_dtr;
973
974         mmc_set_clock(card->host, max_dtr);
975 }
976
977 /*
978  * Select the bus width among 4-bit and 8-bit (SDR).
979  * If the bus width is changed successfully, return the selected width value.
980  * Zero is returned instead of an error value if the wide width is not supported.
981  */
982 static int mmc_select_bus_width(struct mmc_card *card)
983 {
984         static unsigned ext_csd_bits[] = {
985                 EXT_CSD_BUS_WIDTH_8,
986                 EXT_CSD_BUS_WIDTH_4,
987         };
988         static unsigned bus_widths[] = {
989                 MMC_BUS_WIDTH_8,
990                 MMC_BUS_WIDTH_4,
991         };
992         struct mmc_host *host = card->host;
993         unsigned idx, bus_width = 0;
994         int err = 0;
995
996         if (!mmc_can_ext_csd(card) ||
997             !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
998                 return 0;
999
1000         idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
1001
1002         /*
1003          * Unlike SD, MMC cards don't have a configuration register to report
1004          * the supported bus width. So either the bus test command is run to
1005          * identify the supported bus width, or the EXT_CSD read at the current
1006          * bus width is compared against the EXT_CSD read earlier in 1-bit mode.
1007          */
1008         for (; idx < ARRAY_SIZE(bus_widths); idx++) {
1009                 /*
1010                  * If the host is capable of 8-bit transfer, first switch
1011                  * the device to work in 8-bit transfer mode. If the
1012                  * mmc switch command returns an error, fall back to
1013                  * 4-bit transfer mode. On success, set the corresponding
1014                  * bus width on the host.
1015                  */
1016                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1017                                  EXT_CSD_BUS_WIDTH,
1018                                  ext_csd_bits[idx],
1019                                  card->ext_csd.generic_cmd6_time);
1020                 if (err)
1021                         continue;
1022
1023                 bus_width = bus_widths[idx];
1024                 mmc_set_bus_width(host, bus_width);
1025
1026                 /*
1027                  * If the controller can't handle the bus width test,
1028                  * compare the ext_csd previously read in 1-bit mode
1029                  * against the ext_csd read at the new bus width.
1030                  */
1031                 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
1032                         err = mmc_compare_ext_csds(card, bus_width);
1033                 else
1034                         err = mmc_bus_test(card, bus_width);
1035
1036                 if (!err) {
1037                         err = bus_width;
1038                         break;
1039                 } else {
1040                         pr_warn("%s: switch to bus width %d failed\n",
1041                                 mmc_hostname(host), 1 << bus_width);
1042                 }
1043         }
1044
1045         return err;
1046 }
1047
1048 /*
1049  * Switch to the high-speed mode
1050  */
1051 static int mmc_select_hs(struct mmc_card *card)
1052 {
1053         int err;
1054
1055         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1056                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
1057                            card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
1058                            true, true);
1059         if (err)
1060                 pr_warn("%s: switch to high-speed failed, err:%d\n",
1061                         mmc_hostname(card->host), err);
1062
1063         return err;
1064 }
1065
1066 /*
1067  * Activate wide bus and DDR if supported.
1068  */
1069 static int mmc_select_hs_ddr(struct mmc_card *card)
1070 {
1071         struct mmc_host *host = card->host;
1072         u32 bus_width, ext_csd_bits;
1073         int err = 0;
1074
1075         if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
1076                 return 0;
1077
1078         bus_width = host->ios.bus_width;
1079         if (bus_width == MMC_BUS_WIDTH_1)
1080                 return 0;
1081
1082         ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
1083                 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
1084
1085         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1086                            EXT_CSD_BUS_WIDTH,
1087                            ext_csd_bits,
1088                            card->ext_csd.generic_cmd6_time,
1089                            MMC_TIMING_MMC_DDR52,
1090                            true, true);
1091         if (err) {
1092                 pr_err("%s: switch to bus width %d ddr failed\n",
1093                         mmc_hostname(host), 1 << bus_width);
1094                 return err;
1095         }
1096
1097         /*
1098          * eMMC cards can support 3.3V to 1.2V i/o (vccq)
1099          * signaling.
1100          *
1101          * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
1102          *
1103          * 1.8V vccq at 3.3V core voltage (vcc) is not required
1104          * in the JEDEC spec for DDR.
1105          *
1106          * Even if an (e)MMC card can support vccq from 3.3V down to 1.2V,
1107          * not every host controller can, e.g. some SDHCI controllers
1108          * that are connected to an eMMC device. Some of these host
1109          * controllers still need to use a 1.8V vccq to support
1110          * DDR mode.
1111          *
1112          * So the sequence will be:
1113          * if (host and device can both support 1.2v IO)
1114          *      use 1.2v IO;
1115          * else if (host and device can both support 1.8v IO)
1116          *      use 1.8v IO;
1117          * so if host and device can only support 3.3v IO, this is the
1118          * last choice.
1119          *
1120          * WARNING: eMMC rules are NOT the same as SD DDR
1121          */
1122         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
1123                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1124                 if (!err)
1125                         return 0;
1126         }
1127
1128         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
1129             host->caps & MMC_CAP_1_8V_DDR)
1130                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1131
1132         /* make sure vccq is back to 3.3V if the voltage switch failed */
1133         if (err)
1134                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
1135
1136         return err;
1137 }
1138
1139 static int mmc_select_hs400(struct mmc_card *card)
1140 {
1141         struct mmc_host *host = card->host;
1142         unsigned int max_dtr;
1143         int err = 0;
1144         u8 val;
1145
1146         /*
1147          * HS400 mode requires 8-bit bus width
1148          */
1149         if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1150               host->ios.bus_width == MMC_BUS_WIDTH_8))
1151                 return 0;
1152
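        /*
         * The card is in HS200 here and is not moved to HS400 directly:
         * first drop to HS timing at the HS clock rate, then switch the
         * bus to 8-bit DDR, and finally raise the timing to HS400 with the
         * selected drive strength, checking the switch status after each
         * timing change.
         */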
1153         /* Switch card to HS mode */
1154         val = EXT_CSD_TIMING_HS;
1155         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1156                            EXT_CSD_HS_TIMING, val,
1157                            card->ext_csd.generic_cmd6_time, 0,
1158                            false, true);
1159         if (err) {
1160                 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
1161                         mmc_hostname(host), err);
1162                 return err;
1163         }
1164
1165         /* Set host controller to HS timing */
1166         mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1167
1168         /* Prepare host to downgrade to HS timing */
1169         if (host->ops->hs400_downgrade)
1170                 host->ops->hs400_downgrade(host);
1171
1172         /* Reduce frequency to HS frequency */
1173         max_dtr = card->ext_csd.hs_max_dtr;
1174         mmc_set_clock(host, max_dtr);
1175
1176         err = mmc_switch_status(card, true);
1177         if (err)
1178                 goto out_err;
1179
1180         if (host->ops->hs400_prepare_ddr)
1181                 host->ops->hs400_prepare_ddr(host);
1182
1183         /* Switch card to DDR */
1184         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1185                          EXT_CSD_BUS_WIDTH,
1186                          EXT_CSD_DDR_BUS_WIDTH_8,
1187                          card->ext_csd.generic_cmd6_time);
1188         if (err) {
1189                 pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
1190                         mmc_hostname(host), err);
1191                 return err;
1192         }
1193
1194         /* Switch card to HS400 */
1195         val = EXT_CSD_TIMING_HS400 |
1196               card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1197         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1198                            EXT_CSD_HS_TIMING, val,
1199                            card->ext_csd.generic_cmd6_time, 0,
1200                            false, true);
1201         if (err) {
1202                 pr_err("%s: switch to hs400 failed, err:%d\n",
1203                          mmc_hostname(host), err);
1204                 return err;
1205         }
1206
1207         /* Set host controller to HS400 timing and frequency */
1208         mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1209         mmc_set_bus_speed(card);
1210
1211         if (host->ops->hs400_complete)
1212                 host->ops->hs400_complete(host);
1213
1214         err = mmc_switch_status(card, true);
1215         if (err)
1216                 goto out_err;
1217
1218         return 0;
1219
1220 out_err:
1221         pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1222                __func__, err);
1223         return err;
1224 }
1225
1226 int mmc_hs200_to_hs400(struct mmc_card *card)
1227 {
1228         return mmc_select_hs400(card);
1229 }
1230
1231 int mmc_hs400_to_hs200(struct mmc_card *card)
1232 {
1233         struct mmc_host *host = card->host;
1234         unsigned int max_dtr;
1235         int err;
1236         u8 val;
1237
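        /*
         * Unwind HS400 step by step: drop the clock to the HS rate, switch
         * the timing back to HS (keeping DDR on the host), move the bus
         * width register to 8-bit SDR, and only then switch to HS200,
         * checking the status after each step.
         */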
1238         /* Reduce frequency to HS */
1239         max_dtr = card->ext_csd.hs_max_dtr;
1240         mmc_set_clock(host, max_dtr);
1241
1242         /* Switch HS400 to HS DDR */
1243         val = EXT_CSD_TIMING_HS;
1244         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
1245                            val, card->ext_csd.generic_cmd6_time, 0,
1246                            false, true);
1247         if (err)
1248                 goto out_err;
1249
1250         mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
1251
1252         err = mmc_switch_status(card, true);
1253         if (err)
1254                 goto out_err;
1255
1256         /* Switch HS DDR to HS */
1257         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1258                            EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
1259                            0, false, true);
1260         if (err)
1261                 goto out_err;
1262
1263         mmc_set_timing(host, MMC_TIMING_MMC_HS);
1264
1265         if (host->ops->hs400_downgrade)
1266                 host->ops->hs400_downgrade(host);
1267
1268         err = mmc_switch_status(card, true);
1269         if (err)
1270                 goto out_err;
1271
1272         /* Switch HS to HS200 */
1273         val = EXT_CSD_TIMING_HS200 |
1274               card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1275         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
1276                            val, card->ext_csd.generic_cmd6_time, 0,
1277                            false, true);
1278         if (err)
1279                 goto out_err;
1280
1281         mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1282
1283         /*
1284          * For HS200, CRC errors are not a reliable way to know the switch
1285          * failed. If there really is a problem, we would expect tuning to
1286          * fail and the result to end up the same.
1287          */
1288         err = mmc_switch_status(card, false);
1289         if (err)
1290                 goto out_err;
1291
1292         mmc_set_bus_speed(card);
1293
1294         /* Prepare tuning for HS400 mode. */
1295         if (host->ops->prepare_hs400_tuning)
1296                 host->ops->prepare_hs400_tuning(host, &host->ios);
1297
1298         return 0;
1299
1300 out_err:
1301         pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1302                __func__, err);
1303         return err;
1304 }
1305
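     /*
      * Pick a drive strength supported by both the card and the host. If the
      * host specifies a fixed drive strength, use it only when the card
      * supports it; otherwise ask the host to select one.
      */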
1306 static void mmc_select_driver_type(struct mmc_card *card)
1307 {
1308         int card_drv_type, drive_strength, drv_type = 0;
1309         int fixed_drv_type = card->host->fixed_drv_type;
1310
1311         card_drv_type = card->ext_csd.raw_driver_strength |
1312                         mmc_driver_type_mask(0);
1313
1314         if (fixed_drv_type >= 0)
1315                 drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
1316                                  ? fixed_drv_type : 0;
1317         else
1318                 drive_strength = mmc_select_drive_strength(card,
1319                                                            card->ext_csd.hs200_max_dtr,
1320                                                            card_drv_type, &drv_type);
1321
1322         card->drive_strength = drive_strength;
1323
1324         if (drv_type)
1325                 mmc_set_driver_type(card->host, drv_type);
1326 }
1327
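     /*
      * Switch to HS400 with enhanced strobe (HS400ES). This needs an 8-bit
      * bus and, unlike plain HS400, does not require HS200 tuning.
      */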
1328 static int mmc_select_hs400es(struct mmc_card *card)
1329 {
1330         struct mmc_host *host = card->host;
1331         int err = -EINVAL;
1332         u8 val;
1333
1334         if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
1335                 err = -ENOTSUPP;
1336                 goto out_err;
1337         }
1338
1339         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
1340                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1341
1342         if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
1343                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1344
1345         /* If this fails, try again during the next card power cycle */
1346         if (err)
1347                 goto out_err;
1348
1349         err = mmc_select_bus_width(card);
1350         if (err != MMC_BUS_WIDTH_8) {
1351                 pr_err("%s: switch to 8bit bus width failed, err:%d\n",
1352                         mmc_hostname(host), err);
1353                 err = err < 0 ? err : -ENOTSUPP;
1354                 goto out_err;
1355         }
1356
1357         /* Switch card to HS mode */
1358         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1359                            EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
1360                            card->ext_csd.generic_cmd6_time, 0,
1361                            false, true);
1362         if (err) {
1363                 pr_err("%s: switch to hs for hs400es failed, err:%d\n",
1364                         mmc_hostname(host), err);
1365                 goto out_err;
1366         }
1367
1368         mmc_set_timing(host, MMC_TIMING_MMC_HS);
1369         err = mmc_switch_status(card, true);
1370         if (err)
1371                 goto out_err;
1372
1373         mmc_set_clock(host, card->ext_csd.hs_max_dtr);
1374
1375         /* Switch card to DDR with strobe bit */
1376         val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
1377         err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1378                          EXT_CSD_BUS_WIDTH,
1379                          val,
1380                          card->ext_csd.generic_cmd6_time);
1381         if (err) {
1382                 pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
1383                         mmc_hostname(host), err);
1384                 goto out_err;
1385         }
1386
1387         mmc_select_driver_type(card);
1388
1389         /* Switch card to HS400 */
1390         val = EXT_CSD_TIMING_HS400 |
1391               card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1392         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1393                            EXT_CSD_HS_TIMING, val,
1394                            card->ext_csd.generic_cmd6_time, 0,
1395                            false, true);
1396         if (err) {
1397                 pr_err("%s: switch to hs400es failed, err:%d\n",
1398                         mmc_hostname(host), err);
1399                 goto out_err;
1400         }
1401
1402         /* Set host controller to HS400 timing and frequency */
1403         mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1404
1405         /* Have the controller enable the enhanced strobe function */
1406         host->ios.enhanced_strobe = true;
1407         if (host->ops->hs400_enhanced_strobe)
1408                 host->ops->hs400_enhanced_strobe(host, &host->ios);
1409
1410         err = mmc_switch_status(card, true);
1411         if (err)
1412                 goto out_err;
1413
1414         return 0;
1415
1416 out_err:
1417         pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1418                __func__, err);
1419         return err;
1420 }
1421
1422 /*
1423  * For devices supporting HS200 mode, the following sequence
1424  * should be done before executing the tuning process:
1425  * 1. set the desired bus width (4-bit or 8-bit, 1-bit is not supported)
1426  * 2. switch to HS200 mode
1427  * 3. set the clock to > 52 MHz and <= 200 MHz
1428  */
1429 static int mmc_select_hs200(struct mmc_card *card)
1430 {
1431         struct mmc_host *host = card->host;
1432         unsigned int old_timing, old_signal_voltage;
1433         int err = -EINVAL;
1434         u8 val;
1435
1436         old_signal_voltage = host->ios.signal_voltage;
1437         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1438                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1439
1440         if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
1441                 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1442
1443         /* If this fails, try again during the next card power cycle */
1444         if (err)
1445                 return err;
1446
1447         mmc_select_driver_type(card);
1448
1449         /*
1450          * Set the bus width (4 or 8) according to the host's support and
1451          * switch to HS200 mode if the bus width was set successfully.
1452          */
1453         err = mmc_select_bus_width(card);
1454         if (err > 0) {
1455                 val = EXT_CSD_TIMING_HS200 |
1456                       card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1457                 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1458                                    EXT_CSD_HS_TIMING, val,
1459                                    card->ext_csd.generic_cmd6_time, 0,
1460                                    false, true);
1461                 if (err)
1462                         goto err;
1463                 old_timing = host->ios.timing;
1464                 mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1465
1466                 /*
1467                  * For HS200, CRC errors are not a reliable way to know whether
1468                  * the switch failed. If there really is a problem, we would
1469                  * expect tuning to fail and the result ends up the same.
1470                  */
1471                 err = mmc_switch_status(card, false);
1472
1473                 /*
1474                  * mmc_select_timing() assumes timing has not changed if
1475                  * it is a switch error.
1476                  */
1477                 if (err == -EBADMSG)
1478                         mmc_set_timing(host, old_timing);
1479         }
1480 err:
1481         if (err) {
1482                 /* fall back to the old signal voltage; if that fails, report an error */
1483                 if (mmc_set_signal_voltage(host, old_signal_voltage))
1484                         err = -EIO;
1485
1486                 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1487                        __func__, err);
1488         }
1489         return err;
1490 }
1491
1492 /*
1493  * Activate High Speed, HS200 or HS400ES mode if supported.
1494  */
1495 static int mmc_select_timing(struct mmc_card *card)
1496 {
1497         int err = 0;
1498
1499         if (!mmc_can_ext_csd(card))
1500                 goto bus_speed;
1501
1502         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
1503                 err = mmc_select_hs400es(card);
1504         else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
1505                 err = mmc_select_hs200(card);
1506         else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
1507                 err = mmc_select_hs(card);
1508
1509         if (err && err != -EBADMSG)
1510                 return err;
1511
1512 bus_speed:
1513         /*
1514          * Set the bus speed to the selected bus timing.
1515          * If no timing was selected, backwards compatible mode is the default.
1516          */
1517         mmc_set_bus_speed(card);
1518         return 0;
1519 }
1520
1521 /*
1522  * Execute tuning sequence to seek the proper bus operating
1523  * conditions for HS200 and HS400, which sends CMD21 to the device.
1524  */
1525 static int mmc_hs200_tuning(struct mmc_card *card)
1526 {
1527         struct mmc_host *host = card->host;
1528
1529         /*
1530          * Timing should be adjusted to the HS400 target
1531          * operation frequency for the tuning process.
1532          */
1533         if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1534             host->ios.bus_width == MMC_BUS_WIDTH_8)
1535                 if (host->ops->prepare_hs400_tuning)
1536                         host->ops->prepare_hs400_tuning(host, &host->ios);
1537
1538         return mmc_execute_tuning(card);
1539 }
1540
1541 /*
1542  * Handle the detection and initialisation of a card.
1543  *
1544  * In the case of a resume, "oldcard" will contain the card
1545  * we're trying to reinitialise.
1546  */
1547 static int mmc_init_card(struct mmc_host *host, u32 ocr,
1548         struct mmc_card *oldcard)
1549 {
1550         struct mmc_card *card;
1551         int err;
1552         u32 cid[4];
1553         u32 rocr;
1554
1555         WARN_ON(!host->claimed);
1556
1557         /* Set correct bus mode for MMC before attempting init */
1558         if (!mmc_host_is_spi(host))
1559                 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1560
1561         /*
1562          * Since we're changing the OCR value, we seem to
1563          * need to tell some cards to go back to the idle
1564          * state.  We wait 1ms to give cards time to
1565          * respond.
1566          * mmc_go_idle is needed for eMMC devices that are asleep.
1567          */
1568         mmc_go_idle(host);
1569
1570         /* The extra bit indicates that we support high capacity */
1571         err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
1572         if (err)
1573                 goto err;
1574
1575         /*
1576          * For SPI, enable CRC as appropriate.
1577          */
1578         if (mmc_host_is_spi(host)) {
1579                 err = mmc_spi_set_crc(host, use_spi_crc);
1580                 if (err)
1581                         goto err;
1582         }
1583
1584         /*
1585          * Fetch CID from card.
1586          */
1587         err = mmc_send_cid(host, cid);
1588         if (err)
1589                 goto err;
1590
1591         if (oldcard) {
1592                 if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
1593                         pr_debug("%s: Perhaps the card was replaced\n",
1594                                 mmc_hostname(host));
1595                         err = -ENOENT;
1596                         goto err;
1597                 }
1598
1599                 card = oldcard;
1600         } else {
1601                 /*
1602                  * Allocate card structure.
1603                  */
1604                 card = mmc_alloc_card(host, &mmc_type);
1605                 if (IS_ERR(card)) {
1606                         err = PTR_ERR(card);
1607                         goto err;
1608                 }
1609
1610                 card->ocr = ocr;
1611                 card->type = MMC_TYPE_MMC;
1612                 card->rca = 1;
1613                 memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
1614         }
1615
1616         /*
1617          * Call the optional HC's init_card function to handle quirks.
1618          */
1619         if (host->ops->init_card)
1620                 host->ops->init_card(host, card);
1621
1622         /*
1623          * For native busses:  set card RCA and quit open drain mode.
1624          */
1625         if (!mmc_host_is_spi(host)) {
1626                 err = mmc_set_relative_addr(card);
1627                 if (err)
1628                         goto free_card;
1629
1630                 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
1631         }
1632
1633         if (!oldcard) {
1634                 /*
1635                  * Fetch CSD from card.
1636                  */
1637                 err = mmc_send_csd(card, card->raw_csd);
1638                 if (err)
1639                         goto free_card;
1640
1641                 err = mmc_decode_csd(card);
1642                 if (err)
1643                         goto free_card;
1644                 err = mmc_decode_cid(card);
1645                 if (err)
1646                         goto free_card;
1647         }
1648
1649         /*
1650          * DSR handling is only done for cards that support DSR and
1651          * hosts that request DSR configuration.
1652          */
1653         if (card->csd.dsr_imp && host->dsr_req)
1654                 mmc_set_dsr(host);
1655
1656         /*
1657          * Select card, as all following commands rely on that.
1658          */
1659         if (!mmc_host_is_spi(host)) {
1660                 err = mmc_select_card(card);
1661                 if (err)
1662                         goto free_card;
1663         }
1664
1665         if (!oldcard) {
1666                 /* Read extended CSD. */
1667                 err = mmc_read_ext_csd(card);
1668                 if (err)
1669                         goto free_card;
1670
1671                 /*
1672                  * If doing byte addressing, check whether sector addressing
1673                  * is required instead.  Handle the case of <2GB cards needing
1674                  * sector addressing.  See section 8.1 of JEDEC Standard
1675                  * JESD84-A441; the OCR register has bit 30 set for sector addressing.
1676                  */
1677                 if (rocr & BIT(30))
1678                         mmc_card_set_blockaddr(card);
1679
1680                 /* Erase size depends on CSD and Extended CSD */
1681                 mmc_set_erase_size(card);
1682         }
1683
1684         /* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
1685         if (card->ext_csd.rev >= 3) {
1686                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1687                                  EXT_CSD_ERASE_GROUP_DEF, 1,
1688                                  card->ext_csd.generic_cmd6_time);
1689
1690                 if (err && err != -EBADMSG)
1691                         goto free_card;
1692
1693                 if (err) {
1694                         err = 0;
1695                         /*
1696                          * Just disable the enhanced area offset & size;
1697                          * we will try to enable ERASE_GROUP_DEF again
1698                          * during the next reinit.
1699                          */
1700                         card->ext_csd.enhanced_area_offset = -EINVAL;
1701                         card->ext_csd.enhanced_area_size = -EINVAL;
1702                 } else {
1703                         card->ext_csd.erase_group_def = 1;
1704                         /*
1705                          * ERASE_GRP_DEF was enabled successfully.
1706                          * This affects the erase size, so the erase
1707                          * size needs to be recomputed here.
1708                          */
1709                         mmc_set_erase_size(card);
1710                 }
1711         }
1712
1713         /*
1714          * Ensure eMMC user default partition is enabled
1715          */
1716         if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
1717                 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
1718                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
1719                                  card->ext_csd.part_config,
1720                                  card->ext_csd.part_time);
1721                 if (err && err != -EBADMSG)
1722                         goto free_card;
1723         }
1724
1725         /*
1726          * Enable power_off_notification byte in the ext_csd register
1727          */
1728         if (card->ext_csd.rev >= 6) {
1729                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1730                                  EXT_CSD_POWER_OFF_NOTIFICATION,
1731                                  EXT_CSD_POWER_ON,
1732                                  card->ext_csd.generic_cmd6_time);
1733                 if (err && err != -EBADMSG)
1734                         goto free_card;
1735
1736                 /*
1737                  * The err can be -EBADMSG or 0,
1738                  * so check for success and update the flag
1739                  */
1740                 if (!err)
1741                         card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
1742         }
1743
1744         /* Set erase_arg: prefer discard, then trim, then plain erase */
1745         if (mmc_can_discard(card))
1746                 card->erase_arg = MMC_DISCARD_ARG;
1747         else if (mmc_can_trim(card))
1748                 card->erase_arg = MMC_TRIM_ARG;
1749         else
1750                 card->erase_arg = MMC_ERASE_ARG;
1751
1752         /*
1753          * Select timing interface
1754          */
1755         err = mmc_select_timing(card);
1756         if (err)
1757                 goto free_card;
1758
1759         if (mmc_card_hs200(card)) {
1760                 err = mmc_hs200_tuning(card);
1761                 if (err)
1762                         goto free_card;
1763
1764                 err = mmc_select_hs400(card);
1765                 if (err)
1766                         goto free_card;
1767         } else if (!mmc_card_hs400es(card)) {
1768                 /* Optionally select the desired bus width */
1769                 err = mmc_select_bus_width(card);
1770                 if (err > 0 && mmc_card_hs(card)) {
1771                         err = mmc_select_hs_ddr(card);
1772                         if (err)
1773                                 goto free_card;
1774                 }
1775         }
1776
1777         /*
1778          * Choose the power class for the selected bus interface.
1779          */
1780         mmc_select_powerclass(card);
1781
1782         /*
1783          * Enable HPI feature (if supported)
1784          */
1785         if (card->ext_csd.hpi) {
1786                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1787                                 EXT_CSD_HPI_MGMT, 1,
1788                                 card->ext_csd.generic_cmd6_time);
1789                 if (err && err != -EBADMSG)
1790                         goto free_card;
1791                 if (err) {
1792                         pr_warn("%s: Enabling HPI failed\n",
1793                                 mmc_hostname(card->host));
1794                         card->ext_csd.hpi_en = 0;
1795                         err = 0;
1796                 } else {
1797                         card->ext_csd.hpi_en = 1;
1798                 }
1799         }
1800
1801         /*
1802          * A cache size higher than 0 indicates the existence of a cache that
1803          * can be turned on. Note that some eMMCs from Micron have been
1804          * reported to need a timeout of ~800 ms when enabling the cache after
1805          * sudden power failure tests. Let's extend the timeout to a minimum of
1806          * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
1807          */
1808         if (card->ext_csd.cache_size > 0) {
1809                 unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
1810
1811                 timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
1812                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1813                                 EXT_CSD_CACHE_CTRL, 1, timeout_ms);
1814                 if (err && err != -EBADMSG)
1815                         goto free_card;
1816
1817                 /*
1818                  * The cache was only turned on successfully if there was no error.
1819                  */
1820                 if (err) {
1821                         pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
1822                                 mmc_hostname(card->host), err);
1823                         card->ext_csd.cache_ctrl = 0;
1824                         err = 0;
1825                 } else {
1826                         card->ext_csd.cache_ctrl = 1;
1827                 }
1828         }
1829
1830         /*
1831          * Enable Command Queue if supported. Note that Packed Commands cannot
1832          * be used with Command Queue.
1833          */
1834         card->ext_csd.cmdq_en = false;
1835         if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
1836                 err = mmc_cmdq_enable(card);
1837                 if (err && err != -EBADMSG)
1838                         goto free_card;
1839                 if (err) {
1840                         pr_warn("%s: Enabling CMDQ failed\n",
1841                                 mmc_hostname(card->host));
1842                         card->ext_csd.cmdq_support = false;
1843                         card->ext_csd.cmdq_depth = 0;
1844                         err = 0;
1845                 }
1846         }
1847         /*
1848          * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
1849          * disabled for a time, so a flag is needed to indicate that the
1850          * Command Queue should be re-enabled afterwards.
1851          */
1852         card->reenable_cmdq = card->ext_csd.cmdq_en;
1853
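             /*
              * Enable the host's queue engine, if present. If the card's
              * Command Queue is not enabled, the engine is used as a Host
              * Software Queue instead.
              */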
1854         if (host->cqe_ops && !host->cqe_enabled) {
1855                 err = host->cqe_ops->cqe_enable(host, card);
1856                 if (!err) {
1857                         host->cqe_enabled = true;
1858
1859                         if (card->ext_csd.cmdq_en) {
1860                                 pr_info("%s: Command Queue Engine enabled\n",
1861                                         mmc_hostname(host));
1862                         } else {
1863                                 host->hsq_enabled = true;
1864                                 pr_info("%s: Host Software Queue enabled\n",
1865                                         mmc_hostname(host));
1866                         }
1867                 }
1868         }
1869
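             /*
              * The host wants to avoid 3.3V signalling (MMC_CAP2_AVOID_3_3V).
              * If the signal voltage is still 3.3V at this point, negotiating
              * a lower voltage failed, so give up on this card.
              */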
1870         if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
1871             host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1872                 pr_err("%s: Host failed to negotiate down from 3.3V\n",
1873                         mmc_hostname(host));
1874                 err = -EINVAL;
1875                 goto free_card;
1876         }
1877
1878         if (!oldcard)
1879                 host->card = card;
1880
1881         return 0;
1882
1883 free_card:
1884         if (!oldcard)
1885                 mmc_remove_card(card);
1886 err:
1887         return err;
1888 }
1889
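     /*
      * The SLEEP/AWAKE command is only available on cards implementing
      * EXT_CSD revision 3 (MMC v4.3) or later.
      */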
1890 static int mmc_can_sleep(struct mmc_card *card)
1891 {
1892         return (card && card->ext_csd.rev >= 3);
1893 }
1894
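     /*
      * Put the card into Sleep state: deselect it, then issue SLEEP/AWAKE
      * with the sleep bit (bit 15 of the argument) set.
      */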
1895 static int mmc_sleep(struct mmc_host *host)
1896 {
1897         struct mmc_command cmd = {};
1898         struct mmc_card *card = host->card;
1899         unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
1900         int err;
1901
1902         /* Re-tuning can't be done once the card is deselected */
1903         mmc_retune_hold(host);
1904
1905         err = mmc_deselect_cards(host);
1906         if (err)
1907                 goto out_release;
1908
1909         cmd.opcode = MMC_SLEEP_AWAKE;
1910         cmd.arg = card->rca << 16;
1911         cmd.arg |= 1 << 15;
1912
1913         /*
1914          * If the max_busy_timeout of the host is specified, validate it against
1915          * the sleep cmd timeout. A failure means we need to prevent the host
1916          * from doing hw busy detection, which is done by converting to an R1
1917          * response instead of an R1B. Note that some hosts require R1B, which
1918          * also means they are on their own when it comes to dealing with the
1919          * busy timeout.
1920          */
1921         if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
1922             (timeout_ms > host->max_busy_timeout)) {
1923                 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1924         } else {
1925                 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1926                 cmd.busy_timeout = timeout_ms;
1927         }
1928
1929         err = mmc_wait_for_cmd(host, &cmd, 0);
1930         if (err)
1931                 goto out_release;
1932
1933         /*
1934          * If the host does not wait while the card signals busy, then we will
1935          * have to wait out the sleep/awake timeout.  Note that we cannot use
1936          * the SEND_STATUS command to poll the status because that command (and
1937          * most others) is invalid while the card sleeps.
1938          */
1939         if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
1940                 mmc_delay(timeout_ms);
1941
1942 out_release:
1943         mmc_retune_release(host);
1944         return err;
1945 }
1946
1947 static int mmc_can_poweroff_notify(const struct mmc_card *card)
1948 {
1949         return card &&
1950                 mmc_card_mmc(card) &&
1951                 (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
1952 }
1953
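     /*
      * Tell the card that power is about to be removed by writing the
      * requested notification type to the POWER_OFF_NOTIFICATION EXT_CSD byte.
      */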
1954 static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
1955 {
1956         unsigned int timeout = card->ext_csd.generic_cmd6_time;
1957         int err;
1958
1959         /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
1960         if (notify_type == EXT_CSD_POWER_OFF_LONG)
1961                 timeout = card->ext_csd.power_off_longtime;
1962
1963         err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1964                         EXT_CSD_POWER_OFF_NOTIFICATION,
1965                         notify_type, timeout, 0, false, false);
1966         if (err)
1967                 pr_err("%s: Power Off Notification timed out, %u\n",
1968                        mmc_hostname(card->host), timeout);
1969
1970         /* Disable the power off notification after the switch operation. */
1971         card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
1972
1973         return err;
1974 }
1975
1976 /*
1977  * Host is being removed. Free up the current card.
1978  */
1979 static void mmc_remove(struct mmc_host *host)
1980 {
1981         mmc_remove_card(host->card);
1982         host->card = NULL;
1983 }
1984
1985 /*
1986  * Card detection - card is alive.
1987  */
1988 static int mmc_alive(struct mmc_host *host)
1989 {
1990         return mmc_send_status(host->card, NULL);
1991 }
1992
1993 /*
1994  * Card detection callback from host.
1995  */
1996 static void mmc_detect(struct mmc_host *host)
1997 {
1998         int err;
1999
2000         mmc_get_card(host->card, NULL);
2001
2002         /*
2003          * Just check if our card has been removed.
2004          */
2005         err = _mmc_detect_card_removed(host);
2006
2007         mmc_put_card(host->card, NULL);
2008
2009         if (err) {
2010                 mmc_remove(host);
2011
2012                 mmc_claim_host(host);
2013                 mmc_detach_bus(host);
2014                 mmc_power_off(host);
2015                 mmc_release_host(host);
2016         }
2017 }
2018
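     /*
      * Suspend the card: flush its cache, then notify it of power-off, put it
      * to sleep or simply deselect it (depending on what is supported), and
      * finally power off the host.
      */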
2019 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
2020 {
2021         int err = 0;
2022         unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
2023                                         EXT_CSD_POWER_OFF_LONG;
2024
2025         mmc_claim_host(host);
2026
2027         if (mmc_card_suspended(host->card))
2028                 goto out;
2029
2030         err = mmc_flush_cache(host->card);
2031         if (err)
2032                 goto out;
2033
2034         if (mmc_can_poweroff_notify(host->card) &&
2035                 ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
2036                 err = mmc_poweroff_notify(host->card, notify_type);
2037         else if (mmc_can_sleep(host->card))
2038                 err = mmc_sleep(host);
2039         else if (!mmc_host_is_spi(host))
2040                 err = mmc_deselect_cards(host);
2041
2042         if (!err) {
2043                 mmc_power_off(host);
2044                 mmc_card_set_suspended(host->card);
2045         }
2046 out:
2047         mmc_release_host(host);
2048         return err;
2049 }
2050
2051 /*
2052  * Suspend callback
2053  */
2054 static int mmc_suspend(struct mmc_host *host)
2055 {
2056         int err;
2057
2058         err = _mmc_suspend(host, true);
2059         if (!err) {
2060                 pm_runtime_disable(&host->card->dev);
2061                 pm_runtime_set_suspended(&host->card->dev);
2062         }
2063
2064         return err;
2065 }
2066
2067 /*
2068  * This function tries to determine if the same card is still present
2069  * and, if so, restore all state to it.
2070  */
2071 static int _mmc_resume(struct mmc_host *host)
2072 {
2073         int err = 0;
2074
2075         mmc_claim_host(host);
2076
2077         if (!mmc_card_suspended(host->card))
2078                 goto out;
2079
2080         mmc_power_up(host, host->card->ocr);
2081         err = mmc_init_card(host, host->card->ocr, host->card);
2082         mmc_card_clr_suspended(host->card);
2083
2084 out:
2085         mmc_release_host(host);
2086         return err;
2087 }
2088
2089 /*
2090  * Shutdown callback
2091  */
2092 static int mmc_shutdown(struct mmc_host *host)
2093 {
2094         int err = 0;
2095
2096         /*
2097          * In a specific case for poweroff notify, we need to resume the card
2098          * before we can shut it down properly.
2099          */
2100         if (mmc_can_poweroff_notify(host->card) &&
2101                 !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
2102                 err = _mmc_resume(host);
2103
2104         if (!err)
2105                 err = _mmc_suspend(host, false);
2106
2107         return err;
2108 }
2109
2110 /*
2111  * Callback for resume.
2112  */
2113 static int mmc_resume(struct mmc_host *host)
2114 {
2115         pm_runtime_enable(&host->card->dev);
2116         return 0;
2117 }
2118
2119 /*
2120  * Callback for runtime_suspend.
2121  */
2122 static int mmc_runtime_suspend(struct mmc_host *host)
2123 {
2124         int err;
2125
2126         if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
2127                 return 0;
2128
2129         err = _mmc_suspend(host, true);
2130         if (err)
2131                 pr_err("%s: error %d doing aggressive suspend\n",
2132                         mmc_hostname(host), err);
2133
2134         return err;
2135 }
2136
2137 /*
2138  * Callback for runtime_resume.
2139  */
2140 static int mmc_runtime_resume(struct mmc_host *host)
2141 {
2142         int err;
2143
2144         err = _mmc_resume(host);
2145         if (err && err != -ENOMEDIUM)
2146                 pr_err("%s: error %d doing runtime resume\n",
2147                         mmc_hostname(host), err);
2148
2149         return 0;
2150 }
2151
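     /*
      * The RST_n signal can only be used if it has been permanently enabled
      * in the card's EXT_CSD RST_n_FUNCTION field.
      */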
2152 static int mmc_can_reset(struct mmc_card *card)
2153 {
2154         u8 rst_n_function;
2155
2156         rst_n_function = card->ext_csd.rst_n_function;
2157         if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
2158                 return 0;
2159         return 1;
2160 }
2161
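     /*
      * Reset the card, either by asserting RST_n (if both the host and the
      * card support it) or by a brute-force power cycle, then reinitialise it.
      */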
2162 static int _mmc_hw_reset(struct mmc_host *host)
2163 {
2164         struct mmc_card *card = host->card;
2165
2166         /*
2167          * In the case of recovery, we can't expect flushing the cache to always
2168          * work, but we try anyway and ignore errors.
2169          */
2170         mmc_flush_cache(host->card);
2171
2172         if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
2173              mmc_can_reset(card)) {
2174                 /* If the card accepts the RST_n signal, send it. */
2175                 mmc_set_clock(host, host->f_init);
2176                 host->ops->hw_reset(host);
2177                 /* Set initial state and call mmc_set_ios */
2178                 mmc_set_initial_state(host);
2179         } else {
2180                 /* Do a brute force power cycle */
2181                 mmc_power_cycle(host, card->ocr);
2182                 mmc_pwrseq_reset(host);
2183         }
2184         return mmc_init_card(host, card->ocr, card);
2185 }
2186
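     /*
      * Bus operations used by the core for (e)MMC cards.
      */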
2187 static const struct mmc_bus_ops mmc_ops = {
2188         .remove = mmc_remove,
2189         .detect = mmc_detect,
2190         .suspend = mmc_suspend,
2191         .resume = mmc_resume,
2192         .runtime_suspend = mmc_runtime_suspend,
2193         .runtime_resume = mmc_runtime_resume,
2194         .alive = mmc_alive,
2195         .shutdown = mmc_shutdown,
2196         .hw_reset = _mmc_hw_reset,
2197 };
2198
2199 /*
2200  * Starting point for MMC card init.
2201  */
2202 int mmc_attach_mmc(struct mmc_host *host)
2203 {
2204         int err;
2205         u32 ocr, rocr;
2206
2207         WARN_ON(!host->claimed);
2208
2209         /* Set correct bus mode for MMC before attempting attach */
2210         if (!mmc_host_is_spi(host))
2211                 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
2212
2213         err = mmc_send_op_cond(host, 0, &ocr);
2214         if (err)
2215                 return err;
2216
2217         mmc_attach_bus(host, &mmc_ops);
2218         if (host->ocr_avail_mmc)
2219                 host->ocr_avail = host->ocr_avail_mmc;
2220
2221         /*
2222          * We need to get OCR a different way for SPI.
2223          */
2224         if (mmc_host_is_spi(host)) {
2225                 err = mmc_spi_read_ocr(host, 1, &ocr);
2226                 if (err)
2227                         goto err;
2228         }
2229
2230         rocr = mmc_select_voltage(host, ocr);
2231
2232         /*
2233          * Can we support the voltage of the card?
2234          */
2235         if (!rocr) {
2236                 err = -EINVAL;
2237                 goto err;
2238         }
2239
2240         /*
2241          * Detect and init the card.
2242          */
2243         err = mmc_init_card(host, rocr, NULL);
2244         if (err)
2245                 goto err;
2246
2247         mmc_release_host(host);
2248         err = mmc_add_card(host->card);
2249         if (err)
2250                 goto remove_card;
2251
2252         mmc_claim_host(host);
2253         return 0;
2254
2255 remove_card:
2256         mmc_remove_card(host->card);
2257         mmc_claim_host(host);
2258         host->card = NULL;
2259 err:
2260         mmc_detach_bus(host);
2261
2262         pr_err("%s: error %d whilst initialising MMC card\n",
2263                 mmc_hostname(host), err);
2264
2265         return err;
2266 }