1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 #define DEFAULT_CMD6_TIMEOUT_MS 500
26 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
27 static int mmc_power_cycle(struct mmc *mmc);
28 #if !CONFIG_IS_ENABLED(MMC_TINY)
29 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
32 #if !CONFIG_IS_ENABLED(DM_MMC)
34 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
39 __weak int board_mmc_getwp(struct mmc *mmc)
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
109 for (j = 0; j < 4; j++)
110 printf("%02x ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
119 printf("\t\tERROR MMC rsp not supported\n");
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [SD_LEGACY] = "SD Legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
164 [SD_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
209 int mmc_send_status(struct mmc *mmc, unsigned int *status)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 mmc_trace_state(mmc, &cmd);
223 *status = cmd.response[0];
227 mmc_trace_state(mmc, &cmd);
231 int mmc_poll_for_busy(struct mmc *mmc, int timeout)
236 err = mmc_wait_dat0(mmc, 1, timeout);
241 err = mmc_send_status(mmc, &status);
245 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
246 (status & MMC_STATUS_CURR_STATE) !=
250 if (status & MMC_STATUS_MASK) {
251 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
252 pr_err("Status Error: 0x%08x\n", status);
264 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 pr_err("Timeout waiting card ready\n");
273 int mmc_set_blocklen(struct mmc *mmc, int len)
281 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
282 cmd.resp_type = MMC_RSP_R1;
285 err = mmc_send_cmd(mmc, &cmd, NULL);
287 #ifdef CONFIG_MMC_QUIRKS
288 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
291 * It has been seen that SET_BLOCKLEN may fail on the first
292	 * attempt, let's try a few more times
295 err = mmc_send_cmd(mmc, &cmd, NULL);
305 #ifdef MMC_SUPPORTS_TUNING
306 static const u8 tuning_blk_pattern_4bit[] = {
307 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
308 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
309 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
310 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
311 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
312 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
313 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
314 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
317 static const u8 tuning_blk_pattern_8bit[] = {
318 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
319 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
320 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
321 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
322 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
323 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
324 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
325 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
326 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
327 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
328 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
329 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
330 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
331 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
332 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
333 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
336 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
339 struct mmc_data data;
340 const u8 *tuning_block_pattern;
343 if (mmc->bus_width == 8) {
344 tuning_block_pattern = tuning_blk_pattern_8bit;
345 size = sizeof(tuning_blk_pattern_8bit);
346 } else if (mmc->bus_width == 4) {
347 tuning_block_pattern = tuning_blk_pattern_4bit;
348 size = sizeof(tuning_blk_pattern_4bit);
353 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
357 cmd.resp_type = MMC_RSP_R1;
359 data.dest = (void *)data_buf;
361 data.blocksize = size;
362 data.flags = MMC_DATA_READ;
364 err = mmc_send_cmd(mmc, &cmd, &data);
368 if (memcmp(data_buf, tuning_block_pattern, size))
375 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
379 struct mmc_data data;
382 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
384 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
386 if (mmc->high_capacity)
389 cmd.cmdarg = start * mmc->read_bl_len;
391 cmd.resp_type = MMC_RSP_R1;
394 data.blocks = blkcnt;
395 data.blocksize = mmc->read_bl_len;
396 data.flags = MMC_DATA_READ;
398 if (mmc_send_cmd(mmc, &cmd, &data))
402 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
404 cmd.resp_type = MMC_RSP_R1b;
405 if (mmc_send_cmd(mmc, &cmd, NULL)) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 pr_err("mmc fail to send stop cmd\n");
416 #if CONFIG_IS_ENABLED(BLK)
417 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
419 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
423 #if CONFIG_IS_ENABLED(BLK)
424 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
426 int dev_num = block_dev->devnum;
428 lbaint_t cur, blocks_todo = blkcnt;
433 struct mmc *mmc = find_mmc_device(dev_num);
437 if (CONFIG_IS_ENABLED(MMC_TINY))
438 err = mmc_switch_part(mmc, block_dev->hwpart);
440 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
445 if ((start + blkcnt) > block_dev->lba) {
446 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
447 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
448 start + blkcnt, block_dev->lba);
453 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
454 pr_debug("%s: Failed to set blocklen\n", __func__);
459 cur = (blocks_todo > mmc->cfg->b_max) ?
460 mmc->cfg->b_max : blocks_todo;
461 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
462 pr_debug("%s: Failed to read blocks\n", __func__);
467 dst += cur * mmc->read_bl_len;
468 } while (blocks_todo > 0);
473 static int mmc_go_idle(struct mmc *mmc)
480 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
482 cmd.resp_type = MMC_RSP_NONE;
484 err = mmc_send_cmd(mmc, &cmd, NULL);
494 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
495 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
501 * Send CMD11 only if the request is to switch the card to
504 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
505 return mmc_set_signal_voltage(mmc, signal_voltage);
507 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
509 cmd.resp_type = MMC_RSP_R1;
511 err = mmc_send_cmd(mmc, &cmd, NULL);
515 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
519 * The card should drive cmd and dat[0:3] low immediately
520 * after the response of cmd11, but wait 100 us to be sure
522 err = mmc_wait_dat0(mmc, 0, 100);
529 * During a signal voltage level switch, the clock must be gated
530 * for 5 ms according to the SD spec
532 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
534 err = mmc_set_signal_voltage(mmc, signal_voltage);
538 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
540 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
543 * Failure to switch is indicated by the card holding
544 * dat[0:3] low. Wait for at least 1 ms according to spec
546 err = mmc_wait_dat0(mmc, 1, 1000);
556 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
563 cmd.cmdidx = MMC_CMD_APP_CMD;
564 cmd.resp_type = MMC_RSP_R1;
567 err = mmc_send_cmd(mmc, &cmd, NULL);
572 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
573 cmd.resp_type = MMC_RSP_R3;
576 * Most cards do not answer if some reserved bits
577	 * in the ocr are set. However, some controllers
578 * can set bit 7 (reserved for low voltages), but
579 * how to manage low voltages SD card is not yet
582 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
583 (mmc->cfg->voltages & 0xff8000);
585 if (mmc->version == SD_VERSION_2)
586 cmd.cmdarg |= OCR_HCS;
589 cmd.cmdarg |= OCR_S18R;
591 err = mmc_send_cmd(mmc, &cmd, NULL);
596 if (cmd.response[0] & OCR_BUSY)
605 if (mmc->version != SD_VERSION_2)
606 mmc->version = SD_VERSION_1_0;
608 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
609 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
610 cmd.resp_type = MMC_RSP_R3;
613 err = mmc_send_cmd(mmc, &cmd, NULL);
619 mmc->ocr = cmd.response[0];
621 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
622 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
624 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
630 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
636 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
641 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
642 cmd.resp_type = MMC_RSP_R3;
644 if (use_arg && !mmc_host_is_spi(mmc))
645 cmd.cmdarg = OCR_HCS |
646 (mmc->cfg->voltages &
647 (mmc->ocr & OCR_VOLTAGE_MASK)) |
648 (mmc->ocr & OCR_ACCESS_MODE);
650 err = mmc_send_cmd(mmc, &cmd, NULL);
653 mmc->ocr = cmd.response[0];
657 static int mmc_send_op_cond(struct mmc *mmc)
661 /* Some cards seem to need this */
664 /* Asking to the card its capabilities */
665 for (i = 0; i < 2; i++) {
666 err = mmc_send_op_cond_iter(mmc, i != 0);
670 /* exit if not busy (flag seems to be inverted) */
671 if (mmc->ocr & OCR_BUSY)
674 mmc->op_cond_pending = 1;
678 static int mmc_complete_op_cond(struct mmc *mmc)
685 mmc->op_cond_pending = 0;
686 if (!(mmc->ocr & OCR_BUSY)) {
687 /* Some cards seem to need this */
690 start = get_timer(0);
692 err = mmc_send_op_cond_iter(mmc, 1);
695 if (mmc->ocr & OCR_BUSY)
697 if (get_timer(start) > timeout)
703 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
704 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
705 cmd.resp_type = MMC_RSP_R3;
708 err = mmc_send_cmd(mmc, &cmd, NULL);
713 mmc->ocr = cmd.response[0];
716 mmc->version = MMC_VERSION_UNKNOWN;
718 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
725 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
728 struct mmc_data data;
731 /* Get the Card Status Register */
732 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
733 cmd.resp_type = MMC_RSP_R1;
736 data.dest = (char *)ext_csd;
738 data.blocksize = MMC_MAX_BLOCK_LEN;
739 data.flags = MMC_DATA_READ;
741 err = mmc_send_cmd(mmc, &cmd, &data);
746 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
750 int timeout = DEFAULT_CMD6_TIMEOUT_MS;
751 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
752 (index == EXT_CSD_PART_CONF);
756 if (mmc->gen_cmd6_time)
757 timeout = mmc->gen_cmd6_time * 10;
759 if (is_part_switch && mmc->part_switch_time)
760 timeout = mmc->part_switch_time * 10;
762 cmd.cmdidx = MMC_CMD_SWITCH;
763 cmd.resp_type = MMC_RSP_R1b;
764 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
768 while (retries > 0) {
769 ret = mmc_send_cmd(mmc, &cmd, NULL);
781 /* Waiting for the ready status */
782 return mmc_poll_for_busy(mmc, timeout);
789 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
791 return __mmc_switch(mmc, set, index, value, true);
794 #if !CONFIG_IS_ENABLED(MMC_TINY)
795 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
801 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
807 speed_bits = EXT_CSD_TIMING_HS;
809 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
811 speed_bits = EXT_CSD_TIMING_HS200;
814 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
816 speed_bits = EXT_CSD_TIMING_HS400;
820 speed_bits = EXT_CSD_TIMING_LEGACY;
826 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
827 speed_bits, !hsdowngrade);
831 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
832 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
834 * In case the eMMC is in HS200/HS400 mode and we are downgrading
835	 * to HS mode, the card clock is still running much faster than
836 * the supported HS mode clock, so we can not reliably read out
837 * Extended CSD. Reconfigure the controller to run at HS mode.
840 mmc_select_mode(mmc, MMC_HS);
841 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
845 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
846 /* Now check to see that it worked */
847 err = mmc_send_ext_csd(mmc, test_csd);
851 /* No high-speed support */
852 if (!test_csd[EXT_CSD_HS_TIMING])
859 static int mmc_get_capabilities(struct mmc *mmc)
861 u8 *ext_csd = mmc->ext_csd;
864 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
866 if (mmc_host_is_spi(mmc))
869 /* Only version 4 supports high-speed */
870 if (mmc->version < MMC_VERSION_4)
874 pr_err("No ext_csd found!\n"); /* this should enver happen */
878 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
880 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
881 mmc->cardtype = cardtype;
883 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
884 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
885 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
886 mmc->card_caps |= MMC_MODE_HS200;
889 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
890 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
891 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
892 mmc->card_caps |= MMC_MODE_HS400;
895 if (cardtype & EXT_CSD_CARD_TYPE_52) {
896 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
897 mmc->card_caps |= MMC_MODE_DDR_52MHz;
898 mmc->card_caps |= MMC_MODE_HS_52MHz;
900 if (cardtype & EXT_CSD_CARD_TYPE_26)
901 mmc->card_caps |= MMC_MODE_HS;
907 static int mmc_set_capacity(struct mmc *mmc, int part_num)
911 mmc->capacity = mmc->capacity_user;
915 mmc->capacity = mmc->capacity_boot;
918 mmc->capacity = mmc->capacity_rpmb;
924 mmc->capacity = mmc->capacity_gp[part_num - 4];
930 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
935 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
936 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
941 if (part_num & PART_ACCESS_MASK)
942 forbidden = MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400);
944 if (MMC_CAP(mmc->selected_mode) & forbidden) {
945 pr_debug("selected mode (%s) is forbidden for part %d\n",
946 mmc_mode_name(mmc->selected_mode), part_num);
948 } else if (mmc->selected_mode != mmc->best_mode) {
949 pr_debug("selected mode is not optimal\n");
954 return mmc_select_mode_and_width(mmc,
955 mmc->card_caps & ~forbidden);
960 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
961 unsigned int part_num)
967 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
971 ret = mmc_boot_part_access_chk(mmc, part_num);
975 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
976 (mmc->part_config & ~PART_ACCESS_MASK)
977 | (part_num & PART_ACCESS_MASK));
980 * Set the capacity if the switch succeeded or was intended
981 * to return to representing the raw device.
983 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
984 ret = mmc_set_capacity(mmc, part_num);
985 mmc_get_blk_desc(mmc)->hwpart = part_num;
991 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
992 int mmc_hwpart_config(struct mmc *mmc,
993 const struct mmc_hwpart_conf *conf,
994 enum mmc_hwpart_conf_mode mode)
1000 u32 max_enh_size_mult;
1001 u32 tot_enh_size_mult = 0;
1004 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1006 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1009 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1010 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1011 return -EMEDIUMTYPE;
1014 if (!(mmc->part_support & PART_SUPPORT)) {
1015 pr_err("Card does not support partitioning\n");
1016 return -EMEDIUMTYPE;
1019 if (!mmc->hc_wp_grp_size) {
1020 pr_err("Card does not define HC WP group size\n");
1021 return -EMEDIUMTYPE;
1024 /* check partition alignment and total enhanced size */
1025 if (conf->user.enh_size) {
1026 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1027 conf->user.enh_start % mmc->hc_wp_grp_size) {
1028 pr_err("User data enhanced area not HC WP group "
1032 part_attrs |= EXT_CSD_ENH_USR;
1033 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1034 if (mmc->high_capacity) {
1035 enh_start_addr = conf->user.enh_start;
1037 enh_start_addr = (conf->user.enh_start << 9);
1043 tot_enh_size_mult += enh_size_mult;
1045 for (pidx = 0; pidx < 4; pidx++) {
1046 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1047 pr_err("GP%i partition not HC WP group size "
1048 "aligned\n", pidx+1);
1051 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1052 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1053 part_attrs |= EXT_CSD_ENH_GP(pidx);
1054 tot_enh_size_mult += gp_size_mult[pidx];
1058 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1059 pr_err("Card does not support enhanced attribute\n");
1060 return -EMEDIUMTYPE;
1063 err = mmc_send_ext_csd(mmc, ext_csd);
1068 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1069 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1070 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1071 if (tot_enh_size_mult > max_enh_size_mult) {
1072 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1073 tot_enh_size_mult, max_enh_size_mult);
1074 return -EMEDIUMTYPE;
1077 /* The default value of EXT_CSD_WR_REL_SET is device
1078 * dependent, the values can only be changed if the
1079 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1080 * changed only once and before partitioning is completed. */
1081 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1082 if (conf->user.wr_rel_change) {
1083 if (conf->user.wr_rel_set)
1084 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1086 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1088 for (pidx = 0; pidx < 4; pidx++) {
1089 if (conf->gp_part[pidx].wr_rel_change) {
1090 if (conf->gp_part[pidx].wr_rel_set)
1091 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1093 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1097 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1098 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1099 puts("Card does not support host controlled partition write "
1100 "reliability settings\n");
1101 return -EMEDIUMTYPE;
1104 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1105 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1106 pr_err("Card already partitioned\n");
1110 if (mode == MMC_HWPART_CONF_CHECK)
1113 /* Partitioning requires high-capacity size definitions */
1114 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1115 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1116 EXT_CSD_ERASE_GROUP_DEF, 1);
1121 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1123 /* update erase group size to be high-capacity */
1124 mmc->erase_grp_size =
1125 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1129 /* all OK, write the configuration */
1130 for (i = 0; i < 4; i++) {
1131 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1132 EXT_CSD_ENH_START_ADDR+i,
1133 (enh_start_addr >> (i*8)) & 0xFF);
1137 for (i = 0; i < 3; i++) {
1138 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1139 EXT_CSD_ENH_SIZE_MULT+i,
1140 (enh_size_mult >> (i*8)) & 0xFF);
1144 for (pidx = 0; pidx < 4; pidx++) {
1145 for (i = 0; i < 3; i++) {
1146 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1147 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1148 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1153 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1154 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1158 if (mode == MMC_HWPART_CONF_SET)
1161 /* The WR_REL_SET is a write-once register but shall be
1162 * written before setting PART_SETTING_COMPLETED. As it is
1163 * write-once we can only write it when completing the
1165 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1166 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1167 EXT_CSD_WR_REL_SET, wr_rel_set);
1172 /* Setting PART_SETTING_COMPLETED confirms the partition
1173 * configuration but it only becomes effective after power
1174 * cycle, so we do not adjust the partition related settings
1175 * in the mmc struct. */
1177 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1178 EXT_CSD_PARTITION_SETTING,
1179 EXT_CSD_PARTITION_SETTING_COMPLETED);
1187 #if !CONFIG_IS_ENABLED(DM_MMC)
1188 int mmc_getcd(struct mmc *mmc)
1192 cd = board_mmc_getcd(mmc);
1195 if (mmc->cfg->ops->getcd)
1196 cd = mmc->cfg->ops->getcd(mmc);
1205 #if !CONFIG_IS_ENABLED(MMC_TINY)
1206 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1209 struct mmc_data data;
1211 /* Switch the frequency */
1212 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1213 cmd.resp_type = MMC_RSP_R1;
1214 cmd.cmdarg = (mode << 31) | 0xffffff;
1215 cmd.cmdarg &= ~(0xf << (group * 4));
1216 cmd.cmdarg |= value << (group * 4);
1218 data.dest = (char *)resp;
1219 data.blocksize = 64;
1221 data.flags = MMC_DATA_READ;
1223 return mmc_send_cmd(mmc, &cmd, &data);
1226 static int sd_get_capabilities(struct mmc *mmc)
1230 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1231 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1232 struct mmc_data data;
1234 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1238 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1240 if (mmc_host_is_spi(mmc))
1243 /* Read the SCR to find out if this card supports higher speeds */
1244 cmd.cmdidx = MMC_CMD_APP_CMD;
1245 cmd.resp_type = MMC_RSP_R1;
1246 cmd.cmdarg = mmc->rca << 16;
1248 err = mmc_send_cmd(mmc, &cmd, NULL);
1253 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1254 cmd.resp_type = MMC_RSP_R1;
1260 data.dest = (char *)scr;
1263 data.flags = MMC_DATA_READ;
1265 err = mmc_send_cmd(mmc, &cmd, &data);
1274 mmc->scr[0] = __be32_to_cpu(scr[0]);
1275 mmc->scr[1] = __be32_to_cpu(scr[1]);
1277 switch ((mmc->scr[0] >> 24) & 0xf) {
1279 mmc->version = SD_VERSION_1_0;
1282 mmc->version = SD_VERSION_1_10;
1285 mmc->version = SD_VERSION_2;
1286 if ((mmc->scr[0] >> 15) & 0x1)
1287 mmc->version = SD_VERSION_3;
1290 mmc->version = SD_VERSION_1_0;
1294 if (mmc->scr[0] & SD_DATA_4BIT)
1295 mmc->card_caps |= MMC_MODE_4BIT;
1297 /* Version 1.0 doesn't support switching */
1298 if (mmc->version == SD_VERSION_1_0)
1303 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1304 (u8 *)switch_status);
1309 /* The high-speed function is busy. Try again */
1310 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1314 /* If high-speed isn't supported, we return */
1315 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1316 mmc->card_caps |= MMC_CAP(SD_HS);
1318 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1319	/* Versions before 3.0 don't support UHS modes */
1320 if (mmc->version < SD_VERSION_3)
1323 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1324 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1325 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1326 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1327 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1328 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1329 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1330 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1331 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1332 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1333 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1339 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1343 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1346	/* SD versions 1.00 and 1.01 do not support CMD 6 */
1347 if (mmc->version == SD_VERSION_1_0)
1352 speed = UHS_SDR12_BUS_SPEED;
1355 speed = HIGH_SPEED_BUS_SPEED;
1357 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1359 speed = UHS_SDR12_BUS_SPEED;
1362 speed = UHS_SDR25_BUS_SPEED;
1365 speed = UHS_SDR50_BUS_SPEED;
1368 speed = UHS_DDR50_BUS_SPEED;
1371 speed = UHS_SDR104_BUS_SPEED;
1378 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1382 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1388 static int sd_select_bus_width(struct mmc *mmc, int w)
1393 if ((w != 4) && (w != 1))
1396 cmd.cmdidx = MMC_CMD_APP_CMD;
1397 cmd.resp_type = MMC_RSP_R1;
1398 cmd.cmdarg = mmc->rca << 16;
1400 err = mmc_send_cmd(mmc, &cmd, NULL);
1404 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1405 cmd.resp_type = MMC_RSP_R1;
1410 err = mmc_send_cmd(mmc, &cmd, NULL);
1418 #if CONFIG_IS_ENABLED(MMC_WRITE)
1419 static int sd_read_ssr(struct mmc *mmc)
1421 static const unsigned int sd_au_size[] = {
1422 0, SZ_16K / 512, SZ_32K / 512,
1423 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1424 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1425 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1426 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1431 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1432 struct mmc_data data;
1434 unsigned int au, eo, et, es;
1436 cmd.cmdidx = MMC_CMD_APP_CMD;
1437 cmd.resp_type = MMC_RSP_R1;
1438 cmd.cmdarg = mmc->rca << 16;
1440 err = mmc_send_cmd(mmc, &cmd, NULL);
1444 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1445 cmd.resp_type = MMC_RSP_R1;
1449 data.dest = (char *)ssr;
1450 data.blocksize = 64;
1452 data.flags = MMC_DATA_READ;
1454 err = mmc_send_cmd(mmc, &cmd, &data);
1462 for (i = 0; i < 16; i++)
1463 ssr[i] = be32_to_cpu(ssr[i]);
1465 au = (ssr[2] >> 12) & 0xF;
1466 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1467 mmc->ssr.au = sd_au_size[au];
1468 es = (ssr[3] >> 24) & 0xFF;
1469 es |= (ssr[2] & 0xFF) << 8;
1470 et = (ssr[3] >> 18) & 0x3F;
1472 eo = (ssr[3] >> 16) & 0x3;
1473 mmc->ssr.erase_timeout = (et * 1000) / es;
1474 mmc->ssr.erase_offset = eo * 1000;
1477 pr_debug("Invalid Allocation Unit Size.\n");
1483 /* frequency bases */
1484 /* divided by 10 to be nice to platforms without floating point */
1485 static const int fbase[] = {
1492 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1493 * to platforms without floating point.
1495 static const u8 multipliers[] = {
1514 static inline int bus_width(uint cap)
1516 if (cap == MMC_MODE_8BIT)
1518 if (cap == MMC_MODE_4BIT)
1520 if (cap == MMC_MODE_1BIT)
1522 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1526 #if !CONFIG_IS_ENABLED(DM_MMC)
1527 #ifdef MMC_SUPPORTS_TUNING
1528 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1534 static int mmc_set_ios(struct mmc *mmc)
1538 if (mmc->cfg->ops->set_ios)
1539 ret = mmc->cfg->ops->set_ios(mmc);
1545 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1548 if (clock > mmc->cfg->f_max)
1549 clock = mmc->cfg->f_max;
1551 if (clock < mmc->cfg->f_min)
1552 clock = mmc->cfg->f_min;
1556 mmc->clk_disable = disable;
1558 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1560 return mmc_set_ios(mmc);
1563 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1565 mmc->bus_width = width;
1567 return mmc_set_ios(mmc);
1570 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1572 * helper function to display the capabilities in a human
1573 * friendly manner. The capabilities include bus width and
1576 void mmc_dump_capabilities(const char *text, uint caps)
1580 pr_debug("%s: widths [", text);
1581 if (caps & MMC_MODE_8BIT)
1583 if (caps & MMC_MODE_4BIT)
1585 if (caps & MMC_MODE_1BIT)
1587 pr_debug("\b\b] modes [");
1588 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1589 if (MMC_CAP(mode) & caps)
1590 pr_debug("%s, ", mmc_mode_name(mode));
1591 pr_debug("\b\b]\n");
1595 struct mode_width_tuning {
1598 #ifdef MMC_SUPPORTS_TUNING
1603 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1604 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1607 case MMC_SIGNAL_VOLTAGE_000: return 0;
1608 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1609 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1610 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1615 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1619 if (mmc->signal_voltage == signal_voltage)
1622 mmc->signal_voltage = signal_voltage;
1623 err = mmc_set_ios(mmc);
1625 pr_debug("unable to set voltage (err %d)\n", err);
1630 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1636 #if !CONFIG_IS_ENABLED(MMC_TINY)
1637 static const struct mode_width_tuning sd_modes_by_pref[] = {
1638 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1639 #ifdef MMC_SUPPORTS_TUNING
1642 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1643 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1648 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1652 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1656 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1661 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1663 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1666 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1671 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1675 #define for_each_sd_mode_by_pref(caps, mwt) \
1676 for (mwt = sd_modes_by_pref;\
1677 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1679 if (caps & MMC_CAP(mwt->mode))
/*
 * Pick the best (mode, bus width) combination supported by both the SD card
 * and the host, configure card then host, and run tuning when the mode
 * requires it. Falls back to SD_LEGACY if nothing can be selected.
 */
1681 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1684 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1685 const struct mode_width_tuning *mwt;
1686 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS is only attempted if the card advertised 1.8V switching (S18R) */
1687 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1689 bool uhs_en = false;
1694 mmc_dump_capabilities("sd card", card_caps);
1695 mmc_dump_capabilities("host", mmc->host_caps);
1698 /* Restrict card's capabilities by what the host can do */
1699 caps = card_caps & mmc->host_caps;
1704 for_each_sd_mode_by_pref(caps, mwt) {
/* try the widest supported bus width first (4-bit before 1-bit) */
1707 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1708 if (*w & caps & mwt->widths) {
1709 pr_debug("trying mode %s width %d (at %d MHz)\n",
1710 mmc_mode_name(mwt->mode),
1712 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1714 /* configure the bus width (card + host) */
1715 err = sd_select_bus_width(mmc, bus_width(*w));
1718 mmc_set_bus_width(mmc, bus_width(*w));
1720 /* configure the bus mode (card) */
1721 err = sd_set_card_speed(mmc, mwt->mode);
1725 /* configure the bus mode (host) */
1726 mmc_select_mode(mmc, mwt->mode);
1727 mmc_set_clock(mmc, mmc->tran_speed,
1730 #ifdef MMC_SUPPORTS_TUNING
1731 /* execute tuning if needed */
1732 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1733 err = mmc_execute_tuning(mmc,
1736 pr_debug("tuning failed\n");
1742 #if CONFIG_IS_ENABLED(MMC_WRITE)
/* SSR is needed for the erase-related fields used by MMC_WRITE */
1743 err = sd_read_ssr(mmc);
1745 pr_warn("unable to read ssr\n");
1751 /* revert to a safer bus speed */
1752 mmc_select_mode(mmc, SD_LEGACY);
1753 mmc_set_clock(mmc, mmc->tran_speed,
1759 pr_err("unable to select a mode\n");
1764 * read and compare the part of ext csd that is constant.
1765 * This can be used to check that the transfer is working
/*
 * Re-read EXT_CSD and compare read-only fields against the cached copy.
 * Returns success only when the constant fields match, proving that data
 * transfers work in the newly selected bus mode.
 */
1768 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1771 const u8 *ext_csd = mmc->ext_csd;
1772 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD only exists from MMC 4.x onwards */
1774 if (mmc->version < MMC_VERSION_4)
1777 err = mmc_send_ext_csd(mmc, test_csd);
1781 /* Only compare read only fields */
1782 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1783 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1784 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1785 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1786 ext_csd[EXT_CSD_REV]
1787 == test_csd[EXT_CSD_REV] &&
1788 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1789 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1790 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1791 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1797 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Select the lowest signal voltage that both the card (per its cardtype
 * bits for the given mode) and the caller's allowed_mask permit, trying
 * candidates lowest-first via ffs().
 */
1798 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1799 uint32_t allowed_mask)
/* HS200/HS400 cards may run at 1.8V and/or 1.2V */
1806 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1807 EXT_CSD_CARD_TYPE_HS400_1_8V))
1808 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1809 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1810 EXT_CSD_CARD_TYPE_HS400_1_2V))
1811 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* DDR52 allows 3.3V/1.8V or 1.2V depending on card type */
1814 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1815 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1816 MMC_SIGNAL_VOLTAGE_180;
1817 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1818 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1821 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* walk the permitted voltages from lowest to highest bit */
1825 while (card_mask & allowed_mask) {
1826 enum mmc_voltage best_match;
1828 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1829 if (!mmc_set_signal_voltage(mmc, best_match))
/* this voltage failed; drop it and try the next one */
1832 allowed_mask &= ~best_match;
/* stub used when MMC_IO_VOLTAGE is disabled */
1838 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1839 uint32_t allowed_mask)
/*
 * eMMC bus modes ordered by preference (fastest first), analogous to
 * sd_modes_by_pref above.
 */
1845 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1846 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1849 .widths = MMC_MODE_8BIT,
1850 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1853 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1856 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1857 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1862 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1866 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1870 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1874 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate over mmc_modes_by_pref, visiting only modes present in 'caps'. */
1878 #define for_each_mmc_mode_by_pref(caps, mwt) \
1879 for (mwt = mmc_modes_by_pref;\
1880 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1882 if (caps & MMC_CAP(mwt->mode))
/*
 * Map host capability bit + DDR flag to the EXT_CSD BUS_WIDTH value to
 * program into the card, ordered widest/DDR first.
 */
1884 static const struct ext_csd_bus_width {
1888 } ext_csd_bus_width[] = {
1889 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1890 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1891 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1892 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1893 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1896 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Switch the card into HS400 mode. Per the eMMC spec this must go through
 * HS200 (for tuning) and then HS before the final switch to HS400 with an
 * 8-bit DDR bus.
 */
1897 static int mmc_select_hs400(struct mmc *mmc)
1901 /* Set timing to HS200 for tuning */
1902 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1906 /* configure the bus mode (host) */
1907 mmc_select_mode(mmc, MMC_HS_200);
1908 mmc_set_clock(mmc, mmc->tran_speed, false);
1910 /* execute tuning if needed */
1911 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1913 debug("tuning failed\n");
1917 /* Set back to HS */
1918 mmc_set_card_speed(mmc, MMC_HS, true);
/* enable 8-bit DDR bus on the card, as required for HS400 */
1920 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1921 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1925 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1929 mmc_select_mode(mmc, MMC_HS_400);
1930 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* stub used when MMC_HS400_SUPPORT is disabled */
1937 static int mmc_select_hs400(struct mmc *mmc)
/*
 * Iterate over ext_csd_bus_width entries matching the requested DDR setting
 * whose capability bit is present in 'caps' (widest widths first).
 */
1943 #define for_each_supported_width(caps, ddr, ecbv) \
1944 for (ecbv = ext_csd_bus_width;\
1945 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1947 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Pick the best (mode, bus width) combination supported by both the eMMC
 * card and the host: lower the signal voltage if needed, program the card's
 * bus width and speed, configure the host, tune, then verify with an
 * EXT_CSD read-back. On failure revert to a 1-bit legacy configuration and
 * try the next candidate.
 */
1949 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1952 const struct mode_width_tuning *mwt;
1953 const struct ext_csd_bus_width *ecbw;
1956 mmc_dump_capabilities("mmc", card_caps);
1957 mmc_dump_capabilities("host", mmc->host_caps);
1960 /* Restrict card's capabilities by what the host can do */
1961 card_caps &= mmc->host_caps;
1963 /* Only version 4 of MMC supports wider bus widths */
1964 if (mmc->version < MMC_VERSION_4)
1967 if (!mmc->ext_csd) {
1968 pr_debug("No ext_csd found!\n"); /* this should never happen */
1972 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
1973 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1975 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
1976 * before doing anything else, since a transition from either of
1977 * the HS200/HS400 mode directly to legacy mode is not supported.
1979 if (mmc->selected_mode == MMC_HS_200 ||
1980 mmc->selected_mode == MMC_HS_400)
1981 mmc_set_card_speed(mmc, MMC_HS, true);
1984 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1986 for_each_mmc_mode_by_pref(card_caps, mwt) {
1987 for_each_supported_width(card_caps & mwt->widths,
1988 mmc_is_mode_ddr(mwt->mode), ecbw) {
1989 enum mmc_voltage old_voltage;
1990 pr_debug("trying mode %s width %d (at %d MHz)\n",
1991 mmc_mode_name(mwt->mode),
1992 bus_width(ecbw->cap),
1993 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so it can be restored on failure */
1994 old_voltage = mmc->signal_voltage;
1995 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1996 MMC_ALL_SIGNAL_VOLTAGE);
2000 /* configure the bus width (card + host) */
2001 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
/* SDR width first; the DDR flag is applied after the speed switch */
2003 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2006 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2008 if (mwt->mode == MMC_HS_400) {
2009 err = mmc_select_hs400(mmc);
2011 printf("Select HS400 failed %d\n", err);
2015 /* configure the bus speed (card) */
2016 err = mmc_set_card_speed(mmc, mwt->mode, false);
2021 * configure the bus width AND the ddr mode
2022 * (card). The host side will be taken care
2023 * of in the next step
2025 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2026 err = mmc_switch(mmc,
2027 EXT_CSD_CMD_SET_NORMAL,
2029 ecbw->ext_csd_bits);
2034 /* configure the bus mode (host) */
2035 mmc_select_mode(mmc, mwt->mode);
2036 mmc_set_clock(mmc, mmc->tran_speed,
2038 #ifdef MMC_SUPPORTS_TUNING
2040 /* execute tuning if needed */
2042 err = mmc_execute_tuning(mmc,
2045 pr_debug("tuning failed\n");
2052 /* do a transfer to check the configuration */
2053 err = mmc_read_and_compare_ext_csd(mmc);
2057 mmc_set_signal_voltage(mmc, old_voltage);
2058 /* if an error occurred, revert to a safer bus mode */
2059 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2060 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2061 mmc_select_mode(mmc, MMC_LEGACY);
2062 mmc_set_bus_width(mmc, 1);
2066 pr_err("unable to select a mode\n");
2072 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: static EXT_CSD buffer so no heap allocation is needed */
2073 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * MMC v4+ specific startup: read EXT_CSD, derive the exact spec version,
 * the capacity, partition layout (boot/RPMB/GP/enhanced areas), switch
 * timings and erase/write-protect group sizes. No-op for SD or pre-4.0 MMC.
 */
2076 static int mmc_startup_v4(struct mmc *mmc)
2080 bool has_parts = false;
2081 bool part_completed;
/* index into this table is the EXT_CSD_REV byte */
2082 static const u32 mmc_versions[] = {
2094 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY keeps EXT_CSD in the static buffer above instead of the heap */
2095 u8 *ext_csd = ext_csd_bkup;
2097 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2101 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2103 err = mmc_send_ext_csd(mmc, ext_csd);
2107 /* store the ext csd for future reference */
2109 mmc->ext_csd = ext_csd;
2111 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2113 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2116 /* check ext_csd version and capacity */
2117 err = mmc_send_ext_csd(mmc, ext_csd);
2121 /* store the ext csd for future reference */
2123 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2126 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2128 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2131 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2133 if (mmc->version >= MMC_VERSION_4_2) {
2135 * According to the JEDEC Standard, the value of
2136 * ext_csd's capacity is valid if the value is more
/* SEC_CNT is a 4-byte little-endian sector count */
2139 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2140 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2141 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2142 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2143 capacity *= MMC_MAX_BLOCK_LEN;
/* only trust SEC_CNT when the device is larger than 2GiB */
2144 if ((capacity >> 20) > 2 * 1024)
2145 mmc->capacity_user = capacity;
2148 if (mmc->version >= MMC_VERSION_4_5)
2149 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2151 /* The partition data may be non-zero but it is only
2152 * effective if PARTITION_SETTING_COMPLETED is set in
2153 * EXT_CSD, so ignore any data if this bit is not set,
2154 * except for enabling the high-capacity group size
2155 * definition (see below).
2157 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2158 EXT_CSD_PARTITION_SETTING_COMPLETED);
2160 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2161 /* Some eMMC set the value too low so set a minimum */
2162 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2163 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2165 /* store the partition info of emmc */
2166 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2167 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2168 ext_csd[EXT_CSD_BOOT_MULT])
2169 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2170 if (part_completed &&
2171 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2172 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* boot/RPMB sizes are in 128KiB units, hence << 17 */
2174 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2176 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* up to 4 general-purpose partitions, 3 size bytes each */
2178 for (i = 0; i < 4; i++) {
2179 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2180 uint mult = (ext_csd[idx + 2] << 16) +
2181 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2184 if (!part_completed)
2186 mmc->capacity_gp[i] = mult;
2187 mmc->capacity_gp[i] *=
2188 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2189 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* group units are 512KiB, hence << 19 to get bytes */
2190 mmc->capacity_gp[i] <<= 19;
2193 #ifndef CONFIG_SPL_BUILD
2194 if (part_completed) {
2195 mmc->enh_user_size =
2196 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2197 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2198 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2199 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2200 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2201 mmc->enh_user_size <<= 19;
2202 mmc->enh_user_start =
2203 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2204 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2205 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2206 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors */
2207 if (mmc->high_capacity)
2208 mmc->enh_user_start <<= 9;
2213 * Host needs to enable ERASE_GRP_DEF bit if device is
2214 * partitioned. This bit will be lost every time after a reset
2215 * or power off. This will affect erase size.
2219 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2220 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2223 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2224 EXT_CSD_ERASE_GROUP_DEF, 1);
/* mirror the switch into our cached copy */
2229 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2232 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2233 #if CONFIG_IS_ENABLED(MMC_WRITE)
2234 /* Read out group size from ext_csd */
2235 mmc->erase_grp_size =
2236 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2239 * if high capacity and partition setting completed
2240 * SEC_COUNT is valid even if it is smaller than 2 GiB
2241 * JEDEC Standard JESD84-B45, 6.2.4
2243 if (mmc->high_capacity && part_completed) {
2244 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2245 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2246 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2247 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2248 capacity *= MMC_MAX_BLOCK_LEN;
2249 mmc->capacity_user = capacity;
2252 #if CONFIG_IS_ENABLED(MMC_WRITE)
2254 /* Calculate the group size from the csd value. */
2255 int erase_gsz, erase_gmul;
2257 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2258 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2259 mmc->erase_grp_size = (erase_gsz + 1)
2263 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2264 mmc->hc_wp_grp_size = 1024
2265 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2266 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2269 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2274 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* error path: drop the cached EXT_CSD pointer */
2277 mmc->ext_csd = NULL;
/*
 * Bring an identified card fully up: read CID/CSD, set the RCA, derive the
 * legacy clock and capacity from CSD, select the card, run the v4 EXT_CSD
 * handling, negotiate the best bus mode/width, and finally fill in the
 * block-device descriptor.
 */
2282 static int mmc_startup(struct mmc *mmc)
2288 struct blk_desc *bdesc;
2290 #ifdef CONFIG_MMC_SPI_CRC_ON
2291 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2292 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2293 cmd.resp_type = MMC_RSP_R1;
2295 err = mmc_send_cmd(mmc, &cmd, NULL);
2301 /* Put the Card in Identify Mode */
2302 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2303 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2304 cmd.resp_type = MMC_RSP_R2;
2307 err = mmc_send_cmd(mmc, &cmd, NULL);
2309 #ifdef CONFIG_MMC_QUIRKS
2310 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2313 * It has been seen that SEND_CID may fail on the first
2314 * attempt, let's try a few more time
2317 err = mmc_send_cmd(mmc, &cmd, NULL);
2320 } while (retries--);
2327 memcpy(mmc->cid, cmd.response, 16);
2330 * For MMC cards, set the Relative Address.
2331 * For SD cards, get the Relative Address.
2332 * This also puts the cards into Standby State
2334 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2335 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2336 cmd.cmdarg = mmc->rca << 16;
2337 cmd.resp_type = MMC_RSP_R6;
2339 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD assigns its own RCA; take it from the R6 response */
2345 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2348 /* Get the Card-Specific Data */
2349 cmd.cmdidx = MMC_CMD_SEND_CSD;
2350 cmd.resp_type = MMC_RSP_R2;
2351 cmd.cmdarg = mmc->rca << 16;
2353 err = mmc_send_cmd(mmc, &cmd, NULL);
2358 mmc->csd[0] = cmd.response[0];
2359 mmc->csd[1] = cmd.response[1];
2360 mmc->csd[2] = cmd.response[2];
2361 mmc->csd[3] = cmd.response[3];
/* for MMC, the spec version lives in CSD bits [125:122] */
2363 if (mmc->version == MMC_VERSION_UNKNOWN) {
2364 int version = (cmd.response[0] >> 26) & 0xf;
2368 mmc->version = MMC_VERSION_1_2;
2371 mmc->version = MMC_VERSION_1_4;
2374 mmc->version = MMC_VERSION_2_2;
2377 mmc->version = MMC_VERSION_3;
2380 mmc->version = MMC_VERSION_4;
2383 mmc->version = MMC_VERSION_1_2;
2388 /* divide frequency by 10, since the mults are 10x bigger */
2389 freq = fbase[(cmd.response[0] & 0x7)];
2390 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2392 mmc->legacy_speed = freq * mult;
2393 mmc_select_mode(mmc, MMC_LEGACY);
2395 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2396 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2397 #if CONFIG_IS_ENABLED(MMC_WRITE)
2400 mmc->write_bl_len = mmc->read_bl_len;
2402 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* capacity layout differs between standard- and high-capacity CSD */
2405 if (mmc->high_capacity) {
2406 csize = (mmc->csd[1] & 0x3f) << 16
2407 | (mmc->csd[2] & 0xffff0000) >> 16;
2410 csize = (mmc->csd[1] & 0x3ff) << 2
2411 | (mmc->csd[2] & 0xc0000000) >> 30;
2412 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2415 mmc->capacity_user = (csize + 1) << (cmult + 2);
2416 mmc->capacity_user *= mmc->read_bl_len;
2417 mmc->capacity_boot = 0;
2418 mmc->capacity_rpmb = 0;
2419 for (i = 0; i < 4; i++)
2420 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the stack supports */
2422 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2423 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2425 #if CONFIG_IS_ENABLED(MMC_WRITE)
2426 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2427 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only when implemented and a value was configured */
2430 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2431 cmd.cmdidx = MMC_CMD_SET_DSR;
2432 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2433 cmd.resp_type = MMC_RSP_NONE;
2434 if (mmc_send_cmd(mmc, &cmd, NULL))
2435 pr_warn("MMC: SET_DSR failed\n");
2438 /* Select the card, and put it into Transfer Mode */
2439 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2440 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2441 cmd.resp_type = MMC_RSP_R1;
2442 cmd.cmdarg = mmc->rca << 16;
2443 err = mmc_send_cmd(mmc, &cmd, NULL);
2450 * For SD, its erase group is always one sector
2452 #if CONFIG_IS_ENABLED(MMC_WRITE)
2453 mmc->erase_grp_size = 1;
2455 mmc->part_config = MMCPART_NOAVAILABLE;
2457 err = mmc_startup_v4(mmc);
2461 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2465 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, skip mode negotiation */
2466 mmc_set_clock(mmc, mmc->legacy_speed, false);
2467 mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2468 mmc_set_bus_width(mmc, 1);
2471 err = sd_get_capabilities(mmc);
2474 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2476 err = mmc_get_capabilities(mmc);
2479 mmc_select_mode_and_width(mmc, mmc->card_caps);
2485 mmc->best_mode = mmc->selected_mode;
2487 /* Fix the block length for DDR mode */
2488 if (mmc->ddr_mode) {
2489 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2490 #if CONFIG_IS_ENABLED(MMC_WRITE)
2491 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2495 /* fill in device description */
2496 bdesc = mmc_get_blk_desc(mmc);
2500 bdesc->blksz = mmc->read_bl_len;
2501 bdesc->log2blksz = LOG2(bdesc->blksz);
2502 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2503 #if !defined(CONFIG_SPL_BUILD) || \
2504 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2505 !defined(CONFIG_USE_TINY_PRINTF))
/* decode vendor/product/revision strings out of the CID */
2506 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2507 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2508 (mmc->cid[3] >> 16) & 0xffff);
2509 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2510 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2511 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2512 (mmc->cid[2] >> 24) & 0xff);
2513 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2514 (mmc->cid[2] >> 16) & 0xf);
2516 bdesc->vendor[0] = 0;
2517 bdesc->product[0] = 0;
2518 bdesc->revision[0] = 0;
2521 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * Send CMD8 (SEND_IF_COND) to probe for an SD v2 card. A card that echoes
 * the 0xaa check pattern is marked SD_VERSION_2.
 */
2528 static int mmc_send_if_cond(struct mmc *mmc)
2533 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2534 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2535 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2536 cmd.resp_type = MMC_RSP_R7;
2538 err = mmc_send_cmd(mmc, &cmd, NULL);
/* a valid SD v2 card must echo the check pattern back */
2543 if ((cmd.response[0] & 0xff) != 0xaa)
2546 mmc->version = SD_VERSION_2;
2551 #if !CONFIG_IS_ENABLED(DM_MMC)
2552 /* board-specific MMC power initializations. */
2553 __weak void board_mmc_power_init(void)
/*
 * Look up the card (vmmc) and I/O (vqmmc) supply regulators with driver
 * model, or fall back to the legacy board hook. Missing regulators are not
 * fatal - they are just logged.
 */
2558 static int mmc_power_init(struct mmc *mmc)
2560 #if CONFIG_IS_ENABLED(DM_MMC)
2561 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2564 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2567 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2569 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2570 &mmc->vqmmc_supply);
2572 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2574 #else /* !CONFIG_DM_MMC */
2576 * Driver model should use a regulator, as above, rather than calling
2577 * out to board code.
2579 board_mmc_power_init();
2585 * put the host in the initial state:
2586 * - turn on Vdd (card power supply)
2587 * - configure the bus width and clock to minimal values
2589 static void mmc_set_initial_state(struct mmc *mmc)
2593 /* First try to set 3.3V. If it fails set to 1.8V */
2594 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2596 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2598 pr_warn("mmc: failed to set signal voltage\n");
/* safest configuration: legacy timing, 1-bit bus, minimum clock */
2600 mmc_select_mode(mmc, MMC_LEGACY);
2601 mmc_set_bus_width(mmc, 1);
2602 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the card's Vdd supply (vmmc regulator) when one was found. */
2605 static int mmc_power_on(struct mmc *mmc)
2607 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2608 if (mmc->vmmc_supply) {
2609 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2612 puts("Error enabling VMMC supply\n");
/* Gate the clock and disable the card's Vdd supply when one was found. */
2620 static int mmc_power_off(struct mmc *mmc)
2622 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2623 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2624 if (mmc->vmmc_supply) {
2625 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2628 pr_debug("Error disabling VMMC supply\n");
/* Power the card off, wait, then power it back on (full power cycle). */
2636 static int mmc_power_cycle(struct mmc *mmc)
2640 ret = mmc_power_off(mmc)
2644 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2645 * to be on the safer side.
2648 return mmc_power_on(mmc);
/*
 * First stage of card init: power up the interface, reset the card with
 * CMD0, probe for SD v2 via CMD8, then negotiate the operating conditions
 * (SD ACMD41 first, falling back to MMC CMD1 on timeout). Disables UHS
 * modes when a full power cycle is not possible, since UHS error recovery
 * requires one.
 */
2651 int mmc_get_op_cond(struct mmc *mmc)
2653 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2659 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2660 mmc_adapter_card_type_ident();
2662 err = mmc_power_init(mmc);
2666 #ifdef CONFIG_MMC_QUIRKS
2667 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2668 MMC_QUIRK_RETRY_SEND_CID;
2671 err = mmc_power_cycle(mmc);
2674 * if power cycling is not supported, we should not try
2675 * to use the UHS modes, because we wouldn't be able to
2676 * recover from an error during the UHS initialization.
2678 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2680 mmc->host_caps &= ~UHS_CAPS;
2681 err = mmc_power_on(mmc);
2686 #if CONFIG_IS_ENABLED(DM_MMC)
2687 /* The device has already been probed ready for use */
2689 /* made sure it's not NULL earlier */
2690 err = mmc->cfg->ops->init(mmc);
2697 mmc_set_initial_state(mmc);
2699 /* Reset the Card */
2700 err = mmc_go_idle(mmc);
2705 /* The internal partition reset to user partition(0) at every CMD0*/
2706 mmc_get_blk_desc(mmc)->hwpart = 0;
2708 /* Test for SD version 2 */
2709 err = mmc_send_if_cond(mmc);
2711 /* Now try to get the SD card's operating condition */
2712 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS negotiation needs a power cycle before retrying */
2713 if (err && uhs_en) {
2715 mmc_power_cycle(mmc);
2719 /* If the command timed out, we check for an MMC card */
2720 if (err == -ETIMEDOUT) {
2721 err = mmc_send_op_cond(mmc);
2724 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2725 pr_err("Card did not respond to voltage select!\n");
/*
 * Begin (possibly asynchronous) card initialization: check card presence,
 * seed the host capability mask with the always-available legacy modes,
 * and run mmc_get_op_cond(). Completion happens in mmc_complete_init().
 */
2734 int mmc_start_init(struct mmc *mmc)
2740 * all hosts are capable of 1 bit bus-width and able to use the legacy
2743 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2744 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2746 #if !defined(CONFIG_MMC_BROKEN_CD)
2747 /* we pretend there's no card when init is NULL */
2748 no_card = mmc_getcd(mmc) == 0;
2752 #if !CONFIG_IS_ENABLED(DM_MMC)
2753 no_card = no_card || (mmc->cfg->ops->init == NULL);
2757 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2758 pr_err("MMC: no card present\n");
2763 err = mmc_get_op_cond(mmc);
2766 mmc->init_in_progress = 1;
/*
 * Finish initialization started by mmc_start_init(): resolve any pending
 * operating-condition negotiation, then run the full startup sequence.
 */
2771 static int mmc_complete_init(struct mmc *mmc)
2775 mmc->init_in_progress = 0;
2776 if (mmc->op_cond_pending)
2777 err = mmc_complete_op_cond(mmc);
2780 err = mmc_startup(mmc);
/*
 * Public entry point: run start + complete init unless initialization is
 * already in progress, and log the elapsed time.
 */
2788 int mmc_init(struct mmc *mmc)
2791 __maybe_unused ulong start;
2792 #if CONFIG_IS_ENABLED(DM_MMC)
2793 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2800 start = get_timer(0);
2802 if (!mmc->init_in_progress)
2803 err = mmc_start_init(mmc);
2806 err = mmc_complete_init(mmc);
2808 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2813 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2814 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2815 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Downgrade the card out of the high-speed UHS/HS200/HS400 modes by
 * re-running mode selection with those capabilities masked off - e.g.
 * before handing the card over to an OS.
 */
2816 int mmc_deinit(struct mmc *mmc)
2824 caps_filtered = mmc->card_caps &
2825 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2826 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2827 MMC_CAP(UHS_SDR104));
2829 return sd_select_mode_and_width(mmc, caps_filtered);
2831 caps_filtered = mmc->card_caps &
2832 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2834 return mmc_select_mode_and_width(mmc, caps_filtered);
/*
 * NOTE(review): stores the driver-stage-register value for use during
 * startup; body is elided in this chunk — confirm against the full file.
 */
2839 int mmc_set_dsr(struct mmc *mmc, u16 val)
2845 /* CPU-specific MMC initializations */
2846 __weak int cpu_mmc_init(bd_t *bis)
2851 /* board-specific MMC initializations. */
2852 __weak int board_mmc_init(bd_t *bis)
/* Request that this device be initialized as early as possible. */
2857 void mmc_set_preinit(struct mmc *mmc, int preinit)
2859 mmc->preinit = preinit;
2862 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * DM variant: bind all MMC devices in sequence order, then probe each one,
 * logging (but not aborting on) individual probe failures.
 */
2863 static int mmc_probe(bd_t *bis)
2867 struct udevice *dev;
2869 ret = uclass_get(UCLASS_MMC, &uc);
2874 * Try to add them in sequence order. Really with driver model we
2875 * should allow holes, but the current MMC list does not allow that.
2876 * So if we request 0, 1, 3 we will get 0, 1, 2.
2878 for (i = 0; ; i++) {
2879 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2883 uclass_foreach_dev(dev, uc) {
2884 ret = device_probe(dev);
2886 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* legacy variant: delegate registration to the board hook */
2892 static int mmc_probe(bd_t *bis)
2894 if (board_mmc_init(bis) < 0)
/*
 * One-time global MMC subsystem setup: probe all controllers and print the
 * device list. Guarded so repeated calls are no-ops.
 */
2901 int mmc_initialize(bd_t *bis)
2903 static int initialized = 0;
2905 if (initialized) /* Avoid initializing mmc multiple times */
2909 #if !CONFIG_IS_ENABLED(BLK)
2910 #if !CONFIG_IS_ENABLED(MMC_TINY)
2914 ret = mmc_probe(bis);
2918 #ifndef CONFIG_SPL_BUILD
2919 print_mmc_devices(',');
2926 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device via the
 * EXT_CSD BKOPS_EN byte. Fails if the device lacks BKOPS support; succeeds
 * trivially if BKOPS is already enabled. Note: this write is one-time
 * programmable on the device.
 */
2927 int mmc_set_bkops_enable(struct mmc *mmc)
2930 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2932 err = mmc_send_ext_csd(mmc, ext_csd);
2934 puts("Could not get ext_csd register values\n");
2938 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2939 puts("Background operations not supported on device\n");
2940 return -EMEDIUMTYPE;
2943 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2944 puts("Background operations already enabled\n");
2948 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2950 puts("Failed to enable manual background operations\n");
2954 puts("Enabled manual background operations\n");