1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 #define DEFAULT_CMD6_TIMEOUT_MS 500
26 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
27 static int mmc_power_cycle(struct mmc *mmc);
28 #if !CONFIG_IS_ENABLED(MMC_TINY)
29 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
32 #if !CONFIG_IS_ENABLED(DM_MMC)
34 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
39 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * mmc_getwp() - return the write-protect state of the card.
 *
 * Queries the board-level hook first, then the controller driver's
 * getwp op if one is provided (visible below).  NOTE(review): this
 * excerpt elides several lines of the function (declaration of 'wp',
 * the fallback condition and the final return); code kept verbatim.
 */
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
109 for (j = 0; j < 4; j++)
110 printf("%02x ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
119 printf("\t\tERROR MMC rsp not supported\n");
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * mmc_mode_name() - human-readable name for a bus mode, for verbose/debug
 * output (only compiled when MMC_VERBOSE or DEBUG is enabled).
 * Out-of-range modes fall back to "Unknown mode" via the bounds check below.
 */
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [SD_LEGACY] = "SD Legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
/* NOTE(review): the in-range return (names[mode]) is elided from this excerpt */
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
/*
 * mmc_mode2freq() - nominal maximum clock frequency (Hz) for a bus mode.
 *
 * MMC_LEGACY is special-cased to the speed read from the card (below);
 * every other mode uses the static lookup table.  NOTE(review): the
 * table entries for MMC_HS/SD_HS and the final table-lookup return are
 * elided from this excerpt; code kept verbatim.
 */
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
164 [SD_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
/*
 * mmc_select_mode() - record the chosen bus mode in the mmc struct.
 *
 * Caches the derived transfer speed and DDR flag so later code does not
 * have to recompute them.  Does not touch the hardware itself.
 */
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * mmc_send_status() - issue CMD13 (SEND_STATUS) and return the card
 * status word through *status.
 *
 * The argument carries the RCA in the upper 16 bits except in SPI mode,
 * where the card is implicitly addressed.  NOTE(review): the retry loop
 * implied by 'retries = 5' and the return paths are elided from this
 * excerpt; code kept verbatim.
 */
209 int mmc_send_status(struct mmc *mmc, unsigned int *status)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 mmc_trace_state(mmc, &cmd);
223 *status = cmd.response[0];
227 mmc_trace_state(mmc, &cmd);
/*
 * mmc_poll_for_busy() - wait for the card to leave its busy phase.
 *
 * First tries the controller's dat0-level wait (mmc_wait_dat0); when
 * that is unavailable it falls back to polling CMD13 until the card
 * reports ready-for-data and is no longer in the programming state.
 * Error bits in the status word are reported via pr_err.  NOTE(review):
 * loop structure and return statements are elided from this excerpt;
 * code kept verbatim.
 */
231 int mmc_poll_for_busy(struct mmc *mmc, int timeout)
236 err = mmc_wait_dat0(mmc, 1, timeout);
241 err = mmc_send_status(mmc, &status);
245 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
246 (status & MMC_STATUS_CURR_STATE) !=
250 if (status & MMC_STATUS_MASK) {
251 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
252 pr_err("Status Error: 0x%08x\n", status);
264 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 pr_err("Timeout waiting card ready\n")
/*
 * mmc_set_blocklen() - issue CMD16 (SET_BLOCKLEN) to set the card's
 * read/write block length.
 *
 * With CONFIG_MMC_QUIRKS, cards flagged MMC_QUIRK_RETRY_SET_BLOCKLEN
 * get the command retried, since some cards fail the first attempt.
 * NOTE(review): early-exit paths (e.g. for DDR/SPI) and the retry loop
 * body are elided from this excerpt; code kept verbatim.
 */
273 int mmc_set_blocklen(struct mmc *mmc, int len)
281 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
282 cmd.resp_type = MMC_RSP_R1;
285 err = mmc_send_cmd(mmc, &cmd, NULL);
287 #ifdef CONFIG_MMC_QUIRKS
288 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
291 * It has been seen that SET_BLOCKLEN may fail on the first
292 * attempt, so let's try a few more times
295 err = mmc_send_cmd(mmc, &cmd, NULL);
305 #ifdef MMC_SUPPORTS_TUNING
306 static const u8 tuning_blk_pattern_4bit[] = {
307 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
308 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
309 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
310 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
311 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
312 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
313 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
314 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
317 static const u8 tuning_blk_pattern_8bit[] = {
318 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
319 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
320 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
321 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
322 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
323 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
324 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
325 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
326 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
327 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
328 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
329 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
330 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
331 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
332 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
333 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * mmc_send_tuning() - send one tuning command (opcode, e.g. CMD19/CMD21)
 * and verify the returned block against the expected tuning pattern.
 *
 * The reference pattern is chosen by the current bus width (8-bit vs
 * 4-bit).  Returns an error when the read data does not match.
 * NOTE(review): the 1-bit/unsupported-width branch, cmd.cmdidx
 * assignment and return statements are elided from this excerpt;
 * code kept verbatim.
 */
336 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
339 struct mmc_data data;
340 const u8 *tuning_block_pattern;
343 if (mmc->bus_width == 8) {
344 tuning_block_pattern = tuning_blk_pattern_8bit;
345 size = sizeof(tuning_blk_pattern_8bit);
346 } else if (mmc->bus_width == 4) {
347 tuning_block_pattern = tuning_blk_pattern_4bit;
348 size = sizeof(tuning_blk_pattern_4bit);
353 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
357 cmd.resp_type = MMC_RSP_R1;
359 data.dest = (void *)data_buf;
361 data.blocksize = size;
362 data.flags = MMC_DATA_READ;
364 err = mmc_send_cmd(mmc, &cmd, &data);
368 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * mmc_read_blocks() - read 'blkcnt' blocks starting at 'start' into dst.
 *
 * Uses CMD18 (READ_MULTIPLE_BLOCK) or CMD17 (READ_SINGLE_BLOCK).  For
 * non-high-capacity cards the argument is a byte offset (start *
 * read_bl_len, below); high-capacity cards use block addressing
 * (assignment elided in this excerpt).  Multi-block reads are closed
 * with CMD12 (STOP_TRANSMISSION).  NOTE(review): branch structure and
 * returns are partially elided; code kept verbatim.
 */
375 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
379 struct mmc_data data;
382 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
384 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
386 if (mmc->high_capacity)
389 cmd.cmdarg = start * mmc->read_bl_len;
391 cmd.resp_type = MMC_RSP_R1;
394 data.blocks = blkcnt;
395 data.blocksize = mmc->read_bl_len;
396 data.flags = MMC_DATA_READ;
398 if (mmc_send_cmd(mmc, &cmd, &data))
402 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
404 cmd.resp_type = MMC_RSP_R1b;
405 if (mmc_send_cmd(mmc, &cmd, NULL)) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 pr_err("mmc fail to send stop cmd\n")
416 #if CONFIG_IS_ENABLED(BLK)
/*
 * mmc_bread() - block-device read entry point (returns blocks read).
 *
 * Two signatures: the udevice form under CONFIG_BLK, the blk_desc form
 * otherwise.  Selects the requested hw partition, rejects reads past
 * block_dev->lba, sets the block length, then loops reading at most
 * cfg->b_max blocks per mmc_read_blocks() call.  NOTE(review): loop
 * framing (do/while entry, start/blocks_todo updates) and returns are
 * partially elided from this excerpt; code kept verbatim.
 */
417 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
419 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
423 #if CONFIG_IS_ENABLED(BLK)
424 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
426 int dev_num = block_dev->devnum;
428 lbaint_t cur, blocks_todo = blkcnt;
433 struct mmc *mmc = find_mmc_device(dev_num);
437 if (CONFIG_IS_ENABLED(MMC_TINY))
438 err = mmc_switch_part(mmc, block_dev->hwpart);
440 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
445 if ((start + blkcnt) > block_dev->lba) {
446 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
447 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
448 start + blkcnt, block_dev->lba);
453 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
454 pr_debug("%s: Failed to set blocklen\n", __func__);
459 cur = (blocks_todo > mmc->cfg->b_max) ?
460 mmc->cfg->b_max : blocks_todo;
461 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
462 pr_debug("%s: Failed to read blocks\n", __func__);
467 dst += cur * mmc->read_bl_len;
468 } while (blocks_todo > 0);
/*
 * mmc_go_idle() - issue CMD0 (GO_IDLE_STATE) to reset the card.
 *
 * CMD0 takes no argument and expects no response.  NOTE(review): the
 * excerpt elides the surrounding delays and error/return handling;
 * code kept verbatim.
 */
473 static int mmc_go_idle(struct mmc *mmc)
480 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
482 cmd.resp_type = MMC_RSP_NONE;
484 err = mmc_send_cmd(mmc, &cmd, NULL);
494 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - perform the SD signal-voltage switch sequence.
 *
 * For a switch to 3.3V no CMD11 is needed; the host voltage is set
 * directly.  Otherwise CMD11 is sent, the card is expected to pull
 * cmd/dat[0:3] low, the clock is gated while the host changes the I/O
 * voltage (SD spec requires >= 5 ms of gating), and finally dat0 must
 * return high or the switch failed.  NOTE(review): error-return lines
 * between steps are elided from this excerpt; code kept verbatim.
 */
495 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
501 * Send CMD11 only if the request is to switch the card to
504 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
505 return mmc_set_signal_voltage(mmc, signal_voltage);
507 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
509 cmd.resp_type = MMC_RSP_R1;
511 err = mmc_send_cmd(mmc, &cmd, NULL);
515 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
519 * The card should drive cmd and dat[0:3] low immediately
520 * after the response of cmd11, but wait 100 us to be sure
522 err = mmc_wait_dat0(mmc, 0, 100);
529 * During a signal voltage level switch, the clock must be gated
530 * for 5 ms according to the SD spec
532 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
534 err = mmc_set_signal_voltage(mmc, signal_voltage);
538 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
540 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
543 * Failure to switch is indicated by the card holding
544 * dat[0:3] low. Wait for at least 1 ms according to spec
546 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * sd_send_op_cond() - power up an SD card with ACMD41.
 *
 * Sends CMD55 + ACMD41 (loop framing elided in this excerpt) until the
 * card clears OCR_BUSY.  The argument advertises the host voltage
 * window, HCS for SD 2.0 cards and S18R when UHS is requested.  In SPI
 * mode the OCR is read back separately with CMD58.  Finally the
 * high-capacity flag is latched from OCR_HCS, and a voltage switch to
 * 1.8V is attempted when the card accepted S18R (bit pattern
 * 0x41000000 = busy-done + S18A).  Code kept verbatim.
 */
556 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
563 cmd.cmdidx = MMC_CMD_APP_CMD;
564 cmd.resp_type = MMC_RSP_R1;
567 err = mmc_send_cmd(mmc, &cmd, NULL);
572 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
573 cmd.resp_type = MMC_RSP_R3;
576 * Most cards do not answer if some reserved bits
577 * in the ocr are set. However, some controllers
578 * can set bit 7 (reserved for low voltages), but
579 * how to manage low voltages SD card is not yet
582 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
583 (mmc->cfg->voltages & 0xff8000);
585 if (mmc->version == SD_VERSION_2)
586 cmd.cmdarg |= OCR_HCS;
589 cmd.cmdarg |= OCR_S18R;
591 err = mmc_send_cmd(mmc, &cmd, NULL);
596 if (cmd.response[0] & OCR_BUSY)
605 if (mmc->version != SD_VERSION_2)
606 mmc->version = SD_VERSION_1_0;
608 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
609 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
610 cmd.resp_type = MMC_RSP_R3;
613 err = mmc_send_cmd(mmc, &cmd, NULL);
619 mmc->ocr = cmd.response[0];
621 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
622 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
624 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
630 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_op_cond_iter() - one CMD1 (SEND_OP_COND) iteration.
 *
 * With use_arg set (and not in SPI mode) the argument echoes the
 * voltage window the card reported, intersected with the host's,
 * plus HCS and the card's access-mode bits.  The OCR returned by the
 * card is stored in mmc->ocr for the caller to inspect OCR_BUSY.
 */
636 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
641 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
642 cmd.resp_type = MMC_RSP_R3;
644 if (use_arg && !mmc_host_is_spi(mmc))
645 cmd.cmdarg = OCR_HCS |
646 (mmc->cfg->voltages &
647 (mmc->ocr & OCR_VOLTAGE_MASK)) |
648 (mmc->ocr & OCR_ACCESS_MODE);
650 err = mmc_send_cmd(mmc, &cmd, NULL);
653 mmc->ocr = cmd.response[0];
/*
 * mmc_send_op_cond() - start eMMC power-up negotiation (CMD1).
 *
 * Probes the card twice (first probe with no argument, second with the
 * negotiated voltage window).  If the card is still busy afterwards,
 * op_cond_pending is set so mmc_complete_op_cond() finishes the job
 * later, allowing other init work to proceed meanwhile.
 */
657 static int mmc_send_op_cond(struct mmc *mmc)
661 /* Some cards seem to need this */
664 /* Ask the card for its capabilities */
665 for (i = 0; i < 2; i++) {
666 err = mmc_send_op_cond_iter(mmc, i != 0);
670 /* exit if not busy (flag seems to be inverted) */
671 if (mmc->ocr & OCR_BUSY)
674 mmc->op_cond_pending = 1;
/*
 * mmc_complete_op_cond() - finish the CMD1 negotiation started by
 * mmc_send_op_cond().
 *
 * If the card was still busy, CMD1 is re-polled until OCR_BUSY is set
 * or 'timeout' elapses (timeout declaration elided in this excerpt —
 * presumably the usual 1000 ms; confirm against full source).  In SPI
 * mode the OCR is then read with CMD58.  On success the version is
 * reset to "unknown" (CID/CSD parsing fills it in later) and the
 * high-capacity flag is latched from OCR_HCS.  Code kept verbatim.
 */
678 static int mmc_complete_op_cond(struct mmc *mmc)
685 mmc->op_cond_pending = 0;
686 if (!(mmc->ocr & OCR_BUSY)) {
687 /* Some cards seem to need this */
690 start = get_timer(0);
692 err = mmc_send_op_cond_iter(mmc, 1);
695 if (mmc->ocr & OCR_BUSY)
697 if (get_timer(start) > timeout)
703 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
704 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
705 cmd.resp_type = MMC_RSP_R3;
708 err = mmc_send_cmd(mmc, &cmd, NULL);
713 mmc->ocr = cmd.response[0];
716 mmc->version = MMC_VERSION_UNKNOWN;
718 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_ext_csd() - read the card's Extended CSD register into
 * ext_csd (caller provides a MMC_MAX_BLOCK_LEN = 512 byte buffer).
 */
725 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
728 struct mmc_data data;
731 /* Read the 512-byte Extended CSD register (CMD8 for eMMC) */
732 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
733 cmd.resp_type = MMC_RSP_R1;
736 data.dest = (char *)ext_csd;
738 data.blocksize = MMC_MAX_BLOCK_LEN;
739 data.flags = MMC_DATA_READ;
741 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * __mmc_switch() - issue CMD6 (SWITCH) to write one EXT_CSD byte and
 * wait for the card to complete the switch.
 *
 * The busy timeout defaults to DEFAULT_CMD6_TIMEOUT_MS, overridden by
 * the card's generic CMD6 time, or by the partition-switch time when
 * changing EXT_CSD_PART_CONF (both EXT_CSD fields are in 10 ms units).
 * The command is retried on failure, then completion is detected via
 * the dat0 line (mmc_wait_dat0) or, failing that, by polling CMD13 for
 * ready-for-data / switch-error status.  NOTE(review): declarations,
 * the retries initialisation and return statements are elided from
 * this excerpt; code kept verbatim.
 */
746 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
749 unsigned int status, start;
751 int timeout = DEFAULT_CMD6_TIMEOUT_MS;
752 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
753 (index == EXT_CSD_PART_CONF);
757 if (mmc->gen_cmd6_time)
758 timeout = mmc->gen_cmd6_time * 10;
760 if (is_part_switch && mmc->part_switch_time)
761 timeout = mmc->part_switch_time * 10;
763 cmd.cmdidx = MMC_CMD_SWITCH;
764 cmd.resp_type = MMC_RSP_R1b;
765 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
770 ret = mmc_send_cmd(mmc, &cmd, NULL);
771 } while (ret && retries-- > 0);
776 start = get_timer(0);
778 /* poll dat0 for rdy/busy status */
779 ret = mmc_wait_dat0(mmc, 1, timeout);
780 if (ret && ret != -ENOSYS)
784 * In cases when not allowed to poll by using CMD13 or because we aren't
785 * capable of polling by using mmc_wait_dat0, then rely on waiting the
786 * stated timeout to be sufficient.
788 if (ret == -ENOSYS && !send_status)
791 /* Finally wait until the card is ready or indicates a failure
792 * to switch. It doesn't hurt to use CMD13 here even if send_status
793 * is false, because by now (after 'timeout' ms) the bus should be
797 ret = mmc_send_status(mmc, &status);
799 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
800 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
804 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
807 } while (get_timer(start) < timeout);
812 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
814 return __mmc_switch(mmc, set, index, value, true);
817 #if !CONFIG_IS_ENABLED(MMC_TINY)
818 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
824 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
830 speed_bits = EXT_CSD_TIMING_HS;
832 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
834 speed_bits = EXT_CSD_TIMING_HS200;
837 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
839 speed_bits = EXT_CSD_TIMING_HS400;
843 speed_bits = EXT_CSD_TIMING_LEGACY;
849 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
850 speed_bits, !hsdowngrade);
854 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
855 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
857 * In case the eMMC is in HS200/HS400 mode and we are downgrading
858 * to HS mode, the card clock are still running much faster than
859 * the supported HS mode clock, so we can not reliably read out
860 * Extended CSD. Reconfigure the controller to run at HS mode.
863 mmc_select_mode(mmc, MMC_HS);
864 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
868 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
869 /* Now check to see that it worked */
870 err = mmc_send_ext_csd(mmc, test_csd);
874 /* No high-speed support */
875 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - derive the card's capability mask from its
 * cached EXT_CSD.
 *
 * Starts from 1-bit legacy, returns early for SPI hosts and pre-v4
 * cards, then decodes EXT_CSD_CARD_TYPE into HS/HS-52/DDR-52 and,
 * when the corresponding support is compiled in, HS200/HS400 caps.
 * NOTE(review): early-return lines and the ext_csd NULL check framing
 * are elided from this excerpt; code kept verbatim.
 */
882 static int mmc_get_capabilities(struct mmc *mmc)
884 u8 *ext_csd = mmc->ext_csd;
887 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
889 if (mmc_host_is_spi(mmc))
892 /* Only version 4 supports high-speed */
893 if (mmc->version < MMC_VERSION_4)
897 pr_err("No ext_csd found!\n"); /* this should never happen */
901 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
903 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
904 mmc->cardtype = cardtype;
906 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
907 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
908 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
909 mmc->card_caps |= MMC_MODE_HS200;
912 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
913 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
914 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
915 mmc->card_caps |= MMC_MODE_HS400;
918 if (cardtype & EXT_CSD_CARD_TYPE_52) {
919 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
920 mmc->card_caps |= MMC_MODE_DDR_52MHz;
921 mmc->card_caps |= MMC_MODE_HS_52MHz;
923 if (cardtype & EXT_CSD_CARD_TYPE_26)
924 mmc->card_caps |= MMC_MODE_HS;
/*
 * mmc_set_capacity() - set mmc->capacity for the selected hw partition
 * (0 = user area, 1/2 = boot, 3 = RPMB, 4..7 = GP partitions) and
 * refresh the block descriptor's LBA count accordingly.
 * NOTE(review): the switch framing and default (error) case are elided
 * from this excerpt; code kept verbatim.
 */
930 static int mmc_set_capacity(struct mmc *mmc, int part_num)
934 mmc->capacity = mmc->capacity_user;
938 mmc->capacity = mmc->capacity_boot;
941 mmc->capacity = mmc->capacity_rpmb;
947 mmc->capacity = mmc->capacity_gp[part_num - 4];
953 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
958 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
959 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
964 if (part_num & PART_ACCESS_MASK)
965 forbidden = MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400);
967 if (MMC_CAP(mmc->selected_mode) & forbidden) {
968 pr_debug("selected mode (%s) is forbidden for part %d\n",
969 mmc_mode_name(mmc->selected_mode), part_num);
971 } else if (mmc->selected_mode != mmc->best_mode) {
972 pr_debug("selected mode is not optimal\n");
977 return mmc_select_mode_and_width(mmc,
978 mmc->card_caps & ~forbidden);
983 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
984 unsigned int part_num)
/*
 * mmc_switch_part() - select a hardware partition via EXT_CSD_PART_CONF.
 *
 * First validates that the current bus mode allows access to the target
 * partition (mmc_boot_part_access_chk), then rewrites only the
 * PART_ACCESS bits of part_config.  The capacity/blk-desc bookkeeping
 * runs when the switch succeeded, or when it failed with -ENODEV while
 * returning to the raw device (part 0), so the descriptor stays usable.
 */
990 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
994 ret = mmc_boot_part_access_chk(mmc, part_num);
998 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
999 (mmc->part_config & ~PART_ACCESS_MASK)
1000 | (part_num & PART_ACCESS_MASK));
1003 * Set the capacity if the switch succeeded or was intended
1004 * to return to representing the raw device.
1006 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1007 ret = mmc_set_capacity(mmc, part_num);
1008 mmc_get_blk_desc(mmc)->hwpart = part_num;
1014 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
1015 int mmc_hwpart_config(struct mmc *mmc,
1016 const struct mmc_hwpart_conf *conf,
1017 enum mmc_hwpart_conf_mode mode)
1022 u32 gp_size_mult[4];
1023 u32 max_enh_size_mult;
1024 u32 tot_enh_size_mult = 0;
1027 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1029 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1032 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1033 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1034 return -EMEDIUMTYPE;
1037 if (!(mmc->part_support & PART_SUPPORT)) {
1038 pr_err("Card does not support partitioning\n");
1039 return -EMEDIUMTYPE;
1042 if (!mmc->hc_wp_grp_size) {
1043 pr_err("Card does not define HC WP group size\n");
1044 return -EMEDIUMTYPE;
1047 /* check partition alignment and total enhanced size */
1048 if (conf->user.enh_size) {
1049 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1050 conf->user.enh_start % mmc->hc_wp_grp_size) {
1051 pr_err("User data enhanced area not HC WP group "
1055 part_attrs |= EXT_CSD_ENH_USR;
1056 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1057 if (mmc->high_capacity) {
1058 enh_start_addr = conf->user.enh_start;
1060 enh_start_addr = (conf->user.enh_start << 9);
1066 tot_enh_size_mult += enh_size_mult;
1068 for (pidx = 0; pidx < 4; pidx++) {
1069 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1070 pr_err("GP%i partition not HC WP group size "
1071 "aligned\n", pidx+1);
1074 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1075 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1076 part_attrs |= EXT_CSD_ENH_GP(pidx);
1077 tot_enh_size_mult += gp_size_mult[pidx];
1081 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1082 pr_err("Card does not support enhanced attribute\n");
1083 return -EMEDIUMTYPE;
1086 err = mmc_send_ext_csd(mmc, ext_csd);
1091 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1092 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1093 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1094 if (tot_enh_size_mult > max_enh_size_mult) {
1095 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1096 tot_enh_size_mult, max_enh_size_mult);
1097 return -EMEDIUMTYPE;
1100 /* The default value of EXT_CSD_WR_REL_SET is device
1101 * dependent, the values can only be changed if the
1102 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1103 * changed only once and before partitioning is completed. */
1104 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1105 if (conf->user.wr_rel_change) {
1106 if (conf->user.wr_rel_set)
1107 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1109 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1111 for (pidx = 0; pidx < 4; pidx++) {
1112 if (conf->gp_part[pidx].wr_rel_change) {
1113 if (conf->gp_part[pidx].wr_rel_set)
1114 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1116 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1120 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1121 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1122 puts("Card does not support host controlled partition write "
1123 "reliability settings\n");
1124 return -EMEDIUMTYPE;
1127 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1128 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1129 pr_err("Card already partitioned\n");
1133 if (mode == MMC_HWPART_CONF_CHECK)
1136 /* Partitioning requires high-capacity size definitions */
1137 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1138 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1139 EXT_CSD_ERASE_GROUP_DEF, 1);
1144 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1146 /* update erase group size to be high-capacity */
1147 mmc->erase_grp_size =
1148 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1152 /* all OK, write the configuration */
1153 for (i = 0; i < 4; i++) {
1154 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1155 EXT_CSD_ENH_START_ADDR+i,
1156 (enh_start_addr >> (i*8)) & 0xFF);
1160 for (i = 0; i < 3; i++) {
1161 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1162 EXT_CSD_ENH_SIZE_MULT+i,
1163 (enh_size_mult >> (i*8)) & 0xFF);
1167 for (pidx = 0; pidx < 4; pidx++) {
1168 for (i = 0; i < 3; i++) {
1169 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1170 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1171 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1176 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1177 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1181 if (mode == MMC_HWPART_CONF_SET)
1184 /* The WR_REL_SET is a write-once register but shall be
1185 * written before setting PART_SETTING_COMPLETED. As it is
1186 * write-once we can only write it when completing the
1188 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1189 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1190 EXT_CSD_WR_REL_SET, wr_rel_set);
1195 /* Setting PART_SETTING_COMPLETED confirms the partition
1196 * configuration but it only becomes effective after power
1197 * cycle, so we do not adjust the partition related settings
1198 * in the mmc struct. */
1200 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1201 EXT_CSD_PARTITION_SETTING,
1202 EXT_CSD_PARTITION_SETTING_COMPLETED);
1210 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_getcd() - return the card-detect state (mirrors mmc_getwp):
 * board hook first, then the controller driver's getcd op.
 * NOTE(review): fallback condition and return are elided from this
 * excerpt; code kept verbatim.
 */
1211 int mmc_getcd(struct mmc *mmc)
1215 cd = board_mmc_getcd(mmc);
1218 if (mmc->cfg->ops->getcd)
1219 cd = mmc->cfg->ops->getcd(mmc);
1228 #if !CONFIG_IS_ENABLED(MMC_TINY)
1229 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1232 struct mmc_data data;
1234 /* Switch the frequency */
1235 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1236 cmd.resp_type = MMC_RSP_R1;
1237 cmd.cmdarg = (mode << 31) | 0xffffff;
1238 cmd.cmdarg &= ~(0xf << (group * 4));
1239 cmd.cmdarg |= value << (group * 4);
1241 data.dest = (char *)resp;
1242 data.blocksize = 64;
1244 data.flags = MMC_DATA_READ;
1246 return mmc_send_cmd(mmc, &cmd, &data);
1249 static int sd_get_capabilities(struct mmc *mmc)
1253 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1254 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1255 struct mmc_data data;
1257 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1261 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1263 if (mmc_host_is_spi(mmc))
1266 /* Read the SCR to find out if this card supports higher speeds */
1267 cmd.cmdidx = MMC_CMD_APP_CMD;
1268 cmd.resp_type = MMC_RSP_R1;
1269 cmd.cmdarg = mmc->rca << 16;
1271 err = mmc_send_cmd(mmc, &cmd, NULL);
1276 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1277 cmd.resp_type = MMC_RSP_R1;
1283 data.dest = (char *)scr;
1286 data.flags = MMC_DATA_READ;
1288 err = mmc_send_cmd(mmc, &cmd, &data);
1297 mmc->scr[0] = __be32_to_cpu(scr[0]);
1298 mmc->scr[1] = __be32_to_cpu(scr[1]);
1300 switch ((mmc->scr[0] >> 24) & 0xf) {
1302 mmc->version = SD_VERSION_1_0;
1305 mmc->version = SD_VERSION_1_10;
1308 mmc->version = SD_VERSION_2;
1309 if ((mmc->scr[0] >> 15) & 0x1)
1310 mmc->version = SD_VERSION_3;
1313 mmc->version = SD_VERSION_1_0;
1317 if (mmc->scr[0] & SD_DATA_4BIT)
1318 mmc->card_caps |= MMC_MODE_4BIT;
1320 /* Version 1.0 doesn't support switching */
1321 if (mmc->version == SD_VERSION_1_0)
1326 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1327 (u8 *)switch_status);
1332 /* The high-speed function is busy. Try again */
1333 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1337 /* If high-speed isn't supported, we return */
1338 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1339 mmc->card_caps |= MMC_CAP(SD_HS);
1341 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1342 /* Version before 3.0 don't support UHS modes */
1343 if (mmc->version < SD_VERSION_3)
1346 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1347 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1348 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1349 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1350 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1351 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1352 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1353 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1354 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1355 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1356 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1362 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1366 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1369 /* SD version 1.00 and 1.01 does not support CMD 6 */
1370 if (mmc->version == SD_VERSION_1_0)
1375 speed = UHS_SDR12_BUS_SPEED;
1378 speed = HIGH_SPEED_BUS_SPEED;
1380 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1382 speed = UHS_SDR12_BUS_SPEED;
1385 speed = UHS_SDR25_BUS_SPEED;
1388 speed = UHS_SDR50_BUS_SPEED;
1391 speed = UHS_DDR50_BUS_SPEED;
1394 speed = UHS_SDR104_BUS_SPEED;
1401 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1405 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * sd_select_bus_width() - set the SD card's bus width with ACMD6
 * (CMD55 + SET_BUS_WIDTH).  Only 1-bit and 4-bit are legal for SD.
 * NOTE(review): the cmdarg encoding of 'w' and the return statements
 * are elided from this excerpt; code kept verbatim.
 */
1411 static int sd_select_bus_width(struct mmc *mmc, int w)
1416 if ((w != 4) && (w != 1))
1419 cmd.cmdidx = MMC_CMD_APP_CMD;
1420 cmd.resp_type = MMC_RSP_R1;
1421 cmd.cmdarg = mmc->rca << 16;
1423 err = mmc_send_cmd(mmc, &cmd, NULL);
1427 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1428 cmd.resp_type = MMC_RSP_R1;
1433 err = mmc_send_cmd(mmc, &cmd, NULL);
1441 #if CONFIG_IS_ENABLED(MMC_WRITE)
1442 static int sd_read_ssr(struct mmc *mmc)
1444 static const unsigned int sd_au_size[] = {
1445 0, SZ_16K / 512, SZ_32K / 512,
1446 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1447 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1448 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1449 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1454 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1455 struct mmc_data data;
1457 unsigned int au, eo, et, es;
1459 cmd.cmdidx = MMC_CMD_APP_CMD;
1460 cmd.resp_type = MMC_RSP_R1;
1461 cmd.cmdarg = mmc->rca << 16;
1463 err = mmc_send_cmd(mmc, &cmd, NULL);
1467 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1468 cmd.resp_type = MMC_RSP_R1;
1472 data.dest = (char *)ssr;
1473 data.blocksize = 64;
1475 data.flags = MMC_DATA_READ;
1477 err = mmc_send_cmd(mmc, &cmd, &data);
1485 for (i = 0; i < 16; i++)
1486 ssr[i] = be32_to_cpu(ssr[i]);
1488 au = (ssr[2] >> 12) & 0xF;
1489 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1490 mmc->ssr.au = sd_au_size[au];
1491 es = (ssr[3] >> 24) & 0xFF;
1492 es |= (ssr[2] & 0xFF) << 8;
1493 et = (ssr[3] >> 18) & 0x3F;
1495 eo = (ssr[3] >> 16) & 0x3;
1496 mmc->ssr.erase_timeout = (et * 1000) / es;
1497 mmc->ssr.erase_offset = eo * 1000;
1500 pr_debug("Invalid Allocation Unit Size.\n");
1506 /* frequency bases */
1507 /* divided by 10 to be nice to platforms without floating point */
1508 static const int fbase[] = {
1515 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1516 * to platforms without floating point.
1518 static const u8 multipliers[] = {
1537 static inline int bus_width(uint cap)
1539 if (cap == MMC_MODE_8BIT)
1541 if (cap == MMC_MODE_4BIT)
1543 if (cap == MMC_MODE_1BIT)
1545 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1549 #if !CONFIG_IS_ENABLED(DM_MMC)
1550 #ifdef MMC_SUPPORTS_TUNING
1551 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1557 static int mmc_set_ios(struct mmc *mmc)
1561 if (mmc->cfg->ops->set_ios)
1562 ret = mmc->cfg->ops->set_ios(mmc);
/*
 * mmc_set_clock() - request a bus clock, clamped to the host's
 * [f_min, f_max] range, and optionally gate it (disable).
 * The values are cached in the mmc struct and applied through
 * mmc_set_ios().  NOTE(review): the line storing 'clock' into
 * mmc->clock is elided from this excerpt; code kept verbatim.
 */
1568 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1571 if (clock > mmc->cfg->f_max)
1572 clock = mmc->cfg->f_max;
1574 if (clock < mmc->cfg->f_min)
1575 clock = mmc->cfg->f_min;
1579 mmc->clk_disable = disable;
1581 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1583 return mmc_set_ios(mmc);
1586 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1588 mmc->bus_width = width;
1590 return mmc_set_ios(mmc);
1593 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1595 * helper function to display the capabilities in a human
1596 * friendly manner. The capabilities include bus width and
1599 void mmc_dump_capabilities(const char *text, uint caps)
1603 pr_debug("%s: widths [", text);
1604 if (caps & MMC_MODE_8BIT)
1606 if (caps & MMC_MODE_4BIT)
1608 if (caps & MMC_MODE_1BIT)
1610 pr_debug("\b\b] modes [");
1611 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1612 if (MMC_CAP(mode) & caps)
1613 pr_debug("%s, ", mmc_mode_name(mode));
1614 pr_debug("\b\b]\n");
1618 struct mode_width_tuning {
1621 #ifdef MMC_SUPPORTS_TUNING
1626 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1627 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1630 case MMC_SIGNAL_VOLTAGE_000: return 0;
1631 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1632 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1633 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * mmc_set_signal_voltage() - change the host's I/O signal voltage.
 *
 * No-op when the requested level is already active; otherwise the new
 * level is cached and pushed to the driver through mmc_set_ios().
 * NOTE(review): the early-return and final return lines are elided
 * from this excerpt; code kept verbatim.
 */
1638 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1642 if (mmc->signal_voltage == signal_voltage)
1645 mmc->signal_voltage = signal_voltage;
1646 err = mmc_set_ios(mmc);
1648 pr_debug("unable to set voltage (err %d)\n", err);
1653 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1659 #if !CONFIG_IS_ENABLED(MMC_TINY)
1660 static const struct mode_width_tuning sd_modes_by_pref[] = {
1661 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1662 #ifdef MMC_SUPPORTS_TUNING
1665 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1666 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1671 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1675 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1679 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1684 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1686 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1689 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1694 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1698 #define for_each_sd_mode_by_pref(caps, mwt) \
1699 for (mwt = sd_modes_by_pref;\
1700 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1702 if (caps & MMC_CAP(mwt->mode))
/*
 * Pick the best (mode, bus width) pair an SD card and host both support,
 * then program card and host accordingly: bus width first, then card
 * speed, then host mode/clock, then optional tuning. On any failure the
 * bus is reverted to SD_LEGACY before trying the next candidate.
 * 'card_caps' is the card's capability bitmask; returns 0 on success.
 */
1704 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1707 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1708 const struct mode_width_tuning *mwt;
1709 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS only if the card advertised 1.8V switching support (S18R in OCR) */
1710 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1712 bool uhs_en = false;
1717 mmc_dump_capabilities("sd card", card_caps);
1718 mmc_dump_capabilities("host", mmc->host_caps);
1721 /* Restrict card's capabilities by what the host can do */
1722 caps = card_caps & mmc->host_caps;
1727 for_each_sd_mode_by_pref(caps, mwt) {
/* try wider widths first (4-bit before 1-bit) */
1730 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1731 if (*w & caps & mwt->widths) {
1732 pr_debug("trying mode %s width %d (at %d MHz)\n",
1733 mmc_mode_name(mwt->mode),
1735 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1737 /* configure the bus width (card + host) */
1738 err = sd_select_bus_width(mmc, bus_width(*w));
1741 mmc_set_bus_width(mmc, bus_width(*w));
1743 /* configure the bus mode (card) */
1744 err = sd_set_card_speed(mmc, mwt->mode);
1748 /* configure the bus mode (host) */
1749 mmc_select_mode(mmc, mwt->mode);
1750 mmc_set_clock(mmc, mmc->tran_speed,
1753 #ifdef MMC_SUPPORTS_TUNING
1754 /* execute tuning if needed */
1755 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1756 err = mmc_execute_tuning(mmc,
1759 pr_debug("tuning failed\n");
1765 #if CONFIG_IS_ENABLED(MMC_WRITE)
/* SSR is needed for erase/AU size info; failure is non-fatal */
1766 err = sd_read_ssr(mmc);
1768 pr_warn("unable to read ssr\n");
1774 /* revert to a safer bus speed */
1775 mmc_select_mode(mmc, SD_LEGACY);
1776 mmc_set_clock(mmc, mmc->tran_speed,
1782 pr_err("unable to select a mode\n");
1787 * read and compare the part of ext csd that is constant.
1788 * This can be used to check that the transfer is working
/*
 * Re-reads EXT_CSD and compares a handful of read-only fields
 * (partitioning support, WP/erase group sizes, revision, sector count)
 * against the cached copy; a match means the newly configured bus
 * mode/width transfers data correctly. Only meaningful for MMC >= v4.
 */
1791 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1794 const u8 *ext_csd = mmc->ext_csd;
1795 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1797 if (mmc->version < MMC_VERSION_4)
1800 err = mmc_send_ext_csd(mmc, test_csd);
1804 /* Only compare read only fields */
1805 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1806 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1807 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1808 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1809 ext_csd[EXT_CSD_REV]
1810 == test_csd[EXT_CSD_REV] &&
1811 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1812 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1813 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1814 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1820 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * For a target eMMC bus mode, build the mask of signal voltages the card
 * type supports (from mmc->cardtype / EXT_CSD device type bits), then
 * try each voltage allowed by both card and caller from lowest to
 * highest (ffs() picks the lowest set bit) until one sticks.
 */
1821 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1822 uint32_t allowed_mask)
1829 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1830 EXT_CSD_CARD_TYPE_HS400_1_8V))
1831 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1832 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1833 EXT_CSD_CARD_TYPE_HS400_1_2V))
1834 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1837 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1838 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1839 MMC_SIGNAL_VOLTAGE_180;
1840 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1841 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1844 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1848 while (card_mask & allowed_mask) {
1849 enum mmc_voltage best_match;
/* lowest set bit = lowest voltage both sides allow */
1851 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1852 if (!mmc_set_signal_voltage(mmc, best_match))
/* drop the failed candidate and retry with the next one */
1855 allowed_mask &= ~best_match;
/* Stub when MMC_IO_VOLTAGE is disabled: no voltage negotiation. */
1861 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1862 uint32_t allowed_mask)
/*
 * eMMC bus modes ordered from most to least preferred (HS400 > HS200 >
 * DDR52 > HS > legacy); HS200/HS400 entries specify the tuning command.
 */
1868 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1869 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1872 .widths = MMC_MODE_8BIT,
1873 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1876 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1879 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1880 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1885 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1889 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1893 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1897 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/*
 * Iterate over mmc_modes_by_pref in preference order, visiting only
 * modes present in 'caps'.
 */
1901 #define for_each_mmc_mode_by_pref(caps, mwt) \
1902 for (mwt = mmc_modes_by_pref;\
1903 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1905 if (caps & MMC_CAP(mwt->mode))
/*
 * Map host capability bit + DDR flag to the EXT_CSD BUS_WIDTH value to
 * program on the card; ordered widest-first so the selection loop tries
 * 8-bit before 4-bit before 1-bit.
 */
1907 static const struct ext_csd_bus_width {
1911 } ext_csd_bus_width[] = {
1912 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1913 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1914 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1915 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1916 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1919 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Switch an eMMC card into HS400 following the mandated sequence:
 * HS200 (for tuning) -> tuning -> back to HS -> 8-bit DDR bus width ->
 * HS400 timing on card -> HS400 mode/clock on host.
 */
1920 static int mmc_select_hs400(struct mmc *mmc)
1924 /* Set timing to HS200 for tuning */
1925 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1929 /* configure the bus mode (host) */
1930 mmc_select_mode(mmc, MMC_HS_200);
1931 mmc_set_clock(mmc, mmc->tran_speed, false);
1933 /* execute tuning if needed */
1934 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1936 debug("tuning failed\n");
1940 /* Set back to HS */
1941 mmc_set_card_speed(mmc, MMC_HS, true);
1943 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1944 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1948 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1952 mmc_select_mode(mmc, MMC_HS_400);
1953 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub when HS400 support is compiled out. */
1960 static int mmc_select_hs400(struct mmc *mmc)
/*
 * Iterate over ext_csd_bus_width entries matching both the requested
 * DDR-ness and the capability mask 'caps' (widest width first).
 */
1966 #define for_each_supported_width(caps, ddr, ecbv) \
1967 for (ecbv = ext_csd_bus_width;\
1968 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1970 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Pick and program the best (mode, width) pair for an eMMC device.
 * Requires a cached EXT_CSD (MMC >= v4). For each candidate: negotiate
 * the lowest usable signal voltage, set bus width on the card, then
 * either the full HS400 sequence or speed + (optional DDR) width, then
 * host mode/clock and tuning; finally verify with an EXT_CSD read-back.
 * On failure the previous voltage is restored and the bus drops to
 * 1-bit legacy before the next candidate is tried.
 */
1972 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1975 const struct mode_width_tuning *mwt;
1976 const struct ext_csd_bus_width *ecbw;
1979 mmc_dump_capabilities("mmc", card_caps);
1980 mmc_dump_capabilities("host", mmc->host_caps);
1983 /* Restrict card's capabilities by what the host can do */
1984 card_caps &= mmc->host_caps;
1986 /* Only version 4 of MMC supports wider bus widths */
1987 if (mmc->version < MMC_VERSION_4)
1990 if (!mmc->ext_csd) {
1991 pr_debug("No ext_csd found!\n"); /* this should never happen */
1995 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
1996 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1998 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
1999 * before doing anything else, since a transition from either of
2000 * the HS200/HS400 mode directly to legacy mode is not supported.
2002 if (mmc->selected_mode == MMC_HS_200 ||
2003 mmc->selected_mode == MMC_HS_400)
2004 mmc_set_card_speed(mmc, MMC_HS, true);
2007 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2009 for_each_mmc_mode_by_pref(card_caps, mwt) {
2010 for_each_supported_width(card_caps & mwt->widths,
2011 mmc_is_mode_ddr(mwt->mode), ecbw) {
2012 enum mmc_voltage old_voltage;
2013 pr_debug("trying mode %s width %d (at %d MHz)\n",
2014 mmc_mode_name(mwt->mode),
2015 bus_width(ecbw->cap),
2016 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember current voltage so it can be restored on failure */
2017 old_voltage = mmc->signal_voltage;
2018 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2019 MMC_ALL_SIGNAL_VOLTAGE);
2023 /* configure the bus width (card + host) */
2024 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2026 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2029 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2031 if (mwt->mode == MMC_HS_400) {
2032 err = mmc_select_hs400(mmc);
2034 printf("Select HS400 failed %d\n", err);
2038 /* configure the bus speed (card) */
2039 err = mmc_set_card_speed(mmc, mwt->mode, false);
2044 * configure the bus width AND the ddr mode
2045 * (card). The host side will be taken care
2046 * of in the next step
2048 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2049 err = mmc_switch(mmc,
2050 EXT_CSD_CMD_SET_NORMAL,
2052 ecbw->ext_csd_bits);
2057 /* configure the bus mode (host) */
2058 mmc_select_mode(mmc, mwt->mode);
2059 mmc_set_clock(mmc, mmc->tran_speed,
2061 #ifdef MMC_SUPPORTS_TUNING
2063 /* execute tuning if needed */
2065 err = mmc_execute_tuning(mmc,
2068 pr_debug("tuning failed\n");
2075 /* do a transfer to check the configuration */
2076 err = mmc_read_and_compare_ext_csd(mmc);
2080 mmc_set_signal_voltage(mmc, old_voltage);
2081 /* if an error occurred, revert to a safer bus mode */
2082 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2083 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2084 mmc_select_mode(mmc, MMC_LEGACY);
2085 mmc_set_bus_width(mmc, 1);
2089 pr_err("unable to select a mode\n");
2095 #if CONFIG_IS_ENABLED(MMC_TINY)
2096 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * MMC v4+ specific startup: read and cache EXT_CSD, derive the exact
 * spec version, high-capacity size (SEC_CNT), GENERIC_CMD6_TIME,
 * partition configuration (boot/RPMB/GP partitions, enhanced user
 * area), enable ERASE_GRP_DEF when partitioned, and compute erase/WP
 * group sizes. No-op (early return) for SD cards and MMC < v4.
 */
2099 static int mmc_startup_v4(struct mmc *mmc)
2103 bool has_parts = false;
2104 bool part_completed;
2105 static const u32 mmc_versions[] = {
2117 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: use the static file-scope buffer instead of malloc */
2118 u8 *ext_csd = ext_csd_bkup;
2120 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2124 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2126 err = mmc_send_ext_csd(mmc, ext_csd);
2130 /* store the ext csd for future reference */
2132 mmc->ext_csd = ext_csd;
2134 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2136 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2139 /* check ext_csd version and capacity */
2140 err = mmc_send_ext_csd(mmc, ext_csd);
2144 /* store the ext csd for future reference */
2146 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2149 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2151 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2154 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2156 if (mmc->version >= MMC_VERSION_4_2) {
2158 * According to the JEDEC Standard, the value of
2159 * ext_csd's capacity is valid if the value is more
/* assemble little-endian 32-bit SEC_CNT from the byte array */
2162 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2163 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2164 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2165 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2166 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT is authoritative only above 2 GiB (high-capacity) */
2167 if ((capacity >> 20) > 2 * 1024)
2168 mmc->capacity_user = capacity;
2171 if (mmc->version >= MMC_VERSION_4_5)
2172 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2174 /* The partition data may be non-zero but it is only
2175 * effective if PARTITION_SETTING_COMPLETED is set in
2176 * EXT_CSD, so ignore any data if this bit is not set,
2177 * except for enabling the high-capacity group size
2178 * definition (see below).
2180 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2181 EXT_CSD_PARTITION_SETTING_COMPLETED);
2183 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2184 /* Some eMMC set the value too low so set a minimum */
2185 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2186 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2188 /* store the partition info of emmc */
2189 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2190 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2191 ext_csd[EXT_CSD_BOOT_MULT])
2192 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2193 if (part_completed &&
2194 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2195 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT_MULT / RPMB_MULT are in 128 KiB units, hence << 17 */
2197 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2199 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2201 for (i = 0; i < 4; i++) {
2202 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2203 uint mult = (ext_csd[idx + 2] << 16) +
2204 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2207 if (!part_completed)
2209 mmc->capacity_gp[i] = mult;
2210 mmc->capacity_gp[i] *=
2211 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2212 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2213 mmc->capacity_gp[i] <<= 19;
2216 #ifndef CONFIG_SPL_BUILD
2217 if (part_completed) {
2218 mmc->enh_user_size =
2219 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2220 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2221 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2222 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2223 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2224 mmc->enh_user_size <<= 19;
2225 mmc->enh_user_start =
2226 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2227 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2228 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2229 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors */
2230 if (mmc->high_capacity)
2231 mmc->enh_user_start <<= 9;
2236 * Host needs to enable ERASE_GRP_DEF bit if device is
2237 * partitioned. This bit will be lost every time after a reset
2238 * or power off. This will affect erase size.
2242 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2243 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2246 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2247 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy consistent with the card */
2252 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2255 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2256 #if CONFIG_IS_ENABLED(MMC_WRITE)
2257 /* Read out group size from ext_csd */
2258 mmc->erase_grp_size =
2259 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2262 * if high capacity and partition setting completed
2263 * SEC_COUNT is valid even if it is smaller than 2 GiB
2264 * JEDEC Standard JESD84-B45, 6.2.4
2266 if (mmc->high_capacity && part_completed) {
2267 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2268 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2269 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2270 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2271 capacity *= MMC_MAX_BLOCK_LEN;
2272 mmc->capacity_user = capacity;
2275 #if CONFIG_IS_ENABLED(MMC_WRITE)
2277 /* Calculate the group size from the csd value. */
2278 int erase_gsz, erase_gmul;
2280 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2281 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2282 mmc->erase_grp_size = (erase_gsz + 1)
2286 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2287 mmc->hc_wp_grp_size = 1024
2288 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2289 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2292 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2297 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* NOTE(review): error path presumably frees the malloc'd copy before
 * clearing the pointer — the free() line is elided here; confirm. */
2300 mmc->ext_csd = NULL;
/*
 * Full card enumeration after the operating-condition handshake:
 * CID -> RCA -> CSD (version, legacy speed, block lengths, capacity) ->
 * optional DSR -> select card -> v4 EXT_CSD parsing -> capability query
 * and bus mode/width selection -> fill in the block-device descriptor.
 */
2305 static int mmc_startup(struct mmc *mmc)
2311 struct blk_desc *bdesc;
2313 #ifdef CONFIG_MMC_SPI_CRC_ON
2314 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2315 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2316 cmd.resp_type = MMC_RSP_R1;
2318 err = mmc_send_cmd(mmc, &cmd, NULL);
2324 /* Put the Card in Identify Mode */
2325 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2326 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2327 cmd.resp_type = MMC_RSP_R2;
2330 err = mmc_send_cmd(mmc, &cmd, NULL);
2332 #ifdef CONFIG_MMC_QUIRKS
2333 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2336 * It has been seen that SEND_CID may fail on the first
2337 * attempt, let's try a few more times
2340 err = mmc_send_cmd(mmc, &cmd, NULL);
2343 } while (retries--);
2350 memcpy(mmc->cid, cmd.response, 16);
2353 * For MMC cards, set the Relative Address.
2354 * For SD cards, get the Relative Address.
2355 * This also puts the cards into Standby State
2357 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2358 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2359 cmd.cmdarg = mmc->rca << 16;
2360 cmd.resp_type = MMC_RSP_R6;
2362 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD: card assigned its own RCA in the response */
2368 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2371 /* Get the Card-Specific Data */
2372 cmd.cmdidx = MMC_CMD_SEND_CSD;
2373 cmd.resp_type = MMC_RSP_R2;
2374 cmd.cmdarg = mmc->rca << 16;
2376 err = mmc_send_cmd(mmc, &cmd, NULL);
2381 mmc->csd[0] = cmd.response[0];
2382 mmc->csd[1] = cmd.response[1];
2383 mmc->csd[2] = cmd.response[2];
2384 mmc->csd[3] = cmd.response[3];
/* derive MMC spec version from the CSD SPEC_VERS field */
2386 if (mmc->version == MMC_VERSION_UNKNOWN) {
2387 int version = (cmd.response[0] >> 26) & 0xf;
2391 mmc->version = MMC_VERSION_1_2;
2394 mmc->version = MMC_VERSION_1_4;
2397 mmc->version = MMC_VERSION_2_2;
2400 mmc->version = MMC_VERSION_3;
2403 mmc->version = MMC_VERSION_4;
2406 mmc->version = MMC_VERSION_1_2;
2411 /* divide frequency by 10, since the mults are 10x bigger */
2412 freq = fbase[(cmd.response[0] & 0x7)];
2413 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2415 mmc->legacy_speed = freq * mult;
2416 mmc_select_mode(mmc, MMC_LEGACY);
2418 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2419 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2420 #if CONFIG_IS_ENABLED(MMC_WRITE)
2423 mmc->write_bl_len = mmc->read_bl_len;
2425 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2428 if (mmc->high_capacity) {
2429 csize = (mmc->csd[1] & 0x3f) << 16
2430 | (mmc->csd[2] & 0xffff0000) >> 16;
2433 csize = (mmc->csd[1] & 0x3ff) << 2
2434 | (mmc->csd[2] & 0xc0000000) >> 30;
2435 cmult = (mmc->csd[2] & 0x00038000) >> 15;
/* standard CSD capacity formula: (C_SIZE+1) * 2^(C_SIZE_MULT+2) blocks */
2438 mmc->capacity_user = (csize + 1) << (cmult + 2);
2439 mmc->capacity_user *= mmc->read_bl_len;
2440 mmc->capacity_boot = 0;
2441 mmc->capacity_rpmb = 0;
2442 for (i = 0; i < 4; i++)
2443 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the stack handles */
2445 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2446 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2448 #if CONFIG_IS_ENABLED(MMC_WRITE)
2449 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2450 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the driver stage register only if implemented and configured */
2453 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2454 cmd.cmdidx = MMC_CMD_SET_DSR;
2455 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2456 cmd.resp_type = MMC_RSP_NONE;
2457 if (mmc_send_cmd(mmc, &cmd, NULL))
2458 pr_warn("MMC: SET_DSR failed\n");
2461 /* Select the card, and put it into Transfer Mode */
2462 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2463 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2464 cmd.resp_type = MMC_RSP_R1;
2465 cmd.cmdarg = mmc->rca << 16;
2466 err = mmc_send_cmd(mmc, &cmd, NULL);
2473 * For SD, its erase group is always one sector
2475 #if CONFIG_IS_ENABLED(MMC_WRITE)
2476 mmc->erase_grp_size = 1;
2478 mmc->part_config = MMCPART_NOAVAILABLE;
2480 err = mmc_startup_v4(mmc);
2484 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2488 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, skip capability negotiation */
2489 mmc_set_clock(mmc, mmc->legacy_speed, false);
2490 mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2491 mmc_set_bus_width(mmc, 1);
2494 err = sd_get_capabilities(mmc);
2497 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2499 err = mmc_get_capabilities(mmc);
2502 mmc_select_mode_and_width(mmc, mmc->card_caps);
2508 mmc->best_mode = mmc->selected_mode;
2510 /* Fix the block length for DDR mode */
2511 if (mmc->ddr_mode) {
2512 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2513 #if CONFIG_IS_ENABLED(MMC_WRITE)
2514 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2518 /* fill in device description */
2519 bdesc = mmc_get_blk_desc(mmc);
2523 bdesc->blksz = mmc->read_bl_len;
2524 bdesc->log2blksz = LOG2(bdesc->blksz);
2525 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2526 #if !defined(CONFIG_SPL_BUILD) || \
2527 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2528 !defined(CONFIG_USE_TINY_PRINTF))
/* decode manufacturer/serial, product name and revision from the CID */
2529 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2530 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2531 (mmc->cid[3] >> 16) & 0xffff);
2532 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2533 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2534 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2535 (mmc->cid[2] >> 24) & 0xff);
2536 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2537 (mmc->cid[2] >> 16) & 0xf);
2539 bdesc->vendor[0] = 0;
2540 bdesc->product[0] = 0;
2541 bdesc->revision[0] = 0;
2544 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * Send CMD8 (SEND_IF_COND) with the 0xAA check pattern; a matching echo
 * identifies an SD v2.0+ card and sets mmc->version accordingly.
 */
2551 static int mmc_send_if_cond(struct mmc *mmc)
2556 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2557 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2558 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2559 cmd.resp_type = MMC_RSP_R7;
2561 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo the check pattern back */
2566 if ((cmd.response[0] & 0xff) != 0xaa)
2569 mmc->version = SD_VERSION_2;
2574 #if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
/* Weak default (no-op); boards override to switch on card power rails. */
2576 __weak void board_mmc_power_init(void)
/*
 * Resolve the card's power supplies. With driver model + regulators,
 * look up the optional vmmc/vqmmc supplies from the device tree (absence
 * is not an error); otherwise fall back to the board hook.
 */
2581 static int mmc_power_init(struct mmc *mmc)
2583 #if CONFIG_IS_ENABLED(DM_MMC)
2584 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2587 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2590 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2592 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2593 &mmc->vqmmc_supply);
2595 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2597 #else /* !CONFIG_DM_MMC */
2599 * Driver model should use a regulator, as above, rather than calling
2600 * out to board code.
2602 board_mmc_power_init();
 * put the host in the initial state:
 * - turn on Vdd (card power supply)
 * - configure the bus width and clock to minimal values
2612 static void mmc_set_initial_state(struct mmc *mmc)
2616 /* First try to set 3.3V. If it fails set to 1.8V */
2617 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2619 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2621 pr_warn("mmc: failed to set signal voltage\n");
/* legacy mode, 1-bit bus, minimum clock: safe for identification */
2623 mmc_select_mode(mmc, MMC_LEGACY);
2624 mmc_set_bus_width(mmc, 1);
2625 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the card's vmmc regulator (if one was resolved) to power up. */
2628 static int mmc_power_on(struct mmc *mmc)
2630 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2631 if (mmc->vmmc_supply) {
2632 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2635 puts("Error enabling VMMC supply\n");
/* Stop the clock, then disable the vmmc regulator (if present). */
2643 static int mmc_power_off(struct mmc *mmc)
2645 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2646 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2647 if (mmc->vmmc_supply) {
2648 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2651 pr_debug("Error disabling VMMC supply\n");
/*
 * Power the card off, wait, then power it back on — required e.g. to
 * recover a card stuck after a failed UHS voltage switch.
 */
2659 static int mmc_power_cycle(struct mmc *mmc)
2663 ret = mmc_power_off(mmc);
2667 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2668 * to be on the safer side.
2671 return mmc_power_on(mmc);
/*
 * Bring the card to the point where its operating conditions are known:
 * power setup (with a pre-init power cycle when supported — UHS modes
 * are disabled if a full cycle is impossible, since UHS error recovery
 * needs one), controller init, CMD0 reset, CMD8, then SD ACMD41 or —
 * on timeout — MMC CMD1.
 */
2674 int mmc_get_op_cond(struct mmc *mmc)
2676 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2682 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2683 mmc_adapter_card_type_ident();
2685 err = mmc_power_init(mmc);
2689 #ifdef CONFIG_MMC_QUIRKS
2690 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2691 MMC_QUIRK_RETRY_SEND_CID;
2694 err = mmc_power_cycle(mmc);
2697 * if power cycling is not supported, we should not try
2698 * to use the UHS modes, because we wouldn't be able to
2699 * recover from an error during the UHS initialization.
2701 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2703 mmc->host_caps &= ~UHS_CAPS;
2704 err = mmc_power_on(mmc);
2709 #if CONFIG_IS_ENABLED(DM_MMC)
2710 /* The device has already been probed ready for use */
2712 /* made sure it's not NULL earlier */
2713 err = mmc->cfg->ops->init(mmc);
2720 mmc_set_initial_state(mmc);
2722 /* Reset the Card */
2723 err = mmc_go_idle(mmc);
2728 /* The internal partition reset to user partition(0) at every CMD0*/
2729 mmc_get_blk_desc(mmc)->hwpart = 0;
2731 /* Test for SD version 2 */
2732 err = mmc_send_if_cond(mmc);
2734 /* Now try to get the SD card's operating condition */
2735 err = sd_send_op_cond(mmc, uhs_en);
/* UHS negotiation failed: power cycle and presumably retry without UHS */
2736 if (err && uhs_en) {
2738 mmc_power_cycle(mmc);
2742 /* If the command timed out, we check for an MMC card */
2743 if (err == -ETIMEDOUT) {
2744 err = mmc_send_op_cond(mmc);
2747 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2748 pr_err("Card did not respond to voltage select!\n");
/*
 * First (non-blocking-friendly) phase of initialization: seed the
 * baseline host capabilities, check card presence, and run the
 * operating-condition handshake. Sets init_in_progress on success so
 * mmc_init() can complete later.
 */
2757 int mmc_start_init(struct mmc *mmc)
2763 * all hosts are capable of 1 bit bus-width and able to use the legacy
2766 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2767 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2769 #if !defined(CONFIG_MMC_BROKEN_CD)
2770 /* we pretend there's no card when init is NULL */
2771 no_card = mmc_getcd(mmc) == 0;
2775 #if !CONFIG_IS_ENABLED(DM_MMC)
2776 no_card = no_card || (mmc->cfg->ops->init == NULL);
2780 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2781 pr_err("MMC: no card present\n");
2786 err = mmc_get_op_cond(mmc);
2789 mmc->init_in_progress = 1;
/*
 * Second phase: finish a pending CMD1 poll (op_cond_pending) if any,
 * then run the full mmc_startup() enumeration. Clears init_in_progress.
 */
2794 static int mmc_complete_init(struct mmc *mmc)
2798 mmc->init_in_progress = 0;
2799 if (mmc->op_cond_pending)
2800 err = mmc_complete_op_cond(mmc);
2803 err = mmc_startup(mmc);
/*
 * Public entry point: run both init phases (start if not already in
 * progress, then complete) and log the result with elapsed time.
 */
2811 int mmc_init(struct mmc *mmc)
2814 __maybe_unused ulong start;
2815 #if CONFIG_IS_ENABLED(DM_MMC)
2816 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2823 start = get_timer(0);
2825 if (!mmc->init_in_progress)
2826 err = mmc_start_init(mmc);
2829 err = mmc_complete_init(mmc);
2831 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2836 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2837 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2838 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Drop the card back to a non-high-speed mode before handing off (e.g.
 * to an OS): re-run mode selection with the UHS (SD) or HS200/HS400
 * (eMMC) capabilities masked out.
 */
2839 int mmc_deinit(struct mmc *mmc)
2847 caps_filtered = mmc->card_caps &
2848 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2849 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2850 MMC_CAP(UHS_SDR104));
2852 return sd_select_mode_and_width(mmc, caps_filtered);
2854 caps_filtered = mmc->card_caps &
2855 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2857 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Record the driver stage register value to be programmed at startup. */
2862 int mmc_set_dsr(struct mmc *mmc, u16 val)
/* CPU-specific MMC initializations */
/* Weak default; SoC code overrides to register its controllers. */
2869 __weak int cpu_mmc_init(bd_t *bis)
/* board-specific MMC initializations. */
/* Weak default; board code overrides to register its controllers. */
2875 __weak int board_mmc_init(bd_t *bis)
/* Mark a device for early (pre-command) initialization. */
2880 void mmc_set_preinit(struct mmc *mmc, int preinit)
2882 mmc->preinit = preinit;
2885 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Driver-model probe: bind all MMC devices in sequence order, then
 * probe each one; probe failures are logged but do not stop the loop.
 */
2886 static int mmc_probe(bd_t *bis)
2890 struct udevice *dev;
2892 ret = uclass_get(UCLASS_MMC, &uc);
2897 * Try to add them in sequence order. Really with driver model we
2898 * should allow holes, but the current MMC list does not allow that.
2899 * So if we request 0, 1, 3 we will get 0, 1, 2.
2901 for (i = 0; ; i++) {
2902 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2906 uclass_foreach_dev(dev, uc) {
2907 ret = device_probe(dev);
2909 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-driver-model probe: delegate to the board's registration hook. */
2915 static int mmc_probe(bd_t *bis)
2917 if (board_mmc_init(bis) < 0)
/*
 * One-time subsystem bring-up: probe all controllers and (outside SPL)
 * print the device list. Guarded so repeated calls are no-ops.
 */
2924 int mmc_initialize(bd_t *bis)
2926 static int initialized = 0;
2928 if (initialized) /* Avoid initializing mmc multiple times */
2932 #if !CONFIG_IS_ENABLED(BLK)
2933 #if !CONFIG_IS_ENABLED(MMC_TINY)
2937 ret = mmc_probe(bis);
2941 #ifndef CONFIG_SPL_BUILD
2942 print_mmc_devices(',');
2949 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device.
 * Checks EXT_CSD for support, skips if already enabled, then sets
 * BKOPS_EN via CMD6. Note: BKOPS_EN is one-time programmable.
 */
2950 int mmc_set_bkops_enable(struct mmc *mmc)
2953 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2955 err = mmc_send_ext_csd(mmc, ext_csd);
2957 puts("Could not get ext_csd register values\n");
2961 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2962 puts("Background operations not supported on device\n");
2963 return -EMEDIUMTYPE;
2966 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2967 puts("Background operations already enabled\n");
2971 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2973 puts("Failed to enable manual background operations\n");
2977 puts("Enabled manual background operations\n");