1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
15 #include <dm/device-internal.h>
19 #include <power/regulator.h>
22 #include <linux/list.h>
24 #include "mmc_private.h"
26 #define DEFAULT_CMD6_TIMEOUT_MS 500
28 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
30 #if !CONFIG_IS_ENABLED(DM_MMC)
32 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
37 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * Query the card's write-protect state. The board-level hook is asked
 * first; if the controller provides a getwp op it overrides the result.
 * NOTE(review): interior lines are elided here — presumably the board
 * hook returns a negative value to mean "not decided"; confirm in full file.
 */
42 int mmc_getwp(struct mmc *mmc)
46 	wp = board_mmc_getwp(mmc);
49 	if (mmc->cfg->ops->getwp)
/* Controller-specific write-protect detection takes precedence */
50 		wp = mmc->cfg->ops->getwp(mmc);
58 __weak int board_mmc_getcd(struct mmc *mmc)
64 #ifdef CONFIG_MMC_TRACE
/* Trace helper (CONFIG_MMC_TRACE): dump command index and argument
 * before the command is sent to the host controller. */
65 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
67 	printf("CMD_SEND:%d\n", cmd->cmdidx);
68 	printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
/*
 * Trace helper (CONFIG_MMC_TRACE): print the return code and decode the
 * response words according to the command's response type.
 * NOTE(review): switch-case labels are elided in this view — each printf
 * below belongs to one MMC_RSP_* case; confirm against the full file.
 */
71 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
77 		printf("\t\tRET\t\t\t %d\n", ret);
79 		switch (cmd->resp_type) {
81 			printf("\t\tMMC_RSP_NONE\n");
84 			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
88 			printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
92 			printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
94 			printf("\t\t          \t\t 0x%08x \n",
96 			printf("\t\t          \t\t 0x%08x \n",
98 			printf("\t\t          \t\t 0x%08x \n",
101 			printf("\t\t\t\t\tDUMPING DATA\n");
/* R2 responses are 128 bits: dump the 4 response words byte by byte */
102 			for (i = 0; i < 4; i++) {
104 				printf("\t\t\t\t\t%03d - ", i*4);
105 				ptr = (u8 *)&cmd->response[i];
/* Walk the word backwards (ptr--) so bytes print in bus order */
107 				for (j = 0; j < 4; j++)
108 					printf("%02x ", *ptr--);
113 			printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
117 			printf("\t\tERROR MMC rsp not supported\n");
/* Trace helper: extract and print the CURRENT_STATE field (bits 12:9)
 * of the R1 card status response. */
123 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
127 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
128 	printf("CURR STATE:%d\n", status);
132 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * Map a bus_mode enum value to a human-readable name for verbose/debug
 * output. Out-of-range values yield "Unknown mode".
 */
133 const char *mmc_mode_name(enum bus_mode mode)
135 	static const char *const names[] = {
136 	      [MMC_LEGACY]	= "MMC legacy",
137 	      [MMC_HS]		= "MMC High Speed (26MHz)",
138 	      [SD_HS]		= "SD High Speed (50MHz)",
139 	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
140 	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
141 	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
142 	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
143 	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
144 	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
145 	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
146 	      [MMC_HS_200]	= "HS200 (200MHz)",
147 	      [MMC_HS_400]	= "HS400 (200MHz)",
148 	      [MMC_HS_400_ES]	= "HS400ES (200MHz)",
/* Guard against values past the table before indexing it */
151 	if (mode >= MMC_MODES_END)
152 		return "Unknown mode";
/*
 * Return the nominal bus clock (Hz) for a given bus mode.
 * MMC_LEGACY is special-cased to the card's probed legacy speed rather
 * than the table value; out-of-range modes fall through (tail elided).
 */
158 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
160 	static const int freqs[] = {
161 	      [MMC_LEGACY]	= 25000000,
164 	      [MMC_HS_52]	= 52000000,
165 	      [MMC_DDR_52]	= 52000000,
166 	      [UHS_SDR12]	= 25000000,
167 	      [UHS_SDR25]	= 50000000,
168 	      [UHS_SDR50]	= 100000000,
169 	      [UHS_DDR50]	= 50000000,
170 	      [UHS_SDR104]	= 208000000,
171 	      [MMC_HS_200]	= 200000000,
172 	      [MMC_HS_400]	= 200000000,
173 	      [MMC_HS_400_ES]	= 200000000,
176 	if (mode == MMC_LEGACY)
177 		return mmc->legacy_speed;
178 	else if (mode >= MMC_MODES_END)
/*
 * Record the selected bus mode in the mmc struct and derive the
 * transfer speed and DDR flag from it. Does not touch the hardware.
 */
184 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
186 	mmc->selected_mode = mode;
187 	mmc->tran_speed = mmc_mode2freq(mmc, mode);
188 	mmc->ddr_mode = mmc_is_mode_ddr(mode);
189 	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
190 		 mmc->tran_speed / 1000000);
194 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Non-DM command dispatch: send a command (plus optional data transfer)
 * via the host controller ops, wrapped with before/after trace hooks.
 */
195 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
199 	mmmc_trace_before_send(mmc, cmd);
200 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
201 	mmmc_trace_after_send(mmc, cmd, ret);
/*
 * Issue CMD13 (SEND_STATUS) and return the card status word in *status.
 * Retried up to 5 times; RCA argument is skipped in SPI mode.
 * NOTE(review): the retry loop body is partially elided in this view.
 */
207 int mmc_send_status(struct mmc *mmc, unsigned int *status)
210 	int err, retries = 5;
212 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
213 	cmd.resp_type = MMC_RSP_R1;
/* SPI mode has no RCA; only native mode passes the card address */
214 	if (!mmc_host_is_spi(mmc))
215 		cmd.cmdarg = mmc->rca << 16;
218 		err = mmc_send_cmd(mmc, &cmd, NULL);
220 			mmc_trace_state(mmc, &cmd);
221 			*status = cmd.response[0];
225 	mmc_trace_state(mmc, &cmd);
/*
 * Wait for the card to leave busy state, preferring DAT0 polling
 * (mmc_wait_dat0) and falling back to CMD13 status polling.
 * Returns an error on status-error bits or on timeout.
 */
229 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
/* timeout is in ms; mmc_wait_dat0 takes microseconds */
234 	err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
239 		err = mmc_send_status(mmc, &status);
/* Done when ready-for-data is set and the card is not in PRG state
 * (the CURR_STATE comparison target is elided in this view) */
243 		if ((status & MMC_STATUS_RDY_FOR_DATA) &&
244 		    (status & MMC_STATUS_CURR_STATE) !=
248 		if (status & MMC_STATUS_MASK) {
249 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
250 			pr_err("Status Error: 0x%08x\n", status);
/* 1 ms granularity busy-poll countdown */
255 		if (timeout_ms-- <= 0)
261 	if (timeout_ms <= 0) {
262 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
263 		pr_err("Timeout waiting card ready\n");
/*
 * Issue CMD16 (SET_BLOCKLEN). With CONFIG_MMC_QUIRKS, retry for cards
 * known to fail the first attempt (MMC_QUIRK_RETRY_SET_BLOCKLEN).
 * NOTE(review): early-exit conditions (e.g. DDR/SPI skip) are elided here.
 */
271 int mmc_set_blocklen(struct mmc *mmc, int len)
279 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
280 	cmd.resp_type = MMC_RSP_R1;
283 	err = mmc_send_cmd(mmc, &cmd, NULL);
285 #ifdef CONFIG_MMC_QUIRKS
286 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
289 		 * It has been seen that SET_BLOCKLEN may fail on the first
290 		 * attempt, let's try a few more time
293 			err = mmc_send_cmd(mmc, &cmd, NULL);
303 #ifdef MMC_SUPPORTS_TUNING
/* Standard 64-byte tuning block pattern for 4-bit bus tuning (CMD19/CMD21),
 * compared against the data the card returns during tuning. */
304 static const u8 tuning_blk_pattern_4bit[] = {
305 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
306 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
307 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
308 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
309 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
310 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
311 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
312 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard 128-byte tuning block pattern for 8-bit bus tuning. */
315 static const u8 tuning_blk_pattern_8bit[] = {
316 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
317 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
318 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
319 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
320 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
321 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
322 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
323 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
324 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
325 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
326 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
327 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
328 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
329 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
330 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
331 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * Send a tuning command (opcode CMD19/CMD21) and read back one tuning
 * block; the bus-width-appropriate reference pattern is compared against
 * the received data. Returns non-zero on mismatch or command failure.
 */
334 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
337 	struct mmc_data data;
338 	const u8 *tuning_block_pattern;
/* Pattern and size depend on the active bus width (8/4-bit) */
341 	if (mmc->bus_width == 8) {
342 		tuning_block_pattern = tuning_blk_pattern_8bit;
343 		size = sizeof(tuning_blk_pattern_8bit);
344 	} else if (mmc->bus_width == 4) {
345 		tuning_block_pattern = tuning_blk_pattern_4bit;
346 		size = sizeof(tuning_blk_pattern_4bit);
/* Receive buffer must be cache-line aligned for DMA-capable hosts */
351 	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
355 	cmd.resp_type = MMC_RSP_R1;
357 	data.dest = (void *)data_buf;
359 	data.blocksize = size;
360 	data.flags = MMC_DATA_READ;
362 	err = mmc_send_cmd(mmc, &cmd, &data);
/* Tuning succeeded only if the returned block matches the pattern */
366 	if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * Read blkcnt blocks starting at 'start' into dst using CMD17/CMD18.
 * High-capacity cards address by block number, others by byte offset.
 * Multi-block reads are terminated with CMD12 (STOP_TRANSMISSION).
 * Returns the number of blocks read (0 on failure) — inferred from
 * callers comparing the result to 'cur'; confirm in full file.
 */
373 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
377 	struct mmc_data data;
380 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
382 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
/* High-capacity: LBA argument; standard-capacity: byte address */
384 	if (mmc->high_capacity)
387 		cmd.cmdarg = start * mmc->read_bl_len;
389 	cmd.resp_type = MMC_RSP_R1;
392 	data.blocks = blkcnt;
393 	data.blocksize = mmc->read_bl_len;
394 	data.flags = MMC_DATA_READ;
396 	if (mmc_send_cmd(mmc, &cmd, &data))
/* Open-ended multi-block read must be closed with CMD12 */
400 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
402 		cmd.resp_type = MMC_RSP_R1b;
403 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
404 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
405 			pr_err("mmc fail to send stop cmd\n");
414 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Maximum block count per transfer: controller hook if provided,
 * otherwise the static b_max from the host config. */
415 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
417 	if (mmc->cfg->ops->get_b_max)
418 		return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
420 	return mmc->cfg->b_max;
424 #if CONFIG_IS_ENABLED(BLK)
/*
 * Block-device read entry point (two signatures: DM/BLK vs legacy).
 * Validates the range against the device capacity, selects the hw
 * partition, sets the block length, then reads in chunks of at most
 * b_max blocks via mmc_read_blocks().
 */
425 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
427 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
431 #if CONFIG_IS_ENABLED(BLK)
432 	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
434 	int dev_num = block_dev->devnum;
436 	lbaint_t cur, blocks_todo = blkcnt;
442 	struct mmc *mmc = find_mmc_device(dev_num);
/* MMC_TINY builds bypass the blk uclass hwpart selection */
446 	if (CONFIG_IS_ENABLED(MMC_TINY))
447 		err = mmc_switch_part(mmc, block_dev->hwpart);
449 		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Reject reads that would run past the end of the device */
454 	if ((start + blkcnt) > block_dev->lba) {
455 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
456 		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
457 		       start + blkcnt, block_dev->lba);
462 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
463 		pr_debug("%s: Failed to set blocklen\n", __func__);
467 	b_max = mmc_get_b_max(mmc, dst, blkcnt);
/* Chunked transfer loop: at most b_max blocks per command */
470 		cur = (blocks_todo > b_max) ? b_max : blocks_todo;
471 		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
472 			pr_debug("%s: Failed to read blocks\n", __func__);
477 		dst += cur * mmc->read_bl_len;
478 	} while (blocks_todo > 0);
/* Issue CMD0 (GO_IDLE_STATE) to reset the card to idle; no response
 * expected. */
483 static int mmc_go_idle(struct mmc *mmc)
490 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
492 	cmd.resp_type = MMC_RSP_NONE;
494 	err = mmc_send_cmd(mmc, &cmd, NULL);
504 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * UHS voltage switch sequence: CMD11 then gate the clock, change the
 * host signal voltage, re-enable the clock and verify via DAT0 that the
 * card released the data lines (SD spec signal-voltage-switch flow).
 */
505 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
511 	 * Send CMD11 only if the request is to switch the card to
/* Switching (back) to 3.3V needs no CMD11, only the host-side change */
514 	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
515 		return mmc_set_signal_voltage(mmc, signal_voltage);
517 	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
519 	cmd.resp_type = MMC_RSP_R1;
521 	err = mmc_send_cmd(mmc, &cmd, NULL);
/* In native mode an error bit in the R1 response aborts the switch */
525 	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
529 	 * The card should drive cmd and dat[0:3] low immediately
530 	 * after the response of cmd11, but wait 100 us to be sure
532 	err = mmc_wait_dat0(mmc, 0, 100);
539 	 * During a signal voltage level switch, the clock must be gated
540 	 * for 5 ms according to the SD spec
542 	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
544 	err = mmc_set_signal_voltage(mmc, signal_voltage);
548 	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
550 	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
553 	 * Failure to switch is indicated by the card holding
554 	 * dat[0:3] low. Wait for at least 1 ms according to spec
556 	err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * SD initialization: loop CMD55+ACMD41 until the card reports not-busy,
 * advertising host voltages (and HCS/S18R where applicable), then
 * record OCR, version and high-capacity flag. Optionally starts the
 * 1.8V switch when both host and card signal UHS support.
 */
566 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
/* ACMD41 must be preceded by CMD55 (APP_CMD) */
573 		cmd.cmdidx = MMC_CMD_APP_CMD;
574 		cmd.resp_type = MMC_RSP_R1;
577 		err = mmc_send_cmd(mmc, &cmd, NULL);
582 		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
583 		cmd.resp_type = MMC_RSP_R3;
586 		 * Most cards do not answer if some reserved bits
587 		 * in the ocr are set. However, Some controller
588 		 * can set bit 7 (reserved for low voltages), but
589 		 * how to manage low voltages SD card is not yet
/* Only advertise the standard 2.7-3.6V window (OCR bits 15-23) */
592 		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
593 			(mmc->cfg->voltages & 0xff8000);
595 		if (mmc->version == SD_VERSION_2)
596 			cmd.cmdarg |= OCR_HCS;
599 			cmd.cmdarg |= OCR_S18R;
601 		err = mmc_send_cmd(mmc, &cmd, NULL);
/* OCR_BUSY set means initialization complete — exit the retry loop */
606 		if (cmd.response[0] & OCR_BUSY)
615 	if (mmc->version != SD_VERSION_2)
616 		mmc->version = SD_VERSION_1_0;
618 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
619 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
620 		cmd.resp_type = MMC_RSP_R3;
623 		err = mmc_send_cmd(mmc, &cmd, NULL);
629 	mmc->ocr = cmd.response[0];
631 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* 0x41000000 = card ready + S18A accepted — start 1.8V switch */
632 	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
634 		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
640 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * One CMD1 (SEND_OP_COND) iteration for eMMC. When use_arg is set (and
 * not SPI) the argument echoes back the voltage window and access mode
 * negotiated from the previous OCR, plus HCS. Updates mmc->ocr.
 */
646 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
651 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
652 	cmd.resp_type = MMC_RSP_R3;
654 	if (use_arg && !mmc_host_is_spi(mmc))
655 		cmd.cmdarg = OCR_HCS |
656 			(mmc->cfg->voltages &
657 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
658 			(mmc->ocr & OCR_ACCESS_MODE);
660 	err = mmc_send_cmd(mmc, &cmd, NULL);
663 	mmc->ocr = cmd.response[0];
/*
 * Start eMMC initialization: probe capabilities with two CMD1
 * iterations; if the card is still busy, defer completion by setting
 * op_cond_pending (finished later in mmc_complete_op_cond()).
 */
667 static int mmc_send_op_cond(struct mmc *mmc)
671 	/* Some cards seem to need this */
674 	/* Asking to the card its capabilities */
675 	for (i = 0; i < 2; i++) {
/* First pass sends no argument; second echoes the negotiated OCR */
676 		err = mmc_send_op_cond_iter(mmc, i != 0);
680 		/* exit if not busy (flag seems to be inverted) */
681 		if (mmc->ocr & OCR_BUSY)
684 	mmc->op_cond_pending = 1;
/*
 * Finish deferred eMMC init: poll CMD1 until OCR_BUSY (card ready) or
 * timeout, read OCR via dedicated command in SPI mode, then set the
 * version placeholder and high-capacity flag from the final OCR.
 */
688 static int mmc_complete_op_cond(struct mmc *mmc)
695 	mmc->op_cond_pending = 0;
696 	if (!(mmc->ocr & OCR_BUSY)) {
697 		/* Some cards seem to need this */
700 		start = get_timer(0);
702 			err = mmc_send_op_cond_iter(mmc, 1);
/* OCR_BUSY set means the card finished its power-up sequence */
705 			if (mmc->ocr & OCR_BUSY)
707 			if (get_timer(start) > timeout)
713 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
714 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
715 		cmd.resp_type = MMC_RSP_R3;
718 		err = mmc_send_cmd(mmc, &cmd, NULL);
723 		mmc->ocr = cmd.response[0];
/* Real version is filled in later from CSD/EXT_CSD */
726 	mmc->version = MMC_VERSION_UNKNOWN;
728 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * Read the 512-byte EXT_CSD register (CMD8 for eMMC) into ext_csd.
 * Caller must provide a buffer of at least MMC_MAX_BLOCK_LEN bytes.
 */
735 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
738 	struct mmc_data data;
741 	/* Get the Card Status Register */
742 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
743 	cmd.resp_type = MMC_RSP_R1;
746 	data.dest = (char *)ext_csd;
748 	data.blocksize = MMC_MAX_BLOCK_LEN;
749 	data.flags = MMC_DATA_READ;
751 	err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * Core CMD6 (SWITCH) implementation: write one EXT_CSD byte, then wait
 * for the card to finish, preferring DAT0 busy polling and falling back
 * to CMD13 status polling. Timeout comes from GENERIC_CMD6_TIME (or
 * PARTITION_SWITCH_TIME for partition switches), units of 10 ms.
 */
756 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
759 	unsigned int status, start;
761 	int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
762 	bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
763 			      (index == EXT_CSD_PART_CONF);
/* EXT_CSD times are in 10 ms units, hence the *10 scaling */
767 	if (mmc->gen_cmd6_time)
768 		timeout_ms = mmc->gen_cmd6_time * 10;
770 	if (is_part_switch && mmc->part_switch_time)
771 		timeout_ms = mmc->part_switch_time * 10;
773 	cmd.cmdidx = MMC_CMD_SWITCH;
774 	cmd.resp_type = MMC_RSP_R1b;
775 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
/* The SWITCH command itself is retried on transport errors */
780 		ret = mmc_send_cmd(mmc, &cmd, NULL);
781 	} while (ret && retries-- > 0);
786 	start = get_timer(0);
788 	/* poll dat0 for rdy/buys status */
789 	ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
790 	if (ret && ret != -ENOSYS)
794 	 * In cases when not allowed to poll by using CMD13 or because we aren't
795 	 * capable of polling by using mmc_wait_dat0, then rely on waiting the
796 	 * stated timeout to be sufficient.
798 	if (ret == -ENOSYS && !send_status)
801 	/* Finally wait until the card is ready or indicates a failure
802 	 * to switch. It doesn't hurt to use CMD13 here even if send_status
803 	 * is false, because by now (after 'timeout_ms' ms) the bus should be
807 		ret = mmc_send_status(mmc, &status);
809 		if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
810 			pr_debug("switch failed %d/%d/0x%x !\n", set, index,
814 		if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
817 	} while (get_timer(start) < timeout_ms);
/* Public CMD6 wrapper: always polls card status after the switch. */
822 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
824 	return __mmc_switch(mmc, set, index, value, true);
/* Enable power-on write protection of the boot partitions by writing 1
 * to the EXT_CSD BOOT_WP byte. */
827 int mmc_boot_wp(struct mmc *mmc)
829 	return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
832 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Program the card's HS_TIMING EXT_CSD byte for the requested bus mode.
 * When downgrading from HS200/HS400 (hsdowngrade) the host clock is
 * dropped to HS first so the subsequent EXT_CSD read is reliable, and
 * CMD13 polling after CMD6 is suppressed (!hsdowngrade).
 * NOTE(review): the switch-case labels selecting speed_bits are elided.
 */
833 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
839 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
845 		speed_bits = EXT_CSD_TIMING_HS;
847 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
849 		speed_bits = EXT_CSD_TIMING_HS200;
852 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
854 		speed_bits = EXT_CSD_TIMING_HS400;
857 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* HS400ES uses the same HS400 timing value here */
859 		speed_bits = EXT_CSD_TIMING_HS400;
863 		speed_bits = EXT_CSD_TIMING_LEGACY;
869 	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
870 			   speed_bits, !hsdowngrade);
874 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
875 	CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
877 	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
878 	 * to HS mode, the card clock are still running much faster than
879 	 * the supported HS mode clock, so we can not reliably read out
880 	 * Extended CSD. Reconfigure the controller to run at HS mode.
883 		mmc_select_mode(mmc, MMC_HS);
884 		mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
/* Verify the switch for plain HS modes by re-reading EXT_CSD */
888 	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
889 		/* Now check to see that it worked */
890 		err = mmc_send_ext_csd(mmc, test_csd);
894 		/* No high-speed support */
895 		if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * Build mmc->card_caps from the cached EXT_CSD CARD_TYPE byte: bus
 * widths plus every speed mode (HS26/HS52/DDR52/HS200/HS400/HS400ES)
 * the card advertises and the build enables. SPI hosts and pre-v4
 * cards keep only the legacy capability.
 */
902 static int mmc_get_capabilities(struct mmc *mmc)
904 	u8 *ext_csd = mmc->ext_csd;
907 	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
909 	if (mmc_host_is_spi(mmc))
912 	/* Only version 4 supports high-speed */
913 	if (mmc->version < MMC_VERSION_4)
917 		pr_err("No ext_csd found!\n"); /* this should enver happen */
/* eMMC v4+ always supports 4- and 8-bit data buses */
921 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
923 	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
924 	mmc->cardtype = cardtype;
926 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
927 	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
928 			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
929 		mmc->card_caps |= MMC_MODE_HS200;
932 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
933 	CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
934 	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
935 			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
936 		mmc->card_caps |= MMC_MODE_HS400;
939 	if (cardtype & EXT_CSD_CARD_TYPE_52) {
940 		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
941 			mmc->card_caps |= MMC_MODE_DDR_52MHz;
942 		mmc->card_caps |= MMC_MODE_HS_52MHz;
944 	if (cardtype & EXT_CSD_CARD_TYPE_26)
945 		mmc->card_caps |= MMC_MODE_HS;
947 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* Enhanced strobe requires both strobe support and HS400 capability */
948 	if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
949 	    (mmc->card_caps & MMC_MODE_HS400)) {
950 		mmc->card_caps |= MMC_MODE_HS400_ES;
/*
 * Set mmc->capacity for the selected hardware partition (user/boot/
 * RPMB/GP1-4) and refresh the block descriptor's LBA count.
 * NOTE(review): the switch-case labels are elided in this view.
 */
958 static int mmc_set_capacity(struct mmc *mmc, int part_num)
962 		mmc->capacity = mmc->capacity_user;
966 		mmc->capacity = mmc->capacity_boot;
969 		mmc->capacity = mmc->capacity_rpmb;
/* General-purpose partitions are numbered 4..7 */
975 		mmc->capacity = mmc->capacity_gp[part_num - 4];
981 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * Select a hardware partition via PARTITION_CONFIG (retried on error).
 * Capacity/hwpart bookkeeping is updated on success, and also when a
 * failed switch was a return to the raw device (part 0), so later
 * accesses still see a consistent descriptor.
 */
986 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
992 		ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
/* Only the PART_ACCESS bits change; other PARTITION_CONFIG bits kept */
994 				 (mmc->part_config & ~PART_ACCESS_MASK)
995 				 | (part_num & PART_ACCESS_MASK));
996 	} while (ret && retry--);
999 	 * Set the capacity if the switch succeeded or was intended
1000 	 * to return to representing the raw device.
1002 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1003 		ret = mmc_set_capacity(mmc, part_num);
1004 		mmc_get_blk_desc(mmc)->hwpart = part_num;
1010 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Configure eMMC hardware partitioning (enhanced user area, GP
 * partitions, write reliability) per the JEDEC flow: validate the
 * requested layout against EXT_CSD limits, then write the EXT_CSD
 * fields, and — only in COMPLETE mode — set WR_REL_SET and
 * PARTITION_SETTING_COMPLETED (both effectively write-once).
 * Returns -EMEDIUMTYPE for capability mismatches so callers can tell
 * "card can't do this" from transport errors.
 */
1011 int mmc_hwpart_config(struct mmc *mmc,
1012 		      const struct mmc_hwpart_conf *conf,
1013 		      enum mmc_hwpart_conf_mode mode)
1018 	u32 gp_size_mult[4];
1019 	u32 max_enh_size_mult;
1020 	u32 tot_enh_size_mult = 0;
1023 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1025 	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1028 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1029 		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1030 		return -EMEDIUMTYPE;
1033 	if (!(mmc->part_support & PART_SUPPORT)) {
1034 		pr_err("Card does not support partitioning\n");
1035 		return -EMEDIUMTYPE;
1038 	if (!mmc->hc_wp_grp_size) {
1039 		pr_err("Card does not define HC WP group size\n");
1040 		return -EMEDIUMTYPE;
1043 	/* check partition alignment and total enhanced size */
1044 	if (conf->user.enh_size) {
/* Enhanced area start/size must be multiples of the HC WP group size */
1045 		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1046 		    conf->user.enh_start % mmc->hc_wp_grp_size) {
1047 			pr_err("User data enhanced area not HC WP group "
1051 		part_attrs |= EXT_CSD_ENH_USR;
1052 		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
/* Byte-addressed cards need the start address scaled by 512 (<<9) */
1053 		if (mmc->high_capacity) {
1054 			enh_start_addr = conf->user.enh_start;
1056 			enh_start_addr = (conf->user.enh_start << 9);
1062 	tot_enh_size_mult += enh_size_mult;
1064 	for (pidx = 0; pidx < 4; pidx++) {
1065 		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1066 			pr_err("GP%i partition not HC WP group size "
1067 			       "aligned\n", pidx+1);
1070 		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1071 		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1072 			part_attrs |= EXT_CSD_ENH_GP(pidx);
1073 			tot_enh_size_mult += gp_size_mult[pidx];
1077 	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1078 		pr_err("Card does not support enhanced attribute\n");
1079 		return -EMEDIUMTYPE;
1082 	err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a little-endian 3-byte field in EXT_CSD */
1087 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1088 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1089 		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1090 	if (tot_enh_size_mult > max_enh_size_mult) {
1091 		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1092 		       tot_enh_size_mult, max_enh_size_mult);
1093 		return -EMEDIUMTYPE;
1096 	/* The default value of EXT_CSD_WR_REL_SET is device
1097 	 * dependent, the values can only be changed if the
1098 	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1099 	 * changed only once and before partitioning is completed. */
1100 	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1101 	if (conf->user.wr_rel_change) {
1102 		if (conf->user.wr_rel_set)
1103 			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1105 			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1107 	for (pidx = 0; pidx < 4; pidx++) {
1108 		if (conf->gp_part[pidx].wr_rel_change) {
1109 			if (conf->gp_part[pidx].wr_rel_set)
1110 				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1112 				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1116 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1117 	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1118 		puts("Card does not support host controlled partition write "
1119 		     "reliability settings\n");
1120 		return -EMEDIUMTYPE;
/* Partitioning is one-shot: refuse if already completed */
1123 	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1124 	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
1125 		pr_err("Card already partitioned\n");
1129 	if (mode == MMC_HWPART_CONF_CHECK)
1132 	/* Partitioning requires high-capacity size definitions */
1133 	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1134 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1135 				 EXT_CSD_ERASE_GROUP_DEF, 1);
1140 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1142 #if CONFIG_IS_ENABLED(MMC_WRITE)
1143 		/* update erase group size to be high-capacity */
1144 		mmc->erase_grp_size =
1145 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1150 	/* all OK, write the configuration */
/* Multi-byte EXT_CSD fields are written one byte per CMD6 */
1151 	for (i = 0; i < 4; i++) {
1152 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1153 				 EXT_CSD_ENH_START_ADDR+i,
1154 				 (enh_start_addr >> (i*8)) & 0xFF);
1158 	for (i = 0; i < 3; i++) {
1159 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1160 				 EXT_CSD_ENH_SIZE_MULT+i,
1161 				 (enh_size_mult >> (i*8)) & 0xFF);
1165 	for (pidx = 0; pidx < 4; pidx++) {
1166 		for (i = 0; i < 3; i++) {
1167 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1168 					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1169 					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1174 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1175 			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1179 	if (mode == MMC_HWPART_CONF_SET)
1182 	/* The WR_REL_SET is a write-once register but shall be
1183 	 * written before setting PART_SETTING_COMPLETED. As it is
1184 	 * write-once we can only write it when completing the
1186 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1187 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1188 				 EXT_CSD_WR_REL_SET, wr_rel_set);
1193 	/* Setting PART_SETTING_COMPLETED confirms the partition
1194 	 * configuration but it only becomes effective after power
1195 	 * cycle, so we do not adjust the partition related settings
1196 	 * in the mmc struct. */
1198 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1199 			 EXT_CSD_PARTITION_SETTING,
1200 			 EXT_CSD_PARTITION_SETTING_COMPLETED);
1208 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Query card-detect state: board hook first, then controller getcd op.
 * Mirrors mmc_getwp() above; intermediate lines are elided in this view.
 */
1209 int mmc_getcd(struct mmc *mmc)
1213 	cd = board_mmc_getcd(mmc);
1216 		if (mmc->cfg->ops->getcd)
1217 			cd = mmc->cfg->ops->getcd(mmc);
1226 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Issue SD CMD6 (SWITCH_FUNC): set the 4-bit function value for the
 * given function group, leave all other groups at 0xf (no change), and
 * read the 64-byte switch status block into resp.
 */
1227 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1230 	struct mmc_data data;
1232 	/* Switch the frequency */
1233 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1234 	cmd.resp_type = MMC_RSP_R1;
/* mode bit 31: 0 = check function, 1 = switch function */
1235 	cmd.cmdarg = (mode << 31) | 0xffffff;
1236 	cmd.cmdarg &= ~(0xf << (group * 4));
1237 	cmd.cmdarg |= value << (group * 4);
1239 	data.dest = (char *)resp;
1240 	data.blocksize = 64;
1242 	data.flags = MMC_DATA_READ;
1244 	return mmc_send_cmd(mmc, &cmd, &data);
/*
 * Probe an SD card's capabilities: read the SCR (ACMD51) to learn the
 * spec version and 4-bit support, then use CMD6 in check mode to detect
 * high-speed, and for SD 3.0 cards decode the UHS bus-mode bits from
 * the switch status block into card_caps.
 */
1247 static int sd_get_capabilities(struct mmc *mmc)
1251 	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1252 	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1253 	struct mmc_data data;
1255 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1259 	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1261 	if (mmc_host_is_spi(mmc))
1264 	/* Read the SCR to find out if this card supports higher speeds */
1265 	cmd.cmdidx = MMC_CMD_APP_CMD;
1266 	cmd.resp_type = MMC_RSP_R1;
1267 	cmd.cmdarg = mmc->rca << 16;
1269 	err = mmc_send_cmd(mmc, &cmd, NULL);
1274 	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1275 	cmd.resp_type = MMC_RSP_R1;
1281 	data.dest = (char *)scr;
1284 	data.flags = MMC_DATA_READ;
1286 	err = mmc_send_cmd(mmc, &cmd, &data);
/* SCR is transferred big-endian; convert before decoding */
1295 	mmc->scr[0] = __be32_to_cpu(scr[0]);
1296 	mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SD_SPEC field (SCR bits 59:56) selects the physical spec version */
1298 	switch ((mmc->scr[0] >> 24) & 0xf) {
1300 		mmc->version = SD_VERSION_1_0;
1303 		mmc->version = SD_VERSION_1_10;
1306 		mmc->version = SD_VERSION_2;
/* SD_SPEC3 bit distinguishes 3.0 from 2.0 */
1307 		if ((mmc->scr[0] >> 15) & 0x1)
1308 			mmc->version = SD_VERSION_3;
1311 		mmc->version = SD_VERSION_1_0;
1315 	if (mmc->scr[0] & SD_DATA_4BIT)
1316 		mmc->card_caps |= MMC_MODE_4BIT;
1318 	/* Version 1.0 doesn't support switching */
1319 	if (mmc->version == SD_VERSION_1_0)
/* CMD6 check mode: query whether high-speed can be switched to */
1324 		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1325 				(u8 *)switch_status);
1330 		/* The high-speed function is busy.  Try again */
1331 		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1335 	/* If high-speed isn't supported, we return */
1336 	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1337 		mmc->card_caps |= MMC_CAP(SD_HS);
1339 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1340 	/* Version before 3.0 don't support UHS modes */
1341 	if (mmc->version < SD_VERSION_3)
1344 	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1345 	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1346 		mmc->card_caps |= MMC_CAP(UHS_SDR104);
1347 	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1348 		mmc->card_caps |= MMC_CAP(UHS_SDR50);
1349 	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1350 		mmc->card_caps |= MMC_CAP(UHS_SDR25);
1351 	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1352 		mmc->card_caps |= MMC_CAP(UHS_SDR12);
1353 	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1354 		mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * Switch the SD card to the access mode matching 'mode' via CMD6 in
 * switch mode, then verify from the status block that the card actually
 * selected the requested function.
 * NOTE(review): the switch-case labels mapping mode to speed are elided.
 */
1360 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1364 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1367 	/* SD version 1.00 and 1.01 does not support CMD 6 */
1368 	if (mmc->version == SD_VERSION_1_0)
1373 		speed = UHS_SDR12_BUS_SPEED;
1376 		speed = HIGH_SPEED_BUS_SPEED;
1378 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1380 		speed = UHS_SDR12_BUS_SPEED;
1383 		speed = UHS_SDR25_BUS_SPEED;
1386 		speed = UHS_SDR50_BUS_SPEED;
1389 		speed = UHS_DDR50_BUS_SPEED;
1392 		speed = UHS_SDR104_BUS_SPEED;
1399 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* Function group 1 result nibble must echo the requested speed */
1403 	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * Set the SD card's bus width (1 or 4 bits) with CMD55 + ACMD6.
 * Only widths 1 and 4 are legal for SD.
 */
1409 static int sd_select_bus_width(struct mmc *mmc, int w)
1414 	if ((w != 4) && (w != 1))
1417 	cmd.cmdidx = MMC_CMD_APP_CMD;
1418 	cmd.resp_type = MMC_RSP_R1;
1419 	cmd.cmdarg = mmc->rca << 16;
1421 	err = mmc_send_cmd(mmc, &cmd, NULL);
1425 	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1426 	cmd.resp_type = MMC_RSP_R1;
1431 	err = mmc_send_cmd(mmc, &cmd, NULL);
1439 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the 64-byte SD Status register (ACMD13) and decode the erase-
 * related fields: allocation unit size (table-indexed, in 512-byte
 * sectors), erase size/timeout/offset. CMD55 is retried under
 * CONFIG_MMC_QUIRKS for cards that fail the first APP_CMD.
 */
1440 static int sd_read_ssr(struct mmc *mmc)
/* AU_SIZE nibble -> AU size in 512-byte sectors (0 = undefined) */
1442 	static const unsigned int sd_au_size[] = {
1443 		0,		SZ_16K / 512,		SZ_32K / 512,
1444 		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
1445 		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
1446 		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
1447 		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
1452 	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1453 	struct mmc_data data;
1455 	unsigned int au, eo, et, es;
1457 	cmd.cmdidx = MMC_CMD_APP_CMD;
1458 	cmd.resp_type = MMC_RSP_R1;
1459 	cmd.cmdarg = mmc->rca << 16;
1461 	err = mmc_send_cmd(mmc, &cmd, NULL);
1462 #ifdef CONFIG_MMC_QUIRKS
1463 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1466 		 * It has been seen that APP_CMD may fail on the first
1467 		 * attempt, let's try a few more times
1470 			err = mmc_send_cmd(mmc, &cmd, NULL);
1473 		} while (retries--);
1479 	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1480 	cmd.resp_type = MMC_RSP_R1;
1484 	data.dest = (char *)ssr;
1485 	data.blocksize = 64;
1487 	data.flags = MMC_DATA_READ;
1489 	err = mmc_send_cmd(mmc, &cmd, &data);
/* SSR is big-endian on the wire; convert all 16 words in place */
1497 	for (i = 0; i < 16; i++)
1498 		ssr[i] = be32_to_cpu(ssr[i]);
1500 	au = (ssr[2] >> 12) & 0xF;
/* AU codes >9 are only defined from SD 3.0 onwards */
1501 	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1502 		mmc->ssr.au = sd_au_size[au];
1503 		es = (ssr[3] >> 24) & 0xFF;
1504 		es |= (ssr[2] & 0xFF) << 8;
1505 		et = (ssr[3] >> 18) & 0x3F;
1507 			eo = (ssr[3] >> 16) & 0x3;
1508 			mmc->ssr.erase_timeout = (et * 1000) / es;
1509 			mmc->ssr.erase_offset = eo * 1000;
1512 		pr_debug("Invalid Allocation Unit Size.\n");
1518 /* frequency bases */
1519 /* divided by 10 to be nice to platforms without floating point */
1520 static const int fbase[] = {
1527 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1528 * to platforms without floating point.
1530 static const u8 multipliers[] = {
/* Convert a single MMC_MODE_*BIT capability flag to its numeric bus
 * width; warns (typo "witdh" preserved from original) on invalid input. */
1549 static inline int bus_width(uint cap)
1551 	if (cap == MMC_MODE_8BIT)
1553 	if (cap == MMC_MODE_4BIT)
1555 	if (cap == MMC_MODE_1BIT)
1557 	pr_warn("invalid bus witdh capability 0x%x\n", cap);
1561 #if !CONFIG_IS_ENABLED(DM_MMC)
1562 #ifdef MMC_SUPPORTS_TUNING
1563 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
/* Non-DM: push current clock/width/voltage settings to the controller
 * via its optional set_ios op. */
1569 static int mmc_set_ios(struct mmc *mmc)
1573 	if (mmc->cfg->ops->set_ios)
1574 		ret = mmc->cfg->ops->set_ios(mmc);
/* Non-DM: invoke the controller's optional host power-cycle hook. */
1579 static int mmc_host_power_cycle(struct mmc *mmc)
1583 	if (mmc->cfg->ops->host_power_cycle)
1584 		ret = mmc->cfg->ops->host_power_cycle(mmc);
/*
 * Set the bus clock, clamped to the host's [f_min, f_max] range, record
 * the enable/disable state, and apply via mmc_set_ios().
 */
1590 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1593 	if (clock > mmc->cfg->f_max)
1594 		clock = mmc->cfg->f_max;
1596 	if (clock < mmc->cfg->f_min)
1597 		clock = mmc->cfg->f_min;
1601 	mmc->clk_disable = disable;
1603 	debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1605 	return mmc_set_ios(mmc);
/* Record the bus width and apply it via mmc_set_ios(). */
1608 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1610 	mmc->bus_width = width;
1612 	return mmc_set_ios(mmc);
1615 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1617 * helper function to display the capabilities in a human
1618 * friendly manner. The capabilities include bus width and
/*
 * Debug helper: print a capability mask as supported bus widths and a
 * list of bus-mode names (the "\b\b" backspaces trim trailing ", ").
 */
1621 void mmc_dump_capabilities(const char *text, uint caps)
1625 	pr_debug("%s: widths [", text);
1626 	if (caps & MMC_MODE_8BIT)
1628 	if (caps & MMC_MODE_4BIT)
1630 	if (caps & MMC_MODE_1BIT)
1632 	pr_debug("\b\b] modes [");
1633 	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1634 		if (MMC_CAP(mode) & caps)
1635 			pr_debug("%s, ", mmc_mode_name(mode));
1636 	pr_debug("\b\b]\n");
1640 struct mode_width_tuning {
1643 #ifdef MMC_SUPPORTS_TUNING
1648 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Convert an mmc_voltage enum value to millivolts (fallthrough/default
 * behavior for unknown values is elided in this view). */
1649 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1652 	case MMC_SIGNAL_VOLTAGE_000: return 0;
1653 	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1654 	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1655 	case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * Change the I/O signal voltage: no-op if already at the requested
 * level, otherwise record it and apply through mmc_set_ios().
 */
1660 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1664 	if (mmc->signal_voltage == signal_voltage)
1667 	mmc->signal_voltage = signal_voltage;
1668 	err = mmc_set_ios(mmc);
1670 		pr_debug("unable to set voltage (err %d)\n", err);
1675 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1681 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * SD bus modes in order of preference (fastest first); iterated by
 * for_each_sd_mode_by_pref(). The .mode fields are elided in this view;
 * each entry pairs a mode with its usable widths (and tuning command
 * where needed).
 */
1682 static const struct mode_width_tuning sd_modes_by_pref[] = {
1683 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1684 #ifdef MMC_SUPPORTS_TUNING
1687 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1688 		.tuning = MMC_CMD_SEND_TUNING_BLOCK
1693 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1697 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1701 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1706 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1708 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1711 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1716 		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate sd_modes_by_pref entries whose mode is present in 'caps'. */
1720 #define for_each_sd_mode_by_pref(caps, mwt) \
1721 	for (mwt = sd_modes_by_pref;\
1722 	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1724 		if (caps & MMC_CAP(mwt->mode))
/*
 * Pick the best SD bus mode/width: intersect card and host caps, then
 * try each preferred mode with the widest supported bus first. For each
 * candidate: set the width (card then host), set the card speed, select
 * the mode on the host, run tuning if required, and read the SSR.
 * On failure, fall back to legacy speed and try the next candidate.
 */
1726 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1729 	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1730 	const struct mode_width_tuning *mwt;
1731 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS only attempted if the card accepted the 1.8V request (S18R) */
1732 	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1734 	bool uhs_en = false;
1739 	mmc_dump_capabilities("sd card", card_caps);
1740 	mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts: fixed 1-bit legacy operation, nothing to negotiate */
1743 	if (mmc_host_is_spi(mmc)) {
1744 		mmc_set_bus_width(mmc, 1);
1745 		mmc_select_mode(mmc, MMC_LEGACY);
1746 		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1750 	/* Restrict card's capabilities by what the host can do */
1751 	caps = card_caps & mmc->host_caps;
1756 	for_each_sd_mode_by_pref(caps, mwt) {
1759 		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1760 			if (*w & caps & mwt->widths) {
1761 				pr_debug("trying mode %s width %d (at %d MHz)\n",
1762 					 mmc_mode_name(mwt->mode),
1764 					 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1766 				/* configure the bus width (card + host) */
1767 				err = sd_select_bus_width(mmc, bus_width(*w));
1770 				mmc_set_bus_width(mmc, bus_width(*w));
1772 				/* configure the bus mode (card) */
1773 				err = sd_set_card_speed(mmc, mwt->mode);
1777 				/* configure the bus mode (host) */
1778 				mmc_select_mode(mmc, mwt->mode);
1779 				mmc_set_clock(mmc, mmc->tran_speed,
1782 #ifdef MMC_SUPPORTS_TUNING
1783 				/* execute tuning if needed */
1784 				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1785 					err = mmc_execute_tuning(mmc,
1788 						pr_debug("tuning failed\n");
1794 #if CONFIG_IS_ENABLED(MMC_WRITE)
1795 				err = sd_read_ssr(mmc);
1797 					pr_warn("unable to read ssr\n");
1803 				/* revert to a safer bus speed */
1804 				mmc_select_mode(mmc, MMC_LEGACY);
1805 				mmc_set_clock(mmc, mmc->tran_speed,
1811 	pr_err("unable to select a mode\n");
1816 * read and compare the part of ext csd that is constant.
1817 * This can be used to check that the transfer is working
/*
 * Re-reads EXT_CSD into a scratch buffer and compares the read-only
 * fields against the cached mmc->ext_csd; pre-v4 cards are skipped.
 * A mismatch indicates the current bus configuration corrupts data.
 */
1820 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1823 const u8 *ext_csd = mmc->ext_csd;
1824 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1826 if (mmc->version < MMC_VERSION_4)
1829 err = mmc_send_ext_csd(mmc, test_csd);
1833 /* Only compare read only fields */
1834 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1835 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1836 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1837 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1838 ext_csd[EXT_CSD_REV]
1839 == test_csd[EXT_CSD_REV] &&
1840 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1841 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1842 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1843 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1849 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Select the lowest signal voltage accepted by both the card (derived
 * from its EXT_CSD card type bits for the given bus mode) and the
 * caller-supplied allowed_mask, trying candidates lowest-first via
 * ffs() until mmc_set_signal_voltage() succeeds.
 */
1850 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1851 uint32_t allowed_mask)
1859 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1860 EXT_CSD_CARD_TYPE_HS400_1_8V))
1861 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1862 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1863 EXT_CSD_CARD_TYPE_HS400_1_2V))
1864 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1867 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1868 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1869 MMC_SIGNAL_VOLTAGE_180;
1870 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1871 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1874 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1878 while (card_mask & allowed_mask) {
1879 enum mmc_voltage best_match;
/* ffs() picks the lowest set bit, i.e. the lowest common voltage */
1881 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1882 if (!mmc_set_signal_voltage(mmc, best_match))
/* drop the failed candidate and retry with the next one */
1885 allowed_mask &= ~best_match;
/* No-op stub used when MMC_IO_VOLTAGE support is not compiled in. */
1891 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1892 uint32_t allowed_mask)
/* eMMC bus modes in preference order (fastest first); each HS400/HS200
 * entry is guarded by its own Kconfig option. */
1898 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1899 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1901 .mode = MMC_HS_400_ES,
1902 .widths = MMC_MODE_8BIT,
1905 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1908 .widths = MMC_MODE_8BIT,
1909 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1912 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1915 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1916 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1921 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1925 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1929 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1933 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Walk mmc_modes_by_pref in order, skipping entries absent from caps. */
1937 #define for_each_mmc_mode_by_pref(caps, mwt) \
1938 for (mwt = mmc_modes_by_pref;\
1939 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1941 if (caps & MMC_CAP(mwt->mode))
/* Map a host width capability (+ DDR flag) to the EXT_CSD BUS_WIDTH
 * byte value to program into the card; widest entries come first. */
1943 static const struct ext_csd_bus_width {
1947 } ext_csd_bus_width[] = {
1948 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1949 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1950 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1951 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1952 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1955 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Switch an eMMC device into HS400: per JEDEC, tuning must be done in
 * HS200 first, then the card is dropped back to HS, the bus width is
 * set to 8-bit DDR, and only then is HS400 timing selected.
 */
1956 static int mmc_select_hs400(struct mmc *mmc)
1960 /* Set timing to HS200 for tuning */
1961 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1965 /* configure the bus mode (host) */
1966 mmc_select_mode(mmc, MMC_HS_200);
1967 mmc_set_clock(mmc, mmc->tran_speed, false);
1969 /* execute tuning if needed */
1970 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1972 debug("tuning failed\n");
1976 /* Set back to HS */
1977 mmc_set_card_speed(mmc, MMC_HS, true);
1979 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1980 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1984 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1988 mmc_select_mode(mmc, MMC_HS_400);
1989 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub used when MMC_HS400_SUPPORT is not compiled in. */
1996 static int mmc_select_hs400(struct mmc *mmc)
2002 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2003 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM fallback hook for enabling enhanced strobe on the host. */
2004 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/*
 * Switch an eMMC device into HS400 Enhanced Strobe mode: HS first,
 * then 8-bit DDR bus width with the strobe flag, then HS400ES timing,
 * and finally enable enhanced strobe on the host side.
 */
2009 static int mmc_select_hs400es(struct mmc *mmc)
2013 err = mmc_set_card_speed(mmc, MMC_HS, true);
2017 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2018 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2019 EXT_CSD_BUS_WIDTH_STROBE);
2021 printf("switch to bus width for hs400 failed\n");
2024 /* TODO: driver strength */
2025 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2029 mmc_select_mode(mmc, MMC_HS_400_ES);
2030 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2034 return mmc_set_enhanced_strobe(mmc);
/* Stub used when MMC_HS400_ES_SUPPORT is not compiled in. */
2037 static int mmc_select_hs400es(struct mmc *mmc)
/* Walk ext_csd_bus_width[], keeping entries matching the DDR flag and
 * the caps mask. */
2043 #define for_each_supported_width(caps, ddr, ecbv) \
2044 for (ecbv = ext_csd_bus_width;\
2045 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2047 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Pick the fastest (mode, width) pair supported by both the eMMC card
 * and the host. For each candidate: lower the signal voltage if
 * possible, program the card's bus width, handle the special HS400 /
 * HS400ES sequences, set card speed, enable DDR, configure the host,
 * tune if needed, and verify with an EXT_CSD read-back. On failure the
 * previous voltage is restored and the bus reverts to 1-bit legacy
 * before trying the next candidate.
 */
2049 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2052 const struct mode_width_tuning *mwt;
2053 const struct ext_csd_bus_width *ecbw;
2056 mmc_dump_capabilities("mmc", card_caps);
2057 mmc_dump_capabilities("host", mmc->host_caps);
2060 if (mmc_host_is_spi(mmc)) {
2061 mmc_set_bus_width(mmc, 1);
2062 mmc_select_mode(mmc, MMC_LEGACY);
2063 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2067 /* Restrict card's capabilities by what the host can do */
2068 card_caps &= mmc->host_caps;
2070 /* Only version 4 of MMC supports wider bus widths */
2071 if (mmc->version < MMC_VERSION_4)
2074 if (!mmc->ext_csd) {
2075 pr_debug("No ext_csd found!\n"); /* this should never happen */
2079 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2080 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2082 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2083 * before doing anything else, since a transition from either of
2084 * the HS200/HS400 mode directly to legacy mode is not supported.
2086 if (mmc->selected_mode == MMC_HS_200 ||
2087 mmc->selected_mode == MMC_HS_400)
2088 mmc_set_card_speed(mmc, MMC_HS, true);
2091 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2093 for_each_mmc_mode_by_pref(card_caps, mwt) {
2094 for_each_supported_width(card_caps & mwt->widths,
2095 mmc_is_mode_ddr(mwt->mode), ecbw) {
2096 enum mmc_voltage old_voltage;
2097 pr_debug("trying mode %s width %d (at %d MHz)\n",
2098 mmc_mode_name(mwt->mode),
2099 bus_width(ecbw->cap),
2100 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so it can be restored on failure */
2101 old_voltage = mmc->signal_voltage;
2102 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2103 MMC_ALL_SIGNAL_VOLTAGE);
2107 /* configure the bus width (card + host) */
2108 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2110 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2113 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400/HS400ES need their own multi-step switch sequences */
2115 if (mwt->mode == MMC_HS_400) {
2116 err = mmc_select_hs400(mmc);
2118 printf("Select HS400 failed %d\n", err);
2121 } else if (mwt->mode == MMC_HS_400_ES) {
2122 err = mmc_select_hs400es(mmc);
2124 printf("Select HS400ES failed %d\n",
2129 /* configure the bus speed (card) */
2130 err = mmc_set_card_speed(mmc, mwt->mode, false);
2135 * configure the bus width AND the ddr mode
2136 * (card). The host side will be taken care
2137 * of in the next step
2139 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2140 err = mmc_switch(mmc,
2141 EXT_CSD_CMD_SET_NORMAL,
2143 ecbw->ext_csd_bits);
2148 /* configure the bus mode (host) */
2149 mmc_select_mode(mmc, mwt->mode);
2150 mmc_set_clock(mmc, mmc->tran_speed,
2152 #ifdef MMC_SUPPORTS_TUNING
2154 /* execute tuning if needed */
2156 err = mmc_execute_tuning(mmc,
2159 pr_debug("tuning failed\n");
2166 /* do a transfer to check the configuration */
2167 err = mmc_read_and_compare_ext_csd(mmc);
2171 mmc_set_signal_voltage(mmc, old_voltage);
2172 /* if an error occurred, revert to a safer bus mode */
2173 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2174 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2175 mmc_select_mode(mmc, MMC_LEGACY);
2176 mmc_set_bus_width(mmc, 1);
2180 pr_err("unable to select a mode\n");
2186 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: single static EXT_CSD buffer instead of per-device malloc */
2187 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * Version-4+ eMMC startup: read EXT_CSD, derive the device version,
 * the >2GiB capacity, command timeouts, hardware partition geometry
 * (boot/RPMB/GP/enhanced areas) and the erase/WP group sizes. SD cards
 * and pre-v4 MMC return early.
 */
2190 static int mmc_startup_v4(struct mmc *mmc)
2194 bool has_parts = false;
2195 bool part_completed;
2196 static const u32 mmc_versions[] = {
2208 #if CONFIG_IS_ENABLED(MMC_TINY)
2209 u8 *ext_csd = ext_csd_bkup;
2211 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2215 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2217 err = mmc_send_ext_csd(mmc, ext_csd);
2221 /* store the ext csd for future reference */
2223 mmc->ext_csd = ext_csd;
2225 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2227 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2230 /* check ext_csd version and capacity */
2231 err = mmc_send_ext_csd(mmc, ext_csd);
2235 /* store the ext csd for future reference */
2237 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2240 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2242 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2245 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2247 if (mmc->version >= MMC_VERSION_4_2) {
2249 * According to the JEDEC Standard, the value of
2250 * ext_csd's capacity is valid if the value is more
2253 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2254 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2255 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2256 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2257 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT capacity is only trusted above 2GiB (high-capacity) */
2258 if ((capacity >> 20) > 2 * 1024)
2259 mmc->capacity_user = capacity;
2262 if (mmc->version >= MMC_VERSION_4_5)
2263 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2265 /* The partition data may be non-zero but it is only
2266 * effective if PARTITION_SETTING_COMPLETED is set in
2267 * EXT_CSD, so ignore any data if this bit is not set,
2268 * except for enabling the high-capacity group size
2269 * definition (see below).
2271 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2272 EXT_CSD_PARTITION_SETTING_COMPLETED);
2274 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2275 /* Some eMMC set the value too low so set a minimum */
2276 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2277 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2279 /* store the partition info of emmc */
2280 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2281 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2282 ext_csd[EXT_CSD_BOOT_MULT])
2283 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2284 if (part_completed &&
2285 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2286 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT_MULT / RPMB_MULT are in 128KiB units, hence << 17 */
2288 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2290 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2292 for (i = 0; i < 4; i++) {
2293 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2294 uint mult = (ext_csd[idx + 2] << 16) +
2295 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2298 if (!part_completed)
2300 mmc->capacity_gp[i] = mult;
2301 mmc->capacity_gp[i] *=
2302 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2303 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2304 mmc->capacity_gp[i] <<= 19;
2307 #ifndef CONFIG_SPL_BUILD
2308 if (part_completed) {
2309 mmc->enh_user_size =
2310 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2311 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2312 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2313 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2314 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2315 mmc->enh_user_size <<= 19;
2316 mmc->enh_user_start =
2317 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2318 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2319 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2320 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors */
2321 if (mmc->high_capacity)
2322 mmc->enh_user_start <<= 9;
2327 * Host needs to enable ERASE_GRP_DEF bit if device is
2328 * partitioned. This bit will be lost every time after a reset
2329 * or power off. This will affect erase size.
2333 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2334 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2337 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2338 EXT_CSD_ERASE_GROUP_DEF, 1);
2343 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2346 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2347 #if CONFIG_IS_ENABLED(MMC_WRITE)
2348 /* Read out group size from ext_csd */
2349 mmc->erase_grp_size =
2350 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2353 * if high capacity and partition setting completed
2354 * SEC_COUNT is valid even if it is smaller than 2 GiB
2355 * JEDEC Standard JESD84-B45, 6.2.4
2357 if (mmc->high_capacity && part_completed) {
2358 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2359 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2360 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2361 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2362 capacity *= MMC_MAX_BLOCK_LEN;
2363 mmc->capacity_user = capacity;
2366 #if CONFIG_IS_ENABLED(MMC_WRITE)
2368 /* Calculate the group size from the csd value. */
2369 int erase_gsz, erase_gmul;
2371 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2372 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2373 mmc->erase_grp_size = (erase_gsz + 1)
2377 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2378 mmc->hc_wp_grp_size = 1024
2379 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2380 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2383 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2388 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* error path: drop the cached EXT_CSD pointer */
2391 mmc->ext_csd = NULL;
/*
 * Full card bring-up after the operating conditions are agreed:
 * identify the card (CID/RCA/CSD), derive version, legacy speed and
 * block lengths, compute capacity, select the card, run the v4 EXT_CSD
 * path, negotiate the final bus mode/width, and fill in the block
 * device descriptor.
 */
2396 static int mmc_startup(struct mmc *mmc)
2402 struct blk_desc *bdesc;
2404 #ifdef CONFIG_MMC_SPI_CRC_ON
2405 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2406 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2407 cmd.resp_type = MMC_RSP_R1;
2409 err = mmc_send_cmd(mmc, &cmd, NULL);
2415 /* Put the Card in Identify Mode */
2416 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2417 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2418 cmd.resp_type = MMC_RSP_R2;
2421 err = mmc_send_cmd(mmc, &cmd, NULL);
2423 #ifdef CONFIG_MMC_QUIRKS
2424 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2427 * It has been seen that SEND_CID may fail on the first
2428 * attempt, let's try a few more time
2431 err = mmc_send_cmd(mmc, &cmd, NULL);
2434 } while (retries--);
2441 memcpy(mmc->cid, cmd.response, 16);
2444 * For MMC cards, set the Relative Address.
2445 * For SD cards, get the Relative Address.
2446 * This also puts the cards into Standby State
2448 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2449 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2450 cmd.cmdarg = mmc->rca << 16;
2451 cmd.resp_type = MMC_RSP_R6;
2453 err = mmc_send_cmd(mmc, &cmd, NULL);
2459 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2462 /* Get the Card-Specific Data */
2463 cmd.cmdidx = MMC_CMD_SEND_CSD;
2464 cmd.resp_type = MMC_RSP_R2;
2465 cmd.cmdarg = mmc->rca << 16;
2467 err = mmc_send_cmd(mmc, &cmd, NULL);
2472 mmc->csd[0] = cmd.response[0];
2473 mmc->csd[1] = cmd.response[1];
2474 mmc->csd[2] = cmd.response[2];
2475 mmc->csd[3] = cmd.response[3];
/* CSD_STRUCTURE / SPEC_VERS field determines the MMC version */
2477 if (mmc->version == MMC_VERSION_UNKNOWN) {
2478 int version = (cmd.response[0] >> 26) & 0xf;
2482 mmc->version = MMC_VERSION_1_2;
2485 mmc->version = MMC_VERSION_1_4;
2488 mmc->version = MMC_VERSION_2_2;
2491 mmc->version = MMC_VERSION_3;
2494 mmc->version = MMC_VERSION_4;
2497 mmc->version = MMC_VERSION_1_2;
2502 /* divide frequency by 10, since the mults are 10x bigger */
2503 freq = fbase[(cmd.response[0] & 0x7)];
2504 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2506 mmc->legacy_speed = freq * mult;
2507 mmc_select_mode(mmc, MMC_LEGACY);
2509 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2510 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2511 #if CONFIG_IS_ENABLED(MMC_WRITE)
2514 mmc->write_bl_len = mmc->read_bl_len;
2516 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2519 if (mmc->high_capacity) {
2520 csize = (mmc->csd[1] & 0x3f) << 16
2521 | (mmc->csd[2] & 0xffff0000) >> 16;
2524 csize = (mmc->csd[1] & 0x3ff) << 2
2525 | (mmc->csd[2] & 0xc0000000) >> 30;
2526 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2529 mmc->capacity_user = (csize + 1) << (cmult + 2);
2530 mmc->capacity_user *= mmc->read_bl_len;
2531 mmc->capacity_boot = 0;
2532 mmc->capacity_rpmb = 0;
2533 for (i = 0; i < 4; i++)
2534 mmc->capacity_gp[i] = 0;
2536 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2537 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2539 #if CONFIG_IS_ENABLED(MMC_WRITE)
2540 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2541 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program DSR only when the card declares DSR_IMP and one was set */
2544 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2545 cmd.cmdidx = MMC_CMD_SET_DSR;
2546 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2547 cmd.resp_type = MMC_RSP_NONE;
2548 if (mmc_send_cmd(mmc, &cmd, NULL))
2549 pr_warn("MMC: SET_DSR failed\n");
2552 /* Select the card, and put it into Transfer Mode */
2553 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2554 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2555 cmd.resp_type = MMC_RSP_R1;
2556 cmd.cmdarg = mmc->rca << 16;
2557 err = mmc_send_cmd(mmc, &cmd, NULL);
2564 * For SD, its erase group is always one sector
2566 #if CONFIG_IS_ENABLED(MMC_WRITE)
2567 mmc->erase_grp_size = 1;
2569 mmc->part_config = MMCPART_NOAVAILABLE;
2571 err = mmc_startup_v4(mmc);
2575 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2579 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY stays in 1-bit legacy mode; no mode negotiation */
2580 mmc_set_clock(mmc, mmc->legacy_speed, false);
2581 mmc_select_mode(mmc, MMC_LEGACY);
2582 mmc_set_bus_width(mmc, 1);
2585 err = sd_get_capabilities(mmc);
2588 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2590 err = mmc_get_capabilities(mmc);
2593 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2599 mmc->best_mode = mmc->selected_mode;
2601 /* Fix the block length for DDR mode */
2602 if (mmc->ddr_mode) {
2603 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2604 #if CONFIG_IS_ENABLED(MMC_WRITE)
2605 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2609 /* fill in device description */
2610 bdesc = mmc_get_blk_desc(mmc);
2614 bdesc->blksz = mmc->read_bl_len;
2615 bdesc->log2blksz = LOG2(bdesc->blksz);
2616 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2617 #if !defined(CONFIG_SPL_BUILD) || \
2618 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2619 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
2620 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2621 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2622 (mmc->cid[3] >> 16) & 0xffff);
2623 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2624 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2625 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2626 (mmc->cid[2] >> 24) & 0xff);
2627 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2628 (mmc->cid[2] >> 16) & 0xf);
2630 bdesc->vendor[0] = 0;
2631 bdesc->product[0] = 0;
2632 bdesc->revision[0] = 0;
2635 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * Send SD CMD8 (SEND_IF_COND) with check pattern 0xaa; a matching echo
 * marks the card as SD version 2 or later.
 */
2642 static int mmc_send_if_cond(struct mmc *mmc)
2647 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2648 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2649 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2650 cmd.resp_type = MMC_RSP_R7;
2652 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo the 0xaa check pattern back */
2657 if ((cmd.response[0] & 0xff) != 0xaa)
2660 mmc->version = SD_VERSION_2;
2665 #if !CONFIG_IS_ENABLED(DM_MMC)
2666 /* board-specific MMC power initializations. */
2667 __weak void board_mmc_power_init(void)
/*
 * Resolve the card's power supplies: with DM+regulator support, look
 * up the vmmc/vqmmc regulators from the device tree (missing supplies
 * are only a debug message, not an error); otherwise fall back to the
 * legacy board hook.
 */
2672 static int mmc_power_init(struct mmc *mmc)
2674 #if CONFIG_IS_ENABLED(DM_MMC)
2675 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2678 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2681 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2683 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2684 &mmc->vqmmc_supply);
2686 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2688 #else /* !CONFIG_DM_MMC */
2690 * Driver model should use a regulator, as above, rather than calling
2691 * out to board code.
2693 board_mmc_power_init();
2699 * put the host in the initial state:
2700 * - turn on Vdd (card power supply)
2701 * - configure the bus width and clock to minimal values
2703 static void mmc_set_initial_state(struct mmc *mmc)
2707 /* First try to set 3.3V. If it fails set to 1.8V */
2708 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2710 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2712 pr_warn("mmc: failed to set signal voltage\n");
2714 mmc_select_mode(mmc, MMC_LEGACY);
2715 mmc_set_bus_width(mmc, 1);
2716 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the vmmc regulator (when present) to power the card. */
2719 static int mmc_power_on(struct mmc *mmc)
2721 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2722 if (mmc->vmmc_supply) {
2723 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2726 puts("Error enabling VMMC supply\n");
/* Gate the clock and disable the vmmc regulator (when present). */
2734 static int mmc_power_off(struct mmc *mmc)
2736 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2737 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2738 if (mmc->vmmc_supply) {
2739 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2742 pr_debug("Error disabling VMMC supply\n");
/* Power off, let the host do its own cycle hook, wait, power on. */
2750 static int mmc_power_cycle(struct mmc *mmc)
2754 ret = mmc_power_off(mmc);
2758 ret = mmc_host_power_cycle(mmc);
2763 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2764 * to be on the safer side.
2767 return mmc_power_on(mmc);
/*
 * Power up the card and negotiate its operating conditions: power
 * init/cycle (disabling UHS when a full power cycle is impossible,
 * since UHS error recovery needs one), reset with CMD0, probe for SD
 * v2 via CMD8, then SD ACMD41 — falling back to MMC CMD1 on timeout.
 */
2770 int mmc_get_op_cond(struct mmc *mmc)
2772 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2778 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2779 mmc_adapter_card_type_ident();
2781 err = mmc_power_init(mmc);
2785 #ifdef CONFIG_MMC_QUIRKS
2786 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2787 MMC_QUIRK_RETRY_SEND_CID |
2788 MMC_QUIRK_RETRY_APP_CMD;
2791 err = mmc_power_cycle(mmc);
2794 * if power cycling is not supported, we should not try
2795 * to use the UHS modes, because we wouldn't be able to
2796 * recover from an error during the UHS initialization.
2798 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2800 mmc->host_caps &= ~UHS_CAPS;
2801 err = mmc_power_on(mmc);
2806 #if CONFIG_IS_ENABLED(DM_MMC)
2807 /* The device has already been probed ready for use */
2809 /* made sure it's not NULL earlier */
2810 err = mmc->cfg->ops->init(mmc);
2817 mmc_set_initial_state(mmc);
2819 /* Reset the Card */
2820 err = mmc_go_idle(mmc);
2825 /* The internal partition reset to user partition(0) at every CMD0*/
2826 mmc_get_blk_desc(mmc)->hwpart = 0;
2828 /* Test for SD version 2 */
2829 err = mmc_send_if_cond(mmc);
2831 /* Now try to get the SD card's operating condition */
2832 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS attempt requires a power cycle before retrying */
2833 if (err && uhs_en) {
2835 mmc_power_cycle(mmc);
2839 /* If the command timed out, we check for an MMC card */
2840 if (err == -ETIMEDOUT) {
2841 err = mmc_send_op_cond(mmc);
2844 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2845 pr_err("Card did not respond to voltage select!\n");
/*
 * First phase of initialization: seed host caps, check card presence,
 * and negotiate operating conditions. Sets init_in_progress so the
 * second phase (mmc_complete_init) can be deferred.
 */
2854 int mmc_start_init(struct mmc *mmc)
2860 * all hosts are capable of 1 bit bus-width and able to use the legacy
/* NOTE(review): MMC_CAP(MMC_LEGACY) is OR'ed in twice below — harmless
 * (same bit) but the second occurrence looks redundant; confirm. */
2863 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2864 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2865 #if CONFIG_IS_ENABLED(DM_MMC)
2866 mmc_deferred_probe(mmc);
2868 #if !defined(CONFIG_MMC_BROKEN_CD)
2869 no_card = mmc_getcd(mmc) == 0;
2873 #if !CONFIG_IS_ENABLED(DM_MMC)
2874 /* we pretend there's no card when init is NULL */
2875 no_card = no_card || (mmc->cfg->ops->init == NULL);
2879 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2880 pr_err("MMC: no card present\n");
2885 err = mmc_get_op_cond(mmc);
2888 mmc->init_in_progress = 1;
/*
 * Second phase of initialization: finish a pending op-cond exchange
 * and run the full startup sequence. Clears init_in_progress.
 */
2893 static int mmc_complete_init(struct mmc *mmc)
2897 mmc->init_in_progress = 0;
2898 if (mmc->op_cond_pending)
2899 err = mmc_complete_op_cond(mmc);
2902 err = mmc_startup(mmc);
/* Public entry point: run both init phases (skipping the first when
 * already started) and report the elapsed time. */
2910 int mmc_init(struct mmc *mmc)
2913 __maybe_unused ulong start;
2914 #if CONFIG_IS_ENABLED(DM_MMC)
2915 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2922 start = get_timer(0);
2924 if (!mmc->init_in_progress)
2925 err = mmc_start_init(mmc);
2928 err = mmc_complete_init(mmc);
2930 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2935 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2936 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2937 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Step the card down from high-speed modes before handing off (e.g.
 * to an OS): re-runs mode selection with the UHS caps (SD) or the
 * HS200/HS400 caps (eMMC) masked out of the card capabilities.
 */
2938 int mmc_deinit(struct mmc *mmc)
2946 caps_filtered = mmc->card_caps &
2947 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2948 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2949 MMC_CAP(UHS_SDR104));
2951 return sd_select_mode_and_width(mmc, caps_filtered);
2953 caps_filtered = mmc->card_caps &
2954 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2956 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Record a DSR value to be programmed during startup. */
2961 int mmc_set_dsr(struct mmc *mmc, u16 val)
2967 /* CPU-specific MMC initializations */
2968 __weak int cpu_mmc_init(bd_t *bis)
2973 /* board-specific MMC initializations. */
2974 __weak int board_mmc_init(bd_t *bis)
/* Mark a device for initialization before its first use. */
2979 void mmc_set_preinit(struct mmc *mmc, int preinit)
2981 mmc->preinit = preinit;
2984 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * DM variant: bind MMC devices in sequence order, then probe each one,
 * logging (but not aborting on) individual probe failures.
 */
2985 static int mmc_probe(bd_t *bis)
2989 struct udevice *dev;
2991 ret = uclass_get(UCLASS_MMC, &uc);
2996 * Try to add them in sequence order. Really with driver model we
2997 * should allow holes, but the current MMC list does not allow that.
2998 * So if we request 0, 1, 3 we will get 0, 1, 2.
3000 for (i = 0; ; i++) {
3001 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3005 uclass_foreach_dev(dev, uc) {
3006 ret = device_probe(dev);
3008 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-DM variant: delegate to the board hook. */
3014 static int mmc_probe(bd_t *bis)
3016 if (board_mmc_init(bis) < 0)
/* One-time subsystem init; guarded so repeat calls are no-ops. */
3023 int mmc_initialize(bd_t *bis)
3025 static int initialized = 0;
3027 if (initialized) /* Avoid initializing mmc multiple times */
3031 #if !CONFIG_IS_ENABLED(BLK)
3032 #if !CONFIG_IS_ENABLED(MMC_TINY)
3036 ret = mmc_probe(bis);
3040 #ifndef CONFIG_SPL_BUILD
3041 print_mmc_devices(',');
3048 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Look up MMC device `num` through driver model and prepare it for
 * use; with the FSL eSDHC adapter-ident workaround the device is
 * flagged for pre-initialization.
 */
3049 int mmc_init_device(int num)
3051 struct udevice *dev;
3055 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3059 m = mmc_get_mmc_dev(dev);
3062 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
3063 mmc_set_preinit(m, 1);
3072 #ifdef CONFIG_CMD_BKOPS_ENABLE
3073 int mmc_set_bkops_enable(struct mmc *mmc)
3076 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3078 err = mmc_send_ext_csd(mmc, ext_csd);
3080 puts("Could not get ext_csd register values\n");
3084 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3085 puts("Background operations not supported on device\n");
3086 return -EMEDIUMTYPE;
3089 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3090 puts("Background operations already enabled\n");
3094 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3096 puts("Failed to enable manual background operations\n");
3100 puts("Enabled manual background operations\n");