1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 #define DEFAULT_CMD6_TIMEOUT_MS 500
26 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
28 #if !CONFIG_IS_ENABLED(DM_MMC)
30 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
35 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * mmc_getwp() - query the write-protect state of the card/slot.
 *
 * Asks the board-level hook first, then falls back to the controller
 * driver's getwp() op — presumably only when the board hook reported
 * "unknown"; TODO confirm, the guard lines are missing from this excerpt.
 */
40 int mmc_getwp(struct mmc *mmc)
	/* board hook; a __weak default is defined just above in this file */
44 	wp = board_mmc_getwp(mmc);
	/* getwp is optional in mmc_ops, hence the NULL check */
47 	if (mmc->cfg->ops->getwp)
48 		wp = mmc->cfg->ops->getwp(mmc);
56 __weak int board_mmc_getcd(struct mmc *mmc)
62 #ifdef CONFIG_MMC_TRACE
63 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
65 printf("CMD_SEND:%d\n", cmd->cmdidx);
66 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
69 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
75 printf("\t\tRET\t\t\t %d\n", ret);
77 switch (cmd->resp_type) {
79 printf("\t\tMMC_RSP_NONE\n");
82 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
86 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
90 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
92 printf("\t\t \t\t 0x%08x \n",
94 printf("\t\t \t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
99 printf("\t\t\t\t\tDUMPING DATA\n");
100 for (i = 0; i < 4; i++) {
102 printf("\t\t\t\t\t%03d - ", i*4);
103 ptr = (u8 *)&cmd->response[i];
105 for (j = 0; j < 4; j++)
106 printf("%02x ", *ptr--);
111 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
115 printf("\t\tERROR MMC rsp not supported\n");
121 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
125 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
126 printf("CURR STATE:%d\n", status);
130 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
131 const char *mmc_mode_name(enum bus_mode mode)
133 static const char *const names[] = {
134 [MMC_LEGACY] = "MMC legacy",
135 [SD_LEGACY] = "SD Legacy",
136 [MMC_HS] = "MMC High Speed (26MHz)",
137 [SD_HS] = "SD High Speed (50MHz)",
138 [UHS_SDR12] = "UHS SDR12 (25MHz)",
139 [UHS_SDR25] = "UHS SDR25 (50MHz)",
140 [UHS_SDR50] = "UHS SDR50 (100MHz)",
141 [UHS_SDR104] = "UHS SDR104 (208MHz)",
142 [UHS_DDR50] = "UHS DDR50 (50MHz)",
143 [MMC_HS_52] = "MMC High Speed (52MHz)",
144 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
145 [MMC_HS_200] = "HS200 (200MHz)",
146 [MMC_HS_400] = "HS400 (200MHz)",
147 [MMC_HS_400_ES] = "HS400ES (200MHz)",
150 if (mode >= MMC_MODES_END)
151 return "Unknown mode";
157 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
159 static const int freqs[] = {
160 [MMC_LEGACY] = 25000000,
161 [SD_LEGACY] = 25000000,
164 [MMC_HS_52] = 52000000,
165 [MMC_DDR_52] = 52000000,
166 [UHS_SDR12] = 25000000,
167 [UHS_SDR25] = 50000000,
168 [UHS_SDR50] = 100000000,
169 [UHS_DDR50] = 50000000,
170 [UHS_SDR104] = 208000000,
171 [MMC_HS_200] = 200000000,
172 [MMC_HS_400] = 200000000,
173 [MMC_HS_400_ES] = 200000000,
176 if (mode == MMC_LEGACY)
177 return mmc->legacy_speed;
178 else if (mode >= MMC_MODES_END)
184 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
186 mmc->selected_mode = mode;
187 mmc->tran_speed = mmc_mode2freq(mmc, mode);
188 mmc->ddr_mode = mmc_is_mode_ddr(mode);
189 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
190 mmc->tran_speed / 1000000);
194 #if !CONFIG_IS_ENABLED(DM_MMC)
195 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
199 mmmc_trace_before_send(mmc, cmd);
200 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
201 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * mmc_send_status() - read the card status register via CMD13.
 *
 * On success the raw R1 status word is stored through @status.
 * NOTE(review): partial excerpt — the loop using 'retries = 5' and the
 * error-return paths are not fully visible here; do not assume the
 * body is complete.
 */
207 int mmc_send_status(struct mmc *mmc, unsigned int *status)
210 	int err, retries = 5;
	/* CMD13 takes the RCA in the upper 16 bits; not used in SPI mode */
212 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
213 	cmd.resp_type = MMC_RSP_R1;
214 	if (!mmc_host_is_spi(mmc))
215 		cmd.cmdarg = mmc->rca << 16;
218 		err = mmc_send_cmd(mmc, &cmd, NULL);
220 			mmc_trace_state(mmc, &cmd);
221 			*status = cmd.response[0];
	/* trace the last-seen state on the failure path as well */
225 	mmc_trace_state(mmc, &cmd);
/*
 * mmc_poll_for_busy() - wait for the card to leave its busy state.
 *
 * Prefers hardware DAT0 polling (mmc_wait_dat0, timeout converted from
 * ms to us); presumably falls back to CMD13 status polling when that
 * returns -ENOSYS — TODO confirm, intermediate lines are missing from
 * this excerpt.
 */
229 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
	/* wait for DAT0 high (card ready); third argument is microseconds */
234 	err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
239 	err = mmc_send_status(mmc, &status);
	/* done once the card reports ready and is out of the programming state */
243 	if ((status & MMC_STATUS_RDY_FOR_DATA) &&
244 	    (status & MMC_STATUS_CURR_STATE) !=
	/* any error bit in the status word is fatal for the poll */
248 	if (status & MMC_STATUS_MASK) {
249 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
250 			pr_err("Status Error: 0x%08x\n", status);
	/* counts the timeout down once per loop iteration (~1 ms granularity
	 * — assumes a delay elsewhere in the loop; TODO confirm) */
255 		if (timeout_ms-- <= 0)
261 	if (timeout_ms <= 0) {
262 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
263 			pr_err("Timeout waiting card ready\n");
/*
 * mmc_set_blocklen() - set the card's block length via CMD16.
 *
 * NOTE(review): partial excerpt — early-exit conditions (e.g. for DDR
 * or SPI hosts) and the retry loop body are not fully visible here.
 */
271 int mmc_set_blocklen(struct mmc *mmc, int len)
279 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
280 	cmd.resp_type = MMC_RSP_R1;
283 	err = mmc_send_cmd(mmc, &cmd, NULL);
	/* optional quirk handling: some cards fail CMD16 once, then succeed */
285 #ifdef CONFIG_MMC_QUIRKS
286 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
289 		 * It has been seen that SET_BLOCKLEN may fail on the first
290 		 * attempt, let's try a few more times
293 			err = mmc_send_cmd(mmc, &cmd, NULL);
303 #ifdef MMC_SUPPORTS_TUNING
304 static const u8 tuning_blk_pattern_4bit[] = {
305 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
306 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
307 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
308 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
309 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
310 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
311 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
312 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
315 static const u8 tuning_blk_pattern_8bit[] = {
316 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
317 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
318 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
319 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
320 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
321 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
322 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
323 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
324 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
325 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
326 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
327 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
328 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
329 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
330 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
331 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
334 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
337 struct mmc_data data;
338 const u8 *tuning_block_pattern;
341 if (mmc->bus_width == 8) {
342 tuning_block_pattern = tuning_blk_pattern_8bit;
343 size = sizeof(tuning_blk_pattern_8bit);
344 } else if (mmc->bus_width == 4) {
345 tuning_block_pattern = tuning_blk_pattern_4bit;
346 size = sizeof(tuning_blk_pattern_4bit);
351 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
355 cmd.resp_type = MMC_RSP_R1;
357 data.dest = (void *)data_buf;
359 data.blocksize = size;
360 data.flags = MMC_DATA_READ;
362 err = mmc_send_cmd(mmc, &cmd, &data);
366 if (memcmp(data_buf, tuning_block_pattern, size))
373 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
377 struct mmc_data data;
380 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
382 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
384 if (mmc->high_capacity)
387 cmd.cmdarg = start * mmc->read_bl_len;
389 cmd.resp_type = MMC_RSP_R1;
392 data.blocks = blkcnt;
393 data.blocksize = mmc->read_bl_len;
394 data.flags = MMC_DATA_READ;
396 if (mmc_send_cmd(mmc, &cmd, &data))
400 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
402 cmd.resp_type = MMC_RSP_R1b;
403 if (mmc_send_cmd(mmc, &cmd, NULL)) {
404 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
405 pr_err("mmc fail to send stop cmd\n");
414 #if CONFIG_IS_ENABLED(BLK)
415 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
417 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
421 #if CONFIG_IS_ENABLED(BLK)
422 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
424 int dev_num = block_dev->devnum;
426 lbaint_t cur, blocks_todo = blkcnt;
431 struct mmc *mmc = find_mmc_device(dev_num);
435 if (CONFIG_IS_ENABLED(MMC_TINY))
436 err = mmc_switch_part(mmc, block_dev->hwpart);
438 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
443 if ((start + blkcnt) > block_dev->lba) {
444 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
445 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
446 start + blkcnt, block_dev->lba);
451 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
452 pr_debug("%s: Failed to set blocklen\n", __func__);
457 cur = (blocks_todo > mmc->cfg->b_max) ?
458 mmc->cfg->b_max : blocks_todo;
459 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
460 pr_debug("%s: Failed to read blocks\n", __func__);
465 dst += cur * mmc->read_bl_len;
466 } while (blocks_todo > 0);
471 static int mmc_go_idle(struct mmc *mmc)
478 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
480 cmd.resp_type = MMC_RSP_NONE;
482 err = mmc_send_cmd(mmc, &cmd, NULL);
492 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
493 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
499 * Send CMD11 only if the request is to switch the card to
502 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
503 return mmc_set_signal_voltage(mmc, signal_voltage);
505 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
507 cmd.resp_type = MMC_RSP_R1;
509 err = mmc_send_cmd(mmc, &cmd, NULL);
513 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
517 * The card should drive cmd and dat[0:3] low immediately
518 * after the response of cmd11, but wait 100 us to be sure
520 err = mmc_wait_dat0(mmc, 0, 100);
527 * During a signal voltage level switch, the clock must be gated
528 * for 5 ms according to the SD spec
530 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
532 err = mmc_set_signal_voltage(mmc, signal_voltage);
536 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
538 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
541 * Failure to switch is indicated by the card holding
542 * dat[0:3] low. Wait for at least 1 ms according to spec
544 err = mmc_wait_dat0(mmc, 1, 1000);
554 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
561 cmd.cmdidx = MMC_CMD_APP_CMD;
562 cmd.resp_type = MMC_RSP_R1;
565 err = mmc_send_cmd(mmc, &cmd, NULL);
570 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
571 cmd.resp_type = MMC_RSP_R3;
574 * Most cards do not answer if some reserved bits
575 * in the ocr are set. However, Some controller
576 * can set bit 7 (reserved for low voltages), but
577 * how to manage low voltages SD card is not yet
580 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
581 (mmc->cfg->voltages & 0xff8000);
583 if (mmc->version == SD_VERSION_2)
584 cmd.cmdarg |= OCR_HCS;
587 cmd.cmdarg |= OCR_S18R;
589 err = mmc_send_cmd(mmc, &cmd, NULL);
594 if (cmd.response[0] & OCR_BUSY)
603 if (mmc->version != SD_VERSION_2)
604 mmc->version = SD_VERSION_1_0;
606 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
607 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
608 cmd.resp_type = MMC_RSP_R3;
611 err = mmc_send_cmd(mmc, &cmd, NULL);
617 mmc->ocr = cmd.response[0];
619 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
620 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
622 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
628 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
634 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
639 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
640 cmd.resp_type = MMC_RSP_R3;
642 if (use_arg && !mmc_host_is_spi(mmc))
643 cmd.cmdarg = OCR_HCS |
644 (mmc->cfg->voltages &
645 (mmc->ocr & OCR_VOLTAGE_MASK)) |
646 (mmc->ocr & OCR_ACCESS_MODE);
648 err = mmc_send_cmd(mmc, &cmd, NULL);
651 mmc->ocr = cmd.response[0];
655 static int mmc_send_op_cond(struct mmc *mmc)
659 /* Some cards seem to need this */
662 /* Asking to the card its capabilities */
663 for (i = 0; i < 2; i++) {
664 err = mmc_send_op_cond_iter(mmc, i != 0);
668 /* exit if not busy (flag seems to be inverted) */
669 if (mmc->ocr & OCR_BUSY)
672 mmc->op_cond_pending = 1;
676 static int mmc_complete_op_cond(struct mmc *mmc)
683 mmc->op_cond_pending = 0;
684 if (!(mmc->ocr & OCR_BUSY)) {
685 /* Some cards seem to need this */
688 start = get_timer(0);
690 err = mmc_send_op_cond_iter(mmc, 1);
693 if (mmc->ocr & OCR_BUSY)
695 if (get_timer(start) > timeout)
701 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
702 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
703 cmd.resp_type = MMC_RSP_R3;
706 err = mmc_send_cmd(mmc, &cmd, NULL);
711 mmc->ocr = cmd.response[0];
714 mmc->version = MMC_VERSION_UNKNOWN;
716 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
723 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
726 struct mmc_data data;
729 /* Get the Card Status Register */
730 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
731 cmd.resp_type = MMC_RSP_R1;
734 data.dest = (char *)ext_csd;
736 data.blocksize = MMC_MAX_BLOCK_LEN;
737 data.flags = MMC_DATA_READ;
739 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * __mmc_switch() - write one EXT_CSD byte via CMD6 and wait for the
 * card to finish the switch.
 *
 * @set/@index/@value: EXT_CSD command set, byte index and new value.
 * The timeout starts from DEFAULT_CMD6_TIMEOUT_MS and is widened using
 * the card-reported GENERIC_CMD6_TIME / PARTITION_SWITCH_TIME (both in
 * 10 ms units, hence the '* 10').
 * NOTE(review): partial excerpt — the retry-count initialisation, the
 * send_status parameter declaration and several return paths are not
 * visible here.
 */
744 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
747 	unsigned int status, start;
749 	int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
	/* partition switches get their own, usually longer, timeout */
750 	bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
751 			      (index == EXT_CSD_PART_CONF);
755 	if (mmc->gen_cmd6_time)
756 		timeout_ms = mmc->gen_cmd6_time * 10;
758 	if (is_part_switch && mmc->part_switch_time)
759 		timeout_ms = mmc->part_switch_time * 10;
761 	cmd.cmdidx = MMC_CMD_SWITCH;
762 	cmd.resp_type = MMC_RSP_R1b;
763 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	/* CMD6 itself is retried on transport errors */
768 		ret = mmc_send_cmd(mmc, &cmd, NULL);
769 	} while (ret && retries-- > 0);
774 	start = get_timer(0);
776 	/* poll dat0 for rdy/busy status */
777 	ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	/* -ENOSYS means the host cannot poll DAT0; fall through to CMD13 */
778 	if (ret && ret != -ENOSYS)
782 	 * In cases when not allowed to poll by using CMD13 or because we aren't
783 	 * capable of polling by using mmc_wait_dat0, then rely on waiting the
784 	 * stated timeout to be sufficient.
786 	if (ret == -ENOSYS && !send_status)
789 	/* Finally wait until the card is ready or indicates a failure
790 	 * to switch. It doesn't hurt to use CMD13 here even if send_status
791 	 * is false, because by now (after 'timeout_ms' ms) the bus should be
795 		ret = mmc_send_status(mmc, &status);
		/* SWITCH_ERROR in the status word means the card rejected CMD6 */
797 		if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
798 			pr_debug("switch failed %d/%d/0x%x !\n", set, index,
802 		if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
805 	} while (get_timer(start) < timeout_ms);
/*
 * mmc_switch() - public CMD6 wrapper: write one EXT_CSD byte and poll
 * card status until the switch completes (send_status = true).
 */
810 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
812 	return __mmc_switch(mmc, set, index, value, true);
815 #if !CONFIG_IS_ENABLED(MMC_TINY)
816 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
822 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
828 speed_bits = EXT_CSD_TIMING_HS;
830 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
832 speed_bits = EXT_CSD_TIMING_HS200;
835 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
837 speed_bits = EXT_CSD_TIMING_HS400;
840 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
842 speed_bits = EXT_CSD_TIMING_HS400;
846 speed_bits = EXT_CSD_TIMING_LEGACY;
852 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
853 speed_bits, !hsdowngrade);
857 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
858 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
860 * In case the eMMC is in HS200/HS400 mode and we are downgrading
861 * to HS mode, the card clock are still running much faster than
862 * the supported HS mode clock, so we can not reliably read out
863 * Extended CSD. Reconfigure the controller to run at HS mode.
866 mmc_select_mode(mmc, MMC_HS);
867 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
871 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
872 /* Now check to see that it worked */
873 err = mmc_send_ext_csd(mmc, test_csd);
877 /* No high-speed support */
878 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - derive the card's mode/width capabilities
 * from the cached EXT_CSD CARD_TYPE byte.
 *
 * Fills mmc->card_caps with MMC_MODE_* / MMC_CAP() flags. SPI hosts
 * and pre-v4 cards keep the 1-bit legacy default set first.
 * NOTE(review): partial excerpt — early returns and #endif lines for
 * the conditional sections are not visible here.
 */
885 static int mmc_get_capabilities(struct mmc *mmc)
887 	u8 *ext_csd = mmc->ext_csd;
	/* baseline: every card does 1-bit legacy */
890 	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
892 	if (mmc_host_is_spi(mmc))
895 	/* Only version 4 supports high-speed */
896 	if (mmc->version < MMC_VERSION_4)
	/* ext_csd was expected to be cached by an earlier read */
900 		pr_err("No ext_csd found!\n"); /* this should never happen */
904 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
906 	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
907 	mmc->cardtype = cardtype;
909 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
910 	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
911 			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
912 		mmc->card_caps |= MMC_MODE_HS200;
915 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
916 	CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
917 	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
918 			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
919 		mmc->card_caps |= MMC_MODE_HS400;
	/* 52 MHz cards may additionally support DDR at the same clock */
922 	if (cardtype & EXT_CSD_CARD_TYPE_52) {
923 		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
924 			mmc->card_caps |= MMC_MODE_DDR_52MHz;
925 		mmc->card_caps |= MMC_MODE_HS_52MHz;
927 	if (cardtype & EXT_CSD_CARD_TYPE_26)
928 		mmc->card_caps |= MMC_MODE_HS;
930 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	/* enhanced strobe only makes sense on a card that can do HS400 */
931 	if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
932 	    (mmc->card_caps & MMC_MODE_HS400)) {
933 		mmc->card_caps |= MMC_MODE_HS400_ES;
941 static int mmc_set_capacity(struct mmc *mmc, int part_num)
945 mmc->capacity = mmc->capacity_user;
949 mmc->capacity = mmc->capacity_boot;
952 mmc->capacity = mmc->capacity_rpmb;
958 mmc->capacity = mmc->capacity_gp[part_num - 4];
964 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
969 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
975 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
977 (mmc->part_config & ~PART_ACCESS_MASK)
978 | (part_num & PART_ACCESS_MASK));
979 } while (ret && retry--);
982 * Set the capacity if the switch succeeded or was intended
983 * to return to representing the raw device.
985 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
986 ret = mmc_set_capacity(mmc, part_num);
987 mmc_get_blk_desc(mmc)->hwpart = part_num;
993 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
994 int mmc_hwpart_config(struct mmc *mmc,
995 const struct mmc_hwpart_conf *conf,
996 enum mmc_hwpart_conf_mode mode)
1001 u32 gp_size_mult[4];
1002 u32 max_enh_size_mult;
1003 u32 tot_enh_size_mult = 0;
1006 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1008 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1011 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1012 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1013 return -EMEDIUMTYPE;
1016 if (!(mmc->part_support & PART_SUPPORT)) {
1017 pr_err("Card does not support partitioning\n");
1018 return -EMEDIUMTYPE;
1021 if (!mmc->hc_wp_grp_size) {
1022 pr_err("Card does not define HC WP group size\n");
1023 return -EMEDIUMTYPE;
1026 /* check partition alignment and total enhanced size */
1027 if (conf->user.enh_size) {
1028 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1029 conf->user.enh_start % mmc->hc_wp_grp_size) {
1030 pr_err("User data enhanced area not HC WP group "
1034 part_attrs |= EXT_CSD_ENH_USR;
1035 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1036 if (mmc->high_capacity) {
1037 enh_start_addr = conf->user.enh_start;
1039 enh_start_addr = (conf->user.enh_start << 9);
1045 tot_enh_size_mult += enh_size_mult;
1047 for (pidx = 0; pidx < 4; pidx++) {
1048 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1049 pr_err("GP%i partition not HC WP group size "
1050 "aligned\n", pidx+1);
1053 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1054 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1055 part_attrs |= EXT_CSD_ENH_GP(pidx);
1056 tot_enh_size_mult += gp_size_mult[pidx];
1060 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1061 pr_err("Card does not support enhanced attribute\n");
1062 return -EMEDIUMTYPE;
1065 err = mmc_send_ext_csd(mmc, ext_csd);
1070 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1071 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1072 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1073 if (tot_enh_size_mult > max_enh_size_mult) {
1074 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1075 tot_enh_size_mult, max_enh_size_mult);
1076 return -EMEDIUMTYPE;
1079 /* The default value of EXT_CSD_WR_REL_SET is device
1080 * dependent, the values can only be changed if the
1081 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1082 * changed only once and before partitioning is completed. */
1083 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1084 if (conf->user.wr_rel_change) {
1085 if (conf->user.wr_rel_set)
1086 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1088 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1090 for (pidx = 0; pidx < 4; pidx++) {
1091 if (conf->gp_part[pidx].wr_rel_change) {
1092 if (conf->gp_part[pidx].wr_rel_set)
1093 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1095 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1099 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1100 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1101 puts("Card does not support host controlled partition write "
1102 "reliability settings\n");
1103 return -EMEDIUMTYPE;
1106 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1107 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1108 pr_err("Card already partitioned\n");
1112 if (mode == MMC_HWPART_CONF_CHECK)
1115 /* Partitioning requires high-capacity size definitions */
1116 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1117 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1118 EXT_CSD_ERASE_GROUP_DEF, 1);
1123 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1125 #if CONFIG_IS_ENABLED(MMC_WRITE)
1126 /* update erase group size to be high-capacity */
1127 mmc->erase_grp_size =
1128 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1133 /* all OK, write the configuration */
1134 for (i = 0; i < 4; i++) {
1135 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1136 EXT_CSD_ENH_START_ADDR+i,
1137 (enh_start_addr >> (i*8)) & 0xFF);
1141 for (i = 0; i < 3; i++) {
1142 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1143 EXT_CSD_ENH_SIZE_MULT+i,
1144 (enh_size_mult >> (i*8)) & 0xFF);
1148 for (pidx = 0; pidx < 4; pidx++) {
1149 for (i = 0; i < 3; i++) {
1150 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1151 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1152 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1157 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1158 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1162 if (mode == MMC_HWPART_CONF_SET)
1165 /* The WR_REL_SET is a write-once register but shall be
1166 * written before setting PART_SETTING_COMPLETED. As it is
1167 * write-once we can only write it when completing the
1169 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1170 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1171 EXT_CSD_WR_REL_SET, wr_rel_set);
1176 /* Setting PART_SETTING_COMPLETED confirms the partition
1177 * configuration but it only becomes effective after power
1178 * cycle, so we do not adjust the partition related settings
1179 * in the mmc struct. */
1181 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1182 EXT_CSD_PARTITION_SETTING,
1183 EXT_CSD_PARTITION_SETTING_COMPLETED);
1191 #if !CONFIG_IS_ENABLED(DM_MMC)
1192 int mmc_getcd(struct mmc *mmc)
1196 cd = board_mmc_getcd(mmc);
1199 if (mmc->cfg->ops->getcd)
1200 cd = mmc->cfg->ops->getcd(mmc);
1209 #if !CONFIG_IS_ENABLED(MMC_TINY)
1210 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1213 struct mmc_data data;
1215 /* Switch the frequency */
1216 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1217 cmd.resp_type = MMC_RSP_R1;
1218 cmd.cmdarg = (mode << 31) | 0xffffff;
1219 cmd.cmdarg &= ~(0xf << (group * 4));
1220 cmd.cmdarg |= value << (group * 4);
1222 data.dest = (char *)resp;
1223 data.blocksize = 64;
1225 data.flags = MMC_DATA_READ;
1227 return mmc_send_cmd(mmc, &cmd, &data);
1230 static int sd_get_capabilities(struct mmc *mmc)
1234 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1235 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1236 struct mmc_data data;
1238 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1242 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1244 if (mmc_host_is_spi(mmc))
1247 /* Read the SCR to find out if this card supports higher speeds */
1248 cmd.cmdidx = MMC_CMD_APP_CMD;
1249 cmd.resp_type = MMC_RSP_R1;
1250 cmd.cmdarg = mmc->rca << 16;
1252 err = mmc_send_cmd(mmc, &cmd, NULL);
1257 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1258 cmd.resp_type = MMC_RSP_R1;
1264 data.dest = (char *)scr;
1267 data.flags = MMC_DATA_READ;
1269 err = mmc_send_cmd(mmc, &cmd, &data);
1278 mmc->scr[0] = __be32_to_cpu(scr[0]);
1279 mmc->scr[1] = __be32_to_cpu(scr[1]);
1281 switch ((mmc->scr[0] >> 24) & 0xf) {
1283 mmc->version = SD_VERSION_1_0;
1286 mmc->version = SD_VERSION_1_10;
1289 mmc->version = SD_VERSION_2;
1290 if ((mmc->scr[0] >> 15) & 0x1)
1291 mmc->version = SD_VERSION_3;
1294 mmc->version = SD_VERSION_1_0;
1298 if (mmc->scr[0] & SD_DATA_4BIT)
1299 mmc->card_caps |= MMC_MODE_4BIT;
1301 /* Version 1.0 doesn't support switching */
1302 if (mmc->version == SD_VERSION_1_0)
1307 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1308 (u8 *)switch_status);
1313 /* The high-speed function is busy. Try again */
1314 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1318 /* If high-speed isn't supported, we return */
1319 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1320 mmc->card_caps |= MMC_CAP(SD_HS);
1322 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1323 /* Version before 3.0 don't support UHS modes */
1324 if (mmc->version < SD_VERSION_3)
1327 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1328 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1329 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1330 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1331 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1332 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1333 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1334 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1335 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1336 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1337 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1343 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1347 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1350 /* SD version 1.00 and 1.01 does not support CMD 6 */
1351 if (mmc->version == SD_VERSION_1_0)
1356 speed = UHS_SDR12_BUS_SPEED;
1359 speed = HIGH_SPEED_BUS_SPEED;
1361 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1363 speed = UHS_SDR12_BUS_SPEED;
1366 speed = UHS_SDR25_BUS_SPEED;
1369 speed = UHS_SDR50_BUS_SPEED;
1372 speed = UHS_DDR50_BUS_SPEED;
1375 speed = UHS_SDR104_BUS_SPEED;
1382 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1386 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1392 static int sd_select_bus_width(struct mmc *mmc, int w)
1397 if ((w != 4) && (w != 1))
1400 cmd.cmdidx = MMC_CMD_APP_CMD;
1401 cmd.resp_type = MMC_RSP_R1;
1402 cmd.cmdarg = mmc->rca << 16;
1404 err = mmc_send_cmd(mmc, &cmd, NULL);
1408 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1409 cmd.resp_type = MMC_RSP_R1;
1414 err = mmc_send_cmd(mmc, &cmd, NULL);
1422 #if CONFIG_IS_ENABLED(MMC_WRITE)
1423 static int sd_read_ssr(struct mmc *mmc)
1425 static const unsigned int sd_au_size[] = {
1426 0, SZ_16K / 512, SZ_32K / 512,
1427 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1428 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1429 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1430 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1435 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1436 struct mmc_data data;
1438 unsigned int au, eo, et, es;
1440 cmd.cmdidx = MMC_CMD_APP_CMD;
1441 cmd.resp_type = MMC_RSP_R1;
1442 cmd.cmdarg = mmc->rca << 16;
1444 err = mmc_send_cmd(mmc, &cmd, NULL);
1445 #ifdef CONFIG_MMC_QUIRKS
1446 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1449 * It has been seen that APP_CMD may fail on the first
1450 * attempt, let's try a few more times
1453 err = mmc_send_cmd(mmc, &cmd, NULL);
1456 } while (retries--);
1462 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1463 cmd.resp_type = MMC_RSP_R1;
1467 data.dest = (char *)ssr;
1468 data.blocksize = 64;
1470 data.flags = MMC_DATA_READ;
1472 err = mmc_send_cmd(mmc, &cmd, &data);
1480 for (i = 0; i < 16; i++)
1481 ssr[i] = be32_to_cpu(ssr[i]);
1483 au = (ssr[2] >> 12) & 0xF;
1484 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1485 mmc->ssr.au = sd_au_size[au];
1486 es = (ssr[3] >> 24) & 0xFF;
1487 es |= (ssr[2] & 0xFF) << 8;
1488 et = (ssr[3] >> 18) & 0x3F;
1490 eo = (ssr[3] >> 16) & 0x3;
1491 mmc->ssr.erase_timeout = (et * 1000) / es;
1492 mmc->ssr.erase_offset = eo * 1000;
1495 pr_debug("Invalid Allocation Unit Size.\n");
1501 /* frequency bases */
1502 /* divided by 10 to be nice to platforms without floating point */
1503 static const int fbase[] = {
1510 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1511 * to platforms without floating point.
1513 static const u8 multipliers[] = {
1532 static inline int bus_width(uint cap)
1534 if (cap == MMC_MODE_8BIT)
1536 if (cap == MMC_MODE_4BIT)
1538 if (cap == MMC_MODE_1BIT)
1540 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1544 #if !CONFIG_IS_ENABLED(DM_MMC)
1545 #ifdef MMC_SUPPORTS_TUNING
1546 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1552 static int mmc_set_ios(struct mmc *mmc)
1556 if (mmc->cfg->ops->set_ios)
1557 ret = mmc->cfg->ops->set_ios(mmc);
1562 static int mmc_host_power_cycle(struct mmc *mmc)
1566 if (mmc->cfg->ops->host_power_cycle)
1567 ret = mmc->cfg->ops->host_power_cycle(mmc);
1573 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1576 if (clock > mmc->cfg->f_max)
1577 clock = mmc->cfg->f_max;
1579 if (clock < mmc->cfg->f_min)
1580 clock = mmc->cfg->f_min;
1584 mmc->clk_disable = disable;
1586 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1588 return mmc_set_ios(mmc);
1591 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1593 mmc->bus_width = width;
1595 return mmc_set_ios(mmc);
1598 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1600 * helper function to display the capabilities in a human
1601 * friendly manner. The capabilities include bus width and
1604 void mmc_dump_capabilities(const char *text, uint caps)
1608 pr_debug("%s: widths [", text);
1609 if (caps & MMC_MODE_8BIT)
1611 if (caps & MMC_MODE_4BIT)
1613 if (caps & MMC_MODE_1BIT)
1615 pr_debug("\b\b] modes [");
1616 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1617 if (MMC_CAP(mode) & caps)
1618 pr_debug("%s, ", mmc_mode_name(mode));
1619 pr_debug("\b\b]\n");
1623 struct mode_width_tuning {
1626 #ifdef MMC_SUPPORTS_TUNING
1631 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_voltage_to_mv() - convert an mmc_voltage enum value to millivolts.
 *
 * NOTE(review): partial excerpt — the switch header and the fallthrough
 * return (presumably a negative error code for unknown values — TODO
 * confirm) are not visible here.
 */
1632 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1635 	case MMC_SIGNAL_VOLTAGE_000: return 0;
1636 	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1637 	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1638 	case MMC_SIGNAL_VOLTAGE_120: return 1200;
1643 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1647 if (mmc->signal_voltage == signal_voltage)
1650 mmc->signal_voltage = signal_voltage;
1651 err = mmc_set_ios(mmc);
1653 pr_debug("unable to set voltage (err %d)\n", err);
1658 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1664 #if !CONFIG_IS_ENABLED(MMC_TINY)
1665 static const struct mode_width_tuning sd_modes_by_pref[] = {
1666 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1667 #ifdef MMC_SUPPORTS_TUNING
1670 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1671 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1676 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1680 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1684 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1689 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1691 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1694 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1699 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1703 #define for_each_sd_mode_by_pref(caps, mwt) \
1704 for (mwt = sd_modes_by_pref;\
1705 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1707 if (caps & MMC_CAP(mwt->mode))
/*
 * Negotiate the best bus mode and width for an SD card.
 *
 * Walks sd_modes_by_pref (fastest first) and, within each mode, the bus
 * widths from widest to narrowest.  For each candidate it configures the
 * card-side width, the card-side speed, then the host side, and runs
 * tuning when the mode requires it.  On any failure the card is reverted
 * to SD_LEGACY before trying the next candidate.  SPI hosts are forced
 * to 1-bit SD_LEGACY immediately.
 *
 * @mmc:       the device
 * @card_caps: capabilities reported by the card; intersected with the
 *             host capabilities before selection
 * Returns 0 on success; error paths are partially elided in this view.
 */
1709 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1712 	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1713 	const struct mode_width_tuning *mwt;
1714 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS only if the card acknowledged the 1.8V switch request (S18R) */
1715 	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1717 	bool uhs_en = false;
1722 	mmc_dump_capabilities("sd card", card_caps);
1723 	mmc_dump_capabilities("host", mmc->host_caps);
	/* SPI transport: fixed 1-bit legacy mode, nothing to negotiate */
1726 	if (mmc_host_is_spi(mmc)) {
1727 		mmc_set_bus_width(mmc, 1);
1728 		mmc_select_mode(mmc, SD_LEGACY);
1729 		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1733 	/* Restrict card's capabilities by what the host can do */
1734 	caps = card_caps & mmc->host_caps;
1739 	for_each_sd_mode_by_pref(caps, mwt) {
1742 		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1743 			if (*w & caps & mwt->widths) {
1744 				pr_debug("trying mode %s width %d (at %d MHz)\n",
1745 					 mmc_mode_name(mwt->mode),
1747 					 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1749 				/* configure the bus width (card + host) */
1750 				err = sd_select_bus_width(mmc, bus_width(*w));
1753 				mmc_set_bus_width(mmc, bus_width(*w));
1755 				/* configure the bus mode (card) */
1756 				err = sd_set_card_speed(mmc, mwt->mode);
1760 				/* configure the bus mode (host) */
1761 				mmc_select_mode(mmc, mwt->mode);
1762 				mmc_set_clock(mmc, mmc->tran_speed,
1765 #ifdef MMC_SUPPORTS_TUNING
1766 				/* execute tuning if needed */
1767 				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1768 					err = mmc_execute_tuning(mmc,
1771 						pr_debug("tuning failed\n");
1777 #if CONFIG_IS_ENABLED(MMC_WRITE)
				/* SSR read failure is non-fatal: warn only */
1778 				err = sd_read_ssr(mmc);
1780 					pr_warn("unable to read ssr\n");
1786 				/* revert to a safer bus speed */
1787 				mmc_select_mode(mmc, SD_LEGACY);
1788 				mmc_set_clock(mmc, mmc->tran_speed,
1794 	pr_err("unable to select a mode\n");
1799 * read the compare the part of ext csd that is constant.
1800 * This can be used to check that the transfer is working
/*
 * Sanity-check the current bus configuration by re-reading EXT_CSD and
 * comparing a handful of read-only fields (partitioning support, WP/erase
 * group sizes, revision, SEC_CNT) against the copy cached at startup.
 * A mismatch indicates the transfer is corrupting data at the new
 * mode/width.  Cards older than MMC v4 have no EXT_CSD and are skipped
 * (early return, elided in this view).
 */
1803 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1806 	const u8 *ext_csd = mmc->ext_csd;
1807 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1809 	if (mmc->version < MMC_VERSION_4)
1812 	err = mmc_send_ext_csd(mmc, test_csd);
1816 	/* Only compare read only fields */
1817 	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1818 		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1819 	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1820 		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1821 	    ext_csd[EXT_CSD_REV]
1822 		== test_csd[EXT_CSD_REV] &&
1823 	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1824 		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1825 	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1826 		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1832 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Pick and apply the lowest signal voltage acceptable to both the card
 * (derived from EXT_CSD card type bits for the requested bus mode) and
 * the caller's allowed_mask.  ffs() finds the lowest set bit because the
 * MMC_SIGNAL_VOLTAGE_* bits are ordered lowest-voltage-first; if setting
 * a candidate fails, that bit is cleared from allowed_mask and the next
 * candidate is tried.
 * NOTE(review): the switch on 'mode' that selects which cardtype bits
 * apply is elided here — HS200/HS400 branch, DDR_52 branch, then the
 * default 3.3V case; confirm against the full source.
 */
1833 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1834 				  uint32_t allowed_mask)
1842 		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1843 				     EXT_CSD_CARD_TYPE_HS400_1_8V))
1844 			card_mask |= MMC_SIGNAL_VOLTAGE_180;
1845 		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1846 				     EXT_CSD_CARD_TYPE_HS400_1_2V))
1847 			card_mask |= MMC_SIGNAL_VOLTAGE_120;
1850 		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1851 			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1852 				     MMC_SIGNAL_VOLTAGE_180;
1853 		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1854 			card_mask |= MMC_SIGNAL_VOLTAGE_120;
1857 		card_mask |= MMC_SIGNAL_VOLTAGE_330;
1861 	while (card_mask & allowed_mask) {
1862 		enum mmc_voltage best_match;
1864 		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1865 		if (!mmc_set_signal_voltage(mmc, best_match))
1868 		allowed_mask &= ~best_match;
1874 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1875 uint32_t allowed_mask)
/*
 * eMMC bus modes in order of preference, fastest first: HS400-ES, HS400,
 * HS200 (each behind its Kconfig gate), then the always-available modes.
 * HS400 variants are 8-bit only; HS200 allows 8/4-bit; the legacy/HS
 * entries at the bottom also permit 1-bit operation.
 */
1881 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1882 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1884 		.mode = MMC_HS_400_ES,
1885 		.widths = MMC_MODE_8BIT,
1888 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1891 		.widths = MMC_MODE_8BIT,
1892 		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1895 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1898 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1899 		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1904 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1908 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1912 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1916 		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/*
 * Iterate over mmc_modes_by_pref, visiting only modes present in the
 * given capability mask (mirror of for_each_sd_mode_by_pref).
 */
1920 #define for_each_mmc_mode_by_pref(caps, mwt) \
1921 	for (mwt = mmc_modes_by_pref;\
1922 	     mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1924 		if (caps & MMC_CAP(mwt->mode))
/*
 * Map host capability bit + DDR flag to the EXT_CSD BUS_WIDTH value to
 * program into the card, ordered widest/DDR first so iteration prefers
 * the fastest configuration.
 */
1926 static const struct ext_csd_bus_width {
1930 } ext_csd_bus_width[] = {
1931 	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1932 	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1933 	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1934 	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1935 	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1938 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1939 static int mmc_select_hs400(struct mmc *mmc)
1943 /* Set timing to HS200 for tuning */
1944 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1948 /* configure the bus mode (host) */
1949 mmc_select_mode(mmc, MMC_HS_200);
1950 mmc_set_clock(mmc, mmc->tran_speed, false);
1952 /* execute tuning if needed */
1953 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1955 debug("tuning failed\n");
1959 /* Set back to HS */
1960 mmc_set_card_speed(mmc, MMC_HS, true);
1962 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1963 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1967 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1971 mmc_select_mode(mmc, MMC_HS_400);
1972 err = mmc_set_clock(mmc, mmc->tran_speed, false);
1979 static int mmc_select_hs400(struct mmc *mmc)
1985 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1986 #if !CONFIG_IS_ENABLED(DM_MMC)
1987 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/*
 * Switch an eMMC device into HS400 Enhanced Strobe mode: go to HS,
 * program 8-bit DDR width with the strobe bit set, switch card timing to
 * HS400-ES, update the host clock, and finally enable enhanced strobe on
 * the host side.  No tuning is needed in ES mode (the strobe replaces it).
 */
1992 static int mmc_select_hs400es(struct mmc *mmc)
1996 	err = mmc_set_card_speed(mmc, MMC_HS, true);
2000 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2001 			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2002 			 EXT_CSD_BUS_WIDTH_STROBE);
2004 		printf("switch to bus width for hs400 failed\n");
2007 	/* TODO: driver strength */
2008 	err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2012 	mmc_select_mode(mmc, MMC_HS_400_ES);
2013 	err = mmc_set_clock(mmc, mmc->tran_speed, false);
2017 	return mmc_set_enhanced_strobe(mmc);
2020 static int mmc_select_hs400es(struct mmc *mmc)
/*
 * Iterate over ext_csd_bus_width entries matching both the requested
 * DDR-ness and the capability mask (widest matching width first).
 */
2026 #define for_each_supported_width(caps, ddr, ecbv) \
2027 	for (ecbv = ext_csd_bus_width;\
2028 	     ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2030 		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Negotiate the best bus mode and width for an eMMC device.
 *
 * Tries mmc_modes_by_pref (fastest first) crossed with the compatible
 * bus widths.  For each candidate: pick the lowest workable signal
 * voltage, program the card's bus width, hand HS400/HS400-ES off to
 * their dedicated helpers, otherwise set card speed, optionally DDR
 * width, then the host side, run tuning if required, and verify the
 * link with an EXT_CSD read-back.  On failure the previous voltage is
 * restored and the card reverted to 1-bit legacy before the next try.
 * SPI hosts are forced to 1-bit MMC_LEGACY immediately; pre-v4 cards
 * cannot change width at all.  Several error-check lines are elided in
 * this view.
 *
 * @mmc:       the device
 * @card_caps: card capabilities, masked by host capabilities below
 * Returns 0 on success.
 */
2032 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2035 	const struct mode_width_tuning *mwt;
2036 	const struct ext_csd_bus_width *ecbw;
2039 	mmc_dump_capabilities("mmc", card_caps);
2040 	mmc_dump_capabilities("host", mmc->host_caps);
2043 	if (mmc_host_is_spi(mmc)) {
2044 		mmc_set_bus_width(mmc, 1);
2045 		mmc_select_mode(mmc, MMC_LEGACY);
2046 		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2050 	/* Restrict card's capabilities by what the host can do */
2051 	card_caps &= mmc->host_caps;
2053 	/* Only version 4 of MMC supports wider bus widths */
2054 	if (mmc->version < MMC_VERSION_4)
2057 	if (!mmc->ext_csd) {
2058 		pr_debug("No ext_csd found!\n"); /* this should enver happen */
2062 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2063     CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2065 	 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2066 	 * before doing anything else, since a transition from either of
2067 	 * the HS200/HS400 mode directly to legacy mode is not supported.
2069 	if (mmc->selected_mode == MMC_HS_200 ||
2070 	    mmc->selected_mode == MMC_HS_400)
2071 		mmc_set_card_speed(mmc, MMC_HS, true);
2074 	mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2076 	for_each_mmc_mode_by_pref(card_caps, mwt) {
2077 		for_each_supported_width(card_caps & mwt->widths,
2078 					 mmc_is_mode_ddr(mwt->mode), ecbw) {
2079 			enum mmc_voltage old_voltage;
2080 			pr_debug("trying mode %s width %d (at %d MHz)\n",
2081 				 mmc_mode_name(mwt->mode),
2082 				 bus_width(ecbw->cap),
2083 				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* remember voltage so it can be restored on failure */
2084 			old_voltage = mmc->signal_voltage;
2085 			err = mmc_set_lowest_voltage(mmc, mwt->mode,
2086 						     MMC_ALL_SIGNAL_VOLTAGE);
2090 			/* configure the bus width (card + host) */
2091 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			/* width only here; DDR flag is applied later */
2093 				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2096 			mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2098 			if (mwt->mode == MMC_HS_400) {
2099 				err = mmc_select_hs400(mmc);
2101 					printf("Select HS400 failed %d\n", err);
2104 			} else if (mwt->mode == MMC_HS_400_ES) {
2105 				err = mmc_select_hs400es(mmc);
2107 					printf("Select HS400ES failed %d\n",
2112 				/* configure the bus speed (card) */
2113 				err = mmc_set_card_speed(mmc, mwt->mode, false);
2118 				 * configure the bus width AND the ddr mode
2119 				 * (card). The host side will be taken care
2120 				 * of in the next step
2122 				if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2123 					err = mmc_switch(mmc,
2124 							 EXT_CSD_CMD_SET_NORMAL,
2126 							 ecbw->ext_csd_bits);
2131 				/* configure the bus mode (host) */
2132 				mmc_select_mode(mmc, mwt->mode);
2133 				mmc_set_clock(mmc, mmc->tran_speed,
2135 #ifdef MMC_SUPPORTS_TUNING
2137 				/* execute tuning if needed */
2139 					err = mmc_execute_tuning(mmc,
2142 						pr_debug("tuning failed\n");
2149 			/* do a transfer to check the configuration */
2150 			err = mmc_read_and_compare_ext_csd(mmc);
2154 			mmc_set_signal_voltage(mmc, old_voltage);
2155 			/* if an error occured, revert to a safer bus mode */
2156 			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2157 				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2158 			mmc_select_mode(mmc, MMC_LEGACY);
2159 			mmc_set_bus_width(mmc, 1);
2163 	pr_err("unable to select a mode\n");
2169 #if CONFIG_IS_ENABLED(MMC_TINY)
2170 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * MMC v4+ specific startup: read EXT_CSD and populate the mmc struct
 * from it.
 *
 * Reads EXT_CSD (into a static backup buffer under MMC_TINY, otherwise
 * into a freshly malloc'd cache at mmc->ext_csd), then derives:
 * - mmc->version from EXT_CSD_REV via the mmc_versions[] table
 * - user capacity from SEC_CNT (valid when > 2 GiB per JEDEC)
 * - CMD6 generic timeout, partition switch time (clamped to a minimum)
 * - hardware partition info: boot/RPMB/GP partition sizes and the
 *   enhanced user area (GP/enhanced data only honored when
 *   PARTITION_SETTING_COMPLETED is set)
 * - erase/WP group sizes, enabling ERASE_GROUP_DEF when partitioned
 * Non-v4 and SD cards return early (success).  Error-handling and
 * cleanup lines are elided in this view.
 */
2173 static int mmc_startup_v4(struct mmc *mmc)
2177 	bool has_parts = false;
2178 	bool part_completed;
2179 	static const u32 mmc_versions[] = {
2191 #if CONFIG_IS_ENABLED(MMC_TINY)
2192 	u8 *ext_csd = ext_csd_bkup;
2194 	if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2198 	memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2200 	err = mmc_send_ext_csd(mmc, ext_csd);
2204 	/* store the ext csd for future reference */
2206 	mmc->ext_csd = ext_csd;
2208 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2210 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2213 	/* check ext_csd version and capacity */
2214 	err = mmc_send_ext_csd(mmc, ext_csd);
2218 	/* store the ext csd for future reference */
2220 	mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2223 	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
	/* unknown EXT_CSD revision: cannot map to a version */
2225 	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2228 	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2230 	if (mmc->version >= MMC_VERSION_4_2) {
2232 		 * According to the JEDEC Standard, the value of
2233 		 * ext_csd's capacity is valid if the value is more
2236 		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2237 			   | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2238 			   | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2239 			   | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2240 		capacity *= MMC_MAX_BLOCK_LEN;
		/* only trust SEC_CNT above 2 GiB (high-capacity rule) */
2241 		if ((capacity >> 20) > 2 * 1024)
2242 			mmc->capacity_user = capacity;
2245 	if (mmc->version >= MMC_VERSION_4_5)
2246 		mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2248 	/* The partition data may be non-zero but it is only
2249 	 * effective if PARTITION_SETTING_COMPLETED is set in
2250 	 * EXT_CSD, so ignore any data if this bit is not set,
2251 	 * except for enabling the high-capacity group size
2252 	 * definition (see below).
2254 	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2255 			    EXT_CSD_PARTITION_SETTING_COMPLETED);
2257 	mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2258 	/* Some eMMC set the value too low so set a minimum */
2259 	if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2260 		mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2262 	/* store the partition info of emmc */
2263 	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2264 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2265 	    ext_csd[EXT_CSD_BOOT_MULT])
2266 		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2267 	if (part_completed &&
2268 	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2269 		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
	/* boot/RPMB sizes are in 128 KiB units: multiplier << 17 */
2271 	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2273 	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2275 	for (i = 0; i < 4; i++) {
2276 		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2277 		uint mult = (ext_csd[idx + 2] << 16) +
2278 			(ext_csd[idx + 1] << 8) + ext_csd[idx];
2281 		if (!part_completed)
2283 		mmc->capacity_gp[i] = mult;
2284 		mmc->capacity_gp[i] *=
2285 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2286 		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		/* group size unit is 512 KiB: << 19 */
2287 		mmc->capacity_gp[i] <<= 19;
2290 #ifndef CONFIG_SPL_BUILD
2291 	if (part_completed) {
2292 		mmc->enh_user_size =
2293 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2294 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2295 			ext_csd[EXT_CSD_ENH_SIZE_MULT];
2296 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2297 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2298 		mmc->enh_user_size <<= 19;
2299 		mmc->enh_user_start =
2300 			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2301 			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2302 			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2303 			ext_csd[EXT_CSD_ENH_START_ADDR];
		/* high-capacity devices address in 512-byte sectors */
2304 		if (mmc->high_capacity)
2305 			mmc->enh_user_start <<= 9;
2310 	 * Host needs to enable ERASE_GRP_DEF bit if device is
2311 	 * partitioned. This bit will be lost every time after a reset
2312 	 * or power off. This will affect erase size.
2316 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2317 	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2320 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2321 				 EXT_CSD_ERASE_GROUP_DEF, 1);
		/* keep the cached copy consistent with the card */
2326 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2329 	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2330 #if CONFIG_IS_ENABLED(MMC_WRITE)
2331 		/* Read out group size from ext_csd */
2332 		mmc->erase_grp_size =
2333 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2336 		 * if high capacity and partition setting completed
2337 		 * SEC_COUNT is valid even if it is smaller than 2 GiB
2338 		 * JEDEC Standard JESD84-B45, 6.2.4
2340 		if (mmc->high_capacity && part_completed) {
2341 			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2342 				   (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2343 				   (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2344 				   (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2345 			capacity *= MMC_MAX_BLOCK_LEN;
2346 			mmc->capacity_user = capacity;
2349 #if CONFIG_IS_ENABLED(MMC_WRITE)
2351 		/* Calculate the group size from the csd value. */
2352 		int erase_gsz, erase_gmul;
2354 		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2355 		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2356 		mmc->erase_grp_size = (erase_gsz + 1)
2360 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2361 	mmc->hc_wp_grp_size = 1024
2362 		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2363 		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2366 	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2371 #if !CONFIG_IS_ENABLED(MMC_TINY)
	/* error path: drop the cached EXT_CSD copy */
2374 	mmc->ext_csd = NULL;
/*
 * Bring an already-identified card from identification to transfer state
 * and fill in the mmc/blk descriptors.
 *
 * Sequence: (SPI only) enable CRC; ALL_SEND_CID / SEND_CID; set (MMC) or
 * fetch (SD) the relative card address; read and decode the CSD (version,
 * legacy clock from fbase/multipliers tables, block lengths, raw
 * capacity); optionally program the DSR; SELECT_CARD; run the v4+
 * startup (mmc_startup_v4); then negotiate bus mode/width (or force
 * legacy 1-bit under MMC_TINY) and populate the block descriptor,
 * including vendor/product/revision strings decoded from the CID.
 * Many error-check lines are elided in this view.
 */
2379 static int mmc_startup(struct mmc *mmc)
2385 	struct blk_desc *bdesc;
2387 #ifdef CONFIG_MMC_SPI_CRC_ON
2388 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2389 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2390 		cmd.resp_type = MMC_RSP_R1;
2392 		err = mmc_send_cmd(mmc, &cmd, NULL);
2398 	/* Put the Card in Identify Mode */
2399 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2400 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2401 	cmd.resp_type = MMC_RSP_R2;
2404 	err = mmc_send_cmd(mmc, &cmd, NULL);
2406 #ifdef CONFIG_MMC_QUIRKS
2407 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2410 		 * It has been seen that SEND_CID may fail on the first
2411 		 * attempt, let's try a few more time
2414 			err = mmc_send_cmd(mmc, &cmd, NULL);
2417 		} while (retries--);
2424 	memcpy(mmc->cid, cmd.response, 16);
2427 	 * For MMC cards, set the Relative Address.
2428 	 * For SD cards, get the Relatvie Address.
2429 	 * This also puts the cards into Standby State
2431 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2432 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2433 		cmd.cmdarg = mmc->rca << 16;
2434 		cmd.resp_type = MMC_RSP_R6;
2436 		err = mmc_send_cmd(mmc, &cmd, NULL);
		/* SD assigns the RCA; read it back from the response */
2442 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2445 	/* Get the Card-Specific Data */
2446 	cmd.cmdidx = MMC_CMD_SEND_CSD;
2447 	cmd.resp_type = MMC_RSP_R2;
2448 	cmd.cmdarg = mmc->rca << 16;
2450 	err = mmc_send_cmd(mmc, &cmd, NULL);
2455 	mmc->csd[0] = cmd.response[0];
2456 	mmc->csd[1] = cmd.response[1];
2457 	mmc->csd[2] = cmd.response[2];
2458 	mmc->csd[3] = cmd.response[3];
	/* MMC only: derive spec version from CSD SPEC_VERS field */
2460 	if (mmc->version == MMC_VERSION_UNKNOWN) {
2461 		int version = (cmd.response[0] >> 26) & 0xf;
2465 			mmc->version = MMC_VERSION_1_2;
2468 			mmc->version = MMC_VERSION_1_4;
2471 			mmc->version = MMC_VERSION_2_2;
2474 			mmc->version = MMC_VERSION_3;
2477 			mmc->version = MMC_VERSION_4;
2480 			mmc->version = MMC_VERSION_1_2;
2485 	/* divide frequency by 10, since the mults are 10x bigger */
2486 	freq = fbase[(cmd.response[0] & 0x7)];
2487 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2489 	mmc->legacy_speed = freq * mult;
2490 	mmc_select_mode(mmc, MMC_LEGACY);
2492 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2493 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2494 #if CONFIG_IS_ENABLED(MMC_WRITE)
2497 		mmc->write_bl_len = mmc->read_bl_len;
2499 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2502 	if (mmc->high_capacity) {
2503 		csize = (mmc->csd[1] & 0x3f) << 16
2504 			| (mmc->csd[2] & 0xffff0000) >> 16;
2507 		csize = (mmc->csd[1] & 0x3ff) << 2
2508 			| (mmc->csd[2] & 0xc0000000) >> 30;
2509 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
2512 	mmc->capacity_user = (csize + 1) << (cmult + 2);
2513 	mmc->capacity_user *= mmc->read_bl_len;
2514 	mmc->capacity_boot = 0;
2515 	mmc->capacity_rpmb = 0;
2516 	for (i = 0; i < 4; i++)
2517 		mmc->capacity_gp[i] = 0;
	/* clamp block lengths to what the stack supports */
2519 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2520 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2522 #if CONFIG_IS_ENABLED(MMC_WRITE)
2523 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2524 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	/* program the DSR only if the card implements it and one is set */
2527 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2528 		cmd.cmdidx = MMC_CMD_SET_DSR;
2529 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2530 		cmd.resp_type = MMC_RSP_NONE;
2531 		if (mmc_send_cmd(mmc, &cmd, NULL))
2532 			pr_warn("MMC: SET_DSR failed\n");
2535 	/* Select the card, and put it into Transfer Mode */
2536 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2537 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2538 		cmd.resp_type = MMC_RSP_R1;
2539 		cmd.cmdarg = mmc->rca << 16;
2540 		err = mmc_send_cmd(mmc, &cmd, NULL);
2547 	 * For SD, its erase group is always one sector
2549 #if CONFIG_IS_ENABLED(MMC_WRITE)
2550 	mmc->erase_grp_size = 1;
2552 	mmc->part_config = MMCPART_NOAVAILABLE;
2554 	err = mmc_startup_v4(mmc);
2558 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2562 #if CONFIG_IS_ENABLED(MMC_TINY)
	/* MMC_TINY: skip negotiation, stay in 1-bit legacy mode */
2563 	mmc_set_clock(mmc, mmc->legacy_speed, false);
2564 	mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2565 	mmc_set_bus_width(mmc, 1);
2568 		err = sd_get_capabilities(mmc);
2571 		err = sd_select_mode_and_width(mmc, mmc->card_caps);
2573 		err = mmc_get_capabilities(mmc);
2576 		err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2582 	mmc->best_mode = mmc->selected_mode;
2584 	/* Fix the block length for DDR mode */
2585 	if (mmc->ddr_mode) {
2586 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2587 #if CONFIG_IS_ENABLED(MMC_WRITE)
2588 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2592 	/* fill in device description */
2593 	bdesc = mmc_get_blk_desc(mmc);
2597 	bdesc->blksz = mmc->read_bl_len;
2598 	bdesc->log2blksz = LOG2(bdesc->blksz);
2599 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2600 #if !defined(CONFIG_SPL_BUILD) || \
2601 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2602 		!CONFIG_IS_ENABLED(USE_TINY_PRINTF))
	/* decode manufacturer/serial, product name and revision from CID */
2603 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2604 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2605 		(mmc->cid[3] >> 16) & 0xffff);
2606 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2607 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2608 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2609 		(mmc->cid[2] >> 24) & 0xff);
2610 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2611 		(mmc->cid[2] >> 16) & 0xf);
2613 	bdesc->vendor[0] = 0;
2614 	bdesc->product[0] = 0;
2615 	bdesc->revision[0] = 0;
2618 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * Send SD CMD8 (SEND_IF_COND) to probe for an SD v2.00+ card.  The
 * argument carries the supported-voltage bit (set when the host supports
 * any voltage in the 2.7-3.6V window) and the 0xaa check pattern; a card
 * that echoes the pattern back is marked SD_VERSION_2.
 */
2625 static int mmc_send_if_cond(struct mmc *mmc)
2630 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2631 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2632 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2633 	cmd.resp_type = MMC_RSP_R7;
2635 	err = mmc_send_cmd(mmc, &cmd, NULL);
2640 	if ((cmd.response[0] & 0xff) != 0xaa)
2643 		mmc->version = SD_VERSION_2;
2648 #if !CONFIG_IS_ENABLED(DM_MMC)
2649 /* board-specific MMC power initializations. */
2650 __weak void board_mmc_power_init(void)
/*
 * Resolve the card's power supplies.  With driver model + regulators,
 * look up the optional "vmmc-supply" (card power) and "vqmmc-supply"
 * (I/O voltage) regulators; a missing supply is only a debug message,
 * not an error.  Without DM, fall back to the board hook.
 */
2655 static int mmc_power_init(struct mmc *mmc)
2657 #if CONFIG_IS_ENABLED(DM_MMC)
2658 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2661 	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2664 		pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2666 	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2667 					  &mmc->vqmmc_supply);
2669 		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2671 #else /* !CONFIG_DM_MMC */
2673 	 * Driver model should use a regulator, as above, rather than calling
2674 	 * out to board code.
2676 	board_mmc_power_init();
2682 * put the host in the initial state:
2683 * - turn on Vdd (card power supply)
2684 * - configure the bus width and clock to minimal values
2686 static void mmc_set_initial_state(struct mmc *mmc)
2690 	/* First try to set 3.3V. If it fails set to 1.8V */
2691 	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2693 		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2695 		pr_warn("mmc: failed to set signal voltage\n");
	/* minimal bus configuration: legacy mode, 1 bit, clock enabled */
2697 	mmc_select_mode(mmc, MMC_LEGACY);
2698 	mmc_set_bus_width(mmc, 1);
2699 	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/*
 * Turn on the card's Vdd supply (vmmc regulator, when present under
 * DM + DM_REGULATOR).  A failure to enable is reported but the power-up
 * delay / return that follow are elided in this view.
 */
2702 static int mmc_power_on(struct mmc *mmc)
2704 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2705 	if (mmc->vmmc_supply) {
2706 		int ret = regulator_set_enable(mmc->vmmc_supply, true);
2709 			puts("Error enabling VMMC supply\n");
/*
 * Power the card down: gate the clock, then disable the vmmc regulator
 * when one was found.  Disable failure is only a debug message.
 */
2717 static int mmc_power_off(struct mmc *mmc)
2719 	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2720 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2721 	if (mmc->vmmc_supply) {
2722 		int ret = regulator_set_enable(mmc->vmmc_supply, false);
2725 			pr_debug("Error disabling VMMC supply\n");
/*
 * Full power cycle of the card: power off (supply and host-level), wait
 * long enough to guarantee the card actually loses power, then power on
 * again.  Used to recover from failed UHS voltage switching.
 */
2733 static int mmc_power_cycle(struct mmc *mmc)
2737 	ret = mmc_power_off(mmc);
2741 	ret = mmc_host_power_cycle(mmc);
2746 	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2747 	 * to be on the safer side.
2750 	return mmc_power_on(mmc);
2753 int mmc_get_op_cond(struct mmc *mmc)
2755 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2761 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2762 mmc_adapter_card_type_ident();
2764 err = mmc_power_init(mmc);
2768 #ifdef CONFIG_MMC_QUIRKS
2769 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2770 MMC_QUIRK_RETRY_SEND_CID |
2771 MMC_QUIRK_RETRY_APP_CMD;
2774 err = mmc_power_cycle(mmc);
2777 * if power cycling is not supported, we should not try
2778 * to use the UHS modes, because we wouldn't be able to
2779 * recover from an error during the UHS initialization.
2781 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2783 mmc->host_caps &= ~UHS_CAPS;
2784 err = mmc_power_on(mmc);
2789 #if CONFIG_IS_ENABLED(DM_MMC)
2790 /* The device has already been probed ready for use */
2792 /* made sure it's not NULL earlier */
2793 err = mmc->cfg->ops->init(mmc);
2800 mmc_set_initial_state(mmc);
2802 /* Reset the Card */
2803 err = mmc_go_idle(mmc);
2808 /* The internal partition reset to user partition(0) at every CMD0*/
2809 mmc_get_blk_desc(mmc)->hwpart = 0;
2811 /* Test for SD version 2 */
2812 err = mmc_send_if_cond(mmc);
2814 /* Now try to get the SD card's operating condition */
2815 err = sd_send_op_cond(mmc, uhs_en);
2816 if (err && uhs_en) {
2818 mmc_power_cycle(mmc);
2822 /* If the command timed out, we check for an MMC card */
2823 if (err == -ETIMEDOUT) {
2824 err = mmc_send_op_cond(mmc);
2827 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2828 pr_err("Card did not respond to voltage select!\n");
/*
 * First (non-blocking) half of card initialization: set up baseline host
 * capabilities, run any deferred DM probe, check card presence via
 * card-detect (and a non-NULL init op on legacy hosts), then kick off
 * the operating-condition sequence.  On success init_in_progress is set
 * so mmc_init() knows to complete the job later.
 */
2837 int mmc_start_init(struct mmc *mmc)
2843 	 * all hosts are capable of 1 bit bus-width and able to use the legacy
2846 	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2847 			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2848 #if CONFIG_IS_ENABLED(DM_MMC)
2849 	mmc_deferred_probe(mmc);
2851 #if !defined(CONFIG_MMC_BROKEN_CD)
2852 	no_card = mmc_getcd(mmc) == 0;
2856 #if !CONFIG_IS_ENABLED(DM_MMC)
2857 	/* we pretend there's no card when init is NULL */
2858 	no_card = no_card || (mmc->cfg->ops->init == NULL);
2862 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2863 		pr_err("MMC: no card present\n");
2868 	err = mmc_get_op_cond(mmc);
2871 		mmc->init_in_progress = 1;
/*
 * Second half of initialization: finish a pending OCR poll if one was
 * started, then run the full startup (CID/CSD/EXT_CSD, mode selection).
 * Clears init_in_progress regardless of outcome.
 */
2876 static int mmc_complete_init(struct mmc *mmc)
2880 	mmc->init_in_progress = 0;
2881 	if (mmc->op_cond_pending)
2882 		err = mmc_complete_op_cond(mmc);
2885 		err = mmc_startup(mmc);
/*
 * Public entry point: run both halves of initialization (start +
 * complete) unless a non-blocking init is already in flight, and log the
 * elapsed time.  Under DM the uclass priv is consulted (presumably to
 * short-circuit when the device is already initialized — the check
 * itself is elided in this view).
 */
2893 int mmc_init(struct mmc *mmc)
2896 	__maybe_unused ulong start;
2897 #if CONFIG_IS_ENABLED(DM_MMC)
2898 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2905 	start = get_timer(0);
2907 	if (!mmc->init_in_progress)
2908 		err = mmc_start_init(mmc);
2911 		err = mmc_complete_init(mmc);
2913 		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2918 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2919 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2920 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Downgrade the card out of any high-speed/UHS mode before handing off
 * (e.g. to an OS): re-run mode selection with the UHS capabilities
 * masked out for SD, or HS200/HS400 masked out for eMMC, leaving the
 * card in a mode a naive driver can take over from.
 */
2921 int mmc_deinit(struct mmc *mmc)
2929 		caps_filtered = mmc->card_caps &
2930 			~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2931 			  MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2932 			  MMC_CAP(UHS_SDR104));
2934 		return sd_select_mode_and_width(mmc, caps_filtered);
2936 		caps_filtered = mmc->card_caps &
2937 			~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2939 		return mmc_select_mode_and_width(mmc, caps_filtered);
2944 int mmc_set_dsr(struct mmc *mmc, u16 val)
2950 /* CPU-specific MMC initializations */
2951 __weak int cpu_mmc_init(bd_t *bis)
2956 /* board-specific MMC initializations. */
2957 __weak int board_mmc_init(bd_t *bis)
/* Mark a device for early (pre-command-line) initialization. */
2962 void mmc_set_preinit(struct mmc *mmc, int preinit)
2964 	mmc->preinit = preinit;
2967 #if CONFIG_IS_ENABLED(DM_MMC)
2968 static int mmc_probe(bd_t *bis)
2972 struct udevice *dev;
2974 ret = uclass_get(UCLASS_MMC, &uc);
2979 * Try to add them in sequence order. Really with driver model we
2980 * should allow holes, but the current MMC list does not allow that.
2981 * So if we request 0, 1, 3 we will get 0, 1, 2.
2983 for (i = 0; ; i++) {
2984 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2988 uclass_foreach_dev(dev, uc) {
2989 ret = device_probe(dev);
2991 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-DM variant: delegate to the board's MMC registration hook. */
2997 static int mmc_probe(bd_t *bis)
2999 	if (board_mmc_init(bis) < 0)
/*
 * One-time subsystem initialization: guard against repeat calls, probe
 * all controllers, and (outside SPL) print the discovered device list.
 */
3006 int mmc_initialize(bd_t *bis)
3008 	static int initialized = 0;
3010 	if (initialized)	/* Avoid initializing mmc multiple times */
3014 #if !CONFIG_IS_ENABLED(BLK)
3015 #if !CONFIG_IS_ENABLED(MMC_TINY)
3019 	ret = mmc_probe(bis);
3023 #ifndef CONFIG_SPL_BUILD
3024 	print_mmc_devices(',');
3031 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Fully initialize MMC device number 'num' (DM only): get/probe the
 * uclass device, fetch its struct mmc, and — with the FSL adapter-ident
 * workaround — force preinit on it.  The trailing init call is elided
 * in this view.
 */
3032 int mmc_init_device(int num)
3034 	struct udevice *dev;
3038 	ret = uclass_get_device(UCLASS_MMC, num, &dev);
3042 	m = mmc_get_mmc_dev(dev);
3045 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
3046 	mmc_set_preinit(m, 1);
3055 #ifdef CONFIG_CMD_BKOPS_ENABLE
3056 int mmc_set_bkops_enable(struct mmc *mmc)
3059 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3061 err = mmc_send_ext_csd(mmc, ext_csd);
3063 puts("Could not get ext_csd register values\n");
3067 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3068 puts("Background operations not supported on device\n");
3069 return -EMEDIUMTYPE;
3072 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3073 puts("Background operations already enabled\n");
3077 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3079 puts("Failed to enable manual background operations\n");
3083 puts("Enabled manual background operations\n");