1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
25 static int mmc_power_cycle(struct mmc *mmc);
26 #if !CONFIG_IS_ENABLED(MMC_TINY)
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
30 #if !CONFIG_IS_ENABLED(DM_MMC)
32 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
33 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
39 __weak int board_mmc_getwp(struct mmc *mmc)
44 /*
44  * Return the write-protect state of the card.
44  * The board hook board_mmc_getwp() is consulted as well as the
44  * controller driver's getwp op; exact precedence logic is in lines
44  * elided from this excerpt -- NOTE(review): non-contiguous extract.
44  */
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
67 /* Trace helper: print command index and argument before it is sent. */
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
73 /*
73  * Trace helper: print the driver return code and decode the response
73  * words according to cmd->resp_type. For R2 (CSD/CID) responses the
73  * four 32-bit words are also hex-dumped byte by byte.
73  */
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
109 /* walk the response word backwards, one byte per iteration */
109 for (j = 0; j < 4; j++)
110 printf("%02x ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
119 printf("\t\tERROR MMC rsp not supported\n");
125 /* Trace helper: extract and print the CURRENT_STATE field (bits 12:9)
125  * of the card status word from a CMD13 response. */
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
135 /*
135  * Map a bus_mode enum value to a human-readable name for logging.
135  * Returns "Unknown mode" for out-of-range values.
135  */
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [SD_LEGACY] = "SD Legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
160 /*
160  * Return the nominal bus clock (Hz) for a given bus mode.
160  * MMC_LEGACY is special-cased to the card-reported legacy speed;
160  * behaviour for modes >= MMC_MODES_END is in an elided line.
160  */
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
164 [SD_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
186 /*
186  * Record the chosen bus mode on the mmc struct: selected mode,
186  * the corresponding transfer speed and whether the mode is DDR.
186  */
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
197 /*
197  * Send a command (and optional data transfer) via the controller
197  * driver's send_cmd op, with trace hooks before and after.
197  */
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
209 /*
209  * Issue CMD13 (SEND_STATUS) and store the card status word in
209  * *status. In SPI mode no RCA argument is needed. Retries up to
209  * 5 times (retry loop partly elided in this excerpt).
209  */
209 int mmc_send_status(struct mmc *mmc, unsigned int *status)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 mmc_trace_state(mmc, &cmd);
223 *status = cmd.response[0];
227 mmc_trace_state(mmc, &cmd);
231 /*
231  * Poll CMD13 until the card reports ready-for-data and has left the
231  * programming state, reporting any status error bits. Loop/timeout
231  * plumbing is elided; 'timeout' is presumably in ms -- TODO confirm.
231  */
231 int mmc_poll_for_busy(struct mmc *mmc, int timeout)
237 err = mmc_send_status(mmc, &status);
241 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
242 (status & MMC_STATUS_CURR_STATE) !=
246 if (status & MMC_STATUS_MASK) {
247 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
248 pr_err("Status Error: 0x%08x\n", status);
260 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
261 pr_err("Timeout waiting card ready\n")
269 /*
269  * Issue CMD16 (SET_BLOCKLEN) to set the card's block length.
269  * With CONFIG_MMC_QUIRKS, retries on cards known to fail the
269  * first attempt (MMC_QUIRK_RETRY_SET_BLOCKLEN).
269  */
269 int mmc_set_blocklen(struct mmc *mmc, int len)
277 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
278 cmd.resp_type = MMC_RSP_R1;
281 err = mmc_send_cmd(mmc, &cmd, NULL);
283 #ifdef CONFIG_MMC_QUIRKS
284 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
287 * It has been seen that SET_BLOCKLEN may fail on the first
288 * attempt, let's try a few more times
291 err = mmc_send_cmd(mmc, &cmd, NULL);
301 #ifdef MMC_SUPPORTS_TUNING
302 /* Standard 64-byte tuning block pattern for 4-bit bus tuning
302  * (sent by the card in response to the tuning command). */
302 static const u8 tuning_blk_pattern_4bit[] = {
303 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
304 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
305 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
306 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
307 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
308 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
309 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
310 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
313 /* Standard 128-byte tuning block pattern for 8-bit bus tuning. */
313 static const u8 tuning_blk_pattern_8bit[] = {
314 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
315 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
316 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
317 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
318 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
319 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
320 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
321 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
322 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
323 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
324 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
325 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
326 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
327 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
328 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
329 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
332 /*
332  * Send a tuning command (opcode) and read back the tuning block,
332  * comparing it against the reference pattern for the current bus
332  * width. Returns non-zero via memcmp-mismatch path (elided).
332  */
332 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
335 struct mmc_data data;
336 const u8 *tuning_block_pattern;
339 if (mmc->bus_width == 8) {
340 tuning_block_pattern = tuning_blk_pattern_8bit;
341 size = sizeof(tuning_blk_pattern_8bit);
342 } else if (mmc->bus_width == 4) {
343 tuning_block_pattern = tuning_blk_pattern_4bit;
344 size = sizeof(tuning_blk_pattern_4bit);
349 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
353 cmd.resp_type = MMC_RSP_R1;
355 data.dest = (void *)data_buf;
357 data.blocksize = size;
358 data.flags = MMC_DATA_READ;
360 err = mmc_send_cmd(mmc, &cmd, &data);
364 if (memcmp(data_buf, tuning_block_pattern, size))
371 /*
371  * Read blkcnt blocks starting at 'start' into dst using CMD17/CMD18.
371  * High-capacity cards address by block number, standard-capacity
371  * cards by byte offset. Multi-block reads are terminated with CMD12.
371  */
371 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
375 struct mmc_data data;
378 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
380 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
382 if (mmc->high_capacity)
385 cmd.cmdarg = start * mmc->read_bl_len;
387 cmd.resp_type = MMC_RSP_R1;
390 data.blocks = blkcnt;
391 data.blocksize = mmc->read_bl_len;
392 data.flags = MMC_DATA_READ;
394 if (mmc_send_cmd(mmc, &cmd, &data))
398 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
400 cmd.resp_type = MMC_RSP_R1b;
401 if (mmc_send_cmd(mmc, &cmd, NULL)) {
402 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
403 pr_err("mmc fail to send stop cmd\n");
412 #if CONFIG_IS_ENABLED(BLK)
413 /*
413  * Block-device read entry point. With CONFIG_BLK the device comes in
413  * as a udevice; otherwise as a blk_desc. Selects the hw partition,
413  * range-checks the request against the device size, sets the block
413  * length and reads in chunks of at most cfg->b_max blocks.
413  * Returns the number of blocks read (0 on failure).
413  */
413 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
415 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
419 #if CONFIG_IS_ENABLED(BLK)
420 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
422 int dev_num = block_dev->devnum;
424 lbaint_t cur, blocks_todo = blkcnt;
429 struct mmc *mmc = find_mmc_device(dev_num);
433 if (CONFIG_IS_ENABLED(MMC_TINY))
434 err = mmc_switch_part(mmc, block_dev->hwpart);
436 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
441 if ((start + blkcnt) > block_dev->lba) {
442 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
443 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
444 start + blkcnt, block_dev->lba);
449 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
450 pr_debug("%s: Failed to set blocklen\n", __func__);
455 /* clamp each transfer to the controller's max block count */
455 cur = (blocks_todo > mmc->cfg->b_max) ?
456 mmc->cfg->b_max : blocks_todo;
457 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
458 pr_debug("%s: Failed to read blocks\n", __func__);
463 dst += cur * mmc->read_bl_len;
464 } while (blocks_todo > 0);
469 /* Issue CMD0 (GO_IDLE_STATE) to reset the card to idle state. */
469 static int mmc_go_idle(struct mmc *mmc)
476 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
478 cmd.resp_type = MMC_RSP_NONE;
480 err = mmc_send_cmd(mmc, &cmd, NULL);
490 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
491 /*
491  * Perform the SD voltage switch sequence (CMD11): gate the clock,
491  * change the host signal voltage, re-enable the clock and verify
491  * via dat[0] that the card accepted the switch.
491  */
491 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
497 * Send CMD11 only if the request is to switch the card to
500 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
501 return mmc_set_signal_voltage(mmc, signal_voltage);
503 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
505 cmd.resp_type = MMC_RSP_R1;
507 err = mmc_send_cmd(mmc, &cmd, NULL);
511 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
515 * The card should drive cmd and dat[0:3] low immediately
516 * after the response of cmd11, but wait 100 us to be sure
518 err = mmc_wait_dat0(mmc, 0, 100);
525 * During a signal voltage level switch, the clock must be gated
526 * for 5 ms according to the SD spec
528 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
530 err = mmc_set_signal_voltage(mmc, signal_voltage);
534 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
536 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
539 * Failure to switch is indicated by the card holding
540 * dat[0:3] low. Wait for at least 1 ms according to spec
542 err = mmc_wait_dat0(mmc, 1, 1000);
552 /*
552  * SD initialization: send ACMD41 (APP_SEND_OP_COND), negotiating
552  * voltage, high-capacity (HCS) and optionally 1.8V signaling (S18R).
552  * Loops until the card clears its busy flag (loop elided here), then
552  * records OCR, version and high-capacity status.
552  */
552 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
559 cmd.cmdidx = MMC_CMD_APP_CMD;
560 cmd.resp_type = MMC_RSP_R1;
563 err = mmc_send_cmd(mmc, &cmd, NULL);
568 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
569 cmd.resp_type = MMC_RSP_R3;
572 * Most cards do not answer if some reserved bits
573 * in the ocr are set. However, Some controller
574 * can set bit 7 (reserved for low voltages), but
575 * how to manage low voltages SD card is not yet
578 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
579 (mmc->cfg->voltages & 0xff8000);
581 if (mmc->version == SD_VERSION_2)
582 cmd.cmdarg |= OCR_HCS;
585 cmd.cmdarg |= OCR_S18R;
587 err = mmc_send_cmd(mmc, &cmd, NULL);
592 if (cmd.response[0] & OCR_BUSY)
601 if (mmc->version != SD_VERSION_2)
602 mmc->version = SD_VERSION_1_0;
604 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
605 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
606 cmd.resp_type = MMC_RSP_R3;
609 err = mmc_send_cmd(mmc, &cmd, NULL);
615 mmc->ocr = cmd.response[0];
617 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
618 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
620 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
626 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
632 /*
632  * One iteration of eMMC CMD1 (SEND_OP_COND). When use_arg is set
632  * (and not in SPI mode), request HCS plus the intersection of host
632  * and card voltages; the OCR response is stored on the mmc struct.
632  */
632 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
637 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
638 cmd.resp_type = MMC_RSP_R3;
640 if (use_arg && !mmc_host_is_spi(mmc))
641 cmd.cmdarg = OCR_HCS |
642 (mmc->cfg->voltages &
643 (mmc->ocr & OCR_VOLTAGE_MASK)) |
644 (mmc->ocr & OCR_ACCESS_MODE);
646 err = mmc_send_cmd(mmc, &cmd, NULL);
649 mmc->ocr = cmd.response[0];
653 /*
653  * Start eMMC initialization: probe the card's OCR with two CMD1
653  * iterations. If the card is still busy, completion is deferred
653  * to mmc_complete_op_cond() via op_cond_pending.
653  */
653 static int mmc_send_op_cond(struct mmc *mmc)
657 /* Some cards seem to need this */
660 /* Ask the card for its capabilities */
661 for (i = 0; i < 2; i++) {
662 err = mmc_send_op_cond_iter(mmc, i != 0);
666 /* exit if not busy (flag seems to be inverted) */
667 if (mmc->ocr & OCR_BUSY)
670 mmc->op_cond_pending = 1;
674 /*
674  * Finish deferred eMMC initialization: poll CMD1 until the card
674  * leaves busy or the timeout expires, read the OCR in SPI mode,
674  * and record version/high-capacity status.
674  */
674 static int mmc_complete_op_cond(struct mmc *mmc)
681 mmc->op_cond_pending = 0;
682 if (!(mmc->ocr & OCR_BUSY)) {
683 /* Some cards seem to need this */
686 start = get_timer(0);
688 err = mmc_send_op_cond_iter(mmc, 1);
691 if (mmc->ocr & OCR_BUSY)
693 if (get_timer(start) > timeout)
699 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
700 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
701 cmd.resp_type = MMC_RSP_R3;
704 err = mmc_send_cmd(mmc, &cmd, NULL);
709 mmc->ocr = cmd.response[0];
712 mmc->version = MMC_VERSION_UNKNOWN;
714 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
721 /*
721  * Read the 512-byte EXT_CSD register into ext_csd via
721  * CMD8 (SEND_EXT_CSD). Caller provides a cache-aligned buffer.
721  */
721 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
724 struct mmc_data data;
727 /* Read the EXT_CSD register as a single data block */
728 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
729 cmd.resp_type = MMC_RSP_R1;
732 data.dest = (char *)ext_csd;
734 data.blocksize = MMC_MAX_BLOCK_LEN;
735 data.flags = MMC_DATA_READ;
737 err = mmc_send_cmd(mmc, &cmd, &data);
742 /*
742  * Write one EXT_CSD byte via CMD6 (SWITCH), retrying on failure,
742  * then wait for the card to become ready again. The final 'bool'
742  * parameter (name elided) controls post-switch status polling.
742  */
742 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
750 cmd.cmdidx = MMC_CMD_SWITCH;
751 cmd.resp_type = MMC_RSP_R1b;
752 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
756 while (retries > 0) {
757 ret = mmc_send_cmd(mmc, &cmd, NULL);
769 /* Waiting for the ready status */
770 return mmc_poll_for_busy(mmc, timeout);
777 /* Public CMD6 wrapper: __mmc_switch with status polling enabled. */
777 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
779 return __mmc_switch(mmc, set, index, value, true);
782 #if !CONFIG_IS_ENABLED(MMC_TINY)
783 /*
783  * Program the card's HS_TIMING EXT_CSD field for the requested bus
783  * mode. When downgrading from HS200/HS400, the host clock is dropped
783  * to HS first so EXT_CSD can be read back reliably; for HS/HS_52 the
783  * switch is verified by re-reading EXT_CSD.
783  */
783 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
789 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
795 speed_bits = EXT_CSD_TIMING_HS;
797 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
799 speed_bits = EXT_CSD_TIMING_HS200;
802 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
804 speed_bits = EXT_CSD_TIMING_HS400;
808 speed_bits = EXT_CSD_TIMING_LEGACY;
814 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
815 speed_bits, !hsdowngrade);
819 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
820 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
822 * In case the eMMC is in HS200/HS400 mode and we are downgrading
823 * to HS mode, the card clock is still running much faster than
824 * the supported HS mode clock, so we can not reliably read out
825 * Extended CSD. Reconfigure the controller to run at HS mode.
828 mmc_select_mode(mmc, MMC_HS);
829 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
833 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
834 /* Now check to see that it worked */
835 err = mmc_send_ext_csd(mmc, test_csd);
839 /* No high-speed support */
840 if (!test_csd[EXT_CSD_HS_TIMING])
847 /*
847  * Derive the eMMC card's capability mask (bus widths and speed
847  * modes) from the EXT_CSD CARD_TYPE field. Requires version >= 4;
847  * SPI hosts and older cards keep the legacy 1-bit default.
847  */
847 static int mmc_get_capabilities(struct mmc *mmc)
849 u8 *ext_csd = mmc->ext_csd;
852 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
854 if (mmc_host_is_spi(mmc))
857 /* Only version 4 supports high-speed */
858 if (mmc->version < MMC_VERSION_4)
862 pr_err("No ext_csd found!\n"); /* this should never happen */
866 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
868 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
869 mmc->cardtype = cardtype;
871 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
872 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
873 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
874 mmc->card_caps |= MMC_MODE_HS200;
877 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
878 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
879 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
880 mmc->card_caps |= MMC_MODE_HS400;
883 if (cardtype & EXT_CSD_CARD_TYPE_52) {
884 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
885 mmc->card_caps |= MMC_MODE_DDR_52MHz;
886 mmc->card_caps |= MMC_MODE_HS_52MHz;
888 if (cardtype & EXT_CSD_CARD_TYPE_26)
889 mmc->card_caps |= MMC_MODE_HS;
895 /*
895  * Update mmc->capacity and the block descriptor's LBA count for the
895  * selected hardware partition (0 = user, boot, rpmb, 4.. = GP).
895  */
895 static int mmc_set_capacity(struct mmc *mmc, int part_num)
899 mmc->capacity = mmc->capacity_user;
903 mmc->capacity = mmc->capacity_boot;
906 mmc->capacity = mmc->capacity_rpmb;
912 mmc->capacity = mmc->capacity_gp[part_num - 4];
918 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
923 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
924 /*
924  * HS200/HS400 are not allowed while a boot/RPMB partition is
924  * selected; if the current mode is forbidden (or merely not
924  * optimal), reselect mode and width with those modes masked out.
924  */
924 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
929 if (part_num & PART_ACCESS_MASK)
930 forbidden = MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400);
932 if (MMC_CAP(mmc->selected_mode) & forbidden) {
933 pr_debug("selected mode (%s) is forbidden for part %d\n",
934 mmc_mode_name(mmc->selected_mode), part_num);
936 } else if (mmc->selected_mode != mmc->best_mode) {
937 pr_debug("selected mode is not optimal\n");
942 return mmc_select_mode_and_width(mmc,
943 mmc->card_caps & ~forbidden);
948 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
949 unsigned int part_num)
955 /*
955  * Select a hardware partition via the PARTITION_CONFIG EXT_CSD
955  * field, then refresh capacity and the blk descriptor's hwpart.
955  */
955 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
959 ret = mmc_boot_part_access_chk(mmc, part_num);
963 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
964 (mmc->part_config & ~PART_ACCESS_MASK)
965 | (part_num & PART_ACCESS_MASK));
968 * Set the capacity if the switch succeeded or was intended
969 * to return to representing the raw device.
971 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
972 ret = mmc_set_capacity(mmc, part_num);
973 mmc_get_blk_desc(mmc)->hwpart = part_num;
979 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
980 /*
980  * Configure eMMC hardware partitioning (enhanced user area, GP
980  * partitions, write reliability) per 'conf'. 'mode' selects check
980  * only, set, or complete (write-once PARTITION_SETTING_COMPLETED).
980  * Requires eMMC >= 4.41 with partitioning support; sizes must be
980  * HC WP group aligned. NOTE(review): excerpt is non-contiguous.
980  */
980 int mmc_hwpart_config(struct mmc *mmc,
981 const struct mmc_hwpart_conf *conf,
982 enum mmc_hwpart_conf_mode mode)
988 u32 max_enh_size_mult;
989 u32 tot_enh_size_mult = 0;
992 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
994 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
997 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
998 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1002 if (!(mmc->part_support & PART_SUPPORT)) {
1003 pr_err("Card does not support partitioning\n");
1004 return -EMEDIUMTYPE;
1007 if (!mmc->hc_wp_grp_size) {
1008 pr_err("Card does not define HC WP group size\n");
1009 return -EMEDIUMTYPE;
1012 /* check partition alignment and total enhanced size */
1013 if (conf->user.enh_size) {
1014 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1015 conf->user.enh_start % mmc->hc_wp_grp_size) {
1016 pr_err("User data enhanced area not HC WP group "
1020 part_attrs |= EXT_CSD_ENH_USR;
1021 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1022 if (mmc->high_capacity) {
1023 enh_start_addr = conf->user.enh_start;
1025 enh_start_addr = (conf->user.enh_start << 9);
1031 tot_enh_size_mult += enh_size_mult;
1033 for (pidx = 0; pidx < 4; pidx++) {
1034 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1035 pr_err("GP%i partition not HC WP group size "
1036 "aligned\n", pidx+1);
1039 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1040 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1041 part_attrs |= EXT_CSD_ENH_GP(pidx);
1042 tot_enh_size_mult += gp_size_mult[pidx];
1046 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1047 pr_err("Card does not support enhanced attribute\n");
1048 return -EMEDIUMTYPE;
1051 err = mmc_send_ext_csd(mmc, ext_csd);
1056 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1057 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1058 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1059 if (tot_enh_size_mult > max_enh_size_mult) {
1060 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1061 tot_enh_size_mult, max_enh_size_mult);
1062 return -EMEDIUMTYPE;
1065 /* The default value of EXT_CSD_WR_REL_SET is device
1066 * dependent, the values can only be changed if the
1067 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1068 * changed only once and before partitioning is completed. */
1069 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1070 if (conf->user.wr_rel_change) {
1071 if (conf->user.wr_rel_set)
1072 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1074 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1076 for (pidx = 0; pidx < 4; pidx++) {
1077 if (conf->gp_part[pidx].wr_rel_change) {
1078 if (conf->gp_part[pidx].wr_rel_set)
1079 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1081 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1085 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1086 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1087 puts("Card does not support host controlled partition write "
1088 "reliability settings\n");
1089 return -EMEDIUMTYPE;
1092 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1093 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1094 pr_err("Card already partitioned\n");
1098 if (mode == MMC_HWPART_CONF_CHECK)
1101 /* Partitioning requires high-capacity size definitions */
1102 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1103 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1104 EXT_CSD_ERASE_GROUP_DEF, 1);
1109 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1111 /* update erase group size to be high-capacity */
1112 mmc->erase_grp_size =
1113 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1117 /* all OK, write the configuration */
1118 for (i = 0; i < 4; i++) {
1119 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1120 EXT_CSD_ENH_START_ADDR+i,
1121 (enh_start_addr >> (i*8)) & 0xFF);
1125 for (i = 0; i < 3; i++) {
1126 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1127 EXT_CSD_ENH_SIZE_MULT+i,
1128 (enh_size_mult >> (i*8)) & 0xFF);
1132 for (pidx = 0; pidx < 4; pidx++) {
1133 for (i = 0; i < 3; i++) {
1134 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1135 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1136 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1141 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1142 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1146 if (mode == MMC_HWPART_CONF_SET)
1149 /* The WR_REL_SET is a write-once register but shall be
1150 * written before setting PART_SETTING_COMPLETED. As it is
1151 * write-once we can only write it when completing the
1153 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1154 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1155 EXT_CSD_WR_REL_SET, wr_rel_set);
1160 /* Setting PART_SETTING_COMPLETED confirms the partition
1161 * configuration but it only becomes effective after power
1162 * cycle, so we do not adjust the partition related settings
1163 * in the mmc struct. */
1165 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1166 EXT_CSD_PARTITION_SETTING,
1167 EXT_CSD_PARTITION_SETTING_COMPLETED);
1175 #if !CONFIG_IS_ENABLED(DM_MMC)
1176 /*
1176  * Return the card-detect state. Mirrors mmc_getwp(): board hook
1176  * first, then the controller driver's getcd op.
1176  */
1176 int mmc_getcd(struct mmc *mmc)
1180 cd = board_mmc_getcd(mmc);
1183 if (mmc->cfg->ops->getcd)
1184 cd = mmc->cfg->ops->getcd(mmc);
1193 #if !CONFIG_IS_ENABLED(MMC_TINY)
1194 /*
1194  * Issue SD CMD6 (SWITCH_FUNC): bit 31 selects check/set mode, and
1194  * the nibble for 'group' is replaced with 'value'. The 64-byte
1194  * switch status block is read back into resp.
1194  */
1194 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1197 struct mmc_data data;
1199 /* Switch the frequency */
1200 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1201 cmd.resp_type = MMC_RSP_R1;
1202 cmd.cmdarg = (mode << 31) | 0xffffff;
1203 cmd.cmdarg &= ~(0xf << (group * 4));
1204 cmd.cmdarg |= value << (group * 4);
1206 data.dest = (char *)resp;
1207 data.blocksize = 64;
1209 data.flags = MMC_DATA_READ;
1211 return mmc_send_cmd(mmc, &cmd, &data);
1214 /*
1214  * Discover an SD card's capabilities: read the SCR (ACMD51) to get
1214  * the spec version and 4-bit support, then probe CMD6 switch status
1214  * for high-speed and (SD 3.0+) UHS bus-speed modes.
1214  */
1214 static int sd_get_capabilities(struct mmc *mmc)
1218 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1219 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1220 struct mmc_data data;
1222 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1226 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1228 if (mmc_host_is_spi(mmc))
1231 /* Read the SCR to find out if this card supports higher speeds */
1232 cmd.cmdidx = MMC_CMD_APP_CMD;
1233 cmd.resp_type = MMC_RSP_R1;
1234 cmd.cmdarg = mmc->rca << 16;
1236 err = mmc_send_cmd(mmc, &cmd, NULL);
1241 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1242 cmd.resp_type = MMC_RSP_R1;
1248 data.dest = (char *)scr;
1251 data.flags = MMC_DATA_READ;
1253 err = mmc_send_cmd(mmc, &cmd, &data);
1262 mmc->scr[0] = __be32_to_cpu(scr[0]);
1263 mmc->scr[1] = __be32_to_cpu(scr[1]);
1265 /* SD_SPEC field: 0 = 1.0, 1 = 1.10, 2 = 2.00/3.0x */
1265 switch ((mmc->scr[0] >> 24) & 0xf) {
1267 mmc->version = SD_VERSION_1_0;
1270 mmc->version = SD_VERSION_1_10;
1273 mmc->version = SD_VERSION_2;
1274 if ((mmc->scr[0] >> 15) & 0x1)
1275 mmc->version = SD_VERSION_3;
1278 mmc->version = SD_VERSION_1_0;
1282 if (mmc->scr[0] & SD_DATA_4BIT)
1283 mmc->card_caps |= MMC_MODE_4BIT;
1285 /* Version 1.0 doesn't support switching */
1286 if (mmc->version == SD_VERSION_1_0)
1291 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1292 (u8 *)switch_status);
1297 /* The high-speed function is busy. Try again */
1298 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1302 /* If high-speed isn't supported, we return */
1303 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1304 mmc->card_caps |= MMC_CAP(SD_HS);
1306 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1307 /* Versions before 3.0 don't support UHS modes */
1308 if (mmc->version < SD_VERSION_3)
1311 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1312 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1313 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1314 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1315 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1316 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1317 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1318 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1319 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1320 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1321 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1327 /*
1327  * Switch an SD card's bus-speed function (CMD6 group 0) to match
1327  * the requested mode, then verify the card actually selected it
1327  * from the returned switch status.
1327  */
1327 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1331 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1334 /* SD version 1.00 and 1.01 does not support CMD 6 */
1335 if (mmc->version == SD_VERSION_1_0)
1340 speed = UHS_SDR12_BUS_SPEED;
1343 speed = HIGH_SPEED_BUS_SPEED;
1345 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1347 speed = UHS_SDR12_BUS_SPEED;
1350 speed = UHS_SDR25_BUS_SPEED;
1353 speed = UHS_SDR50_BUS_SPEED;
1356 speed = UHS_DDR50_BUS_SPEED;
1359 speed = UHS_SDR104_BUS_SPEED;
1366 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1370 /* check that the requested function was actually selected */
1370 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1376 /*
1376  * Set the SD card's bus width (1 or 4 lanes only) via
1376  * ACMD6 (SET_BUS_WIDTH).
1376  */
1376 static int sd_select_bus_width(struct mmc *mmc, int w)
1381 if ((w != 4) && (w != 1))
1384 cmd.cmdidx = MMC_CMD_APP_CMD;
1385 cmd.resp_type = MMC_RSP_R1;
1386 cmd.cmdarg = mmc->rca << 16;
1388 err = mmc_send_cmd(mmc, &cmd, NULL);
1392 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1393 cmd.resp_type = MMC_RSP_R1;
1398 err = mmc_send_cmd(mmc, &cmd, NULL);
1406 #if CONFIG_IS_ENABLED(MMC_WRITE)
1407 /*
1407  * Read the SD Status register (ACMD13, 64 bytes) and extract the
1407  * allocation-unit size plus erase timeout/offset into mmc->ssr.
1407  * AU codes above 9 are only valid on SD 3.0 cards.
1407  */
1407 static int sd_read_ssr(struct mmc *mmc)
1409 /* AU size lookup table, in 512-byte sectors, indexed by AU code */
1409 static const unsigned int sd_au_size[] = {
1410 0, SZ_16K / 512, SZ_32K / 512,
1411 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1412 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1413 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1414 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1419 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1420 struct mmc_data data;
1422 unsigned int au, eo, et, es;
1424 cmd.cmdidx = MMC_CMD_APP_CMD;
1425 cmd.resp_type = MMC_RSP_R1;
1426 cmd.cmdarg = mmc->rca << 16;
1428 err = mmc_send_cmd(mmc, &cmd, NULL);
1432 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1433 cmd.resp_type = MMC_RSP_R1;
1437 data.dest = (char *)ssr;
1438 data.blocksize = 64;
1440 data.flags = MMC_DATA_READ;
1442 err = mmc_send_cmd(mmc, &cmd, &data);
1450 /* SSR is big-endian on the wire; convert in place */
1450 for (i = 0; i < 16; i++)
1451 ssr[i] = be32_to_cpu(ssr[i]);
1453 au = (ssr[2] >> 12) & 0xF;
1454 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1455 mmc->ssr.au = sd_au_size[au];
1456 es = (ssr[3] >> 24) & 0xFF;
1457 es |= (ssr[2] & 0xFF) << 8;
1458 et = (ssr[3] >> 18) & 0x3F;
1460 eo = (ssr[3] >> 16) & 0x3;
1461 mmc->ssr.erase_timeout = (et * 1000) / es;
1462 mmc->ssr.erase_offset = eo * 1000;
1465 pr_debug("Invalid Allocation Unit Size.\n");
1471 /* frequency bases */
1472 /* divided by 10 to be nice to platforms without floating point */
1473 static const int fbase[] = {
1480 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1481 * to platforms without floating point.
1483 static const u8 multipliers[] = {
1502 static inline int bus_width(uint cap)
1504 if (cap == MMC_MODE_8BIT)
1506 if (cap == MMC_MODE_4BIT)
1508 if (cap == MMC_MODE_1BIT)
1510 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1514 #if !CONFIG_IS_ENABLED(DM_MMC)
1515 #ifdef MMC_SUPPORTS_TUNING
1516 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1522 /* Push the current bus settings to the controller driver via its
1522  * (optional) set_ios op. */
1522 static int mmc_set_ios(struct mmc *mmc)
1526 if (mmc->cfg->ops->set_ios)
1527 ret = mmc->cfg->ops->set_ios(mmc);
1533 /*
1533  * Set the bus clock, clamped to the controller's [f_min, f_max]
1533  * range, and record the enable/disable state; the change is applied
1533  * through mmc_set_ios().
1533  */
1533 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1536 if (clock > mmc->cfg->f_max)
1537 clock = mmc->cfg->f_max;
1539 if (clock < mmc->cfg->f_min)
1540 clock = mmc->cfg->f_min;
1544 mmc->clk_disable = disable;
1546 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1548 return mmc_set_ios(mmc);
1551 /* Record the host-side bus width and apply it via mmc_set_ios(). */
1551 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1553 mmc->bus_width = width;
1555 return mmc_set_ios(mmc);
1558 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1560 * helper function to display the capabilities in a human
1561 * friendly manner. The capabilities include bus width and
1564 void mmc_dump_capabilities(const char *text, uint caps)
1568 pr_debug("%s: widths [", text);
1569 if (caps & MMC_MODE_8BIT)
1571 if (caps & MMC_MODE_4BIT)
1573 if (caps & MMC_MODE_1BIT)
1575 pr_debug("\b\b] modes [");
1576 /* list every mode whose capability bit is present in caps */
1576 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1577 if (MMC_CAP(mode) & caps)
1578 pr_debug("%s, ", mmc_mode_name(mode));
1579 pr_debug("\b\b]\n");
1583 struct mode_width_tuning {
1586 #ifdef MMC_SUPPORTS_TUNING
1591 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1592 /* Convert an mmc_voltage enum value to millivolts
1592  * (fallback for unknown values is in an elided line). */
1592 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1595 case MMC_SIGNAL_VOLTAGE_000: return 0;
1596 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1597 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1598 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1603 /*
1603  * Change the host I/O signal voltage (no-op when it is already at
1603  * the requested level); applied through mmc_set_ios().
1603  */
1603 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1607 if (mmc->signal_voltage == signal_voltage)
1610 mmc->signal_voltage = signal_voltage;
1611 err = mmc_set_ios(mmc);
1613 pr_debug("unable to set voltage (err %d)\n", err);
1618 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1624 #if !CONFIG_IS_ENABLED(MMC_TINY)
1625 /* SD bus modes in order of decreasing preference; UHS entries are
1625  * compiled in only with MMC_UHS_SUPPORT (mode fields elided here). */
1625 static const struct mode_width_tuning sd_modes_by_pref[] = {
1626 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1627 #ifdef MMC_SUPPORTS_TUNING
1630 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1631 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1636 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1640 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1644 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1649 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1651 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1654 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1659 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1663 /* Iterate over sd_modes_by_pref, visiting only modes present in caps. */
1663 #define for_each_sd_mode_by_pref(caps, mwt) \
1664 for (mwt = sd_modes_by_pref;\
1665 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1667 if (caps & MMC_CAP(mwt->mode))
1669 /*
1669  * Pick the best usable SD bus mode and width: intersect card and
1669  * host capabilities, then try each preferred mode/width pair,
1669  * configuring card then host, running tuning where required, and
1669  * falling back to SD_LEGACY speed on failure.
1669  */
1669 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1672 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1673 const struct mode_width_tuning *mwt;
1674 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1675 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1677 bool uhs_en = false;
1682 mmc_dump_capabilities("sd card", card_caps);
1683 mmc_dump_capabilities("host", mmc->host_caps);
1686 /* Restrict card's capabilities by what the host can do */
1687 caps = card_caps & mmc->host_caps;
1692 for_each_sd_mode_by_pref(caps, mwt) {
1695 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1696 if (*w & caps & mwt->widths) {
1697 pr_debug("trying mode %s width %d (at %d MHz)\n",
1698 mmc_mode_name(mwt->mode),
1700 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1702 /* configure the bus width (card + host) */
1703 err = sd_select_bus_width(mmc, bus_width(*w));
1706 mmc_set_bus_width(mmc, bus_width(*w));
1708 /* configure the bus mode (card) */
1709 err = sd_set_card_speed(mmc, mwt->mode);
1713 /* configure the bus mode (host) */
1714 mmc_select_mode(mmc, mwt->mode);
1715 mmc_set_clock(mmc, mmc->tran_speed,
1718 #ifdef MMC_SUPPORTS_TUNING
1719 /* execute tuning if needed */
1720 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1721 err = mmc_execute_tuning(mmc,
1724 pr_debug("tuning failed\n");
1730 #if CONFIG_IS_ENABLED(MMC_WRITE)
1731 err = sd_read_ssr(mmc);
1733 pr_warn("unable to read ssr\n");
1739 /* revert to a safer bus speed */
1740 mmc_select_mode(mmc, SD_LEGACY);
1741 mmc_set_clock(mmc, mmc->tran_speed,
1747 pr_err("unable to select a mode\n");
/*
 * mmc_read_and_compare_ext_csd() - sanity-check the selected bus
 * configuration by re-reading EXT_CSD and comparing read-only fields
 * against the copy cached in mmc->ext_csd.  Only meaningful for MMC v4+.
 */
1752 * read and compare the part of ext csd that is constant.
1753 * This can be used to check that the transfer is working
1756 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1759 const u8 *ext_csd = mmc->ext_csd;
1760 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD only exists from MMC v4 onwards */
1762 if (mmc->version < MMC_VERSION_4)
1765 err = mmc_send_ext_csd(mmc, test_csd);
1769 /* Only compare read only fields */
1770 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1771 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1772 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1773 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1774 ext_csd[EXT_CSD_REV]
1775 == test_csd[EXT_CSD_REV] &&
1776 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1777 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1778 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1779 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1785 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - select the lowest signalling voltage that
 * both the card (per its EXT_CSD card-type bits for the given mode) and
 * the host's allowed_mask permit, trying candidates lowest-first via
 * mmc_set_signal_voltage().  NOTE(review): the mode switch that selects
 * which card-type bits to test is partially elided in this listing.
 */
1786 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1787 uint32_t allowed_mask)
1794 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1795 EXT_CSD_CARD_TYPE_HS400_1_8V))
1796 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1797 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1798 EXT_CSD_CARD_TYPE_HS400_1_2V))
1799 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1802 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1803 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1804 MMC_SIGNAL_VOLTAGE_180;
1805 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1806 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1809 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* ffs() yields the lowest set bit, i.e. the lowest usable voltage */
1813 while (card_mask & allowed_mask) {
1814 enum mmc_voltage best_match;
1816 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1817 if (!mmc_set_signal_voltage(mmc, best_match))
1820 allowed_mask &= ~best_match;
/* Stub used when MMC_IO_VOLTAGE is disabled: voltage switching is a no-op */
1826 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1827 uint32_t allowed_mask)
/*
 * eMMC bus modes in decreasing order of preference.  The fastest modes
 * (HS400/HS200, when compiled in) also record the tuning command to use.
 */
1833 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1834 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1837 .widths = MMC_MODE_8BIT,
1838 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1841 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1844 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1845 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1850 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1854 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1858 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1862 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate over mmc_modes_by_pref, visiting only modes present in "caps" */
1866 #define for_each_mmc_mode_by_pref(caps, mwt) \
1867 for (mwt = mmc_modes_by_pref;\
1868 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1870 if (caps & MMC_CAP(mwt->mode))
/*
 * Map a host width capability (plus DDR flag) to the EXT_CSD BUS_WIDTH
 * value that selects it, ordered widest-first.
 */
1872 static const struct ext_csd_bus_width {
1876 } ext_csd_bus_width[] = {
1877 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1878 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1879 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1880 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1881 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1884 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - switch an eMMC device into HS400 mode.
 * Sequence visible here: tune in HS200, drop the card back to HS,
 * switch the card to 8-bit DDR bus width, then raise card and host to
 * HS400 timing.
 */
1885 static int mmc_select_hs400(struct mmc *mmc)
1889 /* Set timing to HS200 for tuning */
1890 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1894 /* configure the bus mode (host) */
1895 mmc_select_mode(mmc, MMC_HS_200);
1896 mmc_set_clock(mmc, mmc->tran_speed, false);
1898 /* execute tuning if needed */
1899 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1901 debug("tuning failed\n");
1905 /* Set back to HS */
1906 mmc_set_card_speed(mmc, MMC_HS, true);
1908 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1909 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1913 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1917 mmc_select_mode(mmc, MMC_HS_400);
1918 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub used when HS400 support is compiled out */
1925 static int mmc_select_hs400(struct mmc *mmc)
/*
 * Iterate over ext_csd_bus_width entries that match the requested DDR
 * setting and whose width capability is present in "caps".
 * NOTE(review): the loop increment (original line 1934) is elided here.
 */
1931 #define for_each_supported_width(caps, ddr, ecbv) \
1932 for (ecbv = ext_csd_bus_width;\
1933 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1935 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * mmc_select_mode_and_width() - pick the best bus mode and width for an
 * eMMC device.
 *
 * Downgrades from HS200/HS400 to HS first (a direct transition to legacy
 * is not supported), then walks mmc_modes_by_pref and the matching
 * bus-width table.  Each candidate sets the lowest usable signal
 * voltage, switches card and host width/speed, runs tuning if needed,
 * and validates the link with an EXT_CSD read-back; on failure the
 * voltage is restored and the bus reverts to 1-bit MMC_LEGACY.
 * NOTE(review): many lines are elided in this listing — the per-step
 * error handling cannot be fully confirmed here.
 */
1937 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1940 const struct mode_width_tuning *mwt;
1941 const struct ext_csd_bus_width *ecbw;
1944 mmc_dump_capabilities("mmc", card_caps);
1945 mmc_dump_capabilities("host", mmc->host_caps);
1948 /* Restrict card's capabilities by what the host can do */
1949 card_caps &= mmc->host_caps;
1951 /* Only version 4 of MMC supports wider bus widths */
1952 if (mmc->version < MMC_VERSION_4)
1955 if (!mmc->ext_csd) {
1956 pr_debug("No ext_csd found!\n"); /* this should enver happen */
1960 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
1961 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1963 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
1964 * before doing anything else, since a transition from either of
1965 * the HS200/HS400 mode directly to legacy mode is not supported.
1967 if (mmc->selected_mode == MMC_HS_200 ||
1968 mmc->selected_mode == MMC_HS_400)
1969 mmc_set_card_speed(mmc, MMC_HS, true);
1972 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1974 for_each_mmc_mode_by_pref(card_caps, mwt) {
1975 for_each_supported_width(card_caps & mwt->widths,
1976 mmc_is_mode_ddr(mwt->mode), ecbw) {
1977 enum mmc_voltage old_voltage;
1978 pr_debug("trying mode %s width %d (at %d MHz)\n",
1979 mmc_mode_name(mwt->mode),
1980 bus_width(ecbw->cap),
1981 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* Remember the voltage so a failed attempt can restore it */
1982 old_voltage = mmc->signal_voltage;
1983 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1984 MMC_ALL_SIGNAL_VOLTAGE);
1988 /* configure the bus width (card + host) */
1989 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1991 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1994 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400 has its own multi-step switch sequence */
1996 if (mwt->mode == MMC_HS_400) {
1997 err = mmc_select_hs400(mmc);
1999 printf("Select HS400 failed %d\n", err);
2003 /* configure the bus speed (card) */
2004 err = mmc_set_card_speed(mmc, mwt->mode, false);
2009 * configure the bus width AND the ddr mode
2010 * (card). The host side will be taken care
2011 * of in the next step
2013 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2014 err = mmc_switch(mmc,
2015 EXT_CSD_CMD_SET_NORMAL,
2017 ecbw->ext_csd_bits);
2022 /* configure the bus mode (host) */
2023 mmc_select_mode(mmc, mwt->mode);
2024 mmc_set_clock(mmc, mmc->tran_speed,
2026 #ifdef MMC_SUPPORTS_TUNING
2028 /* execute tuning if needed */
2030 err = mmc_execute_tuning(mmc,
2033 pr_debug("tuning failed\n");
2040 /* do a transfer to check the configuration */
2041 err = mmc_read_and_compare_ext_csd(mmc);
2045 mmc_set_signal_voltage(mmc, old_voltage);
2046 /* if an error occured, revert to a safer bus mode */
2047 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2048 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2049 mmc_select_mode(mmc, MMC_LEGACY);
2050 mmc_set_bus_width(mmc, 1);
2054 pr_err("unable to select a mode\n");
2060 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: static EXT_CSD buffer, avoids malloc in constrained builds */
2061 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * mmc_startup_v4() - parse EXT_CSD for MMC v4+ devices: spec version,
 * sector-count capacity, partition support (boot/RPMB/GP), enhanced user
 * area, write-reliability and erase/WP group sizes.  A no-op for SD
 * cards and pre-v4 MMC.  NOTE(review): listing is elided; early-return
 * and error paths are not fully visible here.
 */
2064 static int mmc_startup_v4(struct mmc *mmc)
2068 bool has_parts = false;
2069 bool part_completed;
2070 static const u32 mmc_versions[] = {
2082 #if CONFIG_IS_ENABLED(MMC_TINY)
2083 u8 *ext_csd = ext_csd_bkup;
2085 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2089 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2091 err = mmc_send_ext_csd(mmc, ext_csd);
2095 /* store the ext csd for future reference */
2097 mmc->ext_csd = ext_csd;
2099 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2101 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2104 /* check ext_csd version and capacity */
2105 err = mmc_send_ext_csd(mmc, ext_csd);
2109 /* store the ext csd for future reference */
2111 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2114 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* Guard table lookup against EXT_CSD_REV values newer than this code */
2116 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2119 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2121 if (mmc->version >= MMC_VERSION_4_2) {
2123 * According to the JEDEC Standard, the value of
2124 * ext_csd's capacity is valid if the value is more
2127 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2128 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2129 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2130 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2131 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT capacity is only valid above 2 GiB (JESD84) */
2132 if ((capacity >> 20) > 2 * 1024)
2133 mmc->capacity_user = capacity;
2136 /* The partition data may be non-zero but it is only
2137 * effective if PARTITION_SETTING_COMPLETED is set in
2138 * EXT_CSD, so ignore any data if this bit is not set,
2139 * except for enabling the high-capacity group size
2140 * definition (see below).
2142 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2143 EXT_CSD_PARTITION_SETTING_COMPLETED);
2145 /* store the partition info of emmc */
2146 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2147 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2148 ext_csd[EXT_CSD_BOOT_MULT])
2149 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2150 if (part_completed &&
2151 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2152 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT_MULT/RPMB_MULT are in 128 KiB units, hence << 17 */
2154 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2156 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* Four general-purpose partitions, 3 size bytes each */
2158 for (i = 0; i < 4; i++) {
2159 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2160 uint mult = (ext_csd[idx + 2] << 16) +
2161 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2164 if (!part_completed)
2166 mmc->capacity_gp[i] = mult;
2167 mmc->capacity_gp[i] *=
2168 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2169 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2170 mmc->capacity_gp[i] <<= 19;
2173 #ifndef CONFIG_SPL_BUILD
2174 if (part_completed) {
2175 mmc->enh_user_size =
2176 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2177 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2178 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2179 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2180 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2181 mmc->enh_user_size <<= 19;
2182 mmc->enh_user_start =
2183 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2184 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2185 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2186 ext_csd[EXT_CSD_ENH_START_ADDR];
2187 if (mmc->high_capacity)
2188 mmc->enh_user_start <<= 9;
2193 * Host needs to enable ERASE_GRP_DEF bit if device is
2194 * partitioned. This bit will be lost every time after a reset
2195 * or power off. This will affect erase size.
2199 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2200 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2203 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2204 EXT_CSD_ERASE_GROUP_DEF, 1);
/* Keep the cached copy in sync with the switch just issued */
2209 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2212 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2213 #if CONFIG_IS_ENABLED(MMC_WRITE)
2214 /* Read out group size from ext_csd */
2215 mmc->erase_grp_size =
2216 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2219 * if high capacity and partition setting completed
2220 * SEC_COUNT is valid even if it is smaller than 2 GiB
2221 * JEDEC Standard JESD84-B45, 6.2.4
2223 if (mmc->high_capacity && part_completed) {
2224 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2225 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2226 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2227 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2228 capacity *= MMC_MAX_BLOCK_LEN;
2229 mmc->capacity_user = capacity;
2232 #if CONFIG_IS_ENABLED(MMC_WRITE)
2234 /* Calculate the group size from the csd value. */
2235 int erase_gsz, erase_gmul;
2237 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2238 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2239 mmc->erase_grp_size = (erase_gsz + 1)
2243 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2244 mmc->hc_wp_grp_size = 1024
2245 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2246 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2249 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2254 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* NOTE(review): likely an error path freeing ext_csd — elided lines */
2257 mmc->ext_csd = NULL;
/*
 * mmc_startup() - take an identified card to Transfer state and finish
 * enumeration: read CID, set/get the relative card address, parse CSD
 * (spec version, legacy speed, block lengths, capacity), optionally
 * program DSR, select the card, run v4 EXT_CSD parsing, select the best
 * bus mode/width, and fill in the block-device descriptor.
 * NOTE(review): listing is elided; error handling between the visible
 * steps cannot be confirmed here.
 */
2262 static int mmc_startup(struct mmc *mmc)
2268 struct blk_desc *bdesc;
2270 #ifdef CONFIG_MMC_SPI_CRC_ON
2271 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2272 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2273 cmd.resp_type = MMC_RSP_R1;
2275 err = mmc_send_cmd(mmc, &cmd, NULL);
2281 /* Put the Card in Identify Mode */
2282 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2283 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2284 cmd.resp_type = MMC_RSP_R2;
2287 err = mmc_send_cmd(mmc, &cmd, NULL);
2289 #ifdef CONFIG_MMC_QUIRKS
2290 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2293 * It has been seen that SEND_CID may fail on the first
2294 * attempt, let's try a few more time
2297 err = mmc_send_cmd(mmc, &cmd, NULL);
2300 } while (retries--);
2307 memcpy(mmc->cid, cmd.response, 16);
2310 * For MMC cards, set the Relative Address.
2311 * For SD cards, get the Relatvie Address.
2312 * This also puts the cards into Standby State
2314 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2315 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2316 cmd.cmdarg = mmc->rca << 16;
2317 cmd.resp_type = MMC_RSP_R6;
2319 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD cards return their own RCA in the R6 response */
2325 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2328 /* Get the Card-Specific Data */
2329 cmd.cmdidx = MMC_CMD_SEND_CSD;
2330 cmd.resp_type = MMC_RSP_R2;
2331 cmd.cmdarg = mmc->rca << 16;
2333 err = mmc_send_cmd(mmc, &cmd, NULL);
2338 mmc->csd[0] = cmd.response[0];
2339 mmc->csd[1] = cmd.response[1];
2340 mmc->csd[2] = cmd.response[2];
2341 mmc->csd[3] = cmd.response[3];
/* Derive the MMC spec version from the CSD SPEC_VERS field */
2343 if (mmc->version == MMC_VERSION_UNKNOWN) {
2344 int version = (cmd.response[0] >> 26) & 0xf;
2348 mmc->version = MMC_VERSION_1_2;
2351 mmc->version = MMC_VERSION_1_4;
2354 mmc->version = MMC_VERSION_2_2;
2357 mmc->version = MMC_VERSION_3;
2360 mmc->version = MMC_VERSION_4;
2363 mmc->version = MMC_VERSION_1_2;
2368 /* divide frequency by 10, since the mults are 10x bigger */
2369 freq = fbase[(cmd.response[0] & 0x7)];
2370 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2372 mmc->legacy_speed = freq * mult;
2373 mmc_select_mode(mmc, MMC_LEGACY);
2375 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2376 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2377 #if CONFIG_IS_ENABLED(MMC_WRITE)
2380 mmc->write_bl_len = mmc->read_bl_len;
2382 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* C_SIZE/C_SIZE_MULT layout differs between standard and high capacity */
2385 if (mmc->high_capacity) {
2386 csize = (mmc->csd[1] & 0x3f) << 16
2387 | (mmc->csd[2] & 0xffff0000) >> 16;
2390 csize = (mmc->csd[1] & 0x3ff) << 2
2391 | (mmc->csd[2] & 0xc0000000) >> 30;
2392 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2395 mmc->capacity_user = (csize + 1) << (cmult + 2);
2396 mmc->capacity_user *= mmc->read_bl_len;
2397 mmc->capacity_boot = 0;
2398 mmc->capacity_rpmb = 0;
2399 for (i = 0; i < 4; i++)
2400 mmc->capacity_gp[i] = 0;
/* Clamp block lengths to the driver's maximum transfer unit */
2402 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2403 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2405 #if CONFIG_IS_ENABLED(MMC_WRITE)
2406 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2407 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* Program the DSR only if the card implements it and one was configured */
2410 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2411 cmd.cmdidx = MMC_CMD_SET_DSR;
2412 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2413 cmd.resp_type = MMC_RSP_NONE;
2414 if (mmc_send_cmd(mmc, &cmd, NULL))
2415 pr_warn("MMC: SET_DSR failed\n");
2418 /* Select the card, and put it into Transfer Mode */
2419 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2420 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2421 cmd.resp_type = MMC_RSP_R1;
2422 cmd.cmdarg = mmc->rca << 16;
2423 err = mmc_send_cmd(mmc, &cmd, NULL);
2430 * For SD, its erase group is always one sector
2432 #if CONFIG_IS_ENABLED(MMC_WRITE)
2433 mmc->erase_grp_size = 1;
2435 mmc->part_config = MMCPART_NOAVAILABLE;
2437 err = mmc_startup_v4(mmc);
2441 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2445 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, skip mode negotiation */
2446 mmc_set_clock(mmc, mmc->legacy_speed, false);
2447 mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2448 mmc_set_bus_width(mmc, 1);
2451 err = sd_get_capabilities(mmc);
2454 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2456 err = mmc_get_capabilities(mmc);
2459 mmc_select_mode_and_width(mmc, mmc->card_caps);
2465 mmc->best_mode = mmc->selected_mode;
2467 /* Fix the block length for DDR mode */
2468 if (mmc->ddr_mode) {
2469 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2470 #if CONFIG_IS_ENABLED(MMC_WRITE)
2471 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2475 /* fill in device description */
2476 bdesc = mmc_get_blk_desc(mmc);
2480 bdesc->blksz = mmc->read_bl_len;
2481 bdesc->log2blksz = LOG2(bdesc->blksz);
2482 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2483 #if !defined(CONFIG_SPL_BUILD) || \
2484 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2485 !defined(CONFIG_USE_TINY_PRINTF))
2486 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2487 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2488 (mmc->cid[3] >> 16) & 0xffff);
2489 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2490 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2491 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2492 (mmc->cid[2] >> 24) & 0xff);
2493 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2494 (mmc->cid[2] >> 16) & 0xf);
2496 bdesc->vendor[0] = 0;
2497 bdesc->product[0] = 0;
2498 bdesc->revision[0] = 0;
2501 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * mmc_send_if_cond() - issue CMD8 (SEND_IF_COND) to probe for SD v2.
 * The 0xaa check pattern must be echoed back; on success the card is
 * marked SD_VERSION_2.
 */
2508 static int mmc_send_if_cond(struct mmc *mmc)
2513 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2514 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2515 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2516 cmd.resp_type = MMC_RSP_R7;
2518 err = mmc_send_cmd(mmc, &cmd, NULL);
/* The card must echo the check pattern back in the R7 response */
2523 if ((cmd.response[0] & 0xff) != 0xaa)
2526 mmc->version = SD_VERSION_2;
2531 #if !CONFIG_IS_ENABLED(DM_MMC)
2532 /* board-specific MMC power initializations. */
2533 __weak void board_mmc_power_init(void)
/*
 * mmc_power_init() - look up the card's supply regulators (DM_MMC with
 * DM_REGULATOR) or fall back to the board's power hook.  Missing
 * regulators are logged at debug level and treated as non-fatal.
 */
2538 static int mmc_power_init(struct mmc *mmc)
2540 #if CONFIG_IS_ENABLED(DM_MMC)
2541 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2544 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2547 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2549 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2550 &mmc->vqmmc_supply);
2552 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2554 #else /* !CONFIG_DM_MMC */
2556 * Driver model should use a regulator, as above, rather than calling
2557 * out to board code.
2559 board_mmc_power_init();
2565 * put the host in the initial state:
2566 * - turn on Vdd (card power supply)
2567 * - configure the bus width and clock to minimal values
2569 static void mmc_set_initial_state(struct mmc *mmc)
2573 /* First try to set 3.3V. If it fails set to 1.8V */
2574 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2576 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2578 pr_warn("mmc: failed to set signal voltage\n");
/* Minimal bus: legacy mode, 1-bit width, lowest clock */
2580 mmc_select_mode(mmc, MMC_LEGACY);
2581 mmc_set_bus_width(mmc, 1);
2582 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* mmc_power_on() - enable the VMMC supply regulator, if one is wired up */
2585 static int mmc_power_on(struct mmc *mmc)
2587 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2588 if (mmc->vmmc_supply) {
2589 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2592 puts("Error enabling VMMC supply\n");
/* mmc_power_off() - gate the clock, then disable VMMC if present */
2600 static int mmc_power_off(struct mmc *mmc)
2602 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2603 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2604 if (mmc->vmmc_supply) {
2605 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2608 pr_debug("Error disabling VMMC supply\n");
/* mmc_power_cycle() - off, wait, then on again (needed for UHS retry) */
2616 static int mmc_power_cycle(struct mmc *mmc)
2620 ret = mmc_power_off(mmc);
2624 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2625 * to be on the safer side.
2628 return mmc_power_on(mmc);
/*
 * mmc_get_op_cond() - power up the card and get its operating
 * conditions: power-cycle (disabling UHS if cycling is unsupported),
 * host init, CMD0 reset, CMD8 SD v2 probe, then ACMD41 (SD) or CMD1
 * (MMC, on SD timeout).  NOTE(review): listing is elided; intermediate
 * error handling is not fully visible here.
 */
2631 int mmc_get_op_cond(struct mmc *mmc)
2633 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2639 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2640 mmc_adapter_card_type_ident();
2642 err = mmc_power_init(mmc);
2646 #ifdef CONFIG_MMC_QUIRKS
2647 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2648 MMC_QUIRK_RETRY_SEND_CID;
2651 err = mmc_power_cycle(mmc);
2654 * if power cycling is not supported, we should not try
2655 * to use the UHS modes, because we wouldn't be able to
2656 * recover from an error during the UHS initialization.
2658 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2660 mmc->host_caps &= ~UHS_CAPS;
2661 err = mmc_power_on(mmc);
2666 #if CONFIG_IS_ENABLED(DM_MMC)
2667 /* The device has already been probed ready for use */
2669 /* made sure it's not NULL earlier */
2670 err = mmc->cfg->ops->init(mmc);
2677 mmc_set_initial_state(mmc);
2679 /* Reset the Card */
2680 err = mmc_go_idle(mmc);
2685 /* The internal partition reset to user partition(0) at every CMD0*/
2686 mmc_get_blk_desc(mmc)->hwpart = 0;
2688 /* Test for SD version 2 */
2689 err = mmc_send_if_cond(mmc);
2691 /* Now try to get the SD card's operating condition */
2692 err = sd_send_op_cond(mmc, uhs_en);
/* A failed UHS attempt gets one retry after a power cycle */
2693 if (err && uhs_en) {
2695 mmc_power_cycle(mmc);
2699 /* If the command timed out, we check for an MMC card */
2700 if (err == -ETIMEDOUT) {
2701 err = mmc_send_op_cond(mmc);
2704 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2705 pr_err("Card did not respond to voltage select!\n");
/*
 * mmc_start_init() - begin (possibly asynchronous) card init: seed the
 * baseline host capabilities, check card presence, then fetch the
 * operating conditions.  Sets init_in_progress so mmc_init() can finish
 * the job later.
 */
2714 int mmc_start_init(struct mmc *mmc)
2720 * all hosts are capable of 1 bit bus-width and able to use the legacy
2723 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2724 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2726 #if !defined(CONFIG_MMC_BROKEN_CD)
2727 /* we pretend there's no card when init is NULL */
2728 no_card = mmc_getcd(mmc) == 0;
2732 #if !CONFIG_IS_ENABLED(DM_MMC)
2733 no_card = no_card || (mmc->cfg->ops->init == NULL);
2737 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2738 pr_err("MMC: no card present\n");
2743 err = mmc_get_op_cond(mmc);
2746 mmc->init_in_progress = 1;
/*
 * mmc_complete_init() - finish init started by mmc_start_init():
 * complete a pending CMD1 poll if any, then run mmc_startup().
 */
2751 static int mmc_complete_init(struct mmc *mmc)
2755 mmc->init_in_progress = 0;
2756 if (mmc->op_cond_pending)
2757 err = mmc_complete_op_cond(mmc);
2760 err = mmc_startup(mmc);
/*
 * mmc_init() - public entry point: run mmc_start_init() if not already
 * in progress, then mmc_complete_init(), reporting the elapsed time.
 */
2768 int mmc_init(struct mmc *mmc)
2771 __maybe_unused ulong start;
2772 #if CONFIG_IS_ENABLED(DM_MMC)
2773 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2780 start = get_timer(0);
2782 if (!mmc->init_in_progress)
2783 err = mmc_start_init(mmc);
2786 err = mmc_complete_init(mmc);
2788 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2793 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2794 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2795 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_deinit() - drop the card back to a non-enhanced mode by
 * re-selecting mode/width with the UHS (SD) or HS200/HS400 (eMMC)
 * capabilities masked out — used before handing off to an OS.
 */
2796 int mmc_deinit(struct mmc *mmc)
2804 caps_filtered = mmc->card_caps &
2805 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2806 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2807 MMC_CAP(UHS_SDR104));
2809 return sd_select_mode_and_width(mmc, caps_filtered);
2811 caps_filtered = mmc->card_caps &
2812 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2814 return mmc_select_mode_and_width(mmc, caps_filtered);
/* mmc_set_dsr() - record a DSR value to program during startup */
2819 int mmc_set_dsr(struct mmc *mmc, u16 val)
2825 /* CPU-specific MMC initializations */
2826 __weak int cpu_mmc_init(bd_t *bis)
2831 /* board-specific MMC initializations. */
2832 __weak int board_mmc_init(bd_t *bis)
/* mmc_set_preinit() - request init of this device during mmc_initialize() */
2837 void mmc_set_preinit(struct mmc *mmc, int preinit)
2839 mmc->preinit = preinit;
2842 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_probe() (DM variant) - bind MMC devices in sequence order, then
 * probe every device in the uclass; probe failures are logged, not fatal.
 */
2843 static int mmc_probe(bd_t *bis)
2847 struct udevice *dev;
2849 ret = uclass_get(UCLASS_MMC, &uc);
2854 * Try to add them in sequence order. Really with driver model we
2855 * should allow holes, but the current MMC list does not allow that.
2856 * So if we request 0, 1, 3 we will get 0, 1, 2.
2858 for (i = 0; ; i++) {
2859 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2863 uclass_foreach_dev(dev, uc) {
2864 ret = device_probe(dev);
2866 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* mmc_probe() (non-DM variant) - delegate to the board hook */
2872 static int mmc_probe(bd_t *bis)
2874 if (board_mmc_init(bis) < 0)
/*
 * mmc_initialize() - one-time subsystem bring-up: probe all controllers
 * and list the devices.  Guarded so repeated calls are no-ops.
 */
2881 int mmc_initialize(bd_t *bis)
2883 static int initialized = 0;
2885 if (initialized) /* Avoid initializing mmc multiple times */
2889 #if !CONFIG_IS_ENABLED(BLK)
2890 #if !CONFIG_IS_ENABLED(MMC_TINY)
2894 ret = mmc_probe(bis);
2898 #ifndef CONFIG_SPL_BUILD
2899 print_mmc_devices(',');
2906 #ifdef CONFIG_CMD_BKOPS_ENABLE
2907 int mmc_set_bkops_enable(struct mmc *mmc)
2910 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2912 err = mmc_send_ext_csd(mmc, ext_csd);
2914 puts("Could not get ext_csd register values\n");
2918 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2919 puts("Background operations not supported on device\n");
2920 return -EMEDIUMTYPE;
2923 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2924 puts("Background operations already enabled\n");
2928 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2930 puts("Failed to enable manual background operations\n");
2934 puts("Enabled manual background operations\n");