1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 #define DEFAULT_CMD6_TIMEOUT_MS 500
26 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
28 #if !CONFIG_IS_ENABLED(DM_MMC)
30 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
/* Weak default for the board write-protect hook; boards may override. */
35 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * mmc_getwp() - report the card's write-protect state.
 * The board hook is consulted first; when it gives no answer the host
 * controller's ->getwp op (if any) is used instead.
 * NOTE(review): interior lines are elided in this extract.
 */
40 int mmc_getwp(struct mmc *mmc)
44 wp = board_mmc_getwp(mmc);
47 if (mmc->cfg->ops->getwp)
48 wp = mmc->cfg->ops->getwp(mmc);
56 __weak int board_mmc_getcd(struct mmc *mmc)
62 #ifdef CONFIG_MMC_TRACE
/* Trace helper (CONFIG_MMC_TRACE): log command index and argument
 * just before the command is sent to the host controller. */
63 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
65 printf("CMD_SEND:%d\n", cmd->cmdidx);
66 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
/* Trace helper (CONFIG_MMC_TRACE): log the driver's return code and
 * dump the response registers according to the response type. */
69 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
75 printf("\t\tRET\t\t\t %d\n", ret);
/* Decode the response according to the expected response format. */
77 switch (cmd->resp_type) {
79 printf("\t\tMMC_RSP_NONE\n");
82 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
86 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
/* R2 is a long (CSD/CID) response: print all four 32-bit words. */
90 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
92 printf("\t\t \t\t 0x%08x \n",
94 printf("\t\t \t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
/* Also dump the raw response bytes, 4 per row, highest byte first. */
99 printf("\t\t\t\t\tDUMPING DATA\n");
100 for (i = 0; i < 4; i++) {
102 printf("\t\t\t\t\t%03d - ", i*4);
103 ptr = (u8 *)&cmd->response[i];
105 for (j = 0; j < 4; j++)
106 printf("%02x ", *ptr--);
111 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
115 printf("\t\tERROR MMC rsp not supported\n");
/* Trace helper: extract and print the CURRENT_STATE field (bits 12:9)
 * from a CMD13 response. */
121 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
125 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
126 printf("CURR STATE:%d\n", status);
130 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * mmc_mode_name() - human-readable name for a bus mode.
 * Only built when MMC_VERBOSE or DEBUG is enabled. Out-of-range values
 * fall back to "Unknown mode".
 */
131 const char *mmc_mode_name(enum bus_mode mode)
133 static const char *const names[] = {
134 [MMC_LEGACY] = "MMC legacy",
135 [MMC_HS] = "MMC High Speed (26MHz)",
136 [SD_HS] = "SD High Speed (50MHz)",
137 [UHS_SDR12] = "UHS SDR12 (25MHz)",
138 [UHS_SDR25] = "UHS SDR25 (50MHz)",
139 [UHS_SDR50] = "UHS SDR50 (100MHz)",
140 [UHS_SDR104] = "UHS SDR104 (208MHz)",
141 [UHS_DDR50] = "UHS DDR50 (50MHz)",
142 [MMC_HS_52] = "MMC High Speed (52MHz)",
143 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
144 [MMC_HS_200] = "HS200 (200MHz)",
145 [MMC_HS_400] = "HS400 (200MHz)",
146 [MMC_HS_400_ES] = "HS400ES (200MHz)",
149 if (mode >= MMC_MODES_END)
150 return "Unknown mode";
/*
 * mmc_mode2freq() - nominal clock frequency (Hz) for a bus mode.
 * MMC_LEGACY is special-cased to the per-card legacy speed; modes at or
 * beyond MMC_MODES_END are rejected (tail of handling elided here).
 */
156 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
158 static const int freqs[] = {
159 [MMC_LEGACY] = 25000000,
162 [MMC_HS_52] = 52000000,
163 [MMC_DDR_52] = 52000000,
164 [UHS_SDR12] = 25000000,
165 [UHS_SDR25] = 50000000,
166 [UHS_SDR50] = 100000000,
167 [UHS_DDR50] = 50000000,
168 [UHS_SDR104] = 208000000,
169 [MMC_HS_200] = 200000000,
170 [MMC_HS_400] = 200000000,
171 [MMC_HS_400_ES] = 200000000,
174 if (mode == MMC_LEGACY)
175 return mmc->legacy_speed;
176 else if (mode >= MMC_MODES_END)
/*
 * mmc_select_mode() - record the chosen bus mode in the mmc struct.
 * Updates selected_mode, the target transfer speed and the DDR flag;
 * it does NOT touch the host clock (callers do that separately).
 */
182 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
184 mmc->selected_mode = mode;
185 mmc->tran_speed = mmc_mode2freq(mmc, mode);
186 mmc->ddr_mode = mmc_is_mode_ddr(mode);
187 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
188 mmc->tran_speed / 1000000);
192 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM command dispatch: wrap the host driver's ->send_cmd op with
 * optional trace logging before and after the transfer. */
193 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
197 mmmc_trace_before_send(mmc, cmd);
198 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
199 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * mmc_send_status() - issue CMD13 (SEND_STATUS) and return the card
 * status word through @status. In SPI mode the RCA argument is omitted.
 * Retries up to 5 times (retry loop partially elided in this extract).
 */
205 int mmc_send_status(struct mmc *mmc, unsigned int *status)
208 int err, retries = 5;
210 cmd.cmdidx = MMC_CMD_SEND_STATUS;
211 cmd.resp_type = MMC_RSP_R1;
212 if (!mmc_host_is_spi(mmc))
213 cmd.cmdarg = mmc->rca << 16;
216 err = mmc_send_cmd(mmc, &cmd, NULL);
218 mmc_trace_state(mmc, &cmd);
219 *status = cmd.response[0];
223 mmc_trace_state(mmc, &cmd);
/*
 * mmc_poll_for_busy() - wait until the card leaves the busy state.
 * First tries the host's dat0-level wait; otherwise polls CMD13 once
 * per millisecond until ready-for-data, an error status, or timeout.
 */
227 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
232 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
237 err = mmc_send_status(mmc, &status);
/* Ready when RDY_FOR_DATA is set and the card is not in PRG state. */
241 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
242 (status & MMC_STATUS_CURR_STATE) !=
246 if (status & MMC_STATUS_MASK) {
247 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
248 pr_err("Status Error: 0x%08x\n", status);
253 if (timeout_ms-- <= 0)
259 if (timeout_ms <= 0) {
260 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
261 pr_err("Timeout waiting card ready\n");
/*
 * mmc_set_blocklen() - issue CMD16 (SET_BLOCKLEN) with @len bytes.
 * With CONFIG_MMC_QUIRKS, cards flagged MMC_QUIRK_RETRY_SET_BLOCKLEN
 * get a few retries because the first attempt is known to fail on some
 * hardware.
 */
269 int mmc_set_blocklen(struct mmc *mmc, int len)
277 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
278 cmd.resp_type = MMC_RSP_R1;
281 err = mmc_send_cmd(mmc, &cmd, NULL);
283 #ifdef CONFIG_MMC_QUIRKS
284 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
287 * It has been seen that SET_BLOCKLEN may fail on the first
288 * attempt, let's try a few more time
291 err = mmc_send_cmd(mmc, &cmd, NULL);
301 #ifdef MMC_SUPPORTS_TUNING
302 static const u8 tuning_blk_pattern_4bit[] = {
303 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
304 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
305 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
306 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
307 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
308 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
309 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
310 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
313 static const u8 tuning_blk_pattern_8bit[] = {
314 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
315 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
316 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
317 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
318 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
319 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
320 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
321 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
322 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
323 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
324 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
325 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
326 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
327 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
328 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
329 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * mmc_send_tuning() - send a tuning command and verify the returned
 * block against the standard 4-bit/8-bit tuning pattern for the
 * current bus width. Returns non-zero if the read-back data does not
 * match the reference pattern.
 */
332 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
335 struct mmc_data data;
336 const u8 *tuning_block_pattern;
339 if (mmc->bus_width == 8) {
340 tuning_block_pattern = tuning_blk_pattern_8bit;
341 size = sizeof(tuning_blk_pattern_8bit);
342 } else if (mmc->bus_width == 4) {
343 tuning_block_pattern = tuning_blk_pattern_4bit;
344 size = sizeof(tuning_blk_pattern_4bit);
349 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
353 cmd.resp_type = MMC_RSP_R1;
355 data.dest = (void *)data_buf;
357 data.blocksize = size;
358 data.flags = MMC_DATA_READ;
360 err = mmc_send_cmd(mmc, &cmd, &data);
/* Tuning succeeded only if the pattern came back intact. */
364 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * mmc_read_blocks() - low-level multi/single block read.
 * Chooses CMD18 vs CMD17 by block count, uses a block address for
 * high-capacity cards and a byte address otherwise, and terminates
 * multi-block transfers with CMD12 (STOP_TRANSMISSION).
 */
371 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
375 struct mmc_data data;
378 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
380 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
382 if (mmc->high_capacity)
/* Byte addressing for standard-capacity cards. */
385 cmd.cmdarg = start * mmc->read_bl_len;
387 cmd.resp_type = MMC_RSP_R1;
390 data.blocks = blkcnt;
391 data.blocksize = mmc->read_bl_len;
392 data.flags = MMC_DATA_READ;
394 if (mmc_send_cmd(mmc, &cmd, &data))
398 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
400 cmd.resp_type = MMC_RSP_R1b;
401 if (mmc_send_cmd(mmc, &cmd, NULL)) {
402 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
403 pr_err("mmc fail to send stop cmd\n");
412 #if CONFIG_IS_ENABLED(BLK)
/*
 * mmc_bread() - block-device read entry point.
 * Two signatures: udevice-based when the BLK uclass is enabled,
 * blk_desc-based otherwise. Validates the requested range against the
 * device size, selects the hw partition, sets the block length, and
 * reads in chunks of at most cfg->b_max blocks.
 */
413 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
415 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
419 #if CONFIG_IS_ENABLED(BLK)
420 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
422 int dev_num = block_dev->devnum;
424 lbaint_t cur, blocks_todo = blkcnt;
429 struct mmc *mmc = find_mmc_device(dev_num);
/* MMC_TINY has no blk uclass, so switch the hw partition directly. */
433 if (CONFIG_IS_ENABLED(MMC_TINY))
434 err = mmc_switch_part(mmc, block_dev->hwpart);
436 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Reject reads that run past the end of the device. */
441 if ((start + blkcnt) > block_dev->lba) {
442 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
443 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
444 start + blkcnt, block_dev->lba);
449 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
450 pr_debug("%s: Failed to set blocklen\n", __func__);
/* Read at most b_max blocks per transfer until everything is done. */
455 cur = (blocks_todo > mmc->cfg->b_max) ?
456 mmc->cfg->b_max : blocks_todo;
457 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
458 pr_debug("%s: Failed to read blocks\n", __func__);
463 dst += cur * mmc->read_bl_len;
464 } while (blocks_todo > 0);
/* Issue CMD0 (GO_IDLE_STATE) to reset the card to idle. */
469 static int mmc_go_idle(struct mmc *mmc)
476 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
478 cmd.resp_type = MMC_RSP_NONE;
480 err = mmc_send_cmd(mmc, &cmd, NULL);
490 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - perform the UHS signal-voltage switch sequence
 * (CMD11): card responds, drives dat low, host gates the clock for the
 * spec-mandated interval, switches VDD_IO, re-enables the clock, and
 * checks that the card released dat[0:3].
 */
491 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
497 * Send CMD11 only if the request is to switch the card to
500 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
501 return mmc_set_signal_voltage(mmc, signal_voltage);
503 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
505 cmd.resp_type = MMC_RSP_R1;
507 err = mmc_send_cmd(mmc, &cmd, NULL);
/* An error bit in the CMD11 response means the card refused. */
511 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
515 * The card should drive cmd and dat[0:3] low immediately
516 * after the response of cmd11, but wait 100 us to be sure
518 err = mmc_wait_dat0(mmc, 0, 100);
525 * During a signal voltage level switch, the clock must be gated
526 * for 5 ms according to the SD spec
528 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
530 err = mmc_set_signal_voltage(mmc, signal_voltage);
534 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
536 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
539 * Failure to switch is indicated by the card holding
540 * dat[0:3] low. Wait for at least 1 ms according to spec
542 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * sd_send_op_cond() - SD power-up negotiation via ACMD41.
 * Loops CMD55+ACMD41 until the card reports power-up complete
 * (OCR_BUSY), requesting high capacity (HCS) for SD v2 cards and 1.8V
 * signalling (S18R) when @uhs_en. SPI hosts read the OCR separately
 * with CMD58.
 */
552 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
559 cmd.cmdidx = MMC_CMD_APP_CMD;
560 cmd.resp_type = MMC_RSP_R1;
563 err = mmc_send_cmd(mmc, &cmd, NULL);
568 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
569 cmd.resp_type = MMC_RSP_R3;
572 * Most cards do not answer if some reserved bits
573 * in the ocr are set. However, Some controller
574 * can set bit 7 (reserved for low voltages), but
575 * how to manage low voltages SD card is not yet
578 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
579 (mmc->cfg->voltages & 0xff8000);
581 if (mmc->version == SD_VERSION_2)
582 cmd.cmdarg |= OCR_HCS;
585 cmd.cmdarg |= OCR_S18R;
587 err = mmc_send_cmd(mmc, &cmd, NULL);
/* OCR_BUSY set means card power-up is complete; stop polling. */
592 if (cmd.response[0] & OCR_BUSY)
601 if (mmc->version != SD_VERSION_2)
602 mmc->version = SD_VERSION_1_0;
604 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
605 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
606 cmd.resp_type = MMC_RSP_R3;
609 err = mmc_send_cmd(mmc, &cmd, NULL);
615 mmc->ocr = cmd.response[0];
617 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Card accepted 1.8V request (S18A + CCS bits): do the voltage switch. */
618 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
620 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
626 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_op_cond_iter() - one CMD1 (SEND_OP_COND) iteration.
 * With @use_arg (non-SPI), echo back the intersection of the host's
 * voltage window with the card's advertised OCR, plus HCS and the
 * card's access mode. Stores the response OCR in mmc->ocr.
 */
632 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
637 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
638 cmd.resp_type = MMC_RSP_R3;
640 if (use_arg && !mmc_host_is_spi(mmc))
641 cmd.cmdarg = OCR_HCS |
642 (mmc->cfg->voltages &
643 (mmc->ocr & OCR_VOLTAGE_MASK)) |
644 (mmc->ocr & OCR_ACCESS_MODE);
646 err = mmc_send_cmd(mmc, &cmd, NULL);
649 mmc->ocr = cmd.response[0];
/*
 * mmc_send_op_cond() - start eMMC power-up negotiation (CMD1).
 * Probes capabilities with two quick iterations; if the card is still
 * busy afterwards, marks op_cond_pending so mmc_complete_op_cond() can
 * finish the polling later.
 */
653 static int mmc_send_op_cond(struct mmc *mmc)
657 /* Some cards seem to need this */
660 /* Asking to the card its capabilities */
661 for (i = 0; i < 2; i++) {
662 err = mmc_send_op_cond_iter(mmc, i != 0);
666 /* exit if not busy (flag seems to be inverted) */
667 if (mmc->ocr & OCR_BUSY)
670 mmc->op_cond_pending = 1;
/*
 * mmc_complete_op_cond() - finish CMD1 polling started by
 * mmc_send_op_cond(): loop until OCR_BUSY (power-up done) or timeout,
 * read the OCR via CMD58 on SPI hosts, then derive version placeholder
 * and high-capacity flag from the final OCR.
 */
674 static int mmc_complete_op_cond(struct mmc *mmc)
681 mmc->op_cond_pending = 0;
682 if (!(mmc->ocr & OCR_BUSY)) {
683 /* Some cards seem to need this */
686 start = get_timer(0);
688 err = mmc_send_op_cond_iter(mmc, 1);
691 if (mmc->ocr & OCR_BUSY)
693 if (get_timer(start) > timeout)
699 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
700 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
701 cmd.resp_type = MMC_RSP_R3;
704 err = mmc_send_cmd(mmc, &cmd, NULL);
709 mmc->ocr = cmd.response[0];
712 mmc->version = MMC_VERSION_UNKNOWN;
714 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_ext_csd() - read the 512-byte Extended CSD register via
 * CMD8 (SEND_EXT_CSD) into @ext_csd (must hold MMC_MAX_BLOCK_LEN bytes).
 */
721 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
724 struct mmc_data data;
727 /* Get the Card Status Register */
728 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
729 cmd.resp_type = MMC_RSP_R1;
732 data.dest = (char *)ext_csd;
734 data.blocksize = MMC_MAX_BLOCK_LEN;
735 data.flags = MMC_DATA_READ;
737 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * __mmc_switch() - write one EXT_CSD byte via CMD6 (SWITCH) and wait
 * for the card to complete the operation.
 * The CMD6 timeout is GENERIC_CMD6_TIME based, extended for partition
 * switches; completion is detected via dat0 polling when the host
 * supports it, otherwise by CMD13 status polling (when @send_status).
 */
742 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
745 unsigned int status, start;
747 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
748 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
749 (index == EXT_CSD_PART_CONF);
/* EXT_CSD timing fields are in units of 10 ms. */
753 if (mmc->gen_cmd6_time)
754 timeout_ms = mmc->gen_cmd6_time * 10;
756 if (is_part_switch && mmc->part_switch_time)
757 timeout_ms = mmc->part_switch_time * 10;
759 cmd.cmdidx = MMC_CMD_SWITCH;
760 cmd.resp_type = MMC_RSP_R1b;
761 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
766 ret = mmc_send_cmd(mmc, &cmd, NULL);
767 } while (ret && retries-- > 0);
772 start = get_timer(0);
774 /* poll dat0 for ready/busy status */
775 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
776 if (ret && ret != -ENOSYS)
780 * In cases when not allowed to poll by using CMD13 or because we aren't
781 * capable of polling by using mmc_wait_dat0, then rely on waiting the
782 * stated timeout to be sufficient.
784 if (ret == -ENOSYS && !send_status)
787 /* Finally wait until the card is ready or indicates a failure
788 * to switch. It doesn't hurt to use CMD13 here even if send_status
789 * is false, because by now (after 'timeout_ms' ms) the bus should be
793 ret = mmc_send_status(mmc, &status);
795 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
796 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
800 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
803 } while (get_timer(start) < timeout_ms);
/* Public CMD6 wrapper: always polls status after the switch. */
808 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
810 return __mmc_switch(mmc, set, index, value, true);
813 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * mmc_set_card_speed() - program the EXT_CSD HS_TIMING byte for the
 * requested bus mode. When downgrading from HS200/HS400 (@hsdowngrade)
 * status polling is skipped and the controller is dropped to HS clock
 * before re-reading EXT_CSD, since the old fast clock would make the
 * read unreliable. For HS/HS_52 the switch is verified by re-reading
 * EXT_CSD.
 */
814 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
820 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
826 speed_bits = EXT_CSD_TIMING_HS;
828 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
830 speed_bits = EXT_CSD_TIMING_HS200;
833 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
835 speed_bits = EXT_CSD_TIMING_HS400;
838 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
840 speed_bits = EXT_CSD_TIMING_HS400;
844 speed_bits = EXT_CSD_TIMING_LEGACY;
850 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
851 speed_bits, !hsdowngrade);
855 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
856 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
858 * In case the eMMC is in HS200/HS400 mode and we are downgrading
859 * to HS mode, the card clock are still running much faster than
860 * the supported HS mode clock, so we can not reliably read out
861 * Extended CSD. Reconfigure the controller to run at HS mode.
864 mmc_select_mode(mmc, MMC_HS);
865 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
869 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
870 /* Now check to see that it worked */
871 err = mmc_send_ext_csd(mmc, test_csd);
875 /* No high-speed support */
876 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - derive the card's capability mask from the
 * cached EXT_CSD CARD_TYPE byte: bus widths, HS26/HS52/DDR52, and
 * (when enabled) HS200/HS400/HS400ES. SPI hosts and pre-v4 cards get
 * only the legacy capability.
 */
883 static int mmc_get_capabilities(struct mmc *mmc)
885 u8 *ext_csd = mmc->ext_csd;
888 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
890 if (mmc_host_is_spi(mmc))
893 /* Only version 4 supports high-speed */
894 if (mmc->version < MMC_VERSION_4)
898 pr_err("No ext_csd found!\n"); /* this should never happen */
902 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
904 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
905 mmc->cardtype = cardtype;
907 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
908 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
909 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
910 mmc->card_caps |= MMC_MODE_HS200;
913 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
914 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
915 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
916 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
917 mmc->card_caps |= MMC_MODE_HS400;
920 if (cardtype & EXT_CSD_CARD_TYPE_52) {
921 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
922 mmc->card_caps |= MMC_MODE_DDR_52MHz;
923 mmc->card_caps |= MMC_MODE_HS_52MHz;
925 if (cardtype & EXT_CSD_CARD_TYPE_26)
926 mmc->card_caps |= MMC_MODE_HS;
928 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* HS400ES additionally requires enhanced-strobe support in EXT_CSD. */
929 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
930 (mmc->card_caps & MMC_MODE_HS400)) {
931 mmc->card_caps |= MMC_MODE_HS400_ES;
/*
 * mmc_set_capacity() - select the capacity matching hw partition
 * @part_num (user / boot / RPMB / GP1-4) and refresh the block
 * descriptor's LBA count accordingly.
 */
939 static int mmc_set_capacity(struct mmc *mmc, int part_num)
943 mmc->capacity = mmc->capacity_user;
947 mmc->capacity = mmc->capacity_boot;
950 mmc->capacity = mmc->capacity_rpmb;
/* GP partitions are numbered 4..7 in PART_CONF; index arrays 0..3. */
956 mmc->capacity = mmc->capacity_gp[part_num - 4];
962 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * mmc_switch_part() - select hw partition @part_num via the
 * PARTITION_ACCESS bits of EXT_CSD PART_CONF, with retries. On success
 * (or when falling back to the raw device, part 0, after -ENODEV) the
 * capacity and blk descriptor hwpart are updated.
 */
967 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
973 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
975 (mmc->part_config & ~PART_ACCESS_MASK)
976 | (part_num & PART_ACCESS_MASK));
977 } while (ret && retry--);
980 * Set the capacity if the switch succeeded or was intended
981 * to return to representing the raw device.
983 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
984 ret = mmc_set_capacity(mmc, part_num);
985 mmc_get_blk_desc(mmc)->hwpart = part_num;
991 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * mmc_hwpart_config() - validate and/or program the eMMC hardware
 * partition layout described by @conf.
 * @mode selects how far to go: CHECK (validate only), SET (write the
 * configuration), or COMPLETE (additionally write WR_REL_SET and set
 * PARTITION_SETTING_COMPLETED, which only takes effect after a power
 * cycle). Requires eMMC >= 4.41 with partitioning support and a
 * defined HC WP group size; all sizes must be HC-WP-group aligned.
 */
992 int mmc_hwpart_config(struct mmc *mmc,
993 const struct mmc_hwpart_conf *conf,
994 enum mmc_hwpart_conf_mode mode)
1000 u32 max_enh_size_mult;
1001 u32 tot_enh_size_mult = 0;
1004 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1006 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1009 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1010 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1011 return -EMEDIUMTYPE;
1014 if (!(mmc->part_support & PART_SUPPORT)) {
1015 pr_err("Card does not support partitioning\n");
1016 return -EMEDIUMTYPE;
1019 if (!mmc->hc_wp_grp_size) {
1020 pr_err("Card does not define HC WP group size\n");
1021 return -EMEDIUMTYPE;
1024 /* check partition alignment and total enhanced size */
1025 if (conf->user.enh_size) {
1026 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1027 conf->user.enh_start % mmc->hc_wp_grp_size) {
1028 pr_err("User data enhanced area not HC WP group "
1032 part_attrs |= EXT_CSD_ENH_USR;
1033 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1034 if (mmc->high_capacity) {
1035 enh_start_addr = conf->user.enh_start;
/* Byte-addressed cards: convert the 512-byte sector start to bytes. */
1037 enh_start_addr = (conf->user.enh_start << 9);
1043 tot_enh_size_mult += enh_size_mult;
1045 for (pidx = 0; pidx < 4; pidx++) {
1046 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1047 pr_err("GP%i partition not HC WP group size "
1048 "aligned\n", pidx+1);
1051 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1052 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1053 part_attrs |= EXT_CSD_ENH_GP(pidx);
1054 tot_enh_size_mult += gp_size_mult[pidx];
1058 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1059 pr_err("Card does not support enhanced attribute\n");
1060 return -EMEDIUMTYPE;
1063 err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field in EXT_CSD. */
1068 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1069 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1070 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1071 if (tot_enh_size_mult > max_enh_size_mult) {
1072 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1073 tot_enh_size_mult, max_enh_size_mult);
1074 return -EMEDIUMTYPE;
1077 /* The default value of EXT_CSD_WR_REL_SET is device
1078 * dependent, the values can only be changed if the
1079 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1080 * changed only once and before partitioning is completed. */
1081 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1082 if (conf->user.wr_rel_change) {
1083 if (conf->user.wr_rel_set)
1084 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1086 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1088 for (pidx = 0; pidx < 4; pidx++) {
1089 if (conf->gp_part[pidx].wr_rel_change) {
1090 if (conf->gp_part[pidx].wr_rel_set)
1091 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1093 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1097 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1098 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1099 puts("Card does not support host controlled partition write "
1100 "reliability settings\n");
1101 return -EMEDIUMTYPE;
1104 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1105 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1106 pr_err("Card already partitioned\n");
1110 if (mode == MMC_HWPART_CONF_CHECK)
1113 /* Partitioning requires high-capacity size definitions */
1114 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1115 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1116 EXT_CSD_ERASE_GROUP_DEF, 1);
1121 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1123 #if CONFIG_IS_ENABLED(MMC_WRITE)
1124 /* update erase group size to be high-capacity */
1125 mmc->erase_grp_size =
1126 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1131 /* all OK, write the configuration */
1132 for (i = 0; i < 4; i++) {
1133 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1134 EXT_CSD_ENH_START_ADDR+i,
1135 (enh_start_addr >> (i*8)) & 0xFF);
1139 for (i = 0; i < 3; i++) {
1140 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1141 EXT_CSD_ENH_SIZE_MULT+i,
1142 (enh_size_mult >> (i*8)) & 0xFF);
1146 for (pidx = 0; pidx < 4; pidx++) {
1147 for (i = 0; i < 3; i++) {
1148 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1149 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1150 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1155 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1156 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1160 if (mode == MMC_HWPART_CONF_SET)
1163 /* The WR_REL_SET is a write-once register but shall be
1164 * written before setting PART_SETTING_COMPLETED. As it is
1165 * write-once we can only write it when completing the
1167 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1168 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1169 EXT_CSD_WR_REL_SET, wr_rel_set);
1174 /* Setting PART_SETTING_COMPLETED confirms the partition
1175 * configuration but it only becomes effective after power
1176 * cycle, so we do not adjust the partition related settings
1177 * in the mmc struct. */
1179 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1180 EXT_CSD_PARTITION_SETTING,
1181 EXT_CSD_PARTITION_SETTING_COMPLETED);
1189 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_getcd() - report card-detect state (non-DM path).
 * Board hook first, then the host controller's ->getcd op if provided.
 */
1190 int mmc_getcd(struct mmc *mmc)
1194 cd = board_mmc_getcd(mmc);
1197 if (mmc->cfg->ops->getcd)
1198 cd = mmc->cfg->ops->getcd(mmc);
1207 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * sd_switch() - SD CMD6 (SWITCH_FUNC).
 * @mode: 0 = check function, 1 = switch function (goes to bit 31).
 * @group/@value: 4-bit function selector within the argument; all other
 * groups are left at 0xf (no change). The 64-byte status block is read
 * back into @resp.
 */
1208 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1211 struct mmc_data data;
1213 /* Switch the frequency */
1214 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1215 cmd.resp_type = MMC_RSP_R1;
1216 cmd.cmdarg = (mode << 31) | 0xffffff;
1217 cmd.cmdarg &= ~(0xf << (group * 4));
1218 cmd.cmdarg |= value << (group * 4);
1220 data.dest = (char *)resp;
1221 data.blocksize = 64;
1223 data.flags = MMC_DATA_READ;
1225 return mmc_send_cmd(mmc, &cmd, &data);
/*
 * sd_get_capabilities() - read the SCR (ACMD51) and the CMD6 switch
 * status to fill mmc->card_caps: SD version, 4-bit support, high-speed,
 * and (for SD 3.0 cards with UHS support enabled) the UHS bus modes.
 */
1228 static int sd_get_capabilities(struct mmc *mmc)
1232 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1233 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1234 struct mmc_data data;
1236 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1240 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1242 if (mmc_host_is_spi(mmc))
1245 /* Read the SCR to find out if this card supports higher speeds */
1246 cmd.cmdidx = MMC_CMD_APP_CMD;
1247 cmd.resp_type = MMC_RSP_R1;
1248 cmd.cmdarg = mmc->rca << 16;
1250 err = mmc_send_cmd(mmc, &cmd, NULL);
1255 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1256 cmd.resp_type = MMC_RSP_R1;
1262 data.dest = (char *)scr;
1265 data.flags = MMC_DATA_READ;
1267 err = mmc_send_cmd(mmc, &cmd, &data);
/* SCR is big-endian on the wire; store host-endian copies. */
1276 mmc->scr[0] = __be32_to_cpu(scr[0]);
1277 mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SD_SPEC field (SCR bits 59:56) gives the physical-layer version. */
1279 switch ((mmc->scr[0] >> 24) & 0xf) {
1281 mmc->version = SD_VERSION_1_0;
1284 mmc->version = SD_VERSION_1_10;
1287 mmc->version = SD_VERSION_2;
1288 if ((mmc->scr[0] >> 15) & 0x1)
1289 mmc->version = SD_VERSION_3;
1292 mmc->version = SD_VERSION_1_0;
1296 if (mmc->scr[0] & SD_DATA_4BIT)
1297 mmc->card_caps |= MMC_MODE_4BIT;
1299 /* Version 1.0 doesn't support switching */
1300 if (mmc->version == SD_VERSION_1_0)
1305 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1306 (u8 *)switch_status);
1311 /* The high-speed function is busy. Try again */
1312 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1316 /* If high-speed isn't supported, we return */
1317 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1318 mmc->card_caps |= MMC_CAP(SD_HS);
1320 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1321 /* Version before 3.0 don't support UHS modes */
1322 if (mmc->version < SD_VERSION_3)
1325 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1326 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1327 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1328 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1329 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1330 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1331 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1332 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1333 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1334 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1335 mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * sd_set_card_speed() - switch the SD card's access mode (function
 * group 1) via CMD6 to match @mode, then verify the selected function
 * from the returned switch status. SD 1.00/1.01 cards do not implement
 * CMD6 and are skipped.
 */
1341 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1345 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1348 /* SD version 1.00 and 1.01 does not support CMD 6 */
1349 if (mmc->version == SD_VERSION_1_0)
1354 speed = UHS_SDR12_BUS_SPEED;
1357 speed = HIGH_SPEED_BUS_SPEED;
1359 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1361 speed = UHS_SDR12_BUS_SPEED;
1364 speed = UHS_SDR25_BUS_SPEED;
1367 speed = UHS_SDR50_BUS_SPEED;
1370 speed = UHS_DDR50_BUS_SPEED;
1373 speed = UHS_SDR104_BUS_SPEED;
1380 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* Function-group-1 result nibble must echo the requested speed. */
1384 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * sd_select_bus_width() - set the card's bus width via ACMD6
 * (SET_BUS_WIDTH). Only 1-bit and 4-bit are valid for SD.
 */
1390 static int sd_select_bus_width(struct mmc *mmc, int w)
1395 if ((w != 4) && (w != 1))
1398 cmd.cmdidx = MMC_CMD_APP_CMD;
1399 cmd.resp_type = MMC_RSP_R1;
1400 cmd.cmdarg = mmc->rca << 16;
1402 err = mmc_send_cmd(mmc, &cmd, NULL);
1406 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1407 cmd.resp_type = MMC_RSP_R1;
1412 err = mmc_send_cmd(mmc, &cmd, NULL);
1420 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * sd_read_ssr() - read the 64-byte SD Status (ACMD13) and extract the
 * allocation unit size and erase timing parameters into mmc->ssr.
 * With CONFIG_MMC_QUIRKS the preceding APP_CMD is retried, as it is
 * known to fail on the first attempt on some cards.
 */
1421 static int sd_read_ssr(struct mmc *mmc)
/* AU_SIZE code -> AU size in 512-byte sectors (SD spec table). */
1423 static const unsigned int sd_au_size[] = {
1424 0, SZ_16K / 512, SZ_32K / 512,
1425 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1426 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1427 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1428 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1433 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1434 struct mmc_data data;
1436 unsigned int au, eo, et, es;
1438 cmd.cmdidx = MMC_CMD_APP_CMD;
1439 cmd.resp_type = MMC_RSP_R1;
1440 cmd.cmdarg = mmc->rca << 16;
1442 err = mmc_send_cmd(mmc, &cmd, NULL);
1443 #ifdef CONFIG_MMC_QUIRKS
1444 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1447 * It has been seen that APP_CMD may fail on the first
1448 * attempt, let's try a few more times
1451 err = mmc_send_cmd(mmc, &cmd, NULL);
1454 } while (retries--);
1460 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1461 cmd.resp_type = MMC_RSP_R1;
1465 data.dest = (char *)ssr;
1466 data.blocksize = 64;
1468 data.flags = MMC_DATA_READ;
1470 err = mmc_send_cmd(mmc, &cmd, &data);
/* SD Status is big-endian on the wire; convert in place. */
1478 for (i = 0; i < 16; i++)
1479 ssr[i] = be32_to_cpu(ssr[i]);
1481 au = (ssr[2] >> 12) & 0xF;
1482 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1483 mmc->ssr.au = sd_au_size[au];
1484 es = (ssr[3] >> 24) & 0xFF;
1485 es |= (ssr[2] & 0xFF) << 8;
1486 et = (ssr[3] >> 18) & 0x3F;
1488 eo = (ssr[3] >> 16) & 0x3;
1489 mmc->ssr.erase_timeout = (et * 1000) / es;
1490 mmc->ssr.erase_offset = eo * 1000;
1493 pr_debug("Invalid Allocation Unit Size.\n");
1493 pr_debug("Invalid Allocation Unit Size.\n");
1499 /* frequency bases */
1500 /* divided by 10 to be nice to platforms without floating point */
1501 static const int fbase[] = {
1508 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1509 * to platforms without floating point.
1511 static const u8 multipliers[] = {
1530 static inline int bus_width(uint cap)
1532 if (cap == MMC_MODE_8BIT)
1534 if (cap == MMC_MODE_4BIT)
1536 if (cap == MMC_MODE_1BIT)
1538 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1542 #if !CONFIG_IS_ENABLED(DM_MMC)
1543 #ifdef MMC_SUPPORTS_TUNING
1544 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
/* Push the current clock/width/mode settings to the host driver via
 * its optional ->set_ios op (non-DM path). */
1550 static int mmc_set_ios(struct mmc *mmc)
1554 if (mmc->cfg->ops->set_ios)
1555 ret = mmc->cfg->ops->set_ios(mmc);
/* Ask the host driver to power-cycle the card, if it supports the
 * optional ->host_power_cycle op (non-DM path). */
1560 static int mmc_host_power_cycle(struct mmc *mmc)
1564 if (mmc->cfg->ops->host_power_cycle)
1565 ret = mmc->cfg->ops->host_power_cycle(mmc);
/*
 * mmc_set_clock() - set the bus clock, clamped to the host's
 * [f_min, f_max] range, optionally gating it (@disable), and apply the
 * change through mmc_set_ios().
 */
1571 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1574 if (clock > mmc->cfg->f_max)
1575 clock = mmc->cfg->f_max;
1577 if (clock < mmc->cfg->f_min)
1578 clock = mmc->cfg->f_min;
1582 mmc->clk_disable = disable;
1584 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1586 return mmc_set_ios(mmc);
/* Record the bus width and apply it through mmc_set_ios(). */
1589 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1591 mmc->bus_width = width;
1593 return mmc_set_ios(mmc);
1596 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1598 * helper function to display the capabilities in a human
1599 * friendly manner. The capabilities include bus width and
/*
 * mmc_dump_capabilities() - debug print of a capability mask: the
 * supported bus widths followed by every bus mode present in @caps.
 * (The "\b\b" erases the trailing ", " separator.)
 */
1602 void mmc_dump_capabilities(const char *text, uint caps)
1606 pr_debug("%s: widths [", text);
1607 if (caps & MMC_MODE_8BIT)
1609 if (caps & MMC_MODE_4BIT)
1611 if (caps & MMC_MODE_1BIT)
1613 pr_debug("\b\b] modes [");
1614 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1615 if (MMC_CAP(mode) & caps)
1616 pr_debug("%s, ", mmc_mode_name(mode));
1617 pr_debug("\b\b]\n");
1621 struct mode_width_tuning {
1624 #ifdef MMC_SUPPORTS_TUNING
1629 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Convert an mmc_voltage enum value to millivolts (negative/unknown
 * handling elided in this extract). */
1630 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1633 case MMC_SIGNAL_VOLTAGE_000: return 0;
1634 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1635 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1636 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * mmc_set_signal_voltage() - change the I/O signalling voltage and
 * apply it via mmc_set_ios(); no-op when the requested voltage is
 * already set. A stub returning success is used when MMC_IO_VOLTAGE
 * support is compiled out.
 */
1641 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1645 if (mmc->signal_voltage == signal_voltage)
1648 mmc->signal_voltage = signal_voltage;
1649 err = mmc_set_ios(mmc);
1651 pr_debug("unable to set voltage (err %d)\n", err);
/* Compiled-out variant: accept any request without touching hardware. */
1656 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1662 #if !CONFIG_IS_ENABLED(MMC_TINY)
1663 static const struct mode_width_tuning sd_modes_by_pref[] = {
1664 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1665 #ifdef MMC_SUPPORTS_TUNING
1668 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1669 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1674 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1678 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1682 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1687 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1689 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1692 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1697 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1701 #define for_each_sd_mode_by_pref(caps, mwt) \
1702 for (mwt = sd_modes_by_pref;\
1703 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1705 if (caps & MMC_CAP(mwt->mode))
/*
 * sd_select_mode_and_width() - pick the best (mode, width) pair the
 * card and host both support, preferring faster modes/wider buses.
 * For each candidate: set the bus width (card then host), switch the
 * card's speed, program the host clock, run tuning when the mode needs
 * it, and read the SSR. On any failure fall back to a safer bus speed
 * and try the next candidate. SPI hosts are pinned to 1-bit legacy.
 */
1707 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1710 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1711 const struct mode_width_tuning *mwt;
1712 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1713 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1715 bool uhs_en = false;
1720 mmc_dump_capabilities("sd card", card_caps);
1721 mmc_dump_capabilities("host", mmc->host_caps);
1724 if (mmc_host_is_spi(mmc)) {
1725 mmc_set_bus_width(mmc, 1);
1726 mmc_select_mode(mmc, MMC_LEGACY);
1727 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1731 /* Restrict card's capabilities by what the host can do */
1732 caps = card_caps & mmc->host_caps;
1737 for_each_sd_mode_by_pref(caps, mwt) {
1740 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1741 if (*w & caps & mwt->widths) {
1742 pr_debug("trying mode %s width %d (at %d MHz)\n",
1743 mmc_mode_name(mwt->mode),
1745 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1747 /* configure the bus width (card + host) */
1748 err = sd_select_bus_width(mmc, bus_width(*w));
1751 mmc_set_bus_width(mmc, bus_width(*w));
1753 /* configure the bus mode (card) */
1754 err = sd_set_card_speed(mmc, mwt->mode);
1758 /* configure the bus mode (host) */
1759 mmc_select_mode(mmc, mwt->mode);
1760 mmc_set_clock(mmc, mmc->tran_speed,
1763 #ifdef MMC_SUPPORTS_TUNING
1764 /* execute tuning if needed */
1765 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1766 err = mmc_execute_tuning(mmc,
1769 pr_debug("tuning failed\n");
1775 #if CONFIG_IS_ENABLED(MMC_WRITE)
1776 err = sd_read_ssr(mmc);
1778 pr_warn("unable to read ssr\n");
1784 /* revert to a safer bus speed */
1785 mmc_select_mode(mmc, MMC_LEGACY);
1786 mmc_set_clock(mmc, mmc->tran_speed,
1792 pr_err("unable to select a mode\n");
1797 * read and compare the part of ext csd that is constant.
1798 * This can be used to check that the transfer is working
/* Re-reads EXT_CSD and compares read-only fields against the cached copy
 * to validate that data transfers work in the newly-selected bus mode.
 */
1801 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1804 const u8 *ext_csd = mmc->ext_csd;
1805 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD only exists from MMC version 4 onward. */
1807 if (mmc->version < MMC_VERSION_4)
1810 err = mmc_send_ext_csd(mmc, test_csd);
1814 /* Only compare read only fields */
1815 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1816 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1817 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1818 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1819 ext_csd[EXT_CSD_REV]
1820 == test_csd[EXT_CSD_REV] &&
1821 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1822 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1823 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1824 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1830 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Select the lowest I/O voltage that both the card (per its EXT_CSD card
 * type for the target @mode) and @allowed_mask permit. Tries candidates
 * from lowest to highest via ffs() until one sticks.
 */
1831 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1832 uint32_t allowed_mask)
1840 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1841 EXT_CSD_CARD_TYPE_HS400_1_8V))
1842 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1843 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1844 EXT_CSD_CARD_TYPE_HS400_1_2V))
1845 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1848 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1849 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1850 MMC_SIGNAL_VOLTAGE_180;
1851 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1852 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1855 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* Try each allowed candidate, lowest bit (lowest voltage) first. */
1859 while (card_mask & allowed_mask) {
1860 enum mmc_voltage best_match;
1862 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1863 if (!mmc_set_signal_voltage(mmc, best_match))
1866 allowed_mask &= ~best_match;
/* Stub when MMC_IO_VOLTAGE is disabled: keep the current voltage. */
1872 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1873 uint32_t allowed_mask)
/* eMMC bus modes in preference order (fastest first); selection walks
 * this table and keeps the first mode/width pair that initializes.
 */
1879 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1880 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1882 .mode = MMC_HS_400_ES,
1883 .widths = MMC_MODE_8BIT,
1886 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1889 .widths = MMC_MODE_8BIT,
1890 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1893 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1896 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1897 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1902 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1906 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1910 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1914 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate over mmc_modes_by_pref, visiting only modes present in @caps. */
1918 #define for_each_mmc_mode_by_pref(caps, mwt) \
1919 for (mwt = mmc_modes_by_pref;\
1920 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1922 if (caps & MMC_CAP(mwt->mode))
/* Map a capability bit + DDR flag to the EXT_CSD BUS_WIDTH byte value;
 * ordered widest/DDR first so the widest supported bus is tried first.
 */
1924 static const struct ext_csd_bus_width {
1928 } ext_csd_bus_width[] = {
1929 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1930 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1931 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1932 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1933 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1936 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/* Switch an eMMC into HS400 mode. Per the required sequence: tune in
 * HS200 first, drop back to HS, switch the card to DDR 8-bit, then
 * finally raise the card and host to HS400 timing.
 */
1937 static int mmc_select_hs400(struct mmc *mmc)
1941 /* Set timing to HS200 for tuning */
1942 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1946 /* configure the bus mode (host) */
1947 mmc_select_mode(mmc, MMC_HS_200);
1948 mmc_set_clock(mmc, mmc->tran_speed, false);
1950 /* execute tuning if needed */
1951 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1953 debug("tuning failed\n");
1957 /* Set back to HS */
1958 mmc_set_card_speed(mmc, MMC_HS, true);
1960 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1961 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1965 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1969 mmc_select_mode(mmc, MMC_HS_400);
1970 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub when HS400 support is compiled out. */
1977 static int mmc_select_hs400(struct mmc *mmc)
1983 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1984 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM stub: enhanced strobe needs driver-model host support. */
1985 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/* Switch an eMMC into HS400 Enhanced Strobe mode: HS first, then 8-bit
 * DDR bus with the strobe flag, then HS400ES timing, then enable the
 * enhanced strobe on the host side.
 */
1990 static int mmc_select_hs400es(struct mmc *mmc)
1994 err = mmc_set_card_speed(mmc, MMC_HS, true);
1998 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1999 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2000 EXT_CSD_BUS_WIDTH_STROBE);
2002 printf("switch to bus width for hs400 failed\n");
2005 /* TODO: driver strength */
2006 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2010 mmc_select_mode(mmc, MMC_HS_400_ES);
2011 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2015 return mmc_set_enhanced_strobe(mmc);
/* Stub when HS400ES support is compiled out. */
2018 static int mmc_select_hs400es(struct mmc *mmc)
/* Iterate ext_csd_bus_width entries matching the DDR flag and @caps. */
2024 #define for_each_supported_width(caps, ddr, ecbv) \
2025 for (ecbv = ext_csd_bus_width;\
2026 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2028 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/* Pick the fastest eMMC bus mode/width supported by both card and host.
 * For each candidate: set the lowest workable voltage, program the bus
 * width in EXT_CSD, switch speed (HS400/HS400ES go through dedicated
 * helpers), tune if required, and verify with an EXT_CSD read-back.
 * On failure the voltage and bus are reverted and the next candidate
 * is tried; running out of candidates is a hard error.
 */
2030 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2033 const struct mode_width_tuning *mwt;
2034 const struct ext_csd_bus_width *ecbw;
2037 mmc_dump_capabilities("mmc", card_caps);
2038 mmc_dump_capabilities("host", mmc->host_caps);
2041 if (mmc_host_is_spi(mmc)) {
2042 mmc_set_bus_width(mmc, 1);
2043 mmc_select_mode(mmc, MMC_LEGACY);
2044 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2048 /* Restrict card's capabilities by what the host can do */
2049 card_caps &= mmc->host_caps;
2051 /* Only version 4 of MMC supports wider bus widths */
2052 if (mmc->version < MMC_VERSION_4)
2055 if (!mmc->ext_csd) {
2056 pr_debug("No ext_csd found!\n"); /* this should never happen */
2060 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2061 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2063 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2064 * before doing anything else, since a transition from either of
2065 * the HS200/HS400 mode directly to legacy mode is not supported.
2067 if (mmc->selected_mode == MMC_HS_200 ||
2068 mmc->selected_mode == MMC_HS_400)
2069 mmc_set_card_speed(mmc, MMC_HS, true);
2072 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2074 for_each_mmc_mode_by_pref(card_caps, mwt) {
2075 for_each_supported_width(card_caps & mwt->widths,
2076 mmc_is_mode_ddr(mwt->mode), ecbw) {
2077 enum mmc_voltage old_voltage;
2078 pr_debug("trying mode %s width %d (at %d MHz)\n",
2079 mmc_mode_name(mwt->mode),
2080 bus_width(ecbw->cap),
2081 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* Remember the voltage so it can be restored if this mode fails. */
2082 old_voltage = mmc->signal_voltage;
2083 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2084 MMC_ALL_SIGNAL_VOLTAGE);
2088 /* configure the bus width (card + host) */
2089 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2091 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2094 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2096 if (mwt->mode == MMC_HS_400) {
2097 err = mmc_select_hs400(mmc);
2099 printf("Select HS400 failed %d\n", err);
2102 } else if (mwt->mode == MMC_HS_400_ES) {
2103 err = mmc_select_hs400es(mmc);
2105 printf("Select HS400ES failed %d\n",
2110 /* configure the bus speed (card) */
2111 err = mmc_set_card_speed(mmc, mwt->mode, false);
2116 * configure the bus width AND the ddr mode
2117 * (card). The host side will be taken care
2118 * of in the next step
2120 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2121 err = mmc_switch(mmc,
2122 EXT_CSD_CMD_SET_NORMAL,
2124 ecbw->ext_csd_bits);
2129 /* configure the bus mode (host) */
2130 mmc_select_mode(mmc, mwt->mode);
2131 mmc_set_clock(mmc, mmc->tran_speed,
2133 #ifdef MMC_SUPPORTS_TUNING
2135 /* execute tuning if needed */
2137 err = mmc_execute_tuning(mmc,
2140 pr_debug("tuning failed\n");
2147 /* do a transfer to check the configuration */
2148 err = mmc_read_and_compare_ext_csd(mmc);
2152 mmc_set_signal_voltage(mmc, old_voltage);
2153 /* if an error occurred, revert to a safer bus mode */
2154 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2155 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2156 mmc_select_mode(mmc, MMC_LEGACY);
2157 mmc_set_bus_width(mmc, 1);
2161 pr_err("unable to select a mode\n");
2167 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: static EXT_CSD buffer avoids a malloc in space-constrained SPL. */
2168 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/* Version-4+ eMMC startup: read EXT_CSD and derive version, capacities,
 * hardware partition layout, erase/WP group sizes and CMD6 timing from it.
 * No-op (success) for SD cards and pre-v4 MMC.
 */
2171 static int mmc_startup_v4(struct mmc *mmc)
2175 bool has_parts = false;
2176 bool part_completed;
2177 static const u32 mmc_versions[] = {
2189 #if CONFIG_IS_ENABLED(MMC_TINY)
2190 u8 *ext_csd = ext_csd_bkup;
2192 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2196 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2198 err = mmc_send_ext_csd(mmc, ext_csd);
2202 /* store the ext csd for future reference */
2204 mmc->ext_csd = ext_csd;
2206 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2208 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2211 /* check ext_csd version and capacity */
2212 err = mmc_send_ext_csd(mmc, ext_csd);
2216 /* store the ext csd for future reference */
2218 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2221 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2223 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2226 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2228 if (mmc->version >= MMC_VERSION_4_2) {
2230 * According to the JEDEC Standard, the value of
2231 * ext_csd's capacity is valid if the value is more
2234 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2235 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2236 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2237 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2238 capacity *= MMC_MAX_BLOCK_LEN;
2239 if ((capacity >> 20) > 2 * 1024)
2240 mmc->capacity_user = capacity;
2243 if (mmc->version >= MMC_VERSION_4_5)
2244 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2246 /* The partition data may be non-zero but it is only
2247 * effective if PARTITION_SETTING_COMPLETED is set in
2248 * EXT_CSD, so ignore any data if this bit is not set,
2249 * except for enabling the high-capacity group size
2250 * definition (see below).
2252 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2253 EXT_CSD_PARTITION_SETTING_COMPLETED);
2255 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2256 /* Some eMMC set the value too low so set a minimum */
2257 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2258 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2260 /* store the partition info of emmc */
2261 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2262 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2263 ext_csd[EXT_CSD_BOOT_MULT])
2264 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2265 if (part_completed &&
2266 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2267 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT/RPMB sizes are in 128 KiB units, hence the << 17. */
2269 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2271 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2273 for (i = 0; i < 4; i++) {
2274 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2275 uint mult = (ext_csd[idx + 2] << 16) +
2276 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2279 if (!part_completed)
2281 mmc->capacity_gp[i] = mult;
2282 mmc->capacity_gp[i] *=
2283 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2284 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2285 mmc->capacity_gp[i] <<= 19;
2288 #ifndef CONFIG_SPL_BUILD
2289 if (part_completed) {
2290 mmc->enh_user_size =
2291 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2292 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2293 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2294 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2295 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2296 mmc->enh_user_size <<= 19;
2297 mmc->enh_user_start =
2298 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2299 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2300 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2301 ext_csd[EXT_CSD_ENH_START_ADDR];
2302 if (mmc->high_capacity)
2303 mmc->enh_user_start <<= 9;
2308 * Host needs to enable ERASE_GRP_DEF bit if device is
2309 * partitioned. This bit will be lost every time after a reset
2310 * or power off. This will affect erase size.
2314 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2315 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2318 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2319 EXT_CSD_ERASE_GROUP_DEF, 1);
2324 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2327 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2328 #if CONFIG_IS_ENABLED(MMC_WRITE)
2329 /* Read out group size from ext_csd */
2330 mmc->erase_grp_size =
2331 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2334 * if high capacity and partition setting completed
2335 * SEC_COUNT is valid even if it is smaller than 2 GiB
2336 * JEDEC Standard JESD84-B45, 6.2.4
2338 if (mmc->high_capacity && part_completed) {
2339 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2340 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2341 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2342 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2343 capacity *= MMC_MAX_BLOCK_LEN;
2344 mmc->capacity_user = capacity;
2347 #if CONFIG_IS_ENABLED(MMC_WRITE)
2349 /* Calculate the group size from the csd value. */
2350 int erase_gsz, erase_gmul;
2352 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2353 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2354 mmc->erase_grp_size = (erase_gsz + 1)
2358 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2359 mmc->hc_wp_grp_size = 1024
2360 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2361 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2364 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2369 #if !CONFIG_IS_ENABLED(MMC_TINY)
2372 mmc->ext_csd = NULL;
/* Bring an identified card fully up: read CID/CSD, assign/read the RCA,
 * derive legacy timing and block lengths from the CSD, select the card,
 * run the v4 EXT_CSD path, negotiate the best bus mode/width, and fill
 * in the block-device descriptor (blksz, lba, vendor/product strings).
 */
2377 static int mmc_startup(struct mmc *mmc)
2383 struct blk_desc *bdesc;
2385 #ifdef CONFIG_MMC_SPI_CRC_ON
2386 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2387 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2388 cmd.resp_type = MMC_RSP_R1;
2390 err = mmc_send_cmd(mmc, &cmd, NULL);
2396 /* Put the Card in Identify Mode */
2397 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2398 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2399 cmd.resp_type = MMC_RSP_R2;
2402 err = mmc_send_cmd(mmc, &cmd, NULL);
2404 #ifdef CONFIG_MMC_QUIRKS
2405 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2408 * It has been seen that SEND_CID may fail on the first
2409 * attempt, let's try a few more times
2412 err = mmc_send_cmd(mmc, &cmd, NULL);
2415 } while (retries--);
2422 memcpy(mmc->cid, cmd.response, 16);
2425 * For MMC cards, set the Relative Address.
2426 * For SD cards, get the Relative Address.
2427 * This also puts the cards into Standby State
2429 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2430 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2431 cmd.cmdarg = mmc->rca << 16;
2432 cmd.resp_type = MMC_RSP_R6;
2434 err = mmc_send_cmd(mmc, &cmd, NULL);
2440 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2443 /* Get the Card-Specific Data */
2444 cmd.cmdidx = MMC_CMD_SEND_CSD;
2445 cmd.resp_type = MMC_RSP_R2;
2446 cmd.cmdarg = mmc->rca << 16;
2448 err = mmc_send_cmd(mmc, &cmd, NULL);
2453 mmc->csd[0] = cmd.response[0];
2454 mmc->csd[1] = cmd.response[1];
2455 mmc->csd[2] = cmd.response[2];
2456 mmc->csd[3] = cmd.response[3];
/* Spec version field of the CSD, only consulted if still unknown. */
2458 if (mmc->version == MMC_VERSION_UNKNOWN) {
2459 int version = (cmd.response[0] >> 26) & 0xf;
2463 mmc->version = MMC_VERSION_1_2;
2466 mmc->version = MMC_VERSION_1_4;
2469 mmc->version = MMC_VERSION_2_2;
2472 mmc->version = MMC_VERSION_3;
2475 mmc->version = MMC_VERSION_4;
2478 mmc->version = MMC_VERSION_1_2;
2483 /* divide frequency by 10, since the mults are 10x bigger */
2484 freq = fbase[(cmd.response[0] & 0x7)];
2485 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2487 mmc->legacy_speed = freq * mult;
2488 mmc_select_mode(mmc, MMC_LEGACY);
2490 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2491 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2492 #if CONFIG_IS_ENABLED(MMC_WRITE)
2495 mmc->write_bl_len = mmc->read_bl_len;
2497 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* Capacity from CSD: C_SIZE layout differs for high-capacity cards. */
2500 if (mmc->high_capacity) {
2501 csize = (mmc->csd[1] & 0x3f) << 16
2502 | (mmc->csd[2] & 0xffff0000) >> 16;
2505 csize = (mmc->csd[1] & 0x3ff) << 2
2506 | (mmc->csd[2] & 0xc0000000) >> 30;
2507 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2510 mmc->capacity_user = (csize + 1) << (cmult + 2);
2511 mmc->capacity_user *= mmc->read_bl_len;
2512 mmc->capacity_boot = 0;
2513 mmc->capacity_rpmb = 0;
2514 for (i = 0; i < 4; i++)
2515 mmc->capacity_gp[i] = 0;
2517 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2518 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2520 #if CONFIG_IS_ENABLED(MMC_WRITE)
2521 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2522 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* Program the DSR only if the card declares it and a value was given. */
2525 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2526 cmd.cmdidx = MMC_CMD_SET_DSR;
2527 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2528 cmd.resp_type = MMC_RSP_NONE;
2529 if (mmc_send_cmd(mmc, &cmd, NULL))
2530 pr_warn("MMC: SET_DSR failed\n");
2533 /* Select the card, and put it into Transfer Mode */
2534 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2535 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2536 cmd.resp_type = MMC_RSP_R1;
2537 cmd.cmdarg = mmc->rca << 16;
2538 err = mmc_send_cmd(mmc, &cmd, NULL);
2545 * For SD, its erase group is always one sector
2547 #if CONFIG_IS_ENABLED(MMC_WRITE)
2548 mmc->erase_grp_size = 1;
2550 mmc->part_config = MMCPART_NOAVAILABLE;
2552 err = mmc_startup_v4(mmc);
2556 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2560 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY skips mode negotiation: 1-bit legacy at the CSD-derived speed. */
2561 mmc_set_clock(mmc, mmc->legacy_speed, false);
2562 mmc_select_mode(mmc, MMC_LEGACY);
2563 mmc_set_bus_width(mmc, 1);
2566 err = sd_get_capabilities(mmc);
2569 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2571 err = mmc_get_capabilities(mmc);
2574 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2580 mmc->best_mode = mmc->selected_mode;
2582 /* Fix the block length for DDR mode */
2583 if (mmc->ddr_mode) {
2584 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2585 #if CONFIG_IS_ENABLED(MMC_WRITE)
2586 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2590 /* fill in device description */
2591 bdesc = mmc_get_blk_desc(mmc);
2595 bdesc->blksz = mmc->read_bl_len;
2596 bdesc->log2blksz = LOG2(bdesc->blksz);
2597 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2598 #if !defined(CONFIG_SPL_BUILD) || \
2599 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2600 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
2601 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2602 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2603 (mmc->cid[3] >> 16) & 0xffff);
2604 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2605 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2606 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2607 (mmc->cid[2] >> 24) & 0xff);
2608 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2609 (mmc->cid[2] >> 16) & 0xf);
2611 bdesc->vendor[0] = 0;
2612 bdesc->product[0] = 0;
2613 bdesc->revision[0] = 0;
2616 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/* Send SD CMD8 (SEND_IF_COND) with the 0xaa check pattern; a card that
 * echoes the pattern back is SD version 2 or later.
 */
2623 static int mmc_send_if_cond(struct mmc *mmc)
2628 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2629 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2630 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2631 cmd.resp_type = MMC_RSP_R7;
2633 err = mmc_send_cmd(mmc, &cmd, NULL);
/* The card must echo the check pattern back in the R7 response. */
2638 if ((cmd.response[0] & 0xff) != 0xaa)
2641 mmc->version = SD_VERSION_2;
2646 #if !CONFIG_IS_ENABLED(DM_MMC)
2647 /* board-specific MMC power initializations. */
2648 __weak void board_mmc_power_init(void)
/* Resolve power supplies: with driver model, look up the vmmc/vqmmc
 * regulators from the device tree (missing supplies are non-fatal);
 * without it, fall back to the weak board hook.
 */
2653 static int mmc_power_init(struct mmc *mmc)
2655 #if CONFIG_IS_ENABLED(DM_MMC)
2656 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2659 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2662 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2664 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2665 &mmc->vqmmc_supply);
2667 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2669 #else /* !CONFIG_DM_MMC */
2671 * Driver model should use a regulator, as above, rather than calling
2672 * out to board code.
2674 board_mmc_power_init();
2680 * put the host in the initial state:
2681 * - turn on Vdd (card power supply)
2682 * - configure the bus width and clock to minimal values
2684 static void mmc_set_initial_state(struct mmc *mmc)
2688 /* First try to set 3.3V. If it fails set to 1.8V */
2689 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2691 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2693 pr_warn("mmc: failed to set signal voltage\n");
/* Start from the lowest common denominator: legacy mode, 1-bit bus. */
2695 mmc_select_mode(mmc, MMC_LEGACY);
2696 mmc_set_bus_width(mmc, 1);
2697 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the card's Vdd supply (vmmc regulator) when driver model
 * regulators are in use; otherwise a no-op.
 */
2700 static int mmc_power_on(struct mmc *mmc)
2702 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2703 if (mmc->vmmc_supply) {
2704 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2707 puts("Error enabling VMMC supply\n");
/* Gate the clock and cut the card's Vdd supply. */
2715 static int mmc_power_off(struct mmc *mmc)
2717 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2718 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2719 if (mmc->vmmc_supply) {
2720 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2723 pr_debug("Error disabling VMMC supply\n");
/* Full power cycle: off, host-side cycle hook, brief settle delay, on. */
2731 static int mmc_power_cycle(struct mmc *mmc)
2735 ret = mmc_power_off(mmc);
2739 ret = mmc_host_power_cycle(mmc);
2744 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2745 * to be on the safer side.
2748 return mmc_power_on(mmc);
/* Power the card and probe its operating conditions: power-cycle (UHS is
 * disabled if a full cycle is impossible), reset with CMD0, try the SD
 * path (CMD8 + ACMD41), and fall back to eMMC CMD1 on timeout.
 */
2751 int mmc_get_op_cond(struct mmc *mmc)
2753 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2759 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2760 mmc_adapter_card_type_ident();
2762 err = mmc_power_init(mmc);
2766 #ifdef CONFIG_MMC_QUIRKS
2767 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2768 MMC_QUIRK_RETRY_SEND_CID |
2769 MMC_QUIRK_RETRY_APP_CMD;
2772 err = mmc_power_cycle(mmc);
2775 * if power cycling is not supported, we should not try
2776 * to use the UHS modes, because we wouldn't be able to
2777 * recover from an error during the UHS initialization.
2779 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2781 mmc->host_caps &= ~UHS_CAPS;
2782 err = mmc_power_on(mmc);
2787 #if CONFIG_IS_ENABLED(DM_MMC)
2788 /* The device has already been probed ready for use */
2790 /* made sure it's not NULL earlier */
2791 err = mmc->cfg->ops->init(mmc);
2798 mmc_set_initial_state(mmc);
2800 /* Reset the Card */
2801 err = mmc_go_idle(mmc);
2806 /* The internal partition reset to user partition(0) at every CMD0*/
2807 mmc_get_blk_desc(mmc)->hwpart = 0;
2809 /* Test for SD version 2 */
2810 err = mmc_send_if_cond(mmc);
2812 /* Now try to get the SD card's operating condition */
2813 err = sd_send_op_cond(mmc, uhs_en);
/* A failed UHS negotiation needs a power cycle before retrying. */
2814 if (err && uhs_en) {
2816 mmc_power_cycle(mmc);
2820 /* If the command timed out, we check for an MMC card */
2821 if (err == -ETIMEDOUT) {
2822 err = mmc_send_op_cond(mmc);
2825 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2826 pr_err("Card did not respond to voltage select!\n");
/* Begin (possibly deferred) card initialization: seed host caps, check
 * card presence, and probe the operating conditions. The init is
 * completed later by mmc_complete_init().
 */
2835 int mmc_start_init(struct mmc *mmc)
2841 * all hosts are capable of 1 bit bus-width and able to use the legacy
2844 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
/* NOTE(review): MMC_CAP(MMC_LEGACY) appears twice in this expression —
 * harmless (OR is idempotent) but the duplicate looks unintentional.
 */
2845 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2846 #if CONFIG_IS_ENABLED(DM_MMC)
2847 mmc_deferred_probe(mmc);
2849 #if !defined(CONFIG_MMC_BROKEN_CD)
2850 no_card = mmc_getcd(mmc) == 0;
2854 #if !CONFIG_IS_ENABLED(DM_MMC)
2855 /* we pretend there's no card when init is NULL */
2856 no_card = no_card || (mmc->cfg->ops->init == NULL);
2860 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2861 pr_err("MMC: no card present\n");
2866 err = mmc_get_op_cond(mmc);
2869 mmc->init_in_progress = 1;
/* Finish an initialization started by mmc_start_init(): complete the
 * pending op-cond handshake if any, then run the full startup sequence.
 */
2874 static int mmc_complete_init(struct mmc *mmc)
2878 mmc->init_in_progress = 0;
2879 if (mmc->op_cond_pending)
2880 err = mmc_complete_op_cond(mmc);
2883 err = mmc_startup(mmc);
/* Public entry point: run start + complete init unless already underway,
 * and log the elapsed time.
 */
2891 int mmc_init(struct mmc *mmc)
2894 __maybe_unused ulong start;
2895 #if CONFIG_IS_ENABLED(DM_MMC)
2896 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2903 start = get_timer(0);
2905 if (!mmc->init_in_progress)
2906 err = mmc_start_init(mmc);
2909 err = mmc_complete_init(mmc);
2911 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2916 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2917 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2918 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/* Downgrade the card out of the high-speed modes (UHS / HS200 / HS400)
 * by re-selecting mode and width with those capabilities masked off —
 * used before handing the card over to an OS that re-initializes it.
 */
2919 int mmc_deinit(struct mmc *mmc)
2927 caps_filtered = mmc->card_caps &
2928 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2929 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2930 MMC_CAP(UHS_SDR104));
2932 return sd_select_mode_and_width(mmc, caps_filtered);
2934 caps_filtered = mmc->card_caps &
2935 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2937 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Record a driver stage register value to be programmed during startup. */
2942 int mmc_set_dsr(struct mmc *mmc, u16 val)
2948 /* CPU-specific MMC initializations */
2949 __weak int cpu_mmc_init(bd_t *bis)
2954 /* board-specific MMC initializations. */
2955 __weak int board_mmc_init(bd_t *bis)
/* Mark a device for early (pre-use) initialization by mmc_initialize(). */
2960 void mmc_set_preinit(struct mmc *mmc, int preinit)
2962 mmc->preinit = preinit;
2965 #if CONFIG_IS_ENABLED(DM_MMC)
/* Driver-model probe: bind MMC devices in sequence order, then probe
 * each one; probe failures are logged but do not abort the loop.
 */
2966 static int mmc_probe(bd_t *bis)
2970 struct udevice *dev;
2972 ret = uclass_get(UCLASS_MMC, &uc);
2977 * Try to add them in sequence order. Really with driver model we
2978 * should allow holes, but the current MMC list does not allow that.
2979 * So if we request 0, 1, 3 we will get 0, 1, 2.
2981 for (i = 0; ; i++) {
2982 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2986 uclass_foreach_dev(dev, uc) {
2987 ret = device_probe(dev);
2989 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Legacy (non-DM) probe: defer entirely to the board hook. */
2995 static int mmc_probe(bd_t *bis)
2997 if (board_mmc_init(bis) < 0)
/* One-time global MMC subsystem bring-up; safe to call repeatedly. */
3004 int mmc_initialize(bd_t *bis)
3006 static int initialized = 0;
3008 if (initialized) /* Avoid initializing mmc multiple times */
3012 #if !CONFIG_IS_ENABLED(BLK)
3013 #if !CONFIG_IS_ENABLED(MMC_TINY)
3017 ret = mmc_probe(bis);
3021 #ifndef CONFIG_SPL_BUILD
3022 print_mmc_devices(',');
3029 #if CONFIG_IS_ENABLED(DM_MMC)
/* Look up MMC device @num via the uclass and prepare it for use. */
3030 int mmc_init_device(int num)
3032 struct udevice *dev;
3036 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3040 m = mmc_get_mmc_dev(dev);
3043 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
3044 mmc_set_preinit(m, 1);
3053 #ifdef CONFIG_CMD_BKOPS_ENABLE
3054 int mmc_set_bkops_enable(struct mmc *mmc)
3057 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3059 err = mmc_send_ext_csd(mmc, ext_csd);
3061 puts("Could not get ext_csd register values\n");
3065 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3066 puts("Background operations not supported on device\n");
3067 return -EMEDIUMTYPE;
3070 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3071 puts("Background operations already enabled\n");
3075 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3077 puts("Failed to enable manual background operations\n");
3081 puts("Enabled manual background operations\n");