1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 #define DEFAULT_CMD6_TIMEOUT_MS 500
26 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
27 static int mmc_power_cycle(struct mmc *mmc);
28 #if !CONFIG_IS_ENABLED(MMC_TINY)
29 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
32 #if !CONFIG_IS_ENABLED(DM_MMC)
34 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
39 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * mmc_getwp() - return the card's write-protect state.
 *
 * The board-level hook board_mmc_getwp() is queried first; the
 * controller's ops->getwp callback is used as well.
 * NOTE(review): interior lines of this function are missing from this
 * dump, so the exact fallback condition between the two is not visible.
 */
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
109 for (j = 0; j < 4; j++)
110 printf("%02x ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
119 printf("\t\tERROR MMC rsp not supported\n");
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * mmc_mode_name() - map a bus_mode enum value to a human-readable name
 * for debug/verbose output. Values >= MMC_MODES_END yield "Unknown mode".
 */
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [SD_LEGACY] = "SD Legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
/* Guard against out-of-range values before indexing the table. */
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
/*
 * mmc_mode2freq() - nominal maximum bus clock (Hz) for a given bus mode.
 *
 * MMC_LEGACY is special-cased to the per-card legacy_speed; all other
 * modes use the static table below. Out-of-range modes are handled
 * after the table lookup (continuation not visible in this dump).
 */
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
164 [SD_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
/*
 * mmc_select_mode() - record the chosen bus mode on the mmc struct.
 *
 * Updates selected_mode, derives the transfer clock from the mode
 * table, and caches whether the mode is a DDR mode.
 */
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_send_cmd() - issue a command (and optional data transfer) through
 * the controller's send_cmd op, bracketed by the CONFIG_MMC_TRACE hooks.
 * Non-DM path only (see the surrounding !CONFIG_IS_ENABLED(DM_MMC) guard).
 */
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * mmc_send_status() - read the card status register via CMD13.
 *
 * @status: filled with response[0] on success.
 * For non-SPI hosts the card's RCA goes in the argument's upper 16 bits.
 * NOTE(review): the retry loop using 'retries' is not fully visible in
 * this dump.
 */
209 int mmc_send_status(struct mmc *mmc, unsigned int *status)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 mmc_trace_state(mmc, &cmd);
223 *status = cmd.response[0];
227 mmc_trace_state(mmc, &cmd);
/*
 * mmc_poll_for_busy() - wait until the card leaves its busy state.
 *
 * First tries hardware dat0 polling (mmc_wait_dat0); otherwise falls
 * back to polling CMD13 until RDY_FOR_DATA is set and the current
 * state is acceptable. Reports status-error bits and a final timeout.
 * NOTE(review): loop structure and the compared CURR_STATE value are
 * partly missing from this dump.
 */
231 int mmc_poll_for_busy(struct mmc *mmc, int timeout)
236 err = mmc_wait_dat0(mmc, 1, timeout);
241 err = mmc_send_status(mmc, &status);
245 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
246 (status & MMC_STATUS_CURR_STATE) !=
250 if (status & MMC_STATUS_MASK) {
251 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
252 pr_err("Status Error: 0x%08x\n", status);
264 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 pr_err("Timeout waiting card ready\n");
/*
 * mmc_set_blocklen() - set the card's block length via CMD16.
 *
 * With CONFIG_MMC_QUIRKS, cards flagged MMC_QUIRK_RETRY_SET_BLOCKLEN
 * get the command retried, since some cards fail the first attempt.
 */
273 int mmc_set_blocklen(struct mmc *mmc, int len)
281 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
282 cmd.resp_type = MMC_RSP_R1;
285 err = mmc_send_cmd(mmc, &cmd, NULL);
287 #ifdef CONFIG_MMC_QUIRKS
288 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
291 * It has been seen that SET_BLOCKLEN may fail on the first
292 * attempt, so let's try a few more times
295 err = mmc_send_cmd(mmc, &cmd, NULL);
305 #ifdef MMC_SUPPORTS_TUNING
306 static const u8 tuning_blk_pattern_4bit[] = {
307 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
308 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
309 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
310 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
311 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
312 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
313 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
314 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
317 static const u8 tuning_blk_pattern_8bit[] = {
318 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
319 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
320 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
321 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
322 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
323 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
324 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
325 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
326 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
327 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
328 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
329 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
330 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
331 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
332 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
333 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * mmc_send_tuning() - read one tuning block and verify it.
 *
 * Picks the 8-bit or 4-bit reference pattern based on the current bus
 * width, reads a block of that size from the card, and compares it
 * against the expected pattern with memcmp().
 * NOTE(review): cmd.cmdidx assignment (presumably from @opcode) and the
 * narrower-bus-width path are missing from this dump.
 */
336 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
339 struct mmc_data data;
340 const u8 *tuning_block_pattern;
343 if (mmc->bus_width == 8) {
344 tuning_block_pattern = tuning_blk_pattern_8bit;
345 size = sizeof(tuning_blk_pattern_8bit);
346 } else if (mmc->bus_width == 4) {
347 tuning_block_pattern = tuning_blk_pattern_4bit;
348 size = sizeof(tuning_blk_pattern_4bit);
353 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
357 cmd.resp_type = MMC_RSP_R1;
359 data.dest = (void *)data_buf;
361 data.blocksize = size;
362 data.flags = MMC_DATA_READ;
364 err = mmc_send_cmd(mmc, &cmd, &data);
/* Tuning failed if the received block does not match the pattern. */
368 if (memcmp(data_buf, tuning_block_pattern, size))
375 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
379 struct mmc_data data;
382 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
384 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
386 if (mmc->high_capacity)
389 cmd.cmdarg = start * mmc->read_bl_len;
391 cmd.resp_type = MMC_RSP_R1;
394 data.blocks = blkcnt;
395 data.blocksize = mmc->read_bl_len;
396 data.flags = MMC_DATA_READ;
398 if (mmc_send_cmd(mmc, &cmd, &data))
402 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
404 cmd.resp_type = MMC_RSP_R1b;
405 if (mmc_send_cmd(mmc, &cmd, NULL)) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 pr_err("mmc fail to send stop cmd\n");
416 #if CONFIG_IS_ENABLED(BLK)
417 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
419 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
423 #if CONFIG_IS_ENABLED(BLK)
424 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
426 int dev_num = block_dev->devnum;
428 lbaint_t cur, blocks_todo = blkcnt;
433 struct mmc *mmc = find_mmc_device(dev_num);
437 if (CONFIG_IS_ENABLED(MMC_TINY))
438 err = mmc_switch_part(mmc, block_dev->hwpart);
440 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
445 if ((start + blkcnt) > block_dev->lba) {
446 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
447 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
448 start + blkcnt, block_dev->lba);
453 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
454 pr_debug("%s: Failed to set blocklen\n", __func__);
459 cur = (blocks_todo > mmc->cfg->b_max) ?
460 mmc->cfg->b_max : blocks_todo;
461 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
462 pr_debug("%s: Failed to read blocks\n", __func__);
467 dst += cur * mmc->read_bl_len;
468 } while (blocks_todo > 0);
/*
 * mmc_go_idle() - reset the card to idle state with CMD0 (no response
 * expected).
 */
473 static int mmc_go_idle(struct mmc *mmc)
480 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
482 cmd.resp_type = MMC_RSP_NONE;
484 err = mmc_send_cmd(mmc, &cmd, NULL);
494 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - perform the SD signal-voltage switch sequence.
 *
 * For a 3.3V request, only the host I/O voltage is set (no CMD11).
 * Otherwise CMD11 is sent, the card's error bit checked, dat0 observed
 * going low, the clock gated per the SD spec while the host switches
 * voltage, and finally dat0 must return high to confirm success.
 */
495 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
501 * Send CMD11 only if the request is to switch the card to
504 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
505 return mmc_set_signal_voltage(mmc, signal_voltage);
507 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
509 cmd.resp_type = MMC_RSP_R1;
511 err = mmc_send_cmd(mmc, &cmd, NULL);
/* CMD11 error bit in the R1 response means the card refused the switch. */
515 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
519 * The card should drive cmd and dat[0:3] low immediately
520 * after the response of cmd11, but wait 100 us to be sure
522 err = mmc_wait_dat0(mmc, 0, 100);
529 * During a signal voltage level switch, the clock must be gated
530 * for 5 ms according to the SD spec
532 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
534 err = mmc_set_signal_voltage(mmc, signal_voltage);
538 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
540 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
543 * Failure to switch is indicated by the card holding
544 * dat[0:3] low. Wait for at least 1 ms according to spec
546 err = mmc_wait_dat0(mmc, 1, 1000);
556 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
563 cmd.cmdidx = MMC_CMD_APP_CMD;
564 cmd.resp_type = MMC_RSP_R1;
567 err = mmc_send_cmd(mmc, &cmd, NULL);
572 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
573 cmd.resp_type = MMC_RSP_R3;
576 * Most cards do not answer if some reserved bits
577 * in the ocr are set. However, Some controller
578 * can set bit 7 (reserved for low voltages), but
579 * how to manage low voltages SD card is not yet
582 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
583 (mmc->cfg->voltages & 0xff8000);
585 if (mmc->version == SD_VERSION_2)
586 cmd.cmdarg |= OCR_HCS;
589 cmd.cmdarg |= OCR_S18R;
591 err = mmc_send_cmd(mmc, &cmd, NULL);
596 if (cmd.response[0] & OCR_BUSY)
605 if (mmc->version != SD_VERSION_2)
606 mmc->version = SD_VERSION_1_0;
608 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
609 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
610 cmd.resp_type = MMC_RSP_R3;
613 err = mmc_send_cmd(mmc, &cmd, NULL);
619 mmc->ocr = cmd.response[0];
621 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
622 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
624 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
630 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_op_cond_iter() - one CMD1 (SEND_OP_COND) iteration.
 *
 * @use_arg: when true (and not SPI), advertise sector addressing (HCS)
 *           and the intersection of host voltages with the card's OCR.
 * On success the card's OCR is cached in mmc->ocr from response[0].
 */
636 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
641 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
642 cmd.resp_type = MMC_RSP_R3;
644 if (use_arg && !mmc_host_is_spi(mmc))
645 cmd.cmdarg = OCR_HCS |
646 (mmc->cfg->voltages &
647 (mmc->ocr & OCR_VOLTAGE_MASK)) |
648 (mmc->ocr & OCR_ACCESS_MODE);
650 err = mmc_send_cmd(mmc, &cmd, NULL);
653 mmc->ocr = cmd.response[0];
/*
 * mmc_send_op_cond() - start the eMMC CMD1 negotiation.
 *
 * Issues CMD1 twice (first with a zero argument to query, then with the
 * real argument) and, if the card is still busy, records the operation
 * as pending for mmc_complete_op_cond() to finish later.
 */
657 static int mmc_send_op_cond(struct mmc *mmc)
661 /* Some cards seem to need this */
664 /* Ask the card for its capabilities */
665 for (i = 0; i < 2; i++) {
666 err = mmc_send_op_cond_iter(mmc, i != 0);
670 /* exit if not busy (flag seems to be inverted) */
671 if (mmc->ocr & OCR_BUSY)
674 mmc->op_cond_pending = 1;
678 static int mmc_complete_op_cond(struct mmc *mmc)
685 mmc->op_cond_pending = 0;
686 if (!(mmc->ocr & OCR_BUSY)) {
687 /* Some cards seem to need this */
690 start = get_timer(0);
692 err = mmc_send_op_cond_iter(mmc, 1);
695 if (mmc->ocr & OCR_BUSY)
697 if (get_timer(start) > timeout)
703 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
704 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
705 cmd.resp_type = MMC_RSP_R3;
708 err = mmc_send_cmd(mmc, &cmd, NULL);
713 mmc->ocr = cmd.response[0];
716 mmc->version = MMC_VERSION_UNKNOWN;
718 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_ext_csd() - read the card's Extended CSD register.
 *
 * @ext_csd: caller-provided buffer of at least MMC_MAX_BLOCK_LEN (512)
 *           bytes, filled by a single-block read.
 */
725 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
728 struct mmc_data data;
731 /* Read the 512-byte Extended CSD register (SEND_EXT_CSD) */
732 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
733 cmd.resp_type = MMC_RSP_R1;
736 data.dest = (char *)ext_csd;
738 data.blocksize = MMC_MAX_BLOCK_LEN;
739 data.flags = MMC_DATA_READ;
741 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * __mmc_switch() - write one EXT_CSD byte via CMD6 and wait for the
 * card to finish the switch.
 *
 * The CMD6 busy timeout comes from GENERIC_CMD6_TIME (units of 10ms),
 * or PARTITION_SWITCH_TIME for partition switches, falling back to
 * DEFAULT_CMD6_TIMEOUT_MS. After issuing CMD6 (with retries), waits on
 * dat0 when the controller supports it, otherwise/additionally polls
 * CMD13 until ready or a SWITCH_ERROR is reported.
 */
746 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
749 unsigned int status, start;
751 int timeout = DEFAULT_CMD6_TIMEOUT_MS;
752 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
753 (index == EXT_CSD_PART_CONF);
757 if (mmc->gen_cmd6_time)
758 timeout = mmc->gen_cmd6_time * 10;
760 if (is_part_switch && mmc->part_switch_time)
761 timeout = mmc->part_switch_time * 10;
763 cmd.cmdidx = MMC_CMD_SWITCH;
764 cmd.resp_type = MMC_RSP_R1b;
765 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
770 ret = mmc_send_cmd(mmc, &cmd, NULL);
771 } while (ret && retries-- > 0);
776 start = get_timer(0);
778 /* poll dat0 for rdy/busy status */
779 ret = mmc_wait_dat0(mmc, 1, timeout);
780 if (ret && ret != -ENOSYS)
784 * In cases when not allowed to poll by using CMD13 or because we aren't
785 * capable of polling by using mmc_wait_dat0, then rely on waiting the
786 * stated timeout to be sufficient.
788 if (ret == -ENOSYS && !send_status)
791 /* Finally wait until the card is ready or indicates a failure
792 * to switch. It doesn't hurt to use CMD13 here even if send_status
793 * is false, because by now (after 'timeout' ms) the bus should be
797 ret = mmc_send_status(mmc, &status);
799 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
800 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
804 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
807 } while (get_timer(start) < timeout);
/*
 * mmc_switch() - public CMD6 wrapper; always verifies the switch by
 * sending status (send_status = true).
 */
812 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
814 return __mmc_switch(mmc, set, index, value, true);
817 #if !CONFIG_IS_ENABLED(MMC_TINY)
818 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
824 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
830 speed_bits = EXT_CSD_TIMING_HS;
832 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
834 speed_bits = EXT_CSD_TIMING_HS200;
837 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
839 speed_bits = EXT_CSD_TIMING_HS400;
843 speed_bits = EXT_CSD_TIMING_LEGACY;
849 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
850 speed_bits, !hsdowngrade);
854 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
855 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
857 * In case the eMMC is in HS200/HS400 mode and we are downgrading
858 * to HS mode, the card clock are still running much faster than
859 * the supported HS mode clock, so we can not reliably read out
860 * Extended CSD. Reconfigure the controller to run at HS mode.
863 mmc_select_mode(mmc, MMC_HS);
864 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
868 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
869 /* Now check to see that it worked */
870 err = mmc_send_ext_csd(mmc, test_csd);
874 /* No high-speed support */
875 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - derive the eMMC card's mode capabilities
 * from its cached EXT_CSD.
 *
 * Starts from 1-bit legacy, then adds 4/8-bit widths and the HS /
 * DDR52 / HS200 / HS400 modes advertised by EXT_CSD[CARD_TYPE]. SPI
 * hosts and pre-v4 cards keep the minimal capability set.
 */
882 static int mmc_get_capabilities(struct mmc *mmc)
884 u8 *ext_csd = mmc->ext_csd;
887 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
889 if (mmc_host_is_spi(mmc))
892 /* Only version 4 supports high-speed */
893 if (mmc->version < MMC_VERSION_4)
897 pr_err("No ext_csd found!\n"); /* this should never happen */
901 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
903 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
904 mmc->cardtype = cardtype;
906 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
907 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
908 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
909 mmc->card_caps |= MMC_MODE_HS200;
912 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
913 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
914 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
915 mmc->card_caps |= MMC_MODE_HS400;
918 if (cardtype & EXT_CSD_CARD_TYPE_52) {
919 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
920 mmc->card_caps |= MMC_MODE_DDR_52MHz;
921 mmc->card_caps |= MMC_MODE_HS_52MHz;
923 if (cardtype & EXT_CSD_CARD_TYPE_26)
924 mmc->card_caps |= MMC_MODE_HS;
930 static int mmc_set_capacity(struct mmc *mmc, int part_num)
934 mmc->capacity = mmc->capacity_user;
938 mmc->capacity = mmc->capacity_boot;
941 mmc->capacity = mmc->capacity_rpmb;
947 mmc->capacity = mmc->capacity_gp[part_num - 4];
953 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * mmc_switch_part() - select a hardware partition via the
 * PARTITION_ACCESS bits of EXT_CSD[PART_CONF].
 *
 * On success (or -ENODEV when returning to the raw device, part 0) the
 * reported capacity and the block descriptor's hwpart are updated.
 */
958 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
962 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
963 (mmc->part_config & ~PART_ACCESS_MASK)
964 | (part_num & PART_ACCESS_MASK));
967 * Set the capacity if the switch succeeded or was intended
968 * to return to representing the raw device.
970 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
971 ret = mmc_set_capacity(mmc, part_num);
972 mmc_get_blk_desc(mmc)->hwpart = part_num;
978 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
979 int mmc_hwpart_config(struct mmc *mmc,
980 const struct mmc_hwpart_conf *conf,
981 enum mmc_hwpart_conf_mode mode)
987 u32 max_enh_size_mult;
988 u32 tot_enh_size_mult = 0;
991 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
993 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
996 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
997 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1001 if (!(mmc->part_support & PART_SUPPORT)) {
1002 pr_err("Card does not support partitioning\n");
1003 return -EMEDIUMTYPE;
1006 if (!mmc->hc_wp_grp_size) {
1007 pr_err("Card does not define HC WP group size\n");
1008 return -EMEDIUMTYPE;
1011 /* check partition alignment and total enhanced size */
1012 if (conf->user.enh_size) {
1013 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1014 conf->user.enh_start % mmc->hc_wp_grp_size) {
1015 pr_err("User data enhanced area not HC WP group "
1019 part_attrs |= EXT_CSD_ENH_USR;
1020 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1021 if (mmc->high_capacity) {
1022 enh_start_addr = conf->user.enh_start;
1024 enh_start_addr = (conf->user.enh_start << 9);
1030 tot_enh_size_mult += enh_size_mult;
1032 for (pidx = 0; pidx < 4; pidx++) {
1033 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1034 pr_err("GP%i partition not HC WP group size "
1035 "aligned\n", pidx+1);
1038 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1039 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1040 part_attrs |= EXT_CSD_ENH_GP(pidx);
1041 tot_enh_size_mult += gp_size_mult[pidx];
1045 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1046 pr_err("Card does not support enhanced attribute\n");
1047 return -EMEDIUMTYPE;
1050 err = mmc_send_ext_csd(mmc, ext_csd);
1055 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1056 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1057 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1058 if (tot_enh_size_mult > max_enh_size_mult) {
1059 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1060 tot_enh_size_mult, max_enh_size_mult);
1061 return -EMEDIUMTYPE;
1064 /* The default value of EXT_CSD_WR_REL_SET is device
1065 * dependent, the values can only be changed if the
1066 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1067 * changed only once and before partitioning is completed. */
1068 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1069 if (conf->user.wr_rel_change) {
1070 if (conf->user.wr_rel_set)
1071 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1073 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1075 for (pidx = 0; pidx < 4; pidx++) {
1076 if (conf->gp_part[pidx].wr_rel_change) {
1077 if (conf->gp_part[pidx].wr_rel_set)
1078 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1080 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1084 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1085 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1086 puts("Card does not support host controlled partition write "
1087 "reliability settings\n");
1088 return -EMEDIUMTYPE;
1091 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1092 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1093 pr_err("Card already partitioned\n");
1097 if (mode == MMC_HWPART_CONF_CHECK)
1100 /* Partitioning requires high-capacity size definitions */
1101 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1102 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1103 EXT_CSD_ERASE_GROUP_DEF, 1);
1108 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1110 /* update erase group size to be high-capacity */
1111 mmc->erase_grp_size =
1112 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1116 /* all OK, write the configuration */
1117 for (i = 0; i < 4; i++) {
1118 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1119 EXT_CSD_ENH_START_ADDR+i,
1120 (enh_start_addr >> (i*8)) & 0xFF);
1124 for (i = 0; i < 3; i++) {
1125 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1126 EXT_CSD_ENH_SIZE_MULT+i,
1127 (enh_size_mult >> (i*8)) & 0xFF);
1131 for (pidx = 0; pidx < 4; pidx++) {
1132 for (i = 0; i < 3; i++) {
1133 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1134 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1135 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1140 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1141 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1145 if (mode == MMC_HWPART_CONF_SET)
1148 /* The WR_REL_SET is a write-once register but shall be
1149 * written before setting PART_SETTING_COMPLETED. As it is
1150 * write-once we can only write it when completing the
1152 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1153 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1154 EXT_CSD_WR_REL_SET, wr_rel_set);
1159 /* Setting PART_SETTING_COMPLETED confirms the partition
1160 * configuration but it only becomes effective after power
1161 * cycle, so we do not adjust the partition related settings
1162 * in the mmc struct. */
1164 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1165 EXT_CSD_PARTITION_SETTING,
1166 EXT_CSD_PARTITION_SETTING_COMPLETED);
1174 #if !CONFIG_IS_ENABLED(DM_MMC)
1175 int mmc_getcd(struct mmc *mmc)
1179 cd = board_mmc_getcd(mmc);
1182 if (mmc->cfg->ops->getcd)
1183 cd = mmc->cfg->ops->getcd(mmc);
1192 #if !CONFIG_IS_ENABLED(MMC_TINY)
1193 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1196 struct mmc_data data;
1198 /* Switch the frequency */
1199 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1200 cmd.resp_type = MMC_RSP_R1;
1201 cmd.cmdarg = (mode << 31) | 0xffffff;
1202 cmd.cmdarg &= ~(0xf << (group * 4));
1203 cmd.cmdarg |= value << (group * 4);
1205 data.dest = (char *)resp;
1206 data.blocksize = 64;
1208 data.flags = MMC_DATA_READ;
1210 return mmc_send_cmd(mmc, &cmd, &data);
1213 static int sd_get_capabilities(struct mmc *mmc)
1217 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1218 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1219 struct mmc_data data;
1221 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1225 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1227 if (mmc_host_is_spi(mmc))
1230 /* Read the SCR to find out if this card supports higher speeds */
1231 cmd.cmdidx = MMC_CMD_APP_CMD;
1232 cmd.resp_type = MMC_RSP_R1;
1233 cmd.cmdarg = mmc->rca << 16;
1235 err = mmc_send_cmd(mmc, &cmd, NULL);
1240 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1241 cmd.resp_type = MMC_RSP_R1;
1247 data.dest = (char *)scr;
1250 data.flags = MMC_DATA_READ;
1252 err = mmc_send_cmd(mmc, &cmd, &data);
1261 mmc->scr[0] = __be32_to_cpu(scr[0]);
1262 mmc->scr[1] = __be32_to_cpu(scr[1]);
1264 switch ((mmc->scr[0] >> 24) & 0xf) {
1266 mmc->version = SD_VERSION_1_0;
1269 mmc->version = SD_VERSION_1_10;
1272 mmc->version = SD_VERSION_2;
1273 if ((mmc->scr[0] >> 15) & 0x1)
1274 mmc->version = SD_VERSION_3;
1277 mmc->version = SD_VERSION_1_0;
1281 if (mmc->scr[0] & SD_DATA_4BIT)
1282 mmc->card_caps |= MMC_MODE_4BIT;
1284 /* Version 1.0 doesn't support switching */
1285 if (mmc->version == SD_VERSION_1_0)
1290 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1291 (u8 *)switch_status);
1296 /* The high-speed function is busy. Try again */
1297 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1301 /* If high-speed isn't supported, we return */
1302 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1303 mmc->card_caps |= MMC_CAP(SD_HS);
1305 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1306 /* Version before 3.0 don't support UHS modes */
1307 if (mmc->version < SD_VERSION_3)
1310 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1311 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1312 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1313 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1314 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1315 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1316 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1317 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1318 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1319 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1320 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1326 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1330 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1333 /* SD version 1.00 and 1.01 does not support CMD 6 */
1334 if (mmc->version == SD_VERSION_1_0)
1339 speed = UHS_SDR12_BUS_SPEED;
1342 speed = HIGH_SPEED_BUS_SPEED;
1344 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1346 speed = UHS_SDR12_BUS_SPEED;
1349 speed = UHS_SDR25_BUS_SPEED;
1352 speed = UHS_SDR50_BUS_SPEED;
1355 speed = UHS_DDR50_BUS_SPEED;
1358 speed = UHS_SDR104_BUS_SPEED;
1365 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1369 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1375 static int sd_select_bus_width(struct mmc *mmc, int w)
1380 if ((w != 4) && (w != 1))
1383 cmd.cmdidx = MMC_CMD_APP_CMD;
1384 cmd.resp_type = MMC_RSP_R1;
1385 cmd.cmdarg = mmc->rca << 16;
1387 err = mmc_send_cmd(mmc, &cmd, NULL);
1391 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1392 cmd.resp_type = MMC_RSP_R1;
1397 err = mmc_send_cmd(mmc, &cmd, NULL);
1405 #if CONFIG_IS_ENABLED(MMC_WRITE)
1406 static int sd_read_ssr(struct mmc *mmc)
1408 static const unsigned int sd_au_size[] = {
1409 0, SZ_16K / 512, SZ_32K / 512,
1410 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1411 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1412 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1413 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1418 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1419 struct mmc_data data;
1421 unsigned int au, eo, et, es;
1423 cmd.cmdidx = MMC_CMD_APP_CMD;
1424 cmd.resp_type = MMC_RSP_R1;
1425 cmd.cmdarg = mmc->rca << 16;
1427 err = mmc_send_cmd(mmc, &cmd, NULL);
1431 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1432 cmd.resp_type = MMC_RSP_R1;
1436 data.dest = (char *)ssr;
1437 data.blocksize = 64;
1439 data.flags = MMC_DATA_READ;
1441 err = mmc_send_cmd(mmc, &cmd, &data);
1449 for (i = 0; i < 16; i++)
1450 ssr[i] = be32_to_cpu(ssr[i]);
1452 au = (ssr[2] >> 12) & 0xF;
1453 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1454 mmc->ssr.au = sd_au_size[au];
1455 es = (ssr[3] >> 24) & 0xFF;
1456 es |= (ssr[2] & 0xFF) << 8;
1457 et = (ssr[3] >> 18) & 0x3F;
1459 eo = (ssr[3] >> 16) & 0x3;
1460 mmc->ssr.erase_timeout = (et * 1000) / es;
1461 mmc->ssr.erase_offset = eo * 1000;
1464 pr_debug("Invalid Allocation Unit Size.\n");
1470 /* frequency bases */
1471 /* divided by 10 to be nice to platforms without floating point */
1472 static const int fbase[] = {
1479 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1480 * to platforms without floating point.
1482 static const u8 multipliers[] = {
1501 static inline int bus_width(uint cap)
1503 if (cap == MMC_MODE_8BIT)
1505 if (cap == MMC_MODE_4BIT)
1507 if (cap == MMC_MODE_1BIT)
1509 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1513 #if !CONFIG_IS_ENABLED(DM_MMC)
1514 #ifdef MMC_SUPPORTS_TUNING
1515 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1521 static int mmc_set_ios(struct mmc *mmc)
1525 if (mmc->cfg->ops->set_ios)
1526 ret = mmc->cfg->ops->set_ios(mmc);
/*
 * mmc_set_clock() - set the bus clock, clamped to the host's
 * [f_min, f_max] range, and apply it via set_ios.
 *
 * @disable: gates the clock (MMC_CLK_DISABLE) instead of running it.
 */
1532 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1535 if (clock > mmc->cfg->f_max)
1536 clock = mmc->cfg->f_max;
1538 if (clock < mmc->cfg->f_min)
1539 clock = mmc->cfg->f_min;
1543 mmc->clk_disable = disable;
1545 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1547 return mmc_set_ios(mmc);
1550 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1552 mmc->bus_width = width;
1554 return mmc_set_ios(mmc);
1557 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1559 * helper function to display the capabilities in a human
1560 * friendly manner. The capabilities include bus width and
1563 void mmc_dump_capabilities(const char *text, uint caps)
1567 pr_debug("%s: widths [", text);
1568 if (caps & MMC_MODE_8BIT)
1570 if (caps & MMC_MODE_4BIT)
1572 if (caps & MMC_MODE_1BIT)
1574 pr_debug("\b\b] modes [");
1575 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1576 if (MMC_CAP(mode) & caps)
1577 pr_debug("%s, ", mmc_mode_name(mode));
1578 pr_debug("\b\b]\n");
1582 struct mode_width_tuning {
1585 #ifdef MMC_SUPPORTS_TUNING
1590 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_voltage_to_mv() - convert an mmc_voltage enum value to millivolts
 * (0 / 3300 / 1800 / 1200). The default case is not visible in this
 * dump; presumably it returns an error code -- confirm upstream.
 */
1591 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1594 case MMC_SIGNAL_VOLTAGE_000: return 0;
1595 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1596 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1597 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * mmc_set_signal_voltage() - set the host's I/O signal voltage via
 * set_ios. A no-op when the requested voltage is already in effect;
 * failures are logged at debug level.
 */
1602 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1606 if (mmc->signal_voltage == signal_voltage)
1609 mmc->signal_voltage = signal_voltage;
1610 err = mmc_set_ios(mmc);
1612 pr_debug("unable to set voltage (err %d)\n", err);
1617 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1623 #if !CONFIG_IS_ENABLED(MMC_TINY)
1624 static const struct mode_width_tuning sd_modes_by_pref[] = {
1625 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1626 #ifdef MMC_SUPPORTS_TUNING
1629 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1630 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1635 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1639 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1643 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1648 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1650 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1653 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1658 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1662 #define for_each_sd_mode_by_pref(caps, mwt) \
1663 for (mwt = sd_modes_by_pref;\
1664 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1666 if (caps & MMC_CAP(mwt->mode))
/*
 * Pick the best SD bus mode and width supported by both card and host.
 * Modes are tried in preference order and, within a mode, wider buses
 * first; the first working combination wins.  On total failure the bus
 * is reverted to SD_LEGACY.  Returns 0 on success.
 * NOTE(review): listing is elided -- error-path returns and closing
 * braces are not fully visible here.
 */
1668 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1671 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1672 const struct mode_width_tuning *mwt;
1673 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS usable only if the card advertised 1.8V switching (S18R in OCR) */
1674 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1676 bool uhs_en = false;
1681 mmc_dump_capabilities("sd card", card_caps);
1682 mmc_dump_capabilities("host", mmc->host_caps);
1685 /* Restrict card's capabilities by what the host can do */
1686 caps = card_caps & mmc->host_caps;
1691 for_each_sd_mode_by_pref(caps, mwt) {
1694 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1695 if (*w & caps & mwt->widths) {
1696 pr_debug("trying mode %s width %d (at %d MHz)\n",
1697 mmc_mode_name(mwt->mode),
1699 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1701 /* configure the bus width (card + host) */
1702 err = sd_select_bus_width(mmc, bus_width(*w));
1705 mmc_set_bus_width(mmc, bus_width(*w));
1707 /* configure the bus mode (card) */
1708 err = sd_set_card_speed(mmc, mwt->mode);
1712 /* configure the bus mode (host) */
1713 mmc_select_mode(mmc, mwt->mode);
1714 mmc_set_clock(mmc, mmc->tran_speed,
1717 #ifdef MMC_SUPPORTS_TUNING
1718 /* execute tuning if needed */
1719 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1720 err = mmc_execute_tuning(mmc,
1723 pr_debug("tuning failed\n");
1729 #if CONFIG_IS_ENABLED(MMC_WRITE)
/* SSR read failure is non-fatal: only warn */
1730 err = sd_read_ssr(mmc);
1732 pr_warn("unable to read ssr\n");
1738 /* revert to a safer bus speed */
1739 mmc_select_mode(mmc, SD_LEGACY);
1740 mmc_set_clock(mmc, mmc->tran_speed,
1746 pr_err("unable to select a mode\n");
1751 * read and compare the part of ext csd that is constant.
1752 * This can be used to check that the transfer is working
/*
 * Re-read EXT_CSD and compare its read-only fields against the cached
 * copy in mmc->ext_csd, to verify that data transfers over the newly
 * configured bus are reliable.
 */
1755 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1758 const u8 *ext_csd = mmc->ext_csd;
1759 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD only exists on MMC version 4 and newer */
1761 if (mmc->version < MMC_VERSION_4)
1764 err = mmc_send_ext_csd(mmc, test_csd);
1768 /* Only compare read only fields */
1769 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1770 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1771 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1772 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1773 ext_csd[EXT_CSD_REV]
1774 == test_csd[EXT_CSD_REV] &&
1775 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1776 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1777 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1778 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1784 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Build the mask of signal voltages the card supports for @mode (from
 * EXT_CSD card type bits), intersect it with @allowed_mask, and try to
 * switch to the lowest matching voltage (ffs picks the lowest set bit).
 * Bits of voltages that fail to switch are cleared and the next one is
 * tried.
 */
1785 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1786 uint32_t allowed_mask)
1793 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1794 EXT_CSD_CARD_TYPE_HS400_1_8V))
1795 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1796 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1797 EXT_CSD_CARD_TYPE_HS400_1_2V))
1798 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1801 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1802 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1803 MMC_SIGNAL_VOLTAGE_180;
1804 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1805 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1808 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1812 while (card_mask & allowed_mask) {
1813 enum mmc_voltage best_match;
1815 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1816 if (!mmc_set_signal_voltage(mmc, best_match))
/* switching failed: drop this voltage and try the next */
1819 allowed_mask &= ~best_match;
/* No-op stub when MMC_IO_VOLTAGE is not compiled in. */
1825 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1826 uint32_t allowed_mask)
/*
 * eMMC bus modes in order of decreasing preference.
 * NOTE(review): listing is elided -- the .mode fields and initializer
 * braces are not visible here.
 */
1832 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1833 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1836 .widths = MMC_MODE_8BIT,
1837 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1840 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1843 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1844 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1849 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1853 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1857 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1861 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Walk mmc_modes_by_pref, visiting only the modes present in @caps. */
1865 #define for_each_mmc_mode_by_pref(caps, mwt) \
1866 for (mwt = mmc_modes_by_pref;\
1867 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1869 if (caps & MMC_CAP(mwt->mode))
/*
 * Map a host width capability + DDR flag onto the value to program into
 * the EXT_CSD BUS_WIDTH byte, widest entries first.
 */
1871 static const struct ext_csd_bus_width {
1875 } ext_csd_bus_width[] = {
1876 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1877 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1878 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1879 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1880 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1883 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Switch an eMMC device into HS400.  Sequence: tune in HS200 first, drop
 * back to HS, enable DDR 8-bit bus width, then switch card and host to
 * HS400 timing.  Returns 0 on success.
 */
1884 static int mmc_select_hs400(struct mmc *mmc)
1888 /* Set timing to HS200 for tuning */
1889 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1893 /* configure the bus mode (host) */
1894 mmc_select_mode(mmc, MMC_HS_200);
1895 mmc_set_clock(mmc, mmc->tran_speed, false);
1897 /* execute tuning if needed */
1898 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1900 debug("tuning failed\n");
1904 /* Set back to HS */
1905 mmc_set_card_speed(mmc, MMC_HS, true);
/* enable the DDR 8-bit bus width required by HS400 */
1907 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1908 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1912 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1916 mmc_select_mode(mmc, MMC_HS_400);
1917 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub when HS400 support is not compiled in.
 * NOTE(review): body elided in this listing. */
1924 static int mmc_select_hs400(struct mmc *mmc)
/* Walk ext_csd_bus_width entries whose DDR flag matches @ddr and whose
 * width capability is present in @caps (widest first). */
1930 #define for_each_supported_width(caps, ddr, ecbv) \
1931 for (ecbv = ext_csd_bus_width;\
1932 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1934 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Pick the best eMMC bus mode and width supported by both card and host.
 * The card is first downgraded out of HS200/HS400 if necessary, then
 * modes are tried in preference order (widest bus first); each candidate
 * is verified with an EXT_CSD read-back before being accepted.  On
 * failure of a candidate the bus reverts to 1-bit legacy and the next
 * combination is tried.
 * NOTE(review): listing is elided -- error-path returns and closing
 * braces are not fully visible here.
 */
1936 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1939 const struct mode_width_tuning *mwt;
1940 const struct ext_csd_bus_width *ecbw;
1943 mmc_dump_capabilities("mmc", card_caps);
1944 mmc_dump_capabilities("host", mmc->host_caps);
1947 /* Restrict card's capabilities by what the host can do */
1948 card_caps &= mmc->host_caps;
1950 /* Only version 4 of MMC supports wider bus widths */
1951 if (mmc->version < MMC_VERSION_4)
1954 if (!mmc->ext_csd) {
1955 pr_debug("No ext_csd found!\n"); /* this should never happen */
1959 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
1960 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1962 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
1963 * before doing anything else, since a transition from either of
1964 * the HS200/HS400 mode directly to legacy mode is not supported.
1966 if (mmc->selected_mode == MMC_HS_200 ||
1967 mmc->selected_mode == MMC_HS_400)
1968 mmc_set_card_speed(mmc, MMC_HS, true);
1971 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1973 for_each_mmc_mode_by_pref(card_caps, mwt) {
1974 for_each_supported_width(card_caps & mwt->widths,
1975 mmc_is_mode_ddr(mwt->mode), ecbw) {
1976 enum mmc_voltage old_voltage;
1977 pr_debug("trying mode %s width %d (at %d MHz)\n",
1978 mmc_mode_name(mwt->mode),
1979 bus_width(ecbw->cap),
1980 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so it can be restored on failure */
1981 old_voltage = mmc->signal_voltage;
1982 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1983 MMC_ALL_SIGNAL_VOLTAGE);
1987 /* configure the bus width (card + host) */
1988 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1990 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1993 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1995 if (mwt->mode == MMC_HS_400) {
1996 err = mmc_select_hs400(mmc);
1998 printf("Select HS400 failed %d\n", err);
2002 /* configure the bus speed (card) */
2003 err = mmc_set_card_speed(mmc, mwt->mode, false);
2008 * configure the bus width AND the ddr mode
2009 * (card). The host side will be taken care
2010 * of in the next step
2012 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2013 err = mmc_switch(mmc,
2014 EXT_CSD_CMD_SET_NORMAL,
2016 ecbw->ext_csd_bits);
2021 /* configure the bus mode (host) */
2022 mmc_select_mode(mmc, mwt->mode);
2023 mmc_set_clock(mmc, mmc->tran_speed,
2025 #ifdef MMC_SUPPORTS_TUNING
2027 /* execute tuning if needed */
2029 err = mmc_execute_tuning(mmc,
2032 pr_debug("tuning failed\n");
2039 /* do a transfer to check the configuration */
2040 err = mmc_read_and_compare_ext_csd(mmc);
2044 mmc_set_signal_voltage(mmc, old_voltage);
2045 /* if an error occurred, revert to a safer bus mode */
2046 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2047 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2048 mmc_select_mode(mmc, MMC_LEGACY);
2049 mmc_set_bus_width(mmc, 1);
2053 pr_err("unable to select a mode\n");
2059 #if CONFIG_IS_ENABLED(MMC_TINY)
/* Static EXT_CSD backing store for MMC_TINY builds (avoids malloc). */
2060 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * MMC v4+ startup: read EXT_CSD and derive version, capacities,
 * partition configuration and erase/WP group sizes from it.
 * For SD cards or pre-v4 MMC this is a no-op.
 * NOTE(review): listing is elided -- early-return paths and closing
 * braces are not fully visible here.
 */
2063 static int mmc_startup_v4(struct mmc *mmc)
2067 bool has_parts = false;
2068 bool part_completed;
2069 static const u32 mmc_versions[] = {
2081 #if CONFIG_IS_ENABLED(MMC_TINY)
2082 u8 *ext_csd = ext_csd_bkup;
2084 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2088 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2090 err = mmc_send_ext_csd(mmc, ext_csd);
2094 /* store the ext csd for future reference */
2096 mmc->ext_csd = ext_csd;
2098 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2100 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2103 /* check ext_csd version and capacity */
2104 err = mmc_send_ext_csd(mmc, ext_csd);
2108 /* store the ext csd for future reference */
2110 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2113 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD_REV indexes mmc_versions[]; reject unknown revisions */
2115 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2118 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2120 if (mmc->version >= MMC_VERSION_4_2) {
2122 * According to the JEDEC Standard, the value of
2123 * ext_csd's capacity is valid if the value is more
2126 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2127 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2128 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2129 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2130 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_COUNT capacity only trusted above 2 GiB (see JEDEC note above) */
2131 if ((capacity >> 20) > 2 * 1024)
2132 mmc->capacity_user = capacity;
2135 if (mmc->version >= MMC_VERSION_4_5)
2136 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2138 /* The partition data may be non-zero but it is only
2139 * effective if PARTITION_SETTING_COMPLETED is set in
2140 * EXT_CSD, so ignore any data if this bit is not set,
2141 * except for enabling the high-capacity group size
2142 * definition (see below).
2144 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2145 EXT_CSD_PARTITION_SETTING_COMPLETED);
2147 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2148 /* Some eMMC set the value too low so set a minimum */
2149 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2150 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2152 /* store the partition info of emmc */
2153 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2154 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2155 ext_csd[EXT_CSD_BOOT_MULT])
2156 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2157 if (part_completed &&
2158 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2159 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* boot/RPMB multipliers are in 128 KiB units (<< 17) */
2161 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2163 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* general-purpose partitions: 3-byte size multiplier each */
2165 for (i = 0; i < 4; i++) {
2166 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2167 uint mult = (ext_csd[idx + 2] << 16) +
2168 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2171 if (!part_completed)
2173 mmc->capacity_gp[i] = mult;
2174 mmc->capacity_gp[i] *=
2175 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2176 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2177 mmc->capacity_gp[i] <<= 19;
2180 #ifndef CONFIG_SPL_BUILD
2181 if (part_completed) {
2182 mmc->enh_user_size =
2183 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2184 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2185 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2186 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2187 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2188 mmc->enh_user_size <<= 19;
2189 mmc->enh_user_start =
2190 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2191 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2192 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2193 ext_csd[EXT_CSD_ENH_START_ADDR];
2194 if (mmc->high_capacity)
2195 mmc->enh_user_start <<= 9;
2200 * Host needs to enable ERASE_GRP_DEF bit if device is
2201 * partitioned. This bit will be lost every time after a reset
2202 * or power off. This will affect erase size.
2206 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2207 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2210 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2211 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy in sync with the switch above */
2216 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2219 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2220 #if CONFIG_IS_ENABLED(MMC_WRITE)
2221 /* Read out group size from ext_csd */
2222 mmc->erase_grp_size =
2223 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2226 * if high capacity and partition setting completed
2227 * SEC_COUNT is valid even if it is smaller than 2 GiB
2228 * JEDEC Standard JESD84-B45, 6.2.4
2230 if (mmc->high_capacity && part_completed) {
2231 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2232 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2233 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2234 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2235 capacity *= MMC_MAX_BLOCK_LEN;
2236 mmc->capacity_user = capacity;
2239 #if CONFIG_IS_ENABLED(MMC_WRITE)
2241 /* Calculate the group size from the csd value. */
2242 int erase_gsz, erase_gmul;
2244 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2245 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2246 mmc->erase_grp_size = (erase_gsz + 1)
2250 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2251 mmc->hc_wp_grp_size = 1024
2252 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2253 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2256 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2261 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* error path: drop the cached EXT_CSD */
2264 mmc->ext_csd = NULL;
/*
 * Bring an identified card up to operational state: read CID and CSD,
 * assign/obtain the RCA, derive version/speed/capacity from the CSD,
 * select the card into Transfer state, run the v4 EXT_CSD parsing, pick
 * the best bus mode/width and finally fill in the block descriptor.
 * NOTE(review): listing is elided -- error-path returns and closing
 * braces are not fully visible here.
 */
2269 static int mmc_startup(struct mmc *mmc)
2275 struct blk_desc *bdesc;
2277 #ifdef CONFIG_MMC_SPI_CRC_ON
2278 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2279 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2280 cmd.resp_type = MMC_RSP_R1;
2282 err = mmc_send_cmd(mmc, &cmd, NULL);
2288 /* Put the Card in Identify Mode */
2289 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2290 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2291 cmd.resp_type = MMC_RSP_R2;
2294 err = mmc_send_cmd(mmc, &cmd, NULL);
2296 #ifdef CONFIG_MMC_QUIRKS
2297 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2300 * It has been seen that SEND_CID may fail on the first
2301 * attempt, let's try a few more times
2304 err = mmc_send_cmd(mmc, &cmd, NULL);
2307 } while (retries--);
2314 memcpy(mmc->cid, cmd.response, 16);
2317 * For MMC cards, set the Relative Address.
2318 * For SD cards, get the Relative Address.
2319 * This also puts the cards into Standby State
2321 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2322 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2323 cmd.cmdarg = mmc->rca << 16;
2324 cmd.resp_type = MMC_RSP_R6;
2326 err = mmc_send_cmd(mmc, &cmd, NULL);
2332 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2335 /* Get the Card-Specific Data */
2336 cmd.cmdidx = MMC_CMD_SEND_CSD;
2337 cmd.resp_type = MMC_RSP_R2;
2338 cmd.cmdarg = mmc->rca << 16;
2340 err = mmc_send_cmd(mmc, &cmd, NULL);
2345 mmc->csd[0] = cmd.response[0];
2346 mmc->csd[1] = cmd.response[1];
2347 mmc->csd[2] = cmd.response[2];
2348 mmc->csd[3] = cmd.response[3];
/* spec version is encoded in CSD bits [29:26] */
2350 if (mmc->version == MMC_VERSION_UNKNOWN) {
2351 int version = (cmd.response[0] >> 26) & 0xf;
2355 mmc->version = MMC_VERSION_1_2;
2358 mmc->version = MMC_VERSION_1_4;
2361 mmc->version = MMC_VERSION_2_2;
2364 mmc->version = MMC_VERSION_3;
2367 mmc->version = MMC_VERSION_4;
2370 mmc->version = MMC_VERSION_1_2;
2375 /* divide frequency by 10, since the mults are 10x bigger */
2376 freq = fbase[(cmd.response[0] & 0x7)];
2377 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2379 mmc->legacy_speed = freq * mult;
2380 mmc_select_mode(mmc, MMC_LEGACY);
2382 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2383 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2384 #if CONFIG_IS_ENABLED(MMC_WRITE)
2387 mmc->write_bl_len = mmc->read_bl_len;
2389 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2392 if (mmc->high_capacity) {
2393 csize = (mmc->csd[1] & 0x3f) << 16
2394 | (mmc->csd[2] & 0xffff0000) >> 16;
2397 csize = (mmc->csd[1] & 0x3ff) << 2
2398 | (mmc->csd[2] & 0xc0000000) >> 30;
2399 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2402 mmc->capacity_user = (csize + 1) << (cmult + 2);
2403 mmc->capacity_user *= mmc->read_bl_len;
2404 mmc->capacity_boot = 0;
2405 mmc->capacity_rpmb = 0;
2406 for (i = 0; i < 4; i++)
2407 mmc->capacity_gp[i] = 0;
/* clamp block lengths to the driver maximum */
2409 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2410 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2412 #if CONFIG_IS_ENABLED(MMC_WRITE)
2413 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2414 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only if the card implements it and one was given */
2417 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2418 cmd.cmdidx = MMC_CMD_SET_DSR;
2419 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2420 cmd.resp_type = MMC_RSP_NONE;
2421 if (mmc_send_cmd(mmc, &cmd, NULL))
2422 pr_warn("MMC: SET_DSR failed\n");
2425 /* Select the card, and put it into Transfer Mode */
2426 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2427 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2428 cmd.resp_type = MMC_RSP_R1;
2429 cmd.cmdarg = mmc->rca << 16;
2430 err = mmc_send_cmd(mmc, &cmd, NULL);
2437 * For SD, its erase group is always one sector
2439 #if CONFIG_IS_ENABLED(MMC_WRITE)
2440 mmc->erase_grp_size = 1;
2442 mmc->part_config = MMCPART_NOAVAILABLE;
2444 err = mmc_startup_v4(mmc);
2448 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2452 #if CONFIG_IS_ENABLED(MMC_TINY)
2453 mmc_set_clock(mmc, mmc->legacy_speed, false);
2454 mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2455 mmc_set_bus_width(mmc, 1);
2458 err = sd_get_capabilities(mmc);
2461 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2463 err = mmc_get_capabilities(mmc);
2466 mmc_select_mode_and_width(mmc, mmc->card_caps);
2472 mmc->best_mode = mmc->selected_mode;
2474 /* Fix the block length for DDR mode */
2475 if (mmc->ddr_mode) {
2476 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2477 #if CONFIG_IS_ENABLED(MMC_WRITE)
2478 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2482 /* fill in device description */
2483 bdesc = mmc_get_blk_desc(mmc);
2487 bdesc->blksz = mmc->read_bl_len;
2488 bdesc->log2blksz = LOG2(bdesc->blksz);
2489 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2490 #if !defined(CONFIG_SPL_BUILD) || \
2491 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2492 !defined(CONFIG_USE_TINY_PRINTF))
/* decode manufacturer/serial/product/revision from the CID */
2493 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2494 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2495 (mmc->cid[3] >> 16) & 0xffff);
2496 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2497 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2498 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2499 (mmc->cid[2] >> 24) & 0xff);
2500 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2501 (mmc->cid[2] >> 16) & 0xf);
2503 bdesc->vendor[0] = 0;
2504 bdesc->product[0] = 0;
2505 bdesc->revision[0] = 0;
2508 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * CMD8 (SEND_IF_COND): probe for an SD v2 card.  The card must echo the
 * 0xaa check pattern; on success the card is marked SD_VERSION_2.
 */
2515 static int mmc_send_if_cond(struct mmc *mmc)
2520 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2521 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2522 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2523 cmd.resp_type = MMC_RSP_R7;
2525 err = mmc_send_cmd(mmc, &cmd, NULL);
/* card must echo the check pattern back in the R7 response */
2530 if ((cmd.response[0] & 0xff) != 0xaa)
2533 mmc->version = SD_VERSION_2;
2538 #if !CONFIG_IS_ENABLED(DM_MMC)
2539 /* Board-specific MMC power initialization; weak default, boards may override. */
2540 __weak void board_mmc_power_init(void)
/*
 * Resolve the card's power supplies.  With driver model, look up the
 * vmmc/vqmmc regulators from the device tree (their absence is logged
 * but not fatal); without DM, defer to board_mmc_power_init().
 */
2545 static int mmc_power_init(struct mmc *mmc)
2547 #if CONFIG_IS_ENABLED(DM_MMC)
2548 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2551 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2554 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2556 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2557 &mmc->vqmmc_supply);
2559 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2561 #else /* !CONFIG_DM_MMC */
2563 * Driver model should use a regulator, as above, rather than calling
2564 * out to board code.
2566 board_mmc_power_init();
2572 * put the host in the initial state:
2573 * - turn on Vdd (card power supply)
2574 * - configure the bus width and clock to minimal values
2576 static void mmc_set_initial_state(struct mmc *mmc)
2580 /* First try to set 3.3V. If it fails set to 1.8V */
2581 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2583 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2585 pr_warn("mmc: failed to set signal voltage\n");
/* start from the slowest, narrowest configuration */
2587 mmc_select_mode(mmc, MMC_LEGACY);
2588 mmc_set_bus_width(mmc, 1);
2589 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the card's Vdd supply when a DM vmmc regulator is present. */
2592 static int mmc_power_on(struct mmc *mmc)
2594 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2595 if (mmc->vmmc_supply) {
2596 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2599 puts("Error enabling VMMC supply\n");
/* Gate the bus clock and disable the vmmc regulator, if any. */
2607 static int mmc_power_off(struct mmc *mmc)
2609 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2610 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2611 if (mmc->vmmc_supply) {
2612 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2615 pr_debug("Error disabling VMMC supply\n");
/* Full power cycle: off, short settle delay, then on again. */
2623 static int mmc_power_cycle(struct mmc *mmc)
2627 ret = mmc_power_off(mmc);
2631 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2632 * to be on the safer side.
2635 return mmc_power_on(mmc);
/*
 * Power the card and obtain its operating conditions: power-cycle (or
 * just power on when cycling is unsupported, in which case UHS is
 * disabled for safety), reset via CMD0, try SD (CMD8 + ACMD41) and fall
 * back to MMC CMD1 on timeout.
 * NOTE(review): listing is elided -- error-path returns are not fully
 * visible here.
 */
2638 int mmc_get_op_cond(struct mmc *mmc)
2640 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2646 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2647 mmc_adapter_card_type_ident();
2649 err = mmc_power_init(mmc);
2653 #ifdef CONFIG_MMC_QUIRKS
2654 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2655 MMC_QUIRK_RETRY_SEND_CID;
2658 err = mmc_power_cycle(mmc);
2661 * if power cycling is not supported, we should not try
2662 * to use the UHS modes, because we wouldn't be able to
2663 * recover from an error during the UHS initialization.
2665 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2667 mmc->host_caps &= ~UHS_CAPS;
2668 err = mmc_power_on(mmc);
2673 #if CONFIG_IS_ENABLED(DM_MMC)
2674 /* The device has already been probed ready for use */
2676 /* made sure it's not NULL earlier */
2677 err = mmc->cfg->ops->init(mmc);
2684 mmc_set_initial_state(mmc);
2686 /* Reset the Card */
2687 err = mmc_go_idle(mmc);
2692 /* The internal partition reset to user partition(0) at every CMD0*/
2693 mmc_get_blk_desc(mmc)->hwpart = 0;
2695 /* Test for SD version 2 */
2696 err = mmc_send_if_cond(mmc);
2698 /* Now try to get the SD card's operating condition */
2699 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS attempt needs a power cycle before retrying */
2700 if (err && uhs_en) {
2702 mmc_power_cycle(mmc);
2706 /* If the command timed out, we check for an MMC card */
2707 if (err == -ETIMEDOUT) {
2708 err = mmc_send_op_cond(mmc);
2711 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2712 pr_err("Card did not respond to voltage select!\n");
/*
 * First (possibly asynchronous) half of initialization: verify card
 * presence, query operating conditions and flag init as in progress;
 * mmc_complete_init() finishes the job.
 */
2721 int mmc_start_init(struct mmc *mmc)
2727 * all hosts are capable of 1 bit bus-width and able to use the legacy
2730 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2731 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2733 #if !defined(CONFIG_MMC_BROKEN_CD)
2734 /* we pretend there's no card when init is NULL */
2735 no_card = mmc_getcd(mmc) == 0;
2739 #if !CONFIG_IS_ENABLED(DM_MMC)
2740 no_card = no_card || (mmc->cfg->ops->init == NULL);
2744 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2745 pr_err("MMC: no card present\n");
2750 err = mmc_get_op_cond(mmc);
2753 mmc->init_in_progress = 1;
/*
 * Second half of initialization: finish a pending OCR negotiation if
 * any, then run the full startup sequence.
 */
2758 static int mmc_complete_init(struct mmc *mmc)
2762 mmc->init_in_progress = 0;
2763 if (mmc->op_cond_pending)
2764 err = mmc_complete_op_cond(mmc);
2767 err = mmc_startup(mmc);
/*
 * Public entry point: run both halves of initialization (start if not
 * already in progress, then complete) and log the elapsed time.
 */
2775 int mmc_init(struct mmc *mmc)
2778 __maybe_unused ulong start;
2779 #if CONFIG_IS_ENABLED(DM_MMC)
2780 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2787 start = get_timer(0);
2789 if (!mmc->init_in_progress)
2790 err = mmc_start_init(mmc);
2793 err = mmc_complete_init(mmc);
2795 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2800 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2801 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2802 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Downgrade the card out of UHS (SD) or HS200/HS400 (eMMC) by
 * re-selecting a mode with those capabilities masked off, so a later
 * re-initialization starts from a safe bus state.
 */
2803 int mmc_deinit(struct mmc *mmc)
2811 caps_filtered = mmc->card_caps &
2812 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2813 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2814 MMC_CAP(UHS_SDR104));
2816 return sd_select_mode_and_width(mmc, caps_filtered);
2818 caps_filtered = mmc->card_caps &
2819 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2821 return mmc_select_mode_and_width(mmc, caps_filtered);
/* NOTE(review): body elided in this listing; presumably stores @val for the
 * later SET_DSR command in mmc_startup() -- confirm against full source. */
2826 int mmc_set_dsr(struct mmc *mmc, u16 val)
2832 /* CPU-specific MMC initialization; weak default, SoC code may override. */
2833 __weak int cpu_mmc_init(bd_t *bis)
2838 /* Board-specific MMC initialization; weak default, boards may override. */
2839 __weak int board_mmc_init(bd_t *bis)
/* Request (or clear) pre-initialization for this mmc device. */
2844 void mmc_set_preinit(struct mmc *mmc, int preinit)
2846 mmc->preinit = preinit;
2849 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Probe every MMC device in the uclass.  With DM, devices are first
 * requested by sequence number to fix their ordering, then each is
 * probed (failures are logged, not fatal).  Without DM, defer to
 * board_mmc_init().
 */
2850 static int mmc_probe(bd_t *bis)
2854 struct udevice *dev;
2856 ret = uclass_get(UCLASS_MMC, &uc);
2861 * Try to add them in sequence order. Really with driver model we
2862 * should allow holes, but the current MMC list does not allow that.
2863 * So if we request 0, 1, 3 we will get 0, 1, 2.
2865 for (i = 0; ; i++) {
2866 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2870 uclass_foreach_dev(dev, uc) {
2871 ret = device_probe(dev);
2873 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-DM fallback: let the board register its controllers. */
2879 static int mmc_probe(bd_t *bis)
2881 if (board_mmc_init(bis) < 0)
/*
 * One-time subsystem initialization: probe controllers and (outside SPL)
 * print the device list.  Guarded so repeat calls are no-ops.
 */
2888 int mmc_initialize(bd_t *bis)
2890 static int initialized = 0;
2892 if (initialized) /* Avoid initializing mmc multiple times */
2896 #if !CONFIG_IS_ENABLED(BLK)
2897 #if !CONFIG_IS_ENABLED(MMC_TINY)
2901 ret = mmc_probe(bis);
2905 #ifndef CONFIG_SPL_BUILD
2906 print_mmc_devices(',');
2913 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device.
 * Reads EXT_CSD to confirm support and current state, then sets the
 * one-time-programmable BKOPS_EN bit via CMD6.
 */
2914 int mmc_set_bkops_enable(struct mmc *mmc)
2917 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2919 err = mmc_send_ext_csd(mmc, ext_csd);
2921 puts("Could not get ext_csd register values\n");
2925 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2926 puts("Background operations not supported on device\n");
2927 return -EMEDIUMTYPE;
2930 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2931 puts("Background operations already enabled\n");
2935 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2937 puts("Failed to enable manual background operations\n");
2941 puts("Enabled manual background operations\n");