1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 #define DEFAULT_CMD6_TIMEOUT_MS 500
26 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
27 static int mmc_power_cycle(struct mmc *mmc);
28 #if !CONFIG_IS_ENABLED(MMC_TINY)
29 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
32 #if !CONFIG_IS_ENABLED(DM_MMC)
34 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
/* Weak default board hook for write-protect state; boards may override. */
39 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * Query the card's write-protect state (non-DM path): ask the board hook
 * first, then the host driver's getwp op when provided.
 * NOTE(review): excerpt — the lines selecting between the two results are
 * not visible here; presumably the driver op is used when the board hook
 * declines (e.g. returns -1) — confirm against the full source.
 */
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
/* Fall back to the controller-provided callback when available. */
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
/* Trace helper (CONFIG_MMC_TRACE): log command index and argument before send. */
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
/*
 * Trace helper: after a command completes, print the driver return code and
 * decode the response according to cmd->resp_type.
 */
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
/* R2 (CID/CSD) is 128 bits: dump all four response words. */
94 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
/* Additionally hex-dump the raw response bytes, 4 bytes per word. */
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
/* NOTE(review): ptr is decremented while printing — byte order of the dump
 * depends on lines not shown here (ptr appears to be advanced past the word
 * first); confirm against the full source. */
109 for (j = 0; j < 4; j++)
110 printf("%02x ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
119 printf("\t\tERROR MMC rsp not supported\n");
/* Trace helper: extract and print the CURRENT_STATE field (bits 12:9) of R1. */
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * Map a bus_mode enum value to a human-readable name for verbose/debug
 * output. Returns "Unknown mode" for out-of-range values.
 */
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [SD_LEGACY] = "SD Legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
/* Guard against values outside the designated-initializer table. */
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
/*
 * Return the nominal bus clock (Hz) for a given bus mode. MMC_LEGACY is
 * special-cased to the card's probed legacy speed; out-of-range modes are
 * rejected via the MMC_MODES_END guard.
 */
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
164 [SD_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
/* Legacy mode uses the per-card speed rather than the static table. */
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
/*
 * Record the chosen bus mode on the mmc struct: selected mode, its target
 * clock, and whether it is a DDR mode. Does not touch the host controller.
 */
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Non-DM command dispatch: wrap the driver's send_cmd op with the optional
 * trace hooks (no-ops unless CONFIG_MMC_TRACE is enabled).
 */
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * Issue CMD13 (SEND_STATUS) and return the card status word via *status.
 * NOTE(review): excerpt — the retry loop using 'retries' and the error path
 * around the second mmc_trace_state() call are not fully visible here.
 */
209 int mmc_send_status(struct mmc *mmc, unsigned int *status)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
/* RCA is only meaningful on native MMC/SD buses, not in SPI mode. */
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 mmc_trace_state(mmc, &cmd);
223 *status = cmd.response[0];
227 mmc_trace_state(mmc, &cmd);
/*
 * Wait for the card to leave the busy state: first try the controller's
 * dat0-level wait, then fall back to polling CMD13 until the card reports
 * ready-for-data (and not in the programming state), checking for error bits.
 */
231 int mmc_poll_for_busy(struct mmc *mmc, int timeout)
236 err = mmc_wait_dat0(mmc, 1, timeout);
241 err = mmc_send_status(mmc, &status);
245 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
246 (status & MMC_STATUS_CURR_STATE) !=
/* Any error bit in the status word aborts the poll. */
250 if (status & MMC_STATUS_MASK) {
251 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
252 pr_err("Status Error: 0x%08x\n", status);
/* Loop bound reached without the card becoming ready. */
264 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 pr_err("Timeout waiting card ready\n")
/*
 * Issue CMD16 (SET_BLOCKLEN). With CONFIG_MMC_QUIRKS, retries the command
 * for cards known to fail the first attempt (MMC_QUIRK_RETRY_SET_BLOCKLEN).
 */
273 int mmc_set_blocklen(struct mmc *mmc, int len)
281 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
282 cmd.resp_type = MMC_RSP_R1;
285 err = mmc_send_cmd(mmc, &cmd, NULL);
287 #ifdef CONFIG_MMC_QUIRKS
288 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
291 * It has been seen that SET_BLOCKLEN may fail on the first
292 * attempt, let's try a few more times
295 err = mmc_send_cmd(mmc, &cmd, NULL);
305 #ifdef MMC_SUPPORTS_TUNING
/*
 * Standard tuning block patterns (fixed by the eMMC/SD specifications) used
 * by CMD19/CMD21 tuning: 64 bytes for 4-bit bus, 128 bytes for 8-bit bus.
 * The received block is compared byte-for-byte against these tables.
 */
306 static const u8 tuning_blk_pattern_4bit[] = {
307 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
308 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
309 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
310 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
311 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
312 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
313 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
314 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
317 static const u8 tuning_blk_pattern_8bit[] = {
318 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
319 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
320 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
321 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
322 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
323 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
324 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
325 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
326 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
327 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
328 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
329 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
330 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
331 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
332 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
333 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * Execute one tuning command (CMD19/CMD21, given via 'opcode'): read a
 * tuning block sized by the current bus width and compare it against the
 * spec-defined pattern. Non-zero return indicates a tuning failure.
 */
336 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
339 struct mmc_data data;
340 const u8 *tuning_block_pattern;
/* Select the pattern matching the configured bus width (8- or 4-bit). */
343 if (mmc->bus_width == 8) {
344 tuning_block_pattern = tuning_blk_pattern_8bit;
345 size = sizeof(tuning_blk_pattern_8bit);
346 } else if (mmc->bus_width == 4) {
347 tuning_block_pattern = tuning_blk_pattern_4bit;
348 size = sizeof(tuning_blk_pattern_4bit);
/* DMA-safe receive buffer, cache-line aligned. */
353 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
357 cmd.resp_type = MMC_RSP_R1;
359 data.dest = (void *)data_buf;
361 data.blocksize = size;
362 data.flags = MMC_DATA_READ;
364 err = mmc_send_cmd(mmc, &cmd, &data);
/* A mismatch means the sampling point is wrong for this tuning step. */
368 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * Read 'blkcnt' blocks starting at 'start' into 'dst' using CMD17/CMD18.
 * High-capacity cards address by block number, others by byte offset.
 * Multi-block reads are terminated with CMD12 (STOP_TRANSMISSION).
 */
375 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
379 struct mmc_data data;
/* Single vs. multiple block read depending on blkcnt (selection line not shown). */
382 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
384 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
386 if (mmc->high_capacity)
/* Byte addressing for standard-capacity cards. */
389 cmd.cmdarg = start * mmc->read_bl_len;
391 cmd.resp_type = MMC_RSP_R1;
394 data.blocks = blkcnt;
395 data.blocksize = mmc->read_bl_len;
396 data.flags = MMC_DATA_READ;
398 if (mmc_send_cmd(mmc, &cmd, &data))
/* Close an open-ended multiple-block transfer. */
402 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
404 cmd.resp_type = MMC_RSP_R1b;
405 if (mmc_send_cmd(mmc, &cmd, NULL)) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 pr_err("mmc fail to send stop cmd\n");
416 #if CONFIG_IS_ENABLED(BLK)
/*
 * Block-device read entry point. Two alternative signatures: udevice-based
 * when CONFIG_BLK is enabled, legacy blk_desc-based otherwise. Validates the
 * range, sets the block length, then reads in chunks capped at cfg->b_max.
 * Returns the number of blocks read.
 */
417 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
419 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
423 #if CONFIG_IS_ENABLED(BLK)
424 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
426 int dev_num = block_dev->devnum;
428 lbaint_t cur, blocks_todo = blkcnt;
433 struct mmc *mmc = find_mmc_device(dev_num);
/* MMC_TINY has no block uclass, so switch hw partitions directly. */
437 if (CONFIG_IS_ENABLED(MMC_TINY))
438 err = mmc_switch_part(mmc, block_dev->hwpart);
440 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Reject reads extending past the device capacity. */
445 if ((start + blkcnt) > block_dev->lba) {
446 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
447 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
448 start + blkcnt, block_dev->lba);
453 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
454 pr_debug("%s: Failed to set blocklen\n", __func__);
/* Chunked transfer loop: never exceed the controller's b_max per command. */
459 cur = (blocks_todo > mmc->cfg->b_max) ?
460 mmc->cfg->b_max : blocks_todo;
461 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
462 pr_debug("%s: Failed to read blocks\n", __func__);
467 dst += cur * mmc->read_bl_len;
468 } while (blocks_todo > 0);
/* Issue CMD0 (GO_IDLE_STATE) to reset the card to the idle state. */
473 static int mmc_go_idle(struct mmc *mmc)
480 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
482 cmd.resp_type = MMC_RSP_NONE;
484 err = mmc_send_cmd(mmc, &cmd, NULL);
494 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the SD UHS signal-voltage switch sequence (CMD11): send CMD11,
 * gate the clock, change the host I/O voltage, re-enable the clock, then
 * verify the card released dat[0:3]. 3.3V requests skip CMD11 entirely.
 */
495 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
501 * Send CMD11 only if the request is to switch the card to
504 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
505 return mmc_set_signal_voltage(mmc, signal_voltage);
507 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
509 cmd.resp_type = MMC_RSP_R1;
511 err = mmc_send_cmd(mmc, &cmd, NULL);
/* Card signalling an error in R1 aborts the switch (native bus only). */
515 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
519 * The card should drive cmd and dat[0:3] low immediately
520 * after the response of cmd11, but wait 100 us to be sure
522 err = mmc_wait_dat0(mmc, 0, 100);
529 * During a signal voltage level switch, the clock must be gated
530 * for 5 ms according to the SD spec
532 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
534 err = mmc_set_signal_voltage(mmc, signal_voltage);
538 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
540 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
543 * Failure to switch is indicated by the card holding
544 * dat[0:3] low. Wait for at least 1 ms according to spec
546 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * SD initialization: loop ACMD41 (APP_CMD + SD_SEND_OP_COND) until the card
 * leaves busy, negotiate voltage/HCS/S18R, then read the OCR (via CMD58 on
 * SPI hosts) and optionally start the UHS voltage switch.
 */
556 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
/* ACMD prefix: CMD55 before the application-specific command. */
563 cmd.cmdidx = MMC_CMD_APP_CMD;
564 cmd.resp_type = MMC_RSP_R1;
567 err = mmc_send_cmd(mmc, &cmd, NULL);
572 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
573 cmd.resp_type = MMC_RSP_R3;
576 * Most cards do not answer if some reserved bits
577 * in the ocr are set. However, some controllers
578 * can set bit 7 (reserved for low voltages), but
579 * how to manage low voltage SD cards is not yet
582 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
583 (mmc->cfg->voltages & 0xff8000);
/* Advertise high-capacity support only for SD v2 cards. */
585 if (mmc->version == SD_VERSION_2)
586 cmd.cmdarg |= OCR_HCS;
/* Request 1.8V signalling when UHS is enabled. */
589 cmd.cmdarg |= OCR_S18R;
591 err = mmc_send_cmd(mmc, &cmd, NULL);
/* OCR busy bit set means initialization completed. */
596 if (cmd.response[0] & OCR_BUSY)
605 if (mmc->version != SD_VERSION_2)
606 mmc->version = SD_VERSION_1_0;
608 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
609 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
610 cmd.resp_type = MMC_RSP_R3;
613 err = mmc_send_cmd(mmc, &cmd, NULL);
619 mmc->ocr = cmd.response[0];
/* Card accepted S18R (0x41000000 = busy + S18A): do the 1.8V switch. */
621 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
622 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
624 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
630 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * One CMD1 (SEND_OP_COND) iteration. When use_arg is set (and not SPI),
 * request HCS plus the intersection of host voltages and the card's OCR.
 * Stores the returned OCR in mmc->ocr.
 */
636 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
641 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
642 cmd.resp_type = MMC_RSP_R3;
644 if (use_arg && !mmc_host_is_spi(mmc))
645 cmd.cmdarg = OCR_HCS |
646 (mmc->cfg->voltages &
647 (mmc->ocr & OCR_VOLTAGE_MASK)) |
648 (mmc->ocr & OCR_ACCESS_MODE);
650 err = mmc_send_cmd(mmc, &cmd, NULL);
653 mmc->ocr = cmd.response[0];
/*
 * Start eMMC initialization: probe the card's capabilities with two CMD1
 * iterations (first without an argument). If the card is still busy, defer
 * completion by setting op_cond_pending for mmc_complete_op_cond().
 */
657 static int mmc_send_op_cond(struct mmc *mmc)
661 /* Some cards seem to need this */
664 /* Ask the card for its capabilities */
665 for (i = 0; i < 2; i++) {
666 err = mmc_send_op_cond_iter(mmc, i != 0);
670 /* exit if not busy (flag seems to be inverted) */
671 if (mmc->ocr & OCR_BUSY)
674 mmc->op_cond_pending = 1;
/*
 * Finish the deferred eMMC CMD1 sequence: poll SEND_OP_COND until the busy
 * flag clears or the timeout elapses, read the OCR over SPI if needed, then
 * derive version/high-capacity state from the final OCR.
 */
678 static int mmc_complete_op_cond(struct mmc *mmc)
685 mmc->op_cond_pending = 0;
686 if (!(mmc->ocr & OCR_BUSY)) {
687 /* Some cards seem to need this */
/* Timed polling loop on CMD1. */
690 start = get_timer(0);
692 err = mmc_send_op_cond_iter(mmc, 1);
695 if (mmc->ocr & OCR_BUSY)
697 if (get_timer(start) > timeout)
703 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
704 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
705 cmd.resp_type = MMC_RSP_R3;
708 err = mmc_send_cmd(mmc, &cmd, NULL);
713 mmc->ocr = cmd.response[0];
/* Exact version is refined later from CSD/EXT_CSD. */
716 mmc->version = MMC_VERSION_UNKNOWN;
718 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * Read the 512-byte Extended CSD register (CMD8 for eMMC) into ext_csd.
 * The caller must supply a buffer of at least MMC_MAX_BLOCK_LEN bytes.
 */
725 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
728 struct mmc_data data;
731 /* Get the Card Status Register */
732 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
733 cmd.resp_type = MMC_RSP_R1;
736 data.dest = (char *)ext_csd;
738 data.blocksize = MMC_MAX_BLOCK_LEN;
739 data.flags = MMC_DATA_READ;
741 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * Core CMD6 (SWITCH) implementation: write one EXT_CSD byte, then wait for
 * the card to finish, preferring a dat0-level wait and falling back to CMD13
 * status polling. The timeout scales with GENERIC_CMD6_TIME, or with
 * PARTITION_SWITCH_TIME for partition-config switches.
 */
746 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
749 unsigned int status, start;
751 int timeout = DEFAULT_CMD6_TIMEOUT_MS;
752 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
753 (index == EXT_CSD_PART_CONF);
/* EXT_CSD times are in 10ms units, hence the *10 to get milliseconds. */
757 if (mmc->gen_cmd6_time)
758 timeout = mmc->gen_cmd6_time * 10;
760 if (is_part_switch && mmc->part_switch_time)
761 timeout = mmc->part_switch_time * 10;
763 cmd.cmdidx = MMC_CMD_SWITCH;
764 cmd.resp_type = MMC_RSP_R1b;
765 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
770 ret = mmc_send_cmd(mmc, &cmd, NULL);
771 } while (ret && retries-- > 0);
776 start = get_timer(0);
778 /* poll dat0 for rdy/busy status */
779 ret = mmc_wait_dat0(mmc, 1, timeout);
780 if (ret && ret != -ENOSYS)
784 * In cases when not allowed to poll by using CMD13 or because we aren't
785 * capable of polling by using mmc_wait_dat0, then rely on waiting the
786 * stated timeout to be sufficient.
788 if (ret == -ENOSYS && !send_status)
791 /* Finally wait until the card is ready or indicates a failure
792 * to switch. It doesn't hurt to use CMD13 here even if send_status
793 * is false, because by now (after 'timeout' ms) the bus should be
797 ret = mmc_send_status(mmc, &status);
799 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
800 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
804 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
807 } while (get_timer(start) < timeout);
/* Public CMD6 wrapper: always polls card status after the switch. */
812 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
814 return __mmc_switch(mmc, set, index, value, true);
817 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Program the card's HS_TIMING EXT_CSD byte for the requested bus mode.
 * When downgrading from HS200/HS400 ('hsdowngrade'), skip the CMD13 poll
 * during the switch and drop the host clock to HS first, since EXT_CSD
 * cannot be read reliably at the old, faster clock.
 */
818 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
824 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
830 speed_bits = EXT_CSD_TIMING_HS;
832 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
834 speed_bits = EXT_CSD_TIMING_HS200;
837 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
839 speed_bits = EXT_CSD_TIMING_HS400;
843 speed_bits = EXT_CSD_TIMING_LEGACY;
849 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
850 speed_bits, !hsdowngrade);
854 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
855 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
857 * In case the eMMC is in HS200/HS400 mode and we are downgrading
858 * to HS mode, the card clock is still running much faster than
859 * the supported HS mode clock, so we can not reliably read out
860 * Extended CSD. Reconfigure the controller to run at HS mode.
863 mmc_select_mode(mmc, MMC_HS);
864 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
868 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
869 /* Now check to see that it worked */
870 err = mmc_send_ext_csd(mmc, test_csd);
874 /* No high-speed support */
875 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * Derive mmc->card_caps for an eMMC device from the cached EXT_CSD:
 * bus widths plus supported speed modes (HS26/HS52/DDR52/HS200/HS400)
 * according to the CARD_TYPE byte.
 */
882 static int mmc_get_capabilities(struct mmc *mmc)
884 u8 *ext_csd = mmc->ext_csd;
887 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
/* SPI hosts only support the legacy 1-bit path probed above. */
889 if (mmc_host_is_spi(mmc))
892 /* Only version 4 supports high-speed */
893 if (mmc->version < MMC_VERSION_4)
897 pr_err("No ext_csd found!\n"); /* this should never happen */
901 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
903 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
904 mmc->cardtype = cardtype;
906 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
907 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
908 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
909 mmc->card_caps |= MMC_MODE_HS200;
912 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
913 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
914 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
915 mmc->card_caps |= MMC_MODE_HS400;
918 if (cardtype & EXT_CSD_CARD_TYPE_52) {
919 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
920 mmc->card_caps |= MMC_MODE_DDR_52MHz;
921 mmc->card_caps |= MMC_MODE_HS_52MHz;
923 if (cardtype & EXT_CSD_CARD_TYPE_26)
924 mmc->card_caps |= MMC_MODE_HS;
/*
 * Set mmc->capacity for the selected hardware partition (0 = user area,
 * 1/2 = boot, 3 = RPMB, 4..7 = general purpose) and refresh the block
 * descriptor's LBA count accordingly.
 */
930 static int mmc_set_capacity(struct mmc *mmc, int part_num)
934 mmc->capacity = mmc->capacity_user;
938 mmc->capacity = mmc->capacity_boot;
941 mmc->capacity = mmc->capacity_rpmb;
947 mmc->capacity = mmc->capacity_gp[part_num - 4];
953 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * Switch the active hardware partition via EXT_CSD PART_CONF (with retries),
 * then update the cached capacity/hwpart. -ENODEV with part_num 0 is treated
 * as success: the device has no partition support but is already showing the
 * raw user area.
 */
958 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
964 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
/* Preserve the other PART_CONF bits, replace only PART_ACCESS. */
966 (mmc->part_config & ~PART_ACCESS_MASK)
967 | (part_num & PART_ACCESS_MASK));
968 } while (ret && retry--);
971 * Set the capacity if the switch succeeded or was intended
972 * to return to representing the raw device.
974 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
975 ret = mmc_set_capacity(mmc, part_num);
976 mmc_get_blk_desc(mmc)->hwpart = part_num;
982 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Configure eMMC hardware partitioning (enhanced user area, GP partitions,
 * write-reliability) per 'conf'. 'mode' selects how far to go:
 *   CHECK    - validate the request against the card only;
 *   SET      - validate and write the configuration registers;
 *   COMPLETE - additionally write WR_REL_SET and latch
 *              PARTITION_SETTING_COMPLETED (irreversible after power cycle).
 * Requires eMMC >= 4.41, partitioning support and a HC WP group size.
 */
983 int mmc_hwpart_config(struct mmc *mmc,
984 const struct mmc_hwpart_conf *conf,
985 enum mmc_hwpart_conf_mode mode)
991 u32 max_enh_size_mult;
992 u32 tot_enh_size_mult = 0;
995 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
997 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1000 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1001 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1002 return -EMEDIUMTYPE;
1005 if (!(mmc->part_support & PART_SUPPORT)) {
1006 pr_err("Card does not support partitioning\n");
1007 return -EMEDIUMTYPE;
1010 if (!mmc->hc_wp_grp_size) {
1011 pr_err("Card does not define HC WP group size\n");
1012 return -EMEDIUMTYPE;
1015 /* check partition alignment and total enhanced size */
1016 if (conf->user.enh_size) {
1017 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1018 conf->user.enh_start % mmc->hc_wp_grp_size) {
1019 pr_err("User data enhanced area not HC WP group "
1023 part_attrs |= EXT_CSD_ENH_USR;
1024 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
/* Byte-addressed cards need the start address scaled by 512 (<< 9). */
1025 if (mmc->high_capacity) {
1026 enh_start_addr = conf->user.enh_start;
1028 enh_start_addr = (conf->user.enh_start << 9);
1034 tot_enh_size_mult += enh_size_mult;
/* Validate each of the four general-purpose partitions. */
1036 for (pidx = 0; pidx < 4; pidx++) {
1037 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1038 pr_err("GP%i partition not HC WP group size "
1039 "aligned\n", pidx+1);
1042 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1043 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1044 part_attrs |= EXT_CSD_ENH_GP(pidx);
1045 tot_enh_size_mult += gp_size_mult[pidx];
1049 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1050 pr_err("Card does not support enhanced attribute\n");
1051 return -EMEDIUMTYPE;
1054 err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 24-bit little-endian field across 3 bytes. */
1059 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1060 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1061 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1062 if (tot_enh_size_mult > max_enh_size_mult) {
1063 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1064 tot_enh_size_mult, max_enh_size_mult);
1065 return -EMEDIUMTYPE;
1068 /* The default value of EXT_CSD_WR_REL_SET is device
1069 * dependent, the values can only be changed if the
1070 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1071 * changed only once and before partitioning is completed. */
1072 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1073 if (conf->user.wr_rel_change) {
1074 if (conf->user.wr_rel_set)
1075 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1077 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1079 for (pidx = 0; pidx < 4; pidx++) {
1080 if (conf->gp_part[pidx].wr_rel_change) {
1081 if (conf->gp_part[pidx].wr_rel_set)
1082 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1084 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1088 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1089 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1090 puts("Card does not support host controlled partition write "
1091 "reliability settings\n");
1092 return -EMEDIUMTYPE;
/* Partitioning is one-shot: refuse if already completed on this card. */
1095 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1096 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1097 pr_err("Card already partitioned\n");
1101 if (mode == MMC_HWPART_CONF_CHECK)
1104 /* Partitioning requires high-capacity size definitions */
1105 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1106 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1107 EXT_CSD_ERASE_GROUP_DEF, 1);
1112 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1114 /* update erase group size to be high-capacity */
1115 mmc->erase_grp_size =
1116 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1120 /* all OK, write the configuration */
1121 for (i = 0; i < 4; i++) {
1122 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1123 EXT_CSD_ENH_START_ADDR+i,
1124 (enh_start_addr >> (i*8)) & 0xFF);
1128 for (i = 0; i < 3; i++) {
1129 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1130 EXT_CSD_ENH_SIZE_MULT+i,
1131 (enh_size_mult >> (i*8)) & 0xFF);
1135 for (pidx = 0; pidx < 4; pidx++) {
1136 for (i = 0; i < 3; i++) {
1137 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1138 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1139 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1144 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1145 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1149 if (mode == MMC_HWPART_CONF_SET)
1152 /* The WR_REL_SET is a write-once register but shall be
1153 * written before setting PART_SETTING_COMPLETED. As it is
1154 * write-once we can only write it when completing the
1156 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1157 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1158 EXT_CSD_WR_REL_SET, wr_rel_set);
1163 /* Setting PART_SETTING_COMPLETED confirms the partition
1164 * configuration but it only becomes effective after power
1165 * cycle, so we do not adjust the partition related settings
1166 * in the mmc struct. */
1168 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1169 EXT_CSD_PARTITION_SETTING,
1170 EXT_CSD_PARTITION_SETTING_COMPLETED);
1178 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Query card-detect state (non-DM path): board hook first, then the driver's
 * getcd op. NOTE(review): excerpt — the fallback condition between the two
 * is on lines not shown here.
 */
1179 int mmc_getcd(struct mmc *mmc)
1183 cd = board_mmc_getcd(mmc);
1186 if (mmc->cfg->ops->getcd)
1187 cd = mmc->cfg->ops->getcd(mmc);
1196 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Issue SD CMD6 (SWITCH_FUNC): 'mode' selects check (0) vs. switch (1),
 * 'value' is placed in function group 'group' (4 bits per group, others
 * left at 0xf = no change). The 64-byte switch status is read into 'resp'.
 */
1197 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1200 struct mmc_data data;
1202 /* Switch the frequency */
1203 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1204 cmd.resp_type = MMC_RSP_R1;
1205 cmd.cmdarg = (mode << 31) | 0xffffff;
1206 cmd.cmdarg &= ~(0xf << (group * 4));
1207 cmd.cmdarg |= value << (group * 4);
1209 data.dest = (char *)resp;
1210 data.blocksize = 64;
1212 data.flags = MMC_DATA_READ;
1214 return mmc_send_cmd(mmc, &cmd, &data);
/*
 * Probe an SD card's capabilities: read the SCR to determine spec version
 * and 4-bit support, use CMD6 in check mode to detect high-speed support,
 * and (with UHS enabled, v3+ cards) decode the UHS bus-speed bits.
 */
1217 static int sd_get_capabilities(struct mmc *mmc)
1221 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1222 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1223 struct mmc_data data;
1225 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1229 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
/* SPI hosts stay at the legacy baseline probed above. */
1231 if (mmc_host_is_spi(mmc))
1234 /* Read the SCR to find out if this card supports higher speeds */
1235 cmd.cmdidx = MMC_CMD_APP_CMD;
1236 cmd.resp_type = MMC_RSP_R1;
1237 cmd.cmdarg = mmc->rca << 16;
1239 err = mmc_send_cmd(mmc, &cmd, NULL);
1244 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1245 cmd.resp_type = MMC_RSP_R1;
1251 data.dest = (char *)scr;
1254 data.flags = MMC_DATA_READ;
1256 err = mmc_send_cmd(mmc, &cmd, &data);
/* SCR is transferred big-endian. */
1265 mmc->scr[0] = __be32_to_cpu(scr[0]);
1266 mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SCR_STRUCTURE/SD_SPEC nibble -> spec version. */
1268 switch ((mmc->scr[0] >> 24) & 0xf) {
1270 mmc->version = SD_VERSION_1_0;
1273 mmc->version = SD_VERSION_1_10;
1276 mmc->version = SD_VERSION_2;
1277 if ((mmc->scr[0] >> 15) & 0x1)
1278 mmc->version = SD_VERSION_3;
1281 mmc->version = SD_VERSION_1_0;
1285 if (mmc->scr[0] & SD_DATA_4BIT)
1286 mmc->card_caps |= MMC_MODE_4BIT;
1288 /* Version 1.0 doesn't support switching */
1289 if (mmc->version == SD_VERSION_1_0)
1294 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1295 (u8 *)switch_status);
1300 /* The high-speed function is busy. Try again */
1301 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1305 /* If high-speed isn't supported, we return */
1306 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1307 mmc->card_caps |= MMC_CAP(SD_HS);
1309 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1310 /* Versions before 3.0 don't support UHS modes */
1311 if (mmc->version < SD_VERSION_3)
1314 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1315 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1316 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1317 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1318 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1319 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1320 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1321 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1322 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1323 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1324 mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * Switch the SD card's bus speed (CMD6 in switch mode) for the requested
 * bus_mode, then verify the function-group-1 result field in the switch
 * status confirms the selected speed. v1.0 cards have no CMD6.
 */
1330 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1334 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1337 /* SD version 1.00 and 1.01 does not support CMD 6 */
1338 if (mmc->version == SD_VERSION_1_0)
1343 speed = UHS_SDR12_BUS_SPEED;
1346 speed = HIGH_SPEED_BUS_SPEED;
1348 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1350 speed = UHS_SDR12_BUS_SPEED;
1353 speed = UHS_SDR25_BUS_SPEED;
1356 speed = UHS_SDR50_BUS_SPEED;
1359 speed = UHS_DDR50_BUS_SPEED;
1362 speed = UHS_SDR104_BUS_SPEED;
1369 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* The card echoes the accepted function in bits 379:376 of the status. */
1373 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * Set the SD card's bus width via ACMD6 (SET_BUS_WIDTH). Only 1-bit and
 * 4-bit are valid for SD.
 */
1379 static int sd_select_bus_width(struct mmc *mmc, int w)
1384 if ((w != 4) && (w != 1))
/* ACMD prefix. */
1387 cmd.cmdidx = MMC_CMD_APP_CMD;
1388 cmd.resp_type = MMC_RSP_R1;
1389 cmd.cmdarg = mmc->rca << 16;
1391 err = mmc_send_cmd(mmc, &cmd, NULL);
1395 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1396 cmd.resp_type = MMC_RSP_R1;
1401 err = mmc_send_cmd(mmc, &cmd, NULL);
1409 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the 64-byte SD Status (ACMD13) and extract erase-related parameters:
 * allocation unit size (table-mapped), erase size/timeout/offset. Results
 * are stored in mmc->ssr for use by the erase path.
 */
1410 static int sd_read_ssr(struct mmc *mmc)
/* AU_SIZE code (0..0xF) -> AU size in 512-byte sectors. */
1412 static const unsigned int sd_au_size[] = {
1413 0, SZ_16K / 512, SZ_32K / 512,
1414 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1415 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1416 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1417 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1422 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1423 struct mmc_data data;
1425 unsigned int au, eo, et, es;
1427 cmd.cmdidx = MMC_CMD_APP_CMD;
1428 cmd.resp_type = MMC_RSP_R1;
1429 cmd.cmdarg = mmc->rca << 16;
1431 err = mmc_send_cmd(mmc, &cmd, NULL);
1435 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1436 cmd.resp_type = MMC_RSP_R1;
1440 data.dest = (char *)ssr;
1441 data.blocksize = 64;
1443 data.flags = MMC_DATA_READ;
1445 err = mmc_send_cmd(mmc, &cmd, &data);
/* SSR is big-endian on the wire; convert to host order in place. */
1453 for (i = 0; i < 16; i++)
1454 ssr[i] = be32_to_cpu(ssr[i]);
1456 au = (ssr[2] >> 12) & 0xF;
/* AU codes above 9 are only defined from SD spec v3 onwards. */
1457 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1458 mmc->ssr.au = sd_au_size[au];
1459 es = (ssr[3] >> 24) & 0xFF;
1460 es |= (ssr[2] & 0xFF) << 8;
1461 et = (ssr[3] >> 18) & 0x3F;
1463 eo = (ssr[3] >> 16) & 0x3;
1464 mmc->ssr.erase_timeout = (et * 1000) / es;
1465 mmc->ssr.erase_offset = eo * 1000;
1468 pr_debug("Invalid Allocation Unit Size.\n");
1474 /* frequency bases */
1475 /* divided by 10 to be nice to platforms without floating point */
/* Used together with 'multipliers' below to decode CSD TRAN_SPEED. */
1476 static const int fbase[] = {
1483 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1484 * to platforms without floating point.
1486 static const u8 multipliers[] = {
/*
 * Convert a single bus-width capability flag into the numeric width
 * (8/4/1). NOTE(review): the pr_warn message contains a typo ("witdh") —
 * left untouched here since it is a runtime string.
 */
1505 static inline int bus_width(uint cap)
1507 if (cap == MMC_MODE_8BIT)
1509 if (cap == MMC_MODE_4BIT)
1511 if (cap == MMC_MODE_1BIT)
1513 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1517 #if !CONFIG_IS_ENABLED(DM_MMC)
1518 #ifdef MMC_SUPPORTS_TUNING
1519 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
/* Push the current mmc settings (clock/width/voltage) to the host driver. */
1525 static int mmc_set_ios(struct mmc *mmc)
1529 if (mmc->cfg->ops->set_ios)
1530 ret = mmc->cfg->ops->set_ios(mmc);
/*
 * Set the bus clock, clamped to the host's [f_min, f_max] range, and record
 * the enable/disable state before applying via mmc_set_ios().
 */
1536 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1539 if (clock > mmc->cfg->f_max)
1540 clock = mmc->cfg->f_max;
1542 if (clock < mmc->cfg->f_min)
1543 clock = mmc->cfg->f_min;
1547 mmc->clk_disable = disable;
1549 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1551 return mmc_set_ios(mmc);
/* Record the bus width and apply it to the host via mmc_set_ios(). */
1554 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1556 mmc->bus_width = width;
1558 return mmc_set_ios(mmc);
1561 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1563 * helper function to display the capabilities in a human
1564 * friendly manner. The capabilities include bus width and
/*
 * Debug helper: print a capability mask as supported bus widths plus the
 * list of bus modes (uses \b backspaces to trim trailing separators).
 */
1567 void mmc_dump_capabilities(const char *text, uint caps)
1571 pr_debug("%s: widths [", text);
1572 if (caps & MMC_MODE_8BIT)
1574 if (caps & MMC_MODE_4BIT)
1576 if (caps & MMC_MODE_1BIT)
1578 pr_debug("\b\b] modes [");
1579 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1580 if (MMC_CAP(mode) & caps)
1581 pr_debug("%s, ", mmc_mode_name(mode));
1582 pr_debug("\b\b]\n");
1586 struct mode_width_tuning {
1589 #ifdef MMC_SUPPORTS_TUNING
1594 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/* Convert an mmc_voltage enum value to millivolts (0 for VOLTAGE_000). */
1595 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1598 case MMC_SIGNAL_VOLTAGE_000: return 0;
1599 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1600 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1601 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * Change the host's I/O signalling voltage (MMC_IO_VOLTAGE builds): no-op
 * if already at the requested level, otherwise record it and re-apply the
 * I/O settings. The non-IO-voltage build provides an inline stub below.
 */
1606 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1610 if (mmc->signal_voltage == signal_voltage)
1613 mmc->signal_voltage = signal_voltage;
1614 err = mmc_set_ios(mmc);
1616 pr_debug("unable to set voltage (err %d)\n", err);
/* Stub used when MMC_IO_VOLTAGE is disabled. */
1621 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1627 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * SD bus modes in descending order of preference; entries are gated by the
 * UHS/tuning config options. Consumed by for_each_sd_mode_by_pref() below.
 */
1628 static const struct mode_width_tuning sd_modes_by_pref[] = {
1629 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1630 #ifdef MMC_SUPPORTS_TUNING
1633 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1634 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1639 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1643 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1647 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1652 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1654 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1657 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1662 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate the preference table, skipping modes absent from 'caps'. */
1666 #define for_each_sd_mode_by_pref(caps, mwt) \
1667 for (mwt = sd_modes_by_pref;\
1668 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1670 if (caps & MMC_CAP(mwt->mode))
/*
 * Choose the best workable SD bus mode/width: intersect card and host
 * capabilities, then try each preferred mode with 4-bit before 1-bit width,
 * configuring card then host, running tuning when required, and reading the
 * SSR. On any failure, drop back to SD_LEGACY and try the next combination.
 */
1672 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1675 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1676 const struct mode_width_tuning *mwt;
/* UHS is only attempted if the card accepted the S18R request in ACMD41. */
1677 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1678 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1680 bool uhs_en = false;
1685 mmc_dump_capabilities("sd card", card_caps);
1686 mmc_dump_capabilities("host", mmc->host_caps);
1689 /* Restrict card's capabilities by what the host can do */
1690 caps = card_caps & mmc->host_caps;
1695 for_each_sd_mode_by_pref(caps, mwt) {
1698 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1699 if (*w & caps & mwt->widths) {
1700 pr_debug("trying mode %s width %d (at %d MHz)\n",
1701 mmc_mode_name(mwt->mode),
1703 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1705 /* configure the bus width (card + host) */
1706 err = sd_select_bus_width(mmc, bus_width(*w));
1709 mmc_set_bus_width(mmc, bus_width(*w));
1711 /* configure the bus mode (card) */
1712 err = sd_set_card_speed(mmc, mwt->mode);
1716 /* configure the bus mode (host) */
1717 mmc_select_mode(mmc, mwt->mode);
1718 mmc_set_clock(mmc, mmc->tran_speed,
1721 #ifdef MMC_SUPPORTS_TUNING
1722 /* execute tuning if needed */
1723 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1724 err = mmc_execute_tuning(mmc,
1727 pr_debug("tuning failed\n");
1733 #if CONFIG_IS_ENABLED(MMC_WRITE)
1734 err = sd_read_ssr(mmc);
1736 pr_warn("unable to read ssr\n");
1742 /* revert to a safer bus speed */
1743 mmc_select_mode(mmc, SD_LEGACY);
1744 mmc_set_clock(mmc, mmc->tran_speed,
1750 pr_err("unable to select a mode\n");
1755 * read and compare the part of ext csd that is constant.
1756 * This can be used to check that the transfer is working
/*
 * Re-read EXT_CSD and compare its read-only fields against the cached copy
 * in mmc->ext_csd. Used after a bus reconfiguration to verify that data
 * transfers still work at the new mode/width. Only meaningful for MMC v4+.
 */
1759 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1762 const u8 *ext_csd = mmc->ext_csd;
1763 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1765 if (mmc->version < MMC_VERSION_4)
1768 err = mmc_send_ext_csd(mmc, test_csd);
1772 /* Only compare read only fields */
1773 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1774 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1775 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1776 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1777 ext_csd[EXT_CSD_REV]
1778 == test_csd[EXT_CSD_REV] &&
1779 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1780 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1781 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1782 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1788 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Select the lowest signal voltage that both the card (per its EXT_CSD
 * card type bits for the requested bus mode) and the caller's allowed_mask
 * support, trying candidates lowest-first via ffs(). Returns 0 once a
 * voltage switch succeeds.
 */
1789 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1790 uint32_t allowed_mask)
/* HS200/HS400 modes: only 1.8V / 1.2V signalling is permitted */
1797 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1798 EXT_CSD_CARD_TYPE_HS400_1_8V))
1799 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1800 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1801 EXT_CSD_CARD_TYPE_HS400_1_2V))
1802 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* DDR52: 3.3V or 1.8V, plus optional 1.2V */
1805 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1806 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1807 MMC_SIGNAL_VOLTAGE_180;
1808 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1809 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1812 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* lowest set bit first: prefer the lowest usable voltage */
1816 while (card_mask & allowed_mask) {
1817 enum mmc_voltage best_match;
1819 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1820 if (!mmc_set_signal_voltage(mmc, best_match))
1823 allowed_mask &= ~best_match;
/* Fallback when MMC_IO_VOLTAGE is disabled: nothing to switch */
1829 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1830 uint32_t allowed_mask)
/*
 * eMMC bus modes ordered by preference (fastest first). HS400/HS200 entries
 * are conditionally compiled and carry their tuning command.
 */
1836 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1837 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1840 .widths = MMC_MODE_8BIT,
1841 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1844 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1847 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1848 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1853 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1857 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1861 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1865 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate over mmc_modes_by_pref, skipping unsupported modes */
1869 #define for_each_mmc_mode_by_pref(caps, mwt) \
1870 for (mwt = mmc_modes_by_pref;\
1871 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1873 if (caps & MMC_CAP(mwt->mode))
/*
 * Mapping from host capability bit + DDR flag to the EXT_CSD BUS_WIDTH
 * value to program on the card, widest/DDR entries first.
 */
1875 static const struct ext_csd_bus_width {
1879 } ext_csd_bus_width[] = {
1880 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1881 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1882 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1883 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1884 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1887 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Switch the card into HS400 mode following the mandated sequence:
 * HS200 (for tuning) -> tuning -> back to HS -> 8-bit DDR bus width ->
 * HS400 timing on card and host. Returns 0 on success.
 */
1888 static int mmc_select_hs400(struct mmc *mmc)
1892 /* Set timing to HS200 for tuning */
1893 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1897 /* configure the bus mode (host) */
1898 mmc_select_mode(mmc, MMC_HS_200);
1899 mmc_set_clock(mmc, mmc->tran_speed, false);
1901 /* execute tuning if needed */
1902 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1904 debug("tuning failed\n");
1908 /* Set back to HS */
1909 mmc_set_card_speed(mmc, MMC_HS, true);
/* program 8-bit DDR bus width on the card, required before HS400 timing */
1911 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1912 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1916 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1920 mmc_select_mode(mmc, MMC_HS_400);
1921 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub when HS400 support is compiled out */
1928 static int mmc_select_hs400(struct mmc *mmc)
/*
 * Iterate ext_csd_bus_width entries whose DDR flag matches and whose
 * width capability is present in caps.
 */
1934 #define for_each_supported_width(caps, ddr, ecbv) \
1935 for (ecbv = ext_csd_bus_width;\
1936 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1938 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * Pick the best (mode, bus width) combination supported by both the eMMC
 * card and the host, programming voltage, bus width, speed mode and tuning
 * in order. On any failure the previous voltage is restored and the bus is
 * reverted to 1-bit legacy mode before trying the next combination.
 * Returns 0 on success, negative error code otherwise.
 */
1940 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1943 const struct mode_width_tuning *mwt;
1944 const struct ext_csd_bus_width *ecbw;
1947 mmc_dump_capabilities("mmc", card_caps);
1948 mmc_dump_capabilities("host", mmc->host_caps);
1951 /* Restrict card's capabilities by what the host can do */
1952 card_caps &= mmc->host_caps;
1954 /* Only version 4 of MMC supports wider bus widths */
1955 if (mmc->version < MMC_VERSION_4)
1958 if (!mmc->ext_csd) {
1959 pr_debug("No ext_csd found!\n"); /* this should never happen */
1963 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
1964 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1966 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
1967 * before doing anything else, since a transition from either of
1968 * the HS200/HS400 mode directly to legacy mode is not supported.
1970 if (mmc->selected_mode == MMC_HS_200 ||
1971 mmc->selected_mode == MMC_HS_400)
1972 mmc_set_card_speed(mmc, MMC_HS, true);
1975 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1977 for_each_mmc_mode_by_pref(card_caps, mwt) {
1978 for_each_supported_width(card_caps & mwt->widths,
1979 mmc_is_mode_ddr(mwt->mode), ecbw) {
1980 enum mmc_voltage old_voltage;
1981 pr_debug("trying mode %s width %d (at %d MHz)\n",
1982 mmc_mode_name(mwt->mode),
1983 bus_width(ecbw->cap),
1984 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so it can be restored on failure */
1985 old_voltage = mmc->signal_voltage;
1986 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1987 MMC_ALL_SIGNAL_VOLTAGE);
1991 /* configure the bus width (card + host) */
1992 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
/* SDR width first; the DDR flag is applied later if needed */
1994 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1997 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
1999 if (mwt->mode == MMC_HS_400) {
2000 err = mmc_select_hs400(mmc);
2002 printf("Select HS400 failed %d\n", err);
2006 /* configure the bus speed (card) */
2007 err = mmc_set_card_speed(mmc, mwt->mode, false);
2012 * configure the bus width AND the ddr mode
2013 * (card). The host side will be taken care
2014 * of in the next step
2016 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2017 err = mmc_switch(mmc,
2018 EXT_CSD_CMD_SET_NORMAL,
2020 ecbw->ext_csd_bits);
2025 /* configure the bus mode (host) */
2026 mmc_select_mode(mmc, mwt->mode);
2027 mmc_set_clock(mmc, mmc->tran_speed,
2029 #ifdef MMC_SUPPORTS_TUNING
2031 /* execute tuning if needed */
2033 err = mmc_execute_tuning(mmc,
2036 pr_debug("tuning failed\n");
2043 /* do a transfer to check the configuration */
2044 err = mmc_read_and_compare_ext_csd(mmc);
2048 mmc_set_signal_voltage(mmc, old_voltage);
2049 /* if an error occurred, revert to a safer bus mode */
2050 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2051 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2052 mmc_select_mode(mmc, MMC_LEGACY);
2053 mmc_set_bus_width(mmc, 1);
2057 pr_err("unable to select a mode\n");
2063 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: single static EXT_CSD buffer instead of a heap copy */
2064 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * MMC v4+ specific startup: read and cache EXT_CSD, derive the MMC spec
 * version, capacities (user/boot/RPMB/GP), partition configuration, erase
 * group sizes and CMD6 timing from it. No-op for SD cards or MMC < v4.
 * Returns 0 on success, negative error code otherwise.
 */
2067 static int mmc_startup_v4(struct mmc *mmc)
2071 bool has_parts = false;
2072 bool part_completed;
/* indexed by EXT_CSD_REV to translate revision -> MMC_VERSION_* */
2073 static const u32 mmc_versions[] = {
2085 #if CONFIG_IS_ENABLED(MMC_TINY)
2086 u8 *ext_csd = ext_csd_bkup;
2088 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2092 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2094 err = mmc_send_ext_csd(mmc, ext_csd);
2098 /* store the ext csd for future reference */
2100 mmc->ext_csd = ext_csd;
2102 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2104 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2107 /* check ext_csd version and capacity */
2108 err = mmc_send_ext_csd(mmc, ext_csd);
2112 /* store the ext csd for future reference */
2114 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2117 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* unknown revision: fail rather than index past mmc_versions[] */
2119 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2122 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2124 if (mmc->version >= MMC_VERSION_4_2) {
2126 * According to the JEDEC Standard, the value of
2127 * ext_csd's capacity is valid if the value is more
2130 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2131 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2132 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2133 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2134 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT only valid above 2 GiB in this branch (see JESD84) */
2135 if ((capacity >> 20) > 2 * 1024)
2136 mmc->capacity_user = capacity;
2139 if (mmc->version >= MMC_VERSION_4_5)
2140 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2142 /* The partition data may be non-zero but it is only
2143 * effective if PARTITION_SETTING_COMPLETED is set in
2144 * EXT_CSD, so ignore any data if this bit is not set,
2145 * except for enabling the high-capacity group size
2146 * definition (see below).
2148 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2149 EXT_CSD_PARTITION_SETTING_COMPLETED);
2151 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2152 /* Some eMMC set the value too low so set a minimum */
2153 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2154 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2156 /* store the partition info of emmc */
2157 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2158 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2159 ext_csd[EXT_CSD_BOOT_MULT])
2160 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2161 if (part_completed &&
2162 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2163 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* boot and RPMB partition sizes are in 128 KiB units (<< 17) */
2165 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2167 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* four general-purpose partitions, 3 size bytes each */
2169 for (i = 0; i < 4; i++) {
2170 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2171 uint mult = (ext_csd[idx + 2] << 16) +
2172 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2175 if (!part_completed)
2177 mmc->capacity_gp[i] = mult;
2178 mmc->capacity_gp[i] *=
2179 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2180 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* scale by 512 KiB (<< 19) to get bytes */
2181 mmc->capacity_gp[i] <<= 19;
2184 #ifndef CONFIG_SPL_BUILD
2185 if (part_completed) {
2186 mmc->enh_user_size =
2187 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2188 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2189 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2190 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2191 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2192 mmc->enh_user_size <<= 19;
2193 mmc->enh_user_start =
2194 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2195 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2196 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2197 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors */
2198 if (mmc->high_capacity)
2199 mmc->enh_user_start <<= 9;
2204 * Host needs to enable ERASE_GRP_DEF bit if device is
2205 * partitioned. This bit will be lost every time after a reset
2206 * or power off. This will affect erase size.
2210 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2211 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2214 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2215 EXT_CSD_ERASE_GROUP_DEF, 1);
/* mirror the switch in the cached copy */
2220 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2223 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2224 #if CONFIG_IS_ENABLED(MMC_WRITE)
2225 /* Read out group size from ext_csd */
2226 mmc->erase_grp_size =
2227 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2230 * if high capacity and partition setting completed
2231 * SEC_COUNT is valid even if it is smaller than 2 GiB
2232 * JEDEC Standard JESD84-B45, 6.2.4
2234 if (mmc->high_capacity && part_completed) {
2235 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2236 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2237 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2238 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2239 capacity *= MMC_MAX_BLOCK_LEN;
2240 mmc->capacity_user = capacity;
2243 #if CONFIG_IS_ENABLED(MMC_WRITE)
2245 /* Calculate the group size from the csd value. */
2246 int erase_gsz, erase_gmul;
2248 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2249 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2250 mmc->erase_grp_size = (erase_gsz + 1)
2254 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2255 mmc->hc_wp_grp_size = 1024
2256 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2257 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2260 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2265 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* error path: drop the cached copy so stale data is never used */
2268 mmc->ext_csd = NULL;
/*
 * Full card bring-up after the operating conditions have been negotiated:
 * identify (CID), assign/read RCA, read CSD, derive version/speed/block
 * lengths/capacity, select the card, run mmc_startup_v4() for eMMC v4+,
 * negotiate bus mode/width, and fill in the block device descriptor.
 * Returns 0 on success, negative error code otherwise.
 */
2273 static int mmc_startup(struct mmc *mmc)
2279 struct blk_desc *bdesc;
2281 #ifdef CONFIG_MMC_SPI_CRC_ON
2282 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2283 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2284 cmd.resp_type = MMC_RSP_R1;
2286 err = mmc_send_cmd(mmc, &cmd, NULL);
2292 /* Put the Card in Identify Mode */
2293 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2294 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2295 cmd.resp_type = MMC_RSP_R2;
2298 err = mmc_send_cmd(mmc, &cmd, NULL);
2300 #ifdef CONFIG_MMC_QUIRKS
2301 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2304 * It has been seen that SEND_CID may fail on the first
2305 * attempt, let's try a few more times
2308 err = mmc_send_cmd(mmc, &cmd, NULL);
2311 } while (retries--);
2318 memcpy(mmc->cid, cmd.response, 16);
2321 * For MMC cards, set the Relative Address.
2322 * For SD cards, get the Relative Address.
2323 * This also puts the cards into Standby State
2325 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2326 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2327 cmd.cmdarg = mmc->rca << 16;
2328 cmd.resp_type = MMC_RSP_R6;
2330 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD: the card picks the RCA and returns it in the response */
2336 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2339 /* Get the Card-Specific Data */
2340 cmd.cmdidx = MMC_CMD_SEND_CSD;
2341 cmd.resp_type = MMC_RSP_R2;
2342 cmd.cmdarg = mmc->rca << 16;
2344 err = mmc_send_cmd(mmc, &cmd, NULL);
2349 mmc->csd[0] = cmd.response[0];
2350 mmc->csd[1] = cmd.response[1];
2351 mmc->csd[2] = cmd.response[2];
2352 mmc->csd[3] = cmd.response[3];
/* derive the MMC spec version from the CSD SPEC_VERS field */
2354 if (mmc->version == MMC_VERSION_UNKNOWN) {
2355 int version = (cmd.response[0] >> 26) & 0xf;
2359 mmc->version = MMC_VERSION_1_2;
2362 mmc->version = MMC_VERSION_1_4;
2365 mmc->version = MMC_VERSION_2_2;
2368 mmc->version = MMC_VERSION_3;
2371 mmc->version = MMC_VERSION_4;
2374 mmc->version = MMC_VERSION_1_2;
2379 /* divide frequency by 10, since the mults are 10x bigger */
2380 freq = fbase[(cmd.response[0] & 0x7)];
2381 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2383 mmc->legacy_speed = freq * mult;
2384 mmc_select_mode(mmc, MMC_LEGACY);
2386 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2387 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2388 #if CONFIG_IS_ENABLED(MMC_WRITE)
2391 mmc->write_bl_len = mmc->read_bl_len;
2393 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* capacity calculation from CSD (C_SIZE/C_SIZE_MULT) */
2396 if (mmc->high_capacity) {
2397 csize = (mmc->csd[1] & 0x3f) << 16
2398 | (mmc->csd[2] & 0xffff0000) >> 16;
2401 csize = (mmc->csd[1] & 0x3ff) << 2
2402 | (mmc->csd[2] & 0xc0000000) >> 30;
2403 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2406 mmc->capacity_user = (csize + 1) << (cmult + 2);
2407 mmc->capacity_user *= mmc->read_bl_len;
2408 mmc->capacity_boot = 0;
2409 mmc->capacity_rpmb = 0;
2410 for (i = 0; i < 4; i++)
2411 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the stack supports */
2413 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2414 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2416 #if CONFIG_IS_ENABLED(MMC_WRITE)
2417 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2418 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only when the card advertises support for it */
2421 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2422 cmd.cmdidx = MMC_CMD_SET_DSR;
2423 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2424 cmd.resp_type = MMC_RSP_NONE;
2425 if (mmc_send_cmd(mmc, &cmd, NULL))
2426 pr_warn("MMC: SET_DSR failed\n");
2429 /* Select the card, and put it into Transfer Mode */
2430 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2431 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2432 cmd.resp_type = MMC_RSP_R1;
2433 cmd.cmdarg = mmc->rca << 16;
2434 err = mmc_send_cmd(mmc, &cmd, NULL);
2441 * For SD, its erase group is always one sector
2443 #if CONFIG_IS_ENABLED(MMC_WRITE)
2444 mmc->erase_grp_size = 1;
2446 mmc->part_config = MMCPART_NOAVAILABLE;
2448 err = mmc_startup_v4(mmc);
2452 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2456 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, no mode negotiation */
2457 mmc_set_clock(mmc, mmc->legacy_speed, false);
2458 mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2459 mmc_set_bus_width(mmc, 1);
2462 err = sd_get_capabilities(mmc);
2465 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2467 err = mmc_get_capabilities(mmc);
2470 mmc_select_mode_and_width(mmc, mmc->card_caps);
2476 mmc->best_mode = mmc->selected_mode;
2478 /* Fix the block length for DDR mode */
2479 if (mmc->ddr_mode) {
2480 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2481 #if CONFIG_IS_ENABLED(MMC_WRITE)
2482 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2486 /* fill in device description */
2487 bdesc = mmc_get_blk_desc(mmc);
2491 bdesc->blksz = mmc->read_bl_len;
2492 bdesc->log2blksz = LOG2(bdesc->blksz);
2493 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2494 #if !defined(CONFIG_SPL_BUILD) || \
2495 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2496 !defined(CONFIG_USE_TINY_PRINTF))
/* decode vendor/product/revision strings from the CID register */
2497 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2498 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2499 (mmc->cid[3] >> 16) & 0xffff);
2500 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2501 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2502 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2503 (mmc->cid[2] >> 24) & 0xff);
2504 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2505 (mmc->cid[2] >> 16) & 0xf);
2507 bdesc->vendor[0] = 0;
2508 bdesc->product[0] = 0;
2509 bdesc->revision[0] = 0;
2512 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * Probe for SD version 2 support via CMD8 (SEND_IF_COND) with the 0xaa
 * check pattern. A matching echo marks the card as SD_VERSION_2.
 */
2519 static int mmc_send_if_cond(struct mmc *mmc)
2524 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2525 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2526 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2527 cmd.resp_type = MMC_RSP_R7;
2529 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo the check pattern back unchanged */
2534 if ((cmd.response[0] & 0xff) != 0xaa)
2537 mmc->version = SD_VERSION_2;
2542 #if !CONFIG_IS_ENABLED(DM_MMC)
2543 /* board-specific MMC power initializations. */
2544 __weak void board_mmc_power_init(void)
/*
 * Bind the card's power supplies: with driver model, look up the optional
 * vmmc/vqmmc regulators from the device tree (absence is not an error);
 * without driver model, fall back to the board hook above.
 */
2549 static int mmc_power_init(struct mmc *mmc)
2551 #if CONFIG_IS_ENABLED(DM_MMC)
2552 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2555 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2558 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2560 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2561 &mmc->vqmmc_supply);
2563 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2565 #else /* !CONFIG_DM_MMC */
2567 * Driver model should use a regulator, as above, rather than calling
2568 * out to board code.
2570 board_mmc_power_init();
2576 * put the host in the initial state:
2577 * - turn on Vdd (card power supply)
2578 * - configure the bus width and clock to minimal values
2580 static void mmc_set_initial_state(struct mmc *mmc)
2584 /* First try to set 3.3V. If it fails set to 1.8V */
2585 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2587 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2589 pr_warn("mmc: failed to set signal voltage\n");
/* 1-bit legacy mode at minimal clock, as required for card init */
2591 mmc_select_mode(mmc, MMC_LEGACY);
2592 mmc_set_bus_width(mmc, 1);
2593 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the vmmc regulator (if bound); failure to enable is reported. */
2596 static int mmc_power_on(struct mmc *mmc)
2598 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2599 if (mmc->vmmc_supply) {
2600 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2603 puts("Error enabling VMMC supply\n");
/* Gate the clock and disable the vmmc regulator (if bound). */
2611 static int mmc_power_off(struct mmc *mmc)
2613 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2614 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2615 if (mmc->vmmc_supply) {
2616 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2619 pr_debug("Error disabling VMMC supply\n");
/*
 * Power-cycle the card: off, wait, then on again. Used to recover the
 * card after a failed UHS negotiation.
 */
2627 static int mmc_power_cycle(struct mmc *mmc)
2631 ret = mmc_power_off(mmc)
2635 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2636 * to be on the safer side.
2639 return mmc_power_on(mmc);
/*
 * Power up the card and negotiate its operating conditions:
 * power init/cycle, host init, CMD0 reset, CMD8 (SD v2 probe), then
 * SD ACMD41 or MMC CMD1. UHS modes are disabled when the board cannot
 * power-cycle the card, since UHS errors would then be unrecoverable.
 */
2642 int mmc_get_op_cond(struct mmc *mmc)
2644 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2650 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2651 mmc_adapter_card_type_ident();
2653 err = mmc_power_init(mmc);
2657 #ifdef CONFIG_MMC_QUIRKS
2658 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2659 MMC_QUIRK_RETRY_SEND_CID;
2662 err = mmc_power_cycle(mmc);
2665 * if power cycling is not supported, we should not try
2666 * to use the UHS modes, because we wouldn't be able to
2667 * recover from an error during the UHS initialization.
2669 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2671 mmc->host_caps &= ~UHS_CAPS;
2672 err = mmc_power_on(mmc);
2677 #if CONFIG_IS_ENABLED(DM_MMC)
2678 /* The device has already been probed ready for use */
2680 /* made sure it's not NULL earlier */
2681 err = mmc->cfg->ops->init(mmc);
2688 mmc_set_initial_state(mmc);
2690 /* Reset the Card */
2691 err = mmc_go_idle(mmc);
2696 /* The internal partition reset to user partition(0) at every CMD0*/
2697 mmc_get_blk_desc(mmc)->hwpart = 0;
2699 /* Test for SD version 2 */
2700 err = mmc_send_if_cond(mmc);
2702 /* Now try to get the SD card's operating condition */
2703 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS negotiation needs a power cycle before retrying */
2704 if (err && uhs_en) {
2706 mmc_power_cycle(mmc);
2710 /* If the command timed out, we check for an MMC card */
2711 if (err == -ETIMEDOUT) {
2712 err = mmc_send_op_cond(mmc);
2715 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2716 pr_err("Card did not respond to voltage select!\n");
/*
 * Begin (possibly asynchronous) card initialization: check card presence,
 * then negotiate operating conditions via mmc_get_op_cond(). Sets
 * init_in_progress so mmc_init() later completes the sequence.
 */
2725 int mmc_start_init(struct mmc *mmc)
2731 * all hosts are capable of 1 bit bus-width and able to use the legacy
2734 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2735 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2737 #if !defined(CONFIG_MMC_BROKEN_CD)
2738 /* we pretend there's no card when init is NULL */
2739 no_card = mmc_getcd(mmc) == 0;
2743 #if !CONFIG_IS_ENABLED(DM_MMC)
2744 no_card = no_card || (mmc->cfg->ops->init == NULL);
2748 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2749 pr_err("MMC: no card present\n");
2754 err = mmc_get_op_cond(mmc);
2757 mmc->init_in_progress = 1;
/*
 * Finish initialization started by mmc_start_init(): complete a pending
 * CMD1 (op-cond) loop if needed, then run the full startup sequence.
 */
2762 static int mmc_complete_init(struct mmc *mmc)
2766 mmc->init_in_progress = 0;
2767 if (mmc->op_cond_pending)
2768 err = mmc_complete_op_cond(mmc);
2771 err = mmc_startup(mmc);
/*
 * Public entry point: run start + complete initialization unless an init
 * is already in progress or (with DM) the device is already initialized.
 * Logs the elapsed time on completion.
 */
2779 int mmc_init(struct mmc *mmc)
2782 __maybe_unused ulong start;
2783 #if CONFIG_IS_ENABLED(DM_MMC)
2784 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2791 start = get_timer(0);
2793 if (!mmc->init_in_progress)
2794 err = mmc_start_init(mmc);
2797 err = mmc_complete_init(mmc);
2799 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2804 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2805 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2806 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Drop back to non-UHS / non-HS200/HS400 operation by re-running mode
 * selection with the high-speed capabilities masked out. Used before
 * handing the card off (e.g. to an OS) in a safe mode.
 */
2807 int mmc_deinit(struct mmc *mmc)
2815 caps_filtered = mmc->card_caps &
2816 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2817 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2818 MMC_CAP(UHS_SDR104));
2820 return sd_select_mode_and_width(mmc, caps_filtered);
2822 caps_filtered = mmc->card_caps &
2823 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2825 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Store the driver-stage register value to be programmed during startup. */
2830 int mmc_set_dsr(struct mmc *mmc, u16 val)
2836 /* CPU-specific MMC initializations */
2837 __weak int cpu_mmc_init(bd_t *bis)
2842 /* board-specific MMC initializations. */
2843 __weak int board_mmc_init(bd_t *bis)
/* Mark the device for initialization before its first use. */
2848 void mmc_set_preinit(struct mmc *mmc, int preinit)
2850 mmc->preinit = preinit;
2853 #if CONFIG_IS_ENABLED(DM_MMC)
/* Driver-model probe: walk the MMC uclass and probe every device. */
2854 static int mmc_probe(bd_t *bis)
2858 struct udevice *dev;
2860 ret = uclass_get(UCLASS_MMC, &uc);
2865 * Try to add them in sequence order. Really with driver model we
2866 * should allow holes, but the current MMC list does not allow that.
2867 * So if we request 0, 1, 3 we will get 0, 1, 2.
2869 for (i = 0; ; i++) {
2870 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2874 uclass_foreach_dev(dev, uc) {
2875 ret = device_probe(dev);
2877 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-DM probe: defer to the board hook. */
2883 static int mmc_probe(bd_t *bis)
2885 if (board_mmc_init(bis) < 0)
/*
 * One-time, idempotent MMC subsystem initialization: probe all
 * controllers and (outside SPL) list the discovered devices.
 */
2892 int mmc_initialize(bd_t *bis)
2894 static int initialized = 0;
2896 if (initialized) /* Avoid initializing mmc multiple times */
2900 #if !CONFIG_IS_ENABLED(BLK)
2901 #if !CONFIG_IS_ENABLED(MMC_TINY)
2905 ret = mmc_probe(bis);
2909 #ifndef CONFIG_SPL_BUILD
2910 print_mmc_devices(',');
2917 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations on an eMMC device, after checking
 * via EXT_CSD that the feature is supported and not already enabled.
 * WARNING: BKOPS_EN is a one-time-programmable EXT_CSD field on many
 * devices — once set it cannot be cleared.
 */
2918 int mmc_set_bkops_enable(struct mmc *mmc)
2921 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2923 err = mmc_send_ext_csd(mmc, ext_csd);
2925 puts("Could not get ext_csd register values\n");
2929 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2930 puts("Background operations not supported on device\n");
2931 return -EMEDIUMTYPE;
2934 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2935 puts("Background operations already enabled\n");
2939 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2941 puts("Failed to enable manual background operations\n");
2945 puts("Enabled manual background operations\n");