1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
25 static int mmc_power_cycle(struct mmc *mmc);
26 #if !CONFIG_IS_ENABLED(MMC_TINY)
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
30 #if !CONFIG_IS_ENABLED(DM_MMC)
32 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
33 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
39 __weak int board_mmc_getwp(struct mmc *mmc)
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
109 for (j = 0; j < 4; j++)
110 printf("%02x ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
119 printf("\t\tERROR MMC rsp not supported\n");
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [SD_LEGACY] = "SD Legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
164 [SD_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
209 int mmc_send_status(struct mmc *mmc, int timeout)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
223 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
227 if (cmd.response[0] & MMC_STATUS_MASK) {
228 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
229 pr_err("Status Error: 0x%08x\n",
234 } else if (--retries < 0)
243 mmc_trace_state(mmc, &cmd);
245 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
246 pr_err("Timeout waiting card ready\n");
254 int mmc_set_blocklen(struct mmc *mmc, int len)
262 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
263 cmd.resp_type = MMC_RSP_R1;
266 err = mmc_send_cmd(mmc, &cmd, NULL);
268 #ifdef CONFIG_MMC_QUIRKS
269 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
272 * It has been seen that SET_BLOCKLEN may fail on the first
273 * attempt, let's try a few more times
276 err = mmc_send_cmd(mmc, &cmd, NULL);
286 #ifdef MMC_SUPPORTS_TUNING
287 static const u8 tuning_blk_pattern_4bit[] = {
288 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
289 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
290 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
291 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
292 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
293 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
294 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
295 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
298 static const u8 tuning_blk_pattern_8bit[] = {
299 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
300 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
301 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
302 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
303 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
304 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
305 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
306 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
307 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
308 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
309 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
310 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
311 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
312 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
313 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
314 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
317 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
320 struct mmc_data data;
321 const u8 *tuning_block_pattern;
324 if (mmc->bus_width == 8) {
325 tuning_block_pattern = tuning_blk_pattern_8bit;
326 size = sizeof(tuning_blk_pattern_8bit);
327 } else if (mmc->bus_width == 4) {
328 tuning_block_pattern = tuning_blk_pattern_4bit;
329 size = sizeof(tuning_blk_pattern_4bit);
334 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
338 cmd.resp_type = MMC_RSP_R1;
340 data.dest = (void *)data_buf;
342 data.blocksize = size;
343 data.flags = MMC_DATA_READ;
345 err = mmc_send_cmd(mmc, &cmd, &data);
349 if (memcmp(data_buf, tuning_block_pattern, size))
356 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
360 struct mmc_data data;
363 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
365 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
367 if (mmc->high_capacity)
370 cmd.cmdarg = start * mmc->read_bl_len;
372 cmd.resp_type = MMC_RSP_R1;
375 data.blocks = blkcnt;
376 data.blocksize = mmc->read_bl_len;
377 data.flags = MMC_DATA_READ;
379 if (mmc_send_cmd(mmc, &cmd, &data))
383 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
385 cmd.resp_type = MMC_RSP_R1b;
386 if (mmc_send_cmd(mmc, &cmd, NULL)) {
387 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
388 pr_err("mmc fail to send stop cmd\n");
397 #if CONFIG_IS_ENABLED(BLK)
398 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
400 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
404 #if CONFIG_IS_ENABLED(BLK)
405 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
407 int dev_num = block_dev->devnum;
409 lbaint_t cur, blocks_todo = blkcnt;
414 struct mmc *mmc = find_mmc_device(dev_num);
418 if (CONFIG_IS_ENABLED(MMC_TINY))
419 err = mmc_switch_part(mmc, block_dev->hwpart);
421 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
426 if ((start + blkcnt) > block_dev->lba) {
427 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
428 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
429 start + blkcnt, block_dev->lba);
434 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
435 pr_debug("%s: Failed to set blocklen\n", __func__);
440 cur = (blocks_todo > mmc->cfg->b_max) ?
441 mmc->cfg->b_max : blocks_todo;
442 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
443 pr_debug("%s: Failed to read blocks\n", __func__);
448 dst += cur * mmc->read_bl_len;
449 } while (blocks_todo > 0);
454 static int mmc_go_idle(struct mmc *mmc)
461 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
463 cmd.resp_type = MMC_RSP_NONE;
465 err = mmc_send_cmd(mmc, &cmd, NULL);
475 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
476 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
482 * Send CMD11 only if the request is to switch the card to
485 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
486 return mmc_set_signal_voltage(mmc, signal_voltage);
488 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
490 cmd.resp_type = MMC_RSP_R1;
492 err = mmc_send_cmd(mmc, &cmd, NULL);
496 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
500 * The card should drive cmd and dat[0:3] low immediately
501 * after the response of cmd11, but wait 100 us to be sure
503 err = mmc_wait_dat0(mmc, 0, 100);
510 * During a signal voltage level switch, the clock must be gated
511 * for 5 ms according to the SD spec
513 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
515 err = mmc_set_signal_voltage(mmc, signal_voltage);
519 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
521 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
524 * Failure to switch is indicated by the card holding
525 * dat[0:3] low. Wait for at least 1 ms according to spec
527 err = mmc_wait_dat0(mmc, 1, 1000);
537 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
544 cmd.cmdidx = MMC_CMD_APP_CMD;
545 cmd.resp_type = MMC_RSP_R1;
548 err = mmc_send_cmd(mmc, &cmd, NULL);
553 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
554 cmd.resp_type = MMC_RSP_R3;
557 * Most cards do not answer if some reserved bits
558 * in the ocr are set. However, some controllers
559 * can set bit 7 (reserved for low voltages), but
560 * how to manage low-voltage SD cards is not yet
563 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
564 (mmc->cfg->voltages & 0xff8000);
566 if (mmc->version == SD_VERSION_2)
567 cmd.cmdarg |= OCR_HCS;
570 cmd.cmdarg |= OCR_S18R;
572 err = mmc_send_cmd(mmc, &cmd, NULL);
577 if (cmd.response[0] & OCR_BUSY)
586 if (mmc->version != SD_VERSION_2)
587 mmc->version = SD_VERSION_1_0;
589 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
590 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
591 cmd.resp_type = MMC_RSP_R3;
594 err = mmc_send_cmd(mmc, &cmd, NULL);
600 mmc->ocr = cmd.response[0];
602 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
603 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
605 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
611 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
617 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
622 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
623 cmd.resp_type = MMC_RSP_R3;
625 if (use_arg && !mmc_host_is_spi(mmc))
626 cmd.cmdarg = OCR_HCS |
627 (mmc->cfg->voltages &
628 (mmc->ocr & OCR_VOLTAGE_MASK)) |
629 (mmc->ocr & OCR_ACCESS_MODE);
631 err = mmc_send_cmd(mmc, &cmd, NULL);
634 mmc->ocr = cmd.response[0];
638 static int mmc_send_op_cond(struct mmc *mmc)
642 /* Some cards seem to need this */
645 /* Asking to the card its capabilities */
646 for (i = 0; i < 2; i++) {
647 err = mmc_send_op_cond_iter(mmc, i != 0);
651 /* exit if not busy (flag seems to be inverted) */
652 if (mmc->ocr & OCR_BUSY)
655 mmc->op_cond_pending = 1;
659 static int mmc_complete_op_cond(struct mmc *mmc)
666 mmc->op_cond_pending = 0;
667 if (!(mmc->ocr & OCR_BUSY)) {
668 /* Some cards seem to need this */
671 start = get_timer(0);
673 err = mmc_send_op_cond_iter(mmc, 1);
676 if (mmc->ocr & OCR_BUSY)
678 if (get_timer(start) > timeout)
684 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
685 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
686 cmd.resp_type = MMC_RSP_R3;
689 err = mmc_send_cmd(mmc, &cmd, NULL);
694 mmc->ocr = cmd.response[0];
697 mmc->version = MMC_VERSION_UNKNOWN;
699 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
706 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
709 struct mmc_data data;
712 /* Get the Card Status Register */
713 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
714 cmd.resp_type = MMC_RSP_R1;
717 data.dest = (char *)ext_csd;
719 data.blocksize = MMC_MAX_BLOCK_LEN;
720 data.flags = MMC_DATA_READ;
722 err = mmc_send_cmd(mmc, &cmd, &data);
727 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
735 cmd.cmdidx = MMC_CMD_SWITCH;
736 cmd.resp_type = MMC_RSP_R1b;
737 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
741 while (retries > 0) {
742 ret = mmc_send_cmd(mmc, &cmd, NULL);
754 /* Waiting for the ready status */
755 return mmc_send_status(mmc, timeout);
762 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
764 return __mmc_switch(mmc, set, index, value, true);
767 #if !CONFIG_IS_ENABLED(MMC_TINY)
768 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
774 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
780 speed_bits = EXT_CSD_TIMING_HS;
782 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
784 speed_bits = EXT_CSD_TIMING_HS200;
787 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
789 speed_bits = EXT_CSD_TIMING_HS400;
793 speed_bits = EXT_CSD_TIMING_LEGACY;
799 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
800 speed_bits, !hsdowngrade);
804 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
805 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
807 * In case the eMMC is in HS200/HS400 mode and we are downgrading
808 * to HS mode, the card clock is still running much faster than
809 * the supported HS mode clock, so we can not reliably read out
810 * Extended CSD. Reconfigure the controller to run at HS mode.
813 mmc_select_mode(mmc, MMC_HS);
814 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
818 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
819 /* Now check to see that it worked */
820 err = mmc_send_ext_csd(mmc, test_csd);
824 /* No high-speed support */
825 if (!test_csd[EXT_CSD_HS_TIMING])
832 static int mmc_get_capabilities(struct mmc *mmc)
834 u8 *ext_csd = mmc->ext_csd;
837 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
839 if (mmc_host_is_spi(mmc))
842 /* Only version 4 supports high-speed */
843 if (mmc->version < MMC_VERSION_4)
847 pr_err("No ext_csd found!\n"); /* this should never happen */
851 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
853 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
854 mmc->cardtype = cardtype;
856 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
857 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
858 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
859 mmc->card_caps |= MMC_MODE_HS200;
862 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
863 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
864 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
865 mmc->card_caps |= MMC_MODE_HS400;
868 if (cardtype & EXT_CSD_CARD_TYPE_52) {
869 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
870 mmc->card_caps |= MMC_MODE_DDR_52MHz;
871 mmc->card_caps |= MMC_MODE_HS_52MHz;
873 if (cardtype & EXT_CSD_CARD_TYPE_26)
874 mmc->card_caps |= MMC_MODE_HS;
880 static int mmc_set_capacity(struct mmc *mmc, int part_num)
884 mmc->capacity = mmc->capacity_user;
888 mmc->capacity = mmc->capacity_boot;
891 mmc->capacity = mmc->capacity_rpmb;
897 mmc->capacity = mmc->capacity_gp[part_num - 4];
903 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
908 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
909 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
914 if (part_num & PART_ACCESS_MASK)
915 forbidden = MMC_CAP(MMC_HS_200);
917 if (MMC_CAP(mmc->selected_mode) & forbidden) {
918 pr_debug("selected mode (%s) is forbidden for part %d\n",
919 mmc_mode_name(mmc->selected_mode), part_num);
921 } else if (mmc->selected_mode != mmc->best_mode) {
922 pr_debug("selected mode is not optimal\n");
927 return mmc_select_mode_and_width(mmc,
928 mmc->card_caps & ~forbidden);
933 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
934 unsigned int part_num)
940 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
944 ret = mmc_boot_part_access_chk(mmc, part_num);
948 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
949 (mmc->part_config & ~PART_ACCESS_MASK)
950 | (part_num & PART_ACCESS_MASK));
953 * Set the capacity if the switch succeeded or was intended
954 * to return to representing the raw device.
956 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
957 ret = mmc_set_capacity(mmc, part_num);
958 mmc_get_blk_desc(mmc)->hwpart = part_num;
964 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
965 int mmc_hwpart_config(struct mmc *mmc,
966 const struct mmc_hwpart_conf *conf,
967 enum mmc_hwpart_conf_mode mode)
973 u32 max_enh_size_mult;
974 u32 tot_enh_size_mult = 0;
977 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
979 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
982 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
983 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
987 if (!(mmc->part_support & PART_SUPPORT)) {
988 pr_err("Card does not support partitioning\n");
992 if (!mmc->hc_wp_grp_size) {
993 pr_err("Card does not define HC WP group size\n");
997 /* check partition alignment and total enhanced size */
998 if (conf->user.enh_size) {
999 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1000 conf->user.enh_start % mmc->hc_wp_grp_size) {
1001 pr_err("User data enhanced area not HC WP group "
1005 part_attrs |= EXT_CSD_ENH_USR;
1006 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1007 if (mmc->high_capacity) {
1008 enh_start_addr = conf->user.enh_start;
1010 enh_start_addr = (conf->user.enh_start << 9);
1016 tot_enh_size_mult += enh_size_mult;
1018 for (pidx = 0; pidx < 4; pidx++) {
1019 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1020 pr_err("GP%i partition not HC WP group size "
1021 "aligned\n", pidx+1);
1024 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1025 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1026 part_attrs |= EXT_CSD_ENH_GP(pidx);
1027 tot_enh_size_mult += gp_size_mult[pidx];
1031 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1032 pr_err("Card does not support enhanced attribute\n");
1033 return -EMEDIUMTYPE;
1036 err = mmc_send_ext_csd(mmc, ext_csd);
1041 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1042 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1043 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1044 if (tot_enh_size_mult > max_enh_size_mult) {
1045 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1046 tot_enh_size_mult, max_enh_size_mult);
1047 return -EMEDIUMTYPE;
1050 /* The default value of EXT_CSD_WR_REL_SET is device
1051 * dependent, the values can only be changed if the
1052 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1053 * changed only once and before partitioning is completed. */
1054 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1055 if (conf->user.wr_rel_change) {
1056 if (conf->user.wr_rel_set)
1057 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1059 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1061 for (pidx = 0; pidx < 4; pidx++) {
1062 if (conf->gp_part[pidx].wr_rel_change) {
1063 if (conf->gp_part[pidx].wr_rel_set)
1064 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1066 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1070 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1071 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1072 puts("Card does not support host controlled partition write "
1073 "reliability settings\n");
1074 return -EMEDIUMTYPE;
1077 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1078 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1079 pr_err("Card already partitioned\n");
1083 if (mode == MMC_HWPART_CONF_CHECK)
1086 /* Partitioning requires high-capacity size definitions */
1087 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1088 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1089 EXT_CSD_ERASE_GROUP_DEF, 1);
1094 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1096 /* update erase group size to be high-capacity */
1097 mmc->erase_grp_size =
1098 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1102 /* all OK, write the configuration */
1103 for (i = 0; i < 4; i++) {
1104 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1105 EXT_CSD_ENH_START_ADDR+i,
1106 (enh_start_addr >> (i*8)) & 0xFF);
1110 for (i = 0; i < 3; i++) {
1111 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1112 EXT_CSD_ENH_SIZE_MULT+i,
1113 (enh_size_mult >> (i*8)) & 0xFF);
1117 for (pidx = 0; pidx < 4; pidx++) {
1118 for (i = 0; i < 3; i++) {
1119 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1120 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1121 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1126 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1127 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1131 if (mode == MMC_HWPART_CONF_SET)
1134 /* The WR_REL_SET is a write-once register but shall be
1135 * written before setting PART_SETTING_COMPLETED. As it is
1136 * write-once we can only write it when completing the
1138 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1139 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1140 EXT_CSD_WR_REL_SET, wr_rel_set);
1145 /* Setting PART_SETTING_COMPLETED confirms the partition
1146 * configuration but it only becomes effective after power
1147 * cycle, so we do not adjust the partition related settings
1148 * in the mmc struct. */
1150 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1151 EXT_CSD_PARTITION_SETTING,
1152 EXT_CSD_PARTITION_SETTING_COMPLETED);
1160 #if !CONFIG_IS_ENABLED(DM_MMC)
1161 int mmc_getcd(struct mmc *mmc)
1165 cd = board_mmc_getcd(mmc);
1168 if (mmc->cfg->ops->getcd)
1169 cd = mmc->cfg->ops->getcd(mmc);
1178 #if !CONFIG_IS_ENABLED(MMC_TINY)
1179 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1182 struct mmc_data data;
1184 /* Switch the frequency */
1185 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1186 cmd.resp_type = MMC_RSP_R1;
1187 cmd.cmdarg = (mode << 31) | 0xffffff;
1188 cmd.cmdarg &= ~(0xf << (group * 4));
1189 cmd.cmdarg |= value << (group * 4);
1191 data.dest = (char *)resp;
1192 data.blocksize = 64;
1194 data.flags = MMC_DATA_READ;
1196 return mmc_send_cmd(mmc, &cmd, &data);
1199 static int sd_get_capabilities(struct mmc *mmc)
1203 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1204 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1205 struct mmc_data data;
1207 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1211 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1213 if (mmc_host_is_spi(mmc))
1216 /* Read the SCR to find out if this card supports higher speeds */
1217 cmd.cmdidx = MMC_CMD_APP_CMD;
1218 cmd.resp_type = MMC_RSP_R1;
1219 cmd.cmdarg = mmc->rca << 16;
1221 err = mmc_send_cmd(mmc, &cmd, NULL);
1226 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1227 cmd.resp_type = MMC_RSP_R1;
1233 data.dest = (char *)scr;
1236 data.flags = MMC_DATA_READ;
1238 err = mmc_send_cmd(mmc, &cmd, &data);
1247 mmc->scr[0] = __be32_to_cpu(scr[0]);
1248 mmc->scr[1] = __be32_to_cpu(scr[1]);
1250 switch ((mmc->scr[0] >> 24) & 0xf) {
1252 mmc->version = SD_VERSION_1_0;
1255 mmc->version = SD_VERSION_1_10;
1258 mmc->version = SD_VERSION_2;
1259 if ((mmc->scr[0] >> 15) & 0x1)
1260 mmc->version = SD_VERSION_3;
1263 mmc->version = SD_VERSION_1_0;
1267 if (mmc->scr[0] & SD_DATA_4BIT)
1268 mmc->card_caps |= MMC_MODE_4BIT;
1270 /* Version 1.0 doesn't support switching */
1271 if (mmc->version == SD_VERSION_1_0)
1276 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1277 (u8 *)switch_status);
1282 /* The high-speed function is busy. Try again */
1283 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1287 /* If high-speed isn't supported, we return */
1288 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1289 mmc->card_caps |= MMC_CAP(SD_HS);
1291 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1292 /* Versions before 3.0 don't support UHS modes */
1293 if (mmc->version < SD_VERSION_3)
1296 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1297 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1298 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1299 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1300 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1301 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1302 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1303 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1304 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1305 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1306 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1312 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1316 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1319 /* SD version 1.00 and 1.01 does not support CMD 6 */
1320 if (mmc->version == SD_VERSION_1_0)
1325 speed = UHS_SDR12_BUS_SPEED;
1328 speed = HIGH_SPEED_BUS_SPEED;
1330 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1332 speed = UHS_SDR12_BUS_SPEED;
1335 speed = UHS_SDR25_BUS_SPEED;
1338 speed = UHS_SDR50_BUS_SPEED;
1341 speed = UHS_DDR50_BUS_SPEED;
1344 speed = UHS_SDR104_BUS_SPEED;
1351 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1355 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1361 static int sd_select_bus_width(struct mmc *mmc, int w)
1366 if ((w != 4) && (w != 1))
1369 cmd.cmdidx = MMC_CMD_APP_CMD;
1370 cmd.resp_type = MMC_RSP_R1;
1371 cmd.cmdarg = mmc->rca << 16;
1373 err = mmc_send_cmd(mmc, &cmd, NULL);
1377 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1378 cmd.resp_type = MMC_RSP_R1;
1383 err = mmc_send_cmd(mmc, &cmd, NULL);
1391 #if CONFIG_IS_ENABLED(MMC_WRITE)
1392 static int sd_read_ssr(struct mmc *mmc)
1394 static const unsigned int sd_au_size[] = {
1395 0, SZ_16K / 512, SZ_32K / 512,
1396 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1397 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1398 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1399 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1404 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1405 struct mmc_data data;
1407 unsigned int au, eo, et, es;
1409 cmd.cmdidx = MMC_CMD_APP_CMD;
1410 cmd.resp_type = MMC_RSP_R1;
1411 cmd.cmdarg = mmc->rca << 16;
1413 err = mmc_send_cmd(mmc, &cmd, NULL);
1417 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1418 cmd.resp_type = MMC_RSP_R1;
1422 data.dest = (char *)ssr;
1423 data.blocksize = 64;
1425 data.flags = MMC_DATA_READ;
1427 err = mmc_send_cmd(mmc, &cmd, &data);
1435 for (i = 0; i < 16; i++)
1436 ssr[i] = be32_to_cpu(ssr[i]);
1438 au = (ssr[2] >> 12) & 0xF;
1439 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1440 mmc->ssr.au = sd_au_size[au];
1441 es = (ssr[3] >> 24) & 0xFF;
1442 es |= (ssr[2] & 0xFF) << 8;
1443 et = (ssr[3] >> 18) & 0x3F;
1445 eo = (ssr[3] >> 16) & 0x3;
1446 mmc->ssr.erase_timeout = (et * 1000) / es;
1447 mmc->ssr.erase_offset = eo * 1000;
1450 pr_debug("Invalid Allocation Unit Size.\n");
1456 /* frequency bases */
1457 /* divided by 10 to be nice to platforms without floating point */
1458 static const int fbase[] = {
1465 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1466 * to platforms without floating point.
1468 static const u8 multipliers[] = {
1487 static inline int bus_width(uint cap)
1489 if (cap == MMC_MODE_8BIT)
1491 if (cap == MMC_MODE_4BIT)
1493 if (cap == MMC_MODE_1BIT)
1495 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1499 #if !CONFIG_IS_ENABLED(DM_MMC)
1500 #ifdef MMC_SUPPORTS_TUNING
1501 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1507 static void mmc_send_init_stream(struct mmc *mmc)
1511 static int mmc_set_ios(struct mmc *mmc)
1515 if (mmc->cfg->ops->set_ios)
1516 ret = mmc->cfg->ops->set_ios(mmc);
1522 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1525 if (clock > mmc->cfg->f_max)
1526 clock = mmc->cfg->f_max;
1528 if (clock < mmc->cfg->f_min)
1529 clock = mmc->cfg->f_min;
1533 mmc->clk_disable = disable;
1535 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1537 return mmc_set_ios(mmc);
1540 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1542 mmc->bus_width = width;
1544 return mmc_set_ios(mmc);
1547 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1549 * helper function to display the capabilities in a human
1550 * friendly manner. The capabilities include bus width and
1553 void mmc_dump_capabilities(const char *text, uint caps)
1557 pr_debug("%s: widths [", text);
1558 if (caps & MMC_MODE_8BIT)
1560 if (caps & MMC_MODE_4BIT)
1562 if (caps & MMC_MODE_1BIT)
1564 pr_debug("\b\b] modes [");
1565 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1566 if (MMC_CAP(mode) & caps)
1567 pr_debug("%s, ", mmc_mode_name(mode));
1568 pr_debug("\b\b]\n");
1572 struct mode_width_tuning {
1575 #ifdef MMC_SUPPORTS_TUNING
1580 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1581 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1584 case MMC_SIGNAL_VOLTAGE_000: return 0;
1585 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1586 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1587 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1592 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1596 if (mmc->signal_voltage == signal_voltage)
1599 mmc->signal_voltage = signal_voltage;
1600 err = mmc_set_ios(mmc);
1602 pr_debug("unable to set voltage (err %d)\n", err);
1607 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1613 #if !CONFIG_IS_ENABLED(MMC_TINY)
1614 static const struct mode_width_tuning sd_modes_by_pref[] = {
1615 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1616 #ifdef MMC_SUPPORTS_TUNING
1619 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1620 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1625 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1629 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1633 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1638 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1640 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1643 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1648 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1652 #define for_each_sd_mode_by_pref(caps, mwt) \
1653 for (mwt = sd_modes_by_pref;\
1654 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1656 if (caps & MMC_CAP(mwt->mode))
1658 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1661 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1662 const struct mode_width_tuning *mwt;
1663 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1664 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1666 bool uhs_en = false;
1671 mmc_dump_capabilities("sd card", card_caps);
1672 mmc_dump_capabilities("host", mmc->host_caps);
1675 /* Restrict card's capabilities by what the host can do */
1676 caps = card_caps & mmc->host_caps;
1681 for_each_sd_mode_by_pref(caps, mwt) {
1684 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1685 if (*w & caps & mwt->widths) {
1686 pr_debug("trying mode %s width %d (at %d MHz)\n",
1687 mmc_mode_name(mwt->mode),
1689 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1691 /* configure the bus width (card + host) */
1692 err = sd_select_bus_width(mmc, bus_width(*w));
1695 mmc_set_bus_width(mmc, bus_width(*w));
1697 /* configure the bus mode (card) */
1698 err = sd_set_card_speed(mmc, mwt->mode);
1702 /* configure the bus mode (host) */
1703 mmc_select_mode(mmc, mwt->mode);
1704 mmc_set_clock(mmc, mmc->tran_speed,
1707 #ifdef MMC_SUPPORTS_TUNING
1708 /* execute tuning if needed */
1709 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1710 err = mmc_execute_tuning(mmc,
1713 pr_debug("tuning failed\n");
1719 #if CONFIG_IS_ENABLED(MMC_WRITE)
1720 err = sd_read_ssr(mmc);
1722 pr_warn("unable to read ssr\n");
1728 /* revert to a safer bus speed */
1729 mmc_select_mode(mmc, SD_LEGACY);
1730 mmc_set_clock(mmc, mmc->tran_speed,
1736 pr_err("unable to select a mode\n");
1741 * read and compare the part of ext csd that is constant.
1742 * This can be used to check that the transfer is working
/*
 * mmc_read_and_compare_ext_csd() - validate the current bus configuration
 * by re-reading EXT_CSD and comparing a set of read-only fields against
 * the copy cached in mmc->ext_csd.  Pre-v4 cards have no EXT_CSD and are
 * trivially accepted.
 */
1745 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1748 const u8 *ext_csd = mmc->ext_csd;
1749 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD only exists on MMC v4 and later. */
1751 if (mmc->version < MMC_VERSION_4)
1754 err = mmc_send_ext_csd(mmc, test_csd);
1758 /* Only compare read only fields */
1759 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1760 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1761 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1762 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1763 ext_csd[EXT_CSD_REV]
1764 == test_csd[EXT_CSD_REV] &&
1765 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1766 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1767 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1768 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1774 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - switch to the lowest signal voltage accepted
 * by both the card (per its EXT_CSD cardtype bits for the given bus mode)
 * and the caller's allowed_mask.  NOTE(review): elided view — the mode
 * dispatch (switch/case on @mode) around these bit tests is not visible.
 */
1775 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1776 uint32_t allowed_mask)
1783 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1784 EXT_CSD_CARD_TYPE_HS400_1_8V))
1785 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1786 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1787 EXT_CSD_CARD_TYPE_HS400_1_2V))
1788 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1791 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1792 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1793 MMC_SIGNAL_VOLTAGE_180;
1794 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1795 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1798 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* Try candidates lowest-first: ffs() returns the lowest set bit. */
1802 while (card_mask & allowed_mask) {
1803 enum mmc_voltage best_match;
1805 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1806 if (!mmc_set_signal_voltage(mmc, best_match))
1809 allowed_mask &= ~best_match;
/* Stub when MMC_IO_VOLTAGE is compiled out: voltage switching is a no-op. */
1815 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1816 uint32_t allowed_mask)
/* eMMC bus modes in decreasing order of preference. */
1822 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1823 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1826 .widths = MMC_MODE_8BIT,
/* HS400 is tuned while still in HS200, hence the HS200 tuning opcode. */
1827 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1830 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1833 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1834 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1839 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1843 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1847 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1851 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate over mmc_modes_by_pref entries whose mode bit is set in caps. */
1855 #define for_each_mmc_mode_by_pref(caps, mwt) \
1856 for (mwt = mmc_modes_by_pref;\
1857 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1859 if (caps & MMC_CAP(mwt->mode))
/* Map of host width capability + DDR flag to the EXT_CSD bus width value. */
1861 static const struct ext_csd_bus_width {
1865 } ext_csd_bus_width[] = {
1866 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1867 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1868 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1869 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1870 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1873 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - HS400 switch sequence: tune in HS200, drop back to
 * HS, enable 8-bit DDR bus width, then switch card and host to HS400.
 * NOTE(review): elided view — some error-return lines between steps are
 * not visible here.
 */
1874 static int mmc_select_hs400(struct mmc *mmc)
1878 /* Set timing to HS200 for tuning */
1879 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1883 /* configure the bus mode (host) */
1884 mmc_select_mode(mmc, MMC_HS_200);
1885 mmc_set_clock(mmc, mmc->tran_speed, false);
1887 /* execute tuning if needed */
1888 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1890 debug("tuning failed\n");
1894 /* Set back to HS */
1895 mmc_set_card_speed(mmc, MMC_HS, true);
/* 8-bit DDR bus width is mandatory for HS400. */
1897 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1898 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1902 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1906 mmc_select_mode(mmc, MMC_HS_400);
1907 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub when HS400 support is compiled out. */
1914 static int mmc_select_hs400(struct mmc *mmc)
/* Iterate over ext_csd_bus_width entries matching the DDR flag and caps. */
1920 #define for_each_supported_width(caps, ddr, ecbv) \
1921 for (ecbv = ext_csd_bus_width;\
1922 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1924 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * mmc_select_mode_and_width() - pick the best eMMC bus mode and width
 * supported by both card and host, trying modes in preference order and
 * validating each candidate with an EXT_CSD read-back; falls back to
 * 1-bit legacy mode on failure.  NOTE(review): elided view — not every
 * error-handling line between the visible statements is shown here.
 */
1926 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1929 const struct mode_width_tuning *mwt;
1930 const struct ext_csd_bus_width *ecbw;
1933 mmc_dump_capabilities("mmc", card_caps);
1934 mmc_dump_capabilities("host", mmc->host_caps);
1937 /* Restrict card's capabilities by what the host can do */
1938 card_caps &= mmc->host_caps;
1940 /* Only version 4 of MMC supports wider bus widths */
1941 if (mmc->version < MMC_VERSION_4)
1944 if (!mmc->ext_csd) {
1945 pr_debug("No ext_csd found!\n"); /* this should never happen */
1949 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
1950 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1952 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
1953 * before doing anything else, since a transition from either of
1954 * the HS200/HS400 mode directly to legacy mode is not supported.
1956 if (mmc->selected_mode == MMC_HS_200 ||
1957 mmc->selected_mode == MMC_HS_400)
1958 mmc_set_card_speed(mmc, MMC_HS, true);
1961 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1963 for_each_mmc_mode_by_pref(card_caps, mwt) {
1964 for_each_supported_width(card_caps & mwt->widths,
1965 mmc_is_mode_ddr(mwt->mode), ecbw) {
1966 enum mmc_voltage old_voltage;
1967 pr_debug("trying mode %s width %d (at %d MHz)\n",
1968 mmc_mode_name(mwt->mode),
1969 bus_width(ecbw->cap),
1970 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* Remember the voltage so it can be restored if this mode fails. */
1971 old_voltage = mmc->signal_voltage;
1972 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1973 MMC_ALL_SIGNAL_VOLTAGE);
1977 /* configure the bus width (card + host) */
1978 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1980 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1983 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400 has its own multi-step switch sequence. */
1985 if (mwt->mode == MMC_HS_400) {
1986 err = mmc_select_hs400(mmc);
1988 printf("Select HS400 failed %d\n", err);
1992 /* configure the bus speed (card) */
1993 err = mmc_set_card_speed(mmc, mwt->mode, false);
1998 * configure the bus width AND the ddr mode
1999 * (card). The host side will be taken care
2000 * of in the next step
2002 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2003 err = mmc_switch(mmc,
2004 EXT_CSD_CMD_SET_NORMAL,
2006 ecbw->ext_csd_bits);
2011 /* configure the bus mode (host) */
2012 mmc_select_mode(mmc, mwt->mode);
2013 mmc_set_clock(mmc, mmc->tran_speed,
2015 #ifdef MMC_SUPPORTS_TUNING
2017 /* execute tuning if needed */
2019 err = mmc_execute_tuning(mmc,
2022 pr_debug("tuning failed\n");
2029 /* do a transfer to check the configuration */
2030 err = mmc_read_and_compare_ext_csd(mmc);
2034 mmc_set_signal_voltage(mmc, old_voltage);
2035 /* if an error occurred, revert to a safer bus mode */
2036 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2037 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2038 mmc_select_mode(mmc, MMC_LEGACY);
2039 mmc_set_bus_width(mmc, 1);
2043 pr_err("unable to select a mode\n");
2049 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: one static EXT_CSD buffer instead of a per-card malloc. */
2050 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * mmc_startup_v4() - MMC v4+ startup: read EXT_CSD and derive the spec
 * version, user/boot/RPMB/GP capacities, partition info and the erase/WP
 * group sizes from it.  No-op for SD cards and pre-v4 MMC.
 * NOTE(review): elided view — several lines (error checks, the
 * mmc_versions[] entries, some cleanup labels) are not visible here.
 */
2053 static int mmc_startup_v4(struct mmc *mmc)
2057 bool has_parts = false;
2058 bool part_completed;
2059 static const u32 mmc_versions[] = {
2071 #if CONFIG_IS_ENABLED(MMC_TINY)
2072 u8 *ext_csd = ext_csd_bkup;
2074 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2078 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2080 err = mmc_send_ext_csd(mmc, ext_csd);
2084 /* store the ext csd for future reference */
2086 mmc->ext_csd = ext_csd;
2088 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2090 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2093 /* check ext_csd version and capacity */
2094 err = mmc_send_ext_csd(mmc, ext_csd);
2098 /* store the ext csd for future reference */
2100 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2103 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* Map EXT_CSD_REV to an MMC_VERSION_* constant; reject unknown revisions. */
2105 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2108 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2110 if (mmc->version >= MMC_VERSION_4_2) {
2112 * According to the JEDEC Standard, the value of
2113 * ext_csd's capacity is valid if the value is more
/* SEC_CNT is a little-endian 32-bit sector count assembled by hand. */
2116 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2117 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2118 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2119 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2120 capacity *= MMC_MAX_BLOCK_LEN;
/* Only trust SEC_CNT above 2 GiB (high-capacity devices). */
2121 if ((capacity >> 20) > 2 * 1024)
2122 mmc->capacity_user = capacity;
2125 /* The partition data may be non-zero but it is only
2126 * effective if PARTITION_SETTING_COMPLETED is set in
2127 * EXT_CSD, so ignore any data if this bit is not set,
2128 * except for enabling the high-capacity group size
2129 * definition (see below).
2131 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2132 EXT_CSD_PARTITION_SETTING_COMPLETED);
2134 /* store the partition info of emmc */
2135 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2136 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2137 ext_csd[EXT_CSD_BOOT_MULT])
2138 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2139 if (part_completed &&
2140 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2141 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* Boot/RPMB partition sizes are reported in 128 KiB units (<< 17). */
2143 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2145 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2147 for (i = 0; i < 4; i++) {
2148 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2149 uint mult = (ext_csd[idx + 2] << 16) +
2150 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2153 if (!part_completed)
2155 mmc->capacity_gp[i] = mult;
2156 mmc->capacity_gp[i] *=
2157 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2158 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* Group units are 512 KiB (<< 19). */
2159 mmc->capacity_gp[i] <<= 19;
2162 #ifndef CONFIG_SPL_BUILD
2163 if (part_completed) {
2164 mmc->enh_user_size =
2165 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2166 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2167 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2168 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2169 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2170 mmc->enh_user_size <<= 19;
2171 mmc->enh_user_start =
2172 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2173 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2174 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2175 ext_csd[EXT_CSD_ENH_START_ADDR];
/* High-capacity devices address ENH_START in 512-byte sectors. */
2176 if (mmc->high_capacity)
2177 mmc->enh_user_start <<= 9;
2182 * Host needs to enable ERASE_GRP_DEF bit if device is
2183 * partitioned. This bit will be lost every time after a reset
2184 * or power off. This will affect erase size.
2188 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2189 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2192 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2193 EXT_CSD_ERASE_GROUP_DEF, 1);
/* Keep the cached copy consistent with the switch just performed. */
2198 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2201 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2202 #if CONFIG_IS_ENABLED(MMC_WRITE)
2203 /* Read out group size from ext_csd */
2204 mmc->erase_grp_size =
2205 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2208 * if high capacity and partition setting completed
2209 * SEC_COUNT is valid even if it is smaller than 2 GiB
2210 * JEDEC Standard JESD84-B45, 6.2.4
2212 if (mmc->high_capacity && part_completed) {
2213 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2214 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2215 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2216 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2217 capacity *= MMC_MAX_BLOCK_LEN;
2218 mmc->capacity_user = capacity;
2221 #if CONFIG_IS_ENABLED(MMC_WRITE)
2223 /* Calculate the group size from the csd value. */
2224 int erase_gsz, erase_gmul;
2226 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2227 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2228 mmc->erase_grp_size = (erase_gsz + 1)
2232 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2233 mmc->hc_wp_grp_size = 1024
2234 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2235 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2238 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2243 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* Error path: drop the cached EXT_CSD pointer. */
2246 mmc->ext_csd = NULL;
/*
 * mmc_startup() - bring an identified card up to Transfer State: read CID
 * and CSD, set/get the RCA, derive the legacy clock and block lengths,
 * parse EXT_CSD (v4+), select the best bus mode/width and fill in the
 * block descriptor.  NOTE(review): elided view — many error checks,
 * switch-case labels and table lines are not visible here.
 */
2251 static int mmc_startup(struct mmc *mmc)
2257 struct blk_desc *bdesc;
2259 #ifdef CONFIG_MMC_SPI_CRC_ON
2260 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2261 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2262 cmd.resp_type = MMC_RSP_R1;
2264 err = mmc_send_cmd(mmc, &cmd, NULL);
2270 /* Put the Card in Identify Mode */
2271 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2272 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2273 cmd.resp_type = MMC_RSP_R2;
2276 err = mmc_send_cmd(mmc, &cmd, NULL);
2278 #ifdef CONFIG_MMC_QUIRKS
2279 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2282 * It has been seen that SEND_CID may fail on the first
2283 * attempt, let's try a few more time
2286 err = mmc_send_cmd(mmc, &cmd, NULL);
2289 } while (retries--);
2296 memcpy(mmc->cid, cmd.response, 16);
2299 * For MMC cards, set the Relative Address.
2300 * For SD cards, get the Relative Address.
2301 * This also puts the cards into Standby State
2303 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2304 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2305 cmd.cmdarg = mmc->rca << 16;
2306 cmd.resp_type = MMC_RSP_R6;
2308 err = mmc_send_cmd(mmc, &cmd, NULL);
2314 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2317 /* Get the Card-Specific Data */
2318 cmd.cmdidx = MMC_CMD_SEND_CSD;
2319 cmd.resp_type = MMC_RSP_R2;
2320 cmd.cmdarg = mmc->rca << 16;
2322 err = mmc_send_cmd(mmc, &cmd, NULL);
2327 mmc->csd[0] = cmd.response[0];
2328 mmc->csd[1] = cmd.response[1];
2329 mmc->csd[2] = cmd.response[2];
2330 mmc->csd[3] = cmd.response[3];
/* CSD SPEC_VERS field (bits 29:26) selects the MMC spec version. */
2332 if (mmc->version == MMC_VERSION_UNKNOWN) {
2333 int version = (cmd.response[0] >> 26) & 0xf;
2337 mmc->version = MMC_VERSION_1_2;
2340 mmc->version = MMC_VERSION_1_4;
2343 mmc->version = MMC_VERSION_2_2;
2346 mmc->version = MMC_VERSION_3;
2349 mmc->version = MMC_VERSION_4;
2352 mmc->version = MMC_VERSION_1_2;
2357 /* divide frequency by 10, since the mults are 10x bigger */
2358 freq = fbase[(cmd.response[0] & 0x7)];
2359 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2361 mmc->legacy_speed = freq * mult;
2362 mmc_select_mode(mmc, MMC_LEGACY);
2364 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2365 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2366 #if CONFIG_IS_ENABLED(MMC_WRITE)
2369 mmc->write_bl_len = mmc->read_bl_len;
2371 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* Decode C_SIZE/C_SIZE_MULT from the CSD to get the raw capacity. */
2374 if (mmc->high_capacity) {
2375 csize = (mmc->csd[1] & 0x3f) << 16
2376 | (mmc->csd[2] & 0xffff0000) >> 16;
2379 csize = (mmc->csd[1] & 0x3ff) << 2
2380 | (mmc->csd[2] & 0xc0000000) >> 30;
2381 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2384 mmc->capacity_user = (csize + 1) << (cmult + 2);
2385 mmc->capacity_user *= mmc->read_bl_len;
2386 mmc->capacity_boot = 0;
2387 mmc->capacity_rpmb = 0;
2388 for (i = 0; i < 4; i++)
2389 mmc->capacity_gp[i] = 0;
/* Clamp block lengths to the driver's maximum transfer unit. */
2391 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2392 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2394 #if CONFIG_IS_ENABLED(MMC_WRITE)
2395 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2396 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* Program the DSR only if the card implements it and one was configured. */
2399 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2400 cmd.cmdidx = MMC_CMD_SET_DSR;
2401 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2402 cmd.resp_type = MMC_RSP_NONE;
2403 if (mmc_send_cmd(mmc, &cmd, NULL))
2404 pr_warn("MMC: SET_DSR failed\n");
2407 /* Select the card, and put it into Transfer Mode */
2408 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2409 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2410 cmd.resp_type = MMC_RSP_R1;
2411 cmd.cmdarg = mmc->rca << 16;
2412 err = mmc_send_cmd(mmc, &cmd, NULL);
2419 * For SD, its erase group is always one sector
2421 #if CONFIG_IS_ENABLED(MMC_WRITE)
2422 mmc->erase_grp_size = 1;
2424 mmc->part_config = MMCPART_NOAVAILABLE;
2426 err = mmc_startup_v4(mmc);
2430 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2434 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, no capability negotiation. */
2435 mmc_set_clock(mmc, mmc->legacy_speed, false);
2436 mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2437 mmc_set_bus_width(mmc, 1);
2440 err = sd_get_capabilities(mmc);
2443 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2445 err = mmc_get_capabilities(mmc);
2448 mmc_select_mode_and_width(mmc, mmc->card_caps);
2454 mmc->best_mode = mmc->selected_mode;
2456 /* Fix the block length for DDR mode */
2457 if (mmc->ddr_mode) {
2458 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2459 #if CONFIG_IS_ENABLED(MMC_WRITE)
2460 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2464 /* fill in device description */
2465 bdesc = mmc_get_blk_desc(mmc);
2469 bdesc->blksz = mmc->read_bl_len;
2470 bdesc->log2blksz = LOG2(bdesc->blksz);
2471 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2472 #if !defined(CONFIG_SPL_BUILD) || \
2473 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2474 !defined(CONFIG_USE_TINY_PRINTF))
/* Decode manufacturer/serial, product name and revision from the CID. */
2475 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2476 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2477 (mmc->cid[3] >> 16) & 0xffff);
2478 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2479 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2480 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2481 (mmc->cid[2] >> 24) & 0xff);
2482 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2483 (mmc->cid[2] >> 16) & 0xf);
2485 bdesc->vendor[0] = 0;
2486 bdesc->product[0] = 0;
2487 bdesc->revision[0] = 0;
2490 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * mmc_send_if_cond() - CMD8 (SEND_IF_COND): probe for SD v2 and verify
 * that the card echoes the 0xaa check pattern; on success marks the card
 * as SD_VERSION_2.
 */
2497 static int mmc_send_if_cond(struct mmc *mmc)
2502 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2503 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2504 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2505 cmd.resp_type = MMC_RSP_R7;
2507 err = mmc_send_cmd(mmc, &cmd, NULL);
/* A v2 card echoes the check pattern back in the response. */
2512 if ((cmd.response[0] & 0xff) != 0xaa)
2515 mmc->version = SD_VERSION_2;
2520 #if !CONFIG_IS_ENABLED(DM_MMC)
2521 /* board-specific MMC power initializations. */
2522 __weak void board_mmc_power_init(void)
/*
 * mmc_power_init() - under driver model, look up the vmmc/vqmmc supply
 * regulators (missing supplies are logged but not fatal); otherwise fall
 * back to the weak board hook.
 */
2527 static int mmc_power_init(struct mmc *mmc)
2529 #if CONFIG_IS_ENABLED(DM_MMC)
2530 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2533 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2536 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2538 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2539 &mmc->vqmmc_supply);
2541 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2543 #else /* !CONFIG_DM_MMC */
2545 * Driver model should use a regulator, as above, rather than calling
2546 * out to board code.
2548 board_mmc_power_init();
2554 * put the host in the initial state:
2555 * - turn on Vdd (card power supply)
2556 * - configure the bus width and clock to minimal values
2558 static void mmc_set_initial_state(struct mmc *mmc)
2562 /* First try to set 3.3V. If it fails set to 1.8V */
2563 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2565 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
/* Voltage failure is non-fatal: carry on in whatever state we have. */
2567 pr_warn("mmc: failed to set signal voltage\n");
/* 1-bit legacy mode at minimum clock, as required for identification. */
2569 mmc_select_mode(mmc, MMC_LEGACY);
2570 mmc_set_bus_width(mmc, 1);
2571 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/* Enable the card power supply (vmmc regulator under driver model). */
2574 static int mmc_power_on(struct mmc *mmc)
2576 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2577 if (mmc->vmmc_supply) {
2578 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2581 puts("Error enabling VMMC supply\n");
/* Gate the clock, then cut the card power supply. */
2589 static int mmc_power_off(struct mmc *mmc)
2591 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2592 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2593 if (mmc->vmmc_supply) {
2594 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2597 pr_debug("Error disabling VMMC supply\n");
/* Full power cycle: off, short delay (see comment below), back on. */
2605 static int mmc_power_cycle(struct mmc *mmc)
2609 ret = mmc_power_off(mmc);
2613 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2614 * to be on the safer side.
2617 return mmc_power_on(mmc);
/*
 * mmc_get_op_cond() - power up the card and run the operating-condition
 * negotiation: CMD0 reset, CMD8 (SD v2 probe), then SD ACMD41 or MMC CMD1.
 * UHS is disabled when a full power cycle is not possible, since UHS error
 * recovery requires one.  NOTE(review): elided view — some error-return
 * lines between the visible steps are not shown.
 */
2620 int mmc_get_op_cond(struct mmc *mmc)
2622 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2628 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2629 mmc_adapter_card_type_ident();
2631 err = mmc_power_init(mmc);
2635 #ifdef CONFIG_MMC_QUIRKS
2636 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2637 MMC_QUIRK_RETRY_SEND_CID;
2640 err = mmc_power_cycle(mmc);
2643 * if power cycling is not supported, we should not try
2644 * to use the UHS modes, because we wouldn't be able to
2645 * recover from an error during the UHS initialization.
2647 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2649 mmc->host_caps &= ~UHS_CAPS;
2650 err = mmc_power_on(mmc);
2655 #if CONFIG_IS_ENABLED(DM_MMC)
2656 /* The device has already been probed ready for use */
2658 /* made sure it's not NULL earlier */
2659 err = mmc->cfg->ops->init(mmc);
2666 mmc_set_initial_state(mmc);
2667 mmc_send_init_stream(mmc);
2669 /* Reset the Card */
2670 err = mmc_go_idle(mmc);
2675 /* The internal partition reset to user partition(0) at every CMD0*/
2676 mmc_get_blk_desc(mmc)->hwpart = 0;
2678 /* Test for SD version 2 */
2679 err = mmc_send_if_cond(mmc);
2681 /* Now try to get the SD card's operating condition */
2682 err = sd_send_op_cond(mmc, uhs_en);
/* If UHS negotiation failed, power-cycle and retry without UHS. */
2683 if (err && uhs_en) {
2685 mmc_power_cycle(mmc);
2689 /* If the command timed out, we check for an MMC card */
2690 if (err == -ETIMEDOUT) {
2691 err = mmc_send_op_cond(mmc);
2694 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2695 pr_err("Card did not respond to voltage select!\n");
/*
 * mmc_start_init() - begin (possibly asynchronous) card initialization:
 * check card detect, then run the operating-condition negotiation.
 * Completed later by mmc_complete_init() via mmc_init().
 */
2704 int mmc_start_init(struct mmc *mmc)
2710 * all hosts are capable of 1 bit bus-width and able to use the legacy
2713 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2714 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2716 #if !defined(CONFIG_MMC_BROKEN_CD)
2717 /* we pretend there's no card when init is NULL */
2718 no_card = mmc_getcd(mmc) == 0;
2722 #if !CONFIG_IS_ENABLED(DM_MMC)
2723 no_card = no_card || (mmc->cfg->ops->init == NULL);
2727 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2728 pr_err("MMC: no card present\n");
2733 err = mmc_get_op_cond(mmc);
2736 mmc->init_in_progress = 1;
/* Finish an init begun by mmc_start_init(): complete OCR, then startup. */
2741 static int mmc_complete_init(struct mmc *mmc)
2745 mmc->init_in_progress = 0;
2746 if (mmc->op_cond_pending)
2747 err = mmc_complete_op_cond(mmc);
2750 err = mmc_startup(mmc);
/*
 * mmc_init() - public entry point: start initialization if it is not
 * already in progress, then complete it; logs the result and elapsed time.
 */
2758 int mmc_init(struct mmc *mmc)
2761 __maybe_unused ulong start;
2762 #if CONFIG_IS_ENABLED(DM_MMC)
2763 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2770 start = get_timer(0);
2772 if (!mmc->init_in_progress)
2773 err = mmc_start_init(mmc);
2776 err = mmc_complete_init(mmc);
2778 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2783 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2784 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2785 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_deinit() - downgrade the card out of the high-speed UHS/HS200/HS400
 * modes by re-running mode selection with those capability bits masked
 * off (SD branch masks UHS modes; MMC branch masks HS200/HS400).
 */
2786 int mmc_deinit(struct mmc *mmc)
2794 caps_filtered = mmc->card_caps &
2795 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2796 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2797 MMC_CAP(UHS_SDR104));
2799 return sd_select_mode_and_width(mmc, caps_filtered);
2801 caps_filtered = mmc->card_caps &
2802 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2804 return mmc_select_mode_and_width(mmc, caps_filtered);
/* Store a driver stage register value; applied during startup via CMD4. */
2809 int mmc_set_dsr(struct mmc *mmc, u16 val)
2815 /* CPU-specific MMC initializations */
2816 __weak int cpu_mmc_init(bd_t *bis)
2821 /* board-specific MMC initializations. */
2822 __weak int board_mmc_init(bd_t *bis)
/* Set the preinit flag for this device. */
2827 void mmc_set_preinit(struct mmc *mmc, int preinit)
2829 mmc->preinit = preinit;
2832 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_probe() - driver-model version: bind MMC devices in sequence order,
 * then probe each one; probe failures are logged per device.
 */
2833 static int mmc_probe(bd_t *bis)
2837 struct udevice *dev;
2839 ret = uclass_get(UCLASS_MMC, &uc);
2844 * Try to add them in sequence order. Really with driver model we
2845 * should allow holes, but the current MMC list does not allow that.
2846 * So if we request 0, 1, 3 we will get 0, 1, 2.
2848 for (i = 0; ; i++) {
2849 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2853 uclass_foreach_dev(dev, uc) {
2854 ret = device_probe(dev);
2856 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* Non-driver-model version: delegate to the weak board hook. */
2862 static int mmc_probe(bd_t *bis)
2864 if (board_mmc_init(bis) < 0)
/*
 * mmc_initialize() - one-time global MMC subsystem initialization;
 * guarded by a static flag so repeated calls are no-ops.
 */
2871 int mmc_initialize(bd_t *bis)
2873 static int initialized = 0;
2875 if (initialized) /* Avoid initializing mmc multiple times */
2879 #if !CONFIG_IS_ENABLED(BLK)
2880 #if !CONFIG_IS_ENABLED(MMC_TINY)
2884 ret = mmc_probe(bis);
2888 #ifndef CONFIG_SPL_BUILD
2889 print_mmc_devices(',');
2896 #ifdef CONFIG_CMD_BKOPS_ENABLE
2897 int mmc_set_bkops_enable(struct mmc *mmc)
2900 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2902 err = mmc_send_ext_csd(mmc, ext_csd);
2904 puts("Could not get ext_csd register values\n");
2908 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2909 puts("Background operations not supported on device\n");
2910 return -EMEDIUMTYPE;
2913 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2914 puts("Background operations already enabled\n");
2918 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2920 puts("Failed to enable manual background operations\n");
2924 puts("Enabled manual background operations\n");