1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
13 #include <dm/device-internal.h>
17 #include <power/regulator.h>
20 #include <linux/list.h>
22 #include "mmc_private.h"
24 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
25 static int mmc_power_cycle(struct mmc *mmc);
26 #if !CONFIG_IS_ENABLED(MMC_TINY)
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
30 #if !CONFIG_IS_ENABLED(DM_MMC)
32 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
33 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
39 __weak int board_mmc_getwp(struct mmc *mmc)
/*
 * Report the card's write-protect state. The board-level hook
 * board_mmc_getwp() is queried first; if the controller provides a
 * getwp op, its answer overrides the board default.
 * NOTE(review): excerpted listing — return path elided from this view.
 */
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
/* CONFIG_MMC_TRACE helper: log a command index and argument before it is sent. */
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
/*
 * CONFIG_MMC_TRACE helper: log the driver return code and decode the
 * response according to cmd->resp_type. For R2 (CSD/CID, 128-bit) the
 * four response words are printed and then hex-dumped byte by byte.
 * NOTE(review): excerpted listing — case labels and loop braces elided.
 */
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
96 printf("\t\t \t\t 0x%08X \n",
98 printf("\t\t \t\t 0x%08X \n",
100 printf("\t\t \t\t 0x%08X \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
/* walk the response word backwards, byte at a time (note *ptr--) */
107 ptr = (u8 *)&cmd->response[i];
109 for (j = 0; j < 4; j++)
110 printf("%02X ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
119 printf("\t\tERROR MMC rsp not supported\n");
/*
 * CONFIG_MMC_TRACE helper: extract and print the CURRENT_STATE field
 * (bits 12:9 of the R1 card status) from the last response.
 */
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * Map a bus_mode enum value to a human-readable name for verbose/debug
 * output. Out-of-range values yield "Unknown mode".
 */
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [SD_LEGACY] = "SD Legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
/*
 * Return the nominal maximum clock frequency (Hz) for a bus mode.
 * MMC_LEGACY is special-cased to the card's probed legacy speed;
 * out-of-range modes fall through (return value elided in this excerpt).
 */
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
164 [SD_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
/*
 * Record the chosen bus mode on the host side: cache the mode, its
 * nominal transfer speed, and whether it is a DDR mode.
 */
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Non-DM command dispatch: forward cmd/data to the controller's
 * send_cmd op, bracketed by the optional trace hooks.
 */
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * Poll CMD13 (SEND_STATUS) until the card reports ready-for-data and
 * has left the programming state, a status error is seen, or the
 * timeout/retry budget is exhausted.
 * NOTE(review): excerpted listing — polling loop structure and return
 * statements are elided from this view.
 */
209 int mmc_send_status(struct mmc *mmc, int timeout)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
/* RCA in the upper 16 bits of the argument; not used in SPI mode */
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
223 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
227 if (cmd.response[0] & MMC_STATUS_MASK) {
228 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
229 pr_err("Status Error: 0x%08X\n",
234 } else if (--retries < 0)
243 mmc_trace_state(mmc, &cmd);
245 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
246 pr_err("Timeout waiting card ready\n");
/*
 * Issue CMD16 (SET_BLOCKLEN). With CONFIG_MMC_QUIRKS, cards flagged
 * MMC_QUIRK_RETRY_SET_BLOCKLEN get extra attempts, since the first
 * try has been observed to fail on some cards.
 * NOTE(review): excerpted listing — early-out conditions and the retry
 * loop body are elided from this view.
 */
254 int mmc_set_blocklen(struct mmc *mmc, int len)
262 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
263 cmd.resp_type = MMC_RSP_R1;
266 err = mmc_send_cmd(mmc, &cmd, NULL);
268 #ifdef CONFIG_MMC_QUIRKS
269 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
272 * It has been seen that SET_BLOCKLEN may fail on the first
273 * attempt, let's try a few more time
276 err = mmc_send_cmd(mmc, &cmd, NULL);
286 #ifdef MMC_SUPPORTS_TUNING
/*
 * Standard tuning block patterns (MMC_SUPPORTS_TUNING): the fixed data
 * the card returns during the tuning command, compared against what the
 * host actually received. 64 bytes for 4-bit bus, 128 bytes for 8-bit.
 */
287 static const u8 tuning_blk_pattern_4bit[] = {
288 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
289 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
290 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
291 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
292 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
293 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
294 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
295 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
298 static const u8 tuning_blk_pattern_8bit[] = {
299 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
300 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
301 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
302 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
303 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
304 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
305 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
306 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
307 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
308 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
309 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
310 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
311 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
312 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
313 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
314 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * Execute one tuning command: read the tuning block appropriate for
 * the current bus width (8-bit vs 4-bit) and compare it against the
 * expected pattern; a mismatch means the sample point is bad.
 * NOTE(review): excerpted listing — error paths (including the
 * 1-bit-width case and the cmd_error handling implied by the
 * parameter) are elided from this view.
 */
317 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
320 struct mmc_data data;
321 const u8 *tuning_block_pattern;
324 if (mmc->bus_width == 8) {
325 tuning_block_pattern = tuning_blk_pattern_8bit;
326 size = sizeof(tuning_blk_pattern_8bit);
327 } else if (mmc->bus_width == 4) {
328 tuning_block_pattern = tuning_blk_pattern_4bit;
329 size = sizeof(tuning_blk_pattern_4bit);
334 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
338 cmd.resp_type = MMC_RSP_R1;
340 data.dest = (void *)data_buf;
342 data.blocksize = size;
343 data.flags = MMC_DATA_READ;
345 err = mmc_send_cmd(mmc, &cmd, &data);
/* received data must match the spec-defined pattern exactly */
349 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * Read blkcnt blocks starting at 'start' into dst using CMD17/CMD18.
 * High-capacity cards address by block number, standard-capacity by
 * byte offset (start * read_bl_len). Multi-block reads are terminated
 * with CMD12 (STOP_TRANSMISSION).
 * NOTE(review): excerpted listing — blkcnt>1 branch structure and
 * return values are elided from this view.
 */
356 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
360 struct mmc_data data;
363 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
365 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
367 if (mmc->high_capacity)
370 cmd.cmdarg = start * mmc->read_bl_len;
372 cmd.resp_type = MMC_RSP_R1;
375 data.blocks = blkcnt;
376 data.blocksize = mmc->read_bl_len;
377 data.flags = MMC_DATA_READ;
379 if (mmc_send_cmd(mmc, &cmd, &data))
383 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
385 cmd.resp_type = MMC_RSP_R1b;
386 if (mmc_send_cmd(mmc, &cmd, NULL)) {
387 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
388 pr_err("mmc fail to send stop cmd\n");
/*
 * Block-device read entry point. Two signatures exist: the DM/BLK
 * variant takes a udevice, the legacy variant a blk_desc. After
 * selecting the hardware partition and bounds-checking the request
 * against the device capacity, blocks are read in chunks capped at
 * cfg->b_max via mmc_read_blocks().
 * NOTE(review): excerpted listing — #else branches, error returns and
 * the do/while head are elided from this view.
 */
397 #if CONFIG_IS_ENABLED(BLK)
398 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
400 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
404 #if CONFIG_IS_ENABLED(BLK)
405 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
407 int dev_num = block_dev->devnum;
409 lbaint_t cur, blocks_todo = blkcnt;
414 struct mmc *mmc = find_mmc_device(dev_num);
/* MMC_TINY has no hwpart layer; switch the partition directly */
418 if (CONFIG_IS_ENABLED(MMC_TINY))
419 err = mmc_switch_part(mmc, block_dev->hwpart);
421 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
426 if ((start + blkcnt) > block_dev->lba) {
427 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
428 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
429 start + blkcnt, block_dev->lba);
434 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
435 pr_debug("%s: Failed to set blocklen\n", __func__);
/* chunk size limited by the controller's max transfer, b_max */
440 cur = (blocks_todo > mmc->cfg->b_max) ?
441 mmc->cfg->b_max : blocks_todo;
442 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
443 pr_debug("%s: Failed to read blocks\n", __func__);
448 dst += cur * mmc->read_bl_len;
449 } while (blocks_todo > 0);
/* Send CMD0 (GO_IDLE_STATE) to reset the card to idle state. */
454 static int mmc_go_idle(struct mmc *mmc)
461 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
463 cmd.resp_type = MMC_RSP_NONE;
465 err = mmc_send_cmd(mmc, &cmd, NULL);
/*
 * UHS signal-voltage switch sequence (SD spec): for a 3.3V request just
 * set the host voltage; for 1.8V issue CMD11, verify the card accepted
 * it, gate the clock while switching the host regulator, then confirm
 * the card released dat[0:3].
 * NOTE(review): excerpted listing — error returns between steps are
 * elided from this view.
 */
475 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
476 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
482 * Send CMD11 only if the request is to switch the card to
485 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
486 return mmc_set_signal_voltage(mmc, signal_voltage);
488 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
490 cmd.resp_type = MMC_RSP_R1;
492 err = mmc_send_cmd(mmc, &cmd, NULL);
/* card signals rejection of the switch via the error bit in R1 */
496 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
500 * The card should drive cmd and dat[0:3] low immediately
501 * after the response of cmd11, but wait 100 us to be sure
503 err = mmc_wait_dat0(mmc, 0, 100);
510 * During a signal voltage level switch, the clock must be gated
511 * for 5 ms according to the SD spec
513 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
515 err = mmc_set_signal_voltage(mmc, signal_voltage);
519 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
521 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
524 * Failure to switch is indicated by the card holding
525 * dat[0:3] low. Wait for at least 1 ms according to spec
527 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * SD initialization: loop CMD55+ACMD41 until the card clears the busy
 * bit in the OCR, optionally requesting 1.8V signalling (S18R) when
 * uhs_en is set. On SPI hosts the OCR is read afterwards via CMD58.
 * Sets mmc->version, mmc->ocr and mmc->high_capacity.
 * NOTE(review): excerpted listing — retry loop and timeout handling
 * are elided from this view.
 */
537 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
544 cmd.cmdidx = MMC_CMD_APP_CMD;
545 cmd.resp_type = MMC_RSP_R1;
548 err = mmc_send_cmd(mmc, &cmd, NULL);
553 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
554 cmd.resp_type = MMC_RSP_R3;
557 * Most cards do not answer if some reserved bits
558 * in the ocr are set. However, some controllers
559 * can set bit 7 (reserved for low voltages), but
560 * how to manage low voltages SD card is not yet
563 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
564 (mmc->cfg->voltages & 0xff8000);
/* advertise high-capacity support only for SD v2 cards */
566 if (mmc->version == SD_VERSION_2)
567 cmd.cmdarg |= OCR_HCS;
570 cmd.cmdarg |= OCR_S18R;
572 err = mmc_send_cmd(mmc, &cmd, NULL);
577 if (cmd.response[0] & OCR_BUSY)
586 if (mmc->version != SD_VERSION_2)
587 mmc->version = SD_VERSION_1_0;
589 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
590 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
591 cmd.resp_type = MMC_RSP_R3;
594 err = mmc_send_cmd(mmc, &cmd, NULL);
600 mmc->ocr = cmd.response[0];
602 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* 0x41000000: card is ready and accepted the 1.8V request (S18A) */
603 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
605 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
611 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * One CMD1 (SEND_OP_COND) iteration for eMMC. When use_arg is set (and
 * not on SPI), the argument advertises HCS plus the intersection of the
 * host's supported voltages with the card's previously reported OCR.
 * The returned OCR is cached in mmc->ocr.
 */
617 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
622 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
623 cmd.resp_type = MMC_RSP_R3;
625 if (use_arg && !mmc_host_is_spi(mmc))
626 cmd.cmdarg = OCR_HCS |
627 (mmc->cfg->voltages &
628 (mmc->ocr & OCR_VOLTAGE_MASK)) |
629 (mmc->ocr & OCR_ACCESS_MODE);
631 err = mmc_send_cmd(mmc, &cmd, NULL);
634 mmc->ocr = cmd.response[0];
/*
 * Start eMMC initialization: probe the card's OCR with two CMD1
 * iterations (the first without an argument). If the card is still
 * busy, mark op_cond_pending so mmc_complete_op_cond() finishes later.
 */
638 static int mmc_send_op_cond(struct mmc *mmc)
642 /* Some cards seem to need this */
645 /* Asking to the card its capabilities */
646 for (i = 0; i < 2; i++) {
647 err = mmc_send_op_cond_iter(mmc, i != 0);
651 /* exit if not busy (flag seems to be inverted) */
652 if (mmc->ocr & OCR_BUSY)
655 mmc->op_cond_pending = 1;
/*
 * Finish a deferred eMMC CMD1 sequence: poll mmc_send_op_cond_iter()
 * until OCR_BUSY clears or the timer expires, then (on SPI) read the
 * OCR via CMD58. Updates mmc->version and mmc->high_capacity.
 * NOTE(review): excerpted listing — loop braces and error returns are
 * elided from this view.
 */
659 static int mmc_complete_op_cond(struct mmc *mmc)
666 mmc->op_cond_pending = 0;
667 if (!(mmc->ocr & OCR_BUSY)) {
668 /* Some cards seem to need this */
671 start = get_timer(0);
673 err = mmc_send_op_cond_iter(mmc, 1);
676 if (mmc->ocr & OCR_BUSY)
678 if (get_timer(start) > timeout)
684 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
685 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
686 cmd.resp_type = MMC_RSP_R3;
689 err = mmc_send_cmd(mmc, &cmd, NULL);
694 mmc->ocr = cmd.response[0];
697 mmc->version = MMC_VERSION_UNKNOWN;
699 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * Read the 512-byte Extended CSD register (CMD8 for eMMC) into the
 * caller-supplied buffer, which must hold MMC_MAX_BLOCK_LEN bytes.
 */
706 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
709 struct mmc_data data;
712 /* Get the Card Status Register */
713 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
714 cmd.resp_type = MMC_RSP_R1;
717 data.dest = (char *)ext_csd;
719 data.blocksize = MMC_MAX_BLOCK_LEN;
720 data.flags = MMC_DATA_READ;
722 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * Write one EXT_CSD byte via CMD6 (SWITCH), retrying on failure, then
 * wait for the card to return to ready via mmc_send_status().
 * NOTE(review): excerpted listing — argument assembly tail, retry
 * decrement and return are elided from this view.
 */
727 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
734 cmd.cmdidx = MMC_CMD_SWITCH;
735 cmd.resp_type = MMC_RSP_R1b;
736 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
740 while (retries > 0) {
741 ret = mmc_send_cmd(mmc, &cmd, NULL);
743 /* Waiting for the ready status */
745 ret = mmc_send_status(mmc, timeout);
756 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Program the card's HS_TIMING EXT_CSD field for the requested bus
 * mode (legacy/HS/HS200/HS400), then — for plain HS modes — read back
 * the EXT_CSD to confirm the switch took effect. When downgrading from
 * HS200/HS400 the host is dropped to HS clock first so the read-back
 * is reliable.
 * NOTE(review): excerpted listing — switch-case labels, the downgrade
 * condition and error returns are elided from this view.
 */
757 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
763 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
769 speed_bits = EXT_CSD_TIMING_HS;
771 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
773 speed_bits = EXT_CSD_TIMING_HS200;
776 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
778 speed_bits = EXT_CSD_TIMING_HS400;
782 speed_bits = EXT_CSD_TIMING_LEGACY;
787 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
792 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
793 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
795 * In case the eMMC is in HS200/HS400 mode and we are downgrading
796 * to HS mode, the card clock are still running much faster than
797 * the supported HS mode clock, so we can not reliably read out
798 * Extended CSD. Reconfigure the controller to run at HS mode.
801 mmc_select_mode(mmc, MMC_HS);
802 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
806 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
807 /* Now check to see that it worked */
808 err = mmc_send_ext_csd(mmc, test_csd);
812 /* No high-speed support */
813 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * Derive mmc->card_caps from the cached EXT_CSD: bus widths plus the
 * speed classes (HS26/HS52/DDR52/HS200/HS400) advertised in the
 * CARD_TYPE byte. SPI hosts and pre-v4 cards keep the legacy baseline.
 * NOTE(review): excerpted listing — early returns after the SPI and
 * version checks are elided from this view.
 */
820 static int mmc_get_capabilities(struct mmc *mmc)
822 u8 *ext_csd = mmc->ext_csd;
825 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
827 if (mmc_host_is_spi(mmc))
830 /* Only version 4 supports high-speed */
831 if (mmc->version < MMC_VERSION_4)
835 pr_err("No ext_csd found!\n"); /* this should never happen */
839 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
841 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
842 mmc->cardtype = cardtype;
844 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
845 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
846 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
847 mmc->card_caps |= MMC_MODE_HS200;
850 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
851 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
852 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
853 mmc->card_caps |= MMC_MODE_HS400;
856 if (cardtype & EXT_CSD_CARD_TYPE_52) {
857 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
858 mmc->card_caps |= MMC_MODE_DDR_52MHz;
859 mmc->card_caps |= MMC_MODE_HS_52MHz;
861 if (cardtype & EXT_CSD_CARD_TYPE_26)
862 mmc->card_caps |= MMC_MODE_HS;
/*
 * Set mmc->capacity according to the selected hardware partition
 * (user / boot / RPMB / GP1-4) and refresh the block descriptor's LBA
 * count. NOTE(review): excerpted listing — switch-case labels elided.
 */
868 static int mmc_set_capacity(struct mmc *mmc, int part_num)
872 mmc->capacity = mmc->capacity_user;
876 mmc->capacity = mmc->capacity_boot;
879 mmc->capacity = mmc->capacity_rpmb;
/* GP partitions are numbered 4..7 in part_num space */
885 mmc->capacity = mmc->capacity_gp[part_num - 4];
891 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * HS200 cannot be used while accessing a boot/RPMB partition; if the
 * current mode is forbidden for the requested partition (or merely
 * sub-optimal), re-run mode selection with the forbidden caps masked
 * out. Without HS200 support this collapses to a no-op stub.
 */
896 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
897 static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
902 if (part_num & PART_ACCESS_MASK)
903 forbidden = MMC_CAP(MMC_HS_200);
905 if (MMC_CAP(mmc->selected_mode) & forbidden) {
906 pr_debug("selected mode (%s) is forbidden for part %d\n",
907 mmc_mode_name(mmc->selected_mode), part_num);
909 } else if (mmc->selected_mode != mmc->best_mode) {
910 pr_debug("selected mode is not optimal\n");
915 return mmc_select_mode_and_width(mmc,
916 mmc->card_caps & ~forbidden);
921 static inline int mmc_boot_part_access_chk(struct mmc *mmc,
922 unsigned int part_num)
/*
 * Select a hardware partition: validate mode restrictions, write the
 * PART_CONF access bits via CMD6, and on success (or when reverting to
 * the raw device after -ENODEV) update capacity and the hwpart field.
 */
928 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
932 ret = mmc_boot_part_access_chk(mmc, part_num);
936 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
937 (mmc->part_config & ~PART_ACCESS_MASK)
938 | (part_num & PART_ACCESS_MASK));
941 * Set the capacity if the switch succeeded or was intended
942 * to return to representing the raw device.
944 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
945 ret = mmc_set_capacity(mmc, part_num);
946 mmc_get_blk_desc(mmc)->hwpart = part_num;
/*
 * Validate and (depending on 'mode') apply an eMMC hardware-partition
 * configuration: enhanced user area, GP partitions, and write
 * reliability. Phases: capability/alignment checks, EXT_CSD limits
 * check, then EXT_CSD writes, and finally PARTITION_SETTING_COMPLETED
 * when mode == MMC_HWPART_CONF_COMPLETE. The new layout only becomes
 * effective after a power cycle.
 * NOTE(review): excerpted listing — several error returns and closing
 * braces are elided from this view.
 */
952 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
953 int mmc_hwpart_config(struct mmc *mmc,
954 const struct mmc_hwpart_conf *conf,
955 enum mmc_hwpart_conf_mode mode)
961 u32 max_enh_size_mult;
962 u32 tot_enh_size_mult = 0;
965 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
967 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
970 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
971 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
975 if (!(mmc->part_support & PART_SUPPORT)) {
976 pr_err("Card does not support partitioning\n");
980 if (!mmc->hc_wp_grp_size) {
981 pr_err("Card does not define HC WP group size\n");
985 /* check partition alignment and total enhanced size */
986 if (conf->user.enh_size) {
987 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
988 conf->user.enh_start % mmc->hc_wp_grp_size) {
989 pr_err("User data enhanced area not HC WP group "
993 part_attrs |= EXT_CSD_ENH_USR;
994 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
995 if (mmc->high_capacity) {
996 enh_start_addr = conf->user.enh_start;
998 enh_start_addr = (conf->user.enh_start << 9);
1004 tot_enh_size_mult += enh_size_mult;
1006 for (pidx = 0; pidx < 4; pidx++) {
1007 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1008 pr_err("GP%i partition not HC WP group size "
1009 "aligned\n", pidx+1);
1012 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1013 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1014 part_attrs |= EXT_CSD_ENH_GP(pidx);
1015 tot_enh_size_mult += gp_size_mult[pidx];
1019 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1020 pr_err("Card does not support enhanced attribute\n");
1021 return -EMEDIUMTYPE;
1024 err = mmc_send_ext_csd(mmc, ext_csd);
/* MAX_ENH_SIZE_MULT is a 24-bit little-endian field in EXT_CSD */
1029 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1030 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1031 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1032 if (tot_enh_size_mult > max_enh_size_mult) {
1033 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1034 tot_enh_size_mult, max_enh_size_mult);
1035 return -EMEDIUMTYPE;
1038 /* The default value of EXT_CSD_WR_REL_SET is device
1039 * dependent, the values can only be changed if the
1040 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1041 * changed only once and before partitioning is completed. */
1042 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1043 if (conf->user.wr_rel_change) {
1044 if (conf->user.wr_rel_set)
1045 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1047 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1049 for (pidx = 0; pidx < 4; pidx++) {
1050 if (conf->gp_part[pidx].wr_rel_change) {
1051 if (conf->gp_part[pidx].wr_rel_set)
1052 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1054 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1058 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1059 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1060 puts("Card does not support host controlled partition write "
1061 "reliability settings\n");
1062 return -EMEDIUMTYPE;
1065 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1066 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1067 pr_err("Card already partitioned\n");
1071 if (mode == MMC_HWPART_CONF_CHECK)
1074 /* Partitioning requires high-capacity size definitions */
1075 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1076 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1077 EXT_CSD_ERASE_GROUP_DEF, 1);
1082 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1084 /* update erase group size to be high-capacity */
1085 mmc->erase_grp_size =
1086 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1090 /* all OK, write the configuration */
1091 for (i = 0; i < 4; i++) {
1092 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1093 EXT_CSD_ENH_START_ADDR+i,
1094 (enh_start_addr >> (i*8)) & 0xFF);
1098 for (i = 0; i < 3; i++) {
1099 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1100 EXT_CSD_ENH_SIZE_MULT+i,
1101 (enh_size_mult >> (i*8)) & 0xFF);
1105 for (pidx = 0; pidx < 4; pidx++) {
1106 for (i = 0; i < 3; i++) {
1107 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1108 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1109 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1114 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1115 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1119 if (mode == MMC_HWPART_CONF_SET)
1122 /* The WR_REL_SET is a write-once register but shall be
1123 * written before setting PART_SETTING_COMPLETED. As it is
1124 * write-once we can only write it when completing the
1126 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1127 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1128 EXT_CSD_WR_REL_SET, wr_rel_set);
1133 /* Setting PART_SETTING_COMPLETED confirms the partition
1134 * configuration but it only becomes effective after power
1135 * cycle, so we do not adjust the partition related settings
1136 * in the mmc struct. */
1138 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1139 EXT_CSD_PARTITION_SETTING,
1140 EXT_CSD_PARTITION_SETTING_COMPLETED);
/*
 * Report card-detect state (non-DM path): board hook first, then the
 * controller's getcd op overrides it. NOTE(review): return path elided.
 */
1148 #if !CONFIG_IS_ENABLED(DM_MMC)
1149 int mmc_getcd(struct mmc *mmc)
1153 cd = board_mmc_getcd(mmc);
1156 if (mmc->cfg->ops->getcd)
1157 cd = mmc->cfg->ops->getcd(mmc);
1166 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Issue SD CMD6 (SWITCH_FUNC): select 'value' within function 'group'
 * (check vs. switch selected by 'mode' in bit 31, all other groups set
 * to 0xf = no change) and read the 64-byte switch status into resp.
 */
1167 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1170 struct mmc_data data;
1172 /* Switch the frequency */
1173 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1174 cmd.resp_type = MMC_RSP_R1;
1175 cmd.cmdarg = (mode << 31) | 0xffffff;
1176 cmd.cmdarg &= ~(0xf << (group * 4));
1177 cmd.cmdarg |= value << (group * 4);
1179 data.dest = (char *)resp;
1180 data.blocksize = 64;
1182 data.flags = MMC_DATA_READ;
1184 return mmc_send_cmd(mmc, &cmd, &data);
/*
 * Probe an SD card's capabilities into mmc->card_caps: read the SCR
 * (ACMD51) to determine the SD version and 4-bit support, then use
 * CMD6 in check mode to detect high-speed, and — for SD 3.0 cards with
 * UHS support enabled — decode the UHS bus-speed bits.
 * NOTE(review): excerpted listing — retry loops, early returns and
 * some data setup lines are elided from this view.
 */
1187 static int sd_get_capabilities(struct mmc *mmc)
1191 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1192 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1193 struct mmc_data data;
1195 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1199 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1201 if (mmc_host_is_spi(mmc))
1204 /* Read the SCR to find out if this card supports higher speeds */
1205 cmd.cmdidx = MMC_CMD_APP_CMD;
1206 cmd.resp_type = MMC_RSP_R1;
1207 cmd.cmdarg = mmc->rca << 16;
1209 err = mmc_send_cmd(mmc, &cmd, NULL);
1214 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1215 cmd.resp_type = MMC_RSP_R1;
1221 data.dest = (char *)scr;
1224 data.flags = MMC_DATA_READ;
1226 err = mmc_send_cmd(mmc, &cmd, &data);
/* SCR is big-endian on the wire */
1235 mmc->scr[0] = __be32_to_cpu(scr[0]);
1236 mmc->scr[1] = __be32_to_cpu(scr[1]);
/* SD_SPEC field, SCR bits 59:56 */
1238 switch ((mmc->scr[0] >> 24) & 0xf) {
1240 mmc->version = SD_VERSION_1_0;
1243 mmc->version = SD_VERSION_1_10;
1246 mmc->version = SD_VERSION_2;
1247 if ((mmc->scr[0] >> 15) & 0x1)
1248 mmc->version = SD_VERSION_3;
1251 mmc->version = SD_VERSION_1_0;
1255 if (mmc->scr[0] & SD_DATA_4BIT)
1256 mmc->card_caps |= MMC_MODE_4BIT;
1258 /* Version 1.0 doesn't support switching */
1259 if (mmc->version == SD_VERSION_1_0)
1264 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1265 (u8 *)switch_status);
1270 /* The high-speed function is busy. Try again */
1271 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1275 /* If high-speed isn't supported, we return */
1276 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1277 mmc->card_caps |= MMC_CAP(SD_HS);
1279 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1280 /* Version before 3.0 don't support UHS modes */
1281 if (mmc->version < SD_VERSION_3)
1284 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1285 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1286 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1287 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1288 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1289 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1290 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1291 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1292 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1293 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1294 mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * Switch an SD card's bus speed via CMD6 in set mode: map the bus_mode
 * to a CMD6 group-1 function value, perform the switch, and verify the
 * card reports the selected function in the switch status.
 * NOTE(review): excerpted listing — switch-case labels and return
 * statements are elided from this view.
 */
1300 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1304 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1307 /* SD version 1.00 and 1.01 does not support CMD 6 */
1308 if (mmc->version == SD_VERSION_1_0)
1313 speed = UHS_SDR12_BUS_SPEED;
1316 speed = HIGH_SPEED_BUS_SPEED;
1318 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1320 speed = UHS_SDR12_BUS_SPEED;
1323 speed = UHS_SDR25_BUS_SPEED;
1326 speed = UHS_SDR50_BUS_SPEED;
1329 speed = UHS_DDR50_BUS_SPEED;
1332 speed = UHS_SDR104_BUS_SPEED;
1339 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* group-1 result field must echo the requested function */
1343 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * Set the SD card's bus width (1 or 4 data lines) via CMD55 + ACMD6.
 * NOTE(review): excerpted listing — the ACMD6 argument assignment and
 * error returns are elided from this view.
 */
1349 static int sd_select_bus_width(struct mmc *mmc, int w)
1354 if ((w != 4) && (w != 1))
1357 cmd.cmdidx = MMC_CMD_APP_CMD;
1358 cmd.resp_type = MMC_RSP_R1;
1359 cmd.cmdarg = mmc->rca << 16;
1361 err = mmc_send_cmd(mmc, &cmd, NULL);
1365 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1366 cmd.resp_type = MMC_RSP_R1;
1371 err = mmc_send_cmd(mmc, &cmd, NULL);
/*
 * Read the 64-byte SD Status (CMD55 + ACMD13) and decode the erase
 * characteristics: allocation-unit size (via the sd_au_size lookup,
 * indexed by the AU_SIZE field) and the erase timeout/offset values
 * used by the write/erase path.
 * NOTE(review): excerpted listing — retry handling and error returns
 * are elided from this view.
 */
1379 #if CONFIG_IS_ENABLED(MMC_WRITE)
1380 static int sd_read_ssr(struct mmc *mmc)
1382 static const unsigned int sd_au_size[] = {
1383 0, SZ_16K / 512, SZ_32K / 512,
1384 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1385 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1386 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1387 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1392 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1393 struct mmc_data data;
1395 unsigned int au, eo, et, es;
1397 cmd.cmdidx = MMC_CMD_APP_CMD;
1398 cmd.resp_type = MMC_RSP_R1;
1399 cmd.cmdarg = mmc->rca << 16;
1401 err = mmc_send_cmd(mmc, &cmd, NULL);
1405 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1406 cmd.resp_type = MMC_RSP_R1;
1410 data.dest = (char *)ssr;
1411 data.blocksize = 64;
1413 data.flags = MMC_DATA_READ;
1415 err = mmc_send_cmd(mmc, &cmd, &data);
/* SD status is big-endian on the wire */
1423 for (i = 0; i < 16; i++)
1424 ssr[i] = be32_to_cpu(ssr[i]);
1426 au = (ssr[2] >> 12) & 0xF;
1427 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1428 mmc->ssr.au = sd_au_size[au];
1429 es = (ssr[3] >> 24) & 0xFF;
1430 es |= (ssr[2] & 0xFF) << 8;
1431 et = (ssr[3] >> 18) & 0x3F;
1433 eo = (ssr[3] >> 16) & 0x3;
1434 mmc->ssr.erase_timeout = (et * 1000) / es;
1435 mmc->ssr.erase_offset = eo * 1000;
1438 pr_debug("Invalid Allocation Unit Size.\n");
/*
 * CSD TRAN_SPEED decode tables. Frequency bases are stored divided by
 * 10 and multiplier values multiplied by 10, so integer math suffices
 * on platforms without floating point. (Table contents elided in this
 * excerpt.)
 */
1444 /* frequency bases */
1445 /* divided by 10 to be nice to platforms without floating point */
1446 static const int fbase[] = {
1453 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1454 * to platforms without floating point.
1456 static const u8 multipliers[] = {
/*
 * Translate a single MMC_MODE_*BIT capability flag into the numeric
 * bus width (8/4/1); warns on anything else.
 */
1475 static inline int bus_width(uint cap)
1477 if (cap == MMC_MODE_8BIT)
1479 if (cap == MMC_MODE_4BIT)
1481 if (cap == MMC_MODE_1BIT)
1483 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1487 #if !CONFIG_IS_ENABLED(DM_MMC)
1488 #ifdef MMC_SUPPORTS_TUNING
1489 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1495 static void mmc_send_init_stream(struct mmc *mmc)
/* Push the current bus settings to the controller via its set_ios op. */
1499 static int mmc_set_ios(struct mmc *mmc)
1503 if (mmc->cfg->ops->set_ios)
1504 ret = mmc->cfg->ops->set_ios(mmc);
/*
 * Set the bus clock, clamped to the controller's [f_min, f_max] range,
 * record the enable/disable state, and apply via mmc_set_ios().
 */
1510 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1513 if (clock > mmc->cfg->f_max)
1514 clock = mmc->cfg->f_max;
1516 if (clock < mmc->cfg->f_min)
1517 clock = mmc->cfg->f_min;
1521 mmc->clk_disable = disable;
1523 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1525 return mmc_set_ios(mmc);
/* Record the new bus width and apply it via mmc_set_ios(). */
1528 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1530 mmc->bus_width = width;
1532 return mmc_set_ios(mmc);
1535 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1537 * helper function to display the capabilities in a human
1538 * friendly manner. The capabilities include bus width and
/*
 * Debug helper: print a capability mask as "<text>: widths [...] modes
 * [...]" using backspaces to trim the trailing ", " separators.
 */
1541 void mmc_dump_capabilities(const char *text, uint caps)
1545 pr_debug("%s: widths [", text);
1546 if (caps & MMC_MODE_8BIT)
1548 if (caps & MMC_MODE_4BIT)
1550 if (caps & MMC_MODE_1BIT)
1552 pr_debug("\b\b] modes [");
1553 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1554 if (MMC_CAP(mode) & caps)
1555 pr_debug("%s, ", mmc_mode_name(mode));
1556 pr_debug("\b\b]\n");
1560 struct mode_width_tuning {
1563 #ifdef MMC_SUPPORTS_TUNING
1568 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Convert an mmc_voltage enum value to millivolts. (Default case and
 * return elided in this excerpt.)
 */
1569 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1572 case MMC_SIGNAL_VOLTAGE_000: return 0;
1573 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1574 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1575 case MMC_SIGNAL_VOLTAGE_120: return 1200;
/*
 * Change the I/O signal voltage: no-op if already at the requested
 * level, otherwise record it and push to the controller via set_ios.
 */
1580 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1584 if (mmc->signal_voltage == signal_voltage)
1587 mmc->signal_voltage = signal_voltage;
1588 err = mmc_set_ios(mmc);
1590 pr_debug("unable to set voltage (err %d)\n", err);
1595 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1601 #if !CONFIG_IS_ENABLED(MMC_TINY)
1602 static const struct mode_width_tuning sd_modes_by_pref[] = {
1603 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1604 #ifdef MMC_SUPPORTS_TUNING
1607 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1608 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1613 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1617 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1621 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1626 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1628 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1631 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1636 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1640 #define for_each_sd_mode_by_pref(caps, mwt) \
1641 for (mwt = sd_modes_by_pref;\
1642 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1644 if (caps & MMC_CAP(mwt->mode))
/*
 * Negotiate the best SD bus mode and width: intersect card and host
 * capabilities, then walk sd_modes_by_pref (fastest first) and, for
 * each mode, the candidate widths (4-bit, then 1-bit). For each pair:
 * set the width on card and host, set the card speed, select the mode
 * on the host, run tuning if the mode requires it, and read the SSR.
 * On failure the bus is reverted to SD_LEGACY before trying the next
 * candidate.
 * NOTE(review): excerpted listing — success return and several error
 * branches are elided from this view.
 */
1646 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1649 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1650 const struct mode_width_tuning *mwt;
1651 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1652 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1654 bool uhs_en = false;
1659 mmc_dump_capabilities("sd card", card_caps);
1660 mmc_dump_capabilities("host", mmc->host_caps);
1663 /* Restrict card's capabilities by what the host can do */
1664 caps = card_caps & mmc->host_caps;
1669 for_each_sd_mode_by_pref(caps, mwt) {
1672 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1673 if (*w & caps & mwt->widths) {
1674 pr_debug("trying mode %s width %d (at %d MHz)\n",
1675 mmc_mode_name(mwt->mode),
1677 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1679 /* configure the bus width (card + host) */
1680 err = sd_select_bus_width(mmc, bus_width(*w));
1683 mmc_set_bus_width(mmc, bus_width(*w));
1685 /* configure the bus mode (card) */
1686 err = sd_set_card_speed(mmc, mwt->mode);
1690 /* configure the bus mode (host) */
1691 mmc_select_mode(mmc, mwt->mode);
1692 mmc_set_clock(mmc, mmc->tran_speed,
1695 #ifdef MMC_SUPPORTS_TUNING
1696 /* execute tuning if needed */
1697 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1698 err = mmc_execute_tuning(mmc,
1701 pr_debug("tuning failed\n");
1707 #if CONFIG_IS_ENABLED(MMC_WRITE)
1708 err = sd_read_ssr(mmc);
1710 pr_warn("unable to read ssr\n");
1716 /* revert to a safer bus speed */
1717 mmc_select_mode(mmc, SD_LEGACY);
1718 mmc_set_clock(mmc, mmc->tran_speed,
1724 pr_err("unable to select a mode\n");
1729 * read the compare the part of ext csd that is constant.
1730 * This can be used to check that the transfer is working
/*
 * Sanity-check the bus by re-reading the EXT_CSD and comparing the
 * read-only fields (partitioning support, WP/erase group sizes, rev,
 * sector count) against the cached copy — a cheap way to verify data
 * transfers work in the newly selected mode. Pre-v4 cards are skipped.
 */
1733 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1736 const u8 *ext_csd = mmc->ext_csd;
1737 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1739 if (mmc->version < MMC_VERSION_4)
1742 err = mmc_send_ext_csd(mmc, test_csd);
1746 /* Only compare read only fields */
1747 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1748 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1749 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1750 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1751 ext_csd[EXT_CSD_REV]
1752 == test_csd[EXT_CSD_REV] &&
1753 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1754 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1755 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1756 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1762 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - select the lowest I/O signalling voltage that
 * both the card (per its EXT_CSD card-type bits) and @allowed_mask permit.
 * Candidates are tried lowest-first via ffs(); each failed candidate is
 * removed from the mask before retrying.
 */
1763 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1764 uint32_t allowed_mask)
/* HS200/HS400 card types require 1.8V (and possibly 1.2V) signalling */
1771 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1772 EXT_CSD_CARD_TYPE_HS400_1_8V))
1773 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1774 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1775 EXT_CSD_CARD_TYPE_HS400_1_2V))
1776 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* DDR card types may also run at 3.3V or 1.2V depending on the type bit */
1779 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1780 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1781 MMC_SIGNAL_VOLTAGE_180;
1782 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1783 card_mask |= MMC_SIGNAL_VOLTAGE_120;
/* presumably the default/legacy branch — the case labels are truncated here */
1786 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/* try the lowest remaining voltage; drop it from the mask on failure */
1790 while (card_mask & allowed_mask) {
1791 enum mmc_voltage best_match;
1793 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1794 if (!mmc_set_signal_voltage(mmc, best_match))
1797 allowed_mask &= ~best_match;
/* Stub used when MMC_IO_VOLTAGE is disabled: voltage switching is a no-op. */
1803 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1804 uint32_t allowed_mask)
/*
 * eMMC bus modes in decreasing order of preference.  Each entry lists the
 * bus widths usable in that mode and, for the tuned modes, the tuning
 * command opcode.
 * NOTE(review): the .mode initializers are missing from this copy of the
 * file; the width/tuning fields below are as upstream.
 */
1810 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1811 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1814 .widths = MMC_MODE_8BIT,
/* HS400 is tuned using the HS200 tuning command */
1815 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1818 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1821 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1822 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1827 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1831 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1835 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1839 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* Iterate mmc_modes_by_pref entries whose mode bit is set in @caps. */
1843 #define for_each_mmc_mode_by_pref(caps, mwt) \
1844 for (mwt = mmc_modes_by_pref;\
1845 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1847 if (caps & MMC_CAP(mwt->mode))
/*
 * Map from a host width capability bit plus DDR flag to the value written
 * into the EXT_CSD BUS_WIDTH byte.  Widest/DDR entries come first so the
 * best supported width is tried first.
 */
1849 static const struct ext_csd_bus_width {
1853 } ext_csd_bus_width[] = {
1854 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1855 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1856 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1857 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1858 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1861 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - switch an eMMC device into HS400 mode.
 *
 * Sequence: enter HS200 and tune, drop back to HS, enable the 8-bit DDR
 * bus, then raise card and host to HS400 timing/clock.
 * NOTE(review): the error-check lines between steps are missing here.
 */
1862 static int mmc_select_hs400(struct mmc *mmc)
1866 /* Set timing to HS200 for tuning */
1867 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1871 /* configure the bus mode (host) */
1872 mmc_select_mode(mmc, MMC_HS_200);
1873 mmc_set_clock(mmc, mmc->tran_speed, false);
1875 /* execute tuning if needed */
1876 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1878 debug("tuning failed\n");
1882 /* Set back to HS */
1883 mmc_set_card_speed(mmc, MMC_HS, false);
1884 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
/* enable the 8-bit DDR bus before raising the interface to HS400 */
1886 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1887 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1891 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1895 mmc_select_mode(mmc, MMC_HS_400);
1896 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* Stub used when HS400 support is compiled out. */
1903 static int mmc_select_hs400(struct mmc *mmc)
/* Iterate ext_csd_bus_width entries whose DDR flag matches and whose
 * width capability bit is present in @caps. */
1909 #define for_each_supported_width(caps, ddr, ecbv) \
1910 for (ecbv = ext_csd_bus_width;\
1911 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1913 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * mmc_select_mode_and_width() - configure the best working eMMC mode/width.
 *
 * Restricts @card_caps by the host capabilities, downgrades from
 * HS200/HS400 to HS first (a direct transition to legacy is not allowed),
 * then for each preferred mode/width pair: sets the signalling voltage,
 * programs the EXT_CSD bus width, sets the card speed and verifies the
 * result with an EXT_CSD read-back.  On failure it restores the previous
 * voltage and reverts to 1-bit legacy mode before trying the next pair.
 * NOTE(review): error-check lines between steps are missing in this copy.
 */
1915 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1918 const struct mode_width_tuning *mwt;
1919 const struct ext_csd_bus_width *ecbw;
1922 mmc_dump_capabilities("mmc", card_caps);
1923 mmc_dump_capabilities("host", mmc->host_caps);
1926 /* Restrict card's capabilities by what the host can do */
1927 card_caps &= mmc->host_caps;
1929 /* Only version 4 of MMC supports wider bus widths */
1930 if (mmc->version < MMC_VERSION_4)
1933 if (!mmc->ext_csd) {
1934 pr_debug("No ext_csd found!\n"); /* this should never happen */
1938 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
1939 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1941 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
1942 * before doing anything else, since a transition from either of
1943 * the HS200/HS400 mode directly to legacy mode is not supported.
1945 if (mmc->selected_mode == MMC_HS_200 ||
1946 mmc->selected_mode == MMC_HS_400)
1947 mmc_set_card_speed(mmc, MMC_HS, true);
1950 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
1952 for_each_mmc_mode_by_pref(card_caps, mwt) {
1953 for_each_supported_width(card_caps & mwt->widths,
1954 mmc_is_mode_ddr(mwt->mode), ecbw) {
1955 enum mmc_voltage old_voltage;
1956 pr_debug("trying mode %s width %d (at %d MHz)\n",
1957 mmc_mode_name(mwt->mode),
1958 bus_width(ecbw->cap),
1959 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so it can be restored if this mode fails */
1960 old_voltage = mmc->signal_voltage;
1961 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1962 MMC_ALL_SIGNAL_VOLTAGE);
1966 /* configure the bus width (card + host) */
1967 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1969 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1972 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400 has its own multi-step bring-up sequence */
1974 if (mwt->mode == MMC_HS_400) {
1975 err = mmc_select_hs400(mmc);
1977 printf("Select HS400 failed %d\n", err);
1981 /* configure the bus speed (card) */
1982 err = mmc_set_card_speed(mmc, mwt->mode, false);
1987 * configure the bus width AND the ddr mode
1988 * (card). The host side will be taken care
1989 * of in the next step
1991 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1992 err = mmc_switch(mmc,
1993 EXT_CSD_CMD_SET_NORMAL,
1995 ecbw->ext_csd_bits);
2000 /* configure the bus mode (host) */
2001 mmc_select_mode(mmc, mwt->mode);
2002 mmc_set_clock(mmc, mmc->tran_speed,
2004 #ifdef MMC_SUPPORTS_TUNING
2006 /* execute tuning if needed */
2008 err = mmc_execute_tuning(mmc,
2011 pr_debug("tuning failed\n");
2018 /* do a transfer to check the configuration */
2019 err = mmc_read_and_compare_ext_csd(mmc);
/* failure path: restore voltage and fall back to 1-bit legacy mode */
2023 mmc_set_signal_voltage(mmc, old_voltage);
2024 /* if an error occurred, revert to a safer bus mode */
2025 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2026 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2027 mmc_select_mode(mmc, MMC_LEGACY);
2028 mmc_set_bus_width(mmc, 1);
2032 pr_err("unable to select a mode\n");
2038 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY builds use one static EXT_CSD buffer instead of per-card malloc */
2039 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * mmc_startup_v4() - MMC v4+ specific startup: read EXT_CSD and derive
 * version, capacities, partition layout and erase/WP group sizes.
 *
 * SD cards and pre-v4 MMC return early.  The EXT_CSD block is cached in
 * mmc->ext_csd (static buffer under MMC_TINY, malloc'd otherwise).
 * NOTE(review): many interleaved error-check/brace lines are missing in
 * this copy; comments describe the visible statements only.
 */
2042 static int mmc_startup_v4(struct mmc *mmc)
2046 bool has_parts = false;
2047 bool part_completed;
/* maps EXT_CSD_REV values to MMC_VERSION_* codes (entries truncated here) */
2048 static const u32 mmc_versions[] = {
2060 #if CONFIG_IS_ENABLED(MMC_TINY)
2061 u8 *ext_csd = ext_csd_bkup;
2063 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2067 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2069 err = mmc_send_ext_csd(mmc, ext_csd);
2073 /* store the ext csd for future reference */
2075 mmc->ext_csd = ext_csd;
2077 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2079 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2082 /* check ext_csd version and capacity */
2083 err = mmc_send_ext_csd(mmc, ext_csd);
2087 /* store the ext csd for future reference */
2089 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2092 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* unknown/too-new EXT_CSD revisions are rejected before indexing the table */
2094 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2097 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2099 if (mmc->version >= MMC_VERSION_4_2) {
2101 * According to the JEDEC Standard, the value of
2102 * ext_csd's capacity is valid if the value is more
/* SEC_CNT is a little-endian 32-bit sector count */
2105 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2106 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2107 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2108 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2109 capacity *= MMC_MAX_BLOCK_LEN;
/* only trust SEC_CNT when the device is larger than 2 GiB */
2110 if ((capacity >> 20) > 2 * 1024)
2111 mmc->capacity_user = capacity;
2114 /* The partition data may be non-zero but it is only
2115 * effective if PARTITION_SETTING_COMPLETED is set in
2116 * EXT_CSD, so ignore any data if this bit is not set,
2117 * except for enabling the high-capacity group size
2118 * definition (see below).
2120 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2121 EXT_CSD_PARTITION_SETTING_COMPLETED);
2123 /* store the partition info of emmc */
2124 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2125 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2126 ext_csd[EXT_CSD_BOOT_MULT])
2127 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2128 if (part_completed &&
2129 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2130 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* boot/RPMB sizes are given in 128 KiB multiples (hence << 17) */
2132 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2134 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* up to four general-purpose partitions, 3 size bytes each */
2136 for (i = 0; i < 4; i++) {
2137 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2138 uint mult = (ext_csd[idx + 2] << 16) +
2139 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2142 if (!part_completed)
2144 mmc->capacity_gp[i] = mult;
2145 mmc->capacity_gp[i] *=
2146 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2147 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2148 mmc->capacity_gp[i] <<= 19;
2151 #ifndef CONFIG_SPL_BUILD
2152 if (part_completed) {
2153 mmc->enh_user_size =
2154 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2155 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2156 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2157 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2158 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2159 mmc->enh_user_size <<= 19;
2160 mmc->enh_user_start =
2161 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2162 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2163 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2164 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors, convert to bytes */
2165 if (mmc->high_capacity)
2166 mmc->enh_user_start <<= 9;
2171 * Host needs to enable ERASE_GRP_DEF bit if device is
2172 * partitioned. This bit will be lost every time after a reset
2173 * or power off. This will affect erase size.
2177 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2178 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2181 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2182 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy in sync with the switch just performed */
2187 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2190 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2191 #if CONFIG_IS_ENABLED(MMC_WRITE)
2192 /* Read out group size from ext_csd */
2193 mmc->erase_grp_size =
2194 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2197 * if high capacity and partition setting completed
2198 * SEC_COUNT is valid even if it is smaller than 2 GiB
2199 * JEDEC Standard JESD84-B45, 6.2.4
2201 if (mmc->high_capacity && part_completed) {
2202 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2203 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2204 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2205 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2206 capacity *= MMC_MAX_BLOCK_LEN;
2207 mmc->capacity_user = capacity;
2210 #if CONFIG_IS_ENABLED(MMC_WRITE)
2212 /* Calculate the group size from the csd value. */
2213 int erase_gsz, erase_gmul;
2215 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2216 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2217 mmc->erase_grp_size = (erase_gsz + 1)
2221 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2222 mmc->hc_wp_grp_size = 1024
2223 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2224 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2227 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2232 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* error path: drop the cached EXT_CSD (malloc'd copy) */
2235 mmc->ext_csd = NULL;
/*
 * mmc_startup() - full card identification and configuration sequence.
 *
 * Walks the standard bring-up: CID, RCA, CSD (version, legacy speed,
 * block lengths, capacity), optional DSR, card select, EXT_CSD parsing
 * via mmc_startup_v4(), bus mode/width selection, and finally fills in
 * the block-device descriptor (blksz, lba, vendor/product/revision).
 * NOTE(review): interleaved declarations, braces and error checks are
 * missing from this copy; comments describe the visible statements.
 */
2240 static int mmc_startup(struct mmc *mmc)
2246 struct blk_desc *bdesc;
2248 #ifdef CONFIG_MMC_SPI_CRC_ON
2249 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2250 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2251 cmd.resp_type = MMC_RSP_R1;
2253 err = mmc_send_cmd(mmc, &cmd, NULL);
2259 /* Put the Card in Identify Mode */
2260 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2261 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2262 cmd.resp_type = MMC_RSP_R2;
2265 err = mmc_send_cmd(mmc, &cmd, NULL);
2267 #ifdef CONFIG_MMC_QUIRKS
2268 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2271 * It has been seen that SEND_CID may fail on the first
2272 * attempt, let's try a few more time
2275 err = mmc_send_cmd(mmc, &cmd, NULL);
2278 } while (retries--);
2285 memcpy(mmc->cid, cmd.response, 16);
2288 * For MMC cards, set the Relative Address.
2289 * For SD cards, get the Relative Address.
2290 * This also puts the cards into Standby State
2292 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2293 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2294 cmd.cmdarg = mmc->rca << 16;
2295 cmd.resp_type = MMC_RSP_R6;
2297 err = mmc_send_cmd(mmc, &cmd, NULL);
/* for SD the card assigns the RCA, read it back from the response */
2303 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2306 /* Get the Card-Specific Data */
2307 cmd.cmdidx = MMC_CMD_SEND_CSD;
2308 cmd.resp_type = MMC_RSP_R2;
2309 cmd.cmdarg = mmc->rca << 16;
2311 err = mmc_send_cmd(mmc, &cmd, NULL);
2316 mmc->csd[0] = cmd.response[0];
2317 mmc->csd[1] = cmd.response[1];
2318 mmc->csd[2] = cmd.response[2];
2319 mmc->csd[3] = cmd.response[3];
/* derive the MMC spec version from the CSD SPEC_VERS field */
2321 if (mmc->version == MMC_VERSION_UNKNOWN) {
2322 int version = (cmd.response[0] >> 26) & 0xf;
2326 mmc->version = MMC_VERSION_1_2;
2329 mmc->version = MMC_VERSION_1_4;
2332 mmc->version = MMC_VERSION_2_2;
2335 mmc->version = MMC_VERSION_3;
2338 mmc->version = MMC_VERSION_4;
2341 mmc->version = MMC_VERSION_1_2;
2346 /* divide frequency by 10, since the mults are 10x bigger */
2347 freq = fbase[(cmd.response[0] & 0x7)];
2348 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2350 mmc->legacy_speed = freq * mult;
2351 mmc_select_mode(mmc, MMC_LEGACY);
2353 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2354 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2355 #if CONFIG_IS_ENABLED(MMC_WRITE)
2358 mmc->write_bl_len = mmc->read_bl_len;
2360 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* capacity: CSD layout differs between high-capacity and standard cards */
2363 if (mmc->high_capacity) {
2364 csize = (mmc->csd[1] & 0x3f) << 16
2365 | (mmc->csd[2] & 0xffff0000) >> 16;
2368 csize = (mmc->csd[1] & 0x3ff) << 2
2369 | (mmc->csd[2] & 0xc0000000) >> 30;
2370 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2373 mmc->capacity_user = (csize + 1) << (cmult + 2);
2374 mmc->capacity_user *= mmc->read_bl_len;
2375 mmc->capacity_boot = 0;
2376 mmc->capacity_rpmb = 0;
2377 for (i = 0; i < 4; i++)
2378 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the framework supports */
2380 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2381 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2383 #if CONFIG_IS_ENABLED(MMC_WRITE)
2384 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2385 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only if the card declares support and a value was set */
2388 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2389 cmd.cmdidx = MMC_CMD_SET_DSR;
2390 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2391 cmd.resp_type = MMC_RSP_NONE;
2392 if (mmc_send_cmd(mmc, &cmd, NULL))
2393 pr_warn("MMC: SET_DSR failed\n");
2396 /* Select the card, and put it into Transfer Mode */
2397 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2398 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2399 cmd.resp_type = MMC_RSP_R1;
2400 cmd.cmdarg = mmc->rca << 16;
2401 err = mmc_send_cmd(mmc, &cmd, NULL);
2408 * For SD, its erase group is always one sector
2410 #if CONFIG_IS_ENABLED(MMC_WRITE)
2411 mmc->erase_grp_size = 1;
2413 mmc->part_config = MMCPART_NOAVAILABLE;
2415 err = mmc_startup_v4(mmc);
2419 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2423 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, no capability negotiation */
2424 mmc_set_clock(mmc, mmc->legacy_speed, false);
2425 mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2426 mmc_set_bus_width(mmc, 1);
2429 err = sd_get_capabilities(mmc);
2432 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2434 err = mmc_get_capabilities(mmc);
2437 mmc_select_mode_and_width(mmc, mmc->card_caps);
2443 mmc->best_mode = mmc->selected_mode;
2445 /* Fix the block length for DDR mode */
2446 if (mmc->ddr_mode) {
2447 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2448 #if CONFIG_IS_ENABLED(MMC_WRITE)
2449 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2453 /* fill in device description */
2454 bdesc = mmc_get_blk_desc(mmc);
2458 bdesc->blksz = mmc->read_bl_len;
2459 bdesc->log2blksz = LOG2(bdesc->blksz);
2460 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2461 #if !defined(CONFIG_SPL_BUILD) || \
2462 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2463 !defined(CONFIG_USE_TINY_PRINTF))
/* decode manufacturer/serial/product/revision strings from the CID */
2464 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2465 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2466 (mmc->cid[3] >> 16) & 0xffff);
2467 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2468 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2469 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2470 (mmc->cid[2] >> 24) & 0xff);
2471 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2472 (mmc->cid[2] >> 16) & 0xf);
2474 bdesc->vendor[0] = 0;
2475 bdesc->product[0] = 0;
2476 bdesc->revision[0] = 0;
2479 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
2486 static int mmc_send_if_cond(struct mmc *mmc)
2491 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2492 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2493 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2494 cmd.resp_type = MMC_RSP_R7;
2496 err = mmc_send_cmd(mmc, &cmd, NULL);
2501 if ((cmd.response[0] & 0xff) != 0xaa)
2504 mmc->version = SD_VERSION_2;
2509 #if !CONFIG_IS_ENABLED(DM_MMC)
2510 /* board-specific MMC power initializations. */
2511 __weak void board_mmc_power_init(void)
2516 static int mmc_power_init(struct mmc *mmc)
2518 #if CONFIG_IS_ENABLED(DM_MMC)
2519 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2522 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2525 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2527 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2528 &mmc->vqmmc_supply);
2530 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2532 #else /* !CONFIG_DM_MMC */
2534 * Driver model should use a regulator, as above, rather than calling
2535 * out to board code.
2537 board_mmc_power_init();
2543 * put the host in the initial state:
2544 * - turn on Vdd (card power supply)
2545 * - configure the bus width and clock to minimal values
2547 static void mmc_set_initial_state(struct mmc *mmc)
2551 /* First try to set 3.3V. If it fails set to 1.8V */
2552 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2554 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2556 pr_warn("mmc: failed to set signal voltage\n");
2558 mmc_select_mode(mmc, MMC_LEGACY);
2559 mmc_set_bus_width(mmc, 1);
2560 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2563 static int mmc_power_on(struct mmc *mmc)
2565 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2566 if (mmc->vmmc_supply) {
2567 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2570 puts("Error enabling VMMC supply\n");
2578 static int mmc_power_off(struct mmc *mmc)
2580 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2581 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2582 if (mmc->vmmc_supply) {
2583 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2586 pr_debug("Error disabling VMMC supply\n");
/*
 * mmc_power_cycle() - power the card off, wait, and power it back on.
 *
 * Used to reset a card back to 3.3V signalling before retrying init.
 *
 * Return: 0 on success, or the first power-off/power-on error.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int ret;

	ret = mmc_power_off(mmc);
	if (ret)
		return ret;
	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);
	return mmc_power_on(mmc);
}
/*
 * mmc_get_op_cond() - power up the card and query its operating condition.
 *
 * Initializes power supplies, power-cycles the card (disabling UHS modes
 * if a full power cycle is impossible), brings the host to its initial
 * state, resets the card with CMD0, probes for SD v2 via CMD8, then polls
 * the operating condition (ACMD41 for SD, falling back to CMD1 for MMC on
 * timeout).
 * NOTE(review): error-check and return lines are missing from this copy;
 * comments describe the visible statements only.
 */
2609 int mmc_get_op_cond(struct mmc *mmc)
2611 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2617 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2618 mmc_adapter_card_type_ident();
2620 err = mmc_power_init(mmc);
2624 #ifdef CONFIG_MMC_QUIRKS
/* enable the known workarounds by default */
2625 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2626 MMC_QUIRK_RETRY_SEND_CID;
2629 err = mmc_power_cycle(mmc);
2632 * if power cycling is not supported, we should not try
2633 * to use the UHS modes, because we wouldn't be able to
2634 * recover from an error during the UHS initialization.
2636 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2638 mmc->host_caps &= ~UHS_CAPS;
2639 err = mmc_power_on(mmc);
2644 #if CONFIG_IS_ENABLED(DM_MMC)
2645 /* The device has already been probed ready for use */
2647 /* made sure it's not NULL earlier */
2648 err = mmc->cfg->ops->init(mmc);
2655 mmc_set_initial_state(mmc);
2656 mmc_send_init_stream(mmc);
2658 /* Reset the Card */
2659 err = mmc_go_idle(mmc);
2664 /* The internal partition reset to user partition(0) at every CMD0*/
2665 mmc_get_blk_desc(mmc)->hwpart = 0;
2667 /* Test for SD version 2 */
2668 err = mmc_send_if_cond(mmc);
2670 /* Now try to get the SD card's operating condition */
2671 err = sd_send_op_cond(mmc, uhs_en);
/* a UHS attempt that failed may have left 1.8V signalling; retry at 3.3V */
2672 if (err && uhs_en) {
2674 mmc_power_cycle(mmc);
2678 /* If the command timed out, we check for an MMC card */
2679 if (err == -ETIMEDOUT) {
2680 err = mmc_send_op_cond(mmc);
2683 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2684 pr_err("Card did not respond to voltage select!\n");
/*
 * mmc_start_init() - begin (possibly asynchronous) card initialization.
 *
 * Seeds the host capability mask with the always-available legacy modes,
 * checks card presence (unless CONFIG_MMC_BROKEN_CD), then issues the
 * operating-condition query.  On success init_in_progress is set so that
 * mmc_complete_init() can finish the job later.
 * NOTE(review): declarations and early-return lines are missing here.
 */
2693 int mmc_start_init(struct mmc *mmc)
2699 * all hosts are capable of 1 bit bus-width and able to use the legacy
2702 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2703 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2705 #if !defined(CONFIG_MMC_BROKEN_CD)
2706 /* we pretend there's no card when init is NULL */
2707 no_card = mmc_getcd(mmc) == 0;
2711 #if !CONFIG_IS_ENABLED(DM_MMC)
2712 no_card = no_card || (mmc->cfg->ops->init == NULL);
2716 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2717 pr_err("MMC: no card present\n");
2722 err = mmc_get_op_cond(mmc);
2725 mmc->init_in_progress = 1;
2730 static int mmc_complete_init(struct mmc *mmc)
2734 mmc->init_in_progress = 0;
2735 if (mmc->op_cond_pending)
2736 err = mmc_complete_op_cond(mmc);
2739 err = mmc_startup(mmc);
2747 int mmc_init(struct mmc *mmc)
2750 __maybe_unused ulong start;
2751 #if CONFIG_IS_ENABLED(DM_MMC)
2752 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2759 start = get_timer(0);
2761 if (!mmc->init_in_progress)
2762 err = mmc_start_init(mmc);
2765 err = mmc_complete_init(mmc);
2767 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2772 int mmc_set_dsr(struct mmc *mmc, u16 val)
2778 /* CPU-specific MMC initializations */
2779 __weak int cpu_mmc_init(bd_t *bis)
2784 /* board-specific MMC initializations. */
2785 __weak int board_mmc_init(bd_t *bis)
2790 void mmc_set_preinit(struct mmc *mmc, int preinit)
2792 mmc->preinit = preinit;
2795 #if CONFIG_IS_ENABLED(DM_MMC)
2796 static int mmc_probe(bd_t *bis)
2800 struct udevice *dev;
2802 ret = uclass_get(UCLASS_MMC, &uc);
2807 * Try to add them in sequence order. Really with driver model we
2808 * should allow holes, but the current MMC list does not allow that.
2809 * So if we request 0, 1, 3 we will get 0, 1, 2.
2811 for (i = 0; ; i++) {
2812 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2816 uclass_foreach_dev(dev, uc) {
2817 ret = device_probe(dev);
2819 pr_err("%s - probe failed: %d\n", dev->name, ret);
2825 static int mmc_probe(bd_t *bis)
2827 if (board_mmc_init(bis) < 0)
/*
 * mmc_initialize() - one-time, system-wide MMC subsystem initialization.
 *
 * Guarded by a static flag so repeated calls are cheap no-ops; probes all
 * controllers and (outside SPL) prints the device list.
 * NOTE(review): list-init and return lines are missing from this copy.
 */
2834 int mmc_initialize(bd_t *bis)
2836 static int initialized = 0;
2838 if (initialized) /* Avoid initializing mmc multiple times */
2842 #if !CONFIG_IS_ENABLED(BLK)
2843 #if !CONFIG_IS_ENABLED(MMC_TINY)
2847 ret = mmc_probe(bis);
2851 #ifndef CONFIG_SPL_BUILD
2852 print_mmc_devices(',');
2859 #ifdef CONFIG_CMD_BKOPS_ENABLE
2860 int mmc_set_bkops_enable(struct mmc *mmc)
2863 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2865 err = mmc_send_ext_csd(mmc, ext_csd);
2867 puts("Could not get ext_csd register values\n");
2871 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2872 puts("Background operations not supported on device\n");
2873 return -EMEDIUMTYPE;
2876 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2877 puts("Background operations already enabled\n");
2881 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2883 puts("Failed to enable manual background operations\n");
2887 puts("Enabled manual background operations\n");