1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2011, Marvell Semiconductor Inc.
4 * Lei Wen <leiwen@marvell.com>
6 * Back ported to the 8xx platform (from the 8260 platform) by
7 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
19 #include <asm/cache.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <linux/dma-mapping.h>
/*
 * sdhci_reset() - request a controller software reset and poll until done.
 *
 * Writes @mask (SDHCI_RESET_ALL / _CMD / _DATA bits) to the software-reset
 * register, then spins while the controller still reports those bits set.
 * NOTE(review): this extract is truncated — the timeout initialisation,
 * the delay/decrement inside the loop and the closing braces are not
 * visible here.
 */
25 static void sdhci_reset(struct sdhci_host *host, u8 mask)
27 unsigned long timeout;
31 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
32 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
/* Reset bits that never self-clear indicate a dead/hung controller. */
34 printf("%s: Reset 0x%x never completed.\n",
/*
 * sdhci_cmd_done() - read the command response registers into cmd->response.
 *
 * For 136-bit (R2) responses the controller strips the CRC byte, so each
 * 32-bit word is shifted left by 8 and the low byte is refilled from the
 * neighbouring response register. Short responses are a single register read.
 * NOTE(review): truncated extract — loop braces/else branch not fully visible.
 */
43 static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
46 if (cmd->resp_type & MMC_RSP_136) {
47 /* CRC is stripped so we need to do some shifting. */
48 for (i = 0; i < 4; i++) {
49 cmd->response[i] = sdhci_readl(host,
50 SDHCI_RESPONSE + (3-i)*4) << 8;
52 cmd->response[i] |= sdhci_readb(host,
53 SDHCI_RESPONSE + (3-i)*4-1);
/* Short response: one 32-bit register holds the whole reply. */
56 cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
/*
 * sdhci_transfer_pio() - move one block of data through the PIO buffer port.
 *
 * Copies data->blocksize bytes, 32 bits at a time, between the buffer at
 * data->dest/src and the SDHCI_BUFFER register. Direction is selected by
 * data->flags. NOTE(review): assumes blocksize is a multiple of 4 and the
 * buffer is 4-byte aligned — confirm against callers.
 */
60 static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
64 for (i = 0; i < data->blocksize; i += 4) {
65 offs = data->dest + i;
66 if (data->flags == MMC_DATA_READ)
67 *(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
69 sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
73 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
/*
 * sdhci_adma_desc() - fill the next ADMA2 descriptor in the host's table.
 *
 * Marks the descriptor VALID + TRANSFER_DATA, sets the END attribute on the
 * final descriptor of the chain, and stores the (optionally 64-bit) DMA
 * address. NOTE(review): truncated extract — the len assignment, desc_slot
 * advance and #endif are not visible here.
 */
74 static void sdhci_adma_desc(struct sdhci_host *host, dma_addr_t dma_addr,
77 struct sdhci_adma_desc *desc;
80 desc = &host->adma_desc_table[host->desc_slot];
82 attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA;
/* Last descriptor in the chain terminates the ADMA engine. */
86 attr |= ADMA_DESC_ATTR_END;
91 desc->addr_lo = lower_32_bits(dma_addr);
92 #ifdef CONFIG_DMA_ADDR_T_64BIT
93 desc->addr_hi = upper_32_bits(dma_addr);
/*
 * sdhci_prepare_adma_table() - build the ADMA2 descriptor chain for a request.
 *
 * Splits the transfer (blocksize * blocks bytes starting at host->start_addr)
 * into ADMA_MAX_LEN-sized descriptors, marking the final one as the end of
 * chain, then flushes the descriptor table out of the data cache so the DMA
 * engine sees it. NOTE(review): truncated — the loop construct around the
 * full-size descriptors is not visible in this extract.
 */
97 static void sdhci_prepare_adma_table(struct sdhci_host *host,
98 struct mmc_data *data)
100 uint trans_bytes = data->blocksize * data->blocks;
101 uint desc_count = DIV_ROUND_UP(trans_bytes, ADMA_MAX_LEN);
103 dma_addr_t dma_addr = host->start_addr;
108 sdhci_adma_desc(host, dma_addr, ADMA_MAX_LEN, false);
109 dma_addr += ADMA_MAX_LEN;
110 trans_bytes -= ADMA_MAX_LEN;
/* Final (short) descriptor carries the END attribute. */
113 sdhci_adma_desc(host, dma_addr, trans_bytes, true);
/* Flush the table so the controller's DMA reads the filled descriptors. */
115 flush_cache((dma_addr_t)host->adma_desc_table,
116 ROUND(desc_count * sizeof(struct sdhci_adma_desc),
119 #elif defined(CONFIG_MMC_SDHCI_SDMA)
/* SDMA-only build: no ADMA table to prepare — empty stub keeps callers happy. */
120 static void sdhci_prepare_adma_table(struct sdhci_host *host,
121 struct mmc_data *data)
124 #if (defined(CONFIG_MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
/*
 * sdhci_prepare_dma() - program the controller for a DMA transfer.
 *
 * Selects the DMA mode (SDMA / ADMA32 / ADMA64) in HOST_CONTROL, falls back
 * to the host's bounce buffer when SDMA requires alignment the caller's
 * buffer doesn't satisfy, maps the buffer for DMA, and writes the transfer
 * address (SDMA) or descriptor-table address (ADMA) to the controller.
 * NOTE(review): truncated extract — several statements (e.g. *is_aligned
 * update, register name on the SDMA write) are not visible here.
 */
125 static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
126 int *is_aligned, int trans_bytes)
131 if (data->flags == MMC_DATA_READ)
134 buf = (void *)data->src;
/* Pick the DMA engine flavour in the host-control register. */
136 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
137 ctrl &= ~SDHCI_CTRL_DMA_MASK;
138 if (host->flags & USE_ADMA64)
139 ctrl |= SDHCI_CTRL_ADMA64;
140 else if (host->flags & USE_ADMA)
141 ctrl |= SDHCI_CTRL_ADMA32;
142 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/*
 * SDMA with a 32-bit-address quirk needs 8-byte-aligned buffers (or the
 * platform forces it) — bounce through host->align_buffer in that case.
 */
144 if (host->flags & USE_SDMA &&
145 (host->force_align_buffer ||
146 (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR &&
147 ((unsigned long)buf & 0x7) != 0x0))) {
/* Writes must be staged into the bounce buffer before the transfer. */
149 if (data->flags != MMC_DATA_READ)
150 memcpy(host->align_buffer, buf, trans_bytes);
151 buf = host->align_buffer;
154 host->start_addr = dma_map_single(buf, trans_bytes,
155 mmc_get_dma_dir(data));
157 if (host->flags & USE_SDMA) {
158 sdhci_writel(host, phys_to_bus((ulong)host->start_addr),
160 } else if (host->flags & (USE_ADMA | USE_ADMA64)) {
161 sdhci_prepare_adma_table(host, data);
163 sdhci_writel(host, lower_32_bits(host->adma_addr),
/* 64-bit ADMA needs the high half of the table address as well. */
165 if (host->flags & USE_ADMA64)
166 sdhci_writel(host, upper_32_bits(host->adma_addr),
167 SDHCI_ADMA_ADDRESS_HI);
/* PIO-only build: DMA preparation is a no-op stub. */
171 static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
172 int *is_aligned, int trans_bytes)
/*
 * sdhci_transfer_data() - run the data phase of a command to completion.
 *
 * Polls SDHCI_INT_STATUS until SDHCI_INT_DATA_END, servicing PIO buffer-ready
 * events block by block and re-programming the SDMA address at each DMA
 * boundary interrupt. Unmaps the DMA buffer when done.
 * NOTE(review): truncated extract — error-return paths, the timeout
 * decrement/udelay and the final return are not visible here.
 */
175 static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
177 dma_addr_t start_addr = host->start_addr;
178 unsigned int stat, rdy, mask, timeout, block = 0;
179 bool transfer_done = false;
182 rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
183 mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
185 stat = sdhci_readl(host, SDHCI_INT_STATUS);
186 if (stat & SDHCI_INT_ERROR) {
187 pr_debug("%s: Error detected in status(0x%X)!\n",
/* PIO path: a buffer-ready interrupt means one more block can move. */
191 if (!transfer_done && (stat & rdy)) {
192 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
194 sdhci_writel(host, rdy, SDHCI_INT_STATUS);
195 sdhci_transfer_pio(host, data);
196 data->dest += data->blocksize;
197 if (++block >= data->blocks) {
198 /* Keep looping until the SDHCI_INT_DATA_END is
199 * cleared, even if we finished sending all the
202 transfer_done = true;
/*
 * SDMA path: the controller pauses at each boundary; acknowledge and
 * restart it at the next boundary-aligned address.
 */
206 if ((host->flags & USE_DMA) && !transfer_done &&
207 (stat & SDHCI_INT_DMA_END)) {
208 sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
209 if (host->flags & USE_SDMA) {
211 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
212 start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
213 sdhci_writel(host, phys_to_bus((ulong)start_addr),
220 printf("%s: Transfer data timeout\n", __func__);
223 } while (!(stat & SDHCI_INT_DATA_END));
225 dma_unmap_single(host->start_addr, data->blocks * data->blocksize,
226 mmc_get_dma_dir(data));
232 * No command will be sent by driver if card is busy, so driver must wait
233 * for card ready state.
234 * Each time the card is still busy after the timeout, the (last) timeout
235 * value is doubled, but only while it does not exceed the defined maximum.
236 * Each function call will use last timeout value.
/* Timeout values are in milliseconds. */
238 #define SDHCI_CMD_MAX_TIMEOUT 3200
239 #define SDHCI_CMD_DEFAULT_TIMEOUT 100
240 #define SDHCI_READ_STATUS_TIMEOUT 1000
/*
 * sdhci_send_command() - issue one MMC command (and optional data transfer).
 *
 * Waits for CMD/DATA inhibit to clear (doubling the adaptive busy timeout on
 * expiry, capped at SDHCI_CMD_MAX_TIMEOUT), programs response flags, transfer
 * mode and DMA, fires the command, then polls for completion and runs the
 * data phase. On error the CMD and DATA lines are reset.
 * Both the DM (udevice) and non-DM (struct mmc) entry points are built from
 * the same body via the surrounding preprocessor conditionals.
 * NOTE(review): truncated extract — several closing braces, #else/#endif
 * lines and return statements are not visible here.
 */
243 static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
244 struct mmc_data *data)
246 struct mmc *mmc = mmc_get_mmc_dev(dev);
249 static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
250 struct mmc_data *data)
253 struct sdhci_host *host = mmc->priv;
254 unsigned int stat = 0;
256 int trans_bytes = 0, is_aligned = 1;
257 u32 mask, flags, mode;
258 unsigned int time = 0;
259 int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
260 ulong start = get_timer(0);
262 host->start_addr = 0;
263 /* Timeout unit - ms */
/* static: the adaptive timeout persists across calls by design. */
264 static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;
266 mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
268 /* We shouldn't wait for data inhibit for stop commands, even
269 though they might use busy signaling */
270 if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
271 ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
272 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
273 mask &= ~SDHCI_DATA_INHIBIT;
/* Busy-wait for the inhibit bits, growing the timeout on each failure. */
275 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
276 if (time >= cmd_timeout) {
277 printf("%s: MMC: %d busy ", __func__, mmc_dev);
278 if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
279 cmd_timeout += cmd_timeout;
280 printf("timeout increasing to: %u ms.\n",
/* Clear any stale interrupt status before issuing the command. */
291 sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
293 mask = SDHCI_INT_RESPONSE;
/* Tuning commands complete via DATA_AVAIL rather than RESPONSE. */
294 if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
295 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
296 mask = SDHCI_INT_DATA_AVAIL;
/* Translate resp_type into SDHCI command-register flags. */
298 if (!(cmd->resp_type & MMC_RSP_PRESENT))
299 flags = SDHCI_CMD_RESP_NONE;
300 else if (cmd->resp_type & MMC_RSP_136)
301 flags = SDHCI_CMD_RESP_LONG;
302 else if (cmd->resp_type & MMC_RSP_BUSY) {
303 flags = SDHCI_CMD_RESP_SHORT_BUSY;
305 mask |= SDHCI_INT_DATA_END;
307 flags = SDHCI_CMD_RESP_SHORT;
309 if (cmd->resp_type & MMC_RSP_CRC)
310 flags |= SDHCI_CMD_CRC;
311 if (cmd->resp_type & MMC_RSP_OPCODE)
312 flags |= SDHCI_CMD_INDEX;
313 if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
314 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
315 flags |= SDHCI_CMD_DATA;
317 /* Set Transfer mode regarding to data flag */
319 sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
320 mode = SDHCI_TRNS_BLK_CNT_EN;
321 trans_bytes = data->blocks * data->blocksize;
322 if (data->blocks > 1)
323 mode |= SDHCI_TRNS_MULTI;
325 if (data->flags == MMC_DATA_READ)
326 mode |= SDHCI_TRNS_READ;
328 if (host->flags & USE_DMA) {
329 mode |= SDHCI_TRNS_DMA;
330 sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
333 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
336 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
337 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
338 } else if (cmd->resp_type & MMC_RSP_BUSY) {
339 sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
/* Argument must be written before the command register triggers the send. */
342 sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
343 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
344 start = get_timer(0);
/* Poll for response (or error), bounded by SDHCI_READ_STATUS_TIMEOUT. */
346 stat = sdhci_readl(host, SDHCI_INT_STATUS);
347 if (stat & SDHCI_INT_ERROR)
350 if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
351 if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
354 printf("%s: Timeout for status update!\n",
359 } while ((stat & mask) != mask);
361 if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
362 sdhci_cmd_done(host, cmd);
363 sdhci_writel(host, mask, SDHCI_INT_STATUS);
368 ret = sdhci_transfer_data(host, data);
370 if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
373 stat = sdhci_readl(host, SDHCI_INT_STATUS);
374 sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
/* Bounce-buffer reads must be copied back to the caller's buffer. */
376 if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
377 !is_aligned && (data->flags == MMC_DATA_READ))
378 memcpy(data->dest, host->align_buffer, trans_bytes);
/* Error path: reset both lines so the controller is usable again. */
382 sdhci_reset(host, SDHCI_RESET_CMD);
383 sdhci_reset(host, SDHCI_RESET_DATA);
384 if (stat & SDHCI_INT_TIMEOUT)
390 #if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
/*
 * sdhci_execute_tuning() - delegate bus tuning to the platform hook.
 *
 * Only DM builds with tuning support provide this; the actual tuning
 * sequence is implemented by ops->platform_execute_tuning.
 * NOTE(review): truncated extract — error handling after the hook call and
 * the final return are not visible here.
 */
391 static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
394 struct mmc *mmc = mmc_get_mmc_dev(dev);
395 struct sdhci_host *host = mmc->priv;
397 debug("%s\n", __func__);
399 if (host->ops && host->ops->platform_execute_tuning) {
400 err = host->ops->platform_execute_tuning(mmc, opcode);
/*
 * sdhci_set_clock() - program the SD bus clock to at most @clock Hz.
 *
 * Waits for the bus to go idle, stops the clock, computes the divisor
 * (programmable-clock mode or multiple-of-2 divisors for spec >= 3.00,
 * power-of-2 divisors for spec 2.00), enables the internal clock, waits for
 * it to stabilise and finally gates the clock to the card.
 * NOTE(review): truncated extract — timeout bookkeeping, several braces and
 * the clock==0 early-out are not visible here.
 */
408 int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
410 struct sdhci_host *host = mmc->priv;
411 unsigned int div, clk = 0, timeout;
/* Never change the clock while a command or data transfer is in flight. */
415 while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
416 (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
418 printf("%s: Timeout to wait cmd & data inhibit\n",
/* Stop the clock before reprogramming the divisor. */
427 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
432 if (host->ops && host->ops->set_delay)
433 host->ops->set_delay(host);
435 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
437 * Check if the Host Controller supports Programmable Clock
441 for (div = 1; div <= 1024; div++) {
442 if ((host->max_clk / div) <= clock)
447 * Set Programmable Clock Mode in the Clock
450 clk = SDHCI_PROG_CLOCK_MODE;
453 /* Version 3.00 divisors must be a multiple of 2. */
454 if (host->max_clk <= clock) {
458 div < SDHCI_MAX_DIV_SPEC_300;
460 if ((host->max_clk / div) <= clock)
467 /* Version 2.00 divisors must be a power of 2. */
468 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
469 if ((host->max_clk / div) <= clock)
475 if (host->ops && host->ops->set_clock)
476 host->ops->set_clock(host, div);
/* Divisor is split across low and high fields in CLOCK_CONTROL. */
478 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
479 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
480 << SDHCI_DIVIDER_HI_SHIFT;
481 clk |= SDHCI_CLOCK_INT_EN;
482 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* The internal clock must report stable before it may feed the card. */
486 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
487 & SDHCI_CLOCK_INT_STABLE)) {
489 printf("%s: Internal clock never stabilised.\n",
497 clk |= SDHCI_CLOCK_CARD_EN;
498 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/*
 * sdhci_set_power() - set the bus voltage in the power-control register.
 *
 * @power is a bit index into the MMC voltage mask ((unsigned short)-1 means
 * "power off"). The matching SDHCI_POWER_* level is written together with
 * SDHCI_POWER_ON. NOTE(review): truncated extract — case labels for the
 * 3.0V/3.3V ranges, the pwr==0 early-return and quirk handling around the
 * final write are not visible here.
 */
502 static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
506 if (power != (unsigned short)-1) {
507 switch (1 << power) {
508 case MMC_VDD_165_195:
509 pwr = SDHCI_POWER_180;
513 pwr = SDHCI_POWER_300;
517 pwr = SDHCI_POWER_330;
/* No valid voltage selected: make sure the bus power is off. */
523 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
527 pwr |= SDHCI_POWER_ON;
529 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
/*
 * sdhci_set_uhs_timing() - map mmc->selected_mode to the UHS field of
 * HOST_CONTROL2.
 *
 * Clears SDHCI_CTRL_UHS_MASK and sets the matching SDR12/SDR50/DDR50/SDR104
 * timing value; unlisted modes fall back to SDR12 in the default case.
 * NOTE(review): truncated extract — the case labels for each mode are not
 * visible here.
 */
532 void sdhci_set_uhs_timing(struct sdhci_host *host)
534 struct mmc *mmc = host->mmc;
537 reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
538 reg &= ~SDHCI_CTRL_UHS_MASK;
540 switch (mmc->selected_mode) {
543 reg |= SDHCI_CTRL_UHS_SDR50;
547 reg |= SDHCI_CTRL_UHS_DDR50;
551 reg |= SDHCI_CTRL_UHS_SDR104;
554 reg |= SDHCI_CTRL_UHS_SDR12;
557 sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
/*
 * sdhci_set_ios() - apply the current mmc settings to the controller.
 *
 * Updates platform control registers, the bus clock, the 4/8-bit bus-width
 * bits and the high-speed bit, then gives the driver a post-hook. Both DM
 * (udevice) and non-DM (struct mmc) entry points are selected by the
 * surrounding preprocessor conditionals.
 * NOTE(review): truncated extract — #else/#endif lines and the final
 * "return 0" are not visible here.
 */
561 static int sdhci_set_ios(struct udevice *dev)
563 struct mmc *mmc = mmc_get_mmc_dev(dev);
565 static int sdhci_set_ios(struct mmc *mmc)
569 struct sdhci_host *host = mmc->priv;
571 if (host->ops && host->ops->set_control_reg)
572 host->ops->set_control_reg(host);
574 if (mmc->clock != host->clock)
575 sdhci_set_clock(mmc, mmc->clock);
577 if (mmc->clk_disable)
578 sdhci_set_clock(mmc, 0);
/* Select 8-bit, 4-bit or 1-bit bus width in HOST_CONTROL. */
581 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
582 if (mmc->bus_width == 8) {
583 ctrl &= ~SDHCI_CTRL_4BITBUS;
/* 8-bit mode needs spec >= 3.00 or the WIDE8 quirk. */
584 if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
585 (host->quirks & SDHCI_QUIRK_USE_WIDE8))
586 ctrl |= SDHCI_CTRL_8BITBUS;
588 if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
589 (host->quirks & SDHCI_QUIRK_USE_WIDE8))
590 ctrl &= ~SDHCI_CTRL_8BITBUS;
591 if (mmc->bus_width == 4)
592 ctrl |= SDHCI_CTRL_4BITBUS;
594 ctrl &= ~SDHCI_CTRL_4BITBUS;
597 if (mmc->clock > 26000000)
598 ctrl |= SDHCI_CTRL_HISPD;
600 ctrl &= ~SDHCI_CTRL_HISPD;
/* Some controllers misbehave with HISPD set — quirks force it off. */
602 if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
603 (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE))
604 ctrl &= ~SDHCI_CTRL_HISPD;
606 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
608 /* If available, call the driver specific "post" set_ios() function */
609 if (host->ops && host->ops->set_ios_post)
610 return host->ops->set_ios_post(host);
/*
 * sdhci_init() - one-time controller initialisation.
 *
 * Requests the card-detect GPIO (DM builds), fully resets the controller,
 * sets up the bounce buffer (fixed address or allocated for the 32-bit DMA
 * quirk), applies the highest supported voltage, probes card detect, and
 * enables command/data interrupts while masking signal routing.
 * NOTE(review): truncated extract — #endif lines, error return on failed
 * allocation and the final return are not visible here.
 */
615 static int sdhci_init(struct mmc *mmc)
617 struct sdhci_host *host = mmc->priv;
618 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
619 struct udevice *dev = mmc->dev;
621 gpio_request_by_name(dev, "cd-gpios", 0,
622 &host->cd_gpio, GPIOD_IS_IN);
625 sdhci_reset(host, SDHCI_RESET_ALL);
627 #if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
628 host->align_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
630 * Always use this bounce-buffer when CONFIG_FIXED_SDHCI_ALIGNED_BUFFER
633 host->force_align_buffer = true;
635 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) {
/* 512 KiB bounce buffer, 8-byte aligned, for the 32-bit DMA quirk. */
636 host->align_buffer = memalign(8, 512 * 1024);
637 if (!host->align_buffer) {
638 printf("%s: Aligned buffer alloc failed!!!\n",
/* Power up at the highest voltage advertised in cfg->voltages. */
645 sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);
647 if (host->ops && host->ops->get_cd)
648 host->ops->get_cd(host);
650 /* Enable only interrupts served by the SD controller */
651 sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
653 /* Mask all sdhci interrupt sources */
654 sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);
/*
 * sdhci_probe() - DM probe entry point; delegates to sdhci_init().
 */
660 int sdhci_probe(struct udevice *dev)
662 struct mmc *mmc = mmc_get_mmc_dev(dev);
664 return sdhci_init(mmc);
/*
 * sdhci_deferred_probe() - forward the optional deferred-probe hook to the
 * platform driver; a missing hook is not an error.
 * NOTE(review): truncated extract — the error return and the closing of the
 * function are not visible here.
 */
667 static int sdhci_deferred_probe(struct udevice *dev)
670 struct mmc *mmc = mmc_get_mmc_dev(dev);
671 struct sdhci_host *host = mmc->priv;
673 if (host->ops && host->ops->deferred_probe) {
674 err = host->ops->deferred_probe(host);
/*
 * sdhci_get_cd() - report card presence.
 *
 * Non-removable or polled hosts are treated as always present; otherwise
 * the CD GPIO (DM_GPIO builds) or the controller's PRESENT_STATE register
 * supplies the value, inverted when the CD line is active-high per caps.
 * NOTE(review): truncated extract — the returns and #else/#endif structure
 * are not visible here.
 */
681 static int sdhci_get_cd(struct udevice *dev)
683 struct mmc *mmc = mmc_get_mmc_dev(dev);
684 struct sdhci_host *host = mmc->priv;
687 /* If nonremovable, assume that the card is always present. */
688 if (mmc->cfg->host_caps & MMC_CAP_NONREMOVABLE)
690 /* If polling, assume that the card is always present. */
691 if (mmc->cfg->host_caps & MMC_CAP_NEEDS_POLL)
694 #if CONFIG_IS_ENABLED(DM_GPIO)
695 value = dm_gpio_get_value(&host->cd_gpio);
697 if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
703 value = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
705 if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
/*
 * Operations tables: the DM variant (dm_mmc_ops) and the legacy non-DM
 * variant (mmc_ops) are selected by the surrounding preprocessor
 * conditionals; both route to the same send_cmd/set_ios implementations.
 */
711 const struct dm_mmc_ops sdhci_ops = {
712 .send_cmd = sdhci_send_command,
713 .set_ios = sdhci_set_ios,
714 .get_cd = sdhci_get_cd,
715 .deferred_probe = sdhci_deferred_probe,
716 #ifdef MMC_SUPPORTS_TUNING
717 .execute_tuning = sdhci_execute_tuning,
721 static const struct mmc_ops sdhci_ops = {
722 .send_cmd = sdhci_send_command,
723 .set_ios = sdhci_set_ios,
/*
 * sdhci_setup_cfg() - derive the mmc_config from controller capabilities.
 *
 * Reads the capability registers (optionally masked/augmented via the DT
 * "sdhci-caps-mask"/"sdhci-caps" properties), selects the DMA engine
 * (SDMA/ADMA), determines the controller version, base clock, clock
 * multiplier, f_min/f_max, supported voltages, bus widths and UHS/HS200
 * speed modes, honouring host quirks throughout.
 * NOTE(review): truncated extract — #else/#endif lines, several braces and
 * error returns are not visible here.
 */
728 int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
729 u32 f_max, u32 f_min)
731 u32 caps, caps_1 = 0;
732 #if CONFIG_IS_ENABLED(DM_MMC)
733 u64 dt_caps, dt_caps_mask;
/* DT may mask out or force-in capability bits reported by hardware. */
735 dt_caps_mask = dev_read_u64_default(host->mmc->dev,
736 "sdhci-caps-mask", 0);
737 dt_caps = dev_read_u64_default(host->mmc->dev,
739 caps = ~(u32)dt_caps_mask &
740 sdhci_readl(host, SDHCI_CAPABILITIES);
741 caps |= (u32)dt_caps;
743 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
745 debug("%s, caps: 0x%x\n", __func__, caps);
747 #ifdef CONFIG_MMC_SDHCI_SDMA
748 if ((caps & SDHCI_CAN_DO_SDMA)) {
749 host->flags |= USE_SDMA;
751 debug("%s: Your controller doesn't support SDMA!!\n",
755 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
756 if (!(caps & SDHCI_CAN_DO_ADMA2)) {
/*
 * NOTE(review): message says "SDMA" but this branch checks ADMA2
 * support — looks like a copy/paste error; the text should read
 * "ADMA". Fix in a follow-up patch.
 */
757 printf("%s: Your controller doesn't support SDMA!!\n",
761 host->adma_desc_table = memalign(ARCH_DMA_MINALIGN, ADMA_TABLE_SZ);
763 host->adma_addr = (dma_addr_t)host->adma_desc_table;
764 #ifdef CONFIG_DMA_ADDR_T_64BIT
765 host->flags |= USE_ADMA64;
767 host->flags |= USE_ADMA;
/* REG32_RW quirk: version lives in the upper half of a 32-bit register. */
770 if (host->quirks & SDHCI_QUIRK_REG32_RW)
772 sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
774 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
776 cfg->name = host->name;
777 #ifndef CONFIG_DM_MMC
778 cfg->ops = &sdhci_ops;
781 /* Check whether the clock multiplier is supported or not */
782 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
783 #if CONFIG_IS_ENABLED(DM_MMC)
/* caps_1 gets the upper 32 bits of the DT caps mask/override. */
784 caps_1 = ~(u32)(dt_caps_mask >> 32) &
785 sdhci_readl(host, SDHCI_CAPABILITIES_1);
786 caps_1 |= (u32)(dt_caps >> 32);
788 caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
790 debug("%s, caps_1: 0x%x\n", __func__, caps_1);
791 host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
792 SDHCI_CLOCK_MUL_SHIFT;
/* If the driver didn't set max_clk, read the base clock from caps. */
795 if (host->max_clk == 0) {
796 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
797 host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
798 SDHCI_CLOCK_BASE_SHIFT;
800 host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
801 SDHCI_CLOCK_BASE_SHIFT;
802 host->max_clk *= 1000000;
804 host->max_clk *= host->clk_mul;
806 if (host->max_clk == 0) {
807 printf("%s: Hardware doesn't specify base clock frequency\n",
811 if (f_max && (f_max < host->max_clk))
814 cfg->f_max = host->max_clk;
/* f_min follows from the largest divisor the spec version allows. */
818 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
819 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
821 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
824 if (caps & SDHCI_CAN_VDD_330)
825 cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
826 if (caps & SDHCI_CAN_VDD_300)
827 cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
828 if (caps & SDHCI_CAN_VDD_180)
829 cfg->voltages |= MMC_VDD_165_195;
831 if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
832 cfg->voltages |= host->voltages;
834 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_4BIT;
836 /* Since Host Controller Version3.0 */
837 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
838 if (!(caps & SDHCI_CAN_DO_8BIT))
839 cfg->host_caps &= ~MMC_MODE_8BIT;
842 if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
843 cfg->host_caps &= ~MMC_MODE_HS;
844 cfg->host_caps &= ~MMC_MODE_HS_52MHz;
/* UHS modes all require 1.8V signalling support. */
847 if (!(cfg->voltages & MMC_VDD_165_195))
848 caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
849 SDHCI_SUPPORT_DDR50);
851 if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
852 SDHCI_SUPPORT_DDR50))
853 cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);
855 if (caps_1 & SDHCI_SUPPORT_SDR104) {
856 cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
858 * SD3.0: SDR104 is supported so (for eMMC) the caps2
859 * field can be promoted to support HS200.
861 cfg->host_caps |= MMC_CAP(MMC_HS_200);
862 } else if (caps_1 & SDHCI_SUPPORT_SDR50) {
863 cfg->host_caps |= MMC_CAP(UHS_SDR50);
866 if (caps_1 & SDHCI_SUPPORT_DDR50)
867 cfg->host_caps |= MMC_CAP(UHS_DDR50);
870 cfg->host_caps |= host->host_caps;
872 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
/*
 * sdhci_bind() - DM bind helper; registers the mmc device for this host.
 */
878 int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
880 return mmc_bind(dev, mmc, cfg);
883 int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
887 ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
891 host->mmc = mmc_create(&host->cfg, host);
892 if (host->mmc == NULL) {
893 printf("%s: mmc create fail!\n", __func__);