1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Freescale eSDHC controller driver.
5 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
6 * Copyright (c) 2009 MontaVista Software, Inc.
8 * Authors: Xiaobo Xie <X.Xie@freescale.com>
9 * Anton Vorontsov <avorontsov@ru.mvista.com>
12 #include <linux/err.h>
15 #include <linux/of_address.h>
16 #include <linux/delay.h>
17 #include <linux/module.h>
18 #include <linux/sys_soc.h>
19 #include <linux/clk.h>
20 #include <linux/ktime.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/mmc/host.h>
23 #include <linux/mmc/mmc.h>
24 #include "sdhci-pltfm.h"
25 #include "sdhci-esdhc.h"
/* eSDHC IP vendor version codes read from the host-version register. */
27 #define VENDOR_V_22 0x12
28 #define VENDOR_V_23 0x13
/* Number of MMC timing modes indexed by the max_clk[] table below. */
30 #define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
/*
 * Per-SoC clock limits: a default max clock for SD cards plus an
 * optional per-timing-mode cap.
 * NOTE(review): the struct's closing brace is missing from this excerpt.
 */
32 struct esdhc_clk_fixup {
33 const unsigned int sd_dflt_max_clk;
34 const unsigned int max_clk[MMC_TIMING_NUM];
/* Per-SoC clock caps; entries are matched via sdhci_esdhc_of_match .data. */
37 static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
38 .sd_dflt_max_clk = 25000000,
39 .max_clk[MMC_TIMING_MMC_HS] = 46500000,
40 .max_clk[MMC_TIMING_SD_HS] = 46500000,
43 static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
44 .sd_dflt_max_clk = 25000000,
45 .max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
46 .max_clk[MMC_TIMING_MMC_HS200] = 167000000,
49 static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
50 .sd_dflt_max_clk = 25000000,
51 .max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
52 .max_clk[MMC_TIMING_MMC_HS200] = 125000000,
55 static const struct esdhc_clk_fixup p1010_esdhc_clk = {
56 .sd_dflt_max_clk = 20000000,
57 .max_clk[MMC_TIMING_LEGACY] = 20000000,
58 .max_clk[MMC_TIMING_MMC_HS] = 42000000,
59 .max_clk[MMC_TIMING_SD_HS] = 40000000,
/*
 * Device-tree match table. Entries with .data attach a per-SoC clock
 * fixup table; the generic "fsl,esdhc" entry has no fixups.
 */
62 static const struct of_device_id sdhci_esdhc_of_match[] = {
63 { .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
64 { .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
65 { .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
66 { .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
67 { .compatible = "fsl,mpc8379-esdhc" },
68 { .compatible = "fsl,mpc8536-esdhc" },
69 { .compatible = "fsl,esdhc" },
72 MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
/*
 * Private per-host state (fields of struct sdhci_esdhc; the struct
 * opener and some fields are missing from this excerpt).
 * The quirk_* flags are set from SoC/compatible matching in esdhc_init()
 * and probe, and consulted throughout the register fixup and tuning paths.
 */
77 bool quirk_incorrect_hostver;
78 bool quirk_limited_clk_division;
79 bool quirk_unreliable_pulse_detection;
80 bool quirk_tuning_erratum_type1;
81 bool quirk_tuning_erratum_type2;
82 bool quirk_ignore_data_inhibit;
83 bool quirk_delay_before_data_reset;
/* Base clock in Hz when the peripheral clock is used; 0 otherwise. */
85 unsigned int peripheral_clock;
86 const struct esdhc_clk_fixup *clk_fixup;
91 * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
92 * to make it compatible with SD spec.
94 * @host: pointer to sdhci_host
95 * @spec_reg: SD spec register address
96 * @value: 32bit eSDHC register value on spec_reg address
98 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
99 * registers are 32 bits. There are differences in register size, register
100 * address, register function, bit position and function between eSDHC spec
103 * Return a fixed up register value
105 static u32 esdhc_readl_fixup(struct sdhci_host *host,
106 int spec_reg, u32 value)
108 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
109 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
113 * The bit of ADMA flag in eSDHC is not compatible with standard
114 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
115 * supported by eSDHC.
116 * And for many FSL eSDHC controller, the reset value of field
117 * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
118 * only those whose vendor version is greater than 2.2/0x12 support ADMA.
120 if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
121 if (esdhc->vendor_ver > VENDOR_V_22) {
122 ret = value | SDHCI_CAN_DO_ADMA2;
127 * The DAT[3:0] line signal levels and the CMD line signal level are
128 * not compatible with standard SDHC register. The line signal levels
129 * DAT[7:0] are at bits 31:24 and the command line signal level is at
130 * bit 23. All other bits are the same as in the standard SDHC
133 if (spec_reg == SDHCI_PRESENT_STATE) {
134 ret = value & 0x000fffff;
/* Shift eSDHC DAT[3:0] levels (bits 27:24) down into the spec position. */
135 ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
/* Move the CMD line level (bit 23) up into the spec's CMD_LVL bit. */
136 ret |= (value << 1) & SDHCI_CMD_LVL;
141 * DTS properties of mmc host are used to enable each speed mode
142 * according to soc and board capability. So clean up
143 * SDR50/SDR104/DDR50 support bits here.
145 if (spec_reg == SDHCI_CAPABILITIES_1) {
146 ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
147 SDHCI_SUPPORT_DDR50);
152 * Some controllers have unreliable Data Line Active
153 * bit for commands with busy signal. This affects
154 * Command Inhibit (data) bit. Just ignore it since
155 * MMC core driver has already polled card status
156 * with CMD13 after any command with busy signal.
158 if ((spec_reg == SDHCI_PRESENT_STATE) &&
159 (esdhc->quirk_ignore_data_inhibit == true)) {
160 ret = value & ~SDHCI_DATA_INHIBIT;
/*
 * Extract a 16-bit spec register from the 32-bit eSDHC register value,
 * with a version-report override for the T4240 hostver erratum.
 */
168 static u16 esdhc_readw_fixup(struct sdhci_host *host,
169 int spec_reg, u32 value)
171 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
172 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
/* 0 or 16 depending on which half-word of the 32-bit register is wanted. */
174 int shift = (spec_reg & 0x2) * 8;
176 if (spec_reg == SDHCI_HOST_VERSION)
177 ret = value & 0xffff;
179 ret = (value >> shift) & 0xffff;
180 /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
181 * vendor version and spec version information.
183 if ((spec_reg == SDHCI_HOST_VERSION) &&
184 (esdhc->quirk_incorrect_hostver))
185 ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
/*
 * Extract an 8-bit spec register from the 32-bit eSDHC register value,
 * remapping the non-standard "DMA select" field for HOST_CONTROL reads.
 */
189 static u8 esdhc_readb_fixup(struct sdhci_host *host,
190 int spec_reg, u32 value)
/* Byte offset within the 32-bit register, as a bit shift (0/8/16/24). */
194 int shift = (spec_reg & 0x3) * 8;
196 ret = (value >> shift) & 0xff;
199 * "DMA select" locates at offset 0x28 in SD specification, but on
200 * P5020 or P3041, it locates at 0x29.
202 if (spec_reg == SDHCI_HOST_CONTROL) {
203 /* DMA select is 22,23 bits in Protocol Control Register */
204 dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
205 /* fixup the result */
206 ret &= ~SDHCI_CTRL_DMA_MASK;
213 * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
214 * written into eSDHC register.
216 * @host: pointer to sdhci_host
217 * @spec_reg: SD spec register address
218 * @value: 8/16/32bit SD spec register value that would be written
219 * @old_value: 32bit eSDHC register value on spec_reg address
221 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
222 * registers are 32 bits. There are differences in register size, register
223 * address, register function, bit position and function between eSDHC spec
226 * Return a fixed up register value
228 static u32 esdhc_writel_fixup(struct sdhci_host *host,
229 int spec_reg, u32 value, u32 old_value)
234 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
235 * when SYSCTL[RSTD] is set for some special operations.
236 * It has no impact on other operations.
238 if (spec_reg == SDHCI_INT_ENABLE)
239 ret = value | SDHCI_INT_BLK_GAP;
/*
 * Merge a 16-bit spec-register write into the containing 32-bit eSDHC
 * register. TRANSFER_MODE writes are deferred (shadowed) and combined
 * with the subsequent COMMAND write.
 */
246 static u32 esdhc_writew_fixup(struct sdhci_host *host,
247 int spec_reg, u16 value, u32 old_value)
249 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
250 int shift = (spec_reg & 0x2) * 8;
254 case SDHCI_TRANSFER_MODE:
256 * Postpone this write, we must do it together with a
257 * command write that is down below. Return old value.
259 pltfm_host->xfer_mode_shadow = value;
/* COMMAND write: combine with the shadowed transfer mode in one 32-bit word. */
262 ret = (value << 16) | pltfm_host->xfer_mode_shadow;
/* Generic case: replace only the addressed half-word. */
266 ret = old_value & (~(0xffff << shift));
267 ret |= (value << shift);
269 if (spec_reg == SDHCI_BLOCK_SIZE) {
271 * Two last DMA bits are reserved, and first one is used for
272 * non-standard blksz of 4096 bytes that we don't support
273 * yet. So clear the DMA boundary bits.
275 ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
/*
 * Merge an 8-bit spec-register write into the containing 32-bit eSDHC
 * register, handling the non-standard power control and DMA-select
 * placement.
 */
280 static u32 esdhc_writeb_fixup(struct sdhci_host *host,
281 int spec_reg, u8 value, u32 old_value)
286 int shift = (spec_reg & 0x3) * 8;
289 * eSDHC doesn't have a standard power control register, so we do
290 * nothing here to avoid incorrect operation.
292 if (spec_reg == SDHCI_POWER_CONTROL)
295 * "DMA select" location is offset 0x28 in SD specification, but on
296 * P5020 or P3041, it's located at 0x29.
298 if (spec_reg == SDHCI_HOST_CONTROL) {
300 * If host control register is not standard, exit
303 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
306 /* DMA select is 22,23 bits in Protocol Control Register */
307 dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
308 ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
309 tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
310 (old_value & SDHCI_CTRL_DMA_MASK);
311 ret = (ret & (~0xff)) | tmp;
313 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
314 ret &= ~ESDHC_HOST_CONTROL_RES;
/* Generic case: replace only the addressed byte. */
318 ret = (old_value & (~(0xff << shift))) | (value << shift);
/*
 * Big-endian (be) and little-endian (le) register accessors wired into
 * sdhci_ops. All accesses go through 32-bit MMIO on a 4-byte-aligned
 * base address, then through the *_fixup helpers above to translate
 * between SD-spec layout and eSDHC layout.
 */
322 static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
/* CAPABILITIES_1 lives at a non-standard eSDHC offset. */
327 if (reg == SDHCI_CAPABILITIES_1)
328 value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
330 value = ioread32be(host->ioaddr + reg);
332 ret = esdhc_readl_fixup(host, reg, value);
337 static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
342 if (reg == SDHCI_CAPABILITIES_1)
343 value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
345 value = ioread32(host->ioaddr + reg);
347 ret = esdhc_readl_fixup(host, reg, value);
352 static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
356 int base = reg & ~0x3;
358 value = ioread32be(host->ioaddr + base);
359 ret = esdhc_readw_fixup(host, reg, value);
363 static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
367 int base = reg & ~0x3;
369 value = ioread32(host->ioaddr + base);
370 ret = esdhc_readw_fixup(host, reg, value);
374 static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
378 int base = reg & ~0x3;
380 value = ioread32be(host->ioaddr + base);
381 ret = esdhc_readb_fixup(host, reg, value);
385 static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
389 int base = reg & ~0x3;
391 value = ioread32(host->ioaddr + base);
392 ret = esdhc_readb_fixup(host, reg, value);
396 static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
400 value = esdhc_writel_fixup(host, reg, val, 0);
401 iowrite32be(value, host->ioaddr + reg);
404 static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
408 value = esdhc_writel_fixup(host, reg, val, 0);
409 iowrite32(value, host->ioaddr + reg);
412 static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
414 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
415 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
416 int base = reg & ~0x3;
420 value = ioread32be(host->ioaddr + base);
421 ret = esdhc_writew_fixup(host, reg, val, value);
/* TRANSFER_MODE is shadowed by the fixup and flushed with the CMD write. */
422 if (reg != SDHCI_TRANSFER_MODE)
423 iowrite32be(ret, host->ioaddr + base);
425 /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
426 * 1us later after ESDHC_EXTN is set.
428 if (base == ESDHC_SYSTEM_CONTROL_2) {
429 if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
430 esdhc->in_sw_tuning) {
432 ret |= ESDHC_SMPCLKSEL;
433 iowrite32be(ret, host->ioaddr + base);
438 static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
440 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
441 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
442 int base = reg & ~0x3;
446 value = ioread32(host->ioaddr + base);
447 ret = esdhc_writew_fixup(host, reg, val, value);
448 if (reg != SDHCI_TRANSFER_MODE)
449 iowrite32(ret, host->ioaddr + base);
451 /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
452 * 1us later after ESDHC_EXTN is set.
454 if (base == ESDHC_SYSTEM_CONTROL_2) {
455 if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
456 esdhc->in_sw_tuning) {
458 ret |= ESDHC_SMPCLKSEL;
459 iowrite32(ret, host->ioaddr + base);
464 static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
466 int base = reg & ~0x3;
470 value = ioread32be(host->ioaddr + base);
471 ret = esdhc_writeb_fixup(host, reg, val, value);
472 iowrite32be(ret, host->ioaddr + base);
475 static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
477 int base = reg & ~0x3;
481 value = ioread32(host->ioaddr + base);
482 ret = esdhc_writeb_fixup(host, reg, val, value);
483 iowrite32(ret, host->ioaddr + base);
487 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
488 * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
489 * and Block Gap Event(IRQSTAT[BGE]) are also set.
490 * For Continue, apply soft reset for data(SYSCTL[RSTD]);
491 * and re-issue the entire read transaction from beginning.
493 static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
495 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
496 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
/* Workaround only applies on vendor version 2.3 with TC+BGE both raised. */
501 applicable = (intmask & SDHCI_INT_DATA_END) &&
502 (intmask & SDHCI_INT_BLK_GAP) &&
503 (esdhc->vendor_ver == VENDOR_V_23);
507 host->data->error = 0;
508 dmastart = sg_dma_address(host->data->sg);
509 dmanow = dmastart + host->data->bytes_xfered;
511 * Force update to the next DMA block boundary.
513 dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
514 SDHCI_DEFAULT_BOUNDARY_SIZE;
515 host->data->bytes_xfered = dmanow - dmastart;
516 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
/*
 * Enable DMA: widen the DMA mask to 40 bits on LS1043A/LS1046A and
 * program DMA snooping according to the DT coherency property.
 */
519 static int esdhc_of_enable_dma(struct sdhci_host *host)
522 struct device *dev = mmc_dev(host->mmc);
524 if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
525 of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
526 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
528 value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
530 if (of_dma_is_coherent(dev->of_node))
531 value |= ESDHC_DMA_SNOOP;
533 value &= ~ESDHC_DMA_SNOOP;
535 sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
/* Max clock: prefer the peripheral clock when one was configured. */
539 static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
541 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
542 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
544 if (esdhc->peripheral_clock)
545 return esdhc->peripheral_clock;
547 return pltfm_host->clock;
/* Min clock: base clock divided by the largest divisor chain (256 * 16). */
550 static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
552 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
553 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
556 if (esdhc->peripheral_clock)
557 clock = esdhc->peripheral_clock;
559 clock = pltfm_host->clock;
560 return clock / 256 / 16;
/*
 * Gate or ungate SDCLK via SYSCTL[SDCLKEN]; when enabling, poll
 * PRSSTAT for clock-stable with a 20 ms timeout.
 */
563 static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
568 val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
571 val |= ESDHC_CLOCK_SDCLKEN;
573 val &= ~ESDHC_CLOCK_SDCLKEN;
575 sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
/* Wait max 20 ms for the internal clock to stabilise. */
578 timeout = ktime_add_ms(ktime_get(), 20);
579 val = ESDHC_CLOCK_STABLE;
581 bool timedout = ktime_after(ktime_get(), timeout);
583 if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
586 pr_err("%s: Internal clock never stabilised.\n",
587 mmc_hostname(host->mmc));
/*
 * Program the SD clock: apply per-SoC frequency caps, compute the
 * prescaler (pre_div) and divisor (div), handle the limited-division
 * erratum for HS400, set up HS400 mode/DLL when needed, and wait for
 * the clock to stabilise before ungating SDCLK.
 */
594 static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
596 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
597 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
605 host->mmc->actual_clock = 0;
608 esdhc_clock_enable(host, false);
612 /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
613 if (esdhc->vendor_ver < VENDOR_V_23)
/* SD cards in legacy timing get the per-SoC default cap; otherwise use the per-timing cap. */
616 if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
617 esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
618 fixup = esdhc->clk_fixup->sd_dflt_max_clk;
619 else if (esdhc->clk_fixup)
620 fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
622 if (fixup && clock > fixup)
625 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
626 temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
627 ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
628 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
/* Grow pre_div (power of two) then div until the target rate is reached. */
630 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
633 while (host->max_clk / pre_div / div > clock && div < 16)
636 if (esdhc->quirk_limited_clk_division &&
637 clock == MMC_HS200_MAX_DTR &&
638 (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
639 host->flags & SDHCI_HS400_TUNING)) {
640 division = pre_div * div;
644 } else if (division <= 8) {
647 } else if (division <= 12) {
651 pr_warn("%s: using unsupported clock division.\n",
652 mmc_hostname(host->mmc));
656 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
657 clock, host->max_clk / pre_div / div);
658 host->mmc->actual_clock = host->max_clk / pre_div / div;
659 esdhc->div_ratio = pre_div * div;
663 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
664 temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
665 | (div << ESDHC_DIVIDER_SHIFT)
666 | (pre_div << ESDHC_PREDIV_SHIFT));
667 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
/* HS400 at the HS200 max rate: enable HS400 mode, CMD clock control and the DLL. */
669 if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
670 clock == MMC_HS200_MAX_DTR) {
671 temp = sdhci_readl(host, ESDHC_TBCTL);
672 sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
673 temp = sdhci_readl(host, ESDHC_SDCLKCTL);
674 sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
675 esdhc_clock_enable(host, true);
677 temp = sdhci_readl(host, ESDHC_DLLCFG0);
678 temp |= ESDHC_DLL_ENABLE;
679 if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
680 temp |= ESDHC_DLL_FREQ_SEL;
681 sdhci_writel(host, temp, ESDHC_DLLCFG0);
682 temp = sdhci_readl(host, ESDHC_TBCTL);
683 sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
685 esdhc_clock_enable(host, false);
686 temp = sdhci_readl(host, ESDHC_DMA_SYSCTL);
687 temp |= ESDHC_FLUSH_ASYNC_FIFO;
688 sdhci_writel(host, temp, ESDHC_DMA_SYSCTL);
/* Wait max 20 ms for the new clock configuration to stabilise. */
692 timeout = ktime_add_ms(ktime_get(), 20);
694 bool timedout = ktime_after(ktime_get(), timeout);
696 if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
699 pr_err("%s: Internal clock never stabilised.\n",
700 mmc_hostname(host->mmc));
706 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
707 temp |= ESDHC_CLOCK_SDCLKEN;
708 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
/* Set the data bus width (1/4/8 bit) in the eSDHC protocol control register. */
711 static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
715 ctrl = sdhci_readl(host, ESDHC_PROCTL);
716 ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
718 case MMC_BUS_WIDTH_8:
719 ctrl |= ESDHC_CTRL_8BITBUS;
722 case MMC_BUS_WIDTH_4:
723 ctrl |= ESDHC_CTRL_4BITBUS;
730 sdhci_writel(host, ctrl, ESDHC_PROCTL);
733 static void esdhc_reset(struct sdhci_host *host, u8 mask)
735 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
736 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
737 u32 val, bus_width = 0;
740 * Add delay to make sure all the DMA transfers are finished
743 if (esdhc->quirk_delay_before_data_reset &&
744 (mask & SDHCI_RESET_DATA) &&
745 (host->flags & SDHCI_REQ_USE_DMA))
749 * Save bus-width for eSDHC whose vendor version is 2.2
750 * or lower for data reset.
752 if ((mask & SDHCI_RESET_DATA) &&
753 (esdhc->vendor_ver <= VENDOR_V_22)) {
754 val = sdhci_readl(host, ESDHC_PROCTL);
755 bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
758 sdhci_reset(host, mask);
761 * Restore bus-width setting and interrupt registers for eSDHC
762 * whose vendor version is 2.2 or lower for data reset.
764 if ((mask & SDHCI_RESET_DATA) &&
765 (esdhc->vendor_ver <= VENDOR_V_22)) {
766 val = sdhci_readl(host, ESDHC_PROCTL);
767 val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
769 sdhci_writel(host, val, ESDHC_PROCTL);
771 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
772 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
776 * Some bits have to be cleaned manually for eSDHC whose spec
777 * version is higher than 3.0 for all reset.
779 if ((mask & SDHCI_RESET_ALL) &&
780 (esdhc->spec_ver >= SDHCI_SPEC_300)) {
781 val = sdhci_readl(host, ESDHC_TBCTL);
783 sdhci_writel(host, val, ESDHC_TBCTL);
786 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
789 if (esdhc->quirk_unreliable_pulse_detection) {
790 val = sdhci_readl(host, ESDHC_DLLCFG1);
791 val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
792 sdhci_writel(host, val, ESDHC_DLLCFG1);
797 /* The SCFG, Supplemental Configuration Unit, provides SoC specific
798 * configuration and status registers for the device. There is a
799 * SDHC IO VSEL control register on SCFG for some platforms. It's
800 * used to support SDHC IO voltage switching.
802 static const struct of_device_id scfg_device_ids[] = {
803 { .compatible = "fsl,t1040-scfg", },
804 { .compatible = "fsl,ls1012a-scfg", },
805 { .compatible = "fsl,ls1046a-scfg", },
809 /* SDHC IO VSEL control register definition */
810 #define SCFG_SDHCIOVSELCR 0x408
811 #define SDHCIOVSELCR_TGLEN 0x80000000
812 #define SDHCIOVSELCR_VSELVAL 0x60000000
813 #define SDHCIOVSELCR_SDHC_VS 0x00000001
/*
 * Switch the IO signalling voltage between 3.3 V and 1.8 V. On SoCs
 * with an SCFG VSEL register, the switch is toggled there (big-endian
 * MMIO) in addition to the eSDHC PROCTL[VOLT_SEL] bit.
 */
815 static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
818 struct sdhci_host *host = mmc_priv(mmc);
819 struct device_node *scfg_node;
820 void __iomem *scfg_base = NULL;
825 * Signal Voltage Switching is only applicable for Host Controllers
828 if (host->version < SDHCI_SPEC_300)
831 val = sdhci_readl(host, ESDHC_PROCTL);
833 switch (ios->signal_voltage) {
834 case MMC_SIGNAL_VOLTAGE_330:
835 val &= ~ESDHC_VOLT_SEL;
836 sdhci_writel(host, val, ESDHC_PROCTL);
838 case MMC_SIGNAL_VOLTAGE_180:
839 scfg_node = of_find_matching_node(NULL, scfg_device_ids);
841 scfg_base = of_iomap(scfg_node, 0);
843 sdhciovselcr = SDHCIOVSELCR_TGLEN |
844 SDHCIOVSELCR_VSELVAL;
845 iowrite32be(sdhciovselcr,
846 scfg_base + SCFG_SDHCIOVSELCR);
848 val |= ESDHC_VOLT_SEL;
849 sdhci_writel(host, val, ESDHC_PROCTL);
852 sdhciovselcr = SDHCIOVSELCR_TGLEN |
853 SDHCIOVSELCR_SDHC_VS;
854 iowrite32be(sdhciovselcr,
855 scfg_base + SCFG_SDHCIOVSELCR);
858 val |= ESDHC_VOLT_SEL;
859 sdhci_writel(host, val, ESDHC_PROCTL);
/* SoC revisions affected by the type-1 tuning erratum (fixed SW tuning window). */
867 static struct soc_device_attribute soc_tuning_erratum_type1[] = {
868 { .family = "QorIQ T1023", .revision = "1.0", },
869 { .family = "QorIQ T1040", .revision = "1.0", },
870 { .family = "QorIQ T2080", .revision = "1.0", },
871 { .family = "QorIQ LS1021A", .revision = "1.0", },
/* SoC revisions affected by the type-2 tuning erratum (TBSTAT-derived window). */
875 static struct soc_device_attribute soc_tuning_erratum_type2[] = {
876 { .family = "QorIQ LS1012A", .revision = "1.0", },
877 { .family = "QorIQ LS1043A", .revision = "1.*", },
878 { .family = "QorIQ LS1046A", .revision = "1.0", },
879 { .family = "QorIQ LS1080A", .revision = "1.0", },
880 { .family = "QorIQ LS2080A", .revision = "1.0", },
881 { .family = "QorIQ LA1575A", .revision = "1.0", },
/*
 * Enable/disable the tuning block (TBCTL[TB_EN]) with the clock gated
 * and the async FIFO flushed, then re-enable the clock.
 */
885 static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
889 esdhc_clock_enable(host, false);
891 val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
892 val |= ESDHC_FLUSH_ASYNC_FIFO;
893 sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
895 val = sdhci_readl(host, ESDHC_TBCTL);
900 sdhci_writel(host, val, ESDHC_TBCTL);
902 esdhc_clock_enable(host, true);
/*
 * Compute the software-tuning window (start/end pointers in units of
 * the clock divider ratio). Type-1 erratum parts use a fixed window;
 * otherwise the window is derived from the TBSTAT register per the
 * erratum procedure.
 */
905 static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
908 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
909 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
910 u8 tbstat_15_8, tbstat_7_0;
913 if (esdhc->quirk_tuning_erratum_type1) {
914 *window_start = 5 * esdhc->div_ratio;
915 *window_end = 3 * esdhc->div_ratio;
919 /* Write TBCTL[11:8]=4'h8 */
920 val = sdhci_readl(host, ESDHC_TBCTL);
923 sdhci_writel(host, val, ESDHC_TBCTL);
927 /* Read TBCTL[31:0] register and rewrite again */
928 val = sdhci_readl(host, ESDHC_TBCTL);
929 sdhci_writel(host, val, ESDHC_TBCTL);
933 /* Read the TBSTAT[31:0] register twice */
934 val = sdhci_readl(host, ESDHC_TBSTAT);
935 val = sdhci_readl(host, ESDHC_TBSTAT);
937 /* Reset data lines by setting ESDHCCTL[RSTD] */
938 sdhci_reset(host, SDHCI_RESET_DATA);
939 /* Write 32'hFFFF_FFFF to IRQSTAT register */
940 sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
942 /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
943 * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
944 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
945 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
947 tbstat_7_0 = val & 0xff;
948 tbstat_15_8 = (val >> 8) & 0xff;
950 if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
951 *window_start = 8 * esdhc->div_ratio;
952 *window_end = 4 * esdhc->div_ratio;
954 *window_start = 5 * esdhc->div_ratio;
955 *window_end = 3 * esdhc->div_ratio;
/*
 * Run software tuning: program the tuning window pointers into TBPTR,
 * select SW tuning mode in TBCTL, then execute the standard SDHCI
 * tuning sequence with the in_sw_tuning flag set (which triggers the
 * SMPCLKSEL sequencing in the writew accessors).
 */
959 static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
960 u8 window_start, u8 window_end)
962 struct sdhci_host *host = mmc_priv(mmc);
963 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
964 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
968 /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
969 val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
970 ESDHC_WNDW_STRT_PTR_MASK;
971 val |= window_end & ESDHC_WNDW_END_PTR_MASK;
972 sdhci_writel(host, val, ESDHC_TBPTR);
974 /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
975 val = sdhci_readl(host, ESDHC_TBCTL);
976 val &= ~ESDHC_TB_MODE_MASK;
977 val |= ESDHC_TB_MODE_SW;
978 sdhci_writel(host, val, ESDHC_TBCTL);
980 esdhc->in_sw_tuning = true;
981 ret = sdhci_execute_tuning(mmc, opcode);
982 esdhc->in_sw_tuning = false;
/*
 * Top-level tuning entry point: cap the clock for tuning, enable the
 * tuning block, run HW tuning, and on erratum-triggered failure
 * (-EAGAIN) fall back to SW tuning, then retry once with a reduced
 * clock. For HS400 tuning, program the flow-control bit on success.
 */
986 static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
988 struct sdhci_host *host = mmc_priv(mmc);
989 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
990 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
991 u8 window_start, window_end;
992 int ret, retries = 1;
997 /* For tuning mode, the sd clock divisor value
998 * must be larger than 3 according to reference manual.
1000 clk = esdhc->peripheral_clock / 3;
1001 if (host->clock > clk)
1002 esdhc_of_set_clock(host, clk);
1004 esdhc_tuning_block_enable(host, true);
1006 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1009 if (esdhc->quirk_limited_clk_division &&
1011 esdhc_of_set_clock(host, host->clock);
/* Select hardware tuning mode 3 before running the standard sequence. */
1014 val = sdhci_readl(host, ESDHC_TBCTL);
1015 val &= ~ESDHC_TB_MODE_MASK;
1016 val |= ESDHC_TB_MODE_3;
1017 sdhci_writel(host, val, ESDHC_TBCTL);
1019 ret = sdhci_execute_tuning(mmc, opcode);
1023 /* If HW tuning fails and triggers erratum,
1026 ret = host->tuning_err;
1027 if (ret == -EAGAIN &&
1028 (esdhc->quirk_tuning_erratum_type1 ||
1029 esdhc->quirk_tuning_erratum_type2)) {
1030 /* Recover HS400 tuning flag */
1032 host->flags |= SDHCI_HS400_TUNING;
1033 pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
1036 esdhc_prepare_sw_tuning(host, &window_start,
1038 ret = esdhc_execute_sw_tuning(mmc, opcode,
1044 /* Retry both HW/SW tuning with reduced clock. */
1045 ret = host->tuning_err;
1046 if (ret == -EAGAIN && retries) {
1047 /* Recover HS400 tuning flag */
1049 host->flags |= SDHCI_HS400_TUNING;
1051 clk = host->max_clk / (esdhc->div_ratio + 1);
1052 esdhc_of_set_clock(host, clk);
1053 pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
1061 } while (retries--);
1064 esdhc_tuning_block_enable(host, false);
1065 } else if (hs400_tuning) {
1066 val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
1067 val |= ESDHC_FLW_CTL_BG;
1068 sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
/* Enable the tuning block before entering HS400, then defer to core. */
1074 static void esdhc_set_uhs_signaling(struct sdhci_host *host,
1075 unsigned int timing)
1077 if (timing == MMC_TIMING_MMC_HS400)
1078 esdhc_tuning_block_enable(host, true);
1080 sdhci_set_uhs_signaling(host, timing);
/*
 * IRQ filter for a P2020 erratum: suppress a spurious DATA_END
 * interrupt during multi-block writes by acking and masking it.
 */
1083 static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
1087 if (of_find_compatible_node(NULL, NULL,
1088 "fsl,p2020-esdhc")) {
1089 command = SDHCI_GET_CMD(sdhci_readw(host,
1091 if (command == MMC_WRITE_MULTIPLE_BLOCK &&
1092 sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
1093 intmask & SDHCI_INT_DATA_END) {
1094 intmask &= ~SDHCI_INT_DATA_END;
1095 sdhci_writel(host, SDHCI_INT_DATA_END,
1102 #ifdef CONFIG_PM_SLEEP
/* Saved protocol-control register across suspend/resume. */
1103 static u32 esdhc_proctl;
1104 static int esdhc_of_suspend(struct device *dev)
1106 struct sdhci_host *host = dev_get_drvdata(dev);
1108 esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
/* Retuning is required after resume unless tuning mode 3 is in use. */
1110 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
1111 mmc_retune_needed(host->mmc);
1113 return sdhci_suspend_host(host);
1116 static int esdhc_of_resume(struct device *dev)
1118 struct sdhci_host *host = dev_get_drvdata(dev);
1119 int ret = sdhci_resume_host(host);
1122 /* Isn't this already done by sdhci_resume_host() ? --rmk */
1123 esdhc_of_enable_dma(host);
1124 sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
1130 static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
/* sdhci_ops for big-endian register access. */
1134 static const struct sdhci_ops sdhci_esdhc_be_ops = {
1135 .read_l = esdhc_be_readl,
1136 .read_w = esdhc_be_readw,
1137 .read_b = esdhc_be_readb,
1138 .write_l = esdhc_be_writel,
1139 .write_w = esdhc_be_writew,
1140 .write_b = esdhc_be_writeb,
1141 .set_clock = esdhc_of_set_clock,
1142 .enable_dma = esdhc_of_enable_dma,
1143 .get_max_clock = esdhc_of_get_max_clock,
1144 .get_min_clock = esdhc_of_get_min_clock,
1145 .adma_workaround = esdhc_of_adma_workaround,
1146 .set_bus_width = esdhc_pltfm_set_bus_width,
1147 .reset = esdhc_reset,
1148 .set_uhs_signaling = esdhc_set_uhs_signaling,
/* sdhci_ops for little-endian register access. */
1152 static const struct sdhci_ops sdhci_esdhc_le_ops = {
1153 .read_l = esdhc_le_readl,
1154 .read_w = esdhc_le_readw,
1155 .read_b = esdhc_le_readb,
1156 .write_l = esdhc_le_writel,
1157 .write_w = esdhc_le_writew,
1158 .write_b = esdhc_le_writeb,
1159 .set_clock = esdhc_of_set_clock,
1160 .enable_dma = esdhc_of_enable_dma,
1161 .get_max_clock = esdhc_of_get_max_clock,
1162 .get_min_clock = esdhc_of_get_min_clock,
1163 .adma_workaround = esdhc_of_adma_workaround,
1164 .set_bus_width = esdhc_pltfm_set_bus_width,
1165 .reset = esdhc_reset,
1166 .set_uhs_signaling = esdhc_set_uhs_signaling,
/* Platform data selected at probe time by the "little-endian" DT property. */
1170 static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
1171 .quirks = ESDHC_DEFAULT_QUIRKS |
1173 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
1175 SDHCI_QUIRK_NO_CARD_NO_RESET |
1176 SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1177 .ops = &sdhci_esdhc_be_ops,
1180 static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
1181 .quirks = ESDHC_DEFAULT_QUIRKS |
1182 SDHCI_QUIRK_NO_CARD_NO_RESET |
1183 SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1184 .ops = &sdhci_esdhc_le_ops,
/* T4240 revisions that report an incorrect host version register. */
1187 static struct soc_device_attribute soc_incorrect_hostver[] = {
1188 { .family = "QorIQ T4240", .revision = "1.0", },
1189 { .family = "QorIQ T4240", .revision = "2.0", },
/* SoCs restricted to a limited set of SD clock divisions (HS400). */
1193 static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
1194 { .family = "QorIQ LX2160A", .revision = "1.0", },
1195 { .family = "QorIQ LX2160A", .revision = "2.0", },
1196 { .family = "QorIQ LS1028A", .revision = "1.0", },
/* SoCs with unreliable DLL pulse detection (DLLCFG1 workaround). */
1200 static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
1201 { .family = "QorIQ LX2160A", .revision = "1.0", },
/*
 * One-time controller initialisation: read vendor/spec versions, set
 * SoC-revision quirks, attach the per-SoC clock fixup table, determine
 * the base clock (peripheral clock, halved on some SoCs), and select
 * the peripheral clock in DMA_SYSCTL when one is present.
 */
1205 static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
1207 const struct of_device_id *match;
1208 struct sdhci_pltfm_host *pltfm_host;
1209 struct sdhci_esdhc *esdhc;
1210 struct device_node *np;
1215 pltfm_host = sdhci_priv(host);
1216 esdhc = sdhci_pltfm_priv(pltfm_host);
1218 host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
1219 esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
1220 SDHCI_VENDOR_VER_SHIFT;
1221 esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
1222 if (soc_device_match(soc_incorrect_hostver))
1223 esdhc->quirk_incorrect_hostver = true;
1225 esdhc->quirk_incorrect_hostver = false;
1227 if (soc_device_match(soc_fixup_sdhc_clkdivs))
1228 esdhc->quirk_limited_clk_division = true;
1230 esdhc->quirk_limited_clk_division = false;
1232 if (soc_device_match(soc_unreliable_pulse_detection))
1233 esdhc->quirk_unreliable_pulse_detection = true;
1235 esdhc->quirk_unreliable_pulse_detection = false;
1237 match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
1239 esdhc->clk_fixup = match->data;
1240 np = pdev->dev.of_node;
1242 if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
1243 esdhc->quirk_delay_before_data_reset = true;
1245 clk = of_clk_get(np, 0);
1248 * esdhc->peripheral_clock would be assigned with a value
1249 * which is eSDHC base clock when use peripheral clock.
1250 * For some platforms, the clock value got by common clk
1251 * API is peripheral clock while the eSDHC base clock is
1252 * 1/2 peripheral clock.
1254 if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
1255 of_device_is_compatible(np, "fsl,ls1028a-esdhc"))
1256 esdhc->peripheral_clock = clk_get_rate(clk) / 2;
1258 esdhc->peripheral_clock = clk_get_rate(clk);
1263 if (esdhc->peripheral_clock) {
1264 esdhc_clock_enable(host, false);
1265 val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
1266 val |= ESDHC_PERIPHERAL_CLK_SEL;
1267 sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
1268 esdhc_clock_enable(host, true);
/* Disable the tuning block before switching to HS400 DDR mode. */
1272 static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
1274 esdhc_tuning_block_enable(mmc_priv(mmc), false);
/*
 * Platform probe: select BE/LE platform data from the DT, install
 * eSDHC-specific mmc_host_ops overrides, run esdhc_init(), apply
 * compatible- and version-dependent quirks, parse generic MMC DT
 * properties and register the host.
 */
1278 static int sdhci_esdhc_probe(struct platform_device *pdev)
1280 struct sdhci_host *host;
1281 struct device_node *np;
1282 struct sdhci_pltfm_host *pltfm_host;
1283 struct sdhci_esdhc *esdhc;
1286 np = pdev->dev.of_node;
1288 if (of_property_read_bool(np, "little-endian"))
1289 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
1290 sizeof(struct sdhci_esdhc));
1292 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
1293 sizeof(struct sdhci_esdhc));
1296 return PTR_ERR(host);
1298 host->mmc_host_ops.start_signal_voltage_switch =
1299 esdhc_signal_voltage_switch;
1300 host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
1301 host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
1302 host->tuning_delay = 1;
1304 esdhc_init(pdev, host);
1306 sdhci_get_of_property(pdev);
1308 pltfm_host = sdhci_priv(host);
1309 esdhc = sdhci_pltfm_priv(pltfm_host);
1310 if (soc_device_match(soc_tuning_erratum_type1))
1311 esdhc->quirk_tuning_erratum_type1 = true;
1313 esdhc->quirk_tuning_erratum_type1 = false;
1315 if (soc_device_match(soc_tuning_erratum_type2))
1316 esdhc->quirk_tuning_erratum_type2 = true;
1318 esdhc->quirk_tuning_erratum_type2 = false;
/* Vendor 2.2 parts cannot use CMD23; newer parts get busy IRQs back. */
1320 if (esdhc->vendor_ver == VENDOR_V_22)
1321 host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
1323 if (esdhc->vendor_ver > VENDOR_V_22)
1324 host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
1326 if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
1327 host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
1328 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1331 if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
1332 of_device_is_compatible(np, "fsl,p5020-esdhc") ||
1333 of_device_is_compatible(np, "fsl,p4080-esdhc") ||
1334 of_device_is_compatible(np, "fsl,p1020-esdhc") ||
1335 of_device_is_compatible(np, "fsl,t1040-esdhc"))
1336 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1338 if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
1339 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1341 esdhc->quirk_ignore_data_inhibit = false;
1342 if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1344 * Freescale messed up with P2020 as it has a non-standard
1345 * host control register
1347 host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
1348 esdhc->quirk_ignore_data_inhibit = true;
1351 /* call to generic mmc_of_parse to support additional capabilities */
1352 ret = mmc_of_parse(host->mmc);
1356 mmc_of_parse_voltage(np, &host->ocr_mask);
1358 ret = sdhci_add_host(host);
1364 sdhci_pltfm_free(pdev);
/* Platform driver registration and module metadata. */
1368 static struct platform_driver sdhci_esdhc_driver = {
1370 .name = "sdhci-esdhc",
1371 .of_match_table = sdhci_esdhc_of_match,
1372 .pm = &esdhc_of_dev_pm_ops,
1374 .probe = sdhci_esdhc_probe,
1375 .remove = sdhci_pltfm_unregister,
1378 module_platform_driver(sdhci_esdhc_driver);
1380 MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
1381 MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
1382 "Anton Vorontsov <avorontsov@ru.mvista.com>");
1383 MODULE_LICENSE("GPL v2");