1 // SPDX-License-Identifier: GPL-2.0+
3 * (C) Copyright 2012 SAMSUNG Electronics
4 * Jaehoon Chung <jh80.chung@samsung.com>
5 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
/* Chunk size used when splitting a bounce buffer across IDMAC descriptors. */
17 #define PAGE_SIZE 4096
/*
 * Request a controller reset by writing @value to CTRL and poll until the
 * reset bits self-clear.
 *
 * NOTE(review): the polling loop, timeout decrement and return statements
 * are elided in this excerpt; only the visible statements are documented.
 * The visible code suggests success is signalled when no DWMCI_RESET_ALL
 * bit remains set — confirm against the full source.
 */
19 static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
/* Loop bound for the poll below (units not visible here — TODO confirm). */
21 unsigned long timeout = 1000;
/* Kick off the reset. */
24 dwmci_writel(host, DWMCI_CTRL, value);
/* Re-read CTRL; hardware clears the reset bits when done. */
27 ctrl = dwmci_readl(host, DWMCI_CTRL);
28 if (!(ctrl & DWMCI_RESET_ALL))
/*
 * Fill one internal-DMAC (IDMAC) descriptor.
 *
 * @idmac: descriptor to fill
 * @desc0: descriptor word 0 (flags/OWN bits, per the visible callers)
 * @desc1: descriptor word 1 (byte count, per the visible callers)
 * @desc2: descriptor word 2 (buffer address, per the visible callers)
 *
 * NOTE(review): the stores of desc0/desc1/desc2 into the descriptor are
 * elided in this excerpt; only the next-pointer chaining is visible.
 */
34 static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
35 u32 desc0, u32 desc1, u32 desc2)
37 struct dwmci_idmac *desc = idmac;
/* Chain descriptors: next descriptor lives immediately after this one. */
42 desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
/*
 * Build the IDMAC descriptor chain for a data transfer and enable DMA.
 *
 * The bounce buffer is split into PAGE_SIZE (4 KiB) chunks, one descriptor
 * per chunk of up to 8 blocks; the last descriptor carries the remainder
 * and the LD (last-descriptor) flag.  Descriptors are flushed from the
 * dcache so the controller sees them, then IDMAC/DMA are enabled in CTRL
 * and BMOD and the block size / total byte count are programmed.
 *
 * NOTE(review): the descriptor loop header and its termination condition
 * are elided in this excerpt; the flag logic below implies a loop over
 * descriptor index i with blk_cnt counting down — confirm in full source.
 */
45 static void dwmci_prepare_data(struct dwmci_host *host,
46 struct mmc_data *data,
47 struct dwmci_idmac *cur_idmac,
51 unsigned int i = 0, flags, cnt, blk_cnt;
52 ulong data_start, data_end;
55 blk_cnt = data->blocks;
/* Drain stale FIFO contents before programming the new transfer. */
57 dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
59 /* Clear IDMAC interrupt */
60 dwmci_writel(host, DWMCI_IDSTS, 0xFFFFFFFF);
/* Tell the controller where the descriptor chain starts. */
62 data_start = (ulong)cur_idmac;
63 dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);
/* Each descriptor: owned by DMA, chained; first one also gets FS. */
66 flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH ;
67 flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
/* Last descriptor: mark LD and carry the remaining byte count... */
69 flags |= DWMCI_IDMAC_LD;
70 cnt = data->blocksize * blk_cnt;
/* ...otherwise a full chunk of 8 blocks (8 * blocksize == PAGE_SIZE
 * only when blocksize is 512 — TODO confirm intended invariant). */
72 cnt = data->blocksize * 8;
/* Point this descriptor at its PAGE_SIZE slice of the bounce buffer. */
74 dwmci_set_idma_desc(cur_idmac, flags, cnt,
75 (ulong)bounce_buffer + (i * PAGE_SIZE));
/* Flush the finished descriptor chain so the DMA engine reads it. */
84 data_end = (ulong)cur_idmac;
85 flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));
/* Enable the internal DMAC and global DMA in the control register. */
87 ctrl = dwmci_readl(host, DWMCI_CTRL);
88 ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
89 dwmci_writel(host, DWMCI_CTRL, ctrl);
/* Fixed-burst mode + IDMAC enable in the bus-mode register. */
91 ctrl = dwmci_readl(host, DWMCI_BMOD);
92 ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
93 dwmci_writel(host, DWMCI_BMOD, ctrl);
/* Program transfer geometry: block size and total byte count. */
95 dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
96 dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
/*
 * Poll STATUS until the FIFO condition @bit clears, returning the last
 * STATUS value through @len.
 *
 * @bit: STATUS bit to wait on (e.g. FIFO full/empty flag)
 * @len: out-parameter; receives the final STATUS register value
 *
 * NOTE(review): the timeout initialisation, loop body delay and return
 * statements are elided in this excerpt.
 */
99 static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
103 *len = dwmci_readl(host, DWMCI_STATUS);
104 while (--timeout && (*len & bit)) {
106 *len = dwmci_readl(host, DWMCI_STATUS);
/* Timeout path: the message says "underflow" but this helper is used for
 * both read and write waits in the visible callers — message is generic. */
110 debug("%s: FIFO underflow timeout\n", __func__);
/*
 * Estimate a transfer timeout in milliseconds for @size bytes.
 *
 * Derivation: bits to move (size * 8) scaled to msec at mmc->clock Hz,
 * doubled for margin, divided by bus width (and again by 2 for DDR),
 * clamped to a 1000 ms minimum.
 *
 * NOTE(review): the return statement is elided in this excerpt; the
 * computed 'timeout' is presumably returned.
 */
117 static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
119 unsigned int timeout;
121 timeout = size * 8 * 1000; /* counting in bits and msec */
122 timeout *= 2; /* wait twice as long */
123 timeout /= mmc->clock;
124 timeout /= mmc->bus_width;
125 timeout /= mmc->ddr_mode ? 2 : 1;
/* Never wait less than one second. */
126 timeout = (timeout < 1000) ? 1000 : timeout;
/*
 * Run (or supervise) the data phase of a transfer.
 *
 * In FIFO (PIO) mode the CPU moves words through DWMCI_DATA, guided by the
 * RXDR/TXDR interrupt flags and the FIFO watermark; in DMA mode this loop
 * only watches RINTSTS for errors and DTO (data transfer over).  A software
 * timeout derived from the transfer size bounds the whole wait.
 *
 * NOTE(review): the main while-loop header, several break/return paths and
 * some intermediate statements are elided in this excerpt.
 */
131 static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
133 struct mmc *mmc = host->mmc;
135 u32 timeout, mask, size, i, len = 0;
137 ulong start = get_timer(0);
/* FIFO depth reconstructed from the RX watermark field of fifoth_val
 * (watermark is programmed to depth/2 - 1 in dwmci_init). */
138 u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
139 RX_WMARK_SHIFT) + 1) * 2;
141 size = data->blocksize * data->blocks;
/* Select the CPU-side buffer for PIO according to direction. */
142 if (data->flags == MMC_DATA_READ)
143 buf = (unsigned int *)data->dest;
145 buf = (unsigned int *)data->src;
147 timeout = dwmci_get_timeout(mmc, size);
152 mask = dwmci_readl(host, DWMCI_RINTSTS);
153 /* Error during data transfer. */
154 if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
155 debug("%s: DATA ERROR!\n", __func__);
/* PIO path: only while bytes remain. */
160 if (host->fifo_mode && size) {
162 if (data->flags == MMC_DATA_READ &&
163 (mask & DWMCI_INTMSK_RXDR)) {
/* Wait until the RX FIFO has data, then drain it word by word. */
165 ret = dwmci_fifo_ready(host,
/* STATUS FIFO-count field -> number of words available. */
171 len = (len >> DWMCI_FIFO_SHIFT) &
173 len = min(size, len);
174 for (i = 0; i < len; i++)
176 dwmci_readl(host, DWMCI_DATA);
177 size = size > len ? (size - len) : 0;
/* Acknowledge the RX-data-request interrupt. */
179 dwmci_writel(host, DWMCI_RINTSTS,
181 } else if (data->flags == MMC_DATA_WRITE &&
182 (mask & DWMCI_INTMSK_TXDR)) {
/* Wait for TX FIFO space, then fill up to the free word count. */
184 ret = dwmci_fifo_ready(host,
190 len = fifo_depth - ((len >>
193 len = min(size, len);
194 for (i = 0; i < len; i++)
195 dwmci_writel(host, DWMCI_DATA,
197 size = size > len ? (size - len) : 0;
/* Acknowledge the TX-data-request interrupt. */
199 dwmci_writel(host, DWMCI_RINTSTS,
204 /* Data arrived correctly. */
205 if (mask & DWMCI_INTMSK_DTO) {
210 /* Check for timeout. */
211 if (get_timer(start) > timeout) {
212 debug("%s: Timeout waiting for data!\n",
/* Clear the interrupt status bits handled this iteration. */
219 dwmci_writel(host, DWMCI_RINTSTS, mask);
/*
 * Compute the CMD-register transfer-mode bits for a data command:
 * always expect data, plus the write direction bit when applicable.
 *
 * NOTE(review): the return statement is elided in this excerpt; 'mode'
 * is presumably returned to be OR-ed into the command flags.
 */
224 static int dwmci_set_transfer_mode(struct dwmci_host *host,
225 struct mmc_data *data)
229 mode = DWMCI_CMD_DATA_EXP;
230 if (data->flags & MMC_DATA_WRITE)
231 mode |= DWMCI_CMD_RW;
/*
 * Send one MMC command (and optionally run its data phase).
 *
 * Flow: wait for the card/controller to go non-busy, clear stale
 * interrupts, set up the data phase (PIO registers in fifo_mode, or
 * IDMAC descriptors over a bounce buffer otherwise), build the CMD
 * flags, fire the command, poll for command-done, decode response
 * errors, read back the response registers, then run the data transfer
 * and tear down DMA.
 *
 * NOTE(review): two function signatures appear below — the driver-model
 * (struct udevice *) and legacy (struct mmc *) entry points; the
 * #if/#else preprocessor lines selecting between them appear to be
 * elided in this excerpt, along with various braces and return paths.
 */
237 static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
238 struct mmc_data *data)
240 struct mmc *mmc = mmc_get_mmc_dev(dev);
242 static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
243 struct mmc_data *data)
246 struct dwmci_host *host = mmc->priv;
/* One IDMAC descriptor per 8 blocks (matches the chunking in
 * dwmci_prepare_data), cache-aligned for the flush/invalidate calls. */
247 ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
248 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
249 int ret = 0, flags = 0, i;
250 unsigned int timeout = 500;
253 ulong start = get_timer(0);
254 struct bounce_buffer bbstate;
/* Wait (up to 'timeout' ms) for the controller to leave the busy state. */
256 while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
257 if (get_timer(start) > timeout) {
258 debug("%s: Timeout on data busy\n", __func__);
/* Clear all pending raw interrupt status before issuing the command. */
263 dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);
/* Data setup: PIO mode programs geometry directly... */
266 if (host->fifo_mode) {
267 dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
268 dwmci_writel(host, DWMCI_BYTCNT,
269 data->blocksize * data->blocks);
270 dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
/* ...DMA mode stages the user buffer through a bounce buffer
 * (GEN_BB_WRITE: DMA writes it on read; GEN_BB_READ: DMA reads it). */
272 if (data->flags == MMC_DATA_READ) {
273 ret = bounce_buffer_start(&bbstate,
276 data->blocks, GEN_BB_WRITE);
278 ret = bounce_buffer_start(&bbstate,
281 data->blocks, GEN_BB_READ);
287 dwmci_prepare_data(host, data, cur_idmac,
288 bbstate.bounce_buffer);
292 dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);
295 flags = dwmci_set_transfer_mode(host, data);
/* R1b-style long responses with busy are rejected (elided branch). */
297 if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
/* CMD12 uses the abort/stop bit instead of waiting for prior data. */
300 if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
301 flags |= DWMCI_CMD_ABORT_STOP;
303 flags |= DWMCI_CMD_PRV_DAT_WAIT;
/* Response expectation bits derived from the MMC response type. */
305 if (cmd->resp_type & MMC_RSP_PRESENT) {
306 flags |= DWMCI_CMD_RESP_EXP;
307 if (cmd->resp_type & MMC_RSP_136)
308 flags |= DWMCI_CMD_RESP_LENGTH;
311 if (cmd->resp_type & MMC_RSP_CRC)
312 flags |= DWMCI_CMD_CHECK_CRC;
/* Start the command; USE_HOLD_REG routes CMD through the hold register. */
314 flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);
316 debug("Sending CMD%d\n",cmd->cmdidx);
318 dwmci_writel(host, DWMCI_CMD, flags);
/* Poll for command-done, bounded by 'retry' iterations (value elided). */
320 for (i = 0; i < retry; i++) {
321 mask = dwmci_readl(host, DWMCI_RINTSTS);
322 if (mask & DWMCI_INTMSK_CDONE) {
324 dwmci_writel(host, DWMCI_RINTSTS, mask);
330 debug("%s: Timeout.\n", __func__);
/* Decode response-phase errors from the captured status mask. */
334 if (mask & DWMCI_INTMSK_RTO) {
336 * Timeout here is not necessarily fatal. (e)MMC cards
337 * will splat here when they receive CMD55 as they do
338 * not support this command and that is exactly the way
339 * to tell them apart from SD cards. Thus, this output
340 * below shall be debug(). eMMC cards also do not favor
341 * CMD8, please keep that in mind.
343 debug("%s: Response Timeout.\n", __func__);
345 } else if (mask & DWMCI_INTMSK_RE) {
346 debug("%s: Response Error.\n", __func__);
348 } else if ((cmd->resp_type & MMC_RSP_CRC) &&
349 (mask & DWMCI_INTMSK_RCRC)) {
350 debug("%s: Response CRC Error.\n", __func__);
/* Read back the response; 136-bit responses arrive MSW in RESP3. */
355 if (cmd->resp_type & MMC_RSP_PRESENT) {
356 if (cmd->resp_type & MMC_RSP_136) {
357 cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
358 cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
359 cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
360 cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
362 cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
/* Data phase (elided guard presumably checks 'data' is non-NULL). */
367 ret = dwmci_data_transfer(host, data);
369 /* only dma mode need it */
370 if (!host->fifo_mode) {
/* Wait for the IDMAC receive/transmit-done interrupt, then tidy up. */
371 if (data->flags == MMC_DATA_READ)
372 mask = DWMCI_IDINTEN_RI;
374 mask = DWMCI_IDINTEN_TI;
375 ret = wait_for_bit_le32(host->ioaddr + DWMCI_IDSTS,
376 mask, true, 1000, false);
378 debug("%s: DWMCI_IDINTEN mask 0x%x timeout.\n",
380 /* clear interrupts */
381 dwmci_writel(host, DWMCI_IDSTS, DWMCI_IDINTEN_MASK);
/* Disable DMA and release the bounce buffer (copies data back on read). */
383 ctrl = dwmci_readl(host, DWMCI_CTRL);
384 ctrl &= ~(DWMCI_DMA_EN);
385 dwmci_writel(host, DWMCI_CTRL, ctrl);
386 bounce_buffer_stop(&bbstate);
/*
 * Program the card clock to @freq Hz.
 *
 * Determines the source clock (via host->get_mmc_clk callback, or the
 * user-supplied host->bus_hz), computes the divider, then performs the
 * standard DW-MMC clock-update dance: disable CLKENA, write the divider,
 * issue an UPD_CLK command, re-enable the clock, issue UPD_CLK again.
 *
 * NOTE(review): timeout initialisation and return statements are elided
 * in this excerpt.
 */
395 static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
/* No-op when already at the requested frequency or when freq is 0. */
401 if ((freq == host->clock) || (freq == 0))
404 * If host->get_mmc_clk isn't defined,
405 * then assume that host->bus_hz is source clock value.
406 * host->bus_hz should be set by user.
408 if (host->get_mmc_clk)
409 sclk = host->get_mmc_clk(host, freq);
410 else if (host->bus_hz)
/* Neither source available: cannot compute a divider. */
413 debug("%s: Didn't get source clock value.\n", __func__);
418 div = 0; /* bypass mode */
/* CLKDIV is in units of 2: actual divider = 2 * div. */
420 div = DIV_ROUND_UP(sclk, 2 * freq);
/* Gate the clock before changing the divider. */
422 dwmci_writel(host, DWMCI_CLKENA, 0);
423 dwmci_writel(host, DWMCI_CLKSRC, 0);
425 dwmci_writel(host, DWMCI_CLKDIV, div);
/* First update-clock command: latch the new divider into the CIU. */
426 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
427 DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
/* Hardware clears CMD_START when the update completes. */
430 status = dwmci_readl(host, DWMCI_CMD);
432 debug("%s: Timeout!\n", __func__);
435 } while (status & DWMCI_CMD_START);
/* Re-enable the clock with low-power (auto-gating) mode. */
437 dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
438 DWMCI_CLKEN_LOW_PWR);
/* Second update-clock command: latch the enable. */
440 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
441 DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
445 status = dwmci_readl(host, DWMCI_CMD);
447 debug("%s: Timeout!\n", __func__);
450 } while (status & DWMCI_CMD_START);
/*
 * Apply MMC core I/O settings: clock frequency, bus width and DDR mode.
 *
 * NOTE(review): two signatures appear below — driver-model and legacy
 * entry points; the selecting #if/#else preprocessor lines appear to be
 * elided in this excerpt, as are the switch case labels and return.
 */
458 static int dwmci_set_ios(struct udevice *dev)
460 struct mmc *mmc = mmc_get_mmc_dev(dev);
462 static int dwmci_set_ios(struct mmc *mmc)
465 struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
468 debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);
470 dwmci_setup_bus(host, mmc->clock);
/* Map the MMC bus width onto the controller's card-type register. */
471 switch (mmc->bus_width) {
473 ctype = DWMCI_CTYPE_8BIT;
476 ctype = DWMCI_CTYPE_4BIT;
479 ctype = DWMCI_CTYPE_1BIT;
483 dwmci_writel(host, DWMCI_CTYPE, ctype);
/* Set or clear the DDR bit according to mmc->ddr_mode (guard elided). */
485 regs = dwmci_readl(host, DWMCI_UHS_REG);
487 regs |= DWMCI_DDR_MODE;
489 regs &= ~DWMCI_DDR_MODE;
491 dwmci_writel(host, DWMCI_UHS_REG, regs);
/*
 * One-time controller initialisation: board hook, power-on, full reset,
 * enumeration clock, interrupt masking, timeouts, FIFO watermarks and
 * (in DMA mode) IDMAC interrupt enables.
 *
 * NOTE(review): error-return paths and the final return are elided in
 * this excerpt.
 */
499 static int dwmci_init(struct mmc *mmc)
501 struct dwmci_host *host = mmc->priv;
/* Optional board-specific setup hook. */
503 if (host->board_init)
504 host->board_init(host);
506 dwmci_writel(host, DWMCI_PWREN, 1);
/* Full controller reset must succeed before anything else. */
508 if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
509 debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
513 /* Enumerate at 400KHz */
514 dwmci_setup_bus(host, mmc->cfg->f_min);
/* Clear and mask all interrupts; the driver polls RINTSTS instead. */
516 dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
517 dwmci_writel(host, DWMCI_INTMASK, 0);
/* Maximum data/response timeout. */
519 dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);
521 dwmci_writel(host, DWMCI_IDINTEN, 0);
522 dwmci_writel(host, DWMCI_BMOD, 1);
/* Derive default FIFO watermarks from the hardware-reported depth
 * unless the platform supplied an explicit fifoth_val. */
524 if (!host->fifoth_val) {
527 fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
528 fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
529 host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
530 TX_WMARK(fifo_size / 2);
532 dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);
534 dwmci_writel(host, DWMCI_CLKENA, 0);
535 dwmci_writel(host, DWMCI_CLKSRC, 0);
/* DMA mode relies on IDMAC interrupts; PIO mode leaves them off. */
537 if (!host->fifo_mode)
538 dwmci_writel(host, DWMCI_IDINTEN, DWMCI_IDINTEN_MASK);
/*
 * Driver-model probe: fetch the mmc instance bound to @dev and run the
 * common controller initialisation.
 */
544 int dwmci_probe(struct udevice *dev)
546 struct mmc *mmc = mmc_get_mmc_dev(dev);
548 return dwmci_init(mmc);
/* Driver-model MMC operations table (closing brace elided in excerpt). */
551 const struct dm_mmc_ops dm_dwmci_ops = {
552 .send_cmd = dwmci_send_cmd,
553 .set_ios = dwmci_set_ios,
/* Legacy (non-driver-model) MMC operations table — presumably guarded by
 * an elided #else of the CONFIG_DM_MMC conditional. */
557 static const struct mmc_ops dwmci_ops = {
558 .send_cmd = dwmci_send_cmd,
559 .set_ios = dwmci_set_ios,
/*
 * Populate an mmc_config for a DW-MMC host: name, ops (legacy builds),
 * clock limits, supported voltages and host capabilities.
 *
 * @cfg:     configuration structure to fill
 * @host:    host whose name/caps/buswidth seed the config
 * @max_clk: maximum card clock (f_max)
 * @min_clk: minimum card clock (f_min, used for enumeration)
 */
564 void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
565 u32 max_clk, u32 min_clk)
567 cfg->name = host->name;
568 #ifndef CONFIG_DM_MMC
569 cfg->ops = &dwmci_ops;
571 cfg->f_min = min_clk;
572 cfg->f_max = max_clk;
574 cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
576 cfg->host_caps = host->caps;
/* 8-bit and 4-bit modes are mutually exclusive in the caps mask. */
578 if (host->buswidth == 8) {
579 cfg->host_caps |= MMC_MODE_8BIT;
580 cfg->host_caps &= ~MMC_MODE_4BIT;
582 cfg->host_caps |= MMC_MODE_4BIT;
583 cfg->host_caps &= ~MMC_MODE_8BIT;
585 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
587 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
/* Thin wrapper: bind @mmc with @cfg to the driver-model device @dev. */
591 int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
593 return mmc_bind(dev, mmc, cfg);
596 int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
598 dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);
600 host->mmc = mmc_create(&host->cfg, host);
601 if (host->mmc == NULL)