2 * Designware SPI core controller driver (refer pxa2xx_spi.c)
4 * Copyright (c) 2009, Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/highmem.h>
24 #include <linux/delay.h>
25 #include <linux/slab.h>
26 #include <linux/spi/spi.h>
27 #include <linux/gpio.h>
31 #ifdef CONFIG_DEBUG_FS
32 #include <linux/debugfs.h>
/*
 * Message state tokens stored in spi_message->state; compared/assigned in
 * next_transfer(), dw_spi_xfer_done() and pump_transfers() below.
 */
35 #define START_STATE ((void *)0)
36 #define RUNNING_STATE ((void *)1)
37 #define DONE_STATE ((void *)2)
38 #define ERROR_STATE ((void *)-1)
40 /* Slave spi_dev related */
/*
 * NOTE(review): this is the field list of the per-device "struct chip_data"
 * (allocated with kzalloc in dw_spi_setup(), stored via spi_set_ctldata());
 * the struct header/footer lines are not visible in this chunk.
 */
43 u8 cs; /* chip select pin */
44 u8 n_bytes; /* current is a 1/2/4 byte op */
45 u8 tmode; /* TR/TO/RO/EEPROM */
46 u8 type; /* SPI/SSP/MicroWire */
48 u8 poll_mode; /* 1 means use poll mode */
55 u16 clk_div; /* baud rate divider */
56 u32 speed_hz; /* baud rate */
57 void (*cs_control)(u32 command); /* optional platform chip-select hook */
60 #ifdef CONFIG_DEBUG_FS
/* Upper bound on the formatted register dump built below */
61 #define SPI_REGS_BUFSIZE 1024
/*
 * debugfs read() handler: snapshot the controller registers into a
 * kzalloc'd buffer and copy it to userspace via simple_read_from_buffer().
 * NOTE(review): the declarations of dws/buf/len/ret, the allocation-failure
 * check, the kfree(buf) and the final return are not visible in this chunk.
 */
62 static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
70 dws = file->private_data;
72 buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
76 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
77 "MRST SPI0 registers:\n")
78 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
79 "=================================\n");
80 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
81 "CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
82 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
83 "CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
84 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
85 "SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
86 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
87 "SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
88 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
89 "BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
90 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
91 "TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
92 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
93 "RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
94 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
95 "TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
96 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
97 "RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
98 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
99 "SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
100 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
101 "IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
102 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
103 "ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
104 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
105 "DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
106 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
107 "DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
108 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
109 "DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
110 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
111 "=================================\n");
113 ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
/* debugfs file_operations for the "registers" dump file created below.
 * NOTE(review): an .open member (likely simple_open) is not visible here. */
118 static const struct file_operations mrst_spi_regs_ops = {
119 .owner = THIS_MODULE,
121 .read = spi_show_regs,
122 .llseek = default_llseek,
/*
 * Create the "mrst_spi" debugfs directory and its read-only "registers"
 * file, wired to spi_show_regs() with dws as private data.
 * NOTE(review): the NULL-check of dws->debugfs and the return statements
 * are not visible in this chunk.
 */
125 static int mrst_spi_debugfs_init(struct dw_spi *dws)
127 dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
131 debugfs_create_file("registers", S_IFREG | S_IRUGO,
132 dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
/* Tear down the whole debugfs directory created by mrst_spi_debugfs_init() */
136 static void mrst_spi_debugfs_remove(struct dw_spi *dws)
139 debugfs_remove_recursive(dws->debugfs);
/* No-op stubs when CONFIG_DEBUG_FS is disabled (bodies not visible here;
 * presumably "return 0;" and an empty body respectively). */
143 static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
148 static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
151 #endif /* CONFIG_DEBUG_FS */
153 /* Return the max entries we can fill into tx fifo */
/*
 * Bounded by three limits: (a) words remaining in the transfer,
 * (b) free slots in the TX FIFO, and (c) the rx/tx progress gap,
 * so that outstanding words can never overflow the RX FIFO.
 */
154 static inline u32 tx_max(struct dw_spi *dws)
156 u32 tx_left, tx_room, rxtx_gap;
/* Words still to be written for the current transfer */
158 tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
/* Free entries currently available in the TX FIFO */
159 tx_room = dws->fifo_len - dw_readw(dws, DW_SPI_TXFLR);
162 * Another concern is about the tx/rx mismatch, we
163 * thought to use (dws->fifo_len - rxflr - txflr) as
164 * one maximum value for tx, but it doesn't cover the
165 * data which is out of tx/rx fifo and inside the
166 * shift registers. So a control from sw point of
/* NOTE(review): the continuation of this expression (original line 170,
 * presumably "/ dws->n_bytes;") is not visible in this chunk. */
169 rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
172 return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
175 /* Return the max entries we should read out of rx fifo */
/* Lesser of: words still expected by the transfer, and words sitting in
 * the RX FIFO (RXFLR) right now. */
176 static inline u32 rx_max(struct dw_spi *dws)
178 u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
180 return min(rx_left, (u32)dw_readw(dws, DW_SPI_RXFLR));
/*
 * Push up to tx_max() words into the TX FIFO under dws->buf_lock.
 * NOTE(review): the local declarations (max, txw) and the loop header
 * (presumably "while (max--)") are not visible in this chunk; writing a
 * zero txw when tx_buf is NULL keeps the clock running for rx-only
 * transfers.
 */
183 static void dw_writer(struct dw_spi *dws)
188 spin_lock(&dws->buf_lock);
191 /* Set the tx word if the transfer's original "tx" is not null */
/* (tx_end - len) recovers the transfer's original tx pointer: non-zero
 * means the caller actually supplied a tx buffer. */
192 if (dws->tx_end - dws->len) {
193 if (dws->n_bytes == 1)
194 txw = *(u8 *)(dws->tx);
196 txw = *(u16 *)(dws->tx);
198 dw_writew(dws, DW_SPI_DR, txw);
199 dws->tx += dws->n_bytes;
201 spin_unlock(&dws->buf_lock);
/*
 * Drain up to rx_max() words from the RX FIFO under dws->buf_lock.
 * NOTE(review): the local declarations (max, rxw) and the loop header
 * (presumably "while (max--)") are not visible in this chunk; the data
 * register is always read to empty the FIFO even when rx_buf is NULL.
 */
204 static void dw_reader(struct dw_spi *dws)
209 spin_lock(&dws->buf_lock);
212 rxw = dw_readw(dws, DW_SPI_DR);
213 /* Care rx only if the transfer's original "rx" is not null */
/* (rx_end - len) recovers the transfer's original rx pointer */
214 if (dws->rx_end - dws->len) {
215 if (dws->n_bytes == 1)
216 *(u8 *)(dws->rx) = rxw;
218 *(u16 *)(dws->rx) = rxw;
220 dws->rx += dws->n_bytes;
222 spin_unlock(&dws->buf_lock);
/*
 * Advance dws->cur_transfer to the next entry of the message's transfer
 * list and return RUNNING_STATE; when the list is exhausted the function
 * presumably returns DONE_STATE (that return is not visible in this chunk,
 * but dw_spi_xfer_done() compares the result against DONE_STATE).
 */
225 static void *next_transfer(struct dw_spi *dws)
227 struct spi_message *msg = dws->cur_msg;
228 struct spi_transfer *trans = dws->cur_transfer;
230 /* Move to next transfer */
231 if (trans->transfer_list.next != &msg->transfers) {
/* NOTE(review): the assignment target of this list_entry() (presumably
 * "dws->cur_transfer =") is on a line missing from this chunk. */
233 list_entry(trans->transfer_list.next,
236 return RUNNING_STATE;
242 * Note: first step is the protocol driver prepares
243 * a dma-capable memory, and this func just need translate
244 * the virt addr to physical
/*
 * Decide whether the current transfer can use DMA: requires the message
 * to be pre-mapped (is_dma_mapped) and DMA enabled for the chip, then
 * adopts the transfer's pre-computed tx/rx DMA addresses.
 * NOTE(review): the early "return 0" and final "return 1" (and any extra
 * conditions on the if) are not visible in this chunk.
 */
246 static int map_dma_buffers(struct dw_spi *dws)
248 if (!dws->cur_msg->is_dma_mapped
250 || !dws->cur_chip->enable_dma
254 if (dws->cur_transfer->tx_dma)
255 dws->tx_dma = dws->cur_transfer->tx_dma;
257 if (dws->cur_transfer->rx_dma)
258 dws->rx_dma = dws->cur_transfer->rx_dma;
263 /* Caller already set message->status; dma and pio irqs are blocked */
/*
 * Hand the finished message back to the SPI core: clear the per-message
 * driver state, deassert chip select unless the last transfer asked to
 * keep it (cs_change), then spi_finalize_current_message().
 * NOTE(review): the line capturing msg (presumably "msg = dws->cur_msg;"
 * plus clearing dws->cur_msg) is not visible in this chunk.
 */
264 static void giveback(struct dw_spi *dws)
266 struct spi_transfer *last_transfer;
267 struct spi_message *msg;
271 dws->cur_transfer = NULL;
/* Remember the chip so pump_transfers() can skip reprogramming for it */
272 dws->prev_chip = dws->cur_chip;
273 dws->cur_chip = NULL;
276 last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
279 if (!last_transfer->cs_change)
280 spi_chip_sel(dws, msg->spi, 0);
282 spi_finalize_current_message(dws->master);
/*
 * Abort the current message after a FIFO error: disable the controller,
 * log the reason, mark the message ERROR_STATE and let pump_transfers()
 * (scheduled as a tasklet) perform the giveback with -EIO.
 */
285 static void int_error_stop(struct dw_spi *dws, const char *msg)
288 spi_enable_chip(dws, 0);
290 dev_err(&dws->master->dev, "%s\n", msg);
291 dws->cur_msg->state = ERROR_STATE;
292 tasklet_schedule(&dws->pump_transfers);
/*
 * Called when one transfer completes (from IRQ, poll or DMA paths):
 * account the transferred bytes, advance the message state machine, and
 * reschedule pump_transfers() to either start the next transfer or
 * give the message back (DONE_STATE with status 0).
 */
295 void dw_spi_xfer_done(struct dw_spi *dws)
297 /* Update total byte transferred return count actual bytes read */
298 dws->cur_msg->actual_length += dws->len;
300 /* Move to next transfer */
301 dws->cur_msg->state = next_transfer(dws);
303 /* Handle end of message */
304 if (dws->cur_msg->state == DONE_STATE) {
305 dws->cur_msg->status = 0;
308 tasklet_schedule(&dws->pump_transfers);
310 EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
/*
 * PIO interrupt handler body, installed as dws->transfer_handler.
 * On overflow/underflow it aborts the message; otherwise it drains the
 * RX FIFO, finishes the transfer when all rx data has arrived, or
 * refills the TX FIFO on TX-empty.
 * NOTE(review): the dw_reader()/dw_writer() calls and the IRQ_HANDLED
 * returns are on lines missing from this chunk.
 */
312 static irqreturn_t interrupt_transfer(struct dw_spi *dws)
314 u16 irq_status = dw_readw(dws, DW_SPI_ISR);
317 if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
/* Reading the *ICR registers clears the latched error interrupts
 * (per the DW_apb_ssi register model — values are discarded). */
318 dw_readw(dws, DW_SPI_TXOICR);
319 dw_readw(dws, DW_SPI_RXOICR);
320 dw_readw(dws, DW_SPI_RXUICR);
321 int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
/* All expected rx data received: stop TX-empty irqs and finish */
326 if (dws->rx_end == dws->rx) {
327 spi_mask_intr(dws, SPI_INT_TXEI);
328 dw_spi_xfer_done(dws);
/* Mask TXEI while refilling so the level trigger cannot re-fire */
331 if (irq_status & SPI_INT_TXEI) {
332 spi_mask_intr(dws, SPI_INT_TXEI);
334 /* Enable TX irq always, it will be disabled when RX finished */
335 spi_umask_intr(dws, SPI_INT_TXEI);
/*
 * Top-level (shared) IRQ handler: reads the masked interrupt status and
 * dispatches to the per-mode handler (interrupt_transfer for PIO).
 * NOTE(review): the "no status bits set -> IRQ_NONE" and "no cur_msg"
 * guard paths are on lines missing from this chunk; 0x3f keeps only the
 * six interrupt status bits.
 */
341 static irqreturn_t dw_spi_irq(int irq, void *dev_id)
343 struct dw_spi *dws = dev_id;
344 u16 irq_status = dw_readw(dws, DW_SPI_ISR) & 0x3f;
/* Presumably the spurious-irq path: quiesce TXEI before bailing out */
350 spi_mask_intr(dws, SPI_INT_TXEI);
354 return dws->transfer_handler(dws);
357 /* Must be called inside pump_transfers() */
/*
 * Busy-poll the transfer to completion: loop (body lines not visible in
 * this chunk, presumably dw_writer()/dw_reader()) until every expected
 * rx word has been consumed, then signal completion.
 */
358 static void poll_transfer(struct dw_spi *dws)
364 } while (dws->rx_end > dws->rx);
366 dw_spi_xfer_done(dws);
/*
 * Tasklet body driving the message state machine: handles error/done
 * states, applies the inter-transfer delay, loads the next transfer's
 * buffers and per-transfer speed/bpw overrides into the controller,
 * then kicks off the transfer via DMA, interrupt or polling.
 * NOTE(review): many lines are missing from this chunk — the local
 * declarations (cr0, clk_div, speed, bits, imask, txint_level, cs_change,
 * flags), the early-exit/giveback gotos, and the poll_transfer() tail.
 */
369 static void pump_transfers(unsigned long data)
371 struct dw_spi *dws = (struct dw_spi *)data;
372 struct spi_message *message = NULL;
373 struct spi_transfer *transfer = NULL;
374 struct spi_transfer *previous = NULL;
375 struct spi_device *spi = NULL;
376 struct chip_data *chip = NULL;
386 /* Get current state information */
387 message = dws->cur_msg;
388 transfer = dws->cur_transfer;
389 chip = dws->cur_chip;
392 if (message->state == ERROR_STATE) {
393 message->status = -EIO;
397 /* Handle end of message */
398 if (message->state == DONE_STATE) {
403 /* Delay if requested at end of transfer*/
404 if (message->state == RUNNING_STATE) {
405 previous = list_entry(transfer->transfer_list.prev,
408 if (previous->delay_usecs)
409 udelay(previous->delay_usecs);
/* Load per-chip defaults; may be overridden per-transfer below */
412 dws->n_bytes = chip->n_bytes;
413 dws->dma_width = chip->dma_width;
414 dws->cs_control = chip->cs_control;
/* Buffer pointers are shared with dw_writer()/dw_reader() -> lock */
416 spin_lock_irqsave(&dws->buf_lock, flags);
417 dws->rx_dma = transfer->rx_dma;
418 dws->tx_dma = transfer->tx_dma;
419 dws->tx = (void *)transfer->tx_buf;
420 dws->tx_end = dws->tx + transfer->len;
421 dws->rx = transfer->rx_buf;
422 dws->rx_end = dws->rx + transfer->len;
423 dws->len = dws->cur_transfer->len;
424 if (chip != dws->prev_chip)
426 spin_unlock_irqrestore(&dws->buf_lock, flags);
430 /* Handle per transfer options for bpw and speed */
431 if (transfer->speed_hz) {
432 speed = chip->speed_hz;
434 if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
435 speed = transfer->speed_hz;
437 /* clk_div doesn't support odd number */
438 clk_div = dws->max_freq / speed;
/* Round up to the next even divider */
439 clk_div = (clk_div + 1) & 0xfffe;
441 chip->speed_hz = speed;
442 chip->clk_div = clk_div;
445 if (transfer->bits_per_word) {
446 bits = transfer->bits_per_word;
447 dws->n_bytes = dws->dma_width = bits >> 3;
/* NOTE(review): the start of this cr0 assignment (presumably
 * "cr0 = bits - 1" or similar) is on a line missing from this chunk. */
449 | (chip->type << SPI_FRF_OFFSET)
450 | (spi->mode << SPI_MODE_OFFSET)
451 | (chip->tmode << SPI_TMOD_OFFSET);
453 message->state = RUNNING_STATE;
456 * Adjust transfer mode if necessary. Requires platform dependent
457 * chipselect mechanism.
459 if (dws->cs_control) {
460 if (dws->rx && dws->tx)
461 chip->tmode = SPI_TMOD_TR;
463 chip->tmode = SPI_TMOD_RO;
465 chip->tmode = SPI_TMOD_TO;
467 cr0 &= ~SPI_TMOD_MASK;
468 cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
471 /* Check if current transfer is a DMA transaction */
472 dws->dma_mapped = map_dma_buffers(dws);
476 * we only need set the TXEI IRQ, as TX/RX always happen synchronously
/* PIO interrupt mode: pick a TX FIFO threshold and arm the handler */
478 if (!dws->dma_mapped && !chip->poll_mode) {
479 int templen = dws->len / dws->n_bytes;
480 txint_level = dws->fifo_len / 2;
481 txint_level = (templen > txint_level) ? txint_level : templen;
483 imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI;
484 dws->transfer_handler = interrupt_transfer;
488 * Reprogram registers only if
489 * 1. chip select changes
490 * 2. clk_div is changed
491 * 3. control value changes
/* SSI must be disabled while CTRL0/BAUDR are rewritten */
493 if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change || clk_div || imask) {
494 spi_enable_chip(dws, 0);
496 if (dw_readw(dws, DW_SPI_CTRL0) != cr0)
497 dw_writew(dws, DW_SPI_CTRL0, cr0);
499 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
500 spi_chip_sel(dws, spi, 1);
502 /* Set the interrupt mask, for poll mode just disable all int */
503 spi_mask_intr(dws, 0xff);
505 spi_umask_intr(dws, imask);
507 dw_writew(dws, DW_SPI_TXFLTR, txint_level);
509 spi_enable_chip(dws, 1);
511 dws->prev_chip = chip;
515 dws->dma_ops->dma_transfer(dws, cs_change);
/*
 * spi_master->transfer_one_message hook: seed the message state machine
 * (START_STATE, first transfer, per-device chip data) and kick the
 * pump_transfers tasklet which does the actual work.
 * NOTE(review): the "dws->cur_msg = msg;" assignment and the return
 * statement are on lines missing from this chunk.
 */
527 static int dw_spi_transfer_one_message(struct spi_master *master,
528 struct spi_message *msg)
530 struct dw_spi *dws = spi_master_get_devdata(master);
533 /* Initial message state*/
534 dws->cur_msg->state = START_STATE;
535 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
538 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
540 /* Launch transfers */
541 tasklet_schedule(&dws->pump_transfers);
546 /* This may be called twice for each spi dev */
/*
 * spi_master->setup hook: allocate (once) the per-device chip_data,
 * import optional platform overrides from spi->controller_data, derive
 * n_bytes/dma_width from bits_per_word, precompute the CTRL0 template,
 * and park a GPIO chip-select in its inactive level.
 * NOTE(review): the kzalloc-failure check, the "else" word-size branches,
 * the unsupported-bpw error path and the return statements are on lines
 * missing from this chunk.
 */
547 static int dw_spi_setup(struct spi_device *spi)
549 struct dw_spi_chip *chip_info = NULL;
550 struct chip_data *chip;
553 /* Only alloc on first setup */
554 chip = spi_get_ctldata(spi);
556 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
559 spi_set_ctldata(spi, chip);
563 * Protocol drivers may change the chip settings, so...
564 * if chip_info exists, use it
566 chip_info = spi->controller_data;
568 /* chip_info doesn't always exist */
570 if (chip_info->cs_control)
571 chip->cs_control = chip_info->cs_control;
573 chip->poll_mode = chip_info->poll_mode;
574 chip->type = chip_info->type;
576 chip->rx_threshold = 0;
577 chip->tx_threshold = 0;
579 chip->enable_dma = chip_info->enable_dma;
582 if (spi->bits_per_word == 8) {
585 } else if (spi->bits_per_word == 16) {
589 chip->bits_per_word = spi->bits_per_word;
591 if (!spi->max_speed_hz) {
592 dev_err(&spi->dev, "No max speed HZ parameter\n");
596 chip->tmode = 0; /* Tx & Rx */
597 /* Default SPI mode is SCPOL = 0, SCPH = 0 */
598 chip->cr0 = (chip->bits_per_word - 1)
599 | (chip->type << SPI_FRF_OFFSET)
600 | (spi->mode << SPI_MODE_OFFSET)
601 | (chip->tmode << SPI_TMOD_OFFSET);
/* Drive a valid CS GPIO to its inactive level (active-low by default) */
603 if (gpio_is_valid(spi->cs_gpio)) {
604 ret = gpio_direction_output(spi->cs_gpio,
605 !(spi->mode & SPI_CS_HIGH));
/*
 * spi_master->cleanup hook: release the per-device chip_data allocated
 * in dw_spi_setup(). NOTE(review): the kfree(chip) call (original line
 * ~617) is not visible in this chunk.
 */
613 static void dw_spi_cleanup(struct spi_device *spi)
615 struct chip_data *chip = spi_get_ctldata(spi);
618 spi_set_ctldata(spi, NULL);
621 /* Restart the controller, disable all interrupts, clean rx fifo */
622 static void spi_hw_init(struct dw_spi *dws)
624 spi_enable_chip(dws, 0);
625 spi_mask_intr(dws, 0xff);
626 spi_enable_chip(dws, 1);
629 * Try to detect the FIFO depth if not set by interface driver,
630 * the depth could be from 2 to 256 from HW spec
632 if (!dws->fifo_len) {
/* Probe: TXFLTR only accepts thresholds below the FIFO depth, so
 * write increasing values until one fails to read back.
 * NOTE(review): the declaration of "fifo" and the loop-break line
 * are not visible in this chunk. */
634 for (fifo = 1; fifo < 256; fifo++) {
635 dw_writew(dws, DW_SPI_TXFLTR, fifo);
636 if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
640 dws->fifo_len = (fifo == 1) ? 0 : fifo;
/* Restore the default threshold after probing */
641 dw_writew(dws, DW_SPI_TXFLTR, 0);
/*
 * Register a DW SPI controller with the SPI core: allocate the master,
 * hook up the IRQ, fill in the spi_master callbacks, optionally init
 * DMA, init the pump_transfers tasklet and register the master.
 * Returns 0 on success or a negative errno (error paths unwind in
 * reverse order via the goto labels).
 * NOTE(review): several lines (allocation-failure checks, spi_hw_init()
 * call, the err_* labels and the final returns) are missing from this
 * chunk of the extraction.
 */
645 int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
647 struct spi_master *master;
652 master = spi_alloc_master(dev, 0);
656 dws->master = master;
657 dws->type = SSI_MOTO_SPI;
658 dws->prev_chip = NULL;
/* 0x60 is the DR (data register) offset used as the DMA target */
660 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
661 snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
663 spin_lock_init(&dws->buf_lock);
665 ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, dws);
667 dev_err(&master->dev, "can not get IRQ\n");
668 goto err_free_master;
671 master->mode_bits = SPI_CPOL | SPI_CPHA;
672 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
673 master->bus_num = dws->bus_num;
674 master->num_chipselect = dws->num_cs;
675 master->setup = dw_spi_setup;
676 master->cleanup = dw_spi_cleanup;
677 master->transfer_one_message = dw_spi_transfer_one_message;
678 master->max_speed_hz = dws->max_freq;
683 if (dws->dma_ops && dws->dma_ops->dma_init) {
684 ret = dws->dma_ops->dma_init(dws);
686 dev_warn(&master->dev, "DMA init failed\n");
691 tasklet_init(&dws->pump_transfers, pump_transfers, (unsigned long)dws);
693 spi_master_set_devdata(master, dws);
694 ret = devm_spi_register_master(dev, master);
696 dev_err(&master->dev, "problem registering spi master\n");
700 mrst_spi_debugfs_init(dws);
704 if (dws->dma_ops && dws->dma_ops->dma_exit)
705 dws->dma_ops->dma_exit(dws);
706 spi_enable_chip(dws, 0);
/* BUGFIX: the IRQ was registered with dev_id == dws (see request_irq
 * above); for a shared IRQ, free_irq() must receive the same dev_id,
 * otherwise the handler is never removed. Was: free_irq(dws->irq, master). */
707 free_irq(dws->irq, dws);
709 spi_master_put(master);
/*
 * Tear down a controller registered by dw_spi_add_host(): remove debugfs
 * entries, shut down DMA, disable the controller and release the IRQ.
 * NOTE(review): a NULL-dws guard and a clock/queue shutdown step may
 * exist on lines missing from this chunk.
 */
714 void dw_spi_remove_host(struct dw_spi *dws)
718 mrst_spi_debugfs_remove(dws);
720 if (dws->dma_ops && dws->dma_ops->dma_exit)
721 dws->dma_ops->dma_exit(dws);
722 spi_enable_chip(dws, 0);
/* BUGFIX: request_irq() in dw_spi_add_host() registered the handler with
 * dev_id == dws; free_irq() must be passed that same dev_id or, for a
 * shared IRQ line, the handler is not removed and may fire after teardown.
 * Was: free_irq(dws->irq, dws->master). */
726 free_irq(dws->irq, dws);
728 EXPORT_SYMBOL_GPL(dw_spi_remove_host);
/*
 * PM suspend helper for interface drivers: stop the SPI core queue,
 * then gate the controller. NOTE(review): the early-return on a failed
 * spi_master_suspend() and the final "return 0" are on lines missing
 * from this chunk.
 */
730 int dw_spi_suspend_host(struct dw_spi *dws)
734 ret = spi_master_suspend(dws->master);
737 spi_enable_chip(dws, 0);
741 EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
/*
 * PM resume helper: restart the SPI core queue (presumably after a
 * spi_hw_init() re-init on a line missing from this chunk) and log a
 * failure to restart; returns the spi_master_resume() result.
 */
743 int dw_spi_resume_host(struct dw_spi *dws)
748 ret = spi_master_resume(dws->master);
750 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
753 EXPORT_SYMBOL_GPL(dw_spi_resume_host);
755 MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
756 MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
757 MODULE_LICENSE("GPL v2");