// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2011-12 The Chromium OS Authors.
 *
 * This file is derived from the flashrom project.
 */
8 #define LOG_CATEGORY UCLASS_SPI
13 #include <dt-structs.h>
20 #include <spi_flash.h>
22 #include <asm/fast_spi.h>
/*
 * Low-level register-access tracing: compiled to debug() output only when
 * DEBUG_TRACE is defined, otherwise a no-op.  Without the conditional the
 * two definitions would conflict (macro redefinition).
 */
#ifdef DEBUG_TRACE
#define debug_trace(fmt, args...) debug(fmt, ##args)
#else
#define debug_trace(x, args...)
#endif
33 struct ich_spi_platdata {
34 #if CONFIG_IS_ENABLED(OF_PLATDATA)
35 struct dtd_intel_fast_spi dtplat;
37 enum ich_version ich_version; /* Controller version, 7 or 9 */
38 bool lockdown; /* lock down controller settings? */
39 ulong mmio_base; /* Base of MMIO registers */
40 pci_dev_t bdf; /* PCI address used by of-platdata */
41 bool hwseq; /* Use hardware sequencing (not s/w) */
44 static u8 ich_readb(struct ich_spi_priv *priv, int reg)
46 u8 value = readb(priv->base + reg);
48 debug_trace("read %2.2x from %4.4x\n", value, reg);
53 static u16 ich_readw(struct ich_spi_priv *priv, int reg)
55 u16 value = readw(priv->base + reg);
57 debug_trace("read %4.4x from %4.4x\n", value, reg);
62 static u32 ich_readl(struct ich_spi_priv *priv, int reg)
64 u32 value = readl(priv->base + reg);
66 debug_trace("read %8.8x from %4.4x\n", value, reg);
71 static void ich_writeb(struct ich_spi_priv *priv, u8 value, int reg)
73 writeb(value, priv->base + reg);
74 debug_trace("wrote %2.2x to %4.4x\n", value, reg);
77 static void ich_writew(struct ich_spi_priv *priv, u16 value, int reg)
79 writew(value, priv->base + reg);
80 debug_trace("wrote %4.4x to %4.4x\n", value, reg);
83 static void ich_writel(struct ich_spi_priv *priv, u32 value, int reg)
85 writel(value, priv->base + reg);
86 debug_trace("wrote %8.8x to %4.4x\n", value, reg);
89 static void write_reg(struct ich_spi_priv *priv, const void *value,
90 int dest_reg, uint32_t size)
92 memcpy_toio(priv->base + dest_reg, value, size);
95 static void read_reg(struct ich_spi_priv *priv, int src_reg, void *value,
98 memcpy_fromio(value, priv->base + src_reg, size);
101 static void ich_set_bbar(struct ich_spi_priv *ctlr, uint32_t minaddr)
103 const uint32_t bbar_mask = 0x00ffff00;
104 uint32_t ichspi_bbar;
106 minaddr &= bbar_mask;
107 ichspi_bbar = ich_readl(ctlr, ctlr->bbar) & ~bbar_mask;
108 ichspi_bbar |= minaddr;
109 ich_writel(ctlr, ichspi_bbar, ctlr->bbar);
112 /* @return 1 if the SPI flash supports the 33MHz speed */
113 static bool ich9_can_do_33mhz(struct udevice *dev)
115 struct ich_spi_priv *priv = dev_get_priv(dev);
118 /* Observe SPI Descriptor Component Section 0 */
119 dm_pci_write_config32(priv->pch, 0xb0, 0x1000);
121 /* Extract the Write/Erase SPI Frequency from descriptor */
122 dm_pci_read_config32(priv->pch, 0xb4, &fdod);
124 /* Bits 23:21 have the fast read clock frequency, 0=20MHz, 1=33MHz */
125 speed = (fdod >> 21) & 7;
130 static void spi_lock_down(struct ich_spi_platdata *plat, void *sbase)
132 if (plat->ich_version == ICHV_7) {
133 struct ich7_spi_regs *ich7_spi = sbase;
135 setbits_le16(&ich7_spi->spis, SPIS_LOCK);
136 } else if (plat->ich_version == ICHV_9) {
137 struct ich9_spi_regs *ich9_spi = sbase;
139 setbits_le16(&ich9_spi->hsfs, HSFS_FLOCKDN);
143 static bool spi_lock_status(struct ich_spi_platdata *plat, void *sbase)
147 if (plat->ich_version == ICHV_7) {
148 struct ich7_spi_regs *ich7_spi = sbase;
150 lock = readw(&ich7_spi->spis) & SPIS_LOCK;
151 } else if (plat->ich_version == ICHV_9) {
152 struct ich9_spi_regs *ich9_spi = sbase;
154 lock = readw(&ich9_spi->hsfs) & HSFS_FLOCKDN;
/*
 * spi_setup_opcode() - program or look up the menu slot for an opcode
 *
 * NOTE(review): this excerpt is missing source lines and every surviving line
 * carries a stray leading line-number prefix; comments below describe only
 * what the visible code shows.
 *
 * Unlocked controller: the opcode is written into menu slot 0 and its 2-bit
 * type is programmed into the low bits of OPTYPE.  Locked controller: the
 * opcode must already exist in the menu; its index is searched for and the
 * recorded type must match the requested one.
 */
160 static int spi_setup_opcode(struct ich_spi_priv *ctlr, struct spi_trans *trans,
164 uint8_t opmenu[ctlr->menubytes];
167 /* The lock is off, so just use index 0. */
168 ich_writeb(ctlr, trans->opcode, ctlr->opmenu);
169 optypes = ich_readw(ctlr, ctlr->optype);
/* Only the two type bits for slot 0 are replaced; the rest are kept */
170 optypes = (optypes & 0xfffc) | (trans->type & 0x3);
171 ich_writew(ctlr, optypes, ctlr->optype);
174 /* The lock is on. See if what we need is on the menu. */
176 uint16_t opcode_index;
178 /* Write Enable is handled as atomic prefix */
179 if (trans->opcode == SPI_OPCODE_WREN)
/* Scan the opcode menu registers for a slot matching the opcode */
182 read_reg(ctlr, ctlr->opmenu, opmenu, sizeof(opmenu));
183 for (opcode_index = 0; opcode_index < ctlr->menubytes;
185 if (opmenu[opcode_index] == trans->opcode)
189 if (opcode_index == ctlr->menubytes) {
190 debug("ICH SPI: Opcode %x not found\n", trans->opcode);
/* Each menu slot owns a 2-bit type field in the OPTYPE register */
194 optypes = ich_readw(ctlr, ctlr->optype);
195 optype = (optypes >> (opcode_index * 2)) & 0x3;
197 if (optype != trans->type) {
198 debug("ICH SPI: Transaction doesn't fit type %d\n",
207 * Wait for up to 6s til status register bit(s) turn 1 (in case wait_til_set
208 * below is true) or 0. In case the wait was for the bit(s) to set - write
209 * those bits back, which would cause resetting them.
211 * Return the last read status value on success or -1 on failure.
213 static int ich_status_poll(struct ich_spi_priv *ctlr, u16 bitmask,
216 int timeout = 600000; /* This will result in 6s */
220 status = ich_readw(ctlr, ctlr->status);
221 if (wait_til_set ^ ((status & bitmask) == 0)) {
223 ich_writew(ctlr, status & bitmask,
230 debug("ICH SPI: SCIP timeout, read %x, expected %x, wts %x %x\n",
231 status, bitmask, wait_til_set, status & bitmask);
236 static void ich_spi_config_opcode(struct udevice *dev)
238 struct ich_spi_priv *ctlr = dev_get_priv(dev);
241 * PREOP, OPTYPE, OPMENU1/OPMENU2 registers can be locked down
242 * to prevent accidental or intentional writes. Before they get
243 * locked down, these registers should be initialized properly.
245 ich_writew(ctlr, SPI_OPPREFIX, ctlr->preop);
246 ich_writew(ctlr, SPI_OPTYPE, ctlr->optype);
247 ich_writel(ctlr, SPI_OPMENU_LOWER, ctlr->opmenu);
248 ich_writel(ctlr, SPI_OPMENU_UPPER, ctlr->opmenu + sizeof(u32));
/*
 * ich_spi_exec_op_swseq() - execute a spi-mem operation by software
 * sequencing (programmed opcode/address/data registers)
 *
 * NOTE(review): this excerpt is missing source lines and every surviving line
 * carries a stray leading line-number prefix; comments below describe only
 * what the visible code shows.
 */
251 static int ich_spi_exec_op_swseq(struct spi_slave *slave,
252 const struct spi_mem_op *op)
254 struct udevice *bus = dev_get_parent(slave->dev);
255 struct ich_spi_platdata *plat = dev_get_platdata(bus);
256 struct ich_spi_priv *ctlr = dev_get_priv(bus);
258 int16_t opcode_index;
261 struct spi_trans *trans = &ctlr->trans;
262 bool lock = spi_lock_status(plat, ctlr->base);
/* Copy the spi-mem data phase into the driver's transaction record */
269 if (op->data.nbytes) {
270 if (op->data.dir == SPI_MEM_DATA_IN) {
271 trans->in = op->data.buf.in;
272 trans->bytesin = op->data.nbytes;
274 trans->out = op->data.buf.out;
275 trans->bytesout = op->data.nbytes;
279 if (trans->opcode != op->cmd.opcode)
280 trans->opcode = op->cmd.opcode;
282 if (lock && trans->opcode == SPI_OPCODE_WRDIS)
285 if (trans->opcode == SPI_OPCODE_WREN) {
287 * Treat Write Enable as Atomic Pre-Op if possible
288 * in order to prevent the Management Engine from
289 * issuing a transaction between WREN and DATA.
292 ich_writew(ctlr, trans->opcode, ctlr->preop);
/* Wait for any transaction in progress (SCIP) to finish */
296 ret = ich_status_poll(ctlr, SPIS_SCIP, 0);
/* Clear sticky done/error status (register width differs by version) */
300 if (plat->ich_version == ICHV_7)
301 ich_writew(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);
303 ich_writeb(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);
305 /* Try to guess spi transaction type */
306 if (op->data.dir == SPI_MEM_DATA_OUT) {
308 trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;
310 trans->type = SPI_OPCODE_TYPE_WRITE_NO_ADDRESS;
313 trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS;
315 trans->type = SPI_OPCODE_TYPE_READ_NO_ADDRESS;
317 /* Special erase case handling */
318 if (op->addr.nbytes && !op->data.buswidth)
319 trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;
321 opcode_index = spi_setup_opcode(ctlr, trans, lock);
322 if (opcode_index < 0)
325 if (op->addr.nbytes) {
326 trans->offset = op->addr.val;
/* Select 20MHz vs 33MHz clock where the controller has a speed field */
330 if (ctlr->speed && ctlr->max_speed >= 33000000) {
333 byte = ich_readb(ctlr, ctlr->speed);
334 if (ctlr->cur_speed >= 33000000)
335 byte |= SSFC_SCF_33MHZ;
337 byte &= ~SSFC_SCF_33MHZ;
338 ich_writeb(ctlr, byte, ctlr->speed);
341 /* Preset control fields */
342 control = SPIC_SCGO | ((opcode_index & 0x07) << 4);
344 /* Issue atomic preop cycle if needed */
345 if (ich_readw(ctlr, ctlr->preop))
348 if (!trans->bytesout && !trans->bytesin) {
349 /* SPI addresses are 24 bit only */
351 ich_writel(ctlr, trans->offset & 0x00FFFFFF,
355 * This is a 'no data' command (like Write Enable), its
356 * bitesout size was 1, decremented to zero while executing
357 * spi_setup_opcode() above. Tell the chip to send the
360 ich_writew(ctlr, control, ctlr->control);
362 /* wait for the result */
363 status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
367 if (status & SPIS_FCERR) {
368 debug("ICH SPI: Command transaction error\n");
/* Data phase: chunk the transfer by the FDATA FIFO size (databytes) */
375 while (trans->bytesout || trans->bytesin) {
376 uint32_t data_length;
378 /* SPI addresses are 24 bit only */
379 ich_writel(ctlr, trans->offset & 0x00FFFFFF, ctlr->addr);
382 data_length = min(trans->bytesout, ctlr->databytes);
384 data_length = min(trans->bytesin, ctlr->databytes);
386 /* Program data into FDATA0 to N */
387 if (trans->bytesout) {
388 write_reg(ctlr, trans->out, ctlr->data, data_length);
389 trans->bytesout -= data_length;
392 /* Add proper control fields' values */
393 control &= ~((ctlr->databytes - 1) << 8);
395 control |= (data_length - 1) << 8;
398 ich_writew(ctlr, control, ctlr->control);
400 /* Wait for Cycle Done Status or Flash Cycle Error */
401 status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
405 if (status & SPIS_FCERR) {
406 debug("ICH SPI: Data transaction error %x\n", status);
410 if (trans->bytesin) {
411 read_reg(ctlr, ctlr->data, trans->in, data_length);
412 trans->bytesin -= data_length;
416 /* Clear atomic preop now that xfer is done */
418 ich_writew(ctlr, 0, ctlr->preop);
424 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
425 * that the operation does not cross page boundary.
427 static uint get_xfer_len(u32 offset, int len, int page_size)
429 uint xfer_len = min(len, SPIBAR_FDATA_FIFO_SIZE);
430 uint bytes_left = ALIGN(offset, page_size) - offset;
433 xfer_len = min(xfer_len, bytes_left);
438 /* Fill FDATAn FIFO in preparation for a write transaction */
439 static void fill_xfer_fifo(struct fast_spi_regs *regs, const void *data,
442 memcpy(regs->fdata, data, len);
445 /* Drain FDATAn FIFO after a read transaction populates data */
446 static void drain_xfer_fifo(struct fast_spi_regs *regs, void *dest, uint len)
448 memcpy(dest, regs->fdata, len);
451 /* Fire up a transfer using the hardware sequencer */
452 static void start_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
453 uint offset, uint len)
455 /* Make sure all W1C status bits get cleared */
458 hsfsts = readl(®s->hsfsts_ctl);
459 hsfsts &= ~(HSFSTS_FCYCLE_MASK | HSFSTS_FDBC_MASK);
460 hsfsts |= HSFSTS_AEL | HSFSTS_FCERR | HSFSTS_FDONE;
462 /* Set up transaction parameters */
463 hsfsts |= hsfsts_cycle << HSFSTS_FCYCLE_SHIFT;
464 hsfsts |= ((len - 1) << HSFSTS_FDBC_SHIFT) & HSFSTS_FDBC_MASK;
465 hsfsts |= HSFSTS_FGO;
467 writel(offset, ®s->faddr);
468 writel(hsfsts, ®s->hsfsts_ctl);
471 static int wait_for_hwseq_xfer(struct fast_spi_regs *regs, uint offset)
476 start = get_timer(0);
478 hsfsts = readl(®s->hsfsts_ctl);
479 if (hsfsts & HSFSTS_FCERR) {
480 debug("SPI transaction error at offset %x HSFSTS = %08x\n",
484 if (hsfsts & HSFSTS_AEL)
487 if (hsfsts & HSFSTS_FDONE)
489 } while (get_timer(start) < SPIBAR_HWSEQ_XFER_TIMEOUT_MS);
491 debug("SPI transaction timeout at offset %x HSFSTS = %08x, timer %d\n",
492 offset, hsfsts, (uint)get_timer(start));
498 * exec_sync_hwseq_xfer() - Execute flash transfer by hardware sequencing
500 * This waits until complete or timeout
502 * @regs: SPI registers
503 * @hsfsts_cycle: Cycle type (enum hsfsts_cycle_t)
504 * @offset: Offset to access
505 * @len: Number of bytes to transfer (can be 0)
506 * @return 0 if OK, -EIO on flash-cycle error (FCERR), -EPERM on access error
507 * (AEL), -ETIMEDOUT on timeout
509 static int exec_sync_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
510 uint offset, uint len)
512 start_hwseq_xfer(regs, hsfsts_cycle, offset, len);
514 return wait_for_hwseq_xfer(regs, offset);
/*
 * ich_spi_exec_op_hwseq() - execute a spi-mem operation with the hardware
 * sequencer: the opcode is mapped to an HSFSTS cycle type and data is moved
 * through the FDATA FIFO in chunks.
 *
 * NOTE(review): this excerpt is missing source lines and every surviving line
 * carries a stray leading line-number prefix; comments below describe only
 * what the visible code shows.
 */
517 static int ich_spi_exec_op_hwseq(struct spi_slave *slave,
518 const struct spi_mem_op *op)
520 struct spi_flash *flash = dev_get_uclass_priv(slave->dev);
521 struct udevice *bus = dev_get_parent(slave->dev);
522 struct ich_spi_priv *priv = dev_get_priv(bus);
523 struct fast_spi_regs *regs = priv->base;
532 offset = op->addr.val;
533 len = op->data.nbytes;
/* Map the SPI-NOR opcode to a hardware-sequencer cycle type */
535 switch (op->cmd.opcode) {
537 cycle = HSFSTS_CYCLE_RDID;
539 case SPINOR_OP_READ_FAST:
540 cycle = HSFSTS_CYCLE_READ;
543 cycle = HSFSTS_CYCLE_WRITE;
546 /* Nothing needs to be done */
549 cycle = HSFSTS_CYCLE_WR_STATUS;
552 cycle = HSFSTS_CYCLE_RD_STATUS;
555 return 0; /* ignore */
/* 4KB erase is issued immediately with a zero-length data phase */
556 case SPINOR_OP_BE_4K:
557 cycle = HSFSTS_CYCLE_4K_ERASE;
559 uint xfer_len = 0x1000;
561 ret = exec_sync_hwseq_xfer(regs, cycle, offset, 0);
569 debug("Unknown cycle %x\n", op->cmd.opcode);
573 out = op->data.dir == SPI_MEM_DATA_OUT;
574 buf = out ? (u8 *)op->data.buf.out : op->data.buf.in;
/* Fall back to 256-byte pages when the flash reports none */
575 page_size = flash->page_size ? : 256;
/* Chunked loop: fill FIFO, run the cycle, drain FIFO on reads */
578 uint xfer_len = get_xfer_len(offset, len, page_size);
581 fill_xfer_fifo(regs, buf, xfer_len);
583 ret = exec_sync_hwseq_xfer(regs, cycle, offset, xfer_len);
588 drain_xfer_fifo(regs, buf, xfer_len);
598 static int ich_spi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
600 struct udevice *bus = dev_get_parent(slave->dev);
601 struct ich_spi_platdata *plat = dev_get_platdata(bus);
604 bootstage_start(BOOTSTAGE_ID_ACCUM_SPI, "fast_spi");
606 ret = ich_spi_exec_op_hwseq(slave, op);
608 ret = ich_spi_exec_op_swseq(slave, op);
609 bootstage_accum(BOOTSTAGE_ID_ACCUM_SPI);
614 static int ich_spi_adjust_size(struct spi_slave *slave, struct spi_mem_op *op)
616 unsigned int page_offset;
617 int addr = op->addr.val;
618 unsigned int byte_count = op->data.nbytes;
620 if (hweight32(ICH_BOUNDARY) == 1) {
621 page_offset = addr & (ICH_BOUNDARY - 1);
625 page_offset = do_div(aux, ICH_BOUNDARY);
628 if (op->data.dir == SPI_MEM_DATA_IN) {
629 if (slave->max_read_size) {
630 op->data.nbytes = min(ICH_BOUNDARY - page_offset,
631 slave->max_read_size);
633 } else if (slave->max_write_size) {
634 op->data.nbytes = min(ICH_BOUNDARY - page_offset,
635 slave->max_write_size);
638 op->data.nbytes = min(op->data.nbytes, byte_count);
643 static int ich_protect_lockdown(struct udevice *dev)
645 struct ich_spi_platdata *plat = dev_get_platdata(dev);
646 struct ich_spi_priv *priv = dev_get_priv(dev);
649 /* Disable the BIOS write protect so write commands are allowed */
651 ret = pch_set_spi_protect(priv->pch, false);
652 if (ret == -ENOSYS) {
655 bios_cntl = ich_readb(priv, priv->bcr);
656 bios_cntl &= ~BIT(5); /* clear Enable InSMM_STS (EISS) */
657 bios_cntl |= 1; /* Write Protect Disable (WPD) */
658 ich_writeb(priv, bios_cntl, priv->bcr);
660 debug("%s: Failed to disable write-protect: err=%d\n",
665 /* Lock down SPI controller settings if required */
666 if (plat->lockdown) {
667 ich_spi_config_opcode(dev);
668 spi_lock_down(plat, priv->base);
/*
 * ich_init_controller() - record per-version register offsets and work out
 * the maximum supported SPI clock speed.
 *
 * NOTE(review): this excerpt is missing source lines (including the branch
 * around original line 708 and the function epilogue) and every surviving
 * line carries a stray leading line-number prefix; comments describe only
 * the visible code.
 */
674 static int ich_init_controller(struct udevice *dev,
675 struct ich_spi_platdata *plat,
676 struct ich_spi_priv *ctlr)
678 ctlr->base = (void *)plat->mmio_base;
/* ICH7 layout: opcode menu, address/data and status/control registers */
679 if (plat->ich_version == ICHV_7) {
680 struct ich7_spi_regs *ich7_spi = ctlr->base;
682 ctlr->opmenu = offsetof(struct ich7_spi_regs, opmenu);
683 ctlr->menubytes = sizeof(ich7_spi->opmenu);
684 ctlr->optype = offsetof(struct ich7_spi_regs, optype);
685 ctlr->addr = offsetof(struct ich7_spi_regs, spia);
686 ctlr->data = offsetof(struct ich7_spi_regs, spid);
687 ctlr->databytes = sizeof(ich7_spi->spid);
688 ctlr->status = offsetof(struct ich7_spi_regs, spis);
689 ctlr->control = offsetof(struct ich7_spi_regs, spic);
690 ctlr->bbar = offsetof(struct ich7_spi_regs, bbar);
691 ctlr->preop = offsetof(struct ich7_spi_regs, preop);
/* ICH9 layout: software-sequencing via SSFS/SSFC, plus BCR and PR regs */
692 } else if (plat->ich_version == ICHV_9) {
693 struct ich9_spi_regs *ich9_spi = ctlr->base;
695 ctlr->opmenu = offsetof(struct ich9_spi_regs, opmenu);
696 ctlr->menubytes = sizeof(ich9_spi->opmenu);
697 ctlr->optype = offsetof(struct ich9_spi_regs, optype);
698 ctlr->addr = offsetof(struct ich9_spi_regs, faddr);
699 ctlr->data = offsetof(struct ich9_spi_regs, fdata);
700 ctlr->databytes = sizeof(ich9_spi->fdata);
701 ctlr->status = offsetof(struct ich9_spi_regs, ssfs);
702 ctlr->control = offsetof(struct ich9_spi_regs, ssfc);
/* Clock-speed field lives two bytes into the SSFC register */
703 ctlr->speed = ctlr->control + 2;
704 ctlr->bbar = offsetof(struct ich9_spi_regs, bbar);
705 ctlr->preop = offsetof(struct ich9_spi_regs, preop);
706 ctlr->bcr = offsetof(struct ich9_spi_regs, bcr);
707 ctlr->pr = &ich9_spi->pr[0];
709 debug("ICH SPI: Unrecognised ICH version %d\n",
714 /* Work out the maximum speed we can support */
715 ctlr->max_speed = 20000000;
716 if (plat->ich_version == ICHV_9 && ich9_can_do_33mhz(dev))
717 ctlr->max_speed = 33000000;
718 debug("ICH SPI: Version ID %d detected at %lx, speed %ld\n",
719 plat->ich_version, plat->mmio_base, ctlr->max_speed);
721 ich_set_bbar(ctlr, 0);
726 static int ich_spi_probe(struct udevice *dev)
728 struct ich_spi_platdata *plat = dev_get_platdata(dev);
729 struct ich_spi_priv *priv = dev_get_priv(dev);
732 ret = ich_init_controller(dev, plat, priv);
736 ret = ich_protect_lockdown(dev);
740 priv->cur_speed = priv->max_speed;
static int ich_spi_remove(struct udevice *bus)
{
	/*
	 * Configure SPI controller so that the Linux MTD driver can fully
	 * access the SPI NOR chip
	 */
	ich_spi_config_opcode(bus);

	return 0;
}
756 static int ich_spi_set_speed(struct udevice *bus, uint speed)
758 struct ich_spi_priv *priv = dev_get_priv(bus);
760 priv->cur_speed = speed;
765 static int ich_spi_set_mode(struct udevice *bus, uint mode)
767 debug("%s: mode=%d\n", __func__, mode);
772 static int ich_spi_child_pre_probe(struct udevice *dev)
774 struct udevice *bus = dev_get_parent(dev);
775 struct ich_spi_platdata *plat = dev_get_platdata(bus);
776 struct ich_spi_priv *priv = dev_get_priv(bus);
777 struct spi_slave *slave = dev_get_parent_priv(dev);
780 * Yes this controller can only write a small number of bytes at
781 * once! The limit is typically 64 bytes. For hardware sequencing a
782 * a loop is used to get around this.
785 slave->max_write_size = priv->databytes;
787 * ICH 7 SPI controller only supports array read command
788 * and byte program command for SST flash
790 if (plat->ich_version == ICHV_7)
791 slave->mode = SPI_RX_SLOW | SPI_TX_BYTE;
796 static int ich_spi_ofdata_to_platdata(struct udevice *dev)
798 struct ich_spi_platdata *plat = dev_get_platdata(dev);
800 #if !CONFIG_IS_ENABLED(OF_PLATDATA)
801 struct ich_spi_priv *priv = dev_get_priv(dev);
803 /* Find a PCH if there is one */
804 uclass_first_device(UCLASS_PCH, &priv->pch);
806 priv->pch = dev_get_parent(dev);
808 plat->ich_version = dev_get_driver_data(dev);
809 plat->lockdown = dev_read_bool(dev, "intel,spi-lock-down");
810 pch_get_spi_base(priv->pch, &plat->mmio_base);
812 * Use an int so that the property is present in of-platdata even
815 plat->hwseq = dev_read_u32_default(dev, "intel,hardware-seq", 0);
817 plat->ich_version = ICHV_APL;
818 plat->mmio_base = plat->dtplat.early_regs[0];
819 plat->bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
820 plat->hwseq = plat->dtplat.intel_hardware_seq;
822 debug("%s: mmio_base=%lx\n", __func__, plat->mmio_base);
827 static const struct spi_controller_mem_ops ich_controller_mem_ops = {
828 .adjust_op_size = ich_spi_adjust_size,
830 .exec_op = ich_spi_exec_op,
833 static const struct dm_spi_ops ich_spi_ops = {
834 /* xfer is not supported */
835 .set_speed = ich_spi_set_speed,
836 .set_mode = ich_spi_set_mode,
837 .mem_ops = &ich_controller_mem_ops,
839 * cs_info is not needed, since we require all chip selects to be
840 * in the device tree explicitly
844 static const struct udevice_id ich_spi_ids[] = {
845 { .compatible = "intel,ich7-spi", ICHV_7 },
846 { .compatible = "intel,ich9-spi", ICHV_9 },
850 U_BOOT_DRIVER(intel_fast_spi) = {
851 .name = "intel_fast_spi",
853 .of_match = ich_spi_ids,
855 .ofdata_to_platdata = ich_spi_ofdata_to_platdata,
856 .platdata_auto_alloc_size = sizeof(struct ich_spi_platdata),
857 .priv_auto_alloc_size = sizeof(struct ich_spi_priv),
858 .child_pre_probe = ich_spi_child_pre_probe,
859 .probe = ich_spi_probe,
860 .remove = ich_spi_remove,
861 .flags = DM_FLAG_OS_PREPARE,