Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] / drivers / mtd / spi-nor / intel-spi.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel PCH/PCU SPI flash driver.
4  *
5  * Copyright (C) 2016, Intel Corporation
6  * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
7  */
8
9 #include <linux/err.h>
10 #include <linux/io.h>
11 #include <linux/iopoll.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/sizes.h>
15 #include <linux/mtd/mtd.h>
16 #include <linux/mtd/partitions.h>
17 #include <linux/mtd/spi-nor.h>
18 #include <linux/platform_data/intel-spi.h>
19
20 #include "intel-spi.h"
21
/* Offsets are from @ispi->base */
#define BFPREG				0x00

#define HSFSTS_CTL			0x04
#define HSFSTS_CTL_FSMIE		BIT(31)
#define HSFSTS_CTL_FDBC_SHIFT		24
#define HSFSTS_CTL_FDBC_MASK		(0x3f << HSFSTS_CTL_FDBC_SHIFT)

#define HSFSTS_CTL_FCYCLE_SHIFT		17
#define HSFSTS_CTL_FCYCLE_MASK		(0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
/* HW sequencer opcodes */
#define HSFSTS_CTL_FCYCLE_READ		(0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRITE		(0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE		(0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE_64K	(0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDID		(0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRSR		(0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDSR		(0x08 << HSFSTS_CTL_FCYCLE_SHIFT)

#define HSFSTS_CTL_FGO			BIT(16)
#define HSFSTS_CTL_FLOCKDN		BIT(15)
#define HSFSTS_CTL_FDV			BIT(14)
#define HSFSTS_CTL_SCIP			BIT(5)
#define HSFSTS_CTL_AEL			BIT(2)
#define HSFSTS_CTL_FCERR		BIT(1)
#define HSFSTS_CTL_FDONE		BIT(0)

#define FADDR				0x08
#define DLOCK				0x0c
#define FDATA(n)			(0x10 + ((n) * 4))

#define FRACC				0x50

#define FREG(n)				(0x54 + ((n) * 4))
#define FREG_BASE_MASK			0x3fff
#define FREG_LIMIT_SHIFT		16
#define FREG_LIMIT_MASK			(0x03fff << FREG_LIMIT_SHIFT)

/* Offset is from @ispi->pregs */
#define PR(n)				((n) * 4)
#define PR_WPE				BIT(31)
#define PR_LIMIT_SHIFT			16
#define PR_LIMIT_MASK			(0x3fff << PR_LIMIT_SHIFT)
#define PR_RPE				BIT(15)
#define PR_BASE_MASK			0x3fff

/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL			0x00
#define SSFSTS_CTL_FSMIE		BIT(23)
#define SSFSTS_CTL_DS			BIT(22)
#define SSFSTS_CTL_DBC_SHIFT		16
#define SSFSTS_CTL_SPOP			BIT(11)
#define SSFSTS_CTL_ACS			BIT(10)
#define SSFSTS_CTL_SCGO			BIT(9)
#define SSFSTS_CTL_COP_SHIFT		12
#define SSFSTS_CTL_FRS			BIT(7)
#define SSFSTS_CTL_DOFRS		BIT(6)
#define SSFSTS_CTL_AEL			BIT(4)
#define SSFSTS_CTL_FCERR		BIT(3)
#define SSFSTS_CTL_FDONE		BIT(2)
#define SSFSTS_CTL_SCIP			BIT(0)

#define PREOP_OPTYPE			0x04
#define OPMENU0				0x08
#define OPMENU1				0x0c

/* SW sequencer operation types programmed into PREOP_OPTYPE */
#define OPTYPE_READ_NO_ADDR		0
#define OPTYPE_WRITE_NO_ADDR		1
#define OPTYPE_READ_WITH_ADDR		2
#define OPTYPE_WRITE_WITH_ADDR		3

/* CPU specifics */
#define BYT_PR				0x74
#define BYT_SSFSTS_CTL			0x90
#define BYT_BCR				0xfc
#define BYT_BCR_WPD			BIT(0)
#define BYT_FREG_NUM			5
#define BYT_PR_NUM			5

#define LPT_PR				0x74
#define LPT_SSFSTS_CTL			0x90
#define LPT_FREG_NUM			5
#define LPT_PR_NUM			5

#define BXT_PR				0x84
#define BXT_SSFSTS_CTL			0xa0
#define BXT_FREG_NUM			12
#define BXT_PR_NUM			6

#define LVSCC				0xc4
#define UVSCC				0xc8
#define ERASE_OPCODE_SHIFT		8
#define ERASE_OPCODE_MASK		(0xff << ERASE_OPCODE_SHIFT)
#define ERASE_64K_OPCODE_SHIFT		16
/*
 * Fix: the 64K erase opcode field lives at bits 23:16 of LVSCC/UVSCC,
 * so the mask must use ERASE_64K_OPCODE_SHIFT. The previous definition
 * shifted by ERASE_OPCODE_SHIFT and therefore tested the wrong field.
 */
#define ERASE_64K_OPCODE_MASK		(0xff << ERASE_64K_OPCODE_SHIFT)

#define INTEL_SPI_TIMEOUT		5000 /* ms */
#define INTEL_SPI_FIFO_SZ		64
/**
 * struct intel_spi - Driver private data
 * @dev: Device pointer
 * @info: Pointer to board specific info
 * @nor: SPI NOR layer structure
 * @base: Beginning of MMIO space
 * @pregs: Start of protection registers
 * @sregs: Start of software sequencer registers
 * @nregions: Maximum number of regions
 * @pr_num: Maximum number of protected range registers
 * @writeable: Is the chip writeable
 * @locked: Is SPI setting locked
 * @swseq_reg: Use SW sequencer in register reads/writes
 * @swseq_erase: Use SW sequencer in erase operation
 * @erase_64k: 64k erase supported
 * @atomic_preopcode: Holds preopcode when atomic sequence is requested
 * @opcodes: Opcodes which are supported. These are programmed by BIOS
 *           before it locks down the controller.
 */
struct intel_spi {
	struct device *dev;
	const struct intel_spi_boardinfo *info;
	struct spi_nor nor;
	void __iomem *base;
	void __iomem *pregs;
	void __iomem *sregs;
	size_t nregions;
	size_t pr_num;
	bool writeable;
	bool locked;
	bool swseq_reg;
	bool swseq_erase;
	bool erase_64k;
	u8 atomic_preopcode;
	u8 opcodes[8];
};
157
/* Write access is disabled by default; this module parameter enables it */
static bool writeable;
module_param(writeable, bool, 0);
MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
161
/*
 * Dump all controller registers, the decoded protected ranges and flash
 * regions, and the chosen sequencer modes via dev_dbg(). Debug aid only;
 * has no effect on controller state beyond the register reads.
 */
static void intel_spi_dump_regs(struct intel_spi *ispi)
{
	u32 value;
	int i;

	dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));

	value = readl(ispi->base + HSFSTS_CTL);
	dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
	if (value & HSFSTS_CTL_FLOCKDN)
		dev_dbg(ispi->dev, "-> Locked\n");

	dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
	dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));

	/* The data FIFO is exposed as 16 32-bit FDATA registers */
	for (i = 0; i < 16; i++)
		dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
			i, readl(ispi->base + FDATA(i)));

	dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));

	for (i = 0; i < ispi->nregions; i++)
		dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
			readl(ispi->base + FREG(i)));
	for (i = 0; i < ispi->pr_num; i++)
		dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
			readl(ispi->pregs + PR(i)));

	value = readl(ispi->sregs + SSFSTS_CTL);
	dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
	dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
		readl(ispi->sregs + PREOP_OPTYPE));
	dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0));
	dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1));

	/* The BCR (write protect) register is only read on Baytrail */
	if (ispi->info->type == INTEL_SPI_BYT)
		dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));

	dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
	dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));

	dev_dbg(ispi->dev, "Protected regions:\n");
	for (i = 0; i < ispi->pr_num; i++) {
		u32 base, limit;

		value = readl(ispi->pregs + PR(i));
		/* Skip ranges with neither write nor read protection set */
		if (!(value & (PR_WPE | PR_RPE)))
			continue;

		limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
		base = value & PR_BASE_MASK;

		/* Base/limit fields are in units of 4K blocks (<< 12) */
		dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
			 i, base << 12, (limit << 12) | 0xfff,
			 value & PR_WPE ? 'W' : '.',
			 value & PR_RPE ? 'R' : '.');
	}

	dev_dbg(ispi->dev, "Flash regions:\n");
	for (i = 0; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		/* Region 0 may legitimately have limit == 0 */
		if (base >= limit || (i > 0 && limit == 0))
			dev_dbg(ispi->dev, " %02d disabled\n", i);
		else
			dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
				 i, base << 12, (limit << 12) | 0xfff);
	}

	dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
		ispi->swseq_reg ? 'S' : 'H');
	dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
		ispi->swseq_erase ? 'S' : 'H');
}
240
241 /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
242 static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
243 {
244         size_t bytes;
245         int i = 0;
246
247         if (size > INTEL_SPI_FIFO_SZ)
248                 return -EINVAL;
249
250         while (size > 0) {
251                 bytes = min_t(size_t, size, 4);
252                 memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
253                 size -= bytes;
254                 buf += bytes;
255                 i++;
256         }
257
258         return 0;
259 }
260
261 /* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
262 static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
263                                  size_t size)
264 {
265         size_t bytes;
266         int i = 0;
267
268         if (size > INTEL_SPI_FIFO_SZ)
269                 return -EINVAL;
270
271         while (size > 0) {
272                 bytes = min_t(size_t, size, 4);
273                 memcpy_toio(ispi->base + FDATA(i), buf, bytes);
274                 size -= bytes;
275                 buf += bytes;
276                 i++;
277         }
278
279         return 0;
280 }
281
/*
 * Poll HSFSTS_CTL until the HW sequencer SCIP (cycle in progress) bit
 * clears or INTEL_SPI_TIMEOUT ms elapse. Returns 0 or -ETIMEDOUT.
 */
static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
				  !(val & HSFSTS_CTL_SCIP), 40,
				  INTEL_SPI_TIMEOUT * 1000);
}
290
/*
 * Poll SSFSTS_CTL until the SW sequencer SCIP (cycle in progress) bit
 * clears or INTEL_SPI_TIMEOUT ms elapse. Returns 0 or -ETIMEDOUT.
 */
static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
{
	u32 val;

	return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
				  !(val & SSFSTS_CTL_SCIP), 40,
				  INTEL_SPI_TIMEOUT * 1000);
}
299
/*
 * Controller type specific initialization: locate the SW sequencer and
 * protection register banks, decide between HW and SW sequencers for
 * register/erase operations, and cache the BIOS-programmed opcodes when
 * the controller is locked down. Returns 0 or -EINVAL on unknown type.
 */
static int intel_spi_init(struct intel_spi *ispi)
{
	u32 opmenu0, opmenu1, lvscc, uvscc, val;
	int i;

	switch (ispi->info->type) {
	case INTEL_SPI_BYT:
		ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BYT_PR;
		ispi->nregions = BYT_FREG_NUM;
		ispi->pr_num = BYT_PR_NUM;
		ispi->swseq_reg = true;

		if (writeable) {
			/* Disable write protection */
			val = readl(ispi->base + BYT_BCR);
			if (!(val & BYT_BCR_WPD)) {
				val |= BYT_BCR_WPD;
				writel(val, ispi->base + BYT_BCR);
				val = readl(ispi->base + BYT_BCR);
			}

			/* Read back to verify the WPD write actually stuck */
			ispi->writeable = !!(val & BYT_BCR_WPD);
		}

		break;

	case INTEL_SPI_LPT:
		ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
		ispi->pregs = ispi->base + LPT_PR;
		ispi->nregions = LPT_FREG_NUM;
		ispi->pr_num = LPT_PR_NUM;
		ispi->swseq_reg = true;
		break;

	case INTEL_SPI_BXT:
		ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BXT_PR;
		ispi->nregions = BXT_FREG_NUM;
		ispi->pr_num = BXT_PR_NUM;
		ispi->erase_64k = true;
		break;

	default:
		return -EINVAL;
	}

	/* Disable #SMI generation from HW sequencer */
	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~HSFSTS_CTL_FSMIE;
	writel(val, ispi->base + HSFSTS_CTL);

	/*
	 * Determine whether erase operation should use HW or SW sequencer.
	 *
	 * The HW sequencer has a predefined list of opcodes, with only the
	 * erase opcode being programmable in LVSCC and UVSCC registers.
	 * If these registers don't contain a valid erase opcode, erase
	 * cannot be done using HW sequencer.
	 */
	lvscc = readl(ispi->base + LVSCC);
	uvscc = readl(ispi->base + UVSCC);
	if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
		ispi->swseq_erase = true;
	/* SPI controller on Intel BXT supports 64K erase opcode */
	if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
		if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
		    !(uvscc & ERASE_64K_OPCODE_MASK))
			ispi->erase_64k = false;

	/*
	 * Some controllers can only do basic operations using hardware
	 * sequencer. All other operations are supposed to be carried out
	 * using software sequencer.
	 */
	if (ispi->swseq_reg) {
		/* Disable #SMI generation from SW sequencer */
		val = readl(ispi->sregs + SSFSTS_CTL);
		val &= ~SSFSTS_CTL_FSMIE;
		writel(val, ispi->sregs + SSFSTS_CTL);
	}

	/* Check controller's lock status */
	val = readl(ispi->base + HSFSTS_CTL);
	ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);

	if (ispi->locked) {
		/*
		 * BIOS programs allowed opcodes and then locks down the
		 * register. So read back what opcodes it decided to support.
		 * That's the set we are going to support as well.
		 */
		opmenu0 = readl(ispi->sregs + OPMENU0);
		opmenu1 = readl(ispi->sregs + OPMENU1);

		if (opmenu0 && opmenu1) {
			/* Each OPMENU register packs four 8-bit opcodes */
			for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
				ispi->opcodes[i] = opmenu0 >> i * 8;
				ispi->opcodes[i + 4] = opmenu1 >> i * 8;
			}
		}
	}

	intel_spi_dump_regs(ispi);

	return 0;
}
407
408 static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
409 {
410         int i;
411         int preop;
412
413         if (ispi->locked) {
414                 for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
415                         if (ispi->opcodes[i] == opcode)
416                                 return i;
417
418                 return -EINVAL;
419         }
420
421         /* The lock is off, so just use index 0 */
422         writel(opcode, ispi->sregs + OPMENU0);
423         preop = readw(ispi->sregs + PREOP_OPTYPE);
424         writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
425
426         return 0;
427 }
428
/*
 * Run a single register operation using the HW sequencer. Only the
 * opcodes the HW sequencer implements (RDID, WRSR, RDSR) are accepted;
 * data (max INTEL_SPI_FIFO_SZ bytes) goes through the FDATA FIFO.
 * Returns 0, -EINVAL for unsupported opcode/length, -EIO on cycle
 * error, -EACCES on access error, or -ETIMEDOUT.
 */
static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
{
	u32 val, status;
	int ret;

	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);

	switch (opcode) {
	case SPINOR_OP_RDID:
		val |= HSFSTS_CTL_FCYCLE_RDID;
		break;
	case SPINOR_OP_WRSR:
		val |= HSFSTS_CTL_FCYCLE_WRSR;
		break;
	case SPINOR_OP_RDSR:
		val |= HSFSTS_CTL_FCYCLE_RDSR;
		break;
	default:
		return -EINVAL;
	}

	if (len > INTEL_SPI_FIFO_SZ)
		return -EINVAL;

	/* FDBC holds the byte count minus one */
	val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
	/*
	 * Setting FCERR/FDONE presumably clears the previous status
	 * (write-1-to-clear) before FGO starts the cycle -- the same
	 * pattern is used by the other HW sequencer paths in this file.
	 */
	val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
	val |= HSFSTS_CTL_FGO;
	writel(val, ispi->base + HSFSTS_CTL);

	ret = intel_spi_wait_hw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->base + HSFSTS_CTL);
	if (status & HSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & HSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}
471
/*
 * Run a single operation using the SW sequencer. The opcode must be
 * resolvable via intel_spi_opcode_index(). If an atomic preopcode
 * (e.g. WREN set by intel_spi_write_reg()) is pending, an atomic
 * preop + opcode sequence is requested for write-type cycles.
 * Returns 0, -EINVAL, -EIO, -EACCES or -ETIMEDOUT.
 */
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
			      int optype)
{
	u32 val = 0, status;
	u8 atomic_preopcode;
	int ret;

	/* ret is the OPMENU slot index on success */
	ret = intel_spi_opcode_index(ispi, opcode, optype);
	if (ret < 0)
		return ret;

	if (len > INTEL_SPI_FIFO_SZ)
		return -EINVAL;

	/*
	 * Always clear it after each SW sequencer operation regardless
	 * of whether it is successful or not.
	 */
	atomic_preopcode = ispi->atomic_preopcode;
	ispi->atomic_preopcode = 0;

	/* Only mark 'Data Cycle' bit when there is data to be transferred */
	if (len > 0)
		val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
	val |= ret << SSFSTS_CTL_COP_SHIFT;
	val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
	val |= SSFSTS_CTL_SCGO;
	if (atomic_preopcode) {
		u16 preop;

		switch (optype) {
		case OPTYPE_WRITE_NO_ADDR:
		case OPTYPE_WRITE_WITH_ADDR:
			/* Pick matching preopcode for the atomic sequence */
			preop = readw(ispi->sregs + PREOP_OPTYPE);
			if ((preop & 0xff) == atomic_preopcode)
				; /* Do nothing */
			else if ((preop >> 8) == atomic_preopcode)
				val |= SSFSTS_CTL_SPOP;
			else
				return -EINVAL;

			/* Enable atomic sequence */
			val |= SSFSTS_CTL_ACS;
			break;

		default:
			/* Atomic sequence only makes sense with writes */
			return -EINVAL;
		}

	}
	writel(val, ispi->sregs + SSFSTS_CTL);

	ret = intel_spi_wait_sw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->sregs + SSFSTS_CTL);
	if (status & SSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & SSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}
537
538 static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
539 {
540         struct intel_spi *ispi = nor->priv;
541         int ret;
542
543         /* Address of the first chip */
544         writel(0, ispi->base + FADDR);
545
546         if (ispi->swseq_reg)
547                 ret = intel_spi_sw_cycle(ispi, opcode, len,
548                                          OPTYPE_READ_NO_ADDR);
549         else
550                 ret = intel_spi_hw_cycle(ispi, opcode, len);
551
552         if (ret)
553                 return ret;
554
555         return intel_spi_read_block(ispi, buf, len);
556 }
557
/*
 * spi-nor write_reg hook: WREN is not sent as its own cycle but latched
 * as the atomic preopcode for the next SW cycle; all other opcodes copy
 * @buf into the FDATA FIFO and run through the appropriate sequencer.
 */
static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	struct intel_spi *ispi = nor->priv;
	int ret;

	/*
	 * This is handled with atomic operation and preop code in Intel
	 * controller so we only verify that it is available. If the
	 * controller is not locked, program the opcode to the PREOP
	 * register for later use.
	 *
	 * When hardware sequencer is used there is no need to program
	 * any opcodes (it handles them automatically as part of a command).
	 */
	if (opcode == SPINOR_OP_WREN) {
		u16 preop;

		if (!ispi->swseq_reg)
			return 0;

		preop = readw(ispi->sregs + PREOP_OPTYPE);
		if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
			/* A locked controller cannot take a new preopcode */
			if (ispi->locked)
				return -EINVAL;
			writel(opcode, ispi->sregs + PREOP_OPTYPE);
		}

		/*
		 * This enables atomic sequence on next SW cycle. Will
		 * be cleared after next operation.
		 */
		ispi->atomic_preopcode = opcode;
		return 0;
	}

	/* Address of the first chip */
	writel(0, ispi->base + FADDR);

	/* Write the value beforehand */
	ret = intel_spi_write_block(ispi, buf, len);
	if (ret)
		return ret;

	if (ispi->swseq_reg)
		return intel_spi_sw_cycle(ispi, opcode, len,
					  OPTYPE_WRITE_NO_ADDR);
	return intel_spi_hw_cycle(ispi, opcode, len);
}
605
/*
 * spi-nor read hook: read @len bytes at flash offset @from into
 * @read_buf using the HW sequencer, in chunks of at most
 * INTEL_SPI_FIFO_SZ bytes that never cross a 4K boundary. Returns the
 * number of bytes read or a negative error code.
 */
static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
			      u_char *read_buf)
{
	struct intel_spi *ispi = nor->priv;
	size_t block_size, retlen = 0;
	u32 val, status;
	ssize_t ret;

	/*
	 * Atomic sequence is not expected with HW sequencer reads. Make
	 * sure it is cleared regardless.
	 */
	if (WARN_ON_ONCE(ispi->atomic_preopcode))
		ispi->atomic_preopcode = 0;

	/* Only plain and fast read map onto the HW sequencer read cycle */
	switch (nor->read_opcode) {
	case SPINOR_OP_READ:
	case SPINOR_OP_READ_FAST:
		break;
	default:
		return -EINVAL;
	}

	while (len > 0) {
		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);

		/* Read cannot cross 4K boundary */
		block_size = min_t(loff_t, from + block_size,
				   round_up(from + 1, SZ_4K)) - from;

		writel(from, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		/* FDBC holds the byte count minus one */
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_READ;
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "read error: %llx: %#x\n", from,
				status);
			return ret;
		}

		ret = intel_spi_read_block(ispi, read_buf, block_size);
		if (ret)
			return ret;

		len -= block_size;
		from += block_size;
		retlen += block_size;
		read_buf += block_size;
	}

	return retlen;
}
674
/*
 * spi-nor write hook: write @len bytes from @write_buf at flash offset
 * @to using the HW sequencer, in chunks of at most INTEL_SPI_FIFO_SZ
 * bytes that never cross a 4K boundary. Returns the number of bytes
 * written or a negative error code.
 */
static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
			       const u_char *write_buf)
{
	struct intel_spi *ispi = nor->priv;
	size_t block_size, retlen = 0;
	u32 val, status;
	ssize_t ret;

	/* Not needed with HW sequencer write, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	while (len > 0) {
		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);

		/* Write cannot cross 4K boundary */
		block_size = min_t(loff_t, to + block_size,
				   round_up(to + 1, SZ_4K)) - to;

		writel(to, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		/* FDBC holds the byte count minus one */
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_WRITE;

		/* Fill the FIFO before starting the cycle */
		ret = intel_spi_write_block(ispi, write_buf, block_size);
		if (ret) {
			dev_err(ispi->dev, "failed to write block\n");
			return ret;
		}

		/* Start the write now */
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret) {
			dev_err(ispi->dev, "timeout\n");
			return ret;
		}

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "write error: %llx: %#x\n", to,
				status);
			return ret;
		}

		len -= block_size;
		to += block_size;
		retlen += block_size;
		write_buf += block_size;
	}

	return retlen;
}
737
/*
 * spi-nor erase hook: erase nor->mtd.erasesize bytes starting at @offs.
 * Uses the SW sequencer when the controller requires it, otherwise the
 * HW sequencer, preferring the 64K erase cycle when supported and the
 * erase size allows it. Returns 0 or a negative error code.
 */
static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
{
	size_t erase_size, len = nor->mtd.erasesize;
	struct intel_spi *ispi = nor->priv;
	u32 val, status, cmd;
	int ret;

	/* If the hardware can do 64k erase use that when possible */
	if (len >= SZ_64K && ispi->erase_64k) {
		cmd = HSFSTS_CTL_FCYCLE_ERASE_64K;
		erase_size = SZ_64K;
	} else {
		cmd = HSFSTS_CTL_FCYCLE_ERASE;
		erase_size = SZ_4K;
	}

	if (ispi->swseq_erase) {
		/* SW sequencer: issue the chip's erase opcode per block */
		while (len > 0) {
			writel(offs, ispi->base + FADDR);

			ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
						 0, OPTYPE_WRITE_WITH_ADDR);
			if (ret)
				return ret;

			offs += erase_size;
			len -= erase_size;
		}

		return 0;
	}

	/* Not needed with HW sequencer erase, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	while (len > 0) {
		writel(offs, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		val |= cmd;
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			return -EIO;
		else if (status & HSFSTS_CTL_AEL)
			return -EACCES;

		offs += erase_size;
		len -= erase_size;
	}

	return 0;
}
799
800 static bool intel_spi_is_protected(const struct intel_spi *ispi,
801                                    unsigned int base, unsigned int limit)
802 {
803         int i;
804
805         for (i = 0; i < ispi->pr_num; i++) {
806                 u32 pr_base, pr_limit, pr_value;
807
808                 pr_value = readl(ispi->pregs + PR(i));
809                 if (!(pr_value & (PR_WPE | PR_RPE)))
810                         continue;
811
812                 pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
813                 pr_base = pr_value & PR_BASE_MASK;
814
815                 if (pr_base >= base && pr_limit <= limit)
816                         return true;
817         }
818
819         return false;
820 }
821
822 /*
823  * There will be a single partition holding all enabled flash regions. We
824  * call this "BIOS".
825  */
826 static void intel_spi_fill_partition(struct intel_spi *ispi,
827                                      struct mtd_partition *part)
828 {
829         u64 end;
830         int i;
831
832         memset(part, 0, sizeof(*part));
833
834         /* Start from the mandatory descriptor region */
835         part->size = 4096;
836         part->name = "BIOS";
837
838         /*
839          * Now try to find where this partition ends based on the flash
840          * region registers.
841          */
842         for (i = 1; i < ispi->nregions; i++) {
843                 u32 region, base, limit;
844
845                 region = readl(ispi->base + FREG(i));
846                 base = region & FREG_BASE_MASK;
847                 limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
848
849                 if (base >= limit || limit == 0)
850                         continue;
851
852                 /*
853                  * If any of the regions have protection bits set, make the
854                  * whole partition read-only to be on the safe side.
855                  */
856                 if (intel_spi_is_protected(ispi, base, limit))
857                         ispi->writeable = false;
858
859                 end = (limit << 12) + 4096;
860                 if (end > part->size)
861                         part->size = end;
862         }
863 }
864
/**
 * intel_spi_probe() - Probe the Intel PCH/PCU SPI flash controller
 * @dev: Parent device
 * @mem: MMIO resource of the controller
 * @info: Board specific information
 *
 * Maps the controller registers, runs controller initialization, scans
 * the attached SPI NOR chip and registers a single "BIOS" MTD partition
 * covering the enabled flash regions. Writes are disabled unless both
 * the board info and the "writeable" module parameter allow them.
 *
 * Return: the new driver instance or an ERR_PTR() on failure.
 */
struct intel_spi *intel_spi_probe(struct device *dev,
	struct resource *mem, const struct intel_spi_boardinfo *info)
{
	const struct spi_nor_hwcaps hwcaps = {
		.mask = SNOR_HWCAPS_READ |
			SNOR_HWCAPS_READ_FAST |
			SNOR_HWCAPS_PP,
	};
	struct mtd_partition part;
	struct intel_spi *ispi;
	int ret;

	if (!info || !mem)
		return ERR_PTR(-EINVAL);

	ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
	if (!ispi)
		return ERR_PTR(-ENOMEM);

	ispi->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ispi->base))
		return ERR_CAST(ispi->base);

	ispi->dev = dev;
	ispi->info = info;
	ispi->writeable = info->writeable;

	ret = intel_spi_init(ispi);
	if (ret)
		return ERR_PTR(ret);

	/* Hook this driver into the spi-nor framework */
	ispi->nor.dev = ispi->dev;
	ispi->nor.priv = ispi;
	ispi->nor.read_reg = intel_spi_read_reg;
	ispi->nor.write_reg = intel_spi_write_reg;
	ispi->nor.read = intel_spi_read;
	ispi->nor.write = intel_spi_write;
	ispi->nor.erase = intel_spi_erase;

	ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
	if (ret) {
		dev_info(dev, "failed to locate the chip\n");
		return ERR_PTR(ret);
	}

	intel_spi_fill_partition(ispi, &part);

	/* Prevent writes if not explicitly enabled */
	if (!ispi->writeable || !writeable)
		ispi->nor.mtd.flags &= ~MTD_WRITEABLE;

	ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
	if (ret)
		return ERR_PTR(ret);

	return ispi;
}
EXPORT_SYMBOL_GPL(intel_spi_probe);
923
/**
 * intel_spi_remove() - Remove the driver instance
 * @ispi: Driver instance returned by intel_spi_probe()
 *
 * Return: 0 on success or a negative error code from
 * mtd_device_unregister().
 */
int intel_spi_remove(struct intel_spi *ispi)
{
	return mtd_device_unregister(&ispi->nor.mtd);
}
EXPORT_SYMBOL_GPL(intel_spi_remove);
929
930 MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
931 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
932 MODULE_LICENSE("GPL v2");