}
#ifdef CONFIG_SPI_FLASH_SST
+/*
+ * sst26 flash series has its own block protection implementation:
+ * 4x - 8 KByte blocks - read & write protection bits - upper addresses
+ * 1x - 32 KByte blocks - write protection bits
+ * rest - 64 KByte blocks - write protection bits
+ * 1x - 32 KByte blocks - write protection bits
+ * 4x - 8 KByte blocks - read & write protection bits - lower addresses
+ *
+ * We'll support lock/unlock only with 64 KByte granularity, so the lower and
+ * upper 64 KByte regions will each be treated as a single block.
+ */
+#define SST26_BPR_8K_NUM 4
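+/* The largest BPR (the 64 Mbit part) is 18 bytes long */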
+#define SST26_MAX_BPR_REG_LEN (18 + 1)
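+/* Boundary region at each end: one 32 KByte block + four 8 KByte blocks */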
+#define SST26_BOUND_REG_SIZE ((32 + SST26_BPR_8K_NUM * 8) * SZ_1K)
+
+enum lock_ctl {
+ SST26_CTL_LOCK,
+ SST26_CTL_UNLOCK,
+ SST26_CTL_CHECK
+};
+
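+/*
+ * Set, clear or test a single bit in the block-protection register image.
+ * Bit 0 lives in the last byte of the buffer, i.e. the buffer holds the
+ * register most significant byte first.
+ */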
+static bool sst26_process_bpr(u32 bpr_size, u8 *cmd, u32 bit, enum lock_ctl ctl)
+{
+ switch (ctl) {
+ case SST26_CTL_LOCK:
+ cmd[bpr_size - (bit / 8) - 1] |= BIT(bit % 8);
+ break;
+ case SST26_CTL_UNLOCK:
+ cmd[bpr_size - (bit / 8) - 1] &= ~BIT(bit % 8);
+ break;
+ case SST26_CTL_CHECK:
+ return !!(cmd[bpr_size - (bit / 8) - 1] & BIT(bit % 8));
+ }
+
+ return false;
+}
+
+/*
+ * Lock, unlock or check the lock status of a flash region (depending on the
+ * lock_ctl value). With SST26_CTL_CHECK it returns positive EACCES as soon as
+ * any block in the region is found locked.
+ */
+static int sst26_lock_ctl(struct spi_nor *nor, loff_t ofs, uint64_t len, enum lock_ctl ctl)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u32 i, bpr_ptr, rptr_64k, lptr_64k, bpr_size;
+ bool lower_64k = false, upper_64k = false;
+ u8 bpr_buff[SST26_MAX_BPR_REG_LEN] = {};
+ int ret;
+
+ /* Check length and offset for 64k alignment */
+ if ((ofs & (SZ_64K - 1)) || (len & (SZ_64K - 1))) {
+		dev_err(nor->dev, "length or offset is not 64KiB aligned\n");
+ return -EINVAL;
+ }
+
+ if (ofs + len > mtd->size) {
+ dev_err(nor->dev, "range is more than device size: %#llx + %#llx > %#llx\n",
+ ofs, len, mtd->size);
+ return -EINVAL;
+ }
+
+	/* SST26 family has only 16 Mbit, 32 Mbit and 64 Mbit ICs */
+ if (mtd->size != SZ_2M &&
+ mtd->size != SZ_4M &&
+ mtd->size != SZ_8M)
+ return -EINVAL;
+
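+	/*
+	 * One bit per 64 KByte block plus the two 32 KByte block bits fit in
+	 * mtd->size / SZ_64K / 8 bytes; the extra 2 bytes hold the read and
+	 * write protection bits of the eight 8 KByte blocks.
+	 */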
+ bpr_size = 2 + (mtd->size / SZ_64K / 8);
+
+ ret = nor->read_reg(nor, SPINOR_OP_READ_BPR, bpr_buff, bpr_size);
+ if (ret < 0) {
+		dev_err(nor->dev, "failed to read block-protection register\n");
+ return ret;
+ }
+
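+	/*
+	 * Clamp the 64 KByte-block walk to the area between the lower and
+	 * upper boundary regions; the boundary regions themselves are handled
+	 * via their 32 KByte and 8 KByte bits below.
+	 */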
+ rptr_64k = min_t(u32, ofs + len, mtd->size - SST26_BOUND_REG_SIZE);
+ lptr_64k = max_t(u32, ofs, SST26_BOUND_REG_SIZE);
+
+ upper_64k = ((ofs + len) > (mtd->size - SST26_BOUND_REG_SIZE));
+ lower_64k = (ofs < SST26_BOUND_REG_SIZE);
+
+	/*
+	 * The lowest bits in the block-protection register cover the regular
+	 * 64 KByte blocks; bit 0 corresponds to the block starting at
+	 * SST26_BOUND_REG_SIZE, hence the '- 1'.
+	 */
+	bpr_ptr = lptr_64k / SZ_64K - 1;
+
+ /* Process 64K blocks region */
+ while (lptr_64k < rptr_64k) {
+ if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
+ return EACCES;
+
+ bpr_ptr++;
+ lptr_64k += SZ_64K;
+ }
+
+	/*
+	 * The 32 KByte and 8 KByte block bits follow the 64 KByte block bits
+	 * in the BPR, so skip one bit per regular 64 KByte block.
+	 */
+	bpr_ptr = (mtd->size - 2 * SST26_BOUND_REG_SIZE) / SZ_64K;
+
+ /* Process lower 32K block region */
+ if (lower_64k)
+ if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
+ return EACCES;
+
+ bpr_ptr++;
+
+ /* Process upper 32K block region */
+ if (upper_64k)
+ if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
+ return EACCES;
+
+ bpr_ptr++;
+
+ /* Process lower 8K block regions */
+ for (i = 0; i < SST26_BPR_8K_NUM; i++) {
+ if (lower_64k)
+ if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
+ return EACCES;
+
+ /* In 8K area BPR has both read and write protection bits */
+ bpr_ptr += 2;
+ }
+
+ /* Process upper 8K block regions */
+ for (i = 0; i < SST26_BPR_8K_NUM; i++) {
+ if (upper_64k)
+ if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
+ return EACCES;
+
+ /* In 8K area BPR has both read and write protection bits */
+ bpr_ptr += 2;
+ }
+
+	/* If we only check the region status we don't need to write the BPR back */
+ if (ctl == SST26_CTL_CHECK)
+ return 0;
+
+ ret = nor->write_reg(nor, SPINOR_OP_WRITE_BPR, bpr_buff, bpr_size);
+ if (ret < 0) {
+		dev_err(nor->dev, "failed to write block-protection register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sst26_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return sst26_lock_ctl(nor, ofs, len, SST26_CTL_UNLOCK);
+}
+
+static int sst26_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return sst26_lock_ctl(nor, ofs, len, SST26_CTL_LOCK);
+}
+
+/*
+ * Returns EACCES (positive value) if region is locked, 0 if region is unlocked,
+ * and negative on errors.
+ */
+static int sst26_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+	/*
+	 * The is_locked hook is used to check a region before reading or
+	 * erasing it, so the offset and length might not be 64 KByte aligned.
+	 * Expand them to the enclosing 64 KByte aligned region, as
+	 * sst26_lock_ctl() works only with 64 KByte aligned regions.
+	 */
+	len += ofs & (SZ_64K - 1);
+	ofs -= ofs & (SZ_64K - 1);
+	len = (len + SZ_64K - 1) & ~(SZ_64K - 1);
+
+ return sst26_lock_ctl(nor, ofs, len, SST26_CTL_CHECK);
+}
+
static int sst_write_byteprogram(struct spi_nor *nor, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
#endif
#ifdef CONFIG_SPI_FLASH_SST
+	/*
+	 * The sst26 series implements block protection differently from the
+	 * other SST series, so it gets its own lock/unlock/is_locked hooks.
+	 */
+ if (info->flags & SPI_NOR_HAS_SST26LOCK) {
+ nor->flash_lock = sst26_lock;
+ nor->flash_unlock = sst26_unlock;
+ nor->flash_is_locked = sst26_is_locked;
+ }
+
/* sst nor chips use AAI word program */
if (info->flags & SST_WRITE)
mtd->_write = sst_write;