+/*
+ * nand_max_ecc_strength() - highest usable ECC strength index for @conf
+ *
+ * The OOB area (whose size is tied to the page size) must hold the ECC
+ * bytes of every sector in the page; this returns the largest index into
+ * the controller's strength table whose per-sector ECC bytes still fit.
+ *
+ * @conf: NAND config providing page_size and ecc_size (sector size)
+ *
+ * Return: the maximum ECC strength index, or -EINVAL if the page size is
+ * unsupported or even the weakest ECC does not fit in the OOB area.
+ */
+static int nand_max_ecc_strength(struct nfc_config *conf)
+{
+ /* OOB bytes consumed per sector for each ECC strength step. */
+ static const int ecc_bytes[] = { 32, 46, 54, 60, 74, 88, 102, 110, 116 };
+ int nsectors = conf->page_size / conf->ecc_size;
+ int oobsize, per_sector, best, i;
+
+ /*
+  * ECC strength is limited by the size of the OOB area which is
+  * correlated with the page size.
+  */
+ switch (conf->page_size) {
+ case 2048:
+  oobsize = 64;
+  break;
+ case 4096:
+  oobsize = 256;
+  break;
+ case 8192:
+  oobsize = 640;
+  break;
+ case 16384:
+  oobsize = 1664;
+  break;
+ default:
+  return -EINVAL;
+ }
+
+ per_sector = oobsize / nsectors;
+
+ /* Keep the largest strength index whose ECC bytes still fit. */
+ best = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(ecc_bytes) && ecc_bytes[i] <= per_sector; i++)
+  best = i;
+
+ return best;
+}
+
+/*
+ * nand_detect_ecc_config() - detect the ECC setup by trial-reading a page
+ *
+ * Tries (ecc_size, ecc_strength, randomizer) combinations until one yields
+ * a successful read of the first sector of the page at @offs. 1k ECC
+ * sectors are tried before 512b ones, and within each sector size the
+ * strongest ECC is tried first, since vendors usually provide an OOB area
+ * that barely fits the required ECC bytes.
+ *
+ * @conf: NAND config; ecc_size, ecc_strength, randomize and nseeds are
+ *        updated in place with the detected values
+ * @offs: byte offset of the page used for the probe reads
+ * @dest: destination buffer for the probe reads (assumed to hold at least
+ *        one ECC sector — TODO confirm against callers)
+ *
+ * Return: 0 once a working configuration is found, -EIO if the column
+ * reset fails, -EINVAL if the page is empty or nothing works.
+ */
+static int nand_detect_ecc_config(struct nfc_config *conf, u32 offs,
+ void *dest)
+{
+ /* NAND with pages > 4k will likely require 1k sector size. */
+ int min_ecc_size = conf->page_size > 4096 ? 1024 : 512;
+ int page = offs / conf->page_size;
+ int ret;
+
+ /*
+  * In most cases, 1k sectors are preferred over 512b ones, start
+  * testing this config first.
+  */
+ for (conf->ecc_size = 1024; conf->ecc_size >= min_ecc_size;
+ conf->ecc_size >>= 1) {
+  /*
+   * A negative (error) value here makes the strength loop below
+   * run zero times, so unsupported page sizes are skipped.
+   */
+  int max_ecc_strength = nand_max_ecc_strength(conf);
+
+  /* Presumably programs the controller with the candidate
+   * sector size — NOTE(review): confirm nand_apply_config().
+   */
+  nand_apply_config(conf);
+
+  /*
+   * We are starting from the maximum ECC strength because
+   * most of the time NAND vendors provide an OOB area that
+   * barely meets the ECC requirements.
+   */
+  for (conf->ecc_strength = max_ecc_strength;
+  conf->ecc_strength >= 0;
+  conf->ecc_strength--) {
+   /* First attempt: read without the randomizer. */
+   conf->randomize = false;
+   if (nand_reset_column())
+    return -EIO;
+
+   /*
+    * Only read the first sector to speedup detection.
+    */
+   ret = nand_read_page(conf, offs, dest, conf->ecc_size);
+   if (!ret) {
+    return 0;
+   } else if (ret > 0) {
+    /*
+     * If page is empty we can't deduce anything
+     * about the ECC config => stop the detection.
+     */
+    return -EINVAL;
+   }
+
+   /*
+    * Plain read failed (negative ret appears to mean an
+    * uncorrectable read — confirm against nand_read_page()):
+    * retry with the randomizer enabled, starting from the
+    * full seed table and shrinking it on each failure.
+    */
+   conf->randomize = true;
+   conf->nseeds = ARRAY_SIZE(random_seed);
+   do {
+    if (nand_reset_column())
+     return -EIO;
+
+    if (!nand_read_page(conf, offs, dest,
+    conf->ecc_size))
+     return 0;
+
+    /*
+     * Find the next ->nseeds value that would
+     * change the randomizer seed for the page
+     * we're trying to read.
+     */
+    while (conf->nseeds >= 16) {
+     int seed = page % conf->nseeds;
+
+     conf->nseeds >>= 1;
+     if (seed != page % conf->nseeds)
+      break;
+    }
+   /* 16 looks like the minimum useful seed-table size —
+    * NOTE(review): confirm against the random_seed table.
+    */
+   } while (conf->nseeds >= 16);
+  }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * nand_detect_config() - detect the NAND addressing and page-size setup
+ *
+ * Iterates over the supported address-cycle counts (5 first, since modern
+ * chips are more likely) and, for each, over the candidate page sizes,
+ * loading a page and delegating ECC detection to nand_detect_ecc_config().
+ * On success the configuration is marked valid so later calls are no-ops.
+ *
+ * @conf: NAND config to fill in; checked/updated in place
+ * @offs: byte offset of the page used for detection
+ * @dest: destination buffer for the probe reads
+ *
+ * Return: 0 on success (or if @conf is already valid), -EIO if the page
+ * cannot be loaded, -EINVAL if no working configuration was found.
+ */
+static int nand_detect_config(struct nfc_config *conf, u32 offs, void *dest)
+{
+ if (conf->valid)
+  return 0;
+
+ /*
+  * Modern NANDs are more likely than legacy ones, so we start testing
+  * with 5 address cycles.
+  */
+ for (conf->addr_cycles = 5;
+ conf->addr_cycles >= 4;
+ conf->addr_cycles--) {
+  int max_page_size = conf->addr_cycles == 4 ? 2048 : 16384;
+
+  /*
+   * Ignoring 1k pages because I'm not even sure this case exists
+   * in the real world.
+   */
+  for (conf->page_size = 2048; conf->page_size <= max_page_size;
+  conf->page_size <<= 1) {
+   /*
+    * Use -EIO rather than a bare -1 so the failure mode
+    * matches the errno-style returns used elsewhere in
+    * this file (-1 would read as -EPERM to callers).
+    */
+   if (nand_load_page(conf, offs))
+    return -EIO;
+
+   if (!nand_detect_ecc_config(conf, offs, dest)) {
+    conf->valid = true;
+    return 0;
+   }
+  }
+ }
+
+ return -EINVAL;
+}
+
+static int nand_read_buffer(struct nfc_config *conf, uint32_t offs,
+ unsigned int size, void *dest)