enable multicast routing for linux 2.4 (#6037)
[oweals/openwrt.git] / target / linux / generic-2.4 / patches / 005-mtd_flashtypes.patch
1 --- a/drivers/mtd/chips/Config.in
2 +++ b/drivers/mtd/chips/Config.in
3 @@ -45,6 +45,7 @@ fi
4  dep_tristate '  Support for Intel/Sharp flash chips' CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_GEN_PROBE
5  dep_tristate '  Support for AMD/Fujitsu flash chips' CONFIG_MTD_CFI_AMDSTD $CONFIG_MTD_GEN_PROBE
6  dep_tristate '  Support for ST (Advanced Architecture) flash chips' CONFIG_MTD_CFI_STAA $CONFIG_MTD_GEN_PROBE
7 +dep_tristate '  Support for SST flash chips' CONFIG_MTD_CFI_SSTSTD $CONFIG_MTD_GEN_PROBE
8  
9  dep_tristate '  Support for RAM chips in bus mapping' CONFIG_MTD_RAM $CONFIG_MTD
10  dep_tristate '  Support for ROM chips in bus mapping' CONFIG_MTD_ROM $CONFIG_MTD
11 --- a/drivers/mtd/chips/Makefile
12 +++ b/drivers/mtd/chips/Makefile
13 @@ -18,6 +18,7 @@ obj-$(CONFIG_MTD)             += chipreg.o
14  obj-$(CONFIG_MTD_AMDSTD)       += amd_flash.o 
15  obj-$(CONFIG_MTD_CFI)          += cfi_probe.o
16  obj-$(CONFIG_MTD_CFI_STAA)     += cfi_cmdset_0020.o
17 +obj-$(CONFIG_MTD_CFI_SSTSTD)   += cfi_cmdset_0701.o
18  obj-$(CONFIG_MTD_CFI_AMDSTD)   += cfi_cmdset_0002.o
19  obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
20  obj-$(CONFIG_MTD_GEN_PROBE)    += gen_probe.o
21 --- /dev/null
22 +++ b/drivers/mtd/chips/cfi_cmdset_0701.c
23 @@ -0,0 +1,855 @@
24 +/*
25 + * Common Flash Interface support:
26 + *   SST Standard Vendor Command Set (ID 0x0701)
27 + *
28 + * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
29 + *
30 + * 2_by_8 routines added by Simon Munton
31 + *
32 + * This code is GPL
33 + *
34 + * $Id: cfi_cmdset_0701.c,v 1.1 2005/03/16 13:50:00 wbx Exp $
35 + *
36 + */
37 +
38 +#include <linux/module.h>
39 +#include <linux/types.h>
40 +#include <linux/kernel.h>
41 +#include <linux/sched.h>
42 +#include <asm/io.h>
43 +#include <asm/byteorder.h>
44 +
45 +#include <linux/errno.h>
46 +#include <linux/slab.h>
47 +#include <linux/delay.h>
48 +#include <linux/interrupt.h>
49 +#include <linux/mtd/map.h>
50 +#include <linux/mtd/cfi.h>
51 +
52 +static int cfi_sststd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
53 +static int cfi_sststd_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54 +static int cfi_sststd_erase_onesize(struct mtd_info *, struct erase_info *);
55 +static int cfi_sststd_erase_varsize(struct mtd_info *, struct erase_info *);
56 +static void cfi_sststd_sync (struct mtd_info *);
57 +static int cfi_sststd_suspend (struct mtd_info *);
58 +static void cfi_sststd_resume (struct mtd_info *);
59 +
60 +static void cfi_sststd_destroy(struct mtd_info *);
61 +
62 +struct mtd_info *cfi_cmdset_0701(struct map_info *, int);
63 +static struct mtd_info *cfi_sststd_setup (struct map_info *);
64 +
65 +
66 +static struct mtd_chip_driver cfi_sststd_chipdrv = {
67 +       probe: NULL, /* Not usable directly */
68 +       destroy: cfi_sststd_destroy,
69 +       name: "cfi_cmdset_0701",
70 +       module: THIS_MODULE
71 +};
72 +
73 +struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary)
74 +{
75 +       struct cfi_private *cfi = map->fldrv_priv;
76 +       int ofs_factor = cfi->interleave * cfi->device_type;
77 +       int i;
78 +       __u8 major, minor;
79 +       __u32 base = cfi->chips[0].start;
80 +
81 +       if (cfi->cfi_mode==1){
82 +               __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
83 +
84 +               cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
85 +               cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
86 +               cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
87 +               
88 +               major = cfi_read_query(map, base + (adr+3)*ofs_factor);
89 +               minor = cfi_read_query(map, base + (adr+4)*ofs_factor);
90 +               
91 +               printk(" SST Query Table v%c.%c at 0x%4.4X\n",
92 +                      major, minor, adr);
93 +               cfi_send_gen_cmd(0xf0, 0x5555, base, map, cfi, cfi->device_type, NULL);
94 +               
95 +               cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
96 +               cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
97 +               cfi_send_gen_cmd(0x90, 0x5555, base, map, cfi, cfi->device_type, NULL);
98 +               cfi->mfr = cfi_read_query(map, base);
99 +               cfi->id = cfi_read_query(map, base + ofs_factor);
100 +
101 +               cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
102 +               cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
103 +               cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
104 +               
105 +               switch (cfi->device_type) {
106 +               case CFI_DEVICETYPE_X16:
107 +                       cfi->addr_unlock1 = 0x5555;
108 +                       cfi->addr_unlock2 = 0x2AAA;
109 +                       break;
110 +               default:
111 +                       printk(KERN_NOTICE "Eep. Unknown cfi_cmdset_0701 device type %d\n", cfi->device_type);
112 +                       return NULL;
113 +               }
114 +       } /* CFI mode */
115 +
116 +       for (i=0; i< cfi->numchips; i++) {
117 +               cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
118 +               cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
119 +               cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
120 +       }               
121 +       
122 +       map->fldrv = &cfi_sststd_chipdrv;
123 +       MOD_INC_USE_COUNT;
124 +
125 +       cfi_send_gen_cmd(0xf0, 0x5555, base, map, cfi, cfi->device_type, NULL);
126 +       return cfi_sststd_setup(map);
127 +}
128 +
129 +static struct mtd_info *cfi_sststd_setup(struct map_info *map)
130 +{
131 +       struct cfi_private *cfi = map->fldrv_priv;
132 +       struct mtd_info *mtd;
133 +       unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
134 +
135 +       mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
136 +       printk("number of %s chips: %d\n", (cfi->cfi_mode)?"JEDEC":"CFI",cfi->numchips);
137 +
138 +       if (!mtd) {
139 +         printk("Failed to allocate memory for MTD device\n");
140 +         kfree(cfi->cmdset_priv);
141 +         return NULL;
142 +       }
143 +
144 +       memset(mtd, 0, sizeof(*mtd));
145 +       mtd->priv = map;
146 +       mtd->type = MTD_NORFLASH;
147 +       /* Also select the correct geometry setup too */ 
148 +       mtd->size = devsize * cfi->numchips;
149 +       
150 +       if (cfi->cfiq->NumEraseRegions == 1) {
151 +               /* No need to muck about with multiple erase sizes */
152 +               mtd->erasesize = ((cfi->cfiq->EraseRegionInfo[0] >> 8) & ~0xff) * cfi->interleave;
153 +       } else {
154 +               unsigned long offset = 0;
155 +               int i,j;
156 +
157 +               mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
158 +               mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL);
159 +               if (!mtd->eraseregions) { 
160 +                       printk("Failed to allocate memory for MTD erase region info\n");
161 +                       kfree(cfi->cmdset_priv); kfree(mtd);
162 +                       return NULL;
163 +               }
164 +                       
165 +               for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
166 +                       unsigned long ernum, ersize;
167 +                       ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
168 +                       ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
169 +                       
170 +                       if (mtd->erasesize < ersize) {
171 +                               mtd->erasesize = ersize;
172 +                       }
173 +                       for (j=0; j<cfi->numchips; j++) {
174 +                               mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
175 +                               mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
176 +                               mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
177 +                       }
178 +                       offset += (ersize * ernum);
179 +               }
180 +
181 +               // debug
182 +               for (i=0; i<mtd->numeraseregions;i++){
183 +                       printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
184 +                              i,mtd->eraseregions[i].offset,
185 +                              mtd->eraseregions[i].erasesize,
186 +                              mtd->eraseregions[i].numblocks);
187 +               }
188 +       }
189 +
190 +       switch (CFIDEV_BUSWIDTH)
191 +       {
192 +       case 1:
193 +       case 2:
194 +       case 4:
195 +               if (mtd->numeraseregions > 1)
196 +                       mtd->erase = cfi_sststd_erase_varsize;
197 +               else
198 +                       mtd->erase = cfi_sststd_erase_onesize;
199 +               mtd->read = cfi_sststd_read;
200 +               mtd->write = cfi_sststd_write;
201 +               break;
202 +
203 +       default:
204 +               printk("Unsupported buswidth\n");
205 +               kfree(mtd->eraseregions); kfree(mtd);
206 +               kfree(cfi->cmdset_priv);
207 +               return NULL;
208 +               break;
209 +       }
210 +       mtd->sync = cfi_sststd_sync;
211 +       mtd->suspend = cfi_sststd_suspend;
212 +       mtd->resume = cfi_sststd_resume;
213 +       mtd->flags = MTD_CAP_NORFLASH;
214 +       map->fldrv = &cfi_sststd_chipdrv;
215 +       mtd->name = map->name;
216 +       MOD_INC_USE_COUNT;
217 +       return mtd;
218 +}
219 +
220 +static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
221 +{
222 +       DECLARE_WAITQUEUE(wait, current);
223 +       unsigned long timeo = jiffies + HZ;
224 +
225 + retry:
226 +       cfi_spin_lock(chip->mutex);
227 +
228 +       if (chip->state != FL_READY){
229 +               printk("Waiting for chip to read, status = %d\n", chip->state);
230 +               set_current_state(TASK_UNINTERRUPTIBLE);
231 +               add_wait_queue(&chip->wq, &wait);
232 +                
233 +               cfi_spin_unlock(chip->mutex);
234 +
235 +               schedule();
236 +               remove_wait_queue(&chip->wq, &wait);
237 +               timeo = jiffies + HZ;
238 +
239 +               goto retry;
240 +       }       
241 +
242 +       adr += chip->start;
243 +
244 +       chip->state = FL_READY;
245 +
246 +       map->copy_from(map, buf, adr, len);
247 +
248 +       wake_up(&chip->wq);
249 +       cfi_spin_unlock(chip->mutex);
250 +
251 +       return 0;
252 +}
253 +
254 +static int cfi_sststd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
255 +{
256 +       struct map_info *map = mtd->priv;
257 +       struct cfi_private *cfi = map->fldrv_priv;
258 +       unsigned long ofs;
259 +       int chipnum;
260 +       int ret = 0;
261 +
262 +       /* ofs: offset within the first chip that the first read should start */
263 +
264 +       chipnum = (from >> cfi->chipshift);
265 +       ofs = from - (chipnum <<  cfi->chipshift);
266 +
267 +
268 +       *retlen = 0;
269 +
270 +       while (len) {
271 +               unsigned long thislen;
272 +
273 +               if (chipnum >= cfi->numchips)
274 +                       break;
275 +
276 +               if ((len + ofs -1) >> cfi->chipshift)
277 +                       thislen = (1<<cfi->chipshift) - ofs;
278 +               else
279 +                       thislen = len;
280 +
281 +               ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
282 +               if (ret)
283 +                       break;
284 +
285 +               *retlen += thislen;
286 +               len -= thislen;
287 +               buf += thislen;
288 +
289 +               ofs = 0;
290 +               chipnum++;
291 +       }
292 +       return ret;
293 +}
294 +
295 +static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, __u32 datum, int fast)
296 +{
297 +       unsigned long timeo = jiffies + HZ;
298 +       unsigned int Last[4];
299 +       unsigned long Count = 0;
300 +       struct cfi_private *cfi = map->fldrv_priv;
301 +       DECLARE_WAITQUEUE(wait, current);
302 +       int ret = 0;
303 +
304 + retry:
305 +       cfi_spin_lock(chip->mutex);
306 +
307 +       if (chip->state != FL_READY){
308 +               printk("Waiting for chip to write, status = %d\n", chip->state);
309 +               set_current_state(TASK_UNINTERRUPTIBLE);
310 +               add_wait_queue(&chip->wq, &wait);
311 +                
312 +               cfi_spin_unlock(chip->mutex);
313 +
314 +               schedule();
315 +               remove_wait_queue(&chip->wq, &wait);
316 +               printk("Wake up to write:\n");
317 +               timeo = jiffies + HZ;
318 +
319 +               goto retry;
320 +       }       
321 +
322 +       chip->state = FL_WRITING;
323 +
324 +       adr += chip->start;
325 +       ENABLE_VPP(map);
326 +    cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
327 +    cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
328 +    cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
329 +
330 +       cfi_write(map, datum, adr);
331 +
332 +       cfi_spin_unlock(chip->mutex);
333 +       cfi_udelay(chip->word_write_time);
334 +       cfi_spin_lock(chip->mutex);
335 +
336 +       Last[0] = cfi_read(map, adr);
337 +       //      printk("Last[0] is %x\n", Last[0]);
338 +       Last[1] = cfi_read(map, adr);
339 +       //      printk("Last[1] is %x\n", Last[1]);
340 +       Last[2] = cfi_read(map, adr);
341 +       //      printk("Last[2] is %x\n", Last[2]);
342 +
343 +       for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] && Count < 10000; Count++){
344 +               cfi_spin_unlock(chip->mutex);
345 +               cfi_udelay(10);
346 +               cfi_spin_lock(chip->mutex);
347 +               
348 +               Last[Count % 4] = cfi_read(map, adr);
349 +               //              printk("Last[%d%%4] is %x\n", Count, Last[Count%4]);
350 +       }
351 +       
352 +       if (Last[(Count - 1) % 4] != datum){
353 +               printk("Last[%lu] is %x, datum is %x\n",(Count - 1) % 4,Last[(Count - 1) % 4],datum);
354 +               cfi_send_gen_cmd(0xF0, 0, chip->start, map, cfi, cfi->device_type, NULL);
355 +               DISABLE_VPP(map);
356 +               ret = -EIO;
357 +       }       
358 +       DISABLE_VPP(map);
359 +       chip->state = FL_READY;
360 +       wake_up(&chip->wq);
361 +       cfi_spin_unlock(chip->mutex);
362 +       
363 +       return ret;
364 +}
365 +
366 +static int cfi_sststd_write (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
367 +{
368 +       struct map_info *map = mtd->priv;
369 +       struct cfi_private *cfi = map->fldrv_priv;
370 +       int ret = 0;
371 +       int chipnum;
372 +       unsigned long ofs, chipstart;
373 +
374 +       *retlen = 0;
375 +       if (!len)
376 +               return 0;
377 +
378 +       chipnum = to >> cfi->chipshift;
379 +       ofs = to  - (chipnum << cfi->chipshift);
380 +       chipstart = cfi->chips[chipnum].start;
381 +
382 +       /* If it's not bus-aligned, do the first byte write */
383 +       if (ofs & (CFIDEV_BUSWIDTH-1)) {
384 +               unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
385 +               int i = ofs - bus_ofs;
386 +               int n = 0;
387 +               u_char tmp_buf[4];
388 +               __u32 datum;
389 +
390 +               map->copy_from(map, tmp_buf, bus_ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
391 +               while (len && i < CFIDEV_BUSWIDTH)
392 +                       tmp_buf[i++] = buf[n++], len--;
393 +
394 +               if (cfi_buswidth_is_2()) {
395 +                       datum = *(__u16*)tmp_buf;
396 +               } else if (cfi_buswidth_is_4()) {
397 +                       datum = *(__u32*)tmp_buf;
398 +               } else {
399 +                       return -EINVAL;  /* should never happen, but be safe */
400 +               }
401 +
402 +               ret = do_write_oneword(map, &cfi->chips[chipnum], 
403 +                               bus_ofs, datum, 0);
404 +               if (ret) 
405 +                       return ret;
406 +               
407 +               ofs += n;
408 +               buf += n;
409 +               (*retlen) += n;
410 +
411 +               if (ofs >> cfi->chipshift) {
412 +                       chipnum ++; 
413 +                       ofs = 0;
414 +                       if (chipnum == cfi->numchips)
415 +                               return 0;
416 +               }
417 +       }
418 +       
419 +       /* We are now aligned, write as much as possible */
420 +       while(len >= CFIDEV_BUSWIDTH) {
421 +               __u32 datum;
422 +
423 +               if (cfi_buswidth_is_1()) {
424 +                       datum = *(__u8*)buf;
425 +               } else if (cfi_buswidth_is_2()) {
426 +                       datum = *(__u16*)buf;
427 +               } else if (cfi_buswidth_is_4()) {
428 +                       datum = *(__u32*)buf;
429 +               } else {
430 +                       return -EINVAL;
431 +               }
432 +               ret = do_write_oneword(map, &cfi->chips[chipnum],
433 +                                      ofs, datum, cfi->fast_prog);
434 +               if (ret) {
435 +                       return ret;
436 +               }
437 +
438 +               ofs += CFIDEV_BUSWIDTH;
439 +               buf += CFIDEV_BUSWIDTH;
440 +               (*retlen) += CFIDEV_BUSWIDTH;
441 +               len -= CFIDEV_BUSWIDTH;
442 +
443 +               if (ofs >> cfi->chipshift) {
444 +                       chipnum ++; 
445 +                       ofs = 0;
446 +                       if (chipnum == cfi->numchips)
447 +                               return 0;
448 +                       chipstart = cfi->chips[chipnum].start;
449 +               }
450 +       }
451 +
452 +       if (len & (CFIDEV_BUSWIDTH-1)) {
453 +               int i = 0, n = 0;
454 +               u_char tmp_buf[4];
455 +               __u32 datum;
456 +
457 +               map->copy_from(map, tmp_buf, ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
458 +               while (len--)
459 +                       tmp_buf[i++] = buf[n++];
460 +
461 +               if (cfi_buswidth_is_2()) {
462 +                       datum = *(__u16*)tmp_buf;
463 +               } else if (cfi_buswidth_is_4()) {
464 +                       datum = *(__u32*)tmp_buf;
465 +               } else {
466 +                       return -EINVAL;  /* should never happen, but be safe */
467 +               }
468 +
469 +               ret = do_write_oneword(map, &cfi->chips[chipnum], 
470 +                               ofs, datum, 0);
471 +               if (ret) 
472 +                       return ret;
473 +               
474 +               (*retlen) += n;
475 +       }
476 +
477 +       return 0;
478 +}
479 +
480 +static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
481 +{
482 +       unsigned int status;
483 +       unsigned long timeo = jiffies + HZ;
484 +       struct cfi_private *cfi = map->fldrv_priv;
485 +       unsigned int rdy_mask;
486 +       DECLARE_WAITQUEUE(wait, current);
487 +
488 + retry:
489 +       cfi_spin_lock(chip->mutex);
490 +
491 +       if (chip->state != FL_READY){
492 +               set_current_state(TASK_UNINTERRUPTIBLE);
493 +               add_wait_queue(&chip->wq, &wait);
494 +                
495 +               cfi_spin_unlock(chip->mutex);
496 +
497 +               schedule();
498 +               remove_wait_queue(&chip->wq, &wait);
499 +               timeo = jiffies + HZ;
500 +
501 +               goto retry;
502 +       }       
503 +
504 +       chip->state = FL_ERASING;
505 +
506 +       adr += chip->start;
507 +       ENABLE_VPP(map);
508 +       cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
509 +       cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
510 +       cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
511 +       cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
512 +       cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
513 +       cfi_write(map, CMD(0x30), adr);
514 +       
515 +       timeo = jiffies + (HZ*20);
516 +
517 +       cfi_spin_unlock(chip->mutex);
518 +       schedule_timeout(HZ);
519 +       cfi_spin_lock(chip->mutex);
520 +       
521 +       rdy_mask = CMD(0x80);
522 +
523 +       /* Once the state machine's known to be working I'll do that */
524 +
525 +       while ( ( (status = cfi_read(map,adr)) & rdy_mask ) != rdy_mask ) {
526 +               static int z=0;
527 +
528 +               if (chip->state != FL_ERASING) {
529 +                       /* Someone's suspended the erase. Sleep */
530 +                       set_current_state(TASK_UNINTERRUPTIBLE);
531 +                       add_wait_queue(&chip->wq, &wait);
532 +                       
533 +                       cfi_spin_unlock(chip->mutex);
534 +                       printk("erase suspended. Sleeping\n");
535 +                       
536 +                       schedule();
537 +                       remove_wait_queue(&chip->wq, &wait);
538 +                       timeo = jiffies + (HZ*2); 
539 +                       cfi_spin_lock(chip->mutex);
540 +                       continue;
541 +               }
542 +
543 +               /* OK Still waiting */
544 +               if (time_after(jiffies, timeo)) {
545 +                       chip->state = FL_READY;
546 +                       cfi_spin_unlock(chip->mutex);
547 +                       printk("waiting for erase to complete timed out.");
548 +                       DISABLE_VPP(map);
549 +                       return -EIO;
550 +               }
551 +               
552 +               /* Latency issues. Drop the lock, wait a while and retry */
553 +               cfi_spin_unlock(chip->mutex);
554 +
555 +               z++;
556 +               if ( 0 && !(z % 100 )) 
557 +                       printk("chip not ready yet after erase. looping\n");
558 +
559 +               cfi_udelay(1);
560 +               
561 +               cfi_spin_lock(chip->mutex);
562 +               continue;
563 +       }
564 +       
565 +       /* Done and happy. */
566 +       DISABLE_VPP(map);
567 +       chip->state = FL_READY;
568 +       wake_up(&chip->wq);
569 +       cfi_spin_unlock(chip->mutex);
570 +       return 0;
571 +}
572 +
573 +static int cfi_sststd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
574 +{
575 +       struct map_info *map = mtd->priv;
576 +       struct cfi_private *cfi = map->fldrv_priv;
577 +       unsigned long adr, len;
578 +       int chipnum, ret = 0;
579 +       int i, first;
580 +       struct mtd_erase_region_info *regions = mtd->eraseregions;
581 +
582 +       if (instr->addr > mtd->size)
583 +               return -EINVAL;
584 +
585 +       if ((instr->len + instr->addr) > mtd->size)
586 +               return -EINVAL;
587 +
588 +       /* Check that both start and end of the requested erase are
589 +        * aligned with the erasesize at the appropriate addresses.
590 +        */
591 +
592 +       i = 0;
593 +
594 +       /* Skip all erase regions which are ended before the start of 
595 +          the requested erase. Actually, to save on the calculations,
596 +          we skip to the first erase region which starts after the
597 +          start of the requested erase, and then go back one.
598 +       */
599 +       
600 +       while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
601 +              i++;
602 +       i--;
603 +
604 +       /* OK, now i is pointing at the erase region in which this 
605 +          erase request starts. Check the start of the requested
606 +          erase range is aligned with the erase size which is in
607 +          effect here.
608 +       */
609 +
610 +       if (instr->addr & (regions[i].erasesize-1))
611 +               return -EINVAL;
612 +
613 +       /* Remember the erase region we start on */
614 +       first = i;
615 +
616 +       /* Next, check that the end of the requested erase is aligned
617 +        * with the erase region at that address.
618 +        */
619 +
620 +       while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
621 +               i++;
622 +
623 +       /* As before, drop back one to point at the region in which
624 +          the address actually falls
625 +       */
626 +       i--;
627 +       
628 +       if ((instr->addr + instr->len) & (regions[i].erasesize-1))
629 +               return -EINVAL;
630 +       
631 +       chipnum = instr->addr >> cfi->chipshift;
632 +       adr = instr->addr - (chipnum << cfi->chipshift);
633 +       len = instr->len;
634 +
635 +       i=first;
636 +
637 +       while(len) {
638 +               ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
639 +
640 +               if (ret)
641 +                       return ret;
642 +
643 +               adr += regions[i].erasesize;
644 +               len -= regions[i].erasesize;
645 +
646 +               if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
647 +                       i++;
648 +
649 +               if (adr >> cfi->chipshift) {
650 +                       adr = 0;
651 +                       chipnum++;
652 +                       
653 +                       if (chipnum >= cfi->numchips)
654 +                       break;
655 +               }
656 +       }
657 +
658 +       instr->state = MTD_ERASE_DONE;
659 +       if (instr->callback)
660 +               instr->callback(instr);
661 +       
662 +       return 0;
663 +}
664 +
665 +static int cfi_sststd_erase_onesize(struct mtd_info *mtd, struct erase_info *instr)
666 +{
667 +       struct map_info *map = mtd->priv;
668 +       struct cfi_private *cfi = map->fldrv_priv;
669 +       unsigned long adr, len;
670 +       int chipnum, ret = 0;
671 +
672 +       if (instr->addr & (mtd->erasesize - 1))
673 +               return -EINVAL;
674 +
675 +       if (instr->len & (mtd->erasesize -1))
676 +               return -EINVAL;
677 +
678 +       if ((instr->len + instr->addr) > mtd->size)
679 +               return -EINVAL;
680 +
681 +       chipnum = instr->addr >> cfi->chipshift;
682 +       adr = instr->addr - (chipnum << cfi->chipshift);
683 +       len = instr->len;
684 +
685 +       while(len) {
686 +               ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
687 +
688 +               if (ret)
689 +                       return ret;
690 +
691 +               adr += mtd->erasesize;
692 +               len -= mtd->erasesize;
693 +
694 +               if (adr >> cfi->chipshift) {
695 +                       adr = 0;
696 +                       chipnum++;
697 +                       
698 +                       if (chipnum >= cfi->numchips)
699 +                       break;
700 +               }
701 +       }
702 +               
703 +       instr->state = MTD_ERASE_DONE;
704 +       if (instr->callback)
705 +               instr->callback(instr);
706 +       
707 +       return 0;
708 +}
709 +
710 +static void cfi_sststd_sync (struct mtd_info *mtd)
711 +{
712 +       struct map_info *map = mtd->priv;
713 +       struct cfi_private *cfi = map->fldrv_priv;
714 +       int i;
715 +       struct flchip *chip;
716 +       int ret = 0;
717 +       DECLARE_WAITQUEUE(wait, current);
718 +
719 +       for (i=0; !ret && i<cfi->numchips; i++) {
720 +               chip = &cfi->chips[i];
721 +
722 +       retry:
723 +               cfi_spin_lock(chip->mutex);
724 +
725 +               switch(chip->state) {
726 +               case FL_READY:
727 +               case FL_STATUS:
728 +               case FL_CFI_QUERY:
729 +               case FL_JEDEC_QUERY:
730 +                       chip->oldstate = chip->state;
731 +                       chip->state = FL_SYNCING;
732 +                       /* No need to wake_up() on this state change - 
733 +                        * as the whole point is that nobody can do anything
734 +                        * with the chip now anyway.
735 +                        */
736 +               case FL_SYNCING:
737 +                       cfi_spin_unlock(chip->mutex);
738 +                       break;
739 +
740 +               default:
741 +                       /* Not an idle state */
742 +                       add_wait_queue(&chip->wq, &wait);
743 +                       
744 +                       cfi_spin_unlock(chip->mutex);
745 +
746 +                       schedule();
747 +
748 +                       remove_wait_queue(&chip->wq, &wait);
749 +                       
750 +                       goto retry;
751 +               }
752 +       }
753 +
754 +       /* Unlock the chips again */
755 +
756 +       for (i--; i >=0; i--) {
757 +               chip = &cfi->chips[i];
758 +
759 +               cfi_spin_lock(chip->mutex);
760 +               
761 +               if (chip->state == FL_SYNCING) {
762 +                       chip->state = chip->oldstate;
763 +                       wake_up(&chip->wq);
764 +               }
765 +               cfi_spin_unlock(chip->mutex);
766 +       }
767 +}
768 +
769 +
770 +static int cfi_sststd_suspend(struct mtd_info *mtd)
771 +{
772 +       struct map_info *map = mtd->priv;
773 +       struct cfi_private *cfi = map->fldrv_priv;
774 +       int i;
775 +       struct flchip *chip;
776 +       int ret = 0;
777 +//printk("suspend\n");
778 +
779 +       for (i=0; !ret && i<cfi->numchips; i++) {
780 +               chip = &cfi->chips[i];
781 +
782 +               cfi_spin_lock(chip->mutex);
783 +
784 +               switch(chip->state) {
785 +               case FL_READY:
786 +               case FL_STATUS:
787 +               case FL_CFI_QUERY:
788 +               case FL_JEDEC_QUERY:
789 +                       chip->oldstate = chip->state;
790 +                       chip->state = FL_PM_SUSPENDED;
791 +                       /* No need to wake_up() on this state change - 
792 +                        * as the whole point is that nobody can do anything
793 +                        * with the chip now anyway.
794 +                        */
795 +               case FL_PM_SUSPENDED:
796 +                       break;
797 +
798 +               default:
799 +                       ret = -EAGAIN;
800 +                       break;
801 +               }
802 +               cfi_spin_unlock(chip->mutex);
803 +       }
804 +
805 +       /* Unlock the chips again */
806 +
807 +       if (ret) {
808 +               for (i--; i >=0; i--) {
809 +                       chip = &cfi->chips[i];
810 +
811 +                       cfi_spin_lock(chip->mutex);
812 +               
813 +                       if (chip->state == FL_PM_SUSPENDED) {
814 +                               chip->state = chip->oldstate;
815 +                               wake_up(&chip->wq);
816 +                       }
817 +                       cfi_spin_unlock(chip->mutex);
818 +               }
819 +       }
820 +       
821 +       return ret;
822 +}
823 +
824 +static void cfi_sststd_resume(struct mtd_info *mtd)
825 +{
826 +       struct map_info *map = mtd->priv;
827 +       struct cfi_private *cfi = map->fldrv_priv;
828 +       int i;
829 +       struct flchip *chip;
830 +//printk("resume\n");
831 +
832 +       for (i=0; i<cfi->numchips; i++) {
833 +       
834 +               chip = &cfi->chips[i];
835 +
836 +               cfi_spin_lock(chip->mutex);
837 +               
838 +               if (chip->state == FL_PM_SUSPENDED) {
839 +                       chip->state = FL_READY;
840 +                       cfi_write(map, CMD(0xF0), chip->start);
841 +                       wake_up(&chip->wq);
842 +               }
843 +               else
844 +                       printk("Argh. Chip not in PM_SUSPENDED state upon resume()\n");
845 +
846 +               cfi_spin_unlock(chip->mutex);
847 +       }
848 +}
849 +
850 +static void cfi_sststd_destroy(struct mtd_info *mtd)
851 +{
852 +       struct map_info *map = mtd->priv;
853 +       struct cfi_private *cfi = map->fldrv_priv;
854 +       kfree(cfi->cmdset_priv);
855 +       kfree(cfi);
856 +}
857 +
858 +#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
859 +#define cfi_sststd_init init_module
860 +#define cfi_sststd_exit cleanup_module
861 +#endif
862 +
863 +static char im_name[]="cfi_cmdset_0701";
864 +
865 +mod_init_t cfi_sststd_init(void)
866 +{
867 +       inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0701);
868 +       return 0;
869 +}
870 +
871 +mod_exit_t cfi_sststd_exit(void)
872 +{
873 +       inter_module_unregister(im_name);
874 +}
875 +
876 +module_init(cfi_sststd_init);
877 +module_exit(cfi_sststd_exit);
878 +
879 --- a/drivers/mtd/chips/cfi_probe.c
880 +++ b/drivers/mtd/chips/cfi_probe.c
881 @@ -67,8 +67,15 @@ static int cfi_probe_chip(struct map_inf
882         cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
883         cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
884  
885 -       if (!qry_present(map,base,cfi))
886 -               return 0;
887 +       if (!qry_present(map,base,cfi)) {
888 +               /* rather broken SST cfi probe (requires SST unlock) */
889 +               cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
890 +               cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
891 +               cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
892 +               cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
893 +               if (!qry_present(map,base,cfi))
894 +                       return 0;
895 +       }
896  
897         if (!cfi->numchips) {
898                 /* This is the first time we're called. Set up the CFI 
899 --- a/drivers/mtd/chips/gen_probe.c
900 +++ b/drivers/mtd/chips/gen_probe.c
901 @@ -328,13 +328,18 @@ static struct mtd_info *check_cmd_set(st
902                 return cfi_cmdset_0001(map, primary);
903  #endif
904  #ifdef CONFIG_MTD_CFI_AMDSTD
905 +       case 0x0006:
906         case 0x0002:
907                 return cfi_cmdset_0002(map, primary);
908  #endif
909  #ifdef CONFIG_MTD_CFI_STAA
910 -        case 0x0020:
911 +       case 0x0020:
912                 return cfi_cmdset_0020(map, primary);
913  #endif
914 +#ifdef CONFIG_MTD_CFI_SSTSTD
915 +       case 0x0701:
916 +               return cfi_cmdset_0701(map, primary);
917 +#endif
918         }
919  
920         return cfi_cmdset_unknown(map, primary);