/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90     0x00b0
#define LH28F640BFHE_PBTL90     0x00b1
#define LH28F640BFHE_PTTL70A    0x00b2
#define LH28F640BFHE_PBTL70A    0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
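
/*
 * fwh_lock.h supplies the FWH (firmware hub) lock/unlock helper
 * (fixup_use_fwh_lock) referenced by the JEDEC fixup table below.
 */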
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /*
         * Reverse the byteswapping done in read_pri_intelext() so the
         * structure is back in raw (little-endian, on-chip) layout
         * before its first bytes are reinterpreted as an Atmel PRI.
         */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static int is_LH28F640BF(struct cfi_private *cfi)
{
        /* Sharp LH28F640BF Family */
        if (cfi->mfr == CFI_MFR_SHARP && (
            cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
            cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
                return 1;
        return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /* Reset the Partition Configuration Register on LH28F640BF
         * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
        if (is_LH28F640BF(cfi)) {
                printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
                map_write(map, CMD(0x60), 0);
                map_write(map, CMD(0x04), 0);

                /* We have set one single partition thus
                 * Simultaneous Operations are not allowed */
                printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
                extp->FeatureSupport &= ~512;
        }
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

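/*
 * Fixup tables: cfi_fixup() walks these (mfr, id, handler) entries in
 * order and runs every handler whose manufacturer/device pair matches,
 * with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.
 */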
static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor ids and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device ids are
         * as well.  This table is to pick all cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
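        /*
         * Quirk: the Intel PF38F4476 advertises extended-table minor
         * version '3' but does not appear to carry the full 1.3
         * layout; treat it as 1.1 so the table parses correctly.
         */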
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

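/*
 * Read the Intel/Sharp extended query table.  Its size varies with the
 * minor version (OTP, burst-read and partition records are appended),
 * so when the current buffer turns out to be too small we take the
 * "need_more" path: free it, grow extp_size and retry, capped at 4KiB.
 */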
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

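        /*
         * The CFI timing fields are log2-encoded: the typical word and
         * buffer write timeouts are 2^n microseconds, the typical block
         * erase timeout is 2^n milliseconds (hence the 1000 <<), and
         * each "Max" field is a further 2^n multiplier on the typical
         * value.  The fixed numbers below are conservative defaults for
         * chips that leave a field zero.
         */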
        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
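
/* Command sets 0x0003 and 0x0200 are handled identically to 0x0001 in
 * this driver, so they simply alias the same probe function. */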
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

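        /*
         * Per the CFI spec, each EraseRegionInfo word packs the number
         * of blocks minus one into its low 16 bits and the block size
         * in units of 256 bytes into its high 16 bits.
         */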
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
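/*
 * chip_ready() is called with chip->mutex held.  It returns 0 once the
 * chip is usable for 'mode' (suspending a pending erase if necessary),
 * -EAGAIN when the caller must re-evaluate after the lock was dropped,
 * or a hard error.  SR.7 (0x80) is the "WSM ready" status bit polled
 * throughout this driver.
 */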
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

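/*
 * put_chip() releases the ownership taken by get_chip(): it hands the
 * shared write engine back to a suspended-erase owner if there is one,
 * resumes a suspended erase (0xd0) where appropriate, and wakes anyone
 * sleeping on chip->wq.
 */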
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
1105
1106 static int __xipram xip_wait_for_operation(
1107                 struct map_info *map, struct flchip *chip,
1108                 unsigned long adr, unsigned int chip_op_time_max)
1109 {
1110         struct cfi_private *cfi = map->fldrv_priv;
1111         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1112         map_word status, OK = CMD(0x80);
1113         unsigned long usec, suspended, start, done;
1114         flstate_t oldstate, newstate;
1115
1116         start = xip_currtime();
1117         usec = chip_op_time_max;
1118         if (usec == 0)
1119                 usec = 500000;
1120         done = 0;
1121
1122         do {
1123                 cpu_relax();
1124                 if (xip_irqpending() && cfip &&
1125                     ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1126                      (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1127                     (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1128                         /*
1129                          * Let's suspend the erase or write operation when
1130                          * supported.  Note that we currently don't try to
1131                          * suspend interleaved chips if there is already
1132                          * another operation suspended (imagine what happens
1133                          * when one chip was already done with the current
1134                          * operation while another chip suspended it, then
1135                          * we resume the whole thing at once).  Yes, it
1136                          * can happen!
1137                          */
1138                         usec -= done;
1139                         map_write(map, CMD(0xb0), adr);
1140                         map_write(map, CMD(0x70), adr);
1141                         suspended = xip_currtime();
1142                         do {
1143                                 if (xip_elapsed_since(suspended) > 100000) {
1144                                         /*
1145                                          * The chip doesn't want to suspend
1146                                          * after waiting for 100 msecs.
1147                                          * This is a critical error but there
1148                                          * is not much we can do here.
1149                                          */
1150                                         return -EIO;
1151                                 }
1152                                 status = map_read(map, adr);
1153                         } while (!map_word_andequal(map, status, OK, OK));
1154
1155                         /* Suspend succeeded */
1156                         oldstate = chip->state;
1157                         if (oldstate == FL_ERASING) {
1158                                 if (!map_word_bitsset(map, status, CMD(0x40)))
1159                                         break;
1160                                 newstate = FL_XIP_WHILE_ERASING;
1161                                 chip->erase_suspended = 1;
1162                         } else {
1163                                 if (!map_word_bitsset(map, status, CMD(0x04)))
1164                                         break;
1165                                 newstate = FL_XIP_WHILE_WRITING;
1166                                 chip->write_suspended = 1;
1167                         }
1168                         chip->state = newstate;
1169                         map_write(map, CMD(0xff), adr);
1170                         (void) map_read(map, adr);
1171                         xip_iprefetch();
1172                         local_irq_enable();
1173                         mutex_unlock(&chip->mutex);
1174                         xip_iprefetch();
1175                         cond_resched();
1176
1177                         /*
1178                          * We're back.  However someone else might have
1179                          * decided to go write to the chip if we are in
1180                          * a suspended erase state.  If so let's wait
1181                          * until it's done.
1182                          */
1183                         mutex_lock(&chip->mutex);
1184                         while (chip->state != newstate) {
1185                                 DECLARE_WAITQUEUE(wait, current);
1186                                 set_current_state(TASK_UNINTERRUPTIBLE);
1187                                 add_wait_queue(&chip->wq, &wait);
1188                                 mutex_unlock(&chip->mutex);
1189                                 schedule();
1190                                 remove_wait_queue(&chip->wq, &wait);
1191                                 mutex_lock(&chip->mutex);
1192                         }
1193                         /* Disallow XIP again */
1194                         local_irq_disable();
1195
1196                         /* Resume the write or erase operation */
1197                         map_write(map, CMD(0xd0), adr);
1198                         map_write(map, CMD(0x70), adr);
1199                         chip->state = oldstate;
1200                         start = xip_currtime();
1201                 } else if (usec >= 1000000/HZ) {
1202                         /*
1203                          * Try to save on CPU power when waiting delay
1204                          * is at least a system timer tick period.
1205                          * No need to be extremely accurate here.
1206                          */
1207                         xip_cpu_idle();
1208                 }
1209                 status = map_read(map, adr);
1210                 done = xip_elapsed_since(start);
1211         } while (!map_word_andequal(map, status, OK, OK)
1212                  && done < usec);
1213
1214         return (done >= usec) ? -ETIME : 0;
1215 }
1216
1217 /*
1218  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1219  * the flash is actively programming or erasing since we have to poll for
1220  * the operation to complete anyway.  We can't do that in a generic way with
1221  * an XIP setup, so do it before the actual flash operation in this case
1222  * and stub it out from INVAL_CACHE_AND_WAIT.
1223  */
1224 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1225         INVALIDATE_CACHED_RANGE(map, from, size)
1226
1227 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1228         xip_wait_for_operation(map, chip, cmd_adr, usec_max)
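
/*
 * Illustration (not compiled; hypothetical names): with XIP enabled the
 * two-phase pattern used by the write/erase paths below expands roughly to
 *
 *	XIP_INVAL_CACHED_RANGE(map, adr, len);	 invalidate up front
 *	map_write(map, cmd, adr);		 kick off the operation
 *	ret = INVAL_CACHE_AND_WAIT(map, chip, adr, adr, len, t, t_max);
 *
 * where the last line is just xip_wait_for_operation(); no invalidation
 * can be done while polling, since the poll loop itself may be executing
 * from this very flash.
 */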
1229
1230 #else
1231
1232 #define xip_disable(map, chip, adr)
1233 #define xip_enable(map, chip, adr)
1234 #define XIP_INVAL_CACHED_RANGE(x...)
1235 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1236
1237 static int inval_cache_and_wait_for_operation(
1238                 struct map_info *map, struct flchip *chip,
1239                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1240                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1241 {
1242         struct cfi_private *cfi = map->fldrv_priv;
1243         map_word status, status_OK = CMD(0x80);
1244         int chip_state = chip->state;
1245         unsigned int timeo, sleep_time, reset_timeo;
1246
1247         mutex_unlock(&chip->mutex);
1248         if (inval_len)
1249                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1250         mutex_lock(&chip->mutex);
1251
1252         timeo = chip_op_time_max;
1253         if (!timeo)
1254                 timeo = 500000;
1255         reset_timeo = timeo;
1256         sleep_time = chip_op_time / 2;
1257
1258         for (;;) {
1259                 if (chip->state != chip_state) {
1260                         /* Someone's suspended the operation: sleep */
1261                         DECLARE_WAITQUEUE(wait, current);
1262                         set_current_state(TASK_UNINTERRUPTIBLE);
1263                         add_wait_queue(&chip->wq, &wait);
1264                         mutex_unlock(&chip->mutex);
1265                         schedule();
1266                         remove_wait_queue(&chip->wq, &wait);
1267                         mutex_lock(&chip->mutex);
1268                         continue;
1269                 }
1270
1271                 status = map_read(map, cmd_adr);
1272                 if (map_word_andequal(map, status, status_OK, status_OK))
1273                         break;
1274
1275                 if (chip->erase_suspended && chip_state == FL_ERASING)  {
1276                         /* Erase suspend occurred while we slept: reset timeout */
1277                         timeo = reset_timeo;
1278                         chip->erase_suspended = 0;
1279                 }
1280                 if (chip->write_suspended && chip_state == FL_WRITING)  {
1281                         /* Write suspend occurred while we slept: reset timeout */
1282                         timeo = reset_timeo;
1283                         chip->write_suspended = 0;
1284                 }
1285                 if (!timeo) {
1286                         map_write(map, CMD(0x70), cmd_adr);
1287                         chip->state = FL_STATUS;
1288                         return -ETIME;
1289                 }
1290
1291                 /* OK, still waiting. Drop the lock, wait a while and retry. */
1292                 mutex_unlock(&chip->mutex);
1293                 if (sleep_time >= 1000000/HZ) {
1294                         /*
1295                          * At least a timer tick's worth of delay
1296                          * remains, so spend it sleeping instead
1297                          * of busy waiting.
1298                          */
1299                         msleep(sleep_time/1000);
1300                         timeo -= sleep_time;
1301                         sleep_time = 1000000/HZ;
1302                 } else {
1303                         udelay(1);
1304                         cond_resched();
1305                         timeo--;
1306                 }
1307                 mutex_lock(&chip->mutex);
1308         }
1309
1310         /* Done and happy. */
1311         chip->state = FL_STATUS;
1312         return 0;
1313 }
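
/*
 * Worked example of the wait strategy above, assuming HZ=1000 (1000us
 * tick): a buffer write with chip_op_time = 2048us starts with
 * sleep_time = 1024us, at least one tick, so the loop does msleep(1) and
 * then clamps sleep_time to one tick per iteration.  A fast word write
 * with chip_op_time = 256us gives sleep_time = 128us, below a tick, so
 * the loop falls back to udelay(1) + cond_resched() busy polling.
 */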
1314
1315 #endif
1316
1317 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1318         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1319
1320
1321 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1322 {
1323         unsigned long cmd_addr;
1324         struct cfi_private *cfi = map->fldrv_priv;
1325         int ret = 0;
1326
1327         adr += chip->start;
1328
1329         /* Ensure cmd read/writes are aligned. */
1330         cmd_addr = adr & ~(map_bankwidth(map)-1);
1331
1332         mutex_lock(&chip->mutex);
1333
1334         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1335
1336         if (!ret) {
1337                 if (chip->state != FL_POINT && chip->state != FL_READY)
1338                         map_write(map, CMD(0xff), cmd_addr);
1339
1340                 chip->state = FL_POINT;
1341                 chip->ref_point_counter++;
1342         }
1343         mutex_unlock(&chip->mutex);
1344
1345         return ret;
1346 }
1347
1348 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1349                 size_t *retlen, void **virt, resource_size_t *phys)
1350 {
1351         struct map_info *map = mtd->priv;
1352         struct cfi_private *cfi = map->fldrv_priv;
1353         unsigned long ofs, last_end = 0;
1354         int chipnum;
1355         int ret = 0;
1356
1357         if (!map->virt)
1358                 return -EINVAL;
1359
1360         /* Now lock the chip(s) to POINT state */
1361
1362         /* ofs: offset within the first chip at which the first read should start */
1363         chipnum = (from >> cfi->chipshift);
1364         ofs = from - (chipnum << cfi->chipshift);
1365
1366         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1367         if (phys)
1368                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1369
1370         while (len) {
1371                 unsigned long thislen;
1372
1373                 if (chipnum >= cfi->numchips)
1374                         break;
1375
1376                 /* We cannot point across chips that are virtually disjoint */
1377                 if (!last_end)
1378                         last_end = cfi->chips[chipnum].start;
1379                 else if (cfi->chips[chipnum].start != last_end)
1380                         break;
1381
1382                 if ((len + ofs -1) >> cfi->chipshift)
1383                         thislen = (1<<cfi->chipshift) - ofs;
1384                 else
1385                         thislen = len;
1386
1387                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1388                 if (ret)
1389                         break;
1390
1391                 *retlen += thislen;
1392                 len -= thislen;
1393
1394                 ofs = 0;
1395                 last_end += 1 << cfi->chipshift;
1396                 chipnum++;
1397         }
1398         return 0;
1399 }
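
/*
 * Usage sketch (hypothetical caller, via the generic MTD API): point
 * hands out a direct mapping of the flash for reads and must be
 * balanced by unpoint, which drops ref_point_counter:
 *
 *	void *virt;
 *	size_t retlen;
 *
 *	if (!mtd_point(mtd, ofs, len, &retlen, &virt, NULL)) {
 *		memcpy(dst, virt, retlen);
 *		mtd_unpoint(mtd, ofs, retlen);
 *	}
 */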
1400
1401 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1402 {
1403         struct map_info *map = mtd->priv;
1404         struct cfi_private *cfi = map->fldrv_priv;
1405         unsigned long ofs;
1406         int chipnum, err = 0;
1407
1408         /* Now release the chip(s) from POINT state */
1409
1410         /* ofs: offset within the first chip at which the first read should start */
1411         chipnum = (from >> cfi->chipshift);
1412         ofs = from - (chipnum <<  cfi->chipshift);
1413
1414         while (len && !err) {
1415                 unsigned long thislen;
1416                 struct flchip *chip;
1417
1418                 if (chipnum >= cfi->numchips)
1419                         break;
1420                 chip = &cfi->chips[chipnum];
1421
1422                 if ((len + ofs -1) >> cfi->chipshift)
1423                         thislen = (1<<cfi->chipshift) - ofs;
1424                 else
1425                         thislen = len;
1426
1427                 mutex_lock(&chip->mutex);
1428                 if (chip->state == FL_POINT) {
1429                         chip->ref_point_counter--;
1430                         if (chip->ref_point_counter == 0)
1431                                 chip->state = FL_READY;
1432                 } else {
1433                         printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
1434                         err = -EINVAL;
1435                 }
1436
1437                 put_chip(map, chip, chip->start);
1438                 mutex_unlock(&chip->mutex);
1439
1440                 len -= thislen;
1441                 ofs = 0;
1442                 chipnum++;
1443         }
1444
1445         return err;
1446 }
1447
1448 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1449 {
1450         unsigned long cmd_addr;
1451         struct cfi_private *cfi = map->fldrv_priv;
1452         int ret;
1453
1454         adr += chip->start;
1455
1456         /* Ensure cmd read/writes are aligned. */
1457         cmd_addr = adr & ~(map_bankwidth(map)-1);
1458
1459         mutex_lock(&chip->mutex);
1460         ret = get_chip(map, chip, cmd_addr, FL_READY);
1461         if (ret) {
1462                 mutex_unlock(&chip->mutex);
1463                 return ret;
1464         }
1465
1466         if (chip->state != FL_POINT && chip->state != FL_READY) {
1467                 map_write(map, CMD(0xff), cmd_addr);
1468
1469                 chip->state = FL_READY;
1470         }
1471
1472         map_copy_from(map, buf, adr, len);
1473
1474         put_chip(map, chip, cmd_addr);
1475
1476         mutex_unlock(&chip->mutex);
1477         return 0;
1478 }
1479
1480 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1481 {
1482         struct map_info *map = mtd->priv;
1483         struct cfi_private *cfi = map->fldrv_priv;
1484         unsigned long ofs;
1485         int chipnum;
1486         int ret = 0;
1487
1488         /* ofs: offset within the first chip at which the first read should start */
1489         chipnum = (from >> cfi->chipshift);
1490         ofs = from - (chipnum <<  cfi->chipshift);
1491
1492         while (len) {
1493                 unsigned long thislen;
1494
1495                 if (chipnum >= cfi->numchips)
1496                         break;
1497
1498                 if ((len + ofs -1) >> cfi->chipshift)
1499                         thislen = (1<<cfi->chipshift) - ofs;
1500                 else
1501                         thislen = len;
1502
1503                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1504                 if (ret)
1505                         break;
1506
1507                 *retlen += thislen;
1508                 len -= thislen;
1509                 buf += thislen;
1510
1511                 ofs = 0;
1512                 chipnum++;
1513         }
1514         return ret;
1515 }
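
/*
 * Worked example of the chip-split arithmetic above (the write paths
 * below use the same scheme): with cfi->chipshift = 23 (8MiB chips), a
 * read of len = 0x200000 at from = 0x7f0000 gives chipnum = 0 and
 * ofs = 0x7f0000.  (len + ofs - 1) >> 23 is non-zero, so the request
 * crosses a chip boundary: thislen = 0x800000 - 0x7f0000 = 0x10000
 * comes from chip 0, then ofs resets to 0 and the remaining 0x1f0000
 * bytes come from chip 1.
 */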
1516
1517 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1518                                      unsigned long adr, map_word datum, int mode)
1519 {
1520         struct cfi_private *cfi = map->fldrv_priv;
1521         map_word status, write_cmd;
1522         int ret=0;
1523
1524         adr += chip->start;
1525
1526         switch (mode) {
1527         case FL_WRITING:
1528                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1529                 break;
1530         case FL_OTP_WRITE:
1531                 write_cmd = CMD(0xc0);
1532                 break;
1533         default:
1534                 return -EINVAL;
1535         }
1536
1537         mutex_lock(&chip->mutex);
1538         ret = get_chip(map, chip, adr, mode);
1539         if (ret) {
1540                 mutex_unlock(&chip->mutex);
1541                 return ret;
1542         }
1543
1544         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1545         ENABLE_VPP(map);
1546         xip_disable(map, chip, adr);
1547         map_write(map, write_cmd, adr);
1548         map_write(map, datum, adr);
1549         chip->state = mode;
1550
1551         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1552                                    adr, map_bankwidth(map),
1553                                    chip->word_write_time,
1554                                    chip->word_write_time_max);
1555         if (ret) {
1556                 xip_enable(map, chip, adr);
1557                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1558                 goto out;
1559         }
1560
1561         /* check for errors */
1562         status = map_read(map, adr);
1563         if (map_word_bitsset(map, status, CMD(0x1a))) {
1564                 unsigned long chipstatus = MERGESTATUS(status);
1565
1566                 /* reset status */
1567                 map_write(map, CMD(0x50), adr);
1568                 map_write(map, CMD(0x70), adr);
1569                 xip_enable(map, chip, adr);
1570
1571                 if (chipstatus & 0x02) {
1572                         ret = -EROFS;
1573                 } else if (chipstatus & 0x08) {
1574                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1575                         ret = -EIO;
1576                 } else {
1577                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1578                         ret = -EINVAL;
1579                 }
1580
1581                 goto out;
1582         }
1583
1584         xip_enable(map, chip, adr);
1585  out:   DISABLE_VPP(map);
1586         put_chip(map, chip, adr);
1587         mutex_unlock(&chip->mutex);
1588         return ret;
1589 }
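
/*
 * Status decoding note for the error path above: CMD(0x1a) tests SR.1
 * (block locked, 0x02), SR.3 (VPP low, 0x08) and SR.4 (program error,
 * 0x10).  MERGESTATUS() folds the status words of an interleaved map
 * together so that an error reported by any one of the chips is seen.
 */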
1590
1591
1592 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1593 {
1594         struct map_info *map = mtd->priv;
1595         struct cfi_private *cfi = map->fldrv_priv;
1596         int ret = 0;
1597         int chipnum;
1598         unsigned long ofs;
1599
1600         chipnum = to >> cfi->chipshift;
1601         ofs = to  - (chipnum << cfi->chipshift);
1602
1603         /* If it's not bus-aligned, do the first byte write */
1604         if (ofs & (map_bankwidth(map)-1)) {
1605                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1606                 int gap = ofs - bus_ofs;
1607                 int n;
1608                 map_word datum;
1609
1610                 n = min_t(int, len, map_bankwidth(map)-gap);
1611                 datum = map_word_ff(map);
1612                 datum = map_word_load_partial(map, datum, buf, gap, n);
1613
1614                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1615                                                bus_ofs, datum, FL_WRITING);
1616                 if (ret)
1617                         return ret;
1618
1619                 len -= n;
1620                 ofs += n;
1621                 buf += n;
1622                 (*retlen) += n;
1623
1624                 if (ofs >> cfi->chipshift) {
1625                         chipnum++;
1626                         ofs = 0;
1627                         if (chipnum == cfi->numchips)
1628                                 return 0;
1629                 }
1630         }
1631
1632         while (len >= map_bankwidth(map)) {
1633                 map_word datum = map_word_load(map, buf);
1634
1635                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1636                                        ofs, datum, FL_WRITING);
1637                 if (ret)
1638                         return ret;
1639
1640                 ofs += map_bankwidth(map);
1641                 buf += map_bankwidth(map);
1642                 (*retlen) += map_bankwidth(map);
1643                 len -= map_bankwidth(map);
1644
1645                 if (ofs >> cfi->chipshift) {
1646                         chipnum++;
1647                         ofs = 0;
1648                         if (chipnum == cfi->numchips)
1649                                 return 0;
1650                 }
1651         }
1652
1653         if (len & (map_bankwidth(map)-1)) {
1654                 map_word datum;
1655
1656                 datum = map_word_ff(map);
1657                 datum = map_word_load_partial(map, datum, buf, 0, len);
1658
1659                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1660                                        ofs, datum, FL_WRITING);
1661                 if (ret)
1662                         return ret;
1663
1664                 (*retlen) += len;
1665         }
1666
1667         return 0;
1668 }
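
/*
 * Illustration of the split performed above on a 32-bit bus
 * (map_bankwidth = 4): writing 7 bytes at offset 0x102 becomes
 *
 *	1. a partial word at 0x100: gap = 2, n = 2, with the pad bytes
 *	   left at 0xff;
 *	2. one full aligned word at 0x104;
 *	3. a trailing partial word at 0x108 with len = 1, again padded
 *	   with 0xff.
 *
 * NOR cells can only be programmed from 1 to 0, so writing 0xff to the
 * padding positions leaves the neighbouring data untouched.
 */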
1669
1670
1671 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1672                                     unsigned long adr, const struct kvec **pvec,
1673                                     unsigned long *pvec_seek, int len)
1674 {
1675         struct cfi_private *cfi = map->fldrv_priv;
1676         map_word status, write_cmd, datum;
1677         unsigned long cmd_adr;
1678         int ret, wbufsize, word_gap, words;
1679         const struct kvec *vec;
1680         unsigned long vec_seek;
1681         unsigned long initial_adr;
1682         int initial_len = len;
1683
1684         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1685         adr += chip->start;
1686         initial_adr = adr;
1687         cmd_adr = adr & ~(wbufsize-1);
1688
1689         /* Sharp LH28F640BF chips need the first address for the
1690          * Page Buffer Program command. See Table 5 of
1691          * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1692         if (is_LH28F640BF(cfi))
1693                 cmd_adr = adr;
1694
1695         /* Let's determine this according to the interleave only once */
1696         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1697
1698         mutex_lock(&chip->mutex);
1699         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1700         if (ret) {
1701                 mutex_unlock(&chip->mutex);
1702                 return ret;
1703         }
1704
1705         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1706         ENABLE_VPP(map);
1707         xip_disable(map, chip, cmd_adr);
1708
1709         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1710            [...], the device will not accept any more Write to Buffer commands".
1711            So we must check here and reset those bits if they're set. Otherwise
1712            we're just pissing in the wind */
1713         if (chip->state != FL_STATUS) {
1714                 map_write(map, CMD(0x70), cmd_adr);
1715                 chip->state = FL_STATUS;
1716         }
1717         status = map_read(map, cmd_adr);
1718         if (map_word_bitsset(map, status, CMD(0x30))) {
1719                 xip_enable(map, chip, cmd_adr);
1720                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1721                 xip_disable(map, chip, cmd_adr);
1722                 map_write(map, CMD(0x50), cmd_adr);
1723                 map_write(map, CMD(0x70), cmd_adr);
1724         }
1725
1726         chip->state = FL_WRITING_TO_BUFFER;
1727         map_write(map, write_cmd, cmd_adr);
1728         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1729         if (ret) {
1730                 /* Argh. Not ready for write to buffer */
1731                 map_word Xstatus = map_read(map, cmd_adr);
1732                 map_write(map, CMD(0x70), cmd_adr);
1733                 chip->state = FL_STATUS;
1734                 status = map_read(map, cmd_adr);
1735                 map_write(map, CMD(0x50), cmd_adr);
1736                 map_write(map, CMD(0x70), cmd_adr);
1737                 xip_enable(map, chip, cmd_adr);
1738                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1739                                 map->name, Xstatus.x[0], status.x[0]);
1740                 goto out;
1741         }
1742
1743         /* Figure out the number of words to write */
1744         word_gap = (-adr & (map_bankwidth(map)-1));
1745         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1746         if (!word_gap) {
1747                 words--;
1748         } else {
1749                 word_gap = map_bankwidth(map) - word_gap;
1750                 adr -= word_gap;
1751                 datum = map_word_ff(map);
1752         }
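
	/*
	 * Example of the gap arithmetic above with map_bankwidth = 4:
	 * adr = 0x206, len = 7 gives an initial word_gap of 2 (bytes up
	 * to the next word boundary) and words = DIV_ROUND_UP(5, 4) = 2
	 * further words beyond the leading partial one; word_gap then
	 * becomes 4 - 2 = 2 and adr is aligned down to 0x204.  CMD(words)
	 * below therefore programs a count of 2, i.e. three words in
	 * total, matching the chip's "word count minus one" encoding.
	 */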
1753
1754         /* Write length of data to come */
1755         map_write(map, CMD(words), cmd_adr);
1756
1757         /* Write data */
1758         vec = *pvec;
1759         vec_seek = *pvec_seek;
1760         do {
1761                 int n = map_bankwidth(map) - word_gap;
1762                 if (n > vec->iov_len - vec_seek)
1763                         n = vec->iov_len - vec_seek;
1764                 if (n > len)
1765                         n = len;
1766
1767                 if (!word_gap && len < map_bankwidth(map))
1768                         datum = map_word_ff(map);
1769
1770                 datum = map_word_load_partial(map, datum,
1771                                               vec->iov_base + vec_seek,
1772                                               word_gap, n);
1773
1774                 len -= n;
1775                 word_gap += n;
1776                 if (!len || word_gap == map_bankwidth(map)) {
1777                         map_write(map, datum, adr);
1778                         adr += map_bankwidth(map);
1779                         word_gap = 0;
1780                 }
1781
1782                 vec_seek += n;
1783                 if (vec_seek == vec->iov_len) {
1784                         vec++;
1785                         vec_seek = 0;
1786                 }
1787         } while (len);
1788         *pvec = vec;
1789         *pvec_seek = vec_seek;
1790
1791         /* GO GO GO */
1792         map_write(map, CMD(0xd0), cmd_adr);
1793         chip->state = FL_WRITING;
1794
1795         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1796                                    initial_adr, initial_len,
1797                                    chip->buffer_write_time,
1798                                    chip->buffer_write_time_max);
1799         if (ret) {
1800                 map_write(map, CMD(0x70), cmd_adr);
1801                 chip->state = FL_STATUS;
1802                 xip_enable(map, chip, cmd_adr);
1803                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1804                 goto out;
1805         }
1806
1807         /* check for errors */
1808         status = map_read(map, cmd_adr);
1809         if (map_word_bitsset(map, status, CMD(0x1a))) {
1810                 unsigned long chipstatus = MERGESTATUS(status);
1811
1812                 /* reset status */
1813                 map_write(map, CMD(0x50), cmd_adr);
1814                 map_write(map, CMD(0x70), cmd_adr);
1815                 xip_enable(map, chip, cmd_adr);
1816
1817                 if (chipstatus & 0x02) {
1818                         ret = -EROFS;
1819                 } else if (chipstatus & 0x08) {
1820                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1821                         ret = -EIO;
1822                 } else {
1823                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1824                         ret = -EINVAL;
1825                 }
1826
1827                 goto out;
1828         }
1829
1830         xip_enable(map, chip, cmd_adr);
1831  out:   DISABLE_VPP(map);
1832         put_chip(map, chip, cmd_adr);
1833         mutex_unlock(&chip->mutex);
1834         return ret;
1835 }
1836
1837 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1838                                 unsigned long count, loff_t to, size_t *retlen)
1839 {
1840         struct map_info *map = mtd->priv;
1841         struct cfi_private *cfi = map->fldrv_priv;
1842         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1843         int ret = 0;
1844         int chipnum;
1845         unsigned long ofs, vec_seek, i;
1846         size_t len = 0;
1847
1848         for (i = 0; i < count; i++)
1849                 len += vecs[i].iov_len;
1850
1851         if (!len)
1852                 return 0;
1853
1854         chipnum = to >> cfi->chipshift;
1855         ofs = to - (chipnum << cfi->chipshift);
1856         vec_seek = 0;
1857
1858         do {
1859                 /* We must not cross write block boundaries */
1860                 int size = wbufsize - (ofs & (wbufsize-1));
1861
1862                 if (size > len)
1863                         size = len;
1864                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1865                                       ofs, &vecs, &vec_seek, size);
1866                 if (ret)
1867                         return ret;
1868
1869                 ofs += size;
1870                 (*retlen) += size;
1871                 len -= size;
1872
1873                 if (ofs >> cfi->chipshift) {
1874                         chipnum++;
1875                         ofs = 0;
1876                         if (chipnum == cfi->numchips)
1877                                 return 0;
1878                 }
1879
1880                 /* Be nice and reschedule with the chip in a usable state for other
1881                    processes. */
1882                 cond_resched();
1883
1884         } while (len);
1885
1886         return 0;
1887 }
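
/*
 * Usage sketch (hypothetical caller): the writev entry point takes an
 * array of kernel iovecs, so a scattered payload can be programmed with
 * one call that still respects write-buffer boundaries:
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len  },
 *		{ .iov_base = body, .iov_len = body_len },
 *	};
 *	size_t retlen;
 *	int err = mtd_writev(mtd, vecs, 2, to, &retlen);
 */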
1888
1889 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1890                                        size_t len, size_t *retlen, const u_char *buf)
1891 {
1892         struct kvec vec;
1893
1894         vec.iov_base = (void *) buf;
1895         vec.iov_len = len;
1896
1897         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1898 }
1899
1900 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1901                                       unsigned long adr, int len, void *thunk)
1902 {
1903         struct cfi_private *cfi = map->fldrv_priv;
1904         map_word status;
1905         int retries = 3;
1906         int ret;
1907
1908         adr += chip->start;
1909
1910  retry:
1911         mutex_lock(&chip->mutex);
1912         ret = get_chip(map, chip, adr, FL_ERASING);
1913         if (ret) {
1914                 mutex_unlock(&chip->mutex);
1915                 return ret;
1916         }
1917
1918         XIP_INVAL_CACHED_RANGE(map, adr, len);
1919         ENABLE_VPP(map);
1920         xip_disable(map, chip, adr);
1921
1922         /* Clear the status register first */
1923         map_write(map, CMD(0x50), adr);
1924
1925         /* Now erase */
1926         map_write(map, CMD(0x20), adr);
1927         map_write(map, CMD(0xD0), adr);
1928         chip->state = FL_ERASING;
1929         chip->erase_suspended = 0;
1930
1931         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1932                                    adr, len,
1933                                    chip->erase_time,
1934                                    chip->erase_time_max);
1935         if (ret) {
1936                 map_write(map, CMD(0x70), adr);
1937                 chip->state = FL_STATUS;
1938                 xip_enable(map, chip, adr);
1939                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1940                 goto out;
1941         }
1942
1943         /* We've broken this before. It doesn't hurt to be safe */
1944         map_write(map, CMD(0x70), adr);
1945         chip->state = FL_STATUS;
1946         status = map_read(map, adr);
1947
1948         /* check for errors */
1949         if (map_word_bitsset(map, status, CMD(0x3a))) {
1950                 unsigned long chipstatus = MERGESTATUS(status);
1951
1952                 /* Reset the error bits */
1953                 map_write(map, CMD(0x50), adr);
1954                 map_write(map, CMD(0x70), adr);
1955                 xip_enable(map, chip, adr);
1956
1957                 if ((chipstatus & 0x30) == 0x30) {
1958                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1959                         ret = -EINVAL;
1960                 } else if (chipstatus & 0x02) {
1961                         /* Protection bit set */
1962                         ret = -EROFS;
1963                 } else if (chipstatus & 0x8) {
1964                         /* Voltage */
1965                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1966                         ret = -EIO;
1967                 } else if (chipstatus & 0x20 && retries--) {
1968                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1969                         DISABLE_VPP(map);
1970                         put_chip(map, chip, adr);
1971                         mutex_unlock(&chip->mutex);
1972                         goto retry;
1973                 } else {
1974                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1975                         ret = -EIO;
1976                 }
1977
1978                 goto out;
1979         }
1980
1981         xip_enable(map, chip, adr);
1982  out:   DISABLE_VPP(map);
1983         put_chip(map, chip, adr);
1984         mutex_unlock(&chip->mutex);
1985         return ret;
1986 }
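
/*
 * Erase status decoding above additionally tests SR.5 (erase error,
 * 0x20), hence the CMD(0x3a) mask.  SR.4 and SR.5 set together (0x30)
 * indicate an invalid command sequence per the Intel status-register
 * definition; a plain SR.5 failure is retried up to three times before
 * the block is given up on.
 */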
1987
1988 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1989 {
1990         unsigned long ofs, len;
1991         int ret;
1992
1993         ofs = instr->addr;
1994         len = instr->len;
1995
1996         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1997         if (ret)
1998                 return ret;
1999
2000         instr->state = MTD_ERASE_DONE;
2001         mtd_erase_callback(instr);
2002
2003         return 0;
2004 }
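
/*
 * Usage sketch (hypothetical caller): an erase is described by a
 * struct erase_info whose addr/len must be block aligned, which
 * cfi_varsize_frob() verifies against the erase regions:
 *
 *	struct erase_info ei = {
 *		.mtd  = mtd,
 *		.addr = block_ofs,
 *		.len  = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &ei);
 */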
2005
2006 static void cfi_intelext_sync (struct mtd_info *mtd)
2007 {
2008         struct map_info *map = mtd->priv;
2009         struct cfi_private *cfi = map->fldrv_priv;
2010         int i;
2011         struct flchip *chip;
2012         int ret = 0;
2013
2014         for (i=0; !ret && i<cfi->numchips; i++) {
2015                 chip = &cfi->chips[i];
2016
2017                 mutex_lock(&chip->mutex);
2018                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2019
2020                 if (!ret) {
2021                         chip->oldstate = chip->state;
2022                         chip->state = FL_SYNCING;
2023                         /* No need to wake_up() on this state change -
2024                          * as the whole point is that nobody can do anything
2025                          * with the chip now anyway.
2026                          */
2027                 }
2028                 mutex_unlock(&chip->mutex);
2029         }
2030
2031         /* Unlock the chips again */
2032
2033         for (i--; i >= 0; i--) {
2034                 chip = &cfi->chips[i];
2035
2036                 mutex_lock(&chip->mutex);
2037
2038                 if (chip->state == FL_SYNCING) {
2039                         chip->state = chip->oldstate;
2040                         chip->oldstate = FL_READY;
2041                         wake_up(&chip->wq);
2042                 }
2043                 mutex_unlock(&chip->mutex);
2044         }
2045 }
2046
2047 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2048                                                 struct flchip *chip,
2049                                                 unsigned long adr,
2050                                                 int len, void *thunk)
2051 {
2052         struct cfi_private *cfi = map->fldrv_priv;
2053         int status, ofs_factor = cfi->interleave * cfi->device_type;
2054
2055         adr += chip->start;
2056         xip_disable(map, chip, adr+(2*ofs_factor));
2057         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2058         chip->state = FL_JEDEC_QUERY;
2059         status = cfi_read_query(map, adr+(2*ofs_factor));
2060         xip_enable(map, chip, 0);
2061         return status;
2062 }
2063
2064 #ifdef DEBUG_LOCK_BITS
2065 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2066                                                 struct flchip *chip,
2067                                                 unsigned long adr,
2068                                                 int len, void *thunk)
2069 {
2070         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2071                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2072         return 0;
2073 }
2074 #endif
2075
2076 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2077 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2078
2079 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2080                                        unsigned long adr, int len, void *thunk)
2081 {
2082         struct cfi_private *cfi = map->fldrv_priv;
2083         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2084         int mdelay;
2085         int ret;
2086
2087         adr += chip->start;
2088
2089         mutex_lock(&chip->mutex);
2090         ret = get_chip(map, chip, adr, FL_LOCKING);
2091         if (ret) {
2092                 mutex_unlock(&chip->mutex);
2093                 return ret;
2094         }
2095
2096         ENABLE_VPP(map);
2097         xip_disable(map, chip, adr);
2098
2099         map_write(map, CMD(0x60), adr);
2100         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2101                 map_write(map, CMD(0x01), adr);
2102                 chip->state = FL_LOCKING;
2103         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2104                 map_write(map, CMD(0xD0), adr);
2105                 chip->state = FL_UNLOCKING;
2106         } else
2107                 BUG();
2108
2109         /*
2110          * If Instant Individual Block Locking is supported, there is no
2111          * need to delay.
2112          */
2113         /*
2114          * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2115          * let's use a max of 1.5 seconds (1500ms) as timeout.
2116          *
2117          * See "Clear Block Lock-Bits Time" on page 40 in
2118          * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2119          * from February 2003
2120          */
2121         mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2122
2123         ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2124         if (ret) {
2125                 map_write(map, CMD(0x70), adr);
2126                 chip->state = FL_STATUS;
2127                 xip_enable(map, chip, adr);
2128                 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2129                 goto out;
2130         }
2131
2132         xip_enable(map, chip, adr);
2133  out:   DISABLE_VPP(map);
2134         put_chip(map, chip, adr);
2135         mutex_unlock(&chip->mutex);
2136         return ret;
2137 }
2138
2139 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2140 {
2141         int ret;
2142
2143 #ifdef DEBUG_LOCK_BITS
2144         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2145                __func__, ofs, (unsigned long long)len);
2146         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2147                 ofs, len, NULL);
2148 #endif
2149
2150         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2151                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2152
2153 #ifdef DEBUG_LOCK_BITS
2154         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2155                __func__, ret);
2156         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2157                 ofs, len, NULL);
2158 #endif
2159
2160         return ret;
2161 }
2162
2163 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2164 {
2165         int ret;
2166
2167 #ifdef DEBUG_LOCK_BITS
2168         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2169                __func__, ofs, (unsigned long long)len);
2170         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2171                 ofs, len, NULL);
2172 #endif
2173
2174         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2175                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2176
2177 #ifdef DEBUG_LOCK_BITS
2178         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2179                __func__, ret);
2180         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2181                 ofs, len, NULL);
2182 #endif
2183
2184         return ret;
2185 }
2186
2187 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2188                                   uint64_t len)
2189 {
2190         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2191                                 ofs, len, NULL) ? 1 : 0;
2192 }
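
/*
 * Usage sketch (hypothetical caller): the three entry points above back
 * mtd_lock(), mtd_unlock() and mtd_is_locked().  Updating a locked
 * sector typically looks like:
 *
 *	if (mtd_is_locked(mtd, ofs, mtd->erasesize) > 0)
 *		mtd_unlock(mtd, ofs, mtd->erasesize);
 *	... erase and program ...
 *	mtd_lock(mtd, ofs, mtd->erasesize);
 */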
2193
2194 #ifdef CONFIG_MTD_OTP
2195
2196 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2197                         u_long data_offset, u_char *buf, u_int size,
2198                         u_long prot_offset, u_int groupno, u_int groupsize);
2199
2200 static int __xipram
2201 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2202             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2203 {
2204         struct cfi_private *cfi = map->fldrv_priv;
2205         int ret;
2206
2207         mutex_lock(&chip->mutex);
2208         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2209         if (ret) {
2210                 mutex_unlock(&chip->mutex);
2211                 return ret;
2212         }
2213
2214         /* let's ensure we're not reading back cached data from array mode */
2215         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2216
2217         xip_disable(map, chip, chip->start);
2218         if (chip->state != FL_JEDEC_QUERY) {
2219                 map_write(map, CMD(0x90), chip->start);
2220                 chip->state = FL_JEDEC_QUERY;
2221         }
2222         map_copy_from(map, buf, chip->start + offset, size);
2223         xip_enable(map, chip, chip->start);
2224
2225         /* then ensure we don't keep OTP data in the cache */
2226         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2227
2228         put_chip(map, chip, chip->start);
2229         mutex_unlock(&chip->mutex);
2230         return 0;
2231 }
2232
2233 static int
2234 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2235              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2236 {
2237         int ret;
2238
2239         while (size) {
2240                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2241                 int gap = offset - bus_ofs;
2242                 int n = min_t(int, size, map_bankwidth(map)-gap);
2243                 map_word datum = map_word_ff(map);
2244
2245                 datum = map_word_load_partial(map, datum, buf, gap, n);
2246                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2247                 if (ret)
2248                         return ret;
2249
2250                 offset += n;
2251                 buf += n;
2252                 size -= n;
2253         }
2254
2255         return 0;
2256 }
2257
2258 static int
2259 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2260             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2261 {
2262         struct cfi_private *cfi = map->fldrv_priv;
2263         map_word datum;
2264
2265         /* make sure area matches group boundaries */
2266         if (size != grpsz)
2267                 return -EXDEV;
2268
2269         datum = map_word_ff(map);
2270         datum = map_word_clr(map, datum, CMD(1 << grpno));
2271         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2272 }
2273
2274 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2275                                  size_t *retlen, u_char *buf,
2276                                  otp_op_t action, int user_regs)
2277 {
2278         struct map_info *map = mtd->priv;
2279         struct cfi_private *cfi = map->fldrv_priv;
2280         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2281         struct flchip *chip;
2282         struct cfi_intelext_otpinfo *otp;
2283         u_long devsize, reg_prot_offset, data_offset;
2284         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2285         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2286         int ret;
2287
2288         *retlen = 0;
2289
2290         /* Check that we actually have some OTP registers */
2291         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2292                 return -ENODATA;
2293
2294         /* we need real chips here, not virtual ones */
2295         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2296         chip_step = devsize >> cfi->chipshift;
2297         chip_num = 0;
2298
2299         /* Some chips have OTP located in the _top_ partition only.
2300            For example: Intel 28F256L18T (T means top-parameter device) */
2301         if (cfi->mfr == CFI_MFR_INTEL) {
2302                 switch (cfi->id) {
2303                 case 0x880b:
2304                 case 0x880c:
2305                 case 0x880d:
2306                         chip_num = chip_step - 1;
2307                 }
2308         }
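
	/*
	 * Example: if the partition fixup earlier in this driver split a
	 * 16MiB physical chip into four 4MiB virtual chips (chipshift =
	 * 22), devsize = 16MiB and chip_step = 4, so the loop below
	 * visits one virtual chip per physical device; the OTP registers
	 * exist only once per real chip.
	 */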
2309
2310         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2311                 chip = &cfi->chips[chip_num];
2312                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2313
2314                 /* first OTP region */
2315                 field = 0;
2316                 reg_prot_offset = extp->ProtRegAddr;
2317                 reg_fact_groups = 1;
2318                 reg_fact_size = 1 << extp->FactProtRegSize;
2319                 reg_user_groups = 1;
2320                 reg_user_size = 1 << extp->UserProtRegSize;
2321
2322                 while (len > 0) {
2323                         /* flash geometry fixup */
2324                         data_offset = reg_prot_offset + 1;
2325                         data_offset *= cfi->interleave * cfi->device_type;
2326                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2327                         reg_fact_size *= cfi->interleave;
2328                         reg_user_size *= cfi->interleave;
2329
2330                         if (user_regs) {
2331                                 groups = reg_user_groups;
2332                                 groupsize = reg_user_size;
2333                                 /* skip over factory reg area */
2334                                 groupno = reg_fact_groups;
2335                                 data_offset += reg_fact_groups * reg_fact_size;
2336                         } else {
2337                                 groups = reg_fact_groups;
2338                                 groupsize = reg_fact_size;
2339                                 groupno = 0;
2340                         }
2341
2342                         while (len > 0 && groups > 0) {
2343                                 if (!action) {
2344                                         /*
2345                                          * Special case: if action is NULL
2346                                          * we fill buf with otp_info records.
2347                                          */
2348                                         struct otp_info *otpinfo;
2349                                         map_word lockword;
2350                                         if (len <= sizeof(struct otp_info))
2351                                                 return -ENOSPC;
2352                                         len -= sizeof(struct otp_info);
2353                                         ret = do_otp_read(map, chip,
2354                                                           reg_prot_offset,
2355                                                           (u_char *)&lockword,
2356                                                           map_bankwidth(map),
2357                                                           0, 0,  0);
2358                                         if (ret)
2359                                                 return ret;
2360                                         otpinfo = (struct otp_info *)buf;
2361                                         otpinfo->start = from;
2362                                         otpinfo->length = groupsize;
2363                                         otpinfo->locked =
2364                                            !map_word_bitsset(map, lockword,
2365                                                              CMD(1 << groupno));
2366                                         from += groupsize;
2367                                         buf += sizeof(*otpinfo);
2368                                         *retlen += sizeof(*otpinfo);
2369                                 } else if (from >= groupsize) {
2370                                         from -= groupsize;
2371                                         data_offset += groupsize;
2372                                 } else {
2373                                         int size = groupsize;
2374                                         data_offset += from;
2375                                         size -= from;
2376                                         from = 0;
2377                                         if (size > len)
2378                                                 size = len;
2379                                         ret = action(map, chip, data_offset,
2380                                                      buf, size, reg_prot_offset,
2381                                                      groupno, groupsize);
2382                                         if (ret < 0)
2383                                                 return ret;
2384                                         buf += size;
2385                                         len -= size;
2386                                         *retlen += size;
2387                                         data_offset += size;
2388                                 }
2389                                 groupno++;
2390                                 groups--;
2391                         }
2392
2393                         /* next OTP region */
2394                         if (++field == extp->NumProtectionFields)
2395                                 break;
2396                         reg_prot_offset = otp->ProtRegAddr;
2397                         reg_fact_groups = otp->FactGroups;
2398                         reg_fact_size = 1 << otp->FactProtRegSize;
2399                         reg_user_groups = otp->UserGroups;
2400                         reg_user_size = 1 << otp->UserProtRegSize;
2401                         otp++;
2402                 }
2403         }
2404
2405         return 0;
2406 }
2407
2408 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2409                                            size_t len, size_t *retlen,
2410                                            u_char *buf)
2411 {
2412         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2413                                      buf, do_otp_read, 0);
2414 }
2415
2416 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2417                                            size_t len, size_t *retlen,
2418                                            u_char *buf)
2419 {
2420         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2421                                      buf, do_otp_read, 1);
2422 }
2423
2424 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2425                                             size_t len, size_t *retlen,
2426                                             u_char *buf)
2427 {
2428         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2429                                      buf, do_otp_write, 1);
2430 }
2431
2432 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2433                                            loff_t from, size_t len)
2434 {
2435         size_t retlen;
2436         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2437                                      NULL, do_otp_lock, 1);
2438 }
2439
2440 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2441                                            size_t *retlen, struct otp_info *buf)
2443 {
2444         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2445                                      NULL, 0);
2446 }
2447
2448 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2449                                            size_t *retlen, struct otp_info *buf)
2450 {
2451         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2452                                      NULL, 1);
2453 }
2454
2455 #endif
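
/*
 * Usage sketch (hypothetical in-kernel caller): user space reaches the
 * OTP helpers above through the MTD OTP ioctls; kernel code would use
 * the mtd_*_prot_reg() wrappers, e.g. to read a factory serial number:
 *
 *	u8 serial[16];
 *	size_t retlen;
 *	int err = mtd_read_fact_prot_reg(mtd, 0, sizeof(serial),
 *					 &retlen, serial);
 */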
2456
2457 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2458 {
2459         struct mtd_erase_region_info *region;
2460         int block, status, i;
2461         unsigned long adr;
2462         size_t len;
2463
2464         for (i = 0; i < mtd->numeraseregions; i++) {
2465                 region = &mtd->eraseregions[i];
2466                 if (!region->lockmap)
2467                         continue;
2468
2469                 for (block = 0; block < region->numblocks; block++){
2470                         len = region->erasesize;
2471                         adr = region->offset + block * len;
2472
2473                         status = cfi_varsize_frob(mtd,
2474                                         do_getlockstatus_oneblock, adr, len, NULL);
2475                         if (status)
2476                                 set_bit(block, region->lockmap);
2477                         else
2478                                 clear_bit(block, region->lockmap);
2479                 }
2480         }
2481 }
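
/*
 * The lockmap holds one bit per erase block: set means the block was
 * locked when we suspended.  cfi_intelext_restore_locks() further down
 * walks the clear bits and unlocks exactly those blocks after resume,
 * since a power-up-locking part comes back with every block locked.
 */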
2482
2483 static int cfi_intelext_suspend(struct mtd_info *mtd)
2484 {
2485         struct map_info *map = mtd->priv;
2486         struct cfi_private *cfi = map->fldrv_priv;
2487         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2488         int i;
2489         struct flchip *chip;
2490         int ret = 0;
2491
2492         if ((mtd->flags & MTD_POWERUP_LOCK)
2493             && extp && (extp->FeatureSupport & (1 << 5)))
2494                 cfi_intelext_save_locks(mtd);
2495
2496         for (i=0; !ret && i<cfi->numchips; i++) {
2497                 chip = &cfi->chips[i];
2498
2499                 mutex_lock(&chip->mutex);
2500
2501                 switch (chip->state) {
2502                 case FL_READY:
2503                 case FL_STATUS:
2504                 case FL_CFI_QUERY:
2505                 case FL_JEDEC_QUERY:
2506                         if (chip->oldstate == FL_READY) {
2507                                 /* place the chip in a known state before suspend */
2508                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2509                                 chip->oldstate = chip->state;
2510                                 chip->state = FL_PM_SUSPENDED;
2511                                 /* No need to wake_up() on this state change -
2512                                  * as the whole point is that nobody can do anything
2513                                  * with the chip now anyway.
2514                                  */
2515                         } else {
2516                                 /* There seems to be an operation pending. We must wait for it. */
2517                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2518                                 ret = -EAGAIN;
2519                         }
2520                         break;
2521                 default:
2522                         /* Should we actually wait? Once upon a time these routines weren't
2523                            allowed to. Or should we return -EAGAIN, because the upper layers
2524                            ought to have already shut down anything which was using the device
2525                            anyway? The latter for now. */
2526                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2527                         ret = -EAGAIN;
2528                 case FL_PM_SUSPENDED:   /* fall through from default */
2529                         break;
2530                 }
2531                 mutex_unlock(&chip->mutex);
2532         }
2533
2534         /* Unlock the chips again */
2535
2536         if (ret) {
2537                 for (i--; i >= 0; i--) {
2538                         chip = &cfi->chips[i];
2539
2540                         mutex_lock(&chip->mutex);
2541
2542                         if (chip->state == FL_PM_SUSPENDED) {
2543                                 /* No need to force it into a known state here,
2544                                    because we're returning failure, and it didn't
2545                                    get power cycled */
2546                                 chip->state = chip->oldstate;
2547                                 chip->oldstate = FL_READY;
2548                                 wake_up(&chip->wq);
2549                         }
2550                         mutex_unlock(&chip->mutex);
2551                 }
2552         }
2553
2554         return ret;
2555 }
2556
2557 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2558 {
2559         struct mtd_erase_region_info *region;
2560         int block, i;
2561         unsigned long adr;
2562         size_t len;
2563
2564         for (i = 0; i < mtd->numeraseregions; i++) {
2565                 region = &mtd->eraseregions[i];
2566                 if (!region->lockmap)
2567                         continue;
2568
2569                 for_each_clear_bit(block, region->lockmap, region->numblocks) {
2570                         len = region->erasesize;
2571                         adr = region->offset + block * len;
2572                         cfi_intelext_unlock(mtd, adr, len);
2573                 }
2574         }
2575 }
2576
2577 static void cfi_intelext_resume(struct mtd_info *mtd)
2578 {
2579         struct map_info *map = mtd->priv;
2580         struct cfi_private *cfi = map->fldrv_priv;
2581         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2582         int i;
2583         struct flchip *chip;
2584
2585         for (i=0; i<cfi->numchips; i++) {
2586
2587                 chip = &cfi->chips[i];
2588
2589                 mutex_lock(&chip->mutex);
2590
2591                 /* Go to known state. Chip may have been power cycled */
2592                 if (chip->state == FL_PM_SUSPENDED) {
2593                         /* Refresh LH28F640BF Partition Config. Register */
2594                         fixup_LH28F640BF(mtd);
2595                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2596                         chip->oldstate = chip->state = FL_READY;
2597                         wake_up(&chip->wq);
2598                 }
2599
2600                 mutex_unlock(&chip->mutex);
2601         }
2602
2603         if ((mtd->flags & MTD_POWERUP_LOCK)
2604             && extp && (extp->FeatureSupport & (1 << 5)))
2605                 cfi_intelext_restore_locks(mtd);
2606 }
2607
2608 static int cfi_intelext_reset(struct mtd_info *mtd)
2609 {
2610         struct map_info *map = mtd->priv;
2611         struct cfi_private *cfi = map->fldrv_priv;
2612         int i, ret;
2613
2614         for (i=0; i < cfi->numchips; i++) {
2615                 struct flchip *chip = &cfi->chips[i];
2616
2617                 /* force the completion of any ongoing operation
2618                    and switch to array mode so any bootloader in
2619                    flash is accessible for soft reboot. */
2620                 mutex_lock(&chip->mutex);
2621                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2622                 if (!ret) {
2623                         map_write(map, CMD(0xff), chip->start);
2624                         chip->state = FL_SHUTDOWN;
2625                         put_chip(map, chip, chip->start);
2626                 }
2627                 mutex_unlock(&chip->mutex);
2628         }
2629
2630         return 0;
2631 }
2632
2633 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2634                                void *v)
2635 {
2636         struct mtd_info *mtd;
2637
2638         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2639         cfi_intelext_reset(mtd);
2640         return NOTIFY_DONE;
2641 }
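
/*
 * Sketch of how the notifier above is wired up (the registration itself
 * happens in the probe path earlier in this file, assuming the usual
 * reboot-notifier pattern):
 *
 *	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
 *	register_reboot_notifier(&mtd->reboot_notifier);
 */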
2642
2643 static void cfi_intelext_destroy(struct mtd_info *mtd)
2644 {
2645         struct map_info *map = mtd->priv;
2646         struct cfi_private *cfi = map->fldrv_priv;
2647         struct mtd_erase_region_info *region;
2648         int i;
2649         cfi_intelext_reset(mtd);
2650         unregister_reboot_notifier(&mtd->reboot_notifier);
2651         kfree(cfi->cmdset_priv);
2652         kfree(cfi->cfiq);
2653         kfree(cfi->chips[0].priv);
2654         kfree(cfi);
2655         for (i = 0; i < mtd->numeraseregions; i++) {
2656                 region = &mtd->eraseregions[i];
2657                 kfree(region->lockmap);
2658         }
2659         kfree(mtd->eraseregions);
2660 }
2661
2662 MODULE_LICENSE("GPL");
2663 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2664 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2665 MODULE_ALIAS("cfi_cmdset_0003");
2666 MODULE_ALIAS("cfi_cmdset_0200");