1 /*
2  * drivers/mtd/nand/pxa3xx_nand.c
3  *
4  * Copyright © 2005 Intel Corporation
5  * Copyright © 2006 Marvell International Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma/pxa-dma.h>
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/nand.h>
25 #include <linux/mtd/partitions.h>
26 #include <linux/io.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
30 #include <linux/of.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
33
34 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
35 #define ARCH_HAS_DMA
36 #endif
37
38 #include <linux/platform_data/mtd-nand-pxa3xx.h>
39
40 #define CHIP_DELAY_TIMEOUT      msecs_to_jiffies(200)
41 #define NAND_STOP_DELAY         msecs_to_jiffies(40)
42 #define PAGE_CHUNK_SIZE         (2048)
43
44 /*
45  * Define a buffer size for the initial command that detects the flash device:
46  * STATUS, READID and PARAM.
47  * ONFI param page is 256 bytes, and there are three redundant copies
48  * to be read. JEDEC param page is 512 bytes, and there are also three
49  * redundant copies to be read.
50  * Hence this buffer should be at least 512 x 3 = 1536 bytes. Let's pick 2048.
51  */
52 #define INIT_BUFFER_SIZE        2048
53
54 /* registers and bit definitions */
55 #define NDCR            (0x00) /* Control register */
56 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
57 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
58 #define NDSR            (0x14) /* Status Register */
59 #define NDPCR           (0x18) /* Page Count Register */
60 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
61 #define NDBDR1          (0x20) /* Bad Block Register 1 */
62 #define NDECCCTRL       (0x28) /* ECC control */
63 #define NDDB            (0x40) /* Data Buffer */
64 #define NDCB0           (0x48) /* Command Buffer0 */
65 #define NDCB1           (0x4C) /* Command Buffer1 */
66 #define NDCB2           (0x50) /* Command Buffer2 */
67
68 #define NDCR_SPARE_EN           (0x1 << 31)
69 #define NDCR_ECC_EN             (0x1 << 30)
70 #define NDCR_DMA_EN             (0x1 << 29)
71 #define NDCR_ND_RUN             (0x1 << 28)
72 #define NDCR_DWIDTH_C           (0x1 << 27)
73 #define NDCR_DWIDTH_M           (0x1 << 26)
74 #define NDCR_PAGE_SZ            (0x1 << 24)
75 #define NDCR_NCSX               (0x1 << 23)
76 #define NDCR_ND_MODE            (0x3 << 21)
77 #define NDCR_NAND_MODE          (0x0)
78 #define NDCR_CLR_PG_CNT         (0x1 << 20)
79 #define NFCV1_NDCR_ARB_CNTL     (0x1 << 19)
80 #define NFCV2_NDCR_STOP_ON_UNCOR        (0x1 << 19)
81 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
82 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
83
84 #define NDCR_RA_START           (0x1 << 15)
85 #define NDCR_PG_PER_BLK         (0x1 << 14)
86 #define NDCR_ND_ARB_EN          (0x1 << 12)
87 #define NDCR_INT_MASK           (0xFFF)
88
89 #define NDSR_MASK               (0xfff)
90 #define NDSR_ERR_CNT_OFF        (16)
91 #define NDSR_ERR_CNT_MASK       (0x1f)
92 #define NDSR_ERR_CNT(sr)        (((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
93 #define NDSR_RDY                (0x1 << 12)
94 #define NDSR_FLASH_RDY          (0x1 << 11)
95 #define NDSR_CS0_PAGED          (0x1 << 10)
96 #define NDSR_CS1_PAGED          (0x1 << 9)
97 #define NDSR_CS0_CMDD           (0x1 << 8)
98 #define NDSR_CS1_CMDD           (0x1 << 7)
99 #define NDSR_CS0_BBD            (0x1 << 6)
100 #define NDSR_CS1_BBD            (0x1 << 5)
101 #define NDSR_UNCORERR           (0x1 << 4)
102 #define NDSR_CORERR             (0x1 << 3)
103 #define NDSR_WRDREQ             (0x1 << 2)
104 #define NDSR_RDDREQ             (0x1 << 1)
105 #define NDSR_WRCMDREQ           (0x1)
106
107 #define NDCB0_LEN_OVRD          (0x1 << 28)
108 #define NDCB0_ST_ROW_EN         (0x1 << 26)
109 #define NDCB0_AUTO_RS           (0x1 << 25)
110 #define NDCB0_CSEL              (0x1 << 24)
111 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
112 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
113 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
114 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
115 #define NDCB0_NC                (0x1 << 20)
116 #define NDCB0_DBC               (0x1 << 19)
117 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
118 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
119 #define NDCB0_CMD2_MASK         (0xff << 8)
120 #define NDCB0_CMD1_MASK         (0xff)
121 #define NDCB0_ADDR_CYC_SHIFT    (16)
122
123 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
124 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
125 #define EXT_CMD_TYPE_READ       4 /* Read */
126 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
127 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
128 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
129 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
130
131 /*
132  * This should be large enough to read 'ONFI' and 'JEDEC'.
133  * Let's use 7 bytes, which is the maximum ID count supported
134  * by the controller (see NDCR_RD_ID_CNT_MASK).
135  */
136 #define READ_ID_BYTES           7
137
138 /* macros for registers read/write */
139 #define nand_writel(info, off, val)     \
140         writel_relaxed((val), (info)->mmio_base + (off))
141
142 #define nand_readl(info, off)           \
143         readl_relaxed((info)->mmio_base + (off))
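
/*
 * The _relaxed accessors skip the heavyweight memory barriers of plain
 * writel()/readl(); this is presumably safe here because register accesses
 * are ordered by the command/IRQ handshake and the DMA completion callback
 * rather than by barriers.
 */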
144
145 /* error code and state */
146 enum {
147         ERR_NONE        = 0,
148         ERR_DMABUSERR   = -1,
149         ERR_SENDCMD     = -2,
150         ERR_UNCORERR    = -3,
151         ERR_BBERR       = -4,
152         ERR_CORERR      = -5,
153 };
154
155 enum {
156         STATE_IDLE = 0,
157         STATE_PREPARED,
158         STATE_CMD_HANDLE,
159         STATE_DMA_READING,
160         STATE_DMA_WRITING,
161         STATE_DMA_DONE,
162         STATE_PIO_READING,
163         STATE_PIO_WRITING,
164         STATE_CMD_DONE,
165         STATE_READY,
166 };
167
168 enum pxa3xx_nand_variant {
169         PXA3XX_NAND_VARIANT_PXA,
170         PXA3XX_NAND_VARIANT_ARMADA370,
171 };
172
173 struct pxa3xx_nand_host {
174         struct nand_chip        chip;
175         struct mtd_info         *mtd;
176         void                    *info_data;
177
178         /* page size of attached chip */
179         int                     use_ecc;
180         int                     cs;
181
182         /* calculated from pxa3xx_nand_flash data */
183         unsigned int            col_addr_cycles;
184         unsigned int            row_addr_cycles;
185 };
186
187 struct pxa3xx_nand_info {
188         struct nand_hw_control  controller;
189         struct platform_device   *pdev;
190
191         struct clk              *clk;
192         void __iomem            *mmio_base;
193         unsigned long           mmio_phys;
194         struct completion       cmd_complete, dev_ready;
195
196         unsigned int            buf_start;
197         unsigned int            buf_count;
198         unsigned int            buf_size;
199         unsigned int            data_buff_pos;
200         unsigned int            oob_buff_pos;
201
202         /* DMA information */
203         struct scatterlist      sg;
204         enum dma_data_direction dma_dir;
205         struct dma_chan         *dma_chan;
206         dma_cookie_t            dma_cookie;
207         int                     drcmr_dat;
208         int                     drcmr_cmd;
209
210         unsigned char           *data_buff;
211         unsigned char           *oob_buff;
212         dma_addr_t              data_buff_phys;
213         int                     data_dma_ch;
214
215         struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
216         unsigned int            state;
217
218         /*
219          * This driver supports NFCv1 (as found in PXA SoC)
220          * and NFCv2 (as found in Armada 370/XP SoC).
221          */
222         enum pxa3xx_nand_variant variant;
223
224         int                     cs;
225         int                     use_ecc;        /* use HW ECC ? */
226         int                     ecc_bch;        /* using BCH ECC? */
227         int                     use_dma;        /* use DMA ? */
228         int                     use_spare;      /* use spare ? */
229         int                     need_wait;
230
231         unsigned int            data_size;      /* data to be read from FIFO */
232         unsigned int            chunk_size;     /* split commands chunk size */
233         unsigned int            oob_size;
234         unsigned int            spare_size;
235         unsigned int            ecc_size;
236         unsigned int            ecc_err_cnt;
237         unsigned int            max_bitflips;
238         int                     retcode;
239
240         /* cached register value */
241         uint32_t                reg_ndcr;
242         uint32_t                ndtr0cs0;
243         uint32_t                ndtr1cs0;
244
245         /* generated NDCBx register values */
246         uint32_t                ndcb0;
247         uint32_t                ndcb1;
248         uint32_t                ndcb2;
249         uint32_t                ndcb3;
250 };
251
252 static bool use_dma = true;
253 module_param(use_dma, bool, 0444);
254 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
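
/*
 * Usage note: when built as a module, DMA can be disabled with
 * "modprobe pxa3xx_nand use_dma=0"; when built in, passing
 * "pxa3xx_nand.use_dma=0" on the kernel command line has the same effect.
 */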
255
256 struct pxa3xx_nand_timing {
257         unsigned int    tCH;  /* Enable signal hold time */
258         unsigned int    tCS;  /* Enable signal setup time */
259         unsigned int    tWH;  /* ND_nWE high duration */
260         unsigned int    tWP;  /* ND_nWE pulse time */
261         unsigned int    tRH;  /* ND_nRE high duration */
262         unsigned int    tRP;  /* ND_nRE pulse width */
263         unsigned int    tR;   /* ND_nWE high to ND_nRE low for read */
264         unsigned int    tWHR; /* ND_nWE high to ND_nRE low for status read */
265         unsigned int    tAR;  /* ND_ALE low to ND_nRE low delay */
266 };
267
268 struct pxa3xx_nand_flash {
269         uint32_t        chip_id;
270         unsigned int    flash_width;    /* Width of Flash memory (DWIDTH_M) */
271         unsigned int    dfc_width;      /* Width of flash controller(DWIDTH_C) */
272         struct pxa3xx_nand_timing *timing;      /* NAND Flash timing */
273 };
274
275 static struct pxa3xx_nand_timing timing[] = {
276         { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
277         { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
278         { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
279         { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
280 };
281
282 static struct pxa3xx_nand_flash builtin_flash_types[] = {
283         { 0x46ec, 16, 16, &timing[1] },
284         { 0xdaec,  8,  8, &timing[1] },
285         { 0xd7ec,  8,  8, &timing[1] },
286         { 0xa12c,  8,  8, &timing[2] },
287         { 0xb12c, 16, 16, &timing[2] },
288         { 0xdc2c,  8,  8, &timing[2] },
289         { 0xcc2c, 16, 16, &timing[2] },
290         { 0xba20, 16, 16, &timing[3] },
291 };
292
293 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
294 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
295
296 static struct nand_bbt_descr bbt_main_descr = {
297         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
298                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
299         .offs = 8,
300         .len = 6,
301         .veroffs = 14,
302         .maxblocks = 8,         /* Last 8 blocks in each chip */
303         .pattern = bbt_pattern
304 };
305
306 static struct nand_bbt_descr bbt_mirror_descr = {
307         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
308                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
309         .offs = 8,
310         .len = 6,
311         .veroffs = 14,
312         .maxblocks = 8,         /* Last 8 blocks in each chip */
313         .pattern = bbt_mirror_pattern
314 };
315
316 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
317         .eccbytes = 32,
318         .eccpos = {
319                 32, 33, 34, 35, 36, 37, 38, 39,
320                 40, 41, 42, 43, 44, 45, 46, 47,
321                 48, 49, 50, 51, 52, 53, 54, 55,
322                 56, 57, 58, 59, 60, 61, 62, 63},
323         .oobfree = { {2, 30} }
324 };
325
326 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
327         .eccbytes = 64,
328         .eccpos = {
329                 32,  33,  34,  35,  36,  37,  38,  39,
330                 40,  41,  42,  43,  44,  45,  46,  47,
331                 48,  49,  50,  51,  52,  53,  54,  55,
332                 56,  57,  58,  59,  60,  61,  62,  63,
333                 96,  97,  98,  99,  100, 101, 102, 103,
334                 104, 105, 106, 107, 108, 109, 110, 111,
335                 112, 113, 114, 115, 116, 117, 118, 119,
336                 120, 121, 122, 123, 124, 125, 126, 127},
337         /* Bootrom looks in bytes 0 & 5 for bad blocks */
338         .oobfree = { {6, 26}, { 64, 32} }
339 };
340
341 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
342         .eccbytes = 128,
343         .eccpos = {
344                 32,  33,  34,  35,  36,  37,  38,  39,
345                 40,  41,  42,  43,  44,  45,  46,  47,
346                 48,  49,  50,  51,  52,  53,  54,  55,
347                 56,  57,  58,  59,  60,  61,  62,  63},
348         .oobfree = { }
349 };
350
351 #define NDTR0_tCH(c)    (min((c), 7) << 19)
352 #define NDTR0_tCS(c)    (min((c), 7) << 16)
353 #define NDTR0_tWH(c)    (min((c), 7) << 11)
354 #define NDTR0_tWP(c)    (min((c), 7) << 8)
355 #define NDTR0_tRH(c)    (min((c), 7) << 3)
356 #define NDTR0_tRP(c)    (min((c), 7) << 0)
357
358 #define NDTR1_tR(c)     (min((c), 65535) << 16)
359 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
360 #define NDTR1_tAR(c)    (min((c), 15) << 0)
361
362 /* convert nanoseconds to NAND flash controller clock cycles */
363 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
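
/*
 * Worked example (clock value chosen only for illustration): with a
 * 156 MHz controller clock, ns2cycle(40, 156000000) = 40 * 156 / 1000
 * = 6 cycles. Note that the integer division truncates.
 */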
364
365 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
366         {
367                 .compatible = "marvell,pxa3xx-nand",
368                 .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
369         },
370         {
371                 .compatible = "marvell,armada370-nand",
372                 .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
373         },
374         {}
375 };
376 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
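
/*
 * A minimal device-tree node matching this table might look as follows
 * (a sketch only; see the pxa3xx-nand binding document for the
 * authoritative property list):
 *
 *      nand@43100000 {
 *              compatible = "marvell,pxa3xx-nand";
 *              reg = <0x43100000 90>;
 *              interrupts = <45>;
 *              dmas = <&pdma 97 0>;
 *              dma-names = "data";
 *              num-cs = <1>;
 *      };
 */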
377
378 static enum pxa3xx_nand_variant
379 pxa3xx_nand_get_variant(struct platform_device *pdev)
380 {
381         const struct of_device_id *of_id =
382                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
383         if (!of_id)
384                 return PXA3XX_NAND_VARIANT_PXA;
385         return (enum pxa3xx_nand_variant)of_id->data;
386 }
387
388 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
389                                    const struct pxa3xx_nand_timing *t)
390 {
391         struct pxa3xx_nand_info *info = host->info_data;
392         unsigned long nand_clk = clk_get_rate(info->clk);
393         uint32_t ndtr0, ndtr1;
394
395         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
396                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
397                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
398                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
399                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
400                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
401
402         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
403                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
404                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
405
406         info->ndtr0cs0 = ndtr0;
407         info->ndtr1cs0 = ndtr1;
408         nand_writel(info, NDTR0CS0, ndtr0);
409         nand_writel(info, NDTR1CS0, ndtr1);
410 }
411
412 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
413                                        const struct nand_sdr_timings *t)
414 {
415         struct pxa3xx_nand_info *info = host->info_data;
416         struct nand_chip *chip = &host->chip;
417         unsigned long nand_clk = clk_get_rate(info->clk);
418         uint32_t ndtr0, ndtr1;
419
420         u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
421         u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
422         u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
423         u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
424         u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
425         u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
426         u32 tR = chip->chip_delay * 1000;
427         u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
428         u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
429
430         /* fallback to a default value if tR = 0 */
431         if (!tR)
432                 tR = 20000;
433
434         ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
435                 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
436                 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
437                 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
438                 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
439                 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
440
441         ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
442                 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
443                 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
444
445         info->ndtr0cs0 = ndtr0;
446         info->ndtr1cs0 = ndtr1;
447         nand_writel(info, NDTR0CS0, ndtr0);
448         nand_writel(info, NDTR1CS0, ndtr1);
449 }
450
451 static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
452                                            unsigned int *flash_width,
453                                            unsigned int *dfc_width)
454 {
455         struct nand_chip *chip = &host->chip;
456         struct pxa3xx_nand_info *info = host->info_data;
457         const struct pxa3xx_nand_flash *f = NULL;
458         int i, id, ntypes;
459
460         ntypes = ARRAY_SIZE(builtin_flash_types);
461
462         chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);
463
464         id = chip->read_byte(host->mtd);
465         id |= chip->read_byte(host->mtd) << 0x8;
466
467         for (i = 0; i < ntypes; i++) {
468                 f = &builtin_flash_types[i];
469
470                 if (f->chip_id == id)
471                         break;
472         }
473
474         if (i == ntypes) {
475                 dev_err(&info->pdev->dev, "Error: timings not found\n");
476                 return -EINVAL;
477         }
478
479         pxa3xx_nand_set_timing(host, f->timing);
480
481         *flash_width = f->flash_width;
482         *dfc_width = f->dfc_width;
483
484         return 0;
485 }
486
487 static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
488                                          int mode)
489 {
490         const struct nand_sdr_timings *timings;
491
492         mode = fls(mode) - 1;
493         if (mode < 0)
494                 mode = 0;
495
496         timings = onfi_async_timing_mode_to_sdr_timings(mode);
497         if (IS_ERR(timings))
498                 return PTR_ERR(timings);
499
500         pxa3xx_nand_set_sdr_timing(host, timings);
501
502         return 0;
503 }
504
505 static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
506 {
507         struct nand_chip *chip = &host->chip;
508         struct pxa3xx_nand_info *info = host->info_data;
509         unsigned int flash_width = 0, dfc_width = 0;
510         int mode, err;
511
512         mode = onfi_get_async_timing_mode(chip);
513         if (mode == ONFI_TIMING_MODE_UNKNOWN) {
514                 err = pxa3xx_nand_init_timings_compat(host, &flash_width,
515                                                       &dfc_width);
516                 if (err)
517                         return err;
518
519                 if (flash_width == 16) {
520                         info->reg_ndcr |= NDCR_DWIDTH_M;
521                         chip->options |= NAND_BUSWIDTH_16;
522                 }
523
524                 info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
525         } else {
526                 err = pxa3xx_nand_init_timings_onfi(host, mode);
527                 if (err)
528                         return err;
529         }
530
531         return 0;
532 }
533
534 /*
535  * Set the data and OOB size, depending on the selected
536  * spare and ECC configuration.
537  * Only applicable to READ0, READOOB and PAGEPROG commands.
538  */
539 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
540                                 struct mtd_info *mtd)
541 {
542         int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
543
544         info->data_size = mtd->writesize;
545         if (!oob_enable)
546                 return;
547
548         info->oob_size = info->spare_size;
549         if (!info->use_ecc)
550                 info->oob_size += info->ecc_size;
551 }
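
/*
 * Example, using the 2 KiB layout set up by pxa_ecc_init() (chunk 2048,
 * spare 40, ECC 24): data_size = 2048 and, with the spare area enabled,
 * oob_size = 40 when HW ECC is on, or 40 + 24 = 64 when it is off (the
 * raw ECC bytes are then transferred as well).
 */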
552
553 /*
554  * NOTE: ND_RUN must be set first, and only then may the command
555  * buffer be written; otherwise the controller does not work.
556  * We enable all the interrupts at the same time and let
557  * pxa3xx_nand_irq() handle all the logic.
558  */
559 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
560 {
561         uint32_t ndcr;
562
563         ndcr = info->reg_ndcr;
564
565         if (info->use_ecc) {
566                 ndcr |= NDCR_ECC_EN;
567                 if (info->ecc_bch)
568                         nand_writel(info, NDECCCTRL, 0x1);
569         } else {
570                 ndcr &= ~NDCR_ECC_EN;
571                 if (info->ecc_bch)
572                         nand_writel(info, NDECCCTRL, 0x0);
573         }
574
575         if (info->use_dma)
576                 ndcr |= NDCR_DMA_EN;
577         else
578                 ndcr &= ~NDCR_DMA_EN;
579
580         if (info->use_spare)
581                 ndcr |= NDCR_SPARE_EN;
582         else
583                 ndcr &= ~NDCR_SPARE_EN;
584
585         ndcr |= NDCR_ND_RUN;
586
587         /* clear status bits and run */
588         nand_writel(info, NDSR, NDSR_MASK);
589         nand_writel(info, NDCR, 0);
590         nand_writel(info, NDCR, ndcr);
591 }
592
593 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
594 {
595         uint32_t ndcr;
596         int timeout = NAND_STOP_DELAY;
597
598         /* wait for the RUN bit in NDCR to become 0 */
599         ndcr = nand_readl(info, NDCR);
600         while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
601                 ndcr = nand_readl(info, NDCR);
602                 udelay(1);
603         }
604
605         if (timeout <= 0) {
606                 ndcr &= ~NDCR_ND_RUN;
607                 nand_writel(info, NDCR, ndcr);
608         }
609         if (info->dma_chan)
610                 dmaengine_terminate_all(info->dma_chan);
611
612         /* clear status bits */
613         nand_writel(info, NDSR, NDSR_MASK);
614 }
615
616 static void __maybe_unused
617 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
618 {
619         uint32_t ndcr;
620
621         ndcr = nand_readl(info, NDCR);
622         nand_writel(info, NDCR, ndcr & ~int_mask);
623 }
624
625 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
626 {
627         uint32_t ndcr;
628
629         ndcr = nand_readl(info, NDCR);
630         nand_writel(info, NDCR, ndcr | int_mask);
631 }
632
633 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
634 {
635         if (info->ecc_bch) {
636                 u32 val;
637                 int ret;
638
639                 /*
640                  * According to the datasheet, when reading from NDDB
641                  * with BCH enabled, after each 32-byte read we
642                  * have to make sure that the NDSR.RDDREQ bit is set.
643                  *
644                  * Drain the FIFO eight 32-bit reads at a time, and skip
645                  * the polling on the last read.
646                  */
647                 while (len > 8) {
648                         ioread32_rep(info->mmio_base + NDDB, data, 8);
649
650                         ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
651                                                          val & NDSR_RDDREQ, 1000, 5000);
652                         if (ret) {
653                                 dev_err(&info->pdev->dev,
654                                         "Timeout on RDDREQ while draining the FIFO\n");
655                                 return;
656                         }
657
658                         data += 32;
659                         len -= 8;
660                 }
661         }
662
663         ioread32_rep(info->mmio_base + NDDB, data, len);
664 }
665
666 static void handle_data_pio(struct pxa3xx_nand_info *info)
667 {
668         unsigned int do_bytes = min(info->data_size, info->chunk_size);
669
670         switch (info->state) {
671         case STATE_PIO_WRITING:
672                 writesl(info->mmio_base + NDDB,
673                         info->data_buff + info->data_buff_pos,
674                         DIV_ROUND_UP(do_bytes, 4));
675
676                 if (info->oob_size > 0)
677                         writesl(info->mmio_base + NDDB,
678                                 info->oob_buff + info->oob_buff_pos,
679                                 DIV_ROUND_UP(info->oob_size, 4));
680                 break;
681         case STATE_PIO_READING:
682                 drain_fifo(info,
683                            info->data_buff + info->data_buff_pos,
684                            DIV_ROUND_UP(do_bytes, 4));
685
686                 if (info->oob_size > 0)
687                         drain_fifo(info,
688                                    info->oob_buff + info->oob_buff_pos,
689                                    DIV_ROUND_UP(info->oob_size, 4));
690                 break;
691         default:
692                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
693                                 info->state);
694                 BUG();
695         }
696
697         /* Update buffer pointers for multi-page read/write */
698         info->data_buff_pos += do_bytes;
699         info->oob_buff_pos += info->oob_size;
700         info->data_size -= do_bytes;
701 }
702
703 static void pxa3xx_nand_data_dma_irq(void *data)
704 {
705         struct pxa3xx_nand_info *info = data;
706         struct dma_tx_state state;
707         enum dma_status status;
708
709         status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
710         if (likely(status == DMA_COMPLETE)) {
711                 info->state = STATE_DMA_DONE;
712         } else {
713                 dev_err(&info->pdev->dev, "DMA error on data channel\n");
714                 info->retcode = ERR_DMABUSERR;
715         }
716         dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
717
718         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
719         enable_int(info, NDCR_INT_MASK);
720 }
721
722 static void start_data_dma(struct pxa3xx_nand_info *info)
723 {
724         enum dma_transfer_direction direction;
725         struct dma_async_tx_descriptor *tx;
726
727         switch (info->state) {
728         case STATE_DMA_WRITING:
729                 info->dma_dir = DMA_TO_DEVICE;
730                 direction = DMA_MEM_TO_DEV;
731                 break;
732         case STATE_DMA_READING:
733                 info->dma_dir = DMA_FROM_DEVICE;
734                 direction = DMA_DEV_TO_MEM;
735                 break;
736         default:
737                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
738                                 info->state);
739                 BUG();
740         }
741         info->sg.length = info->data_size +
742                 (info->oob_size ? info->spare_size + info->ecc_size : 0);
743         dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
744
745         tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
746                                      DMA_PREP_INTERRUPT);
747         if (!tx) {
748                 dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
749                 return;
750         }
751         tx->callback = pxa3xx_nand_data_dma_irq;
752         tx->callback_param = info;
753         info->dma_cookie = dmaengine_submit(tx);
754         dma_async_issue_pending(info->dma_chan);
755         dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
756                 __func__, direction, info->dma_cookie, info->sg.length);
757 }
758
759 static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
760 {
761         struct pxa3xx_nand_info *info = data;
762
763         handle_data_pio(info);
764
765         info->state = STATE_CMD_DONE;
766         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
767
768         return IRQ_HANDLED;
769 }
770
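/*
 * Hard IRQ handler. Data requests are either handed off to the DMA engine
 * or, in PIO mode, deferred to the threaded handler above; command-done and
 * device-ready events complete the waiters in nand_cmdfunc() and
 * pxa3xx_nand_waitfunc().
 */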
771 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
772 {
773         struct pxa3xx_nand_info *info = devid;
774         unsigned int status, is_completed = 0, is_ready = 0;
775         unsigned int ready, cmd_done;
776         irqreturn_t ret = IRQ_HANDLED;
777
778         if (info->cs == 0) {
779                 ready           = NDSR_FLASH_RDY;
780                 cmd_done        = NDSR_CS0_CMDD;
781         } else {
782                 ready           = NDSR_RDY;
783                 cmd_done        = NDSR_CS1_CMDD;
784         }
785
786         status = nand_readl(info, NDSR);
787
788         if (status & NDSR_UNCORERR)
789                 info->retcode = ERR_UNCORERR;
790         if (status & NDSR_CORERR) {
791                 info->retcode = ERR_CORERR;
792                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
793                     info->ecc_bch)
794                         info->ecc_err_cnt = NDSR_ERR_CNT(status);
795                 else
796                         info->ecc_err_cnt = 1;
797
798                 /*
799                  * Each chunk composing a page is corrected independently,
800                  * and we need to store maximum number of corrected bitflips
801                  * to return it to the MTD layer in ecc.read_page().
802                  */
803                 info->max_bitflips = max_t(unsigned int,
804                                            info->max_bitflips,
805                                            info->ecc_err_cnt);
806         }
807         if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
808                 /* should DMA be used to transfer the data? */
809                 if (info->use_dma) {
810                         disable_int(info, NDCR_INT_MASK);
811                         info->state = (status & NDSR_RDDREQ) ?
812                                       STATE_DMA_READING : STATE_DMA_WRITING;
813                         start_data_dma(info);
814                         goto NORMAL_IRQ_EXIT;
815                 } else {
816                         info->state = (status & NDSR_RDDREQ) ?
817                                       STATE_PIO_READING : STATE_PIO_WRITING;
818                         ret = IRQ_WAKE_THREAD;
819                         goto NORMAL_IRQ_EXIT;
820                 }
821         }
822         if (status & cmd_done) {
823                 info->state = STATE_CMD_DONE;
824                 is_completed = 1;
825         }
826         if (status & ready) {
827                 info->state = STATE_READY;
828                 is_ready = 1;
829         }
830
831         /*
832          * Clear all status bits before issuing the next command, which
833          * can and will alter the status bits and will deserve a new
834          * interrupt on its own. This lets the controller exit the IRQ.
835          */
836         nand_writel(info, NDSR, status);
837
838         if (status & NDSR_WRCMDREQ) {
839                 status &= ~NDSR_WRCMDREQ;
840                 info->state = STATE_CMD_HANDLE;
841
842                 /*
843                  * Command buffer registers NDCB{0-2} (and optionally NDCB3)
844                  * must be loaded by writing either 12 or 16 bytes
845                  * directly to NDCB0, four bytes at a time.
846                  *
847                  * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
848                  * but each NDCBx register can be read.
849                  */
850                 nand_writel(info, NDCB0, info->ndcb0);
851                 nand_writel(info, NDCB0, info->ndcb1);
852                 nand_writel(info, NDCB0, info->ndcb2);
853
854                 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
855                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
856                         nand_writel(info, NDCB0, info->ndcb3);
857         }
858
859         if (is_completed)
860                 complete(&info->cmd_complete);
861         if (is_ready)
862                 complete(&info->dev_ready);
863 NORMAL_IRQ_EXIT:
864         return ret;
865 }
866
867 static inline int is_buf_blank(uint8_t *buf, size_t len)
868 {
869         for (; len > 0; len--)
870                 if (*buf++ != 0xff)
871                         return 0;
872         return 1;
873 }
874
875 static void set_command_address(struct pxa3xx_nand_info *info,
876                 unsigned int page_size, uint16_t column, int page_addr)
877 {
878         /* small page addr setting */
879         if (page_size < PAGE_CHUNK_SIZE) {
880                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
881                                 | (column & 0xFF);
882
883                 info->ndcb2 = 0;
884         } else {
885                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
886                                 | (column & 0xFFFF);
887
888                 if (page_addr & 0xFF0000)
889                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
890                 else
891                         info->ndcb2 = 0;
892         }
893 }
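
/*
 * Example (large page): page_addr = 0x12345 with column = 0 yields
 * ndcb1 = 0x2345 << 16 = 0x23450000 and ndcb2 = 0x01 (the third row
 * address cycle).
 */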
894
895 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
896 {
897         struct pxa3xx_nand_host *host = info->host[info->cs];
898         struct mtd_info *mtd = host->mtd;
899
900         /* reset the data and OOB column pointers */
901         info->buf_start         = 0;
902         info->buf_count         = 0;
903         info->oob_size          = 0;
904         info->data_buff_pos     = 0;
905         info->oob_buff_pos      = 0;
906         info->use_ecc           = 0;
907         info->use_spare         = 1;
908         info->retcode           = ERR_NONE;
909         info->ecc_err_cnt       = 0;
910         info->ndcb3             = 0;
911         info->need_wait         = 0;
912
913         switch (command) {
914         case NAND_CMD_READ0:
915         case NAND_CMD_PAGEPROG:
916                 info->use_ecc = 1;
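                /* fall through: READ0/PAGEPROG need the data size set too */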
917         case NAND_CMD_READOOB:
918                 pxa3xx_set_datasize(info, mtd);
919                 break;
920         case NAND_CMD_PARAM:
921                 info->use_spare = 0;
922                 break;
923         default:
924                 info->ndcb1 = 0;
925                 info->ndcb2 = 0;
926                 break;
927         }
928
929         /*
930          * If we are about to issue a read command, or about to set
931          * the write address, then clean the data buffer.
932          */
933         if (command == NAND_CMD_READ0 ||
934             command == NAND_CMD_READOOB ||
935             command == NAND_CMD_SEQIN) {
936
937                 info->buf_count = mtd->writesize + mtd->oobsize;
938                 memset(info->data_buff, 0xFF, info->buf_count);
939         }
940
941 }
942
943 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
944                 int ext_cmd_type, uint16_t column, int page_addr)
945 {
946         int addr_cycle, exec_cmd;
947         struct pxa3xx_nand_host *host;
948         struct mtd_info *mtd;
949
950         host = info->host[info->cs];
951         mtd = host->mtd;
952         addr_cycle = 0;
953         exec_cmd = 1;
954
955         if (info->cs != 0)
956                 info->ndcb0 = NDCB0_CSEL;
957         else
958                 info->ndcb0 = 0;
959
960         if (command == NAND_CMD_SEQIN)
961                 exec_cmd = 0;
962
963         addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
964                                     + host->col_addr_cycles);
965
966         switch (command) {
967         case NAND_CMD_READOOB:
968         case NAND_CMD_READ0:
969                 info->buf_start = column;
970                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
971                                 | addr_cycle
972                                 | NAND_CMD_READ0;
973
974                 if (command == NAND_CMD_READOOB)
975                         info->buf_start += mtd->writesize;
976
977                 /*
978                  * Multiple page read needs an 'extended command type' field,
979                  * which is either naked-read or last-read according to the
980                  * state.
981                  */
982                 if (mtd->writesize == PAGE_CHUNK_SIZE) {
983                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
984                 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
985                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
986                                         | NDCB0_LEN_OVRD
987                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
988                         info->ndcb3 = info->chunk_size +
989                                       info->oob_size;
990                 }
991
992                 set_command_address(info, mtd->writesize, column, page_addr);
993                 break;
994
995         case NAND_CMD_SEQIN:
996
997                 info->buf_start = column;
998                 set_command_address(info, mtd->writesize, 0, page_addr);
999
1000                 /*
1001                  * Multiple page programming needs to execute the initial
1002                  * SEQIN command that sets the page address.
1003                  */
1004                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1005                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1006                                 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1007                                 | addr_cycle
1008                                 | command;
1009                         /* No data transfer in this case */
1010                         info->data_size = 0;
1011                         exec_cmd = 1;
1012                 }
1013                 break;
1014
1015         case NAND_CMD_PAGEPROG:
1016                 if (is_buf_blank(info->data_buff,
1017                                         (mtd->writesize + mtd->oobsize))) {
1018                         exec_cmd = 0;
1019                         break;
1020                 }
1021
1022                 /* Second command setting for large pages */
1023                 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1024                         /*
1025                          * Multiple page write uses the 'extended command'
1026                          * field. This can be used to issue a command dispatch
1027                          * or a naked-write depending on the current stage.
1028                          */
1029                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1030                                         | NDCB0_LEN_OVRD
1031                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
1032                         info->ndcb3 = info->chunk_size +
1033                                       info->oob_size;
1034
1035                         /*
1036                          * This is the command dispatch that completes a chunked
1037                          * page program operation.
1038                          */
1039                         if (info->data_size == 0) {
1040                                 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
1041                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1042                                         | command;
1043                                 info->ndcb1 = 0;
1044                                 info->ndcb2 = 0;
1045                                 info->ndcb3 = 0;
1046                         }
1047                 } else {
1048                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1049                                         | NDCB0_AUTO_RS
1050                                         | NDCB0_ST_ROW_EN
1051                                         | NDCB0_DBC
1052                                         | (NAND_CMD_PAGEPROG << 8)
1053                                         | NAND_CMD_SEQIN
1054                                         | addr_cycle;
1055                 }
1056                 break;
1057
1058         case NAND_CMD_PARAM:
1059                 info->buf_count = INIT_BUFFER_SIZE;
1060                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
1061                                 | NDCB0_ADDR_CYC(1)
1062                                 | NDCB0_LEN_OVRD
1063                                 | command;
1064                 info->ndcb1 = (column & 0xFF);
1065                 info->ndcb3 = INIT_BUFFER_SIZE;
1066                 info->data_size = INIT_BUFFER_SIZE;
1067                 break;
1068
1069         case NAND_CMD_READID:
1070                 info->buf_count = READ_ID_BYTES;
1071                 info->ndcb0 |= NDCB0_CMD_TYPE(3)
1072                                 | NDCB0_ADDR_CYC(1)
1073                                 | command;
1074                 info->ndcb1 = (column & 0xFF);
1075
1076                 info->data_size = 8;
1077                 break;
1078         case NAND_CMD_STATUS:
1079                 info->buf_count = 1;
1080                 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1081                                 | NDCB0_ADDR_CYC(1)
1082                                 | command;
1083
1084                 info->data_size = 8;
1085                 break;
1086
1087         case NAND_CMD_ERASE1:
1088                 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1089                                 | NDCB0_AUTO_RS
1090                                 | NDCB0_ADDR_CYC(3)
1091                                 | NDCB0_DBC
1092                                 | (NAND_CMD_ERASE2 << 8)
1093                                 | NAND_CMD_ERASE1;
1094                 info->ndcb1 = page_addr;
1095                 info->ndcb2 = 0;
1096
1097                 break;
1098         case NAND_CMD_RESET:
1099                 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1100                                 | command;
1101
1102                 break;
1103
1104         case NAND_CMD_ERASE2:
1105                 exec_cmd = 0;
1106                 break;
1107
1108         default:
1109                 exec_cmd = 0;
1110                 dev_err(&info->pdev->dev, "unsupported command %x\n",
1111                                 command);
1112                 break;
1113         }
1114
1115         return exec_cmd;
1116 }
1117
1118 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1119                          int column, int page_addr)
1120 {
1121         struct pxa3xx_nand_host *host = mtd->priv;
1122         struct pxa3xx_nand_info *info = host->info_data;
1123         int exec_cmd;
1124
1125         /*
1126          * if this is a x16 device, then convert the input
1127          * "byte" address into a "word" address appropriate
1128          * for indexing a word-oriented device
1129          */
1130         if (info->reg_ndcr & NDCR_DWIDTH_M)
1131                 column /= 2;
1132
1133         /*
1134          * Different NAND chips may be hooked up to different chip
1135          * selects, so check whether the chip select has changed;
1136          * if so, reload the timing registers.
1137          */
1138         if (info->cs != host->cs) {
1139                 info->cs = host->cs;
1140                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1141                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1142         }
1143
1144         prepare_start_command(info, command);
1145
1146         info->state = STATE_PREPARED;
1147         exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1148
1149         if (exec_cmd) {
1150                 init_completion(&info->cmd_complete);
1151                 init_completion(&info->dev_ready);
1152                 info->need_wait = 1;
1153                 pxa3xx_nand_start(info);
1154
1155                 if (!wait_for_completion_timeout(&info->cmd_complete,
1156                     CHIP_DELAY_TIMEOUT)) {
1157                         dev_err(&info->pdev->dev, "Wait timed out!\n");
1158                         /* Stop State Machine for next command cycle */
1159                         pxa3xx_nand_stop(info);
1160                 }
1161         }
1162         info->state = STATE_IDLE;
1163 }
1164
1165 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1166                                   const unsigned command,
1167                                   int column, int page_addr)
1168 {
1169         struct pxa3xx_nand_host *host = mtd->priv;
1170         struct pxa3xx_nand_info *info = host->info_data;
1171         int exec_cmd, ext_cmd_type;
1172
1173         /*
1174          * if this is a x16 device then convert the input
1175          * "byte" address into a "word" address appropriate
1176          * for indexing a word-oriented device
1177          */
1178         if (info->reg_ndcr & NDCR_DWIDTH_M)
1179                 column /= 2;
1180
1181         /*
1182          * Different NAND chips may be hooked up to different chip
1183          * selects, so check whether the chip select has changed;
1184          * if so, reload the timing registers.
1185          */
1186         if (info->cs != host->cs) {
1187                 info->cs = host->cs;
1188                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1189                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1190         }
1191
1192         /* Select the extended command for the first command */
1193         switch (command) {
1194         case NAND_CMD_READ0:
1195         case NAND_CMD_READOOB:
1196                 ext_cmd_type = EXT_CMD_TYPE_MONO;
1197                 break;
1198         case NAND_CMD_SEQIN:
1199                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1200                 break;
1201         case NAND_CMD_PAGEPROG:
1202                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1203                 break;
1204         default:
1205                 ext_cmd_type = 0;
1206                 break;
1207         }
1208
1209         prepare_start_command(info, command);
1210
1211         /*
1212          * Prepare the "is ready" completion before starting a command
1213          * transaction sequence. If the command is not executed the
1214          * completion is completed right away instead, see below.
1215          *
1216          * We can do that inside the loop because the command variable
1217          * is invariant and thus so is the exec_cmd.
1218          */
1219         info->need_wait = 1;
1220         init_completion(&info->dev_ready);
1221         do {
1222                 info->state = STATE_PREPARED;
1223                 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1224                                                column, page_addr);
1225                 if (!exec_cmd) {
1226                         info->need_wait = 0;
1227                         complete(&info->dev_ready);
1228                         break;
1229                 }
1230
1231                 init_completion(&info->cmd_complete);
1232                 pxa3xx_nand_start(info);
1233
1234                 if (!wait_for_completion_timeout(&info->cmd_complete,
1235                     CHIP_DELAY_TIMEOUT)) {
1236                         dev_err(&info->pdev->dev, "Wait timed out!\n");
1237                         /* Stop State Machine for next command cycle */
1238                         pxa3xx_nand_stop(info);
1239                         break;
1240                 }
1241
1242                 /* Check if the sequence is complete */
1243                 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1244                         break;
1245
1246                 /*
1247                  * After a split program command sequence has issued
1248                  * the command dispatch, the command sequence is complete.
1249                  */
1250                 if (info->data_size == 0 &&
1251                     command == NAND_CMD_PAGEPROG &&
1252                     ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1253                         break;
1254
1255                 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1256                         /* Last read: issue a 'last naked read' */
1257                         if (info->data_size == info->chunk_size)
1258                                 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1259                         else
1260                                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1261
1262                 /*
1263                  * If a split program command has no more data to transfer,
1264                  * the command dispatch must be issued to complete.
1265                  */
1266                 } else if (command == NAND_CMD_PAGEPROG &&
1267                            info->data_size == 0) {
1268                                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1269                 }
1270         } while (1);
1271
1272         info->state = STATE_IDLE;
1273 }
1274
1275 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1276                 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1277                 int page)
1278 {
1279         chip->write_buf(mtd, buf, mtd->writesize);
1280         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1281
1282         return 0;
1283 }
1284
1285 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1286                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1287                 int page)
1288 {
1289         struct pxa3xx_nand_host *host = mtd->priv;
1290         struct pxa3xx_nand_info *info = host->info_data;
1291
1292         chip->read_buf(mtd, buf, mtd->writesize);
1293         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1294
1295         if (info->retcode == ERR_CORERR && info->use_ecc) {
1296                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1297
1298         } else if (info->retcode == ERR_UNCORERR) {
1299                 /*
1300                  * for a blank page (all 0xff), the HW calculates its ECC as
1301                  * 0, which differs from the ECC information within the
1302                  * OOB; ignore such uncorrectable errors
1303                  */
1304                 if (is_buf_blank(buf, mtd->writesize))
1305                         info->retcode = ERR_NONE;
1306                 else
1307                         mtd->ecc_stats.failed++;
1308         }
1309
1310         return info->max_bitflips;
1311 }
1312
1313 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1314 {
1315         struct pxa3xx_nand_host *host = mtd->priv;
1316         struct pxa3xx_nand_info *info = host->info_data;
1317         char retval = 0xFF;
1318
1319         if (info->buf_start < info->buf_count)
1320                 /* Has a new command just been sent? */
1321                 retval = info->data_buff[info->buf_start++];
1322
1323         return retval;
1324 }
1325
1326 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1327 {
1328         struct pxa3xx_nand_host *host = mtd->priv;
1329         struct pxa3xx_nand_info *info = host->info_data;
1330         u16 retval = 0xFFFF;
1331
1332         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1333                 retval = *((u16 *)(info->data_buff+info->buf_start));
1334                 info->buf_start += 2;
1335         }
1336         return retval;
1337 }
1338
1339 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1340 {
1341         struct pxa3xx_nand_host *host = mtd->priv;
1342         struct pxa3xx_nand_info *info = host->info_data;
1343         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1344
1345         memcpy(buf, info->data_buff + info->buf_start, real_len);
1346         info->buf_start += real_len;
1347 }
1348
1349 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1350                 const uint8_t *buf, int len)
1351 {
1352         struct pxa3xx_nand_host *host = mtd->priv;
1353         struct pxa3xx_nand_info *info = host->info_data;
1354         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1355
1356         memcpy(info->data_buff + info->buf_start, buf, real_len);
1357         info->buf_start += real_len;
1358 }
1359
1360 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1361 {
1362         return;
1363 }
1364
1365 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1366 {
1367         struct pxa3xx_nand_host *host = mtd->priv;
1368         struct pxa3xx_nand_info *info = host->info_data;
1369
1370         if (info->need_wait) {
1371                 info->need_wait = 0;
1372                 if (!wait_for_completion_timeout(&info->dev_ready,
1373                     CHIP_DELAY_TIMEOUT)) {
1374                         dev_err(&info->pdev->dev, "Ready timed out!\n");
1375                         return NAND_STATUS_FAIL;
1376                 }
1377         }
1378
1379         /* pxa3xx_nand_send_command has waited for command complete */
1380         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1381                 if (info->retcode == ERR_NONE)
1382                         return 0;
1383                 else
1384                         return NAND_STATUS_FAIL;
1385         }
1386
1387         return NAND_STATUS_READY;
1388 }
1389
1390 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
1391 {
1392         struct platform_device *pdev = info->pdev;
1393         struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1394         struct pxa3xx_nand_host *host = info->host[info->cs];
1395         struct mtd_info *mtd = host->mtd;
1396         struct nand_chip *chip = mtd->priv;
1397
1398         /* configure default flash values */
1399         info->reg_ndcr = 0x0; /* enable all interrupts */
1400         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1401         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1402         info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1403         info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1404         info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1405         info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1406
1407         return 0;
1408 }
1409
1410 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1411 {
1412         uint32_t ndcr = nand_readl(info, NDCR);
1413
1414         /* Set an initial chunk size */
1415         info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1416         info->reg_ndcr = ndcr &
1417                 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1418         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1419         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1420         return 0;
1421 }
1422
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
        struct platform_device *pdev = info->pdev;
        struct dma_slave_config config;
        dma_cap_mask_t mask;
        struct pxad_param param;
        int ret;

        info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
        if (info->data_buff == NULL)
                return -ENOMEM;
        if (use_dma == 0)
                return 0;

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        sg_init_one(&info->sg, info->data_buff, info->buf_size);
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        param.prio = PXAD_PRIO_LOWEST;
        param.drcmr = info->drcmr_dat;
        info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
                                                          &param, &pdev->dev,
                                                          "data");
        if (!info->dma_chan) {
                dev_err(&pdev->dev, "unable to request data dma channel\n");
                return -ENODEV;
        }

        memset(&config, 0, sizeof(config));
        config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        config.src_addr = info->mmio_phys + NDDB;
        config.dst_addr = info->mmio_phys + NDDB;
        config.src_maxburst = 32;
        config.dst_maxburst = 32;
        ret = dmaengine_slave_config(info->dma_chan, &config);
        if (ret < 0) {
                dev_err(&info->pdev->dev,
                        "dma channel configuration failed: %d\n",
                        ret);
                /* don't leak the channel if it cannot be configured */
                dma_release_channel(info->dma_chan);
                info->dma_chan = NULL;
                return ret;
        }

        /*
         * Now that the DMA buffers are allocated, enable DMA proper
         * for the I/O operations.
         */
        info->use_dma = 1;
        return 0;
}

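/* Tear down the DMA channel (if one was set up) and free the data buffer. */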
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
        if (info->use_dma) {
                dmaengine_terminate_all(info->dma_chan);
                dma_release_channel(info->dma_chan);
        }
        kfree(info->data_buff);
}

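/*
 * Probe for a chip on the current chip select: program conservative
 * timings, issue a RESET and check that the chip answers.
 */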
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
        struct pxa3xx_nand_info *info = host->info_data;
        struct mtd_info *mtd;
        struct nand_chip *chip;
        const struct nand_sdr_timings *timings;
        int ret;

        mtd = info->host[info->cs]->mtd;
        chip = mtd->priv;

        /* use the common timings as a first try */
        timings = onfi_async_timing_mode_to_sdr_timings(0);
        if (IS_ERR(timings))
                return PTR_ERR(timings);

        pxa3xx_nand_set_sdr_timing(host, timings);

        chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
        ret = chip->waitfunc(mtd, chip);
        if (ret & NAND_STATUS_FAIL)
                return -ENODEV;

        return 0;
}

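/*
 * Map the required ECC strength/step size and page size onto one of
 * the ECC configurations the controller supports (1-bit or BCH),
 * filling in both the controller info and the nand_ecc_ctrl fields.
 */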
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
                        struct nand_ecc_ctrl *ecc,
                        int strength, int ecc_stepsize, int page_size)
{
        if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
                info->chunk_size = 2048;
                info->spare_size = 40;
                info->ecc_size = 24;
                ecc->mode = NAND_ECC_HW;
                ecc->size = 512;
                ecc->strength = 1;

        } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
                info->chunk_size = 512;
                info->spare_size = 8;
                info->ecc_size = 8;
                ecc->mode = NAND_ECC_HW;
                ecc->size = 512;
                ecc->strength = 1;

        /*
         * Required ECC: 4-bit correction per 512 bytes
         * Select: 16-bit correction per 2048 bytes
         */
        } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
                info->ecc_bch = 1;
                info->chunk_size = 2048;
                info->spare_size = 32;
                info->ecc_size = 32;
                ecc->mode = NAND_ECC_HW;
                ecc->size = info->chunk_size;
                ecc->layout = &ecc_layout_2KB_bch4bit;
                ecc->strength = 16;

        } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
                info->ecc_bch = 1;
                info->chunk_size = 2048;
                info->spare_size = 32;
                info->ecc_size = 32;
                ecc->mode = NAND_ECC_HW;
                ecc->size = info->chunk_size;
                ecc->layout = &ecc_layout_4KB_bch4bit;
                ecc->strength = 16;

        /*
         * Required ECC: 8-bit correction per 512 bytes
         * Select: 16-bit correction per 1024 bytes
         */
        } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
                info->ecc_bch = 1;
                info->chunk_size = 1024;
                info->spare_size = 0;
                info->ecc_size = 32;
                ecc->mode = NAND_ECC_HW;
                ecc->size = info->chunk_size;
                ecc->layout = &ecc_layout_4KB_bch8bit;
                ecc->strength = 16;
        } else {
                dev_err(&info->pdev->dev,
                        "ECC strength %d at page size %d is not supported\n",
                        strength, page_size);
                return -ENODEV;
        }

        dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
                 ecc->strength, ecc->size);
        return 0;
}

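/*
 * Identify the flash, pick the timings and ECC settings, size the
 * data buffers and finish the MTD scan. Honours the keep_config
 * platform option by reusing the configuration found in the
 * controller registers.
 */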
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        struct platform_device *pdev = info->pdev;
        struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct nand_chip *chip = mtd->priv;
        int ret;
        uint16_t ecc_strength, ecc_step;

        if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
                goto KEEP_CONFIG;

        /* Set a default chunk size */
        info->chunk_size = 512;

        ret = pxa3xx_nand_config_flash(info);
        if (ret)
                return ret;

        ret = pxa3xx_nand_sensing(host);
        if (ret) {
                dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
                         info->cs);

                return ret;
        }

KEEP_CONFIG:
        info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                chip->options |= NAND_BUSWIDTH_16;

        /* Device detection must be done with ECC disabled */
        if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
                nand_writel(info, NDECCCTRL, 0x0);

        if (nand_scan_ident(mtd, 1, NULL))
                return -ENODEV;

        if (!pdata->keep_config) {
                ret = pxa3xx_nand_init(host);
                if (ret) {
                        dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
                                ret);
                        return ret;
                }
        }

        if (pdata->flash_bbt) {
                /*
                 * We'll use a bad block table stored in-flash and don't
                 * allow writing the bad block marker to the flash.
                 */
                chip->bbt_options |= NAND_BBT_USE_FLASH |
                                     NAND_BBT_NO_OOB_BBM;
                chip->bbt_td = &bbt_main_descr;
                chip->bbt_md = &bbt_mirror_descr;
        }

        /*
         * If the page size is bigger than the FIFO size, check that
         * we are given the right variant and then switch to the
         * extended (aka split) command handling.
         */
        if (mtd->writesize > PAGE_CHUNK_SIZE) {
                if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
                        chip->cmdfunc = nand_cmdfunc_extended;
                } else {
                        dev_err(&info->pdev->dev,
                                "unsupported page size on this variant\n");
                        return -ENODEV;
                }
        }

        if (pdata->ecc_strength && pdata->ecc_step_size) {
                ecc_strength = pdata->ecc_strength;
                ecc_step = pdata->ecc_step_size;
        } else {
                ecc_strength = chip->ecc_strength_ds;
                ecc_step = chip->ecc_step_ds;
        }

        /* Set default ECC strength requirements on non-ONFI devices */
        if (ecc_strength < 1 && ecc_step < 1) {
                ecc_strength = 1;
                ecc_step = 512;
        }

        ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
                           ecc_step, mtd->writesize);
        if (ret)
                return ret;

        /* calculate addressing information */
        if (mtd->writesize >= 2048)
                host->col_addr_cycles = 2;
        else
                host->col_addr_cycles = 1;

        /* release the initial buffer */
        kfree(info->data_buff);

        /* allocate the real data + oob buffer */
        info->buf_size = mtd->writesize + mtd->oobsize;
        ret = pxa3xx_nand_init_buff(info);
        if (ret)
                return ret;
        info->oob_buff = info->data_buff + mtd->writesize;

        if ((mtd->size >> chip->page_shift) > 65536)
                host->row_addr_cycles = 3;
        else
                host->row_addr_cycles = 2;
        return nand_scan_tail(mtd);
}

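/*
 * Allocate and wire up the per-controller state: one mtd_info,
 * nand_chip and host structure per chip select, plus the clock,
 * MMIO region, DMA resources, detection buffer and IRQ handler.
 */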
static int alloc_nand_resource(struct platform_device *pdev)
{
        struct pxa3xx_nand_platform_data *pdata;
        struct pxa3xx_nand_info *info;
        struct pxa3xx_nand_host *host;
        struct nand_chip *chip = NULL;
        struct mtd_info *mtd;
        struct resource *r;
        int ret, irq, cs;

        pdata = dev_get_platdata(&pdev->dev);
        if (pdata->num_cs <= 0)
                return -ENODEV;
        info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
                            sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->pdev = pdev;
        info->variant = pxa3xx_nand_get_variant(pdev);
        for (cs = 0; cs < pdata->num_cs; cs++) {
                mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
                chip = (struct nand_chip *)(&mtd[1]);
                host = (struct pxa3xx_nand_host *)chip;
                info->host[cs] = host;
                host->mtd = mtd;
                host->cs = cs;
                host->info_data = info;
                mtd->priv = host;
                mtd->dev.parent = &pdev->dev;

                chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
                chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
                chip->controller        = &info->controller;
                chip->waitfunc          = pxa3xx_nand_waitfunc;
                chip->select_chip       = pxa3xx_nand_select_chip;
                chip->read_word         = pxa3xx_nand_read_word;
                chip->read_byte         = pxa3xx_nand_read_byte;
                chip->read_buf          = pxa3xx_nand_read_buf;
                chip->write_buf         = pxa3xx_nand_write_buf;
                chip->options           |= NAND_NO_SUBPAGE_WRITE;
                chip->cmdfunc           = nand_cmdfunc;
        }

        spin_lock_init(&chip->controller->lock);
        init_waitqueue_head(&chip->controller->wq);
        info->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(info->clk)) {
                dev_err(&pdev->dev, "failed to get nand clock\n");
                return PTR_ERR(info->clk);
        }
        ret = clk_prepare_enable(info->clk);
        if (ret < 0)
                return ret;

        if (use_dma) {
                r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
                if (r == NULL) {
                        dev_err(&pdev->dev,
                                "no resource defined for data DMA\n");
                        ret = -ENXIO;
                        goto fail_disable_clk;
                }
                info->drcmr_dat = r->start;

                r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
                if (r == NULL) {
                        dev_err(&pdev->dev,
                                "no resource defined for cmd DMA\n");
                        ret = -ENXIO;
                        goto fail_disable_clk;
                }
                info->drcmr_cmd = r->start;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "no IRQ resource defined\n");
                ret = -ENXIO;
                goto fail_disable_clk;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(info->mmio_base)) {
                ret = PTR_ERR(info->mmio_base);
                goto fail_disable_clk;
        }
        info->mmio_phys = r->start;

        /* Allocate a buffer to allow flash detection */
        info->buf_size = INIT_BUFFER_SIZE;
        info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
        if (info->data_buff == NULL) {
                ret = -ENOMEM;
                goto fail_disable_clk;
        }

        /* initialize all interrupts to be disabled */
        disable_int(info, NDSR_MASK);

        ret = request_threaded_irq(irq, pxa3xx_nand_irq,
                                   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
                                   pdev->name, info);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to request IRQ\n");
                goto fail_free_buf;
        }

        platform_set_drvdata(pdev, info);

        return 0;

fail_free_buf:
        /* the IRQ request failed above, so there is no IRQ to free here */
        kfree(info->data_buff);
fail_disable_clk:
        clk_disable_unprepare(info->clk);
        return ret;
}

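/*
 * Undo alloc_nand_resource(): release the IRQ and buffers, hand the
 * DFI bus back to the SMC and unregister the MTD devices.
 */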
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
        struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
        struct pxa3xx_nand_platform_data *pdata;
        int irq, cs;

        if (!info)
                return 0;

        pdata = dev_get_platdata(&pdev->dev);

        irq = platform_get_irq(pdev, 0);
        if (irq >= 0)
                free_irq(irq, info);
        pxa3xx_nand_free_buff(info);

        /*
         * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
         * In order to prevent a lockup of the system bus, the DFI bus
         * arbitration is granted to the SMC upon driver removal. This is
         * done by setting the x_ARB_CNTL bit, which also prevents the NAND
         * from accessing the bus any longer.
         */
        nand_writel(info, NDCR,
                    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
                    NFCV1_NDCR_ARB_CNTL);
        clk_disable_unprepare(info->clk);

        for (cs = 0; cs < pdata->num_cs; cs++)
                nand_release(info->host[cs]->mtd);
        return 0;
}

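/*
 * Build platform data from the device tree. A non-DT probe (no
 * matching OF id) is not an error: it simply leaves any platform
 * data that was passed in untouched.
 */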
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
        struct pxa3xx_nand_platform_data *pdata;
        struct device_node *np = pdev->dev.of_node;
        const struct of_device_id *of_id =
                        of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

        if (!of_id)
                return 0;

        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;

        if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
                pdata->enable_arbiter = 1;
        if (of_get_property(np, "marvell,nand-keep-config", NULL))
                pdata->keep_config = 1;
        of_property_read_u32(np, "num-cs", &pdata->num_cs);
        pdata->flash_bbt = of_get_nand_on_flash_bbt(np);

        pdata->ecc_strength = of_get_nand_ecc_strength(np);
        if (pdata->ecc_strength < 0)
                pdata->ecc_strength = 0;

        pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
        if (pdata->ecc_step_size < 0)
                pdata->ecc_step_size = 0;

        pdev->dev.platform_data = pdata;

        return 0;
}

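/*
 * Main probe: parse the DT (if any), allocate the controller
 * resources, then scan and register an MTD device per chip select.
 * The probe succeeds if at least one chip select registers.
 */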
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
        struct pxa3xx_nand_platform_data *pdata;
        struct mtd_part_parser_data ppdata = {};
        struct pxa3xx_nand_info *info;
        int ret, cs, probe_success, dma_available;

        dma_available = IS_ENABLED(CONFIG_ARM) &&
                (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
        if (use_dma && !dma_available) {
                use_dma = 0;
                dev_warn(&pdev->dev,
                         "This platform can't do DMA on this device\n");
        }

        ret = pxa3xx_nand_probe_dt(pdev);
        if (ret)
                return ret;

        pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "no platform data defined\n");
                return -ENODEV;
        }

        ret = alloc_nand_resource(pdev);
        if (ret) {
                dev_err(&pdev->dev, "alloc nand resource failed\n");
                return ret;
        }

        info = platform_get_drvdata(pdev);
        probe_success = 0;
        for (cs = 0; cs < pdata->num_cs; cs++) {
                struct mtd_info *mtd = info->host[cs]->mtd;

                /*
                 * The mtd name matches the one used in the 'mtdparts'
                 * kernel parameter. This name cannot be changed,
                 * otherwise the user's mtd partition configuration
                 * would break.
                 */
                mtd->name = "pxa3xx_nand-0";
                info->cs = cs;
                ret = pxa3xx_nand_scan(mtd);
                if (ret) {
                        dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
                                 cs);
                        continue;
                }

                ppdata.of_node = pdev->dev.of_node;
                ret = mtd_device_parse_register(mtd, NULL,
                                                &ppdata, pdata->parts[cs],
                                                pdata->nr_parts[cs]);
                if (!ret)
                        probe_success = 1;
        }

        if (!probe_success) {
                pxa3xx_nand_remove(pdev);
                return -ENODEV;
        }

        return 0;
}

#ifdef CONFIG_PM
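/* Refuse to suspend while a command is still in flight. */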
static int pxa3xx_nand_suspend(struct device *dev)
{
        struct pxa3xx_nand_info *info = dev_get_drvdata(dev);

        if (info->state) {
                dev_err(dev, "driver busy, state = %d\n", info->state);
                return -EAGAIN;
        }

        return 0;
}

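/*
 * On resume, quiesce the controller: mask all interrupts, force a
 * timing reload on the next command and clear any stale status bits.
 */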
static int pxa3xx_nand_resume(struct device *dev)
{
        struct pxa3xx_nand_info *info = dev_get_drvdata(dev);

        /* We don't want to handle interrupts without calling mtd routines */
        disable_int(info, NDCR_INT_MASK);

        /*
         * Set the chip select to an invalid value; the driver will then
         * reset the timings for the current chip select at the beginning
         * of cmdfunc.
         */
        info->cs = 0xff;

        /*
         * As the spec says, the NDSR is updated to 0x1800 when the
         * nand_clk is disabled and re-enabled.
         * To prevent this from corrupting the driver's state machine,
         * clear all status bits before resuming.
         */
        nand_writel(info, NDSR, NDSR_MASK);

        return 0;
}
#else
#define pxa3xx_nand_suspend     NULL
#define pxa3xx_nand_resume      NULL
#endif

static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
        .suspend        = pxa3xx_nand_suspend,
        .resume         = pxa3xx_nand_resume,
};

static struct platform_driver pxa3xx_nand_driver = {
        .driver = {
                .name   = "pxa3xx-nand",
                .of_match_table = pxa3xx_nand_dt_ids,
                .pm     = &pxa3xx_nand_pm_ops,
        },
        .probe          = pxa3xx_nand_probe,
        .remove         = pxa3xx_nand_remove,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");