/*
 * Driver for NAND MLC Controller in LPC32xx
 *
 * Author: Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 WORK Microwave GmbH
 * Copyright © 2011, 2012 Roland Stigge
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * NAND Flash Controller Operation:
 * - Read: Auto Decode
 * - Write: Auto Encode
 * - Tested Page Sizes: 2048, 4096
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_mtd.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mtd/nand_ecc.h>

#define DRV_NAME "lpc32xx_mlc"

/**********************************************************************
* MLC NAND controller register offsets
**********************************************************************/

#define MLC_BUFF(x)                     ((x) + 0x00000)
#define MLC_DATA(x)                     ((x) + 0x08000)
#define MLC_CMD(x)                      ((x) + 0x10000)
#define MLC_ADDR(x)                     ((x) + 0x10004)
#define MLC_ECC_ENC_REG(x)              ((x) + 0x10008)
#define MLC_ECC_DEC_REG(x)              ((x) + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x)         ((x) + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x)         ((x) + 0x10014)
#define MLC_RPR(x)                      ((x) + 0x10018)
#define MLC_WPR(x)                      ((x) + 0x1001C)
#define MLC_RUBP(x)                     ((x) + 0x10020)
#define MLC_ROBP(x)                     ((x) + 0x10024)
#define MLC_SW_WP_ADD_LOW(x)            ((x) + 0x10028)
#define MLC_SW_WP_ADD_HIG(x)            ((x) + 0x1002C)
#define MLC_ICR(x)                      ((x) + 0x10030)
#define MLC_TIME_REG(x)                 ((x) + 0x10034)
#define MLC_IRQ_MR(x)                   ((x) + 0x10038)
#define MLC_IRQ_SR(x)                   ((x) + 0x1003C)
#define MLC_LOCK_PR(x)                  ((x) + 0x10044)
#define MLC_ISR(x)                      ((x) + 0x10048)
#define MLC_CEH(x)                      ((x) + 0x1004C)

/**********************************************************************
* MLC_CMD bit definitions
**********************************************************************/
#define MLCCMD_RESET                    0xFF

/**********************************************************************
* MLC_ICR bit definitions
**********************************************************************/
#define MLCICR_WPROT                    (1 << 3)
#define MLCICR_LARGEBLOCK               (1 << 2)
#define MLCICR_LONGADDR                 (1 << 1)
#define MLCICR_16BIT                    (1 << 0)  /* unsupported by LPC32x0! */

/**********************************************************************
* MLC_TIME_REG bit definitions
**********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n)        (((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)        (((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n)           (((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n)           (((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n)            (((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n)           (((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n)            (((n) & 0x0F) << 0)

/**********************************************************************
* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
**********************************************************************/
#define MLCIRQ_NAND_READY               (1 << 5)
#define MLCIRQ_CONTROLLER_READY         (1 << 4)
#define MLCIRQ_DECODE_FAILURE           (1 << 3)
#define MLCIRQ_DECODE_ERROR             (1 << 2)
#define MLCIRQ_ECC_READY                (1 << 1)
#define MLCIRQ_WRPROT_FAULT             (1 << 0)

/**********************************************************************
* MLC_LOCK_PR bit definitions
**********************************************************************/
#define MLCLOCKPR_MAGIC                 0xA25E

/**********************************************************************
* MLC_ISR bit definitions
**********************************************************************/
#define MLCISR_DECODER_FAILURE          (1 << 6)
#define MLCISR_ERRORS                   ((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED          (1 << 3)
#define MLCISR_ECC_READY                (1 << 2)
#define MLCISR_CONTROLLER_READY         (1 << 1)
#define MLCISR_NAND_READY               (1 << 0)

/**********************************************************************
* MLC_CEH bit definitions
**********************************************************************/
#define MLCCEH_NORMAL                   (1 << 0)

struct lpc32xx_nand_cfg_mlc {
        uint32_t tcea_delay;
        uint32_t busy_delay;
        uint32_t nand_ta;
        uint32_t rd_high;
        uint32_t rd_low;
        uint32_t wr_high;
        uint32_t wr_low;
        int wp_gpio;
        struct mtd_partition *parts;
        unsigned num_parts;
};

static struct nand_ecclayout lpc32xx_nand_oob = {
        .eccbytes = 40,
        .eccpos = { 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
                   22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                   38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
                   54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
        .oobfree = {
                { .offset = 0,
                  .length = 6, },
                { .offset = 16,
                  .length = 6, },
                { .offset = 32,
                  .length = 6, },
                { .offset = 48,
                  .length = 6, },
                },
};

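/*
 * Absolute pages of the main and mirror bad block tables. Assuming
 * 2048-byte pages and 64-page blocks, these land in the last two blocks
 * of a 1 GiB device (524288 pages total); NAND_BBT_ABSPAGE pins the BBT
 * location at compile time, so this hard-codes the fitted flash size.
 */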
static struct nand_bbt_descr lpc32xx_nand_bbt = {
        .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
                   NAND_BBT_WRITE,
        .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
        .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
                   NAND_BBT_WRITE,
        .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};

struct lpc32xx_nand_host {
        struct nand_chip        nand_chip;
        struct lpc32xx_mlc_platform_data *pdata;
        struct clk              *clk;
        struct mtd_info         mtd;
        void __iomem            *io_base;
        int                     irq;
        struct lpc32xx_nand_cfg_mlc     *ncfg;
        struct completion       comp_nand;
        struct completion       comp_controller;
        uint32_t llptr;
        /* Physical address of the OOB data buffer */
        dma_addr_t              oob_buf_phy;
        /* Virtual address of the OOB data buffer */
        uint8_t                 *oob_buf;
        /* Physical base address of the controller (for DMA slave addresses) */
        dma_addr_t              io_base_phy;

        struct completion       comp_dma;
        struct dma_chan         *dma_chan;
        struct dma_slave_config dma_slave_config;
        struct scatterlist      sgl;
        uint8_t                 *dma_buf;
        uint8_t                 *dummy_buf;
        int                     mlcsubpages; /* number of 512-byte subpages */
};

/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA controller for transferring the 512-byte subpages,
 * instead of doing readl() / writel() in a loop, actually slows it down
 * significantly. Measurements via getnstimeofday() upon 512-byte subpage
 * reads reveal:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * This applies to the transfer itself. In the DMA case: only the
 * wait_for_completion() (DMA setup _not_ included).
 *
 * Note that the 512-byte subpage transfer is done directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for
 * a 2048-byte page) is spent waiting for the NAND IRQ anyway. (The NAND
 * controller transferring data between its internal buffer to/from the
 * NAND chip.)
 *
 * Therefore, using the PL080 DMA is disabled by default, for now.
 */
static int use_dma;

static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
        uint32_t clkrate, tmp;

        /* Reset MLC controller */
        writel(MLCCMD_RESET, MLC_CMD(host->io_base));
        udelay(1000);

        /* Get base clock for MLC block */
        clkrate = clk_get_rate(host->clk);
        if (clkrate == 0)
                clkrate = 104000000;

        /* Unlock MLC_ICR
         * (among other protected registers; relocked automatically) */
        writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

        /* Configure MLC Controller: Large Block, 5 Byte Address */
        tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
        writel(tmp, MLC_ICR(host->io_base));

        /* Unlock MLC_TIME_REG
         * (among other protected registers; relocked automatically) */
        writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

        /* Compute clock setup values, see LPC and NAND manual */
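        /*
         * The nxp,* timing properties apparently encode each delay as a
         * rate (1/t, in Hz), so clkrate / value yields the delay in HCLK
         * cycles; the "+ 1" on most fields rounds up.
         */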
        tmp = 0;
        tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
        tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
        tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
        tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
        tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
        tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
        tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
        writel(tmp, MLC_TIME_REG(host->io_base));

        /* Enable IRQ for CONTROLLER_READY and NAND_READY */
        writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
                        MLC_IRQ_MR(host->io_base));

        /* Normal nCE operation: nCE controlled by controller */
        writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}

/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
                                  unsigned int ctrl)
{
        struct nand_chip *nand_chip = mtd->priv;
        struct lpc32xx_nand_host *host = nand_chip->priv;

        if (cmd != NAND_CMD_NONE) {
                if (ctrl & NAND_CLE)
                        writel(cmd, MLC_CMD(host->io_base));
                else
                        writel(cmd, MLC_ADDR(host->io_base));
        }
}

/*
 * Read Device Ready (NAND device _and_ controller ready)
 */
static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
{
        struct nand_chip *nand_chip = mtd->priv;
        struct lpc32xx_nand_host *host = nand_chip->priv;

        if ((readb(MLC_ISR(host->io_base)) &
             (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
            (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
                return 1;

        return 0;
}

static irqreturn_t lpc3xxx_nand_irq(int irq, void *data)
{
        struct lpc32xx_nand_host *host = data;
        uint8_t sr;

        /* Clear interrupt flag by reading status */
        sr = readb(MLC_IRQ_SR(host->io_base));
        if (sr & MLCIRQ_NAND_READY)
                complete(&host->comp_nand);
        if (sr & MLCIRQ_CONTROLLER_READY)
                complete(&host->comp_controller);

        return IRQ_HANDLED;
}

static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
        struct lpc32xx_nand_host *host = chip->priv;

        if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
                goto exit;

        wait_for_completion(&host->comp_nand);

        while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
                /* Seems to be delayed sometimes by controller */
                dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
                cpu_relax();
        }

exit:
        return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
                                       struct nand_chip *chip)
{
        struct lpc32xx_nand_host *host = chip->priv;

        if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
                goto exit;

        wait_for_completion(&host->comp_controller);

        while (!(readb(MLC_ISR(host->io_base)) &
                 MLCISR_CONTROLLER_READY)) {
                dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
                cpu_relax();
        }

exit:
        return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
        lpc32xx_waitfunc_nand(mtd, chip);
        lpc32xx_waitfunc_controller(mtd, chip);

        return NAND_STATUS_READY;
}

/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
        if (gpio_is_valid(host->ncfg->wp_gpio))
                gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
        if (gpio_is_valid(host->ncfg->wp_gpio))
                gpio_set_value(host->ncfg->wp_gpio, 1);
}

static void lpc32xx_dma_complete_func(void *completion)
{
        complete(completion);
}

static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
                            enum dma_transfer_direction dir)
{
        struct nand_chip *chip = mtd->priv;
        struct lpc32xx_nand_host *host = chip->priv;
        struct dma_async_tx_descriptor *desc;
        int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
        int res;

        sg_init_one(&host->sgl, mem, len);

        res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
                         DMA_BIDIRECTIONAL);
        if (res != 1) {
                dev_err(mtd->dev.parent, "Failed to map sg list\n");
                return -ENXIO;
        }
        desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
                                       flags);
        if (!desc) {
                dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
                goto out1;
        }

        init_completion(&host->comp_dma);
        desc->callback = lpc32xx_dma_complete_func;
        desc->callback_param = &host->comp_dma;

        dmaengine_submit(desc);
        dma_async_issue_pending(host->dma_chan);

        /* Treat a timed-out transfer as an error instead of ignoring it */
        if (!wait_for_completion_timeout(&host->comp_dma,
                                         msecs_to_jiffies(1000))) {
                dev_err(mtd->dev.parent, "DMA transfer timed out\n");
                goto out1;
        }

        dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
                     DMA_BIDIRECTIONAL);
        return 0;
out1:
        dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
                     DMA_BIDIRECTIONAL);
        return -ENXIO;
}

static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                             uint8_t *buf, int oob_required, int page)
{
        struct lpc32xx_nand_host *host = chip->priv;
        int i, j;
        uint8_t *oobbuf = chip->oob_poi;
        uint32_t mlc_isr;
        int res;
        uint8_t *dma_buf;
        bool dma_mapped;

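        /*
         * Use the caller's buffer directly when it is DMA-able (lowmem);
         * otherwise fall back to the preallocated bounce buffer.
         */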
        if ((void *)buf <= high_memory) {
                dma_buf = buf;
                dma_mapped = true;
        } else {
                dma_buf = host->dma_buf;
                dma_mapped = false;
        }

        /* Writing Command and Address */
        chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

        /* For all sub-pages */
        for (i = 0; i < host->mlcsubpages; i++) {
                /* Start Auto Decode Command */
                writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

                /* Wait for Controller Ready */
                lpc32xx_waitfunc_controller(mtd, chip);

                /* Check ECC Error status */
                mlc_isr = readl(MLC_ISR(host->io_base));
                if (mlc_isr & MLCISR_DECODER_FAILURE) {
                        mtd->ecc_stats.failed++;
                        dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
                } else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
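                        /*
                         * ISR bits [5:4] encode the number of corrected
                         * symbol errors minus one (per the LPC32x0 manual).
                         */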
                        mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
                }

                /* Read 512 + 16 Bytes */
                if (use_dma) {
                        res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
                                               DMA_DEV_TO_MEM);
                        if (res)
                                return res;
                } else {
                        for (j = 0; j < (512 >> 2); j++) {
                                *((uint32_t *)(buf)) =
                                        readl(MLC_BUFF(host->io_base));
                                buf += 4;
                        }
                }
                for (j = 0; j < (16 >> 2); j++) {
                        *((uint32_t *)(oobbuf)) =
                                readl(MLC_BUFF(host->io_base));
                        oobbuf += 4;
                }
        }

        if (use_dma && !dma_mapped)
                memcpy(buf, dma_buf, mtd->writesize);

        return 0;
}

static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
                                       struct nand_chip *chip,
                                       const uint8_t *buf, int oob_required)
{
        struct lpc32xx_nand_host *host = chip->priv;
        const uint8_t *oobbuf = chip->oob_poi;
        uint8_t *dma_buf = (uint8_t *)buf;
        int res;
        int i, j;

        if (use_dma && (void *)buf >= high_memory) {
                dma_buf = host->dma_buf;
                memcpy(dma_buf, buf, mtd->writesize);
        }

        for (i = 0; i < host->mlcsubpages; i++) {
                /* Start Encode */
                writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

                /* Write 512 + 6 Bytes to Buffer */
                if (use_dma) {
                        res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
                                               DMA_MEM_TO_DEV);
                        if (res)
                                return res;
                } else {
                        for (j = 0; j < (512 >> 2); j++) {
                                writel(*((uint32_t *)(buf)),
                                       MLC_BUFF(host->io_base));
                                buf += 4;
                        }
                }
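                /*
                 * Write the 6 free OOB bytes for this subpage (4 + 2); the
                 * "oobbuf += 12" skips the 2 bytes just written plus the 10
                 * hardware ECC bytes of this 16-byte OOB chunk, which the
                 * controller fills in itself (cf. lpc32xx_nand_oob).
                 */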
                writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
                oobbuf += 4;
                writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
                oobbuf += 12;

                /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
                writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

                /* Wait for Controller Ready */
                lpc32xx_waitfunc_controller(mtd, chip);
        }
        return 0;
}

static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
                            int page)
{
        struct lpc32xx_nand_host *host = chip->priv;

        /* Read the whole page - necessary with the MLC controller! */
        lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);

        return 0;
}

static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
                             int page)
{
        /* Intentionally a no-op: an isolated OOB write conflicts with the
         * automatic LPC MLC ECC encoder! */
        return 0;
}

/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
{
        /* Always enabled! */
}

static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
        struct mtd_info *mtd = &host->mtd;
        dma_cap_mask_t mask;

        if (!host->pdata || !host->pdata->dma_filter) {
                dev_err(mtd->dev.parent, "no DMA platform data\n");
                return -ENOENT;
        }

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
                                             "nand-mlc");
        if (!host->dma_chan) {
                dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
                return -EBUSY;
        }

        /*
         * Set direction to a sensible value even if the dmaengine driver
         * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
         * driver criticizes it as "alien transfer direction".
         */
        host->dma_slave_config.direction = DMA_DEV_TO_MEM;
        host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        host->dma_slave_config.src_maxburst = 128;
        host->dma_slave_config.dst_maxburst = 128;
        /* DMA controller does flow control: */
        host->dma_slave_config.device_fc = false;
        host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
        host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
        if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
                dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
                goto out1;
        }

        return 0;
out1:
        dma_release_channel(host->dma_chan);
        return -ENXIO;
}

static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
        struct lpc32xx_nand_cfg_mlc *ncfg;
        struct device_node *np = dev->of_node;

        ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
        if (!ncfg)
                return NULL;

        of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
        of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
        of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
        of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
        of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
        of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
        of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);

        if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
            !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
            !ncfg->wr_low) {
                dev_err(dev, "chip parameters not specified correctly\n");
                return NULL;
        }

        ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

        return ncfg;
}

/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host;
        struct mtd_info *mtd;
        struct nand_chip *nand_chip;
        struct resource *rc;
        int res;
        struct mtd_part_parser_data ppdata = {};

        /* Allocate memory for the device structure (and zero it) */
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host->io_base = devm_ioremap_resource(&pdev->dev, rc);
        if (IS_ERR(host->io_base))
                return PTR_ERR(host->io_base);

        host->io_base_phy = rc->start;

        mtd = &host->mtd;
        nand_chip = &host->nand_chip;
        if (pdev->dev.of_node)
                host->ncfg = lpc32xx_parse_dt(&pdev->dev);
        if (!host->ncfg) {
                dev_err(&pdev->dev,
                        "Missing or bad NAND config from device tree\n");
                return -ENOENT;
        }
        if (host->ncfg->wp_gpio == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (gpio_is_valid(host->ncfg->wp_gpio) &&
                        gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
                dev_err(&pdev->dev, "GPIO not available\n");
                return -EBUSY;
        }
        lpc32xx_wp_disable(host);

        host->pdata = dev_get_platdata(&pdev->dev);

        nand_chip->priv = host;         /* link the private data structures */
        mtd->priv = nand_chip;
        mtd->owner = THIS_MODULE;
        mtd->dev.parent = &pdev->dev;

        /* Get NAND clock */
        host->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                dev_err(&pdev->dev, "Clock initialization failure\n");
                res = -ENOENT;
                goto err_exit1;
        }
        clk_enable(host->clk);

        nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
        nand_chip->dev_ready = lpc32xx_nand_device_ready;
        nand_chip->chip_delay = 25; /* us */
        nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
        nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);

        /* Init NAND controller */
        lpc32xx_nand_setup(host);

        platform_set_drvdata(pdev, host);

        /* Initialize function pointers */
        nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
        nand_chip->ecc.read_page_raw = lpc32xx_read_page;
        nand_chip->ecc.read_page = lpc32xx_read_page;
        nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
        nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
        nand_chip->ecc.write_oob = lpc32xx_write_oob;
        nand_chip->ecc.read_oob = lpc32xx_read_oob;
        nand_chip->ecc.strength = 4;
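        /*
         * The hardware Reed-Solomon ECC corrects up to four symbol errors
         * per 512-byte subpage, hence strength 4 (per the LPC32x0 manual).
         */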
        nand_chip->waitfunc = lpc32xx_waitfunc;

        nand_chip->options = NAND_NO_SUBPAGE_WRITE;
        nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
        nand_chip->bbt_td = &lpc32xx_nand_bbt;
        nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

        if (use_dma) {
                res = lpc32xx_dma_setup(host);
                if (res) {
                        res = -EIO;
                        goto err_exit2;
                }
        }

        /*
         * Scan to find existence of the device and determine the type of
         * NAND device: small block or large block
         */
        if (nand_scan_ident(mtd, 1, NULL)) {
                res = -ENXIO;
                goto err_exit3;
        }

        host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
        if (!host->dma_buf) {
                res = -ENOMEM;
                goto err_exit3;
        }

        host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
        if (!host->dummy_buf) {
                res = -ENOMEM;
                goto err_exit3;
        }

        nand_chip->ecc.mode = NAND_ECC_HW;
        nand_chip->ecc.size = mtd->writesize;
        nand_chip->ecc.layout = &lpc32xx_nand_oob;
        host->mlcsubpages = mtd->writesize / 512;

        /* initially clear interrupt status */
        readb(MLC_IRQ_SR(host->io_base));

        init_completion(&host->comp_nand);
        init_completion(&host->comp_controller);

        host->irq = platform_get_irq(pdev, 0);
        if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
                dev_err(&pdev->dev, "failed to get platform irq\n");
                res = -EINVAL;
                goto err_exit3;
        }

        if (request_irq(host->irq, lpc3xxx_nand_irq,
                        IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
                dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
                res = -ENXIO;
                goto err_exit3;
        }

        /*
         * Fills out all the uninitialized function pointers with the defaults
         * and scans for a bad block table if appropriate.
         */
        if (nand_scan_tail(mtd)) {
                res = -ENXIO;
                goto err_exit4;
        }

        mtd->name = DRV_NAME;

        ppdata.of_node = pdev->dev.of_node;
        res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
                                        host->ncfg->num_parts);
        if (!res)
                return res;

        nand_release(mtd);

err_exit4:
        free_irq(host->irq, host);
err_exit3:
        if (use_dma)
                dma_release_channel(host->dma_chan);
err_exit2:
        clk_disable(host->clk);
        clk_put(host->clk);
err_exit1:
        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);

        return res;
}

/*
 * Remove NAND device
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
        struct mtd_info *mtd = &host->mtd;

        nand_release(mtd);
        free_irq(host->irq, host);
        if (use_dma)
                dma_release_channel(host->dma_chan);

        clk_disable(host->clk);
        clk_put(host->clk);

        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);

        return 0;
}

#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

        /* Re-enable NAND clock */
        clk_enable(host->clk);

        /* Fresh init of NAND controller */
        lpc32xx_nand_setup(host);

        /* Disable write protect */
        lpc32xx_wp_disable(host);

        return 0;
}

static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

        /* Enable write protect for safety */
        lpc32xx_wp_enable(host);

        /* Disable clock */
        clk_disable(host->clk);
        return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif

static const struct of_device_id lpc32xx_nand_match[] = {
        { .compatible = "nxp,lpc3220-mlc" },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
        .probe          = lpc32xx_nand_probe,
        .remove         = lpc32xx_nand_remove,
        .resume         = lpc32xx_nand_resume,
        .suspend        = lpc32xx_nand_suspend,
        .driver         = {
                .name   = DRV_NAME,
                .of_match_table = lpc32xx_nand_match,
        },
};

module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");