These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / mtd / nand / sh_flctl.c
1 /*
2  * SuperH FLCTL nand controller
3  *
4  * Copyright (c) 2008 Renesas Solutions Corp.
5  * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
6  *
7  * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
21  *
22  */
23
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/completion.h>
27 #include <linux/delay.h>
28 #include <linux/dmaengine.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/interrupt.h>
31 #include <linux/io.h>
32 #include <linux/of.h>
33 #include <linux/of_device.h>
34 #include <linux/of_mtd.h>
35 #include <linux/platform_device.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/sh_dma.h>
38 #include <linux/slab.h>
39 #include <linux/string.h>
40
41 #include <linux/mtd/mtd.h>
42 #include <linux/mtd/nand.h>
43 #include <linux/mtd/partitions.h>
44 #include <linux/mtd/sh_flctl.h>
45
46 static struct nand_ecclayout flctl_4secc_oob_16 = {
47         .eccbytes = 10,
48         .eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
49         .oobfree = {
50                 {.offset = 12,
51                 . length = 4} },
52 };
53
/* OOB layout for 2048-byte-page chips: 10 ECC bytes per 512-byte sector
 * (4 sectors), with small free regions between the ECC runs. */
static struct nand_ecclayout flctl_4secc_oob_64 = {
	.eccbytes = 4 * 10,
	.eccpos = {
		 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
		22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
		38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
		54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
	.oobfree = {
		{.offset =  2, .length = 4},
		{.offset = 16, .length = 6},
		{.offset = 32, .length = 6},
		{.offset = 48, .length = 6} },
};
67
/* Erased-state (0xff) pattern used by the bad-block-table scan descriptors. */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
69
/* Bad-block marker descriptor for small-page devices: one marker byte at
 * OOB offset 11, scanned on the first two pages of each block. */
static struct nand_bbt_descr flctl_4secc_smallpage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};
76
/* Bad-block marker descriptor for large-page devices: two marker bytes at
 * OOB offset 0, scanned on the first two pages of each block. */
static struct nand_bbt_descr flctl_4secc_largepage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};
83
84 static void empty_fifo(struct sh_flctl *flctl)
85 {
86         writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
87         writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
88 }
89
/* Kick off a command translation by writing the start bit to FLTRCR. */
static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}
94
/* Log a timeout; @str is the name of the function that timed out. */
static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}
99
100 static void wait_completion(struct sh_flctl *flctl)
101 {
102         uint32_t timeout = LOOP_TIMEOUT_MAX;
103
104         while (timeout--) {
105                 if (readb(FLTRCR(flctl)) & TREND) {
106                         writeb(0x0, FLTRCR(flctl));
107                         return;
108                 }
109                 udelay(1);
110         }
111
112         timeout_error(flctl, __func__);
113         writeb(0x0, FLTRCR(flctl));
114 }
115
/* dmaengine completion callback: wake up the waiter in
 * flctl_dma_fifo0_transfer(). @param is the struct sh_flctl. */
static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}
122
123 static void flctl_release_dma(struct sh_flctl *flctl)
124 {
125         if (flctl->chan_fifo0_rx) {
126                 dma_release_channel(flctl->chan_fifo0_rx);
127                 flctl->chan_fifo0_rx = NULL;
128         }
129         if (flctl->chan_fifo0_tx) {
130                 dma_release_channel(flctl->chan_fifo0_tx);
131                 flctl->chan_fifo0_tx = NULL;
132         }
133 }
134
/*
 * flctl_setup_dma - try to acquire and configure the FIFO0 DMA channels.
 *
 * DMA is optional: without platform data or valid slave IDs the driver
 * simply stays on PIO.  On any partial failure both channels are released
 * via flctl_release_dma() so DMA is all-or-nothing.
 */
static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	/* no platform data -> no slave IDs -> PIO only */
	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	/* TX direction: memory -> data FIFO */
	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	/* reuse cfg for the RX direction: data FIFO -> memory */
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}
191
/*
 * set_addr - pack column/page into FLADR (and FLADR2 for large devices).
 *
 * column == -1: erase — FLADR carries the page address only.
 * page_addr == -1 (with column != -1): no address is packed; FLADR gets 0.
 */
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc.. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;	/* byte offset -> 16-bit word offset */
		if (flctl->page_size) {
			/* large page: 12-bit column, page bytes in 23:16/31:24 */
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* big than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t	addr2;
				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			/* small page: column byte + three page-address bytes */
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}
222
223 static void wait_rfifo_ready(struct sh_flctl *flctl)
224 {
225         uint32_t timeout = LOOP_TIMEOUT_MAX;
226
227         while (timeout--) {
228                 uint32_t val;
229                 /* check FIFO */
230                 val = readl(FLDTCNTR(flctl)) >> 16;
231                 if (val & 0xFF)
232                         return;
233                 udelay(1);
234         }
235         timeout_error(flctl, __func__);
236 }
237
238 static void wait_wfifo_ready(struct sh_flctl *flctl)
239 {
240         uint32_t len, timeout = LOOP_TIMEOUT_MAX;
241
242         while (timeout--) {
243                 /* check FIFO */
244                 len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
245                 if (len >= 4)
246                         return;
247                 udelay(1);
248         }
249         timeout_error(flctl, __func__);
250 }
251
/*
 * wait_recfifo_ready - wait for the ECC FIFO of one 512-byte sector,
 * applying hardware-computed 4-symbol ECC corrections along the way.
 *
 * Returns FL_SUCCESS (clean or ignored empty-sector "error"),
 * FL_REPAIRABLE (corrections applied to done_buff), FL_ERROR
 * (uncorrectable, non-empty sector) or FL_TIMEOUT.
 */
static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loops checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors or
	 * if the bottom part of the loop corrected the errors or marked them as
	 * uncorrectable and the controller is given time to push the data into
	 * the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			/* an all-0xff (erased) sector has no valid ECC;
			 * treat its "error" as benign */
			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
				"reading empty sector %d, ecc error ignored\n",
				sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		/* NOTE(review): only the first three result registers are
		 * consumed even though four are mapped above — this matches
		 * the mainline driver; confirm against the FLCTL manual. */
		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			/* upper half: byte index; lower byte: XOR mask */
			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}
331
332 static void wait_wecfifo_ready(struct sh_flctl *flctl)
333 {
334         uint32_t timeout = LOOP_TIMEOUT_MAX;
335         uint32_t len;
336
337         while (timeout--) {
338                 /* check FLECFIFO */
339                 len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
340                 if (len >= 4)
341                         return;
342                 udelay(1);
343         }
344         timeout_error(flctl, __func__);
345 }
346
/*
 * flctl_dma_fifo0_transfer - move @len bytes between @buf and the data
 * FIFO via dmaengine.
 *
 * @dir selects the channel: DMA_FROM_DEVICE uses the RX channel, anything
 * else the TX channel.  Waits up to 3 seconds for completion.
 *
 * Returns > 0 on success (remaining jiffies from the wait), <= 0 on
 * failure; on prep failure the DMA channels are dropped so the caller
 * falls back to PIO permanently.
 */
static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie = -EINVAL;
	uint32_t reg;
	int ret;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	/* NOTE(review): mapping failure is detected by a zero dma_addr
	 * rather than dma_mapping_error(); the unmap below then runs on
	 * that zero handle — confirm this is benign on this platform. */
	if (dma_addr)
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		/* let the FIFO drive DMA requests for this transfer */
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	/* NOTE(review): wait_for_completion_timeout() returns unsigned
	 * long (jiffies left); storing it in an int is fine for a 3 s
	 * timeout but worth keeping in mind. */
	ret =
	wait_for_completion_timeout(&flctl->dma_complete,
				msecs_to_jiffies(3000));

	if (ret <= 0) {
		dmaengine_terminate_all(chan);
		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
	}

out:
	/* stop the FIFO from raising further DMA requests */
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret > 0 is success */
	return ret;
}
410
411 static void read_datareg(struct sh_flctl *flctl, int offset)
412 {
413         unsigned long data;
414         unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
415
416         wait_completion(flctl);
417
418         data = readl(FLDATAR(flctl));
419         *buf = le32_to_cpu(data);
420 }
421
/*
 * read_fiforeg - read @rlen bytes from the data FIFO into done_buff at
 * @offset.  Uses DMA when a channel is held and the transfer is at least
 * 32 bytes, otherwise polls word-by-word.  The FIFO delivers big-endian
 * words; the final loop converts them to CPU order for both paths.
 */
static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	/* number of whole 32-bit words, rounding rlen up */
	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
			goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}
444
445 static enum flctl_ecc_res_t read_ecfiforeg
446                 (struct sh_flctl *flctl, uint8_t *buff, int sector)
447 {
448         int i;
449         enum flctl_ecc_res_t res;
450         unsigned long *ecc_buf = (unsigned long *)buff;
451
452         res = wait_recfifo_ready(flctl , sector);
453
454         if (res != FL_ERROR) {
455                 for (i = 0; i < 4; i++) {
456                         ecc_buf[i] = readl(FLECFIFO(flctl));
457                         ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
458                 }
459         }
460
461         return res;
462 }
463
464 static void write_fiforeg(struct sh_flctl *flctl, int rlen,
465                                                 unsigned int offset)
466 {
467         int i, len_4align;
468         unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
469
470         len_4align = (rlen + 3) / 4;
471         for (i = 0; i < len_4align; i++) {
472                 wait_wfifo_ready(flctl);
473                 writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
474         }
475 }
476
477 static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
478                                                 unsigned int offset)
479 {
480         int i, len_4align;
481         unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
482
483         len_4align = (rlen + 3) / 4;
484
485         for (i = 0; i < len_4align; i++)
486                 buf[i] = cpu_to_be32(buf[i]);
487
488         /* initiate DMA transfer */
489         if (flctl->chan_fifo0_tx && rlen >= 32 &&
490                 flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
491                         return; /* DMA success */
492
493         /* do polling transfer */
494         for (i = 0; i < len_4align; i++) {
495                 wait_wecfifo_ready(flctl);
496                 writel(buf[i], FLECFIFO(flctl));
497         }
498 }
499
/*
 * set_cmd_regs - derive FLCMNCR/FLCMDCR values for a NAND command and
 * program all three command registers.
 *
 * @cmd:         the NAND command selecting the register recipe.
 * @flcmcdr_val: raw command-code register value; may pack two command
 *               bytes (e.g. (NAND_CMD_READSTART << 8) | NAND_CMD_READ0).
 */
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048byte */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;	/* erase needs a 2nd command byte */
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;		/* data via FIFO */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* This case is that cmd is READ0 or READ1 or READ00 */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;	/* write direction */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;	/* single address byte */
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);	/* no address phase */
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}
561
/* ecc.read_page hook: the controller already corrected the data in the
 * read path, so just copy page (and optionally OOB) out of the buffer. */
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	chip->read_buf(mtd, buf, mtd->writesize);
	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
570
/* ecc.write_page hook: stage page data and OOB; the controller computes
 * ECC when the program command is issued. */
static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				  const uint8_t *buf, int oob_required,
				  int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
579
/*
 * execmd_read_page_sector - read a full page with hardware 4-symbol ECC.
 *
 * Reads page_sectors x 512 data bytes plus 16 OOB bytes per sector into
 * flctl->done_buff (OOB after the page data) and updates ecc_stats.
 */
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	/* enable sector-access mode with ECC correction for this transfer */
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		 FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		/* OOB for sector n lands right after the page data */
		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			/* NOTE(review): message lacks a trailing newline */
			dev_info(&flctl->pdev->dev,
				"applied ecc on page 0x%x", page_addr);
			flctl->mtd.ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				"page 0x%x contains corrupted data\n",
				page_addr);
			flctl->mtd.ecc_stats.failed++;
			break;
		default:
			;
		}
	}

	wait_completion(flctl);

	/* restore normal access mode */
	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
			FLCMNCR(flctl));
}
628
629 static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
630 {
631         struct sh_flctl *flctl = mtd_to_flctl(mtd);
632         int page_sectors = flctl->page_size ? 4 : 1;
633         int i;
634
635         set_cmd_regs(mtd, NAND_CMD_READ0,
636                 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
637
638         empty_fifo(flctl);
639
640         for (i = 0; i < page_sectors; i++) {
641                 set_addr(mtd, (512 + 16) * i + 512 , page_addr);
642                 writel(16, FLDTCNTR(flctl));
643
644                 start_translation(flctl);
645                 read_fiforeg(flctl, 16, 16 * i);
646                 wait_completion(flctl);
647         }
648 }
649
/*
 * execmd_write_page_sector - program a full page with hardware ECC.
 *
 * Uses the page address latched by the preceding SEQIN; per sector it
 * streams 512 data bytes through the data FIFO and 16 OOB bytes through
 * the ECC FIFO.
 */
static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	/* sector-access mode for the duration of the program operation */
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}
675
/*
 * execmd_write_oob - program only the OOB areas of a page (hwecc layout):
 * 16 bytes per 512-byte sector, taken from done_buff, one translation
 * per sector at the page address latched by the preceding SEQIN.
 */
static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		/* OOB of sector n sits 512 bytes into each 528-byte unit */
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set read size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}
697
/*
 * flctl_cmdfunc - NAND command dispatcher (nand_chip->cmdfunc hook).
 *
 * Brackets every command with runtime-PM get/put.  Read-style commands
 * funnel through read_normal_exit, which programs the transfer size and
 * drains the FIFO into done_buff; hwecc paths are delegated to the
 * execmd_* helpers.  Unknown commands are silently ignored.
 */
static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
			int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	/* PAGEPROG consumes the byte count accumulated since SEQIN */
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			/* large page: OOB read is a READ0 at the OOB column */
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		/* just latch the address; the real work happens on ERASE2 */
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		/* latch target for the upcoming PAGEPROG */
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;

	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			/* small page: issue the pointer command latched at SEQIN */
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				printk(KERN_ERR "Invalid address !?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl));	/* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);
runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
	return;
}
873
874 static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
875 {
876         struct sh_flctl *flctl = mtd_to_flctl(mtd);
877         int ret;
878
879         switch (chipnr) {
880         case -1:
881                 flctl->flcmncr_base &= ~CE0_ENABLE;
882
883                 pm_runtime_get_sync(&flctl->pdev->dev);
884                 writel(flctl->flcmncr_base, FLCMNCR(flctl));
885
886                 if (flctl->qos_request) {
887                         dev_pm_qos_remove_request(&flctl->pm_qos);
888                         flctl->qos_request = 0;
889                 }
890
891                 pm_runtime_put_sync(&flctl->pdev->dev);
892                 break;
893         case 0:
894                 flctl->flcmncr_base |= CE0_ENABLE;
895
896                 if (!flctl->qos_request) {
897                         ret = dev_pm_qos_add_request(&flctl->pdev->dev,
898                                                         &flctl->pm_qos,
899                                                         DEV_PM_QOS_RESUME_LATENCY,
900                                                         100);
901                         if (ret < 0)
902                                 dev_err(&flctl->pdev->dev,
903                                         "PM QoS request failed: %d\n", ret);
904                         flctl->qos_request = 1;
905                 }
906
907                 if (flctl->holden) {
908                         pm_runtime_get_sync(&flctl->pdev->dev);
909                         writel(HOLDEN, FLHOLDCR(flctl));
910                         pm_runtime_put_sync(&flctl->pdev->dev);
911                 }
912                 break;
913         default:
914                 BUG();
915         }
916 }
917
918 static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
919 {
920         struct sh_flctl *flctl = mtd_to_flctl(mtd);
921
922         memcpy(&flctl->done_buff[flctl->index], buf, len);
923         flctl->index += len;
924 }
925
926 static uint8_t flctl_read_byte(struct mtd_info *mtd)
927 {
928         struct sh_flctl *flctl = mtd_to_flctl(mtd);
929         uint8_t data;
930
931         data = flctl->done_buff[flctl->index];
932         flctl->index++;
933         return data;
934 }
935
936 static uint16_t flctl_read_word(struct mtd_info *mtd)
937 {
938         struct sh_flctl *flctl = mtd_to_flctl(mtd);
939         uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
940
941         flctl->index += 2;
942         return *buf;
943 }
944
945 static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
946 {
947         struct sh_flctl *flctl = mtd_to_flctl(mtd);
948
949         memcpy(buf, &flctl->done_buff[flctl->index], len);
950         flctl->index += len;
951 }
952
953 static int flctl_chip_init_tail(struct mtd_info *mtd)
954 {
955         struct sh_flctl *flctl = mtd_to_flctl(mtd);
956         struct nand_chip *chip = &flctl->chip;
957
958         if (mtd->writesize == 512) {
959                 flctl->page_size = 0;
960                 if (chip->chipsize > (32 << 20)) {
961                         /* big than 32MB */
962                         flctl->rw_ADRCNT = ADRCNT_4;
963                         flctl->erase_ADRCNT = ADRCNT_3;
964                 } else if (chip->chipsize > (2 << 16)) {
965                         /* big than 128KB */
966                         flctl->rw_ADRCNT = ADRCNT_3;
967                         flctl->erase_ADRCNT = ADRCNT_2;
968                 } else {
969                         flctl->rw_ADRCNT = ADRCNT_2;
970                         flctl->erase_ADRCNT = ADRCNT_1;
971                 }
972         } else {
973                 flctl->page_size = 1;
974                 if (chip->chipsize > (128 << 20)) {
975                         /* big than 128MB */
976                         flctl->rw_ADRCNT = ADRCNT2_E;
977                         flctl->erase_ADRCNT = ADRCNT_3;
978                 } else if (chip->chipsize > (8 << 16)) {
979                         /* big than 512KB */
980                         flctl->rw_ADRCNT = ADRCNT_4;
981                         flctl->erase_ADRCNT = ADRCNT_2;
982                 } else {
983                         flctl->rw_ADRCNT = ADRCNT_3;
984                         flctl->erase_ADRCNT = ADRCNT_1;
985                 }
986         }
987
988         if (flctl->hwecc) {
989                 if (mtd->writesize == 512) {
990                         chip->ecc.layout = &flctl_4secc_oob_16;
991                         chip->badblock_pattern = &flctl_4secc_smallpage;
992                 } else {
993                         chip->ecc.layout = &flctl_4secc_oob_64;
994                         chip->badblock_pattern = &flctl_4secc_largepage;
995                 }
996
997                 chip->ecc.size = 512;
998                 chip->ecc.bytes = 10;
999                 chip->ecc.strength = 4;
1000                 chip->ecc.read_page = flctl_read_page_hwecc;
1001                 chip->ecc.write_page = flctl_write_page_hwecc;
1002                 chip->ecc.mode = NAND_ECC_HW;
1003
1004                 /* 4 symbols ECC enabled */
1005                 flctl->flcmncr_base |= _4ECCEN;
1006         } else {
1007                 chip->ecc.mode = NAND_ECC_SOFT;
1008         }
1009
1010         return 0;
1011 }
1012
1013 static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
1014 {
1015         struct sh_flctl *flctl = dev_id;
1016
1017         dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
1018         writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
1019
1020         return IRQ_HANDLED;
1021 }
1022
/* Per-SoC configuration, attached as .data in the OF match table. */
struct flctl_soc_config {
	unsigned long flcmncr_val;	/* base value for the FLCMNCR register */
	unsigned has_hwecc:1;		/* use the controller's 4-symbol HW ECC */
	unsigned use_holden:1;		/* HOLDEN (FLHOLDCR) handling required */
};
1028
1029 static struct flctl_soc_config flctl_sh7372_config = {
1030         .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
1031         .has_hwecc = 1,
1032         .use_holden = 1,
1033 };
1034
/* OF match table; .data carries the per-SoC flctl_soc_config. */
static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
				.data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);
1041
1042 static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
1043 {
1044         const struct of_device_id *match;
1045         struct flctl_soc_config *config;
1046         struct sh_flctl_platform_data *pdata;
1047         struct device_node *dn = dev->of_node;
1048         int ret;
1049
1050         match = of_match_device(of_flctl_match, dev);
1051         if (match)
1052                 config = (struct flctl_soc_config *)match->data;
1053         else {
1054                 dev_err(dev, "%s: no OF configuration attached\n", __func__);
1055                 return NULL;
1056         }
1057
1058         pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
1059                                                                 GFP_KERNEL);
1060         if (!pdata)
1061                 return NULL;
1062
1063         /* set SoC specific options */
1064         pdata->flcmncr_val = config->flcmncr_val;
1065         pdata->has_hwecc = config->has_hwecc;
1066         pdata->use_holden = config->use_holden;
1067
1068         /* parse user defined options */
1069         ret = of_get_nand_bus_width(dn);
1070         if (ret == 16)
1071                 pdata->flcmncr_val |= SEL_16BIT;
1072         else if (ret != 8) {
1073                 dev_err(dev, "%s: invalid bus width\n", __func__);
1074                 return NULL;
1075         }
1076
1077         return pdata;
1078 }
1079
1080 static int flctl_probe(struct platform_device *pdev)
1081 {
1082         struct resource *res;
1083         struct sh_flctl *flctl;
1084         struct mtd_info *flctl_mtd;
1085         struct nand_chip *nand;
1086         struct sh_flctl_platform_data *pdata;
1087         int ret;
1088         int irq;
1089         struct mtd_part_parser_data ppdata = {};
1090
1091         flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
1092         if (!flctl)
1093                 return -ENOMEM;
1094
1095         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1096         flctl->reg = devm_ioremap_resource(&pdev->dev, res);
1097         if (IS_ERR(flctl->reg))
1098                 return PTR_ERR(flctl->reg);
1099
1100         irq = platform_get_irq(pdev, 0);
1101         if (irq < 0) {
1102                 dev_err(&pdev->dev, "failed to get flste irq data\n");
1103                 return -ENXIO;
1104         }
1105
1106         ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
1107                                "flste", flctl);
1108         if (ret) {
1109                 dev_err(&pdev->dev, "request interrupt failed.\n");
1110                 return ret;
1111         }
1112
1113         if (pdev->dev.of_node)
1114                 pdata = flctl_parse_dt(&pdev->dev);
1115         else
1116                 pdata = dev_get_platdata(&pdev->dev);
1117
1118         if (!pdata) {
1119                 dev_err(&pdev->dev, "no setup data defined\n");
1120                 return -EINVAL;
1121         }
1122
1123         platform_set_drvdata(pdev, flctl);
1124         flctl_mtd = &flctl->mtd;
1125         nand = &flctl->chip;
1126         flctl_mtd->priv = nand;
1127         flctl_mtd->dev.parent = &pdev->dev;
1128         flctl->pdev = pdev;
1129         flctl->hwecc = pdata->has_hwecc;
1130         flctl->holden = pdata->use_holden;
1131         flctl->flcmncr_base = pdata->flcmncr_val;
1132         flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
1133
1134         /* Set address of hardware control function */
1135         /* 20 us command delay time */
1136         nand->chip_delay = 20;
1137
1138         nand->read_byte = flctl_read_byte;
1139         nand->write_buf = flctl_write_buf;
1140         nand->read_buf = flctl_read_buf;
1141         nand->select_chip = flctl_select_chip;
1142         nand->cmdfunc = flctl_cmdfunc;
1143
1144         if (pdata->flcmncr_val & SEL_16BIT) {
1145                 nand->options |= NAND_BUSWIDTH_16;
1146                 nand->read_word = flctl_read_word;
1147         }
1148
1149         pm_runtime_enable(&pdev->dev);
1150         pm_runtime_resume(&pdev->dev);
1151
1152         flctl_setup_dma(flctl);
1153
1154         ret = nand_scan_ident(flctl_mtd, 1, NULL);
1155         if (ret)
1156                 goto err_chip;
1157
1158         ret = flctl_chip_init_tail(flctl_mtd);
1159         if (ret)
1160                 goto err_chip;
1161
1162         ret = nand_scan_tail(flctl_mtd);
1163         if (ret)
1164                 goto err_chip;
1165
1166         ppdata.of_node = pdev->dev.of_node;
1167         ret = mtd_device_parse_register(flctl_mtd, NULL, &ppdata, pdata->parts,
1168                         pdata->nr_parts);
1169
1170         return 0;
1171
1172 err_chip:
1173         flctl_release_dma(flctl);
1174         pm_runtime_disable(&pdev->dev);
1175         return ret;
1176 }
1177
/*
 * Unbind: release the DMA channels, unregister/free the NAND device,
 * then disable runtime PM — the reverse of the probe sequence.
 */
static int flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);

	flctl_release_dma(flctl);
	nand_release(&flctl->mtd);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
1188
/*
 * No .probe member here: the probe routine is bound via
 * module_platform_driver_probe() below (probe-once, non-deferring).
 */
static struct platform_driver flctl_driver = {
	.remove		= flctl_remove,
	.driver = {
		.name	= "sh_flctl",
		.of_match_table = of_match_ptr(of_flctl_match),
	},
};

module_platform_driver_probe(flctl_driver, flctl_probe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");