[kvmfornfv.git] / kernel / drivers / mmc / host / android-goldfish.c
/*
 *  Copyright 2007, Google Inc.
 *  Copyright 2012, Intel Inc.
 *
 *  based on omap.c driver, which was
 *  Copyright (C) 2004 Nokia Corporation
 *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
 *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
 *  Other hacks (DMA, SD, etc) by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/major.h>

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/clk.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/scatterlist.h>

#include <asm/types.h>
#include <asm/uaccess.h>

#define DRIVER_NAME "goldfish_mmc"

#define BUFFER_SIZE   16384

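/*
 * 32-bit MMIO accessors for the emulated controller's register block
 * (reg_base is mapped in goldfish_mmc_probe()).
 */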
#define GOLDFISH_MMC_READ(host, addr)   (readl(host->reg_base + addr))
#define GOLDFISH_MMC_WRITE(host, addr, x)   (writel(x, host->reg_base + addr))

enum {
        /* status register */
        MMC_INT_STATUS          = 0x00,
        /* set this to enable IRQ */
        MMC_INT_ENABLE          = 0x04,
        /* set this to specify buffer address */
        MMC_SET_BUFFER          = 0x08,

        /* MMC command number */
        MMC_CMD                 = 0x0C,

        /* MMC argument */
        MMC_ARG                 = 0x10,

        /* MMC response (or R2 bits 0 - 31) */
        MMC_RESP_0              = 0x14,

        /* MMC R2 response bits 32 - 63 */
        MMC_RESP_1              = 0x18,

        /* MMC R2 response bits 64 - 95 */
        MMC_RESP_2              = 0x1C,

        /* MMC R2 response bits 96 - 127 */
        MMC_RESP_3              = 0x20,

        MMC_BLOCK_LENGTH        = 0x24,
        MMC_BLOCK_COUNT         = 0x28,

        /* MMC state flags */
        MMC_STATE               = 0x2C,

        /* MMC_INT_STATUS bits */

        MMC_STAT_END_OF_CMD     = 1U << 0,
        MMC_STAT_END_OF_DATA    = 1U << 1,
        MMC_STAT_STATE_CHANGE   = 1U << 2,
        MMC_STAT_CMD_TIMEOUT    = 1U << 3,

        /* MMC_STATE bits */
        MMC_STATE_INSERTED      = 1U << 0,
        MMC_STATE_READ_ONLY     = 1U << 1,
};

/*
 * Command types
 */
#define OMAP_MMC_CMDTYPE_BC     0
#define OMAP_MMC_CMDTYPE_BCR    1
#define OMAP_MMC_CMDTYPE_AC     2
#define OMAP_MMC_CMDTYPE_ADTC   3

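/* Per-host driver state, kept in the mmc_host private area. */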
struct goldfish_mmc_host {
        struct mmc_request      *mrq;
        struct mmc_command      *cmd;
        struct mmc_data         *data;
        struct mmc_host         *mmc;
        struct device           *dev;
        unsigned char           id; /* 16xx chips have 2 MMC blocks */
        void __iomem            *virt_base;
        unsigned int            phys_base;
        int                     irq;
        unsigned char           bus_mode;
        unsigned char           hw_bus_mode;

        unsigned int            sg_len;
        unsigned                dma_done:1;
        unsigned                dma_in_use:1;

        void __iomem            *reg_base;
};

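/*
 * The emulated card has no cover switch, so the cover is always
 * reported as closed via the cover_switch sysfs attribute below.
 */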
static inline int
goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host)
{
        return 0;
}

static ssize_t
goldfish_mmc_show_cover_switch(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct goldfish_mmc_host *host = dev_get_drvdata(dev);

        return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" :
                       "closed");
}

static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL);

static void
goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd)
{
        u32 cmdreg;
        u32 resptype;
        u32 cmdtype;

        host->cmd = cmd;

        resptype = 0;
        cmdtype = 0;

        /* Our hardware needs to know exact type */
        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                break;
        case MMC_RSP_R1:
        case MMC_RSP_R1B:
                /* resp 1, 1b, 6, 7 */
                resptype = 1;
                break;
        case MMC_RSP_R2:
                resptype = 2;
                break;
        case MMC_RSP_R3:
                resptype = 3;
                break;
        default:
                dev_err(mmc_dev(host->mmc),
                        "Invalid response type: %04x\n", mmc_resp_type(cmd));
                break;
        }

        if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
                cmdtype = OMAP_MMC_CMDTYPE_ADTC;
        else if (mmc_cmd_type(cmd) == MMC_CMD_BC)
                cmdtype = OMAP_MMC_CMDTYPE_BC;
        else if (mmc_cmd_type(cmd) == MMC_CMD_BCR)
                cmdtype = OMAP_MMC_CMDTYPE_BCR;
        else
                cmdtype = OMAP_MMC_CMDTYPE_AC;

        cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

        if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
                cmdreg |= 1 << 6;

        if (cmd->flags & MMC_RSP_BUSY)
                cmdreg |= 1 << 11;

        if (host->data && !(host->data->flags & MMC_DATA_WRITE))
                cmdreg |= 1 << 15;

        GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg);
        GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg);
}

static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
                                   struct mmc_data *data)
{
        if (host->dma_in_use) {
                enum dma_data_direction dma_data_dir;

                if (data->flags & MMC_DATA_WRITE)
                        dma_data_dir = DMA_TO_DEVICE;
                else
                        dma_data_dir = DMA_FROM_DEVICE;

                if (dma_data_dir == DMA_FROM_DEVICE) {
                        /*
                         * We don't really have DMA, so we need
                         * to copy from our platform driver buffer
                         */
                        uint8_t *dest = (uint8_t *)sg_virt(data->sg);
                        memcpy(dest, host->virt_base, data->sg->length);
                }
                host->data->bytes_xfered += data->sg->length;
                dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
                             dma_data_dir);
        }

        host->data = NULL;
        host->sg_len = 0;

        /*
         * NOTE:  MMC layer will sometimes poll-wait CMD13 next, issuing
         * dozens of requests until the card finishes writing data.
         * It'd be cheaper to just wait till an EOFB interrupt arrives...
         */

        if (!data->stop) {
                host->mrq = NULL;
                mmc_request_done(host->mmc, data->mrq);
                return;
        }

        goldfish_mmc_start_command(host, data->stop);
}

static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host,
                                     struct mmc_data *data)
{
        if (!host->dma_in_use) {
                goldfish_mmc_xfer_done(host, data);
                return;
        }
        if (host->dma_done)
                goldfish_mmc_xfer_done(host, data);
}

static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host,
                                  struct mmc_command *cmd)
{
        host->cmd = NULL;
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        /* response type 2 */
                        cmd->resp[3] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_0);
                        cmd->resp[2] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_1);
                        cmd->resp[1] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_2);
                        cmd->resp[0] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_3);
                } else {
                        /* response types 1, 1b, 3, 4, 5, 6 */
                        cmd->resp[0] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_0);
                }
        }

        if (host->data == NULL || cmd->error) {
                host->mrq = NULL;
                mmc_request_done(host->mmc, cmd->mrq);
        }
}

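/*
 * Interrupt handler: acknowledge the pending MMC_INT_STATUS bits and
 * dispatch end-of-command, end-of-data, command-timeout and
 * card-state-change events.
 */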
static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
{
        struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id;
        u16 status;
        int end_command = 0;
        int end_transfer = 0;
        int transfer_error = 0;
        int state_changed = 0;
        int cmd_timeout = 0;

        while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
                GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);

                if (status & MMC_STAT_END_OF_CMD)
                        end_command = 1;

                if (status & MMC_STAT_END_OF_DATA)
                        end_transfer = 1;

                if (status & MMC_STAT_STATE_CHANGE)
                        state_changed = 1;

                if (status & MMC_STAT_CMD_TIMEOUT) {
                        end_command = 0;
                        cmd_timeout = 1;
                }
        }

        if (cmd_timeout) {
                struct mmc_request *mrq = host->mrq;
                mrq->cmd->error = -ETIMEDOUT;
                host->mrq = NULL;
                mmc_request_done(host->mmc, mrq);
        }

        if (end_command)
                goldfish_mmc_cmd_done(host, host->cmd);

        if (transfer_error)
                goldfish_mmc_xfer_done(host, host->data);
        else if (end_transfer) {
                host->dma_done = 1;
                goldfish_mmc_end_of_data(host, host->data);
        } else if (host->data != NULL) {
                /*
                 * WORKAROUND -- after porting this driver from 2.6 to 3.4,
                 * during device initialization, cases where host->data is
                 * non-null but end_transfer is false would occur. Doing
                 * nothing in such cases results in no further interrupts,
                 * and initialization failure.
                 * TODO -- find the real cause.
                 */
                host->dma_done = 1;
                goldfish_mmc_end_of_data(host, host->data);
        }

        if (state_changed) {
                u32 state = GOLDFISH_MMC_READ(host, MMC_STATE);
                pr_info("%s: Card detect now %d\n", __func__,
                        (state & MMC_STATE_INSERTED));
                mmc_detect_change(host->mmc, 0);
        }

        if (!end_command && !end_transfer &&
            !transfer_error && !state_changed && !cmd_timeout) {
                status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
                dev_info(mmc_dev(host->mmc), "spurious irq 0x%04x\n", status);
                if (status != 0) {
                        GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
                        GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
                }
        }

        return IRQ_HANDLED;
}

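/*
 * Program the block length/count registers and map the scatterlist.
 * Writes are staged into the shared buffer here; reads are copied out
 * of it in goldfish_mmc_xfer_done().
 */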
static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
                                      struct mmc_request *req)
{
        struct mmc_data *data = req->data;
        int block_size;
        unsigned sg_len;
        enum dma_data_direction dma_data_dir;

        host->data = data;
        if (data == NULL) {
                GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0);
                GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0);
                host->dma_in_use = 0;
                return;
        }

        block_size = data->blksz;

        GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1);
        GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1);

        /*
         * Cope with calling layer confusion; it issues "single
         * block" writes using multi-block scatterlists.
         */
        sg_len = (data->blocks == 1) ? 1 : data->sg_len;

        if (data->flags & MMC_DATA_WRITE)
                dma_data_dir = DMA_TO_DEVICE;
        else
                dma_data_dir = DMA_FROM_DEVICE;

        host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
                                  sg_len, dma_data_dir);
        host->dma_done = 0;
        host->dma_in_use = 1;

        if (dma_data_dir == DMA_TO_DEVICE) {
                /*
                 * We don't really have DMA, so we need to copy to our
                 * platform driver buffer
                 */
                const uint8_t *src = (uint8_t *)sg_virt(data->sg);
                memcpy(host->virt_base, src, data->sg->length);
        }
}

static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
        struct goldfish_mmc_host *host = mmc_priv(mmc);

        WARN_ON(host->mrq != NULL);

        host->mrq = req;
        goldfish_mmc_prepare_data(host, req);
        goldfish_mmc_start_command(host, req->cmd);

        /*
         * This is to avoid accidentally being detected as an SDIO card
         * in mmc_attach_sdio().
         */
        if (req->cmd->opcode == SD_IO_SEND_OP_COND &&
            req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR))
                req->cmd->error = -EINVAL;
}

static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct goldfish_mmc_host *host = mmc_priv(mmc);

        host->bus_mode = ios->bus_mode;
        host->hw_bus_mode = host->bus_mode;
}

static int goldfish_mmc_get_ro(struct mmc_host *mmc)
{
        uint32_t state;
        struct goldfish_mmc_host *host = mmc_priv(mmc);

        state = GOLDFISH_MMC_READ(host, MMC_STATE);
        return ((state & MMC_STATE_READ_ONLY) != 0);
}

static const struct mmc_host_ops goldfish_mmc_ops = {
        .request        = goldfish_mmc_request,
        .set_ios        = goldfish_mmc_set_ios,
        .get_ro         = goldfish_mmc_get_ro,
};

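/*
 * Probe: map the register window, allocate the shared transfer buffer,
 * install the interrupt handler and register the MMC host.
 */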
static int goldfish_mmc_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        struct goldfish_mmc_host *host = NULL;
        struct resource *res;
        int ret = 0;
        int irq;
        dma_addr_t buf_addr;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
        if (res == NULL || irq < 0)
                return -ENXIO;

        mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
        if (mmc == NULL) {
                ret = -ENOMEM;
                goto err_alloc_host_failed;
        }

        host = mmc_priv(mmc);
        host->mmc = mmc;

        pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end);
        host->reg_base = ioremap(res->start, resource_size(res));
        if (host->reg_base == NULL) {
                ret = -ENOMEM;
                goto ioremap_failed;
        }
        host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
                                             &buf_addr, GFP_KERNEL);

        if (host->virt_base == NULL) {
                ret = -ENOMEM;
                goto dma_alloc_failed;
        }
        host->phys_base = buf_addr;

        host->id = pdev->id;
        host->irq = irq;

        mmc->ops = &goldfish_mmc_ops;
        mmc->f_min = 400000;
        mmc->f_max = 24000000;
        mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
        mmc->caps = MMC_CAP_4_BIT_DATA;

        /* Use scatterlist DMA to reduce per-transfer costs.
         * NOTE max_seg_size assumption that small blocks aren't
         * normally used (except e.g. for reading SD registers).
         */
        mmc->max_segs = 32;
        mmc->max_blk_size = 2048;       /* MMC_BLOCK_LENGTH is 11 bits (+1) */
        mmc->max_blk_count = 2048;      /* MMC_BLOCK_COUNT is 11 bits (+1) */
        mmc->max_req_size = BUFFER_SIZE;
        mmc->max_seg_size = mmc->max_req_size;

        ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
        if (ret) {
                dev_err(&pdev->dev, "Failed IRQ Adding goldfish MMC\n");
                goto err_request_irq_failed;
        }

        host->dev = &pdev->dev;
        platform_set_drvdata(pdev, host);

        ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
        if (ret)
                dev_warn(mmc_dev(host->mmc),
                         "Unable to create sysfs attributes\n");

        GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
        GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
                           MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
                           MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);

        mmc_add_host(mmc);
        return 0;

err_request_irq_failed:
        dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base,
                          host->phys_base);
dma_alloc_failed:
        iounmap(host->reg_base);
ioremap_failed:
        mmc_free_host(host->mmc);
err_alloc_host_failed:
        return ret;
}

static int goldfish_mmc_remove(struct platform_device *pdev)
{
        struct goldfish_mmc_host *host = platform_get_drvdata(pdev);

        BUG_ON(host == NULL);

        mmc_remove_host(host->mmc);
        free_irq(host->irq, host);
        dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base);
        iounmap(host->reg_base);
        mmc_free_host(host->mmc);
        return 0;
}

static struct platform_driver goldfish_mmc_driver = {
        .probe          = goldfish_mmc_probe,
        .remove         = goldfish_mmc_remove,
        .driver         = {
                .name   = DRIVER_NAME,
        },
};

module_platform_driver(goldfish_mmc_driver);
MODULE_LICENSE("GPL v2");