kernel/drivers/crypto/qce/dma.c
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>

#include "dma.h"

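/*
 * qce_dma_request() - obtain the "tx" and "rx" DMA slave channels
 * described in the device tree and allocate a single buffer that holds
 * the result area followed by the scratch area for output the driver
 * ignores.
 */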
int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
	int ret;

	dma->txchan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(dma->txchan))
		return PTR_ERR(dma->txchan);

	dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(dma->rxchan)) {
		ret = PTR_ERR(dma->rxchan);
		goto error_rx;
	}

	dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
				  GFP_KERNEL);
	if (!dma->result_buf) {
		ret = -ENOMEM;
		goto error_nomem;
	}

	dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;

	return 0;
error_nomem:
	dma_release_channel(dma->rxchan);
error_rx:
	dma_release_channel(dma->txchan);
	return ret;
}

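/*
 * qce_dma_release() - undo qce_dma_request(): release both channels and
 * free the result buffer.
 */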
void qce_dma_release(struct qce_dma_data *dma)
{
	dma_release_channel(dma->txchan);
	dma_release_channel(dma->rxchan);
	kfree(dma->result_buf);
}

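/*
 * qce_mapsg() - DMA-map a scatterlist.  Chained lists are mapped one
 * entry at a time, since dma_map_sg() does not walk across chain links
 * on all architectures.  Returns @nents on success or -EFAULT on
 * failure.
 */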
int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
	      enum dma_data_direction dir, bool chained)
{
	int err;

	if (chained) {
		while (sg) {
			err = dma_map_sg(dev, sg, 1, dir);
			if (!err)
				return -EFAULT;
			sg = sg_next(sg);
		}
	} else {
		err = dma_map_sg(dev, sg, nents, dir);
		if (!err)
			return -EFAULT;
	}

	return nents;
}

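/*
 * qce_unmapsg() - inverse of qce_mapsg(): unmap the scatterlist, again
 * walking chained lists one entry at a time.
 */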
void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
		 enum dma_data_direction dir, bool chained)
{
	if (chained)
		while (sg) {
			dma_unmap_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_unmap_sg(dev, sg, nents, dir);
}

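/*
 * qce_countsg() - count the scatterlist entries needed to cover @nbytes.
 * A zero-length entry in the adjacent array slot marks a chain link, in
 * which case *@chained is set to true.
 */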
int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
{
	struct scatterlist *sg = sglist;
	int nents = 0;

	if (chained)
		*chained = false;

	while (nbytes > 0 && sg) {
		nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
			*chained = true;
		sg = sg_next(sg);
	}

	return nents;
}

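/*
 * qce_sgtable_add() - copy the entries of @new_sgl into the first unused
 * (pageless) slots of @sgt.  Returns the last entry written, or
 * ERR_PTR(-EINVAL) when the table has no free slot.
 */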
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
{
	struct scatterlist *sg = sgt->sgl, *sg_last = NULL;

	while (sg) {
		if (!sg_page(sg))
			break;
		sg = sg_next(sg);
	}

	if (!sg)
		return ERR_PTR(-EINVAL);

	while (new_sgl && sg) {
		sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
			    new_sgl->offset);
		sg_last = sg;
		sg = sg_next(sg);
		new_sgl = sg_next(new_sgl);
	}

	return sg_last;
}

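/*
 * qce_dma_prep_sg() - prepare and submit a single slave-sg descriptor on
 * @chan with the given completion callback.  Returns 0 on success or a
 * negative errno.
 */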
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
			   int nents, unsigned long flags,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (!sg || !nents)
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	desc->callback = cb;
	desc->callback_param = cb_param;
	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

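/*
 * qce_dma_prep_sgs() - queue both halves of a request.  The channel
 * names follow the crypto engine's point of view: the "rx" pipe consumes
 * input from memory (DMA_MEM_TO_DEV) and the "tx" pipe writes results
 * back to memory (DMA_DEV_TO_MEM), so the completion callback is
 * attached to the tx (result) transfer.
 */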
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
		     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
		     dma_async_tx_callback cb, void *cb_param)
{
	struct dma_chan *rxchan = dma->rxchan;
	struct dma_chan *txchan = dma->txchan;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	int ret;

	ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
			      NULL, NULL);
	if (ret)
		return ret;

	return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
			       cb, cb_param);
}

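/*
 * qce_dma_issue_pending() - kick both channels so the queued descriptors
 * start executing.
 */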
void qce_dma_issue_pending(struct qce_dma_data *dma)
{
	dma_async_issue_pending(dma->rxchan);
	dma_async_issue_pending(dma->txchan);
}

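/*
 * qce_dma_terminate_all() - abort outstanding transfers on both
 * channels.  An error from the rx channel takes precedence over the tx
 * result.
 */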
int qce_dma_terminate_all(struct qce_dma_data *dma)
{
	int ret;

	ret = dmaengine_terminate_all(dma->rxchan);
	return ret ?: dmaengine_terminate_all(dma->txchan);
}
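
/*
 * Illustrative usage sketch (not part of this file): how a qce transform
 * might drive these helpers.  "qce_req_done" and "async_req" are
 * hypothetical stand-ins for the caller's completion callback and
 * request:
 *
 *	bool chained;
 *	int nents = qce_countsg(src_sg, nbytes, &chained);
 *
 *	nents = qce_mapsg(dev, src_sg, nents, DMA_TO_DEVICE, chained);
 *	ret = qce_dma_prep_sgs(&qce->dma, src_sg, nents, dst_sg, dst_nents,
 *			       qce_req_done, async_req);
 *	if (ret)
 *		return ret;
 *	qce_dma_issue_pending(&qce->dma);
 *
 * On error or teardown the caller unmaps with qce_unmapsg() and aborts
 * in-flight transfers with qce_dma_terminate_all().
 */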