kvmfornfv.git: kernel/drivers/crypto/qat/qat_dh895xcc/adf_isr.c (raw update to linux-4.4.6-rt14)
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_cfg_strings.h>
#include <adf_cfg_common.h>
#include <adf_transport_access_macros.h>
#include <adf_transport_internal.h>
#include "adf_drv.h"
#include "adf_dh895xcc_hw_data.h"
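
/*
 * Enable MSI-X for the device: one vector per ring bank plus one for
 * the AE cluster when SR-IOV is off, or only the AE vector (MSI-X
 * table entry num_banks) when SR-IOV is on and the banks belong to
 * the VFs.
 */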
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;

	/* If SR-IOV is disabled, add entries for each bank */
	if (!accel_dev->pf.vf_info) {
		int i;

		msix_num_entries += hw_data->num_banks;
		for (i = 0; i < msix_num_entries; i++)
			pci_dev_info->msix_entries.entries[i].entry = i;
	} else {
		pci_dev_info->msix_entries.entries[0].entry =
			hw_data->num_banks;
	}

	if (pci_enable_msix_exact(pci_dev_info->pci_dev,
				  pci_dev_info->msix_entries.entries,
				  msix_num_entries)) {
		dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
		return -EFAULT;
	}
	return 0;
}
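
/* Disable MSI-X on the device; the inverse of adf_enable_msix(). */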
static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
	pci_disable_msix(pci_dev_info->pci_dev);
}
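
/*
 * Top half for a bundle (ring bank) vector: silence the bank's
 * interrupt and defer response processing to the bank's tasklet,
 * which re-arms the interrupt once the responses have been handled.
 */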
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;

	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
	tasklet_hi_schedule(&bank->resp_handler);
	return IRQ_HANDLED;
}
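
/*
 * Top half for the AE cluster vector. On a PF with SR-IOV enabled
 * this vector also carries VF-to-PF interrupts: the pending sources
 * are decoded from the ERRSOU5/ERRSOU3 registers and handed off to
 * per-VF tasklets. Anything else is reported as spurious.
 */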
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;

#ifdef CONFIG_PCI_IOV
	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
	if (accel_dev->pf.vf_info) {
		void __iomem *pmisc_bar_addr =
		    (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
		u32 vf_mask;

		/* Get the interrupt sources triggered by VFs */
		vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) &
			    0x0000FFFF) << 16) |
			  ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) &
			    0x01FFFE00) >> 9);

		if (vf_mask) {
			struct adf_accel_vf_info *vf_info;
			bool irq_handled = false;
			int i;

			/* Disable VF2PF interrupts for VFs with pending ints */
			adf_disable_vf2pf_interrupts(accel_dev, vf_mask);

			/*
			 * Schedule tasklets to handle VF2PF interrupt BHs
			 * unless the VF is malicious and is attempting to
			 * flood the host OS with VF2PF interrupts.
			 */
			for_each_set_bit(i, (const unsigned long *)&vf_mask,
					 (sizeof(vf_mask) * BITS_PER_BYTE)) {
				vf_info = accel_dev->pf.vf_info + i;

				if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
					dev_info(&GET_DEV(accel_dev),
						 "Too many ints from VF%d\n",
						 vf_info->vf_nr + 1);
					continue;
				}

				/* Tasklet will re-enable ints from this VF */
				tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
				irq_handled = true;
			}

			if (irq_handled)
				return IRQ_HANDLED;
		}
	}
#endif /* CONFIG_PCI_IOV */

	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
		accel_dev->accel_id);

	return IRQ_NONE;
}
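
/*
 * Request the vectors enabled by adf_enable_msix(): one per bank,
 * spread across the online CPUs via affinity hints, unless SR-IOV is
 * enabled, plus the AE cluster vector. On failure, any bundle irqs
 * already requested are released before returning.
 */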
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int ret, i = 0;
	char *name;

	/* Request msix irq for all banks unless SR-IOV enabled */
	if (!accel_dev->pf.vf_info) {
		for (i = 0; i < hw_data->num_banks; i++) {
			struct adf_etr_bank_data *bank = &etr_data->banks[i];
			unsigned int cpu, cpus = num_online_cpus();

			name = *(pci_dev_info->msix_entries.names + i);
			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
				 "qat%d-bundle%d", accel_dev->accel_id, i);
			ret = request_irq(msixe[i].vector,
					  adf_msix_isr_bundle, 0, name, bank);
			if (ret) {
				dev_err(&GET_DEV(accel_dev),
					"failed to enable irq %d for %s\n",
					msixe[i].vector, name);
				goto err_free_bundle_irqs;
			}

			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
			       i) % cpus;
			irq_set_affinity_hint(msixe[i].vector,
					      get_cpu_mask(cpu));
		}
	}

	/* Request msix irq for AE */
	name = *(pci_dev_info->msix_entries.names + i);
	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat%d-ae-cluster", accel_dev->accel_id);
	ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"failed to enable irq %d for %s\n",
			msixe[i].vector, name);
		goto err_free_bundle_irqs;
	}
	return 0;

err_free_bundle_irqs:
	/* Unwind only the bundle irqs requested before the failure */
	while (--i >= 0) {
		irq_set_affinity_hint(msixe[i].vector, NULL);
		free_irq(msixe[i].vector, &etr_data->banks[i]);
	}
	return ret;
}
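
/* Release the per-bank vectors (if requested) and the AE vector. */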
static void adf_free_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int i = 0;

	if (pci_dev_info->msix_entries.num_entries > 1) {
		for (i = 0; i < hw_data->num_banks; i++) {
			irq_set_affinity_hint(msixe[i].vector, NULL);
			free_irq(msixe[i].vector, &etr_data->banks[i]);
		}
	}
	irq_set_affinity_hint(msixe[i].vector, NULL);
	free_irq(msixe[i].vector, accel_dev);
}
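
/*
 * Allocate the msix_entry array and a name buffer per vector, sized
 * for either PF-with-banks or SR-IOV operation.
 */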
static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	int i;
	char **names;
	struct msix_entry *entries;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;

	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
	if (!accel_dev->pf.vf_info)
		msix_num_entries += hw_data->num_banks;

	entries = kzalloc_node(msix_num_entries * sizeof(*entries),
			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
	if (!entries)
		return -ENOMEM;

	names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
	if (!names) {
		kfree(entries);
		return -ENOMEM;
	}
	for (i = 0; i < msix_num_entries; i++) {
		*(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
		if (!(*(names + i)))
			goto err;
	}
	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
	accel_dev->accel_pci_dev.msix_entries.entries = entries;
	accel_dev->accel_pci_dev.msix_entries.names = names;
	return 0;
err:
	for (i = 0; i < msix_num_entries; i++)
		kfree(*(names + i));
	kfree(entries);
	kfree(names);
	return -ENOMEM;
}
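
/* Free the msix_entry array and name buffers allocated above. */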
static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	char **names = accel_dev->accel_pci_dev.msix_entries.names;
	int i;

	kfree(accel_dev->accel_pci_dev.msix_entries.entries);
	for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
		kfree(*(names + i));
	kfree(names);
}
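
/* Initialise one response-handling tasklet (bottom half) per bank. */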
static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++)
		tasklet_init(&priv_data->banks[i].resp_handler,
			     adf_response_handler,
			     (unsigned long)&priv_data->banks[i]);
	return 0;
}
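
/* Disable and kill the per-bank response tasklets. */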
static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++) {
		tasklet_disable(&priv_data->banks[i].resp_handler);
		tasklet_kill(&priv_data->banks[i].resp_handler);
	}
}
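
/*
 * Tear down the ISR resources in reverse order of
 * adf_isr_resource_alloc(): irqs, tasklets, MSI-X, entry table.
 */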
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_cleanup_bh(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_entry_table(accel_dev);
}
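
/*
 * Allocate and wire up all ISR resources for the device. Each failure
 * path unwinds only the steps that already completed, rather than
 * calling adf_isr_resource_free() on resources never set up.
 */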
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_entry_table(accel_dev);
	if (ret)
		return ret;

	ret = adf_enable_msix(accel_dev);
	if (ret)
		goto err_free_msix_table;

	ret = adf_setup_bh(accel_dev);
	if (ret)
		goto err_disable_msix;

	ret = adf_request_irqs(accel_dev);
	if (ret)
		goto err_cleanup_bh;

	return 0;

err_cleanup_bh:
	adf_cleanup_bh(accel_dev);
err_disable_msix:
	adf_disable_msix(&accel_dev->accel_pci_dev);
err_free_msix_table:
	adf_isr_free_msix_entry_table(accel_dev);
	return ret;
}