kernel/drivers/net/ethernet/mellanox/mlx4/icm.c
/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
        MLX4_ICM_ALLOC_SIZE     = 1 << 18,
        MLX4_TABLE_CHUNK_SIZE   = 1 << 18
};

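/*
 * Free a chunk that was allocated with alloc_pages: undo the streaming
 * DMA mapping (if one was created) and release each scatterlist
 * entry's pages at its original allocation order.
 */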
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        if (chunk->nsg > 0)
                pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
                             PCI_DMA_BIDIRECTIONAL);

        for (i = 0; i < chunk->npages; ++i)
                __free_pages(sg_page(&chunk->mem[i]),
                             get_order(chunk->mem[i].length));
}

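/*
 * Free a chunk that was allocated with dma_alloc_coherent; each
 * scatterlist entry holds one coherent buffer, so no unmap step is
 * needed.
 */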
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->npages; ++i)
                dma_free_coherent(&dev->persist->pdev->dev,
                                  chunk->mem[i].length,
                                  lowmem_page_address(sg_page(&chunk->mem[i])),
                                  sg_dma_address(&chunk->mem[i]));
}

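/*
 * Free an entire ICM area: walk the chunk list, release each chunk
 * with the allocator-appropriate helper, then free the bookkeeping
 * structures themselves.
 */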
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
        struct mlx4_icm_chunk *chunk, *tmp;

        if (!icm)
                return;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (coherent)
                        mlx4_free_icm_coherent(dev, chunk);
                else
                        mlx4_free_icm_pages(dev, chunk);

                kfree(chunk);
        }

        kfree(icm);
}

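/*
 * Allocate one scatterlist entry's worth of pages, preferring the
 * requested NUMA node and falling back to any node before giving up.
 */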
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
                                gfp_t gfp_mask, int node)
{
        struct page *page;

        page = alloc_pages_node(node, gfp_mask, order);
        if (!page) {
                page = alloc_pages(gfp_mask, order);
                if (!page)
                        return -ENOMEM;
        }

        sg_set_page(mem, page, PAGE_SIZE << order, 0);
        return 0;
}

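/*
 * Allocate one coherent buffer and record it in the scatterlist entry.
 * dma_alloc_coherent() returns a page-aligned lowmem address, so the
 * resulting entry must have a zero offset (checked by the BUG_ON).
 */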
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
                                   int order, gfp_t gfp_mask)
{
        void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
                                       &sg_dma_address(mem), gfp_mask);
        if (!buf)
                return -ENOMEM;

        sg_set_buf(mem, buf, PAGE_SIZE << order);
        BUG_ON(mem->offset);
        sg_dma_len(mem) = PAGE_SIZE << order;
        return 0;
}

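/*
 * Allocate an ICM area of @npages pages as a list of chunks, each
 * holding up to MLX4_ICM_CHUNK_LEN scatterlist entries.  Allocations
 * start at MLX4_ICM_ALLOC_SIZE and fall back to smaller orders under
 * memory pressure; non-coherent chunks are DMA-mapped once they fill
 * up, with the final partial chunk mapped after the loop.
 */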
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                                gfp_t gfp_mask, int coherent)
{
        struct mlx4_icm *icm;
        struct mlx4_icm_chunk *chunk = NULL;
        int cur_order;
        int ret;

        /* We use sg_set_buf for coherent allocs, which assumes low memory */
        BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

        icm = kmalloc_node(sizeof(*icm),
                           gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
                           dev->numa_node);
        if (!icm) {
                icm = kmalloc(sizeof(*icm),
                              gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                if (!icm)
                        return NULL;
        }

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc_node(sizeof(*chunk),
                                             gfp_mask & ~(__GFP_HIGHMEM |
                                                          __GFP_NOWARN),
                                             dev->numa_node);
                        if (!chunk) {
                                chunk = kmalloc(sizeof(*chunk),
                                                gfp_mask & ~(__GFP_HIGHMEM |
                                                             __GFP_NOWARN));
                                if (!chunk)
                                        goto fail;
                        }

                        sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                while (1 << cur_order > npages)
                        --cur_order;

                if (coherent)
                        ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
                                                      &chunk->mem[chunk->npages],
                                                      cur_order, gfp_mask);
                else
                        ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                   cur_order, gfp_mask,
                                                   dev->numa_node);

                if (ret) {
                        if (--cur_order < 0)
                                goto fail;
                        else
                                continue;
                }

                ++chunk->npages;

                if (coherent)
                        ++chunk->nsg;
                else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
                        chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
                                                chunk->npages,
                                                PCI_DMA_BIDIRECTIONAL);

                        if (chunk->nsg <= 0)
                                goto fail;
                }

                if (chunk->npages == MLX4_ICM_CHUNK_LEN)
                        chunk = NULL;

                npages -= 1 << cur_order;
        }

        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mlx4_free_icm(dev, icm, coherent);
        return NULL;
}

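/*
 * Firmware commands for ICM mapping: MAP_ICM maps a chunk list at a
 * given device virtual address, and UNMAP_ICM releases @page_count
 * pages starting at @virt.
 */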
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

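/*
 * The ICM auxiliary area is mapped as a whole rather than at a
 * caller-chosen device virtual address, so mlx4_map_cmd() is passed
 * -1 for @virt and unmapping takes no address at all.
 */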
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

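/*
 * Take a reference on the ICM chunk backing table entry @obj,
 * allocating and mapping the chunk on first use.  Returns 0 on
 * success or -ENOMEM if the chunk cannot be allocated or mapped.
 */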
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
                   gfp_t gfp)
{
        u32 i = (obj & (table->num_obj - 1)) /
                        (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
        int ret = 0;

        mutex_lock(&table->mutex);

        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                       (table->lowmem ? gfp : GFP_HIGHUSER) |
                                       __GFP_NOWARN, table->coherent);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
                         (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        mutex_unlock(&table->mutex);
        return ret;
}

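/*
 * Drop a reference on the ICM chunk backing table entry @obj; when
 * the last reference goes away, unmap the chunk from the device and
 * free it.
 */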
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
        u32 i;
        u64 offset;

        i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

        mutex_lock(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
                mlx4_UNMAP_ICM(dev, table->virt + offset,
                               MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
        }

        mutex_unlock(&table->mutex);
}

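/*
 * Return the kernel virtual address of table entry @obj, optionally
 * reporting its bus address through @dma_handle.  Only lowmem tables
 * can be addressed this way; the chunk's scatterlist must be walked
 * because DMA mapping may have merged adjacent pages.
 */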
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
                      dma_addr_t *dma_handle)
{
        int offset, dma_offset, i;
        u64 idx;
        struct mlx4_icm_chunk *chunk;
        struct mlx4_icm *icm;
        struct page *page = NULL;

        if (!table->lowmem)
                return NULL;

        mutex_lock(&table->mutex);

        idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
        dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (dma_handle && dma_offset >= 0) {
                                if (sg_dma_len(&chunk->mem[i]) > dma_offset)
                                        *dma_handle = sg_dma_address(&chunk->mem[i]) +
                                                dma_offset;
                                dma_offset -= sg_dma_len(&chunk->mem[i]);
                        }
                        /*
                         * DMA mapping can merge pages but not split them,
                         * so if we found the page, dma_handle has already
                         * been assigned to.
                         */
                        if (chunk->mem[i].length > offset) {
                                page = sg_page(&chunk->mem[i]);
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        mutex_unlock(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}

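/*
 * Reference every table chunk covering entries @start..@end,
 * unwinding the references already taken if any allocation fails.
 */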
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                         u32 start, u32 end)
{
        int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
        int err;
        u32 i;

        for (i = start; i <= end; i += inc) {
                err = mlx4_table_get(dev, table, i, GFP_KERNEL);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i > start) {
                i -= inc;
                mlx4_table_put(dev, table, i);
        }

        return err;
}

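/*
 * Release the per-chunk references taken by mlx4_table_get_range().
 */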
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                          u32 start, u32 end)
{
        u32 i;

        for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
                mlx4_table_put(dev, table, i);
}

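/*
 * Initialize an ICM table descriptor for @nobj objects of @obj_size
 * bytes at device virtual address @virt.  Chunks covering the first
 * @reserved objects (firmware-owned) are allocated and mapped up
 * front and pinned with an extra reference so they are never freed.
 */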
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                        u64 virt, int obj_size, u32 nobj, int reserved,
                        int use_lowmem, int use_coherent)
{
        int obj_per_chunk;
        int num_icm;
        unsigned int chunk_size;
        int i;
        u64 size;

        obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
        num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

        table->icm      = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
        if (!table->icm)
                return -ENOMEM;
        table->virt     = virt;
        table->num_icm  = num_icm;
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
        table->coherent = use_coherent;
        mutex_init(&table->mutex);

        size = (u64) nobj * obj_size;
        for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                chunk_size = MLX4_TABLE_CHUNK_SIZE;
                if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
                        chunk_size = PAGE_ALIGN(size -
                                        i * MLX4_TABLE_CHUNK_SIZE);

                table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                               (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                               __GFP_NOWARN, use_coherent);
                if (!table->icm[i])
                        goto err;
                if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware objects).
                 */
                ++table->icm[i]->refcount;
        }

        return 0;

err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                }

        kfree(table->icm);

        return -ENOMEM;
}

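/*
 * Tear down an ICM table: unmap and free every chunk that is still
 * allocated (including the pinned reserved chunks) and free the
 * chunk-pointer array.
 */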
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
        int i;

        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table->icm);
}