kvmfornfv.git: kernel/drivers/infiniband/hw/mlx4/cq.c
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
        struct ib_event event;
        struct ib_cq *ibcq;

        if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
                pr_warn("Unexpected event type %d "
                       "on CQ %06x\n", type, cq->cqn);
                return;
        }

        ibcq = &to_mibcq(cq)->ibcq;
        if (ibcq->event_handler) {
                event.device     = ibcq->device;
                event.event      = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
        return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
        return get_cqe_from_buf(&cq->buf, n);
}

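/*
 * Return the CQE at index n if it is owned by software (i.e. a completion
 * hardware has already written), or NULL if hardware still owns it.  With
 * 64-byte CQEs the ownership byte lives in the second half of the entry.
 */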
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
        struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
        struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

        return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
                !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
        return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        struct mlx4_ib_cq *mcq = to_mcq(cq);
        struct mlx4_ib_dev *dev = to_mdev(cq->device);

        return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

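/*
 * Allocate a kernel-space CQ buffer of nent entries and set up the MTT so
 * the HCA can DMA completions into it.
 */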
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
        int err;

        err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
                             PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);

        if (err)
                goto out;

        buf->entry_size = dev->dev->caps.cqe_size;
        err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
                                    &buf->mtt);
        if (err)
                goto err_buf;

        err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
        return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
        mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

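/*
 * Pin the user-space CQ buffer at buf_addr and write its page list into an
 * MTT so the HCA can access it directly.
 */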
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
                               struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
                               u64 buf_addr, int cqe)
{
        int err;
        int cqe_size = dev->dev->caps.cqe_size;

        *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
                            IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);

        err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
                            ilog2((*umem)->page_size), &buf->mtt);
        if (err)
                goto err_buf;

        err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        ib_umem_release(*umem);

        return err;
}

struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
                                struct ib_ucontext *context,
                                struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_cq *cq;
        struct mlx4_uar *uar;
        int err;

        if (entries < 1 || entries > dev->dev->caps.max_cqes)
                return ERR_PTR(-EINVAL);

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        entries      = roundup_pow_of_two(entries + 1);
        cq->ibcq.cqe = entries - 1;
        mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;
        INIT_LIST_HEAD(&cq->send_qp_list);
        INIT_LIST_HEAD(&cq->recv_qp_list);

        if (context) {
                struct mlx4_ib_create_cq ucmd;

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_cq;
                }

                err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
                                          ucmd.buf_addr, entries);
                if (err)
                        goto err_cq;

                err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
                                          &cq->db);
                if (err)
                        goto err_mtt;

                uar = &to_mucontext(context)->uar;
        } else {
                err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
                if (err)
                        goto err_cq;

                cq->mcq.set_ci_db  = cq->db.db;
                cq->mcq.arm_db     = cq->db.db + 1;
                *cq->mcq.set_ci_db = 0;
                *cq->mcq.arm_db    = 0;

                err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
                if (err)
                        goto err_db;

                uar = &dev->priv_uar;
        }

        if (dev->eq_table)
                vector = dev->eq_table[vector % ibdev->num_comp_vectors];

        err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
                            cq->db.dma, &cq->mcq, vector, 0, 0);
        if (err)
                goto err_dbmap;

        if (context)
                cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
        else
                cq->mcq.comp = mlx4_ib_cq_comp;
        cq->mcq.event = mlx4_ib_cq_event;

        if (context)
                if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
                        err = -EFAULT;
                        goto err_dbmap;
                }

        return &cq->ibcq;

err_dbmap:
        if (context)
                mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

        if (context)
                ib_umem_release(cq->umem);
        else
                mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
        if (!context)
                mlx4_db_free(dev->dev, &cq->db);

err_cq:
        kfree(cq);

        return ERR_PTR(err);
}

static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                  int entries)
{
        int err;

        if (cq->resize_buf)
                return -EBUSY;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                   int entries, struct ib_udata *udata)
{
        struct mlx4_ib_resize_cq ucmd;
        int err;

        if (cq->resize_umem)
                return -EBUSY;

        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return -EFAULT;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
                                  &cq->resize_umem, ucmd.buf_addr, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
        u32 i;

        i = cq->mcq.cons_index;
        while (get_sw_cqe(cq, i))
                ++i;

        return i - cq->mcq.cons_index;
}

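/*
 * Copy the CQEs still outstanding in the old buffer into the resize buffer,
 * fixing up the owner bit for the new ring size, until the special RESIZE
 * CQE written by hardware is reached.
 */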
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
        struct mlx4_cqe *cqe, *new_cqe;
        int i;
        int cqe_size = cq->buf.entry_size;
        int cqe_inc = cqe_size == 64 ? 1 : 0;

        i = cq->mcq.cons_index;
        cqe = get_cqe(cq, i & cq->ibcq.cqe);
        cqe += cqe_inc;

        while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
                new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
                                           (i + 1) & cq->resize_buf->cqe);
                memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
                new_cqe += cqe_inc;

                new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
                        (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
                cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
                cqe += cqe_inc;
        }
        ++cq->mcq.cons_index;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_mtt mtt;
        int outst_cqe;
        int err;

        mutex_lock(&cq->resize_mutex);
        if (entries < 1 || entries > dev->dev->caps.max_cqes) {
                err = -EINVAL;
                goto out;
        }

        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1) {
                err = 0;
                goto out;
        }

        if (entries > dev->dev->caps.max_cqes + 1) {
                err = -EINVAL;
                goto out;
        }

        if (ibcq->uobject) {
                err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
                if (err)
                        goto out;
        } else {
                /* Can't be smaller than the number of outstanding CQEs */
                outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
                if (entries < outst_cqe + 1) {
                        err = -EINVAL;
                        goto out;
                }

                err = mlx4_alloc_resize_buf(dev, cq, entries);
                if (err)
                        goto out;
        }

        mtt = cq->buf.mtt;

        err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
        if (err)
                goto err_buf;

        mlx4_mtt_cleanup(dev->dev, &mtt);
        if (ibcq->uobject) {
                cq->buf      = cq->resize_buf->buf;
                cq->ibcq.cqe = cq->resize_buf->cqe;
                ib_umem_release(cq->umem);
                cq->umem     = cq->resize_umem;

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                cq->resize_umem = NULL;
        } else {
                struct mlx4_ib_cq_buf tmp_buf;
                int tmp_cqe = 0;

                spin_lock_irq(&cq->lock);
                if (cq->resize_buf) {
                        mlx4_ib_cq_resize_copy_cqes(cq);
                        tmp_buf = cq->buf;
                        tmp_cqe = cq->ibcq.cqe;
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }
                spin_unlock_irq(&cq->lock);

                if (tmp_cqe)
                        mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
        }

        goto out;

err_buf:
        mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
        if (!ibcq->uobject)
                mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
                                    cq->resize_buf->cqe);

        kfree(cq->resize_buf);
        cq->resize_buf = NULL;

        if (cq->resize_umem) {
                ib_umem_release(cq->resize_umem);
                cq->resize_umem = NULL;
        }

out:
        mutex_unlock(&cq->resize_mutex);

        return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
        struct mlx4_ib_dev *dev = to_mdev(cq->device);
        struct mlx4_ib_cq *mcq = to_mcq(cq);

        mlx4_cq_free(dev->dev, &mcq->mcq);
        mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

        if (cq->uobject) {
                mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
                ib_umem_release(mcq->umem);
        } else {
                mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
                mlx4_db_free(dev->dev, &mcq->db);
        }

        kfree(mcq);

        return 0;
}

static void dump_cqe(void *cqe)
{
        __be32 *buf = cqe;

        pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
               be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
               be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
               be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
                                     struct ib_wc *wc)
{
        if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
                pr_debug("local QP operation err "
                       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
                       "opcode = %02x)\n",
                       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
                       cqe->vendor_err_syndrome,
                       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                dump_cqe(cqe);
        }

        switch (cqe->syndrome) {
        case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
                wc->status = IB_WC_LOC_LEN_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
                wc->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
                wc->status = IB_WC_LOC_PROT_ERR;
                break;
        case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
                wc->status = IB_WC_WR_FLUSH_ERR;
                break;
        case MLX4_CQE_SYNDROME_MW_BIND_ERR:
                wc->status = IB_WC_MW_BIND_ERR;
                break;
        case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
                wc->status = IB_WC_BAD_RESP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
                wc->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
                wc->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
                wc->status = IB_WC_REM_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
                wc->status = IB_WC_REM_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
                wc->status = IB_WC_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
                wc->status = IB_WC_REM_ABORT_ERR;
                break;
        default:
                wc->status = IB_WC_GENERAL_ERR;
                break;
        }

        wc->vendor_err = cqe->vendor_err_syndrome;
}

static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
        return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4      |
                                      MLX4_CQE_STATUS_IPV4F     |
                                      MLX4_CQE_STATUS_IPV4OPT   |
                                      MLX4_CQE_STATUS_IPV6      |
                                      MLX4_CQE_STATUS_IPOK)) ==
                cpu_to_be16(MLX4_CQE_STATUS_IPV4        |
                            MLX4_CQE_STATUS_IPOK))              &&
                (status & cpu_to_be16(MLX4_CQE_STATUS_UDP       |
                                      MLX4_CQE_STATUS_TCP))     &&
                checksum == cpu_to_be16(0xffff);
}

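/*
 * For proxy special QPs under SR-IOV, the original completion metadata is
 * tunnelled in a header at the start of the receive buffer; copy it into
 * the work completion instead of using the fields of the proxy CQE.
 */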
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
                           unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
        struct mlx4_ib_proxy_sqp_hdr *hdr;

        ib_dma_sync_single_for_cpu(qp->ibqp.device,
                                   qp->sqp_proxy_rcv[tail].map,
                                   sizeof (struct mlx4_ib_proxy_sqp_hdr),
                                   DMA_FROM_DEVICE);
        hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
        wc->pkey_index  = be16_to_cpu(hdr->tun.pkey_index);
        wc->src_qp      = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
        wc->wc_flags   |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
        wc->dlid_path_bits = 0;

        if (is_eth) {
                wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
                memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
                memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
                wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
        } else {
                wc->slid        = be16_to_cpu(hdr->tun.slid_mac_47_32);
                wc->sl          = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
        }

        return 0;
}

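/*
 * Generate software completions with IB_WC_WR_FLUSH_ERR status for every
 * WQE still outstanding on one work queue of a QP.  Used when the device
 * is in an internal error state and can no longer produce real CQEs.
 */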
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
                               struct ib_wc *wc, int *npolled, int is_send)
{
        struct mlx4_ib_wq *wq;
        unsigned cur;
        int i;

        wq = is_send ? &qp->sq : &qp->rq;
        cur = wq->head - wq->tail;

        if (cur == 0)
                return;

        for (i = 0;  i < cur && *npolled < num_entries; i++) {
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                wc->status = IB_WC_WR_FLUSH_ERR;
                wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
                wq->tail++;
                (*npolled)++;
                wc->qp = &qp->ibqp;
                wc++;
        }
}

static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
                                 struct ib_wc *wc, int *npolled)
{
        struct mlx4_ib_qp *qp;

        *npolled = 0;
        /* Find uncompleted WQEs belonging to that cq and return
         * simulated FLUSH_ERR completions
         */
        list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
                mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
                if (*npolled >= num_entries)
                        goto out;
        }

        list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
                mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
                if (*npolled >= num_entries)
                        goto out;
        }

out:
        return;
}

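/*
 * Poll one CQE from the CQ.  Returns 0 when a completion has been written
 * to *wc, -EAGAIN when no software-owned CQE is available, or another
 * negative value on error.
 */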
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                            struct mlx4_ib_qp **cur_qp,
                            struct ib_wc *wc)
{
        struct mlx4_cqe *cqe;
        struct mlx4_qp *mqp;
        struct mlx4_ib_wq *wq;
        struct mlx4_ib_srq *srq;
        struct mlx4_srq *msrq = NULL;
        int is_send;
        int is_error;
        int is_eth;
        u32 g_mlpath_rqpn;
        u16 wqe_ctr;
        unsigned tail = 0;

repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        if (cq->buf.entry_size == 64)
                cqe++;

        ++cq->mcq.cons_index;

        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
        is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                MLX4_CQE_OPCODE_ERROR;

        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
                     is_send)) {
                pr_warn("Completion for NOP opcode detected!\n");
                return -EINVAL;
        }

        /* Resize CQ in progress */
        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
                if (cq->resize_buf) {
                        struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

                        mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }

                goto repoll;
        }

        if (!*cur_qp ||
            (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
                /*
                 * We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
                                       be32_to_cpu(cqe->vlan_my_qpn));
                if (unlikely(!mqp)) {
                        pr_warn("CQ %06x with entry for unknown QPN %06x\n",
                               cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
                        return -EINVAL;
                }

                *cur_qp = to_mibqp(mqp);
        }

        wc->qp = &(*cur_qp)->ibqp;

        if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
                u32 srq_num;
                g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
                srq_num       = g_mlpath_rqpn & 0xffffff;
                /* SRQ is also in the radix tree */
                msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
                                       srq_num);
                if (unlikely(!msrq)) {
                        pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
                                cq->mcq.cqn, srq_num);
                        return -EINVAL;
                }
        }

        if (is_send) {
                wq = &(*cur_qp)->sq;
                if (!(*cur_qp)->sq_signal_bits) {
                        wqe_ctr = be16_to_cpu(cqe->wqe_index);
                        wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
                }
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        } else if ((*cur_qp)->ibqp.srq) {
                srq = to_msrq((*cur_qp)->ibqp.srq);
                wqe_ctr = be16_to_cpu(cqe->wqe_index);
                wc->wr_id = srq->wrid[wqe_ctr];
                mlx4_ib_free_srq_wqe(srq, wqe_ctr);
        } else if (msrq) {
                srq = to_mibsrq(msrq);
                wqe_ctr = be16_to_cpu(cqe->wqe_index);
                wc->wr_id = srq->wrid[wqe_ctr];
                mlx4_ib_free_srq_wqe(srq, wqe_ctr);
        } else {
                wq        = &(*cur_qp)->rq;
                tail      = wq->tail & (wq->wqe_cnt - 1);
                wc->wr_id = wq->wrid[tail];
                ++wq->tail;
        }

        if (unlikely(is_error)) {
                mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
                return 0;
        }

        wc->status = IB_WC_SUCCESS;

        if (is_send) {
                wc->wc_flags = 0;
                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_OPCODE_RDMA_WRITE_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
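                        /* fall through */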
                case MLX4_OPCODE_RDMA_WRITE:
                        wc->opcode    = IB_WC_RDMA_WRITE;
                        break;
                case MLX4_OPCODE_SEND_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
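                        /* fall through */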
                case MLX4_OPCODE_SEND:
                case MLX4_OPCODE_SEND_INVAL:
                        wc->opcode    = IB_WC_SEND;
                        break;
                case MLX4_OPCODE_RDMA_READ:
                        wc->opcode    = IB_WC_RDMA_READ;
                        wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MLX4_OPCODE_ATOMIC_CS:
                        wc->opcode    = IB_WC_COMP_SWAP;
                        wc->byte_len  = 8;
                        break;
                case MLX4_OPCODE_ATOMIC_FA:
                        wc->opcode    = IB_WC_FETCH_ADD;
                        wc->byte_len  = 8;
                        break;
                case MLX4_OPCODE_MASKED_ATOMIC_CS:
                        wc->opcode    = IB_WC_MASKED_COMP_SWAP;
                        wc->byte_len  = 8;
                        break;
                case MLX4_OPCODE_MASKED_ATOMIC_FA:
                        wc->opcode    = IB_WC_MASKED_FETCH_ADD;
                        wc->byte_len  = 8;
                        break;
                case MLX4_OPCODE_BIND_MW:
                        wc->opcode    = IB_WC_BIND_MW;
                        break;
                case MLX4_OPCODE_LSO:
                        wc->opcode    = IB_WC_LSO;
                        break;
                case MLX4_OPCODE_FMR:
                        wc->opcode    = IB_WC_FAST_REG_MR;
                        break;
                case MLX4_OPCODE_LOCAL_INVAL:
                        wc->opcode    = IB_WC_LOCAL_INV;
                        break;
                }
        } else {
                wc->byte_len = be32_to_cpu(cqe->byte_cnt);

                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
                        wc->opcode      = IB_WC_RECV_RDMA_WITH_IMM;
                        wc->wc_flags    = IB_WC_WITH_IMM;
                        wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                case MLX4_RECV_OPCODE_SEND_INVAL:
                        wc->opcode      = IB_WC_RECV;
                        wc->wc_flags    = IB_WC_WITH_INVALIDATE;
                        wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
                        break;
                case MLX4_RECV_OPCODE_SEND:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = 0;
                        break;
                case MLX4_RECV_OPCODE_SEND_IMM:
                        wc->opcode      = IB_WC_RECV;
                        wc->wc_flags    = IB_WC_WITH_IMM;
                        wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                }

                is_eth = (rdma_port_get_link_layer(wc->qp->device,
                                                  (*cur_qp)->port) ==
                          IB_LINK_LAYER_ETHERNET);
                if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
                        if ((*cur_qp)->mlx4_ib_qp_type &
                            (MLX4_IB_QPT_PROXY_SMI_OWNER |
                             MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
                                return use_tunnel_data(*cur_qp, cq, wc, tail,
                                                       cqe, is_eth);
                }

                wc->slid           = be16_to_cpu(cqe->rlid);
                g_mlpath_rqpn      = be32_to_cpu(cqe->g_mlpath_rqpn);
                wc->src_qp         = g_mlpath_rqpn & 0xffffff;
                wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
                wc->wc_flags      |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
                wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
                wc->wc_flags      |= mlx4_ib_ipoib_csum_ok(cqe->status,
                                        cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
                if (is_eth) {
                        wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
                        if (be32_to_cpu(cqe->vlan_my_qpn) &
                                        MLX4_CQE_VLAN_PRESENT_MASK) {
                                wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
                                        MLX4_CQE_VID_MASK;
                        } else {
                                wc->vlan_id = 0xffff;
                        }
                        memcpy(wc->smac, cqe->smac, ETH_ALEN);
                        wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
                } else {
                        wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
                        wc->vlan_id = 0xffff;
                }
        }

        return 0;
}

int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_ib_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;
        int err = 0;
        struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

        spin_lock_irqsave(&cq->lock, flags);
        if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
                goto out;
        }

        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
                if (err)
                        break;
        }

        mlx4_cq_set_ci(&cq->mcq);

out:
        spin_unlock_irqrestore(&cq->lock, flags);

        if (err == 0 || err == -EAGAIN)
                return npolled;
        else
                return err;
}

int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        mlx4_cq_arm(&to_mcq(ibcq)->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
                    to_mdev(ibcq->device)->uar_map,
                    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

        return 0;
}

void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        u32 prod_index;
        int nfreed = 0;
        struct mlx4_cqe *cqe, *dest;
        u8 owner_bit;
        int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

        /*
         * First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
                if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
                        break;

        /*
         * Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                cqe += cqe_inc;

                if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
                        if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                                mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
                        ++nfreed;
                } else if (nfreed) {
                        dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
                        dest += cqe_inc;

                        owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
                        memcpy(dest, cqe, sizeof *cqe);
                        dest->owner_sr_opcode = owner_bit |
                                (dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                }
        }

        if (nfreed) {
                cq->mcq.cons_index += nfreed;
                /*
                 * Make sure update of buffer contents is done before
                 * updating consumer index.
                 */
                wmb();
                mlx4_cq_set_ci(&cq->mcq);
        }
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        spin_lock_irq(&cq->lock);
        __mlx4_ib_cq_clean(cq, qpn, srq);
        spin_unlock_irq(&cq->lock);
}