Raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / infiniband / hw / mlx5 / qp.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int wq_signature;

enum {
        MLX5_IB_ACK_REQ_FREQ    = 8,
};

enum {
        MLX5_IB_DEFAULT_SCHED_QUEUE     = 0x83,
        MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
        MLX5_IB_LINK_TYPE_IB            = 0,
        MLX5_IB_LINK_TYPE_ETH           = 1
};

enum {
        MLX5_IB_SQ_STRIDE       = 6,
        MLX5_IB_CACHE_LINE_SIZE = 64,
};

static const u32 mlx5_ib_opcode[] = {
        [IB_WR_SEND]                            = MLX5_OPCODE_SEND,
        [IB_WR_SEND_WITH_IMM]                   = MLX5_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]                      = MLX5_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]             = MLX5_OPCODE_RDMA_WRITE_IMM,
        [IB_WR_RDMA_READ]                       = MLX5_OPCODE_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]              = MLX5_OPCODE_ATOMIC_CS,
        [IB_WR_ATOMIC_FETCH_AND_ADD]            = MLX5_OPCODE_ATOMIC_FA,
        [IB_WR_SEND_WITH_INV]                   = MLX5_OPCODE_SEND_INVAL,
        [IB_WR_LOCAL_INV]                       = MLX5_OPCODE_UMR,
        [IB_WR_REG_MR]                          = MLX5_OPCODE_UMR,
        [IB_WR_MASKED_ATOMIC_CMP_AND_SWP]       = MLX5_OPCODE_ATOMIC_MASKED_CS,
        [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]     = MLX5_OPCODE_ATOMIC_MASKED_FA,
        [MLX5_IB_WR_UMR]                        = MLX5_OPCODE_UMR,
};


static int is_qp0(enum ib_qp_type qp_type)
{
        return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
        return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
        return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
        return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
        return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}

/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *        otherwise.
 * @wqe_index:  index to start copying from. For send work queues, the
 *              wqe_index is in units of MLX5_SEND_WQE_BB.
 *              For receive work queues, it is the index of the work
 *              queue element in the queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * Return: the number of bytes copied, or a negative error code.
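 *
 * Example (hypothetical caller, for illustration only; the buffer must
 * be able to hold a complete WQE, since more than @length bytes may be
 * written):
 *
 *      char buf[PAGE_SIZE];
 *      int n;
 *
 *      n = mlx5_ib_read_user_wqe(qp, 1, wqe_index, buf, sizeof(buf));
 *      if (n < 0)
 *              return n;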
 */
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
                          void *buffer, u32 length)
{
        struct ib_device *ibdev = qp->ibqp.device;
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
        size_t offset;
        size_t wq_end;
        struct ib_umem *umem = qp->umem;
        u32 first_copy_length;
        int wqe_length;
        int ret;

        if (wq->wqe_cnt == 0) {
                mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
                            qp->ibqp.qp_type);
                return -EINVAL;
        }

        offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
        wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);

        if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
                return -EINVAL;

        if (offset > umem->length ||
            (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
                return -EINVAL;

        first_copy_length = min_t(u32, offset + length, wq_end) - offset;
        ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
        if (ret)
                return ret;

        if (send) {
                struct mlx5_wqe_ctrl_seg *ctrl = buffer;
                int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

                wqe_length = ds * MLX5_WQE_DS_UNITS;
        } else {
                wqe_length = 1 << wq->wqe_shift;
        }

        if (wqe_length <= first_copy_length)
                return first_copy_length;

        ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
                                wqe_length - first_copy_length);
        if (ret)
                return ret;

        return wqe_length;
}

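/*
 * Translate an mlx5 hardware event on a QP into the corresponding IB
 * event and dispatch it to the consumer's event handler, if one is
 * registered.
 */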
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
        struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
        struct ib_event event;

        if (type == MLX5_EVENT_TYPE_PATH_MIG)
                to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

        if (ibqp->event_handler) {
                event.device     = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case MLX5_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case MLX5_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
                        return;
                }

                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
        int wqe_size;
        int wq_size;

        /* Sanity check RQ size before proceeding */
        if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
                return -EINVAL;

        if (!has_rq) {
                qp->rq.max_gs = 0;
                qp->rq.wqe_cnt = 0;
                qp->rq.wqe_shift = 0;
        } else {
                if (ucmd) {
                        qp->rq.wqe_cnt = ucmd->rq_wqe_count;
                        qp->rq.wqe_shift = ucmd->rq_wqe_shift;
                        qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
                        qp->rq.max_post = qp->rq.wqe_cnt;
                } else {
                        wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
                        wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
                        wqe_size = roundup_pow_of_two(wqe_size);
                        wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
                        wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
                        qp->rq.wqe_cnt = wq_size / wqe_size;
                        if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
                                mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
                                            wqe_size,
                                            MLX5_CAP_GEN(dev->mdev,
                                                         max_wqe_sz_rq));
                                return -EINVAL;
                        }
                        qp->rq.wqe_shift = ilog2(wqe_size);
                        qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
                        qp->rq.max_post = qp->rq.wqe_cnt;
                }
        }

        return 0;
}

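/*
 * Fixed per-WQE overhead, in bytes, for the given QP type: the control,
 * address and other segments that precede the data/inline segments.
 * Returns -EINVAL for unsupported QP types.
 */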
static int sq_overhead(enum ib_qp_type qp_type)
{
        int size = 0;

        switch (qp_type) {
        case IB_QPT_XRC_INI:
                size += sizeof(struct mlx5_wqe_xrc_seg);
                /* fall through */
        case IB_QPT_RC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_atomic_seg) +
                        sizeof(struct mlx5_wqe_raddr_seg);
                break;

        case IB_QPT_XRC_TGT:
                return 0;

        case IB_QPT_UC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_raddr_seg) +
                        sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                        sizeof(struct mlx5_mkey_seg);
                break;

        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_datagram_seg);
                break;

        case MLX5_IB_QPT_REG_UMR:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                        sizeof(struct mlx5_mkey_seg);
                break;

        default:
                return -EINVAL;
        }

        return size;
}

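/*
 * Size of a single send WQE: the type-specific overhead plus the larger
 * of the inline and scatter/gather segments, aligned up to the basic
 * block size (MLX5_SEND_WQE_BB).  Signature-enabled QPs are padded up
 * to MLX5_SIG_WQE_SIZE.
 */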
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
        int inl_size = 0;
        int size;

        size = sq_overhead(attr->qp_type);
        if (size < 0)
                return size;

        if (attr->cap.max_inline_data) {
                inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
                        attr->cap.max_inline_data;
        }

        size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
        if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
            ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
                return MLX5_SIG_WQE_SIZE;
        else
                return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}

static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                        struct mlx5_ib_qp *qp)
{
        int wqe_size;
        int wq_size;

        if (!attr->cap.max_send_wr)
                return 0;

        wqe_size = calc_send_wqe(attr);
        mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
        if (wqe_size < 0)
                return wqe_size;

        if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
                            wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }

        qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
                sizeof(struct mlx5_wqe_inline_seg);
        attr->cap.max_inline_data = qp->max_inline_data;

        if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
                qp->signature_en = true;

        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
        if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
                            qp->sq.wqe_cnt,
                            1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -ENOMEM;
        }
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.max_gs = attr->cap.max_send_sge;
        qp->sq.max_post = wq_size / wqe_size;
        attr->cap.max_send_wr = qp->sq.max_post;

        return wq_size;
}

static int set_user_buf_size(struct mlx5_ib_dev *dev,
                            struct mlx5_ib_qp *qp,
                            struct mlx5_ib_create_qp *ucmd)
{
        int desc_sz = 1 << qp->sq.wqe_shift;

        if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
                             desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }

        if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
                mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
                             ucmd->sq_wqe_count);
                return -EINVAL;
        }

        qp->sq.wqe_cnt = ucmd->sq_wqe_count;

        if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
                             qp->sq.wqe_cnt,
                             1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -EINVAL;
        }

        qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
                (qp->sq.wqe_cnt << 6);

        return 0;
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI ||
            attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
            attr->qp_type == MLX5_IB_QPT_REG_UMR ||
            !attr->cap.max_recv_wr)
                return 0;

        return 1;
}

static int first_med_uuar(void)
{
        return 1;
}

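/*
 * Iterate over the usable UUAR indices.  Each UAR page carries
 * MLX5_BF_REGS_PER_PAGE BlueFlame registers; the skip below assumes the
 * upper two of every four are reserved for the fast path, so only
 * indices with (n % 4) < 2 are handed out here.
 */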
static int next_uuar(int n)
{
        n++;

        while (((n % 4) & 2))
                n++;

        return n;
}

static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
        int n;

        n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
                uuari->num_low_latency_uuars - 1;

        return n >= 0 ? n : 0;
}

static int max_uuari(struct mlx5_uuar_info *uuari)
{
        return uuari->num_uars * 4;
}

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
        int med;
        int i;
        int t;

        med = num_med_uuar(uuari);
        for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
                t++;
                if (t == med)
                        return next_uuar(i);
        }

        return 0;
}

static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
        int i;

        for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
                if (!test_bit(i, uuari->bitmap)) {
                        set_bit(i, uuari->bitmap);
                        uuari->count[i]++;
                        return i;
                }
        }

        return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
        int minidx = first_med_uuar();
        int i;

        for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
                if (uuari->count[i] < uuari->count[minidx])
                        minidx = i;
        }

        uuari->count[minidx]++;
        return minidx;
}

static int alloc_uuar(struct mlx5_uuar_info *uuari,
                      enum mlx5_ib_latency_class lat)
{
        int uuarn = -EINVAL;

        mutex_lock(&uuari->lock);
        switch (lat) {
        case MLX5_IB_LATENCY_CLASS_LOW:
                uuarn = 0;
                uuari->count[uuarn]++;
                break;

        case MLX5_IB_LATENCY_CLASS_MEDIUM:
                if (uuari->ver < 2)
                        uuarn = -ENOMEM;
                else
                        uuarn = alloc_med_class_uuar(uuari);
                break;

        case MLX5_IB_LATENCY_CLASS_HIGH:
                if (uuari->ver < 2)
                        uuarn = -ENOMEM;
                else
                        uuarn = alloc_high_class_uuar(uuari);
                break;

        case MLX5_IB_LATENCY_CLASS_FAST_PATH:
                uuarn = 2;
                break;
        }
        mutex_unlock(&uuari->lock);

        return uuarn;
}

static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        clear_bit(uuarn, uuari->bitmap);
        --uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        clear_bit(uuarn, uuari->bitmap);
        --uuari->count[uuarn];
}

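/*
 * Return a UUAR to the pool.  Index 0 is the shared low-latency-class
 * register; indices below high_uuar are treated as medium class and the
 * rest as dedicated high-class registers.
 */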
static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
        int high_uuar = nuuars - uuari->num_low_latency_uuars;

        mutex_lock(&uuari->lock);
        if (uuarn == 0) {
                --uuari->count[uuarn];
                goto out;
        }

        if (uuarn < high_uuar) {
                free_med_class_uuar(uuari, uuarn);
                goto out;
        }

        free_high_class_uuar(uuari, uuarn);

out:
        mutex_unlock(&uuari->lock);
}

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:      return MLX5_QP_STATE_RST;
        case IB_QPS_INIT:       return MLX5_QP_STATE_INIT;
        case IB_QPS_RTR:        return MLX5_QP_STATE_RTR;
        case IB_QPS_RTS:        return MLX5_QP_STATE_RTS;
        case IB_QPS_SQD:        return MLX5_QP_STATE_SQD;
        case IB_QPS_SQE:        return MLX5_QP_STATE_SQER;
        case IB_QPS_ERR:        return MLX5_QP_STATE_ERR;
        default:                return -1;
        }
}

static int to_mlx5_st(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_RC:                 return MLX5_QP_ST_RC;
        case IB_QPT_UC:                 return MLX5_QP_ST_UC;
        case IB_QPT_UD:                 return MLX5_QP_ST_UD;
        case MLX5_IB_QPT_REG_UMR:       return MLX5_QP_ST_REG_UMR;
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:            return MLX5_QP_ST_XRC;
        case IB_QPT_SMI:                return MLX5_QP_ST_QP0;
        case IB_QPT_GSI:                return MLX5_QP_ST_QP1;
        case IB_QPT_RAW_IPV6:           return MLX5_QP_ST_RAW_IPV6;
        case IB_QPT_RAW_ETHERTYPE:      return MLX5_QP_ST_RAW_ETHERTYPE;
        case IB_QPT_RAW_PACKET:
        case IB_QPT_MAX:
        default:                return -EINVAL;
        }
}

static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
        return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}

static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                          struct mlx5_ib_qp *qp, struct ib_udata *udata,
                          struct mlx5_create_qp_mbox_in **in,
                          struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
        struct mlx5_ib_ucontext *context;
        struct mlx5_ib_create_qp ucmd;
        int page_shift = 0;
        int uar_index;
        int npages;
        u32 offset = 0;
        int uuarn;
        int ncont = 0;
        int err;

        err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
        if (err) {
                mlx5_ib_dbg(dev, "copy failed\n");
                return err;
        }

        context = to_mucontext(pd->uobject->context);
        /*
         * TBD: should come from the verbs when we have the API
         */
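        /*
         * Try the highest class first and fall back on failure.  Note
         * that the class names run opposite to the latency the debug
         * messages below describe: the HIGH class supplies the
         * low-latency UUARs.
         */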
        uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
        if (uuarn < 0) {
                mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
                mlx5_ib_dbg(dev, "reverting to medium latency\n");
                uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
                if (uuarn < 0) {
                        mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
                        mlx5_ib_dbg(dev, "reverting to high latency\n");
                        uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
                        if (uuarn < 0) {
                                mlx5_ib_warn(dev, "uuar allocation failed\n");
                                return uuarn;
                        }
                }
        }

        uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
        mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

        qp->rq.offset = 0;
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

        err = set_user_buf_size(dev, qp, &ucmd);
        if (err)
                goto err_uuar;

        if (ucmd.buf_addr && qp->buf_size) {
                qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
                                       qp->buf_size, 0, 0);
                if (IS_ERR(qp->umem)) {
                        mlx5_ib_dbg(dev, "umem_get failed\n");
                        err = PTR_ERR(qp->umem);
                        goto err_uuar;
                }
        } else {
                qp->umem = NULL;
        }

        if (qp->umem) {
                mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
                                   &ncont, NULL);
                err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
                if (err) {
                        mlx5_ib_warn(dev, "bad offset\n");
                        goto err_umem;
                }
                mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
                            ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
        }

        *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
        *in = mlx5_vzalloc(*inlen);
        if (!*in) {
                err = -ENOMEM;
                goto err_umem;
        }
        if (qp->umem)
                mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
        (*in)->ctx.log_pg_sz_remote_qpn =
                cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
        (*in)->ctx.params2 = cpu_to_be32(offset << 6);

        (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
        resp->uuar_index = uuarn;
        qp->uuarn = uuarn;

        err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
        if (err) {
                mlx5_ib_dbg(dev, "map failed\n");
                goto err_free;
        }

        err = ib_copy_to_udata(udata, resp, sizeof(*resp));
        if (err) {
                mlx5_ib_dbg(dev, "copy failed\n");
                goto err_unmap;
        }
        qp->create_type = MLX5_QP_USER;

        return 0;

err_unmap:
        mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
        kvfree(*in);

err_umem:
        if (qp->umem)
                ib_umem_release(qp->umem);

err_uuar:
        free_uuar(&context->uuari, uuarn);
        return err;
}

static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_ucontext *context;

        context = to_mucontext(pd->uobject->context);
        mlx5_ib_db_unmap_user(context, &qp->db);
        if (qp->umem)
                ib_umem_release(qp->umem);
        free_uuar(&context->uuari, qp->uuarn);
}

static int create_kernel_qp(struct mlx5_ib_dev *dev,
                            struct ib_qp_init_attr *init_attr,
                            struct mlx5_ib_qp *qp,
                            struct mlx5_create_qp_mbox_in **in, int *inlen)
{
        enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
        struct mlx5_uuar_info *uuari;
        int uar_index;
        int uuarn;
        int err;

        uuari = &dev->mdev->priv.uuari;
        if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
                return -EINVAL;

        if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
                lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

        uuarn = alloc_uuar(uuari, lc);
        if (uuarn < 0) {
                mlx5_ib_dbg(dev, "uuar allocation failed\n");
                return -ENOMEM;
        }

        qp->bf = &uuari->bfs[uuarn];
        uar_index = qp->bf->uar->index;

        err = calc_sq_size(dev, init_attr, qp);
        if (err < 0) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_uuar;
        }

        qp->rq.offset = 0;
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
        qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

        err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_uuar;
        }

        qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
        *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
        *in = mlx5_vzalloc(*inlen);
        if (!*in) {
                err = -ENOMEM;
                goto err_buf;
        }
        (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
        (*in)->ctx.log_pg_sz_remote_qpn =
                cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
        /* Set "fast registration enabled" for all kernel QPs */
        (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
        (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

        mlx5_fill_page_array(&qp->buf, (*in)->pas);

        err = mlx5_db_alloc(dev->mdev, &qp->db);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_free;
        }

        qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
        qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
        qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
        qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
        qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

        if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
            !qp->sq.w_list || !qp->sq.wqe_head) {
                err = -ENOMEM;
                goto err_wrid;
        }
        qp->create_type = MLX5_QP_KERNEL;

        return 0;

err_wrid:
        mlx5_db_free(dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
        kfree(qp->sq.wr_data);
        kfree(qp->rq.wrid);

err_free:
        kvfree(*in);

err_buf:
        mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
        free_uuar(&dev->mdev->priv.uuari, uuarn);
        return err;
}

static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
        mlx5_db_free(dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
        kfree(qp->sq.wr_data);
        kfree(qp->rq.wrid);
        mlx5_buf_free(dev->mdev, &qp->buf);
        free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}

static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
        if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
            (attr->qp_type == IB_QPT_XRC_INI))
                return cpu_to_be32(MLX5_SRQ_RQ);
        else if (!qp->has_rq)
                return cpu_to_be32(MLX5_ZERO_LEN_RQ);
        else
                return cpu_to_be32(MLX5_NON_ZERO_RQ);
}

static int is_connected(enum ib_qp_type qp_type)
{
        if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
                return 1;

        return 0;
}

static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_resources *devr = &dev->devr;
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_ib_create_qp_resp resp;
        struct mlx5_create_qp_mbox_in *in;
        struct mlx5_ib_create_qp ucmd;
        int inlen = sizeof(*in);
        int err;

        mlx5_ib_odp_create_qp(qp);

        mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);

        if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
                        mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
                        return -EINVAL;
                } else {
                        qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
                }
        }

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

        if (pd && pd->uobject) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        mlx5_ib_dbg(dev, "copy failed\n");
                        return -EFAULT;
                }

                qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
                qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
        } else {
                qp->wq_sig = !!wq_signature;
        }

        qp->has_rq = qp_has_rq(init_attr);
        err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
                          qp, (pd && pd->uobject) ? &ucmd : NULL);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                return err;
        }

        if (pd) {
                if (pd->uobject) {
                        __u32 max_wqes =
                                1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
                        mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
                        if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
                            ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
                                mlx5_ib_dbg(dev, "invalid rq params\n");
                                return -EINVAL;
                        }
                        if (ucmd.sq_wqe_count > max_wqes) {
                                mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
                                            ucmd.sq_wqe_count, max_wqes);
                                return -EINVAL;
                        }
                        err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
                        if (err)
                                mlx5_ib_dbg(dev, "err %d\n", err);
                } else {
                        err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
                        if (err)
                                mlx5_ib_dbg(dev, "err %d\n", err);
                }

                if (err)
                        return err;
        } else {
                in = mlx5_vzalloc(sizeof(*in));
                if (!in)
                        return -ENOMEM;

                qp->create_type = MLX5_QP_EMPTY;
        }

        if (is_sqp(init_attr->qp_type))
                qp->port = init_attr->port_num;

        in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
                                    MLX5_QP_PM_MIGRATED << 11);

        if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
                in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
        else
                in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

        if (qp->wq_sig)
                in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

        if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
                in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);

        if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
                int rcqe_sz;
                int scqe_sz;

                rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
                scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

                if (rcqe_sz == 128)
                        in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
                else
                        in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;

                if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
                        if (scqe_sz == 128)
                                in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
                        else
                                in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
                }
        }

        if (qp->rq.wqe_cnt) {
                in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
                in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
        }

        in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

        if (qp->sq.wqe_cnt)
                in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
        else
                in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

        /* Set default resources */
        switch (init_attr->qp_type) {
        case IB_QPT_XRC_TGT:
                in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
                in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
                in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
                in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
                break;
        case IB_QPT_XRC_INI:
                in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
                in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
                in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
                break;
        default:
                if (init_attr->srq) {
                        in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
                        in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
                } else {
                        in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
                        in->ctx.rq_type_srqn |=
                                cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
                }
        }

        if (init_attr->send_cq)
                in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

        if (init_attr->recv_cq)
                in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

        in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

        err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
        if (err) {
                mlx5_ib_dbg(dev, "create qp failed\n");
                goto err_create;
        }

        kvfree(in);
        /* Hardware wants QPN written in big-endian order (after
         * shifting) for send doorbell.  Precompute this value to save
         * a little bit when posting sends.
         */
        qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

        qp->mqp.event = mlx5_ib_qp_event;

        return 0;

err_create:
        if (qp->create_type == MLX5_QP_USER)
                destroy_qp_user(pd, qp);
        else if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);

        kvfree(in);
        return err;
}

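/*
 * Lock the send and receive CQs in ascending CQN order so that two
 * lockers can never deadlock on the same pair.  The __acquire() calls
 * keep sparse's context tracking balanced when only one (or neither)
 * lock is actually taken.
 */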
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
        __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq) {
                if (recv_cq) {
                        if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
                                spin_lock_irq(&send_cq->lock);
                                spin_lock_nested(&recv_cq->lock,
                                                 SINGLE_DEPTH_NESTING);
                        } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
                                spin_lock_irq(&send_cq->lock);
                                __acquire(&recv_cq->lock);
                        } else {
                                spin_lock_irq(&recv_cq->lock);
                                spin_lock_nested(&send_cq->lock,
                                                 SINGLE_DEPTH_NESTING);
                        }
                } else {
                        spin_lock_irq(&send_cq->lock);
                        __acquire(&recv_cq->lock);
                }
        } else if (recv_cq) {
                spin_lock_irq(&recv_cq->lock);
                __acquire(&send_cq->lock);
        } else {
                __acquire(&send_cq->lock);
                __acquire(&recv_cq->lock);
        }
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
        __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
        if (send_cq) {
                if (recv_cq) {
                        if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
                                spin_unlock(&recv_cq->lock);
                                spin_unlock_irq(&send_cq->lock);
                        } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
                                __release(&recv_cq->lock);
                                spin_unlock_irq(&send_cq->lock);
                        } else {
                                spin_unlock(&send_cq->lock);
                                spin_unlock_irq(&recv_cq->lock);
                        }
                } else {
                        __release(&recv_cq->lock);
                        spin_unlock_irq(&send_cq->lock);
                }
        } else if (recv_cq) {
                __release(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        } else {
                __release(&recv_cq->lock);
                __release(&send_cq->lock);
        }
}

static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
        return to_mpd(qp->ibqp.pd);
}

static void get_cqs(struct mlx5_ib_qp *qp,
                    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
        switch (qp->ibqp.qp_type) {
        case IB_QPT_XRC_TGT:
                *send_cq = NULL;
                *recv_cq = NULL;
                break;
        case MLX5_IB_QPT_REG_UMR:
        case IB_QPT_XRC_INI:
                *send_cq = to_mcq(qp->ibqp.send_cq);
                *recv_cq = NULL;
                break;

        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        case IB_QPT_RAW_IPV6:
        case IB_QPT_RAW_ETHERTYPE:
                *send_cq = to_mcq(qp->ibqp.send_cq);
                *recv_cq = to_mcq(qp->ibqp.recv_cq);
                break;

        case IB_QPT_RAW_PACKET:
        case IB_QPT_MAX:
        default:
                *send_cq = NULL;
                *recv_cq = NULL;
                break;
        }
}

static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_cq *send_cq, *recv_cq;
        struct mlx5_modify_qp_mbox_in *in;
        int err;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return;

        if (qp->state != IB_QPS_RESET) {
                mlx5_ib_qp_disable_pagefaults(qp);
                if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
                                        MLX5_QP_STATE_RST, in, 0, &qp->mqp))
                        mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
                                     qp->mqp.qpn);
        }

        get_cqs(qp, &send_cq, &recv_cq);

        if (qp->create_type == MLX5_QP_KERNEL) {
                mlx5_ib_lock_cqs(send_cq, recv_cq);
                __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
                                   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (send_cq != recv_cq)
                        __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
                mlx5_ib_unlock_cqs(send_cq, recv_cq);
        }

        err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
        if (err)
                mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
        kfree(in);


        if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);
        else if (qp->create_type == MLX5_QP_USER)
                destroy_qp_user(&get_pd(qp)->ibpd, qp);
}

static const char *ib_qp_type_str(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_SMI:
                return "IB_QPT_SMI";
        case IB_QPT_GSI:
                return "IB_QPT_GSI";
        case IB_QPT_RC:
                return "IB_QPT_RC";
        case IB_QPT_UC:
                return "IB_QPT_UC";
        case IB_QPT_UD:
                return "IB_QPT_UD";
        case IB_QPT_RAW_IPV6:
                return "IB_QPT_RAW_IPV6";
        case IB_QPT_RAW_ETHERTYPE:
                return "IB_QPT_RAW_ETHERTYPE";
        case IB_QPT_XRC_INI:
                return "IB_QPT_XRC_INI";
        case IB_QPT_XRC_TGT:
                return "IB_QPT_XRC_TGT";
        case IB_QPT_RAW_PACKET:
                return "IB_QPT_RAW_PACKET";
        case MLX5_IB_QPT_REG_UMR:
                return "MLX5_IB_QPT_REG_UMR";
        case IB_QPT_MAX:
        default:
                return "Invalid QP type";
        }
}

struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
                                struct ib_qp_init_attr *init_attr,
                                struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev;
        struct mlx5_ib_qp *qp;
        u16 xrcdn = 0;
        int err;

        if (pd) {
                dev = to_mdev(pd->device);
        } else {
                /* being cautious here */
                if (init_attr->qp_type != IB_QPT_XRC_TGT &&
                    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
                        pr_warn("%s: no PD for transport %s\n", __func__,
                                ib_qp_type_str(init_attr->qp_type));
                        return ERR_PTR(-EINVAL);
                }
                dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
        }

        switch (init_attr->qp_type) {
        case IB_QPT_XRC_TGT:
        case IB_QPT_XRC_INI:
                if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
                        mlx5_ib_dbg(dev, "XRC not supported\n");
                        return ERR_PTR(-ENOSYS);
                }
                init_attr->recv_cq = NULL;
                if (init_attr->qp_type == IB_QPT_XRC_TGT) {
                        xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
                        init_attr->send_cq = NULL;
                }

                /* fall through */
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case MLX5_IB_QPT_REG_UMR:
                qp = kzalloc(sizeof(*qp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                err = create_qp_common(dev, pd, init_attr, udata, qp);
                if (err) {
                        mlx5_ib_dbg(dev, "create_qp_common failed\n");
                        kfree(qp);
                        return ERR_PTR(err);
                }

                if (is_qp0(init_attr->qp_type))
                        qp->ibqp.qp_num = 0;
                else if (is_qp1(init_attr->qp_type))
                        qp->ibqp.qp_num = 1;
                else
                        qp->ibqp.qp_num = qp->mqp.qpn;

                mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
                            qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
                            to_mcq(init_attr->send_cq)->mcq.cqn);

                qp->xrcdn = xrcdn;

                break;

        case IB_QPT_RAW_IPV6:
        case IB_QPT_RAW_ETHERTYPE:
        case IB_QPT_RAW_PACKET:
        case IB_QPT_MAX:
        default:
                mlx5_ib_dbg(dev, "unsupported qp type %d\n",
                            init_attr->qp_type);
                /* Don't support raw QPs */
                return ERR_PTR(-EINVAL);
        }

        return &qp->ibqp;
}

int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
        struct mlx5_ib_qp *mqp = to_mqp(qp);

        destroy_qp_common(dev, mqp);

        kfree(mqp);

        return 0;
}

static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
                                   int attr_mask)
{
        u32 hw_access_flags = 0;
        u8 dest_rd_atomic;
        u32 access_flags;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                dest_rd_atomic = attr->max_dest_rd_atomic;
        else
                dest_rd_atomic = qp->resp_depth;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                access_flags = attr->qp_access_flags;
        else
                access_flags = qp->atomic_rd_en;

        if (!dest_rd_atomic)
                access_flags &= IB_ACCESS_REMOTE_WRITE;

        if (access_flags & IB_ACCESS_REMOTE_READ)
                hw_access_flags |= MLX5_QP_BIT_RRE;
        if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
                hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
        if (access_flags & IB_ACCESS_REMOTE_WRITE)
                hw_access_flags |= MLX5_QP_BIT_RWE;

        return cpu_to_be32(hw_access_flags);
}

enum {
        MLX5_PATH_FLAG_FL       = 1 << 0,
        MLX5_PATH_FLAG_FREE_AR  = 1 << 1,
        MLX5_PATH_FLAG_COUNTER  = 1 << 2,
};

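/*
 * Map an IB static rate onto the device encoding.  IB_RATE_PORT_CURRENT
 * maps to 0 (use the current port rate); otherwise step down to the
 * nearest rate the device reports as supported.
 */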
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
        if (rate == IB_RATE_PORT_CURRENT) {
                return 0;
        } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
                return -EINVAL;
        } else {
                while (rate != IB_RATE_2_5_GBPS &&
                       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
                         MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
                        --rate;
        }

        return rate + MLX5_STAT_RATE_OFFSET;
}

static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
                         struct mlx5_qp_path *path, u8 port, int attr_mask,
                         u32 path_flags, const struct ib_qp_attr *attr)
{
        int err;

        path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
        path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

        if (attr_mask & IB_QP_PKEY_INDEX)
                path->pkey_index = attr->pkey_index;

        path->grh_mlid  = ah->src_path_bits & 0x7f;
        path->rlid      = cpu_to_be16(ah->dlid);

        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >=
                    dev->mdev->port_caps[port - 1].gid_table_len) {
                        pr_err("sgid_index (%u) too large. max is %d\n",
                               ah->grh.sgid_index,
                               dev->mdev->port_caps[port - 1].gid_table_len);
                        return -EINVAL;
                }
                path->grh_mlid |= 1 << 7;
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
                path->tclass_flowlabel =
                        cpu_to_be32((ah->grh.traffic_class << 20) |
                                    (ah->grh.flow_label));
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        }

        err = ib_rate_to_mlx5(dev, ah->static_rate);
        if (err < 0)
                return err;
        path->static_rate = err;
        path->port = port;

        if (attr_mask & IB_QP_TIMEOUT)
                path->ackto_lt = attr->timeout << 3;

        path->sl = ah->sl & 0xf;

        return 0;
}

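/*
 * Optional-parameter masks accepted by the firmware for each state
 * transition, indexed by [current state][next state][transport type].
 */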
1409 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
1410         [MLX5_QP_STATE_INIT] = {
1411                 [MLX5_QP_STATE_INIT] = {
1412                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE            |
1413                                           MLX5_QP_OPTPAR_RAE            |
1414                                           MLX5_QP_OPTPAR_RWE            |
1415                                           MLX5_QP_OPTPAR_PKEY_INDEX     |
1416                                           MLX5_QP_OPTPAR_PRI_PORT,
1417                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE            |
1418                                           MLX5_QP_OPTPAR_PKEY_INDEX     |
1419                                           MLX5_QP_OPTPAR_PRI_PORT,
1420                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
1421                                           MLX5_QP_OPTPAR_Q_KEY          |
1422                                           MLX5_QP_OPTPAR_PRI_PORT,
1423                 },
1424                 [MLX5_QP_STATE_RTR] = {
1425                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
1426                                           MLX5_QP_OPTPAR_RRE            |
1427                                           MLX5_QP_OPTPAR_RAE            |
1428                                           MLX5_QP_OPTPAR_RWE            |
1429                                           MLX5_QP_OPTPAR_PKEY_INDEX,
1430                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
1431                                           MLX5_QP_OPTPAR_RWE            |
1432                                           MLX5_QP_OPTPAR_PKEY_INDEX,
1433                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
1434                                           MLX5_QP_OPTPAR_Q_KEY,
1435                         [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX    |
1436                                            MLX5_QP_OPTPAR_Q_KEY,
1437                         [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1438                                           MLX5_QP_OPTPAR_RRE            |
1439                                           MLX5_QP_OPTPAR_RAE            |
1440                                           MLX5_QP_OPTPAR_RWE            |
1441                                           MLX5_QP_OPTPAR_PKEY_INDEX,
1442                 },
1443         },
1444         [MLX5_QP_STATE_RTR] = {
1445                 [MLX5_QP_STATE_RTS] = {
1446                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
1447                                           MLX5_QP_OPTPAR_RRE            |
1448                                           MLX5_QP_OPTPAR_RAE            |
1449                                           MLX5_QP_OPTPAR_RWE            |
1450                                           MLX5_QP_OPTPAR_PM_STATE       |
1451                                           MLX5_QP_OPTPAR_RNR_TIMEOUT,
1452                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
1453                                           MLX5_QP_OPTPAR_RWE            |
1454                                           MLX5_QP_OPTPAR_PM_STATE,
1455                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1456                 },
1457         },
1458         [MLX5_QP_STATE_RTS] = {
1459                 [MLX5_QP_STATE_RTS] = {
1460                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE            |
1461                                           MLX5_QP_OPTPAR_RAE            |
1462                                           MLX5_QP_OPTPAR_RWE            |
1463                                           MLX5_QP_OPTPAR_RNR_TIMEOUT    |
1464                                           MLX5_QP_OPTPAR_PM_STATE       |
1465                                           MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1466                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE            |
1467                                           MLX5_QP_OPTPAR_PM_STATE       |
1468                                           MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1469                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY          |
1470                                           MLX5_QP_OPTPAR_SRQN           |
1471                                           MLX5_QP_OPTPAR_CQN_RCV,
1472                 },
1473         },
1474         [MLX5_QP_STATE_SQER] = {
1475                 [MLX5_QP_STATE_RTS] = {
1476                         [MLX5_QP_ST_UD]  = MLX5_QP_OPTPAR_Q_KEY,
1477                         [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1478                         [MLX5_QP_ST_UC]  = MLX5_QP_OPTPAR_RWE,
1479                         [MLX5_QP_ST_RC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT   |
1480                                            MLX5_QP_OPTPAR_RWE           |
1481                                            MLX5_QP_OPTPAR_RAE           |
1482                                            MLX5_QP_OPTPAR_RRE,
1483                 },
1484         },
1485 };
1486
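/*
 * Translate one IB_QP_* attribute-mask bit into the corresponding
 * MLX5_QP_OPTPAR_* bits; attributes that need no optional parameter
 * map to 0.
 */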
1487 static int ib_nr_to_mlx5_nr(int ib_mask)
1488 {
1489         switch (ib_mask) {
1490         case IB_QP_STATE:
1491                 return 0;
1492         case IB_QP_CUR_STATE:
1493                 return 0;
1494         case IB_QP_EN_SQD_ASYNC_NOTIFY:
1495                 return 0;
1496         case IB_QP_ACCESS_FLAGS:
1497                 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
1498                         MLX5_QP_OPTPAR_RAE;
1499         case IB_QP_PKEY_INDEX:
1500                 return MLX5_QP_OPTPAR_PKEY_INDEX;
1501         case IB_QP_PORT:
1502                 return MLX5_QP_OPTPAR_PRI_PORT;
1503         case IB_QP_QKEY:
1504                 return MLX5_QP_OPTPAR_Q_KEY;
1505         case IB_QP_AV:
1506                 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
1507                         MLX5_QP_OPTPAR_PRI_PORT;
1508         case IB_QP_PATH_MTU:
1509                 return 0;
1510         case IB_QP_TIMEOUT:
1511                 return MLX5_QP_OPTPAR_ACK_TIMEOUT;
1512         case IB_QP_RETRY_CNT:
1513                 return MLX5_QP_OPTPAR_RETRY_COUNT;
1514         case IB_QP_RNR_RETRY:
1515                 return MLX5_QP_OPTPAR_RNR_RETRY;
1516         case IB_QP_RQ_PSN:
1517                 return 0;
1518         case IB_QP_MAX_QP_RD_ATOMIC:
1519                 return MLX5_QP_OPTPAR_SRA_MAX;
1520         case IB_QP_ALT_PATH:
1521                 return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
1522         case IB_QP_MIN_RNR_TIMER:
1523                 return MLX5_QP_OPTPAR_RNR_TIMEOUT;
1524         case IB_QP_SQ_PSN:
1525                 return 0;
1526         case IB_QP_MAX_DEST_RD_ATOMIC:
1527                 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
1528                         MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
1529         case IB_QP_PATH_MIG_STATE:
1530                 return MLX5_QP_OPTPAR_PM_STATE;
1531         case IB_QP_CAP:
1532                 return 0;
1533         case IB_QP_DEST_QPN:
1534                 return 0;
1535         }
1536         return 0;
1537 }
1538
1539 static int ib_mask_to_mlx5_opt(int ib_mask)
1540 {
1541         int result = 0;
1542         int i;
1543
1544         for (i = 0; i < 8 * sizeof(int); i++) {
1545                 if ((1 << i) & ib_mask)
1546                         result |= ib_nr_to_mlx5_nr(1 << i);
1547         }
1548
1549         return result;
1550 }
1551
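/*
 * Build and execute the MODIFY_QP mailbox command for an already
 * validated transition; the caller holds qp->mutex.
 */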
1552 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1553                                const struct ib_qp_attr *attr, int attr_mask,
1554                                enum ib_qp_state cur_state, enum ib_qp_state new_state)
1555 {
1556         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1557         struct mlx5_ib_qp *qp = to_mqp(ibqp);
1558         struct mlx5_ib_cq *send_cq, *recv_cq;
1559         struct mlx5_qp_context *context;
1560         struct mlx5_modify_qp_mbox_in *in;
1561         struct mlx5_ib_pd *pd;
1562         enum mlx5_qp_state mlx5_cur, mlx5_new;
1563         enum mlx5_qp_optpar optpar;
1564         int sqd_event;
1565         int mlx5_st;
1566         int err;
1567
1568         in = kzalloc(sizeof(*in), GFP_KERNEL);
1569         if (!in)
1570                 return -ENOMEM;
1571
1572         context = &in->ctx;
1573         err = to_mlx5_st(ibqp->qp_type);
1574         if (err < 0)
1575                 goto out;
1576
1577         context->flags = cpu_to_be32(err << 16);
1578
1579         if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1580                 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1581         } else {
1582                 switch (attr->path_mig_state) {
1583                 case IB_MIG_MIGRATED:
1584                         context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1585                         break;
1586                 case IB_MIG_REARM:
1587                         context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
1588                         break;
1589                 case IB_MIG_ARMED:
1590                         context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
1591                         break;
1592                 }
1593         }
1594
1595         if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1596                 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
1597         } else if (ibqp->qp_type == IB_QPT_UD ||
1598                    ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
1599                 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1600         } else if (attr_mask & IB_QP_PATH_MTU) {
1601                 if (attr->path_mtu < IB_MTU_256 ||
1602                     attr->path_mtu > IB_MTU_4096) {
1603                         mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
1604                         err = -EINVAL;
1605                         goto out;
1606                 }
1607                 context->mtu_msgmax = (attr->path_mtu << 5) |
1608                                       (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
1609         }
1610
1611         if (attr_mask & IB_QP_DEST_QPN)
1612                 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
1613
1614         if (attr_mask & IB_QP_PKEY_INDEX)
1615                 context->pri_path.pkey_index = attr->pkey_index;
1616
1617         /* todo implement counter_index functionality */
1618
1619         if (is_sqp(ibqp->qp_type))
1620                 context->pri_path.port = qp->port;
1621
1622         if (attr_mask & IB_QP_PORT)
1623                 context->pri_path.port = attr->port_num;
1624
1625         if (attr_mask & IB_QP_AV) {
1626                 err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
1627                                     attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
1628                                     attr_mask, 0, attr);
1629                 if (err)
1630                         goto out;
1631         }
1632
1633         if (attr_mask & IB_QP_TIMEOUT)
1634                 context->pri_path.ackto_lt |= attr->timeout << 3;
1635
1636         if (attr_mask & IB_QP_ALT_PATH) {
1637                 err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1638                                     attr->alt_port_num, attr_mask, 0, attr);
1639                 if (err)
1640                         goto out;
1641         }
1642
1643         pd = get_pd(qp);
1644         get_cqs(qp, &send_cq, &recv_cq);
1645
1646         context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
1647         context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
1648         context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
1649         context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
1650
1651         if (attr_mask & IB_QP_RNR_RETRY)
1652                 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1653
1654         if (attr_mask & IB_QP_RETRY_CNT)
1655                 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1656
1657         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1658                 if (attr->max_rd_atomic)
1659                         context->params1 |=
1660                                 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1661         }
1662
1663         if (attr_mask & IB_QP_SQ_PSN)
1664                 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1665
1666         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1667                 if (attr->max_dest_rd_atomic)
1668                         context->params2 |=
1669                                 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1670         }
1671
1672         if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
1673                 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
1674
1675         if (attr_mask & IB_QP_MIN_RNR_TIMER)
1676                 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1677
1678         if (attr_mask & IB_QP_RQ_PSN)
1679                 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1680
1681         if (attr_mask & IB_QP_QKEY)
1682                 context->qkey = cpu_to_be32(attr->qkey);
1683
1684         if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1685                 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1686
1687         if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD  &&
1688             attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1689                 sqd_event = 1;
1690         else
1691                 sqd_event = 0;
1692
1693         if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1694                 context->sq_crq_size |= cpu_to_be16(1 << 4);
1695
1696
1697         mlx5_cur = to_mlx5_state(cur_state);
1698         mlx5_new = to_mlx5_state(new_state);
1699         mlx5_st = to_mlx5_st(ibqp->qp_type);
        if (mlx5_st < 0) {
                /* should be unreachable: qp_type was validated above,
                 * but don't leak a stale positive value through err */
                err = mlx5_st;
                goto out;
        }
1702
1703         /* If moving to a reset or error state, we must disable page faults on
1704          * this QP and flush all current page faults. Otherwise a stale page
1705          * fault may attempt to work on this QP after it is reset and moved
1706          * again to RTS, and may cause the driver and the device to get out of
1707          * sync. */
1708         if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
1709             (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
1710                 mlx5_ib_qp_disable_pagefaults(qp);
1711
1712         optpar = ib_mask_to_mlx5_opt(attr_mask);
1713         optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1714         in->optparam = cpu_to_be32(optpar);
1715         err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
1716                                   to_mlx5_state(new_state), in, sqd_event,
1717                                   &qp->mqp);
1718         if (err)
1719                 goto out;
1720
1721         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1722                 mlx5_ib_qp_enable_pagefaults(qp);
1723
1724         qp->state = new_state;
1725
1726         if (attr_mask & IB_QP_ACCESS_FLAGS)
1727                 qp->atomic_rd_en = attr->qp_access_flags;
1728         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1729                 qp->resp_depth = attr->max_dest_rd_atomic;
1730         if (attr_mask & IB_QP_PORT)
1731                 qp->port = attr->port_num;
1732         if (attr_mask & IB_QP_ALT_PATH)
1733                 qp->alt_port = attr->alt_port_num;
1734
1735         /*
1736          * If we moved a kernel QP to RESET, clean up all old CQ
1737          * entries and reinitialize the QP.
1738          */
1739         if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1740                 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1741                                  ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1742                 if (send_cq != recv_cq)
1743                         mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1744
1745                 qp->rq.head = 0;
1746                 qp->rq.tail = 0;
1747                 qp->sq.head = 0;
1748                 qp->sq.tail = 0;
1749                 qp->sq.cur_post = 0;
1750                 qp->sq.last_poll = 0;
1751                 qp->db.db[MLX5_RCV_DBR] = 0;
1752                 qp->db.db[MLX5_SND_DBR] = 0;
1753         }
1754
1755 out:
1756         kfree(in);
1757         return err;
1758 }
1759
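/*
 * Verb entry point: validate the requested transition and attribute
 * bounds under qp->mutex, then hand off to __mlx5_ib_modify_qp().
 */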
1760 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1761                       int attr_mask, struct ib_udata *udata)
1762 {
1763         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1764         struct mlx5_ib_qp *qp = to_mqp(ibqp);
1765         enum ib_qp_state cur_state, new_state;
1766         int err = -EINVAL;
1767         int port;
1768
1769         mutex_lock(&qp->mutex);
1770
1771         cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1772         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1773
1774         if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
1775             !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
1776                                 IB_LINK_LAYER_UNSPECIFIED))
1777                 goto out;
1778
1779         if ((attr_mask & IB_QP_PORT) &&
1780             (attr->port_num == 0 ||
1781              attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
1782                 goto out;
1783
1784         if (attr_mask & IB_QP_PKEY_INDEX) {
1785                 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1786                 if (attr->pkey_index >=
1787                     dev->mdev->port_caps[port - 1].pkey_table_len)
1788                         goto out;
1789         }
1790
1791         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1792             attr->max_rd_atomic >
1793             (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
1794                 goto out;
1795
1796         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1797             attr->max_dest_rd_atomic >
1798             (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
1799                 goto out;
1800
1801         if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1802                 err = 0;
1803                 goto out;
1804         }
1805
1806         err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1807
1808 out:
1809         mutex_unlock(&qp->mutex);
1810         return err;
1811 }
1812
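/*
 * Check whether nreq more posts would overflow the work queue: a lockless
 * fast path first, then a re-read under the CQ lock so completions being
 * polled concurrently are observed.  head and tail are free-running, so
 * the subtraction is wrap-safe.
 */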
1813 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1814 {
1815         struct mlx5_ib_cq *cq;
1816         unsigned cur;
1817
1818         cur = wq->head - wq->tail;
1819         if (likely(cur + nreq < wq->max_post))
1820                 return 0;
1821
1822         cq = to_mcq(ib_cq);
1823         spin_lock(&cq->lock);
1824         cur = wq->head - wq->tail;
1825         spin_unlock(&cq->lock);
1826
1827         return cur + nreq >= wq->max_post;
1828 }
1829
1830 static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1831                                           u64 remote_addr, u32 rkey)
1832 {
1833         rseg->raddr    = cpu_to_be64(remote_addr);
1834         rseg->rkey     = cpu_to_be32(rkey);
1835         rseg->reserved = 0;
1836 }
1837
1838 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1839                              struct ib_send_wr *wr)
1840 {
1841         memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
1842         dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
1843         dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
1844 }
1845
1846 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1847 {
1848         dseg->byte_count = cpu_to_be32(sg->length);
1849         dseg->lkey       = cpu_to_be32(sg->lkey);
1850         dseg->addr       = cpu_to_be64(sg->addr);
1851 }
1852
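/*
 * Number of 16-byte octowords covering @npages translation entries.
 * Entries are 8 bytes (two per octoword) and the count is rounded up to
 * a multiple of 8 entries, which the device appears to require.
 */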
1853 static __be16 get_klm_octo(int npages)
1854 {
1855         return cpu_to_be16(ALIGN(npages, 8) / 2);
1856 }
1857
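/* Mkey context fields a fast-registration (FRWR) UMR may update. */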
1858 static __be64 frwr_mkey_mask(void)
1859 {
1860         u64 result;
1861
1862         result = MLX5_MKEY_MASK_LEN             |
1863                 MLX5_MKEY_MASK_PAGE_SIZE        |
1864                 MLX5_MKEY_MASK_START_ADDR       |
1865                 MLX5_MKEY_MASK_EN_RINVAL        |
1866                 MLX5_MKEY_MASK_KEY              |
1867                 MLX5_MKEY_MASK_LR               |
1868                 MLX5_MKEY_MASK_LW               |
1869                 MLX5_MKEY_MASK_RR               |
1870                 MLX5_MKEY_MASK_RW               |
1871                 MLX5_MKEY_MASK_A                |
1872                 MLX5_MKEY_MASK_SMALL_FENCE      |
1873                 MLX5_MKEY_MASK_FREE;
1874
1875         return cpu_to_be64(result);
1876 }
1877
1878 static __be64 sig_mkey_mask(void)
1879 {
1880         u64 result;
1881
1882         result = MLX5_MKEY_MASK_LEN             |
1883                 MLX5_MKEY_MASK_PAGE_SIZE        |
1884                 MLX5_MKEY_MASK_START_ADDR       |
1885                 MLX5_MKEY_MASK_EN_SIGERR        |
1886                 MLX5_MKEY_MASK_EN_RINVAL        |
1887                 MLX5_MKEY_MASK_KEY              |
1888                 MLX5_MKEY_MASK_LR               |
1889                 MLX5_MKEY_MASK_LW               |
1890                 MLX5_MKEY_MASK_RR               |
1891                 MLX5_MKEY_MASK_RW               |
1892                 MLX5_MKEY_MASK_SMALL_FENCE      |
1893                 MLX5_MKEY_MASK_FREE             |
1894                 MLX5_MKEY_MASK_BSF_EN;
1895
1896         return cpu_to_be64(result);
1897 }
1898
1899 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
1900                                 struct mlx5_ib_mr *mr)
1901 {
1902         int ndescs = mr->ndescs;
1903
1904         memset(umr, 0, sizeof(*umr));
1905         umr->flags = MLX5_UMR_CHECK_NOT_FREE;
1906         umr->klm_octowords = get_klm_octo(ndescs);
1907         umr->mkey_mask = frwr_mkey_mask();
1908 }
1909
1910 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
1911 {
1912         memset(umr, 0, sizeof(*umr));
1913         umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1914         umr->flags = 1 << 7;
1915 }
1916
1917 static __be64 get_umr_reg_mr_mask(void)
1918 {
1919         u64 result;
1920
1921         result = MLX5_MKEY_MASK_LEN             |
1922                  MLX5_MKEY_MASK_PAGE_SIZE       |
1923                  MLX5_MKEY_MASK_START_ADDR      |
1924                  MLX5_MKEY_MASK_PD              |
1925                  MLX5_MKEY_MASK_LR              |
1926                  MLX5_MKEY_MASK_LW              |
1927                  MLX5_MKEY_MASK_KEY             |
1928                  MLX5_MKEY_MASK_RR              |
1929                  MLX5_MKEY_MASK_RW              |
1930                  MLX5_MKEY_MASK_A               |
1931                  MLX5_MKEY_MASK_FREE;
1932
1933         return cpu_to_be64(result);
1934 }
1935
1936 static __be64 get_umr_unreg_mr_mask(void)
1937 {
1938         u64 result;
1939
1940         result = MLX5_MKEY_MASK_FREE;
1941
1942         return cpu_to_be64(result);
1943 }
1944
1945 static __be64 get_umr_update_mtt_mask(void)
1946 {
1947         u64 result;
1948
1949         result = MLX5_MKEY_MASK_FREE;
1950
1951         return cpu_to_be64(result);
1952 }
1953
1954 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1955                                 struct ib_send_wr *wr)
1956 {
1957         struct mlx5_umr_wr *umrwr = umr_wr(wr);
1958
1959         memset(umr, 0, sizeof(*umr));
1960
1961         if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
1962                 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
1963         else
1964                 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
1965
1966         if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
1967                 umr->klm_octowords = get_klm_octo(umrwr->npages);
1968                 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
1969                         umr->mkey_mask = get_umr_update_mtt_mask();
1970                         umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
1971                         umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
1972                 } else {
1973                         umr->mkey_mask = get_umr_reg_mr_mask();
1974                 }
1975         } else {
1976                 umr->mkey_mask = get_umr_unreg_mr_mask();
1977         }
1978
1979         if (!wr->num_sge)
1980                 umr->flags |= MLX5_UMR_INLINE;
1981 }
1982
1983 static u8 get_umr_flags(int acc)
1984 {
1985         return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
1986                (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
1987                (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
1988                (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
1989                 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
1990 }
1991
1992 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
1993                              struct mlx5_ib_mr *mr,
1994                              u32 key, int access)
1995 {
1996         int ndescs = ALIGN(mr->ndescs, 8) >> 1;
1997
1998         memset(seg, 0, sizeof(*seg));
1999         seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT;
2000         seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
2001         seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
2002         seg->start_addr = cpu_to_be64(mr->ibmr.iova);
2003         seg->len = cpu_to_be64(mr->ibmr.length);
2004         seg->xlt_oct_size = cpu_to_be32(ndescs);
2005         seg->log2_page_size = ilog2(mr->ibmr.page_size);
2006 }
2007
2008 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
2009 {
2010         memset(seg, 0, sizeof(*seg));
2011         seg->status = MLX5_MKEY_STATUS_FREE;
2012 }
2013
2014 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
2015 {
2016         struct mlx5_umr_wr *umrwr = umr_wr(wr);
2017
2018         memset(seg, 0, sizeof(*seg));
2019         if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
2020                 seg->status = MLX5_MKEY_STATUS_FREE;
2021                 return;
2022         }
2023
2024         seg->flags = convert_access(umrwr->access_flags);
2025         if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
2026                 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
2027                 seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
2028         }
2029         seg->len = cpu_to_be64(umrwr->length);
2030         seg->log2_page_size = umrwr->page_shift;
2031         seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
2032                                        mlx5_mkey_variant(umrwr->mkey));
2033 }
2034
2035 static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
2036                              struct mlx5_ib_mr *mr,
2037                              struct mlx5_ib_pd *pd)
2038 {
2039         int bcount = mr->desc_size * mr->ndescs;
2040
2041         dseg->addr = cpu_to_be64(mr->desc_map);
2042         dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
2043         dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
2044 }
2045
2046 static __be32 send_ieth(struct ib_send_wr *wr)
2047 {
2048         switch (wr->opcode) {
2049         case IB_WR_SEND_WITH_IMM:
2050         case IB_WR_RDMA_WRITE_WITH_IMM:
2051                 return wr->ex.imm_data;
2052
2053         case IB_WR_SEND_WITH_INV:
2054                 return cpu_to_be32(wr->ex.invalidate_rkey);
2055
2056         default:
2057                 return 0;
2058         }
2059 }
2060
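/* Byte-wise XOR over @size bytes of the WQE, complemented. */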
2061 static u8 calc_sig(void *wqe, int size)
2062 {
2063         u8 *p = wqe;
2064         u8 res = 0;
2065         int i;
2066
2067         for (i = 0; i < size; i++)
2068                 res ^= p[i];
2069
2070         return ~res;
2071 }
2072
2073 static u8 wq_sig(void *wqe)
2074 {
2075         return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
2076 }
2077
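/*
 * Copy the WR's scatter list inline into the send queue, wrapping at the
 * end of the queue buffer; *sz returns the segment size in 16-byte units.
 */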
2078 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
2079                             void *wqe, int *sz)
2080 {
2081         struct mlx5_wqe_inline_seg *seg;
2082         void *qend = qp->sq.qend;
2083         void *addr;
2084         int inl = 0;
2085         int copy;
2086         int len;
2087         int i;
2088
2089         seg = wqe;
2090         wqe += sizeof(*seg);
2091         for (i = 0; i < wr->num_sge; i++) {
2092                 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
2093                 len  = wr->sg_list[i].length;
2094                 inl += len;
2095
2096                 if (unlikely(inl > qp->max_inline_data))
2097                         return -ENOMEM;
2098
2099                 if (unlikely(wqe + len > qend)) {
2100                         copy = qend - wqe;
2101                         memcpy(wqe, addr, copy);
2102                         addr += copy;
2103                         len -= copy;
2104                         wqe = mlx5_get_send_wqe(qp, 0);
2105                 }
2106                 memcpy(wqe, addr, len);
2107                 wqe += len;
2108         }
2109
2110         seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
2111
2112         *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
2113
2114         return 0;
2115 }
2116
2117 static u16 prot_field_size(enum ib_signature_type type)
2118 {
2119         switch (type) {
2120         case IB_SIG_TYPE_T10_DIF:
2121                 return MLX5_DIF_SIZE;
2122         default:
2123                 return 0;
2124         }
2125 }
2126
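/*
 * Map a block size in bytes to the device's block-size selector code;
 * 0 means the size is unsupported.
 */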
2127 static u8 bs_selector(int block_size)
2128 {
2129         switch (block_size) {
2130         case 512:           return 0x1;
2131         case 520:           return 0x2;
2132         case 4096:          return 0x3;
2133         case 4160:          return 0x4;
2134         case 1073741824:    return 0x5;
2135         default:            return 0;
2136         }
2137 }
2138
2139 static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
2140                               struct mlx5_bsf_inl *inl)
2141 {
2142         /* Valid inline section and allow BSF refresh */
2143         inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
2144                                        MLX5_BSF_REFRESH_DIF);
2145         inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
2146         inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
2147         /* repeating block */
2148         inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
2149         inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
2150                         MLX5_DIF_CRC : MLX5_DIF_IPCS;
2151
2152         if (domain->sig.dif.ref_remap)
2153                 inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
2154
2155         if (domain->sig.dif.app_escape) {
2156                 if (domain->sig.dif.ref_escape)
2157                         inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
2158                 else
2159                         inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
2160         }
2161
2162         inl->dif_app_bitmask_check =
2163                 cpu_to_be16(domain->sig.dif.apptag_check_mask);
2164 }
2165
2166 static int mlx5_set_bsf(struct ib_mr *sig_mr,
2167                         struct ib_sig_attrs *sig_attrs,
2168                         struct mlx5_bsf *bsf, u32 data_size)
2169 {
2170         struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
2171         struct mlx5_bsf_basic *basic = &bsf->basic;
2172         struct ib_sig_domain *mem = &sig_attrs->mem;
2173         struct ib_sig_domain *wire = &sig_attrs->wire;
2174
2175         memset(bsf, 0, sizeof(*bsf));
2176
2177         /* Basic + Extended + Inline */
2178         basic->bsf_size_sbs = 1 << 7;
2179         /* Input domain check byte mask */
2180         basic->check_byte_mask = sig_attrs->check_mask;
2181         basic->raw_data_size = cpu_to_be32(data_size);
2182
2183         /* Memory domain */
2184         switch (sig_attrs->mem.sig_type) {
2185         case IB_SIG_TYPE_NONE:
2186                 break;
2187         case IB_SIG_TYPE_T10_DIF:
2188                 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
2189                 basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
2190                 mlx5_fill_inl_bsf(mem, &bsf->m_inl);
2191                 break;
2192         default:
2193                 return -EINVAL;
2194         }
2195
2196         /* Wire domain */
2197         switch (sig_attrs->wire.sig_type) {
2198         case IB_SIG_TYPE_NONE:
2199                 break;
2200         case IB_SIG_TYPE_T10_DIF:
2201                 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
2202                     mem->sig_type == wire->sig_type) {
2203                         /* Same block structure */
2204                         basic->bsf_size_sbs |= 1 << 4;
2205                         if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
2206                                 basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
2207                         if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
2208                                 basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
2209                         if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
2210                                 basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
2211                 } else
2212                         basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
2213
2214                 basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
2215                 mlx5_fill_inl_bsf(wire, &bsf->w_inl);
2216                 break;
2217         default:
2218                 return -EINVAL;
2219         }
2220
2221         return 0;
2222 }
2223
2224 static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
2225                                 struct mlx5_ib_qp *qp, void **seg, int *size)
2226 {
2227         struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
2228         struct ib_mr *sig_mr = wr->sig_mr;
2229         struct mlx5_bsf *bsf;
2230         u32 data_len = wr->wr.sg_list->length;
2231         u32 data_key = wr->wr.sg_list->lkey;
2232         u64 data_va = wr->wr.sg_list->addr;
2233         int ret;
2234         int wqe_size;
2235
2236         if (!wr->prot ||
2237             (data_key == wr->prot->lkey &&
2238              data_va == wr->prot->addr &&
2239              data_len == wr->prot->length)) {
2240                 /**
2241                  * Source domain doesn't contain signature information
2242                  * or data and protection are interleaved in memory.
2243                  * So need construct:
2244                  *                  ------------------
2245                  *                 |     data_klm     |
2246                  *                  ------------------
2247                  *                 |       BSF        |
2248                  *                  ------------------
2249                  **/
2250                 struct mlx5_klm *data_klm = *seg;
2251
2252                 data_klm->bcount = cpu_to_be32(data_len);
2253                 data_klm->key = cpu_to_be32(data_key);
2254                 data_klm->va = cpu_to_be64(data_va);
2255                 wqe_size = ALIGN(sizeof(*data_klm), 64);
2256         } else {
2257                 /**
2258                  * Source domain contains signature information
2259                  * So need construct a strided block format:
2260                  *               ---------------------------
2261                  *              |     stride_block_ctrl     |
2262                  *               ---------------------------
2263                  *              |          data_klm         |
2264                  *               ---------------------------
2265                  *              |          prot_klm         |
2266                  *               ---------------------------
2267                  *              |             BSF           |
2268                  *               ---------------------------
2269                  **/
2270                 struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
2271                 struct mlx5_stride_block_entry *data_sentry;
2272                 struct mlx5_stride_block_entry *prot_sentry;
2273                 u32 prot_key = wr->prot->lkey;
2274                 u64 prot_va = wr->prot->addr;
2275                 u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
2276                 int prot_size;
2277
2278                 sblock_ctrl = *seg;
2279                 data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
2280                 prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
2281
2282                 prot_size = prot_field_size(sig_attrs->mem.sig_type);
2283                 if (!prot_size) {
                        pr_err("Unsupported signature type %d for strided block\n",
                               sig_attrs->mem.sig_type);
2285                         return -EINVAL;
2286                 }
2287                 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
2288                                                             prot_size);
2289                 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
2290                 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
2291                 sblock_ctrl->num_entries = cpu_to_be16(2);
2292
2293                 data_sentry->bcount = cpu_to_be16(block_size);
2294                 data_sentry->key = cpu_to_be32(data_key);
2295                 data_sentry->va = cpu_to_be64(data_va);
2296                 data_sentry->stride = cpu_to_be16(block_size);
2297
2298                 prot_sentry->bcount = cpu_to_be16(prot_size);
2299                 prot_sentry->key = cpu_to_be32(prot_key);
2300                 prot_sentry->va = cpu_to_be64(prot_va);
2301                 prot_sentry->stride = cpu_to_be16(prot_size);
2302
2303                 wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
2304                                  sizeof(*prot_sentry), 64);
2305         }
2306
2307         *seg += wqe_size;
2308         *size += wqe_size / 16;
2309         if (unlikely((*seg == qp->sq.qend)))
2310                 *seg = mlx5_get_send_wqe(qp, 0);
2311
2312         bsf = *seg;
2313         ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
2314         if (ret)
2315                 return -EINVAL;
2316
2317         *seg += sizeof(*bsf);
2318         *size += sizeof(*bsf) / 16;
2319         if (unlikely((*seg == qp->sq.qend)))
2320                 *seg = mlx5_get_send_wqe(qp, 0);
2321
2322         return 0;
2323 }
2324
2325 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
2326                                  struct ib_sig_handover_wr *wr, u32 nelements,
2327                                  u32 length, u32 pdn)
2328 {
2329         struct ib_mr *sig_mr = wr->sig_mr;
2330         u32 sig_key = sig_mr->rkey;
2331         u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
2332
2333         memset(seg, 0, sizeof(*seg));
2334
2335         seg->flags = get_umr_flags(wr->access_flags) |
2336                                    MLX5_ACCESS_MODE_KLM;
2337         seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
2338         seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
2339                                     MLX5_MKEY_BSF_EN | pdn);
2340         seg->len = cpu_to_be64(length);
2341         seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
2342         seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
2343 }
2344
2345 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
2346                                 u32 nelements)
2347 {
2348         memset(umr, 0, sizeof(*umr));
2349
2350         umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
2351         umr->klm_octowords = get_klm_octo(nelements);
2352         umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
2353         umr->mkey_mask = sig_mkey_mask();
2354 }
2355
2356
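/*
 * Build the UMR control, mkey and BSF data segments that bind a
 * signature MR over the WR's data (and optional protection) buffers.
 */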
2357 static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
2358                           void **seg, int *size)
2359 {
2360         struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
2361         struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
2362         u32 pdn = get_pd(qp)->pdn;
2363         u32 klm_oct_size;
2364         int region_len, ret;
2365
2366         if (unlikely(wr->wr.num_sge != 1) ||
2367             unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
2368             unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
2369             unlikely(!sig_mr->sig->sig_status_checked))
2370                 return -EINVAL;
2371
2372         /* length of the protected region, data + protection */
2373         region_len = wr->wr.sg_list->length;
2374         if (wr->prot &&
2375             (wr->prot->lkey != wr->wr.sg_list->lkey  ||
2376              wr->prot->addr != wr->wr.sg_list->addr  ||
2377              wr->prot->length != wr->wr.sg_list->length))
2378                 region_len += wr->prot->length;
2379
2380         /**
2381          * KLM octoword size - if protection was provided
2382          * then we use strided block format (3 octowords),
2383          * else we use single KLM (1 octoword)
2384          **/
2385         klm_oct_size = wr->prot ? 3 : 1;
2386
2387         set_sig_umr_segment(*seg, klm_oct_size);
2388         *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2389         *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2390         if (unlikely((*seg == qp->sq.qend)))
2391                 *seg = mlx5_get_send_wqe(qp, 0);
2392
2393         set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
2394         *seg += sizeof(struct mlx5_mkey_seg);
2395         *size += sizeof(struct mlx5_mkey_seg) / 16;
2396         if (unlikely((*seg == qp->sq.qend)))
2397                 *seg = mlx5_get_send_wqe(qp, 0);
2398
2399         ret = set_sig_data_segment(wr, qp, seg, size);
2400         if (ret)
2401                 return ret;
2402
2403         sig_mr->sig->sig_status_checked = false;
2404         return 0;
2405 }
2406
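/*
 * Emit a SET_PSV segment seeding the PSV with the domain's initial
 * block-guard, application-tag and reference-tag values.
 */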
2407 static int set_psv_wr(struct ib_sig_domain *domain,
2408                       u32 psv_idx, void **seg, int *size)
2409 {
2410         struct mlx5_seg_set_psv *psv_seg = *seg;
2411
2412         memset(psv_seg, 0, sizeof(*psv_seg));
2413         psv_seg->psv_num = cpu_to_be32(psv_idx);
2414         switch (domain->sig_type) {
2415         case IB_SIG_TYPE_NONE:
2416                 break;
2417         case IB_SIG_TYPE_T10_DIF:
2418                 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
2419                                                      domain->sig.dif.app_tag);
2420                 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
2421                 break;
        default:
                pr_err("Bad signature type (%d) given\n", domain->sig_type);
                return -EINVAL;
2425         }
2426
2427         *seg += sizeof(*psv_seg);
2428         *size += sizeof(*psv_seg) / 16;
2429
2430         return 0;
2431 }
2432
2433 static int set_reg_wr(struct mlx5_ib_qp *qp,
2434                       struct ib_reg_wr *wr,
2435                       void **seg, int *size)
2436 {
2437         struct mlx5_ib_mr *mr = to_mmr(wr->mr);
2438         struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
2439
2440         if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
2441                 mlx5_ib_warn(to_mdev(qp->ibqp.device),
2442                              "Invalid IB_SEND_INLINE send flag\n");
2443                 return -EINVAL;
2444         }
2445
2446         set_reg_umr_seg(*seg, mr);
2447         *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2448         *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2449         if (unlikely((*seg == qp->sq.qend)))
2450                 *seg = mlx5_get_send_wqe(qp, 0);
2451
2452         set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
2453         *seg += sizeof(struct mlx5_mkey_seg);
2454         *size += sizeof(struct mlx5_mkey_seg) / 16;
2455         if (unlikely((*seg == qp->sq.qend)))
2456                 *seg = mlx5_get_send_wqe(qp, 0);
2457
2458         set_reg_data_seg(*seg, mr, pd);
2459         *seg += sizeof(struct mlx5_wqe_data_seg);
2460         *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
2461
2462         return 0;
2463 }
2464
2465 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
2466 {
2467         set_linv_umr_seg(*seg);
2468         *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2469         *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2470         if (unlikely((*seg == qp->sq.qend)))
2471                 *seg = mlx5_get_send_wqe(qp, 0);
2472         set_linv_mkey_seg(*seg);
2473         *seg += sizeof(struct mlx5_mkey_seg);
2474         *size += sizeof(struct mlx5_mkey_seg) / 16;
2475         if (unlikely((*seg == qp->sq.qend)))
2476                 *seg = mlx5_get_send_wqe(qp, 0);
2477 }
2478
2479 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
2480 {
2481         __be32 *p = NULL;
2482         int tidx = idx;
2483         int i, j;
2484
2485         pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
2486         for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
2487                 if ((i & 0xf) == 0) {
2488                         void *buf = mlx5_get_send_wqe(qp, tidx);
2489                         tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
2490                         p = buf;
2491                         j = 0;
2492                 }
2493                 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
2494                          be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
2495                          be32_to_cpu(p[j + 3]));
2496         }
2497 }
2498
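/*
 * Copy a WQE to the BlueFlame register 64 bytes per iteration, wrapping
 * from the end of the send queue buffer back to its start; @bytecnt is
 * expected to be a multiple of 64.
 */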
2499 static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
2500                          unsigned bytecnt, struct mlx5_ib_qp *qp)
2501 {
2502         while (bytecnt > 0) {
2503                 __iowrite64_copy(dst++, src++, 8);
2504                 __iowrite64_copy(dst++, src++, 8);
2505                 __iowrite64_copy(dst++, src++, 8);
2506                 __iowrite64_copy(dst++, src++, 8);
2507                 __iowrite64_copy(dst++, src++, 8);
2508                 __iowrite64_copy(dst++, src++, 8);
2509                 __iowrite64_copy(dst++, src++, 8);
2510                 __iowrite64_copy(dst++, src++, 8);
2511                 bytecnt -= 64;
2512                 if (unlikely(src == qp->sq.qend))
2513                         src = mlx5_get_send_wqe(qp, 0);
2514         }
2515 }
2516
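/*
 * Pick the fence mode for this WQE: a fenced local invalidate gets strong
 * ordering; with a fence level pending from an earlier WQE, IB_SEND_FENCE
 * upgrades it to small-and-fence, otherwise the pending level is kept.
 * Note that with nothing pending, IB_SEND_FENCE on other opcodes is not
 * honoured here.
 */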
2517 static u8 get_fence(u8 fence, struct ib_send_wr *wr)
2518 {
2519         if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
2520                      wr->send_flags & IB_SEND_FENCE))
2521                 return MLX5_FENCE_MODE_STRONG_ORDERING;
2522
2523         if (unlikely(fence)) {
2524                 if (wr->send_flags & IB_SEND_FENCE)
2525                         return MLX5_FENCE_MODE_SMALL_AND_FENCE;
2526                 else
2527                         return fence;
2528
2529         } else {
2530                 return 0;
2531         }
2532 }
2533
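/*
 * Claim the next send WQE slot: check for overflow, locate the slot, and
 * fill the common control segment; *seg and *size are advanced past it.
 */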
2534 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
2535                      struct mlx5_wqe_ctrl_seg **ctrl,
2536                      struct ib_send_wr *wr, unsigned *idx,
2537                      int *size, int nreq)
2538 {
2539         int err = 0;
2540
2541         if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
2542                 err = -ENOMEM;
2543                 return err;
2544         }
2545
2546         *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
2547         *seg = mlx5_get_send_wqe(qp, *idx);
2548         *ctrl = *seg;
2549         *(uint32_t *)(*seg + 8) = 0;
2550         (*ctrl)->imm = send_ieth(wr);
2551         (*ctrl)->fm_ce_se = qp->sq_signal_bits |
2552                 (wr->send_flags & IB_SEND_SIGNALED ?
2553                  MLX5_WQE_CTRL_CQ_UPDATE : 0) |
2554                 (wr->send_flags & IB_SEND_SOLICITED ?
2555                  MLX5_WQE_CTRL_SOLICITED : 0);
2556
2557         *seg += sizeof(**ctrl);
2558         *size = sizeof(**ctrl) / 16;
2559
2560         return err;
2561 }
2562
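/*
 * Complete the control segment (WQE index, opcode, DS count, fence) and
 * record the bookkeeping used by completion processing; cur_post advances
 * in 64-byte basic-block units.
 */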
2563 static void finish_wqe(struct mlx5_ib_qp *qp,
2564                        struct mlx5_wqe_ctrl_seg *ctrl,
2565                        u8 size, unsigned idx, u64 wr_id,
2566                        int nreq, u8 fence, u8 next_fence,
2567                        u32 mlx5_opcode)
2568 {
2569         u8 opmod = 0;
2570
2571         ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
2572                                              mlx5_opcode | ((u32)opmod << 24));
2573         ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
2574         ctrl->fm_ce_se |= fence;
2575         qp->fm_cache = next_fence;
2576         if (unlikely(qp->wq_sig))
2577                 ctrl->signature = wq_sig(ctrl);
2578
2579         qp->sq.wrid[idx] = wr_id;
2580         qp->sq.w_list[idx].opcode = mlx5_opcode;
2581         qp->sq.wqe_head[idx] = qp->sq.head + nreq;
2582         qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
2583         qp->sq.w_list[idx].next = qp->sq.cur_post;
2584 }
2585
2586
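/*
 * post_send verb: WQEs are built under sq.lock and then published with a
 * doorbell record update followed by a BlueFlame or doorbell write (see
 * the barrier comments near the end).
 */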
2587 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2588                       struct ib_send_wr **bad_wr)
2589 {
2590         struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
2591         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2592         struct mlx5_ib_qp *qp = to_mqp(ibqp);
2593         struct mlx5_ib_mr *mr;
2594         struct mlx5_wqe_data_seg *dpseg;
2595         struct mlx5_wqe_xrc_seg *xrc;
2596         struct mlx5_bf *bf = qp->bf;
2597         int uninitialized_var(size);
2598         void *qend = qp->sq.qend;
2599         unsigned long flags;
2600         unsigned idx;
2601         int err = 0;
2602         int inl = 0;
2603         int num_sge;
2604         void *seg;
2605         int nreq;
2606         int i;
2607         u8 next_fence = 0;
2608         u8 fence;
2609
2610         spin_lock_irqsave(&qp->sq.lock, flags);
2611
2612         for (nreq = 0; wr; nreq++, wr = wr->next) {
2613                 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
                        mlx5_ib_warn(dev, "invalid send opcode %d\n", wr->opcode);
2615                         err = -EINVAL;
2616                         *bad_wr = wr;
2617                         goto out;
2618                 }
2619
2620                 fence = qp->fm_cache;
2621                 num_sge = wr->num_sge;
2622                 if (unlikely(num_sge > qp->sq.max_gs)) {
                        mlx5_ib_warn(dev, "too many sges %d (max %d)\n",
                                     num_sge, qp->sq.max_gs);
2624                         err = -ENOMEM;
2625                         *bad_wr = wr;
2626                         goto out;
2627                 }
2628
2629                 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
2630                 if (err) {
2631                         mlx5_ib_warn(dev, "\n");
2632                         err = -ENOMEM;
2633                         *bad_wr = wr;
2634                         goto out;
2635                 }
2636
2637                 switch (ibqp->qp_type) {
2638                 case IB_QPT_XRC_INI:
2639                         xrc = seg;
2640                         seg += sizeof(*xrc);
2641                         size += sizeof(*xrc) / 16;
2642                         /* fall through */
2643                 case IB_QPT_RC:
2644                         switch (wr->opcode) {
2645                         case IB_WR_RDMA_READ:
2646                         case IB_WR_RDMA_WRITE:
2647                         case IB_WR_RDMA_WRITE_WITH_IMM:
2648                                 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2649                                               rdma_wr(wr)->rkey);
2650                                 seg += sizeof(struct mlx5_wqe_raddr_seg);
2651                                 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2652                                 break;
2653
2654                         case IB_WR_ATOMIC_CMP_AND_SWP:
2655                         case IB_WR_ATOMIC_FETCH_AND_ADD:
2656                         case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
2657                                 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2658                                 err = -ENOSYS;
2659                                 *bad_wr = wr;
2660                                 goto out;
2661
2662                         case IB_WR_LOCAL_INV:
2663                                 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2664                                 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2665                                 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
2666                                 set_linv_wr(qp, &seg, &size);
2667                                 num_sge = 0;
2668                                 break;
2669
2670                         case IB_WR_REG_MR:
2671                                 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2672                                 qp->sq.wr_data[idx] = IB_WR_REG_MR;
2673                                 ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
2674                                 err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
2675                                 if (err) {
2676                                         *bad_wr = wr;
2677                                         goto out;
2678                                 }
2679                                 num_sge = 0;
2680                                 break;
2681
2682                         case IB_WR_REG_SIG_MR:
2683                                 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
2684                                 mr = to_mmr(sig_handover_wr(wr)->sig_mr);
2685
2686                                 ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
2687                                 err = set_sig_umr_wr(wr, qp, &seg, &size);
2688                                 if (err) {
2689                                         mlx5_ib_warn(dev, "\n");
2690                                         *bad_wr = wr;
2691                                         goto out;
2692                                 }
2693
2694                                 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2695                                            nreq, get_fence(fence, wr),
2696                                            next_fence, MLX5_OPCODE_UMR);
2697                                 /*
2698                                  * SET_PSV WQEs are not signaled and solicited
2699                                  * on error
2700                                  */
2701                                 wr->send_flags &= ~IB_SEND_SIGNALED;
2702                                 wr->send_flags |= IB_SEND_SOLICITED;
2703                                 err = begin_wqe(qp, &seg, &ctrl, wr,
2704                                                 &idx, &size, nreq);
2705                                 if (err) {
2706                                         mlx5_ib_warn(dev, "\n");
2707                                         err = -ENOMEM;
2708                                         *bad_wr = wr;
2709                                         goto out;
2710                                 }
2711
2712                                 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
2713                                                  mr->sig->psv_memory.psv_idx, &seg,
2714                                                  &size);
2715                                 if (err) {
2716                                         mlx5_ib_warn(dev, "\n");
2717                                         *bad_wr = wr;
2718                                         goto out;
2719                                 }
2720
2721                                 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2722                                            nreq, get_fence(fence, wr),
2723                                            next_fence, MLX5_OPCODE_SET_PSV);
2724                                 err = begin_wqe(qp, &seg, &ctrl, wr,
2725                                                 &idx, &size, nreq);
2726                                 if (err) {
2727                                         mlx5_ib_warn(dev, "\n");
2728                                         err = -ENOMEM;
2729                                         *bad_wr = wr;
2730                                         goto out;
2731                                 }
2732
2733                                 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2734                                 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
2735                                                  mr->sig->psv_wire.psv_idx, &seg,
2736                                                  &size);
2737                                 if (err) {
2738                                         mlx5_ib_warn(dev, "\n");
2739                                         *bad_wr = wr;
2740                                         goto out;
2741                                 }
2742
2743                                 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2744                                            nreq, get_fence(fence, wr),
2745                                            next_fence, MLX5_OPCODE_SET_PSV);
2746                                 num_sge = 0;
2747                                 goto skip_psv;
2748
2749                         default:
2750                                 break;
2751                         }
2752                         break;
2753
2754                 case IB_QPT_UC:
2755                         switch (wr->opcode) {
2756                         case IB_WR_RDMA_WRITE:
2757                         case IB_WR_RDMA_WRITE_WITH_IMM:
2758                                 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2759                                               rdma_wr(wr)->rkey);
2760                                 seg  += sizeof(struct mlx5_wqe_raddr_seg);
2761                                 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2762                                 break;
2763
2764                         default:
2765                                 break;
2766                         }
2767                         break;
2768
2769                 case IB_QPT_UD:
2770                 case IB_QPT_SMI:
2771                 case IB_QPT_GSI:
2772                         set_datagram_seg(seg, wr);
2773                         seg += sizeof(struct mlx5_wqe_datagram_seg);
2774                         size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2775                         if (unlikely((seg == qend)))
2776                                 seg = mlx5_get_send_wqe(qp, 0);
2777                         break;
2778
2779                 case MLX5_IB_QPT_REG_UMR:
2780                         if (wr->opcode != MLX5_IB_WR_UMR) {
2781                                 err = -EINVAL;
2782                                 mlx5_ib_warn(dev, "bad opcode\n");
2783                                 goto out;
2784                         }
2785                         qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
2786                         ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
2787                         set_reg_umr_segment(seg, wr);
2788                         seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2789                         size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2790                         if (unlikely(seg == qend))
2791                                 seg = mlx5_get_send_wqe(qp, 0);
2792                         set_reg_mkey_segment(seg, wr);
2793                         seg += sizeof(struct mlx5_mkey_seg);
2794                         size += sizeof(struct mlx5_mkey_seg) / 16;
2795                         if (unlikely(seg == qend))
2796                                 seg = mlx5_get_send_wqe(qp, 0);
2797                         break;
2798
2799                 default:
2800                         break;
2801                 }
2802
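                     /*
                      * Inline sends copy the payload straight into the WQE;
                      * otherwise one 16-byte data pointer segment is written
                      * per non-empty SGE.  "size" counts the WQE length in
                      * 16-byte units throughout.
                      */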
2803                 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2804                         int uninitialized_var(sz);
2805
2806                         err = set_data_inl_seg(qp, wr, seg, &sz);
2807                         if (unlikely(err)) {
2808                                 mlx5_ib_warn(dev, "failed to set inline data segment\n");
2809                                 *bad_wr = wr;
2810                                 goto out;
2811                         }
2812                         inl = 1;
2813                         size += sz;
2814                 } else {
2815                         dpseg = seg;
2816                         for (i = 0; i < num_sge; i++) {
2817                                 if (unlikely(dpseg == qend)) {
2818                                         seg = mlx5_get_send_wqe(qp, 0);
2819                                         dpseg = seg;
2820                                 }
2821                                 if (likely(wr->sg_list[i].length)) {
2822                                         set_data_ptr_seg(dpseg, wr->sg_list + i);
2823                                         size += sizeof(struct mlx5_wqe_data_seg) / 16;
2824                                         dpseg++;
2825                                 }
2826                         }
2827                 }
2828
2829                 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
2830                            get_fence(fence, wr), next_fence,
2831                            mlx5_ib_opcode[wr->opcode]);
2832 skip_psv:
2833                 if (0)
2834                         dump_wqe(qp, idx, size);
2835         }
2836
2837 out:
2838         if (likely(nreq)) {
2839                 qp->sq.head += nreq;
2840
2841                 /* Make sure that descriptors are written before
2842                  * updating doorbell record and ringing the doorbell
2843                  */
2844                 wmb();
2845
2846                 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2847
2848                 /* Make sure doorbell record is visible to the HCA before
2849                  * we hit doorbell */
2850                 wmb();
2851
2852                 if (bf->need_lock)
2853                         spin_lock(&bf->lock);
2854                 else
2855                         __acquire(&bf->lock);
2856
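                     /*
                      * Two doorbell flavours: copy the whole WQE into the
                      * BlueFlame register (the write-combining path, disabled
                      * by "if (0 ...)" below), or write the first 8 bytes of
                      * the control segment to the doorbell register.
                      * bf->offset alternates between the two halves of the
                      * BlueFlame buffer so back-to-back rings don't collide.
                      */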
2857                 /* TBD enable WC */
2858                 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2859                         mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
2860                         /* wc_wmb(); */
2861                 } else {
2862                         mlx5_write64((__be32 *)ctrl, bf->reg + bf->offset,
2863                                      MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2864                         /* Make sure doorbells don't leak out of SQ spinlock
2865                          * and reach the HCA out of order.
2866                          */
2867                         mmiowb();
2868                 }
2869                 bf->offset ^= bf->buf_size;
2870                 if (bf->need_lock)
2871                         spin_unlock(&bf->lock);
2872                 else
2873                         __release(&bf->lock);
2874         }
2875
2876         spin_unlock_irqrestore(&qp->sq.lock, flags);
2877
2878         return err;
2879 }
2880
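     /*
      * When wq_sig is enabled the first 16 bytes of every receive WQE hold a
      * signature segment; calc_sig() folds the WQE contents into a single
      * byte so that corrupted WQEs can be flagged.
      */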
2881 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
2882 {
2883         sig->signature = calc_sig(sig, size);
2884 }
2885
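     /*
      * Post a chain of receive work requests.  Each WR becomes one receive
      * WQE: a data pointer segment per SGE, terminated by a zero-length
      * segment with an invalid lkey when fewer than max_gs SGEs are used.
      * On error, *bad_wr points at the offending WR; everything before it
      * has already been posted.
      *
      * A minimal caller sketch (illustrative only, not part of this file;
      * assumes dma_addr, len and lkey were set up by the consumer):
      *
      *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
      *	struct ib_recv_wr wr = { .wr_id = 1, .sg_list = &sge, .num_sge = 1 };
      *	struct ib_recv_wr *bad;
      *
      *	if (ib_post_recv(qp, &wr, &bad))
      *		pr_err("post_recv failed from wr_id %llu\n", bad->wr_id);
      */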
2886 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2887                       struct ib_recv_wr **bad_wr)
2888 {
2889         struct mlx5_ib_qp *qp = to_mqp(ibqp);
2890         struct mlx5_wqe_data_seg *scat;
2891         struct mlx5_rwqe_sig *sig;
2892         unsigned long flags;
2893         int err = 0;
2894         int nreq;
2895         int ind;
2896         int i;
2897
2898         spin_lock_irqsave(&qp->rq.lock, flags);
2899
2900         ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2901
2902         for (nreq = 0; wr; nreq++, wr = wr->next) {
2903                 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2904                         err = -ENOMEM;
2905                         *bad_wr = wr;
2906                         goto out;
2907                 }
2908
2909                 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2910                         err = -EINVAL;
2911                         *bad_wr = wr;
2912                         goto out;
2913                 }
2914
2915                 scat = get_recv_wqe(qp, ind);
2916                 if (qp->wq_sig)
2917                         scat++;
2918
2919                 for (i = 0; i < wr->num_sge; i++)
2920                         set_data_ptr_seg(scat + i, wr->sg_list + i);
2921
2922                 if (i < qp->rq.max_gs) {
2923                         scat[i].byte_count = 0;
2924                         scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
2925                         scat[i].addr       = 0;
2926                 }
2927
2928                 if (qp->wq_sig) {
2929                         sig = (struct mlx5_rwqe_sig *)scat;
2930                         set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
2931                 }
2932
2933                 qp->rq.wrid[ind] = wr->wr_id;
2934
2935                 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
2936         }
2937
2938 out:
2939         if (likely(nreq)) {
2940                 qp->rq.head += nreq;
2941
2942                 /* Make sure that descriptors are written before
2943                  * doorbell record.
2944                  */
2945                 wmb();
2946
2947                 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
2948         }
2949
2950         spin_unlock_irqrestore(&qp->rq.lock, flags);
2951
2952         return err;
2953 }
2954
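     /*
      * Helpers translating hardware QP context encodings back into their
      * ib_verbs equivalents; values the driver does not recognize map to -1.
      */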
2955 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
2956 {
2957         switch (mlx5_state) {
2958         case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
2959         case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
2960         case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
2961         case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
2962         case MLX5_QP_STATE_SQ_DRAINING:
2963         case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
2964         case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
2965         case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
2966         default:                     return -1;
2967         }
2968 }
2969
2970 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
2971 {
2972         switch (mlx5_mig_state) {
2973         case MLX5_QP_PM_ARMED:          return IB_MIG_ARMED;
2974         case MLX5_QP_PM_REARM:          return IB_MIG_REARM;
2975         case MLX5_QP_PM_MIGRATED:       return IB_MIG_MIGRATED;
2976         default: return -1;
2977         }
2978 }
2979
2980 static int to_ib_qp_access_flags(int mlx5_flags)
2981 {
2982         int ib_flags = 0;
2983
2984         if (mlx5_flags & MLX5_QP_BIT_RRE)
2985                 ib_flags |= IB_ACCESS_REMOTE_READ;
2986         if (mlx5_flags & MLX5_QP_BIT_RWE)
2987                 ib_flags |= IB_ACCESS_REMOTE_WRITE;
2988         if (mlx5_flags & MLX5_QP_BIT_RAE)
2989                 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
2990
2991         return ib_flags;
2992 }
2993
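     /*
      * Unpack an address path from the QP context: the SL is the low nibble
      * of path->sl, bit 7 of grh_mlid indicates a GRH, and the traffic class
      * (bits 27:20) and flow label (bits 19:0) share one big-endian word.
      * The static rate is stored with a firmware bias, hence the "- 5".
      */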
2994 static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2995                                 struct mlx5_qp_path *path)
2996 {
2997         struct mlx5_core_dev *dev = ibdev->mdev;
2998
2999         memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
3000         ib_ah_attr->port_num      = path->port;
3001
3002         if (ib_ah_attr->port_num == 0 ||
3003             ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
3004                 return;
3005
3006         ib_ah_attr->sl = path->sl & 0xf;
3007
3008         ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
3009         ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
3010         ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
3011         ib_ah_attr->ah_flags      = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
3012         if (ib_ah_attr->ah_flags) {
3013                 ib_ah_attr->grh.sgid_index = path->mgid_index;
3014                 ib_ah_attr->grh.hop_limit  = path->hop_limit;
3015                 ib_ah_attr->grh.traffic_class =
3016                         (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
3017                 ib_ah_attr->grh.flow_label =
3018                         be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
3019                 memcpy(ib_ah_attr->grh.dgid.raw,
3020                        path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
3021         }
3022 }
3023
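     /*
      * Query firmware for the QP context and unpack it into ib_qp_attr.
      * The bit positions mirror the modify-QP path: QP state in flags[31:28],
      * path migration state in flags[12:11], and PSNs and the remote QPN in
      * the low 24 bits of their respective words.
      */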
3024 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
3025                      struct ib_qp_init_attr *qp_init_attr)
3026 {
3027         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3028         struct mlx5_ib_qp *qp = to_mqp(ibqp);
3029         struct mlx5_query_qp_mbox_out *outb;
3030         struct mlx5_qp_context *context;
3031         int mlx5_state;
3032         int err = 0;
3033
3034 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3035         /*
3036          * Wait for any outstanding page faults, in case the user frees memory
3037          * based upon this query's result.
3038          */
3039         flush_workqueue(mlx5_ib_page_fault_wq);
3040 #endif
3041
3042         mutex_lock(&qp->mutex);
3043         outb = kzalloc(sizeof(*outb), GFP_KERNEL);
3044         if (!outb) {
3045                 err = -ENOMEM;
3046                 goto out;
3047         }
3048         context = &outb->ctx;
3049         err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
3050         if (err)
3051                 goto out_free;
3052
3053         mlx5_state = be32_to_cpu(context->flags) >> 28;
3054
3055         qp->state                    = to_ib_qp_state(mlx5_state);
3056         qp_attr->qp_state            = qp->state;
3057         qp_attr->path_mtu            = context->mtu_msgmax >> 5;
3058         qp_attr->path_mig_state      =
3059                 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
3060         qp_attr->qkey                = be32_to_cpu(context->qkey);
3061         qp_attr->rq_psn              = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
3062         qp_attr->sq_psn              = be32_to_cpu(context->next_send_psn) & 0xffffff;
3063         qp_attr->dest_qp_num         = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
3064         qp_attr->qp_access_flags     =
3065                 to_ib_qp_access_flags(be32_to_cpu(context->params2));
3066
3067         if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
3068                 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
3069                 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
3070                 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
3071                 qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
3072         }
3073
3074         qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
3075         qp_attr->port_num = context->pri_path.port;
3076
3077         /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
3078         qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
3079
3080         qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
3081
3082         qp_attr->max_dest_rd_atomic =
3083                 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
3084         qp_attr->min_rnr_timer      =
3085                 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
3086         qp_attr->timeout            = context->pri_path.ackto_lt >> 3;
3087         qp_attr->retry_cnt          = (be32_to_cpu(context->params1) >> 16) & 0x7;
3088         qp_attr->rnr_retry          = (be32_to_cpu(context->params1) >> 13) & 0x7;
3089         qp_attr->alt_timeout        = context->alt_path.ackto_lt >> 3;
3090         qp_attr->cur_qp_state        = qp_attr->qp_state;
3091         qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
3092         qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
3093
3094         if (!ibqp->uobject) {
3095                 qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
3096                 qp_attr->cap.max_send_sge = qp->sq.max_gs;
3097         } else {
3098                 qp_attr->cap.max_send_wr  = 0;
3099                 qp_attr->cap.max_send_sge = 0;
3100         }
3101
3102         /* We don't support inline sends for kernel QPs (yet), and we
3103          * don't know what userspace's value should be.
3104          */
3105         qp_attr->cap.max_inline_data = 0;
3106
3107         qp_init_attr->cap            = qp_attr->cap;
3108
3109         qp_init_attr->create_flags = 0;
3110         if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
3111                 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
3112
3113         qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
3114                 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3115
3116 out_free:
3117         kfree(outb);
3118
3119 out:
3120         mutex_unlock(&qp->mutex);
3121         return err;
3122 }
3123
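     /*
      * XRC domains have no driver-side state beyond the xrcdn handle, so
      * allocation here and teardown in mlx5_ib_dealloc_xrcd() below are thin
      * wrappers around the corresponding firmware commands.
      */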
3124 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3125                                           struct ib_ucontext *context,
3126                                           struct ib_udata *udata)
3127 {
3128         struct mlx5_ib_dev *dev = to_mdev(ibdev);
3129         struct mlx5_ib_xrcd *xrcd;
3130         int err;
3131
3132         if (!MLX5_CAP_GEN(dev->mdev, xrc))
3133                 return ERR_PTR(-ENOSYS);
3134
3135         xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
3136         if (!xrcd)
3137                 return ERR_PTR(-ENOMEM);
3138
3139         err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
3140         if (err) {
3141                 kfree(xrcd);
3142                 return ERR_PTR(err);
3143         }
3144
3145         return &xrcd->ibxrcd;
3146 }
3147
3148 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3149 {
3150         struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
3151         u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
3152         int err;
3153
3154         err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
3155         if (err) {
3156                 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
3157                 return err;
3158         }
3159
3160         kfree(xrcd);
3161
3162         return 0;
3163 }