kernel/drivers/infiniband/hw/mlx5/qp.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int wq_signature;

enum {
        MLX5_IB_ACK_REQ_FREQ    = 8,
};

enum {
        MLX5_IB_DEFAULT_SCHED_QUEUE     = 0x83,
        MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
        MLX5_IB_LINK_TYPE_IB            = 0,
        MLX5_IB_LINK_TYPE_ETH           = 1
};

enum {
        MLX5_IB_SQ_STRIDE       = 6,
        MLX5_IB_CACHE_LINE_SIZE = 64,
};

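/*
 * Map IB verbs work-request opcodes to MLX5 WQE opcodes.  Note that
 * IB_WR_LOCAL_INV, IB_WR_REG_MR and the driver-internal MLX5_IB_WR_UMR
 * all resolve to MLX5_OPCODE_UMR: on this device, memory registration
 * and invalidation are both expressed as UMR work requests.
 */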
static const u32 mlx5_ib_opcode[] = {
        [IB_WR_SEND]                            = MLX5_OPCODE_SEND,
        [IB_WR_SEND_WITH_IMM]                   = MLX5_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]                      = MLX5_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]             = MLX5_OPCODE_RDMA_WRITE_IMM,
        [IB_WR_RDMA_READ]                       = MLX5_OPCODE_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]              = MLX5_OPCODE_ATOMIC_CS,
        [IB_WR_ATOMIC_FETCH_AND_ADD]            = MLX5_OPCODE_ATOMIC_FA,
        [IB_WR_SEND_WITH_INV]                   = MLX5_OPCODE_SEND_INVAL,
        [IB_WR_LOCAL_INV]                       = MLX5_OPCODE_UMR,
        [IB_WR_REG_MR]                          = MLX5_OPCODE_UMR,
        [IB_WR_MASKED_ATOMIC_CMP_AND_SWP]       = MLX5_OPCODE_ATOMIC_MASKED_CS,
        [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]     = MLX5_OPCODE_ATOMIC_MASKED_FA,
        [MLX5_IB_WR_UMR]                        = MLX5_OPCODE_UMR,
};


static int is_qp0(enum ib_qp_type qp_type)
{
        return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
        return is_qp0(qp_type) || is_qp1(qp_type);
}

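/*
 * WQE addressing: the send queue is addressed in 64-byte basic blocks
 * (MLX5_IB_SQ_STRIDE == 6, so n << 6 == n * 64), while the receive queue
 * uses the per-QP stride kept in rq.wqe_shift.  For example, send WQE
 * index 3 lives at sq.offset + 192 within the QP buffer.
 */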
static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
        return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
        return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
        return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}

/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *        otherwise.
 * @wqe_index:  index to start copying from. For send work queues, the
 *              wqe_index is in units of MLX5_SEND_WQE_BB.
 *              For receive work queues, it is the index of the work
 *              queue element in the queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * Return: the number of bytes copied, or a negative error code.
 */
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
                          void *buffer, u32 length)
{
        struct ib_device *ibdev = qp->ibqp.device;
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
        size_t offset;
        size_t wq_end;
        struct ib_umem *umem = qp->umem;
        u32 first_copy_length;
        int wqe_length;
        int ret;

        if (wq->wqe_cnt == 0) {
                mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
                            qp->ibqp.qp_type);
                return -EINVAL;
        }

        offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
        wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);

        if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
                return -EINVAL;

        if (offset > umem->length ||
            (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
                return -EINVAL;

        first_copy_length = min_t(u32, offset + length, wq_end) - offset;
        ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
        if (ret)
                return ret;

        if (send) {
                struct mlx5_wqe_ctrl_seg *ctrl = buffer;
                int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

                wqe_length = ds * MLX5_WQE_DS_UNITS;
        } else {
                wqe_length = 1 << wq->wqe_shift;
        }

        if (wqe_length <= first_copy_length)
                return first_copy_length;

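        /*
         * The WQE wraps past the end of the work queue buffer: copy the
         * remainder from the start of the queue (wq->offset).
         */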
        ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
                                wqe_length - first_copy_length);
        if (ret)
                return ret;

        return wqe_length;
}

static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
        struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
        struct ib_event event;

        if (type == MLX5_EVENT_TYPE_PATH_MIG)
                to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

        if (ibqp->event_handler) {
                event.device     = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case MLX5_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case MLX5_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
                        return;
                }

                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

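/*
 * RQ sizing sketch for the kernel (non-ucmd) path, assuming the usual
 * 16-byte mlx5_wqe_data_seg: with max_recv_sge = 3 and wq_sig off,
 * wqe_size = 3 * 16 = 48, rounded up to 64; wq_size becomes
 * roundup_pow_of_two(max_recv_wr) * 64, and max_gs ends up as
 * 64 / 16 = 4, so slightly more scatter entries than requested.
 */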
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
        int wqe_size;
        int wq_size;

        /* Sanity check RQ size before proceeding */
        if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
                return -EINVAL;

        if (!has_rq) {
                qp->rq.max_gs = 0;
                qp->rq.wqe_cnt = 0;
                qp->rq.wqe_shift = 0;
                cap->max_recv_wr = 0;
                cap->max_recv_sge = 0;
        } else {
                if (ucmd) {
                        qp->rq.wqe_cnt = ucmd->rq_wqe_count;
                        qp->rq.wqe_shift = ucmd->rq_wqe_shift;
                        qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
                        qp->rq.max_post = qp->rq.wqe_cnt;
                } else {
                        wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
                        wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
                        wqe_size = roundup_pow_of_two(wqe_size);
                        wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
                        wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
                        qp->rq.wqe_cnt = wq_size / wqe_size;
                        if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
                                mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
                                            wqe_size,
                                            MLX5_CAP_GEN(dev->mdev,
                                                         max_wqe_sz_rq));
                                return -EINVAL;
                        }
                        qp->rq.wqe_shift = ilog2(wqe_size);
                        qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
                        qp->rq.max_post = qp->rq.wqe_cnt;
                }
        }

        return 0;
}

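/*
 * Fixed per-WQE overhead of the send queue, by transport, before any
 * data segments are added.  RC reserves room for both an atomic and a
 * remote-address segment to cover the worst-case work request, and
 * XRC_INI adds an XRC segment on top of that via the fall-through.
 */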
static int sq_overhead(enum ib_qp_type qp_type)
{
        int size = 0;

        switch (qp_type) {
        case IB_QPT_XRC_INI:
                size += sizeof(struct mlx5_wqe_xrc_seg);
                /* fall through */
        case IB_QPT_RC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_atomic_seg) +
                        sizeof(struct mlx5_wqe_raddr_seg);
                break;

        case IB_QPT_XRC_TGT:
                return 0;

        case IB_QPT_UC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_raddr_seg) +
                        sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                        sizeof(struct mlx5_mkey_seg);
                break;

        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_datagram_seg);
                break;

        case MLX5_IB_QPT_REG_UMR:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                        sizeof(struct mlx5_mkey_seg);
                break;

        default:
                return -EINVAL;
        }

        return size;
}

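/*
 * Rough example, assuming 16-byte ctrl/raddr/atomic segments: an RC QP
 * with max_send_sge = 2 and no inline data needs 48 + 2 * 16 = 80 bytes,
 * which ALIGN() rounds up to two 64-byte basic blocks (128 bytes).
 * Signature-enabled QPs are padded up to MLX5_SIG_WQE_SIZE when they
 * would otherwise come out smaller.
 */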
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
        int inl_size = 0;
        int size;

        size = sq_overhead(attr->qp_type);
        if (size < 0)
                return size;

        if (attr->cap.max_inline_data) {
                inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
                        attr->cap.max_inline_data;
        }

        size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
        if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
            ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
                return MLX5_SIG_WQE_SIZE;
        else
                return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}

static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                        struct mlx5_ib_qp *qp)
{
        int wqe_size;
        int wq_size;

        if (!attr->cap.max_send_wr)
                return 0;

        wqe_size = calc_send_wqe(attr);
        mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
        if (wqe_size < 0)
                return wqe_size;

        if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
                            wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }

        qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
                sizeof(struct mlx5_wqe_inline_seg);
        attr->cap.max_inline_data = qp->max_inline_data;

        if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
                qp->signature_en = true;

        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
        if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
                            qp->sq.wqe_cnt,
                            1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -ENOMEM;
        }
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.max_gs = attr->cap.max_send_sge;
        qp->sq.max_post = wq_size / wqe_size;
        attr->cap.max_send_wr = qp->sq.max_post;

        return wq_size;
}

static int set_user_buf_size(struct mlx5_ib_dev *dev,
                            struct mlx5_ib_qp *qp,
                            struct mlx5_ib_create_qp *ucmd)
{
        int desc_sz = 1 << qp->sq.wqe_shift;

        if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
                             desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }

        if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
                mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
                             ucmd->sq_wqe_count);
                return -EINVAL;
        }

        qp->sq.wqe_cnt = ucmd->sq_wqe_count;

        if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
                             qp->sq.wqe_cnt,
                             1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -EINVAL;
        }

        qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
                (qp->sq.wqe_cnt << 6);

        return 0;
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI ||
            attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
            attr->qp_type == MLX5_IB_QPT_REG_UMR ||
            !attr->cap.max_recv_wr)
                return 0;

        return 1;
}

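/*
 * UUAR (micro UAR) bookkeeping.  Each UAR page appears to carry four
 * blue-flame registers, of which, as num_med_uuar() suggests via
 * MLX5_NON_FP_BF_REGS_PER_PAGE, only the first two per page are handed
 * out here; next_uuar() therefore skips indices 2 and 3 modulo 4.
 * Index 0 is the shared register used by the low latency class below.
 */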
static int first_med_uuar(void)
{
        return 1;
}

static int next_uuar(int n)
{
        n++;

        while (((n % 4) & 2))
                n++;

        return n;
}

static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
        int n;

        n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
                uuari->num_low_latency_uuars - 1;

        return n >= 0 ? n : 0;
}

static int max_uuari(struct mlx5_uuar_info *uuari)
{
        return uuari->num_uars * 4;
}

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
        int med;
        int i;
        int t;

        med = num_med_uuar(uuari);
        for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
                t++;
                if (t == med)
                        return next_uuar(i);
        }

        return 0;
}

static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
        int i;

        for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
                if (!test_bit(i, uuari->bitmap)) {
                        set_bit(i, uuari->bitmap);
                        uuari->count[i]++;
                        return i;
                }
        }

        return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
        int minidx = first_med_uuar();
        int i;

        for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
                if (uuari->count[i] < uuari->count[minidx])
                        minidx = i;
        }

        uuari->count[minidx]++;
        return minidx;
}

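/*
 * Latency class to UUAR mapping: LOW always lands on the shared uuarn 0,
 * FAST_PATH on the dedicated uuarn 2, while MEDIUM and HIGH draw from
 * the per-class pools and require a v2 (or later) UUAR layout.  Despite
 * the names, the HIGH class yields a dedicated register and thus the
 * lowest posting latency.
 */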
static int alloc_uuar(struct mlx5_uuar_info *uuari,
                      enum mlx5_ib_latency_class lat)
{
        int uuarn = -EINVAL;

        mutex_lock(&uuari->lock);
        switch (lat) {
        case MLX5_IB_LATENCY_CLASS_LOW:
                uuarn = 0;
                uuari->count[uuarn]++;
                break;

        case MLX5_IB_LATENCY_CLASS_MEDIUM:
                if (uuari->ver < 2)
                        uuarn = -ENOMEM;
                else
                        uuarn = alloc_med_class_uuar(uuari);
                break;

        case MLX5_IB_LATENCY_CLASS_HIGH:
                if (uuari->ver < 2)
                        uuarn = -ENOMEM;
                else
                        uuarn = alloc_high_class_uuar(uuari);
                break;

        case MLX5_IB_LATENCY_CLASS_FAST_PATH:
                uuarn = 2;
                break;
        }
        mutex_unlock(&uuari->lock);

        return uuarn;
}

static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        clear_bit(uuarn, uuari->bitmap);
        --uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        clear_bit(uuarn, uuari->bitmap);
        --uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
        int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
        int high_uuar = nuuars - uuari->num_low_latency_uuars;

        mutex_lock(&uuari->lock);
        if (uuarn == 0) {
                --uuari->count[uuarn];
                goto out;
        }

        if (uuarn < high_uuar) {
                free_med_class_uuar(uuari, uuarn);
                goto out;
        }

        free_high_class_uuar(uuari, uuarn);

out:
        mutex_unlock(&uuari->lock);
}

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:      return MLX5_QP_STATE_RST;
        case IB_QPS_INIT:       return MLX5_QP_STATE_INIT;
        case IB_QPS_RTR:        return MLX5_QP_STATE_RTR;
        case IB_QPS_RTS:        return MLX5_QP_STATE_RTS;
        case IB_QPS_SQD:        return MLX5_QP_STATE_SQD;
        case IB_QPS_SQE:        return MLX5_QP_STATE_SQER;
        case IB_QPS_ERR:        return MLX5_QP_STATE_ERR;
        default:                return -1;
        }
}

static int to_mlx5_st(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_RC:                 return MLX5_QP_ST_RC;
        case IB_QPT_UC:                 return MLX5_QP_ST_UC;
        case IB_QPT_UD:                 return MLX5_QP_ST_UD;
        case MLX5_IB_QPT_REG_UMR:       return MLX5_QP_ST_REG_UMR;
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:            return MLX5_QP_ST_XRC;
        case IB_QPT_SMI:                return MLX5_QP_ST_QP0;
        case IB_QPT_GSI:                return MLX5_QP_ST_QP1;
        case IB_QPT_RAW_IPV6:           return MLX5_QP_ST_RAW_IPV6;
        case IB_QPT_RAW_ETHERTYPE:      return MLX5_QP_ST_RAW_ETHERTYPE;
        case IB_QPT_RAW_PACKET:
        case IB_QPT_MAX:
        default:                return -EINVAL;
        }
}

static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
        return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}

static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                          struct mlx5_ib_qp *qp, struct ib_udata *udata,
                          struct mlx5_create_qp_mbox_in **in,
                          struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
        struct mlx5_ib_ucontext *context;
        struct mlx5_ib_create_qp ucmd;
        int page_shift = 0;
        int uar_index;
        int npages;
        u32 offset = 0;
        int uuarn;
        int ncont = 0;
        int err;

        err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
        if (err) {
                mlx5_ib_dbg(dev, "copy failed\n");
                return err;
        }

        context = to_mucontext(pd->uobject->context);
        /*
         * TBD: should come from the verbs when we have the API
         */
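        /*
         * Fall back through the classes HIGH -> MEDIUM -> LOW.  The debug
         * strings below describe the resulting latency, which is the
         * inverse of the class names (a HIGH class UUAR gives low latency).
         */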
        uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
        if (uuarn < 0) {
                mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
                mlx5_ib_dbg(dev, "reverting to medium latency\n");
                uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
                if (uuarn < 0) {
                        mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
                        mlx5_ib_dbg(dev, "reverting to high latency\n");
                        uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
                        if (uuarn < 0) {
                                mlx5_ib_warn(dev, "uuar allocation failed\n");
                                return uuarn;
                        }
                }
        }

        uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
        mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

        qp->rq.offset = 0;
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

        err = set_user_buf_size(dev, qp, &ucmd);
        if (err)
                goto err_uuar;

        if (ucmd.buf_addr && qp->buf_size) {
                qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
                                       qp->buf_size, 0, 0);
                if (IS_ERR(qp->umem)) {
                        mlx5_ib_dbg(dev, "umem_get failed\n");
                        err = PTR_ERR(qp->umem);
                        goto err_uuar;
                }
        } else {
                qp->umem = NULL;
        }

        if (qp->umem) {
                mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
                                   &ncont, NULL);
                err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
                if (err) {
                        mlx5_ib_warn(dev, "bad offset\n");
                        goto err_umem;
                }
                mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
                            ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
        }

        *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
        *in = mlx5_vzalloc(*inlen);
        if (!*in) {
                err = -ENOMEM;
                goto err_umem;
        }
        if (qp->umem)
                mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
        (*in)->ctx.log_pg_sz_remote_qpn =
                cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
        (*in)->ctx.params2 = cpu_to_be32(offset << 6);

        (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
        resp->uuar_index = uuarn;
        qp->uuarn = uuarn;

        err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
        if (err) {
                mlx5_ib_dbg(dev, "map failed\n");
                goto err_free;
        }

        err = ib_copy_to_udata(udata, resp, sizeof(*resp));
        if (err) {
                mlx5_ib_dbg(dev, "copy failed\n");
                goto err_unmap;
        }
        qp->create_type = MLX5_QP_USER;

        return 0;

err_unmap:
        mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
        kvfree(*in);

err_umem:
        if (qp->umem)
                ib_umem_release(qp->umem);

err_uuar:
        free_uuar(&context->uuari, uuarn);
        return err;
}

static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_ucontext *context;

        context = to_mucontext(pd->uobject->context);
        mlx5_ib_db_unmap_user(context, &qp->db);
        if (qp->umem)
                ib_umem_release(qp->umem);
        free_uuar(&context->uuari, qp->uuarn);
}

static int create_kernel_qp(struct mlx5_ib_dev *dev,
                            struct ib_qp_init_attr *init_attr,
                            struct mlx5_ib_qp *qp,
                            struct mlx5_create_qp_mbox_in **in, int *inlen)
{
        enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
        struct mlx5_uuar_info *uuari;
        int uar_index;
        int uuarn;
        int err;

        uuari = &dev->mdev->priv.uuari;
        if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
                return -EINVAL;

        if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
                lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

        uuarn = alloc_uuar(uuari, lc);
        if (uuarn < 0) {
                mlx5_ib_dbg(dev, "failed to allocate UUAR\n");
                return -ENOMEM;
        }

        qp->bf = &uuari->bfs[uuarn];
        uar_index = qp->bf->uar->index;

        err = calc_sq_size(dev, init_attr, qp);
        if (err < 0) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_uuar;
        }

        qp->rq.offset = 0;
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
        qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

        err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_uuar;
        }

        qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
        *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
        *in = mlx5_vzalloc(*inlen);
        if (!*in) {
                err = -ENOMEM;
                goto err_buf;
        }
        (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
        (*in)->ctx.log_pg_sz_remote_qpn =
                cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
        /* Set "fast registration enabled" for all kernel QPs */
        (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
        (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

        mlx5_fill_page_array(&qp->buf, (*in)->pas);

        err = mlx5_db_alloc(dev->mdev, &qp->db);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_free;
        }

        qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
        qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
        qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
        qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
        qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

        if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
            !qp->sq.w_list || !qp->sq.wqe_head) {
                err = -ENOMEM;
                goto err_wrid;
        }
        qp->create_type = MLX5_QP_KERNEL;

        return 0;

err_wrid:
        mlx5_db_free(dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
        kfree(qp->sq.wr_data);
        kfree(qp->rq.wrid);

err_free:
        kvfree(*in);

err_buf:
        mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
        free_uuar(&dev->mdev->priv.uuari, uuarn);
        return err;
}

static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
        mlx5_db_free(dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
        kfree(qp->sq.wr_data);
        kfree(qp->rq.wrid);
        mlx5_buf_free(dev->mdev, &qp->buf);
        free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}

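/*
 * Pick the receive queue flavor for the QP context: SRQ-backed when an
 * SRQ is attached (XRC initiators and targets always take this path),
 * a zero-length RQ when the QP has no receive side at all, and a
 * regular RQ otherwise.
 */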
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
        if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
            (attr->qp_type == IB_QPT_XRC_INI))
                return cpu_to_be32(MLX5_SRQ_RQ);
        else if (!qp->has_rq)
                return cpu_to_be32(MLX5_ZERO_LEN_RQ);
        else
                return cpu_to_be32(MLX5_NON_ZERO_RQ);
}

static int is_connected(enum ib_qp_type qp_type)
{
        if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
                return 1;

        return 0;
}

static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_resources *devr = &dev->devr;
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_ib_create_qp_resp resp;
        struct mlx5_create_qp_mbox_in *in;
        struct mlx5_ib_create_qp ucmd;
        int inlen = sizeof(*in);
        int err;

        mlx5_ib_odp_create_qp(qp);

        mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);

        if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
                        mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
                        return -EINVAL;
                } else {
                        qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
                }
        }

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

        if (pd && pd->uobject) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        mlx5_ib_dbg(dev, "copy failed\n");
                        return -EFAULT;
                }

                qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
                qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
        } else {
                qp->wq_sig = !!wq_signature;
        }

        qp->has_rq = qp_has_rq(init_attr);
        err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
                          qp, (pd && pd->uobject) ? &ucmd : NULL);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                return err;
        }

        if (pd) {
                if (pd->uobject) {
                        __u32 max_wqes =
                                1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
                        mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
                        if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
                            ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
                                mlx5_ib_dbg(dev, "invalid rq params\n");
                                return -EINVAL;
                        }
                        if (ucmd.sq_wqe_count > max_wqes) {
                                mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
                                            ucmd.sq_wqe_count, max_wqes);
                                return -EINVAL;
                        }
                        err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
                        if (err)
                                mlx5_ib_dbg(dev, "err %d\n", err);
                } else {
                        err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
                        if (err)
                                mlx5_ib_dbg(dev, "err %d\n", err);
                }

                if (err)
                        return err;
        } else {
                in = mlx5_vzalloc(sizeof(*in));
                if (!in)
                        return -ENOMEM;

                qp->create_type = MLX5_QP_EMPTY;
        }

        if (is_sqp(init_attr->qp_type))
                qp->port = init_attr->port_num;

        in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
                                    MLX5_QP_PM_MIGRATED << 11);

        if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
                in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
        else
                in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

        if (qp->wq_sig)
                in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

        if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
                in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);

        if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
                int rcqe_sz;
                int scqe_sz;

                rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
                scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

                if (rcqe_sz == 128)
                        in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
                else
                        in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;

                if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
                        if (scqe_sz == 128)
                                in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
                        else
                                in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
                }
        }

        if (qp->rq.wqe_cnt) {
                in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
                in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
        }

        in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

        if (qp->sq.wqe_cnt)
                in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
        else
                in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

        /* Set default resources */
        switch (init_attr->qp_type) {
        case IB_QPT_XRC_TGT:
                in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
                in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
                in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
                in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
                break;
        case IB_QPT_XRC_INI:
                in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
                in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
                in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
                break;
        default:
                if (init_attr->srq) {
                        in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
                        in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
                } else {
                        in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
                        in->ctx.rq_type_srqn |=
                                cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
                }
        }

        if (init_attr->send_cq)
                in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

        if (init_attr->recv_cq)
                in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

        in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

        err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
        if (err) {
                mlx5_ib_dbg(dev, "create qp failed\n");
                goto err_create;
        }

        kvfree(in);
        /* Hardware wants QPN written in big-endian order (after
         * shifting) for send doorbell.  Precompute this value to save
         * a little bit when posting sends.
         */
        qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

        qp->mqp.event = mlx5_ib_qp_event;

        return 0;

err_create:
        if (qp->create_type == MLX5_QP_USER)
                destroy_qp_user(pd, qp);
        else if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);

        kvfree(in);
        return err;
}

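/*
 * When the send and receive CQs differ, take both locks in ascending
 * CQN order so that concurrent callers cannot deadlock (classic ABBA
 * avoidance).  The __acquire()/__release() annotations on the branches
 * that take only one real lock keep sparse's context checking balanced.
 */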
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
        __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq) {
                if (recv_cq) {
                        if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
                                spin_lock_irq(&send_cq->lock);
                                spin_lock_nested(&recv_cq->lock,
                                                 SINGLE_DEPTH_NESTING);
                        } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
                                spin_lock_irq(&send_cq->lock);
                                __acquire(&recv_cq->lock);
                        } else {
                                spin_lock_irq(&recv_cq->lock);
                                spin_lock_nested(&send_cq->lock,
                                                 SINGLE_DEPTH_NESTING);
                        }
                } else {
                        spin_lock_irq(&send_cq->lock);
                        __acquire(&recv_cq->lock);
                }
        } else if (recv_cq) {
                spin_lock_irq(&recv_cq->lock);
                __acquire(&send_cq->lock);
        } else {
                __acquire(&send_cq->lock);
                __acquire(&recv_cq->lock);
        }
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
        __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
        if (send_cq) {
                if (recv_cq) {
                        if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
                                spin_unlock(&recv_cq->lock);
                                spin_unlock_irq(&send_cq->lock);
                        } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
                                __release(&recv_cq->lock);
                                spin_unlock_irq(&send_cq->lock);
                        } else {
                                spin_unlock(&send_cq->lock);
                                spin_unlock_irq(&recv_cq->lock);
                        }
                } else {
                        __release(&recv_cq->lock);
                        spin_unlock_irq(&send_cq->lock);
                }
        } else if (recv_cq) {
                __release(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        } else {
                __release(&recv_cq->lock);
                __release(&send_cq->lock);
        }
}

static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
        return to_mpd(qp->ibqp.pd);
}

static void get_cqs(struct mlx5_ib_qp *qp,
                    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
        switch (qp->ibqp.qp_type) {
        case IB_QPT_XRC_TGT:
                *send_cq = NULL;
                *recv_cq = NULL;
                break;
        case MLX5_IB_QPT_REG_UMR:
        case IB_QPT_XRC_INI:
                *send_cq = to_mcq(qp->ibqp.send_cq);
                *recv_cq = NULL;
                break;

        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        case IB_QPT_RAW_IPV6:
        case IB_QPT_RAW_ETHERTYPE:
                *send_cq = to_mcq(qp->ibqp.send_cq);
                *recv_cq = to_mcq(qp->ibqp.recv_cq);
                break;

        case IB_QPT_RAW_PACKET:
        case IB_QPT_MAX:
        default:
                *send_cq = NULL;
                *recv_cq = NULL;
                break;
        }
}

static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_cq *send_cq, *recv_cq;
        struct mlx5_modify_qp_mbox_in *in;
        int err;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return;

        if (qp->state != IB_QPS_RESET) {
                mlx5_ib_qp_disable_pagefaults(qp);
                if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
                                        MLX5_QP_STATE_RST, in, 0, &qp->mqp))
                        mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
                                     qp->mqp.qpn);
        }

        get_cqs(qp, &send_cq, &recv_cq);

        if (qp->create_type == MLX5_QP_KERNEL) {
                mlx5_ib_lock_cqs(send_cq, recv_cq);
                __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
                                   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (send_cq != recv_cq)
                        __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
                mlx5_ib_unlock_cqs(send_cq, recv_cq);
        }

        err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
        if (err)
                mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
        kfree(in);


        if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);
        else if (qp->create_type == MLX5_QP_USER)
                destroy_qp_user(&get_pd(qp)->ibpd, qp);
}

static const char *ib_qp_type_str(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_SMI:
                return "IB_QPT_SMI";
        case IB_QPT_GSI:
                return "IB_QPT_GSI";
        case IB_QPT_RC:
                return "IB_QPT_RC";
        case IB_QPT_UC:
                return "IB_QPT_UC";
        case IB_QPT_UD:
                return "IB_QPT_UD";
        case IB_QPT_RAW_IPV6:
                return "IB_QPT_RAW_IPV6";
        case IB_QPT_RAW_ETHERTYPE:
                return "IB_QPT_RAW_ETHERTYPE";
        case IB_QPT_XRC_INI:
                return "IB_QPT_XRC_INI";
        case IB_QPT_XRC_TGT:
                return "IB_QPT_XRC_TGT";
        case IB_QPT_RAW_PACKET:
                return "IB_QPT_RAW_PACKET";
        case MLX5_IB_QPT_REG_UMR:
                return "MLX5_IB_QPT_REG_UMR";
        case IB_QPT_MAX:
        default:
                return "Invalid QP type";
        }
}

struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
                                struct ib_qp_init_attr *init_attr,
                                struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev;
        struct mlx5_ib_qp *qp;
        u16 xrcdn = 0;
        int err;

        if (pd) {
                dev = to_mdev(pd->device);
        } else {
                /* being cautious here */
                if (init_attr->qp_type != IB_QPT_XRC_TGT &&
                    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
                        pr_warn("%s: no PD for transport %s\n", __func__,
                                ib_qp_type_str(init_attr->qp_type));
                        return ERR_PTR(-EINVAL);
                }
                dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
        }

        switch (init_attr->qp_type) {
        case IB_QPT_XRC_TGT:
        case IB_QPT_XRC_INI:
                if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
                        mlx5_ib_dbg(dev, "XRC not supported\n");
                        return ERR_PTR(-ENOSYS);
                }
                init_attr->recv_cq = NULL;
                if (init_attr->qp_type == IB_QPT_XRC_TGT) {
                        xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
                        init_attr->send_cq = NULL;
                }

                /* fall through */
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case MLX5_IB_QPT_REG_UMR:
                qp = kzalloc(sizeof(*qp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                err = create_qp_common(dev, pd, init_attr, udata, qp);
                if (err) {
                        mlx5_ib_dbg(dev, "create_qp_common failed\n");
                        kfree(qp);
                        return ERR_PTR(err);
                }

                if (is_qp0(init_attr->qp_type))
                        qp->ibqp.qp_num = 0;
                else if (is_qp1(init_attr->qp_type))
                        qp->ibqp.qp_num = 1;
                else
                        qp->ibqp.qp_num = qp->mqp.qpn;

                mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
                            qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
                            to_mcq(init_attr->send_cq)->mcq.cqn);

                qp->xrcdn = xrcdn;

                break;

        case IB_QPT_RAW_IPV6:
        case IB_QPT_RAW_ETHERTYPE:
        case IB_QPT_RAW_PACKET:
        case IB_QPT_MAX:
        default:
                mlx5_ib_dbg(dev, "unsupported qp type %d\n",
                            init_attr->qp_type);
                /* Don't support raw QPs */
                return ERR_PTR(-EINVAL);
        }

        return &qp->ibqp;
}

int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
        struct mlx5_ib_qp *mqp = to_mqp(qp);

        destroy_qp_common(dev, mqp);

        kfree(mqp);

        return 0;
}

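/*
 * Build the hardware RRE/RAE/RWE access bits from either the new
 * attributes or the QP's cached state.  When the destination has no
 * responder resources (dest_rd_atomic == 0), everything except remote
 * write is masked off, since reads and atomics could not be answered.
 */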
static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
                                   int attr_mask)
{
        u32 hw_access_flags = 0;
        u8 dest_rd_atomic;
        u32 access_flags;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                dest_rd_atomic = attr->max_dest_rd_atomic;
        else
                dest_rd_atomic = qp->resp_depth;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                access_flags = attr->qp_access_flags;
        else
                access_flags = qp->atomic_rd_en;

        if (!dest_rd_atomic)
                access_flags &= IB_ACCESS_REMOTE_WRITE;

        if (access_flags & IB_ACCESS_REMOTE_READ)
                hw_access_flags |= MLX5_QP_BIT_RRE;
        if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
                hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
        if (access_flags & IB_ACCESS_REMOTE_WRITE)
                hw_access_flags |= MLX5_QP_BIT_RWE;

        return cpu_to_be32(hw_access_flags);
}

enum {
        MLX5_PATH_FLAG_FL       = 1 << 0,
        MLX5_PATH_FLAG_FREE_AR  = 1 << 1,
        MLX5_PATH_FLAG_COUNTER  = 1 << 2,
};

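/*
 * Convert a verbs static rate to the device's stat_rate encoding.
 * IB_RATE_PORT_CURRENT maps to 0 ("use the port's current rate");
 * otherwise the loop steps down until it finds a rate the device
 * advertises in stat_rate_support, so e.g. an unsupported
 * IB_RATE_60_GBPS would degrade to the nearest supported slower rate.
 */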
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
        if (rate == IB_RATE_PORT_CURRENT) {
                return 0;
        } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
                return -EINVAL;
        } else {
                while (rate != IB_RATE_2_5_GBPS &&
                       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
                         MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
                        --rate;
        }

        return rate + MLX5_STAT_RATE_OFFSET;
}

static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
                         struct mlx5_qp_path *path, u8 port, int attr_mask,
                         u32 path_flags, const struct ib_qp_attr *attr)
{
        int err;

        path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
        path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

        if (attr_mask & IB_QP_PKEY_INDEX)
                path->pkey_index = attr->pkey_index;

        path->grh_mlid  = ah->src_path_bits & 0x7f;
        path->rlid      = cpu_to_be16(ah->dlid);

        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >=
                    dev->mdev->port_caps[port - 1].gid_table_len) {
                        pr_err("sgid_index (%u) too large. max is %d\n",
                               ah->grh.sgid_index,
                               dev->mdev->port_caps[port - 1].gid_table_len);
                        return -EINVAL;
                }
                path->grh_mlid |= 1 << 7;
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
                path->tclass_flowlabel =
                        cpu_to_be32((ah->grh.traffic_class << 20) |
                                    (ah->grh.flow_label));
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        }

        err = ib_rate_to_mlx5(dev, ah->static_rate);
        if (err < 0)
                return err;
        path->static_rate = err;
        path->port = port;

        if (attr_mask & IB_QP_TIMEOUT)
                path->ackto_lt = attr->timeout << 3;

        path->sl = ah->sl & 0xf;

        return 0;
}

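/*
 * opt_mask[from][to][st] lists which optional QP-context parameters may
 * legally be updated on a given state transition for each transport
 * service type; it is consulted when building the optional-parameter
 * mask for the modify-QP firmware command.
 */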
1411 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
1412         [MLX5_QP_STATE_INIT] = {
1413                 [MLX5_QP_STATE_INIT] = {
1414                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE            |
1415                                           MLX5_QP_OPTPAR_RAE            |
1416                                           MLX5_QP_OPTPAR_RWE            |
1417                                           MLX5_QP_OPTPAR_PKEY_INDEX     |
1418                                           MLX5_QP_OPTPAR_PRI_PORT,
1419                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE            |
1420                                           MLX5_QP_OPTPAR_PKEY_INDEX     |
1421                                           MLX5_QP_OPTPAR_PRI_PORT,
1422                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
1423                                           MLX5_QP_OPTPAR_Q_KEY          |
1424                                           MLX5_QP_OPTPAR_PRI_PORT,
1425                 },
1426                 [MLX5_QP_STATE_RTR] = {
1427                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
1428                                           MLX5_QP_OPTPAR_RRE            |
1429                                           MLX5_QP_OPTPAR_RAE            |
1430                                           MLX5_QP_OPTPAR_RWE            |
1431                                           MLX5_QP_OPTPAR_PKEY_INDEX,
1432                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
1433                                           MLX5_QP_OPTPAR_RWE            |
1434                                           MLX5_QP_OPTPAR_PKEY_INDEX,
1435                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
1436                                           MLX5_QP_OPTPAR_Q_KEY,
1437                         [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX    |
1438                                            MLX5_QP_OPTPAR_Q_KEY,
1439                         [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1440                                           MLX5_QP_OPTPAR_RRE            |
1441                                           MLX5_QP_OPTPAR_RAE            |
1442                                           MLX5_QP_OPTPAR_RWE            |
1443                                           MLX5_QP_OPTPAR_PKEY_INDEX,
1444                 },
1445         },
1446         [MLX5_QP_STATE_RTR] = {
1447                 [MLX5_QP_STATE_RTS] = {
1448                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
1449                                           MLX5_QP_OPTPAR_RRE            |
1450                                           MLX5_QP_OPTPAR_RAE            |
1451                                           MLX5_QP_OPTPAR_RWE            |
1452                                           MLX5_QP_OPTPAR_PM_STATE       |
1453                                           MLX5_QP_OPTPAR_RNR_TIMEOUT,
1454                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
1455                                           MLX5_QP_OPTPAR_RWE            |
1456                                           MLX5_QP_OPTPAR_PM_STATE,
1457                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1458                 },
1459         },
1460         [MLX5_QP_STATE_RTS] = {
1461                 [MLX5_QP_STATE_RTS] = {
1462                         [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE            |
1463                                           MLX5_QP_OPTPAR_RAE            |
1464                                           MLX5_QP_OPTPAR_RWE            |
1465                                           MLX5_QP_OPTPAR_RNR_TIMEOUT    |
1466                                           MLX5_QP_OPTPAR_PM_STATE       |
1467                                           MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1468                         [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE            |
1469                                           MLX5_QP_OPTPAR_PM_STATE       |
1470                                           MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1471                         [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY          |
1472                                           MLX5_QP_OPTPAR_SRQN           |
1473                                           MLX5_QP_OPTPAR_CQN_RCV,
1474                 },
1475         },
1476         [MLX5_QP_STATE_SQER] = {
1477                 [MLX5_QP_STATE_RTS] = {
1478                         [MLX5_QP_ST_UD]  = MLX5_QP_OPTPAR_Q_KEY,
1479                         [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1480                         [MLX5_QP_ST_UC]  = MLX5_QP_OPTPAR_RWE,
1481                         [MLX5_QP_ST_RC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT   |
1482                                            MLX5_QP_OPTPAR_RWE           |
1483                                            MLX5_QP_OPTPAR_RAE           |
1484                                            MLX5_QP_OPTPAR_RRE,
1485                 },
1486         },
1487 };
1488
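/*
 * Translate a single IB_QP_* attribute mask bit into the corresponding
 * MLX5_QP_OPTPAR_* bits.  Attributes carried in mandatory QP context
 * fields (state, MTU, PSNs, etc.) need no optional-parameter flag and
 * map to 0.
 */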
1489 static int ib_nr_to_mlx5_nr(int ib_mask)
1490 {
1491         switch (ib_mask) {
1492         case IB_QP_STATE:
1493                 return 0;
1494         case IB_QP_CUR_STATE:
1495                 return 0;
1496         case IB_QP_EN_SQD_ASYNC_NOTIFY:
1497                 return 0;
1498         case IB_QP_ACCESS_FLAGS:
1499                 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
1500                         MLX5_QP_OPTPAR_RAE;
1501         case IB_QP_PKEY_INDEX:
1502                 return MLX5_QP_OPTPAR_PKEY_INDEX;
1503         case IB_QP_PORT:
1504                 return MLX5_QP_OPTPAR_PRI_PORT;
1505         case IB_QP_QKEY:
1506                 return MLX5_QP_OPTPAR_Q_KEY;
1507         case IB_QP_AV:
1508                 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
1509                         MLX5_QP_OPTPAR_PRI_PORT;
1510         case IB_QP_PATH_MTU:
1511                 return 0;
1512         case IB_QP_TIMEOUT:
1513                 return MLX5_QP_OPTPAR_ACK_TIMEOUT;
1514         case IB_QP_RETRY_CNT:
1515                 return MLX5_QP_OPTPAR_RETRY_COUNT;
1516         case IB_QP_RNR_RETRY:
1517                 return MLX5_QP_OPTPAR_RNR_RETRY;
1518         case IB_QP_RQ_PSN:
1519                 return 0;
1520         case IB_QP_MAX_QP_RD_ATOMIC:
1521                 return MLX5_QP_OPTPAR_SRA_MAX;
1522         case IB_QP_ALT_PATH:
1523                 return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
1524         case IB_QP_MIN_RNR_TIMER:
1525                 return MLX5_QP_OPTPAR_RNR_TIMEOUT;
1526         case IB_QP_SQ_PSN:
1527                 return 0;
1528         case IB_QP_MAX_DEST_RD_ATOMIC:
1529                 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
1530                         MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
1531         case IB_QP_PATH_MIG_STATE:
1532                 return MLX5_QP_OPTPAR_PM_STATE;
1533         case IB_QP_CAP:
1534                 return 0;
1535         case IB_QP_DEST_QPN:
1536                 return 0;
1537         }
1538         return 0;
1539 }
1540
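/* Fold an entire IB attribute mask into one mlx5 optparam bitmask. */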
1541 static int ib_mask_to_mlx5_opt(int ib_mask)
1542 {
1543         int result = 0;
1544         int i;
1545
1546         for (i = 0; i < 8 * sizeof(int); i++) {
1547                 if ((1 << i) & ib_mask)
1548                         result |= ib_nr_to_mlx5_nr(1 << i);
1549         }
1550
1551         return result;
1552 }
1553
1554 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1555                                const struct ib_qp_attr *attr, int attr_mask,
1556                                enum ib_qp_state cur_state, enum ib_qp_state new_state)
1557 {
1558         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1559         struct mlx5_ib_qp *qp = to_mqp(ibqp);
1560         struct mlx5_ib_cq *send_cq, *recv_cq;
1561         struct mlx5_qp_context *context;
1562         struct mlx5_modify_qp_mbox_in *in;
1563         struct mlx5_ib_pd *pd;
1564         enum mlx5_qp_state mlx5_cur, mlx5_new;
1565         enum mlx5_qp_optpar optpar;
1566         int sqd_event;
1567         int mlx5_st;
1568         int err;
1569
1570         in = kzalloc(sizeof(*in), GFP_KERNEL);
1571         if (!in)
1572                 return -ENOMEM;
1573
1574         context = &in->ctx;
1575         err = to_mlx5_st(ibqp->qp_type);
1576         if (err < 0)
1577                 goto out;
1578
1579         context->flags = cpu_to_be32(err << 16);
1580
1581         if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1582                 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1583         } else {
1584                 switch (attr->path_mig_state) {
1585                 case IB_MIG_MIGRATED:
1586                         context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1587                         break;
1588                 case IB_MIG_REARM:
1589                         context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
1590                         break;
1591                 case IB_MIG_ARMED:
1592                         context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
1593                         break;
1594                 }
1595         }
1596
1597         if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1598                 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
1599         } else if (ibqp->qp_type == IB_QPT_UD ||
1600                    ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
1601                 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1602         } else if (attr_mask & IB_QP_PATH_MTU) {
1603                 if (attr->path_mtu < IB_MTU_256 ||
1604                     attr->path_mtu > IB_MTU_4096) {
1605                         mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
1606                         err = -EINVAL;
1607                         goto out;
1608                 }
1609                 context->mtu_msgmax = (attr->path_mtu << 5) |
1610                                       (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
1611         }
1612
1613         if (attr_mask & IB_QP_DEST_QPN)
1614                 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
1615
1616         if (attr_mask & IB_QP_PKEY_INDEX)
1617                 context->pri_path.pkey_index = attr->pkey_index;
1618
1619         /* todo implement counter_index functionality */
1620
1621         if (is_sqp(ibqp->qp_type))
1622                 context->pri_path.port = qp->port;
1623
1624         if (attr_mask & IB_QP_PORT)
1625                 context->pri_path.port = attr->port_num;
1626
1627         if (attr_mask & IB_QP_AV) {
1628                 err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
1629                                     attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
1630                                     attr_mask, 0, attr);
1631                 if (err)
1632                         goto out;
1633         }
1634
1635         if (attr_mask & IB_QP_TIMEOUT)
1636                 context->pri_path.ackto_lt |= attr->timeout << 3;
1637
1638         if (attr_mask & IB_QP_ALT_PATH) {
1639                 err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1640                                     attr->alt_port_num, attr_mask, 0, attr);
1641                 if (err)
1642                         goto out;
1643         }
1644
1645         pd = get_pd(qp);
1646         get_cqs(qp, &send_cq, &recv_cq);
1647
1648         context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
1649         context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
1650         context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
1651         context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
1652
1653         if (attr_mask & IB_QP_RNR_RETRY)
1654                 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1655
1656         if (attr_mask & IB_QP_RETRY_CNT)
1657                 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1658
1659         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1660                 if (attr->max_rd_atomic)
1661                         context->params1 |=
1662                                 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1663         }
1664
1665         if (attr_mask & IB_QP_SQ_PSN)
1666                 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1667
1668         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1669                 if (attr->max_dest_rd_atomic)
1670                         context->params2 |=
1671                                 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1672         }
1673
1674         if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
1675                 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
1676
1677         if (attr_mask & IB_QP_MIN_RNR_TIMER)
1678                 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1679
1680         if (attr_mask & IB_QP_RQ_PSN)
1681                 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1682
1683         if (attr_mask & IB_QP_QKEY)
1684                 context->qkey = cpu_to_be32(attr->qkey);
1685
1686         if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1687                 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1688
1689         if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD  &&
1690             attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1691                 sqd_event = 1;
1692         else
1693                 sqd_event = 0;
1694
1695         if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1696                 context->sq_crq_size |= cpu_to_be16(1 << 4);
1697
1699         mlx5_cur = to_mlx5_state(cur_state);
1700         mlx5_new = to_mlx5_state(new_state);
1701         mlx5_st = to_mlx5_st(ibqp->qp_type);
1702         if (mlx5_st < 0) {
                     err = -EINVAL;
1703                 goto out;
             }
1704
1705         /* If moving to a reset or error state, we must disable page faults on
1706          * this QP and flush all current page faults. Otherwise a stale page
1707          * fault may attempt to work on this QP after it is reset and moved
1708          * again to RTS, and may cause the driver and the device to get out of
1709          * sync.
              */
1710         if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
1711             (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
1712                 mlx5_ib_qp_disable_pagefaults(qp);
1713
1714         optpar = ib_mask_to_mlx5_opt(attr_mask);
1715         optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1716         in->optparam = cpu_to_be32(optpar);
1717         err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
1718                                   to_mlx5_state(new_state), in, sqd_event,
1719                                   &qp->mqp);
1720         if (err)
1721                 goto out;
1722
1723         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1724                 mlx5_ib_qp_enable_pagefaults(qp);
1725
1726         qp->state = new_state;
1727
1728         if (attr_mask & IB_QP_ACCESS_FLAGS)
1729                 qp->atomic_rd_en = attr->qp_access_flags;
1730         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1731                 qp->resp_depth = attr->max_dest_rd_atomic;
1732         if (attr_mask & IB_QP_PORT)
1733                 qp->port = attr->port_num;
1734         if (attr_mask & IB_QP_ALT_PATH)
1735                 qp->alt_port = attr->alt_port_num;
1736
1737         /*
1738          * If we moved a kernel QP to RESET, clean up all old CQ
1739          * entries and reinitialize the QP.
1740          */
1741         if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1742                 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1743                                  ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1744                 if (send_cq != recv_cq)
1745                         mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1746
1747                 qp->rq.head = 0;
1748                 qp->rq.tail = 0;
1749                 qp->sq.head = 0;
1750                 qp->sq.tail = 0;
1751                 qp->sq.cur_post = 0;
1752                 qp->sq.last_poll = 0;
1753                 qp->db.db[MLX5_RCV_DBR] = 0;
1754                 qp->db.db[MLX5_SND_DBR] = 0;
1755         }
1756
1757 out:
1758         kfree(in);
1759         return err;
1760 }
1761
1762 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1763                       int attr_mask, struct ib_udata *udata)
1764 {
1765         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1766         struct mlx5_ib_qp *qp = to_mqp(ibqp);
1767         enum ib_qp_state cur_state, new_state;
1768         int err = -EINVAL;
1769         int port;
1770
1771         mutex_lock(&qp->mutex);
1772
1773         cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1774         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1775
1776         if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
1777             !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
1778                                 IB_LINK_LAYER_UNSPECIFIED))
1779                 goto out;
1780
1781         if ((attr_mask & IB_QP_PORT) &&
1782             (attr->port_num == 0 ||
1783              attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
1784                 goto out;
1785
1786         if (attr_mask & IB_QP_PKEY_INDEX) {
1787                 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1788                 if (attr->pkey_index >=
1789                     dev->mdev->port_caps[port - 1].pkey_table_len)
1790                         goto out;
1791         }
1792
1793         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1794             attr->max_rd_atomic >
1795             (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
1796                 goto out;
1797
1798         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1799             attr->max_dest_rd_atomic >
1800             (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
1801                 goto out;
1802
1803         if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1804                 err = 0;
1805                 goto out;
1806         }
1807
1808         err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1809
1810 out:
1811         mutex_unlock(&qp->mutex);
1812         return err;
1813 }
1814
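/*
 * Check whether posting nreq more WQEs would overflow the work queue.
 * The lockless head/tail snapshot may be stale, so on apparent overflow
 * re-read under the CQ lock, which serializes against the completion
 * processing that advances wq->tail.
 */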
1815 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1816 {
1817         struct mlx5_ib_cq *cq;
1818         unsigned cur;
1819
1820         cur = wq->head - wq->tail;
1821         if (likely(cur + nreq < wq->max_post))
1822                 return 0;
1823
1824         cq = to_mcq(ib_cq);
1825         spin_lock(&cq->lock);
1826         cur = wq->head - wq->tail;
1827         spin_unlock(&cq->lock);
1828
1829         return cur + nreq >= wq->max_post;
1830 }
1831
1832 static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1833                                           u64 remote_addr, u32 rkey)
1834 {
1835         rseg->raddr    = cpu_to_be64(remote_addr);
1836         rseg->rkey     = cpu_to_be32(rkey);
1837         rseg->reserved = 0;
1838 }
1839
1840 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1841                              struct ib_send_wr *wr)
1842 {
1843         memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
1844         dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
1845         dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
1846 }
1847
1848 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1849 {
1850         dseg->byte_count = cpu_to_be32(sg->length);
1851         dseg->lkey       = cpu_to_be32(sg->lkey);
1852         dseg->addr       = cpu_to_be64(sg->addr);
1853 }
1854
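/*
 * Translation entries are 8 bytes each, i.e. two per 16-byte octoword;
 * the count is rounded up to a multiple of 8 entries, presumably a
 * device alignment requirement.
 */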
1855 static __be16 get_klm_octo(int npages)
1856 {
1857         return cpu_to_be16(ALIGN(npages, 8) / 2);
1858 }
1859
1860 static __be64 frwr_mkey_mask(void)
1861 {
1862         u64 result;
1863
1864         result = MLX5_MKEY_MASK_LEN             |
1865                 MLX5_MKEY_MASK_PAGE_SIZE        |
1866                 MLX5_MKEY_MASK_START_ADDR       |
1867                 MLX5_MKEY_MASK_EN_RINVAL        |
1868                 MLX5_MKEY_MASK_KEY              |
1869                 MLX5_MKEY_MASK_LR               |
1870                 MLX5_MKEY_MASK_LW               |
1871                 MLX5_MKEY_MASK_RR               |
1872                 MLX5_MKEY_MASK_RW               |
1873                 MLX5_MKEY_MASK_A                |
1874                 MLX5_MKEY_MASK_SMALL_FENCE      |
1875                 MLX5_MKEY_MASK_FREE;
1876
1877         return cpu_to_be64(result);
1878 }
1879
1880 static __be64 sig_mkey_mask(void)
1881 {
1882         u64 result;
1883
1884         result = MLX5_MKEY_MASK_LEN             |
1885                 MLX5_MKEY_MASK_PAGE_SIZE        |
1886                 MLX5_MKEY_MASK_START_ADDR       |
1887                 MLX5_MKEY_MASK_EN_SIGERR        |
1888                 MLX5_MKEY_MASK_EN_RINVAL        |
1889                 MLX5_MKEY_MASK_KEY              |
1890                 MLX5_MKEY_MASK_LR               |
1891                 MLX5_MKEY_MASK_LW               |
1892                 MLX5_MKEY_MASK_RR               |
1893                 MLX5_MKEY_MASK_RW               |
1894                 MLX5_MKEY_MASK_SMALL_FENCE      |
1895                 MLX5_MKEY_MASK_FREE             |
1896                 MLX5_MKEY_MASK_BSF_EN;
1897
1898         return cpu_to_be64(result);
1899 }
1900
1901 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
1902                                 struct mlx5_ib_mr *mr)
1903 {
1904         int ndescs = mr->ndescs;
1905
1906         memset(umr, 0, sizeof(*umr));
1907         umr->flags = MLX5_UMR_CHECK_NOT_FREE;
1908         umr->klm_octowords = get_klm_octo(ndescs);
1909         umr->mkey_mask = frwr_mkey_mask();
1910 }
1911
1912 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
1913 {
1914         memset(umr, 0, sizeof(*umr));
1915         umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1916         umr->flags = MLX5_UMR_INLINE;
1917 }
1918
1919 static __be64 get_umr_reg_mr_mask(void)
1920 {
1921         u64 result;
1922
1923         result = MLX5_MKEY_MASK_LEN             |
1924                  MLX5_MKEY_MASK_PAGE_SIZE       |
1925                  MLX5_MKEY_MASK_START_ADDR      |
1926                  MLX5_MKEY_MASK_PD              |
1927                  MLX5_MKEY_MASK_LR              |
1928                  MLX5_MKEY_MASK_LW              |
1929                  MLX5_MKEY_MASK_KEY             |
1930                  MLX5_MKEY_MASK_RR              |
1931                  MLX5_MKEY_MASK_RW              |
1932                  MLX5_MKEY_MASK_A               |
1933                  MLX5_MKEY_MASK_FREE;
1934
1935         return cpu_to_be64(result);
1936 }
1937
1938 static __be64 get_umr_unreg_mr_mask(void)
1939 {
1940         u64 result;
1941
1942         result = MLX5_MKEY_MASK_FREE;
1943
1944         return cpu_to_be64(result);
1945 }
1946
1947 static __be64 get_umr_update_mtt_mask(void)
1948 {
1949         u64 result;
1950
1951         result = MLX5_MKEY_MASK_FREE;
1952
1953         return cpu_to_be64(result);
1954 }
1955
1956 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1957                                 struct ib_send_wr *wr)
1958 {
1959         struct mlx5_umr_wr *umrwr = umr_wr(wr);
1960
1961         memset(umr, 0, sizeof(*umr));
1962
1963         if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
1964                 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
1965         else
1966                 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
1967
1968         if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
1969                 umr->klm_octowords = get_klm_octo(umrwr->npages);
1970                 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
1971                         umr->mkey_mask = get_umr_update_mtt_mask();
1972                         umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
1973                         umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
1974                 } else {
1975                         umr->mkey_mask = get_umr_reg_mr_mask();
1976                 }
1977         } else {
1978                 umr->mkey_mask = get_umr_unreg_mr_mask();
1979         }
1980
1981         if (!wr->num_sge)
1982                 umr->flags |= MLX5_UMR_INLINE;
1983 }
1984
1985 static u8 get_umr_flags(int acc)
1986 {
1987         return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
1988                (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
1989                (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
1990                (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
1991                 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
1992 }
1993
1994 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
1995                              struct mlx5_ib_mr *mr,
1996                              u32 key, int access)
1997 {
1998         int ndescs = ALIGN(mr->ndescs, 8) >> 1;
1999
2000         memset(seg, 0, sizeof(*seg));
2001         seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT;
2002         seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
2003         seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
2004         seg->start_addr = cpu_to_be64(mr->ibmr.iova);
2005         seg->len = cpu_to_be64(mr->ibmr.length);
2006         seg->xlt_oct_size = cpu_to_be32(ndescs);
2007         seg->log2_page_size = ilog2(mr->ibmr.page_size);
2008 }
2009
2010 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
2011 {
2012         memset(seg, 0, sizeof(*seg));
2013         seg->status = MLX5_MKEY_STATUS_FREE;
2014 }
2015
2016 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
2017 {
2018         struct mlx5_umr_wr *umrwr = umr_wr(wr);
2019
2020         memset(seg, 0, sizeof(*seg));
2021         if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
2022                 seg->status = MLX5_MKEY_STATUS_FREE;
2023                 return;
2024         }
2025
2026         seg->flags = convert_access(umrwr->access_flags);
2027         if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
2028                 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
2029                 seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
2030         }
2031         seg->len = cpu_to_be64(umrwr->length);
2032         seg->log2_page_size = umrwr->page_shift;
2033         seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
2034                                        mlx5_mkey_variant(umrwr->mkey));
2035 }
2036
2037 static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
2038                              struct mlx5_ib_mr *mr,
2039                              struct mlx5_ib_pd *pd)
2040 {
2041         int bcount = mr->desc_size * mr->ndescs;
2042
2043         dseg->addr = cpu_to_be64(mr->desc_map);
2044         dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
2045         dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
2046 }
2047
2048 static __be32 send_ieth(struct ib_send_wr *wr)
2049 {
2050         switch (wr->opcode) {
2051         case IB_WR_SEND_WITH_IMM:
2052         case IB_WR_RDMA_WRITE_WITH_IMM:
2053                 return wr->ex.imm_data;
2054
2055         case IB_WR_SEND_WITH_INV:
2056                 return cpu_to_be32(wr->ex.invalidate_rkey);
2057
2058         default:
2059                 return 0;
2060         }
2061 }
2062
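/*
 * XOR-fold 'size' bytes of a WQE into a one-byte checksum, inverted so
 * that an all-zero buffer does not produce a zero signature.
 */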
2063 static u8 calc_sig(void *wqe, int size)
2064 {
2065         u8 *p = wqe;
2066         u8 res = 0;
2067         int i;
2068
2069         for (i = 0; i < size; i++)
2070                 res ^= p[i];
2071
2072         return ~res;
2073 }
2074
2075 static u8 wq_sig(void *wqe)
2076 {
2077         return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
2078 }
2079
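/*
 * Copy the scatter list payload inline into the send WQE, wrapping back
 * to the start of the queue buffer when the segment crosses sq.qend.
 * On success, *sz holds the inline segment size in 16-byte units.
 */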
2080 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
2081                             void *wqe, int *sz)
2082 {
2083         struct mlx5_wqe_inline_seg *seg;
2084         void *qend = qp->sq.qend;
2085         void *addr;
2086         int inl = 0;
2087         int copy;
2088         int len;
2089         int i;
2090
2091         seg = wqe;
2092         wqe += sizeof(*seg);
2093         for (i = 0; i < wr->num_sge; i++) {
2094                 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
2095                 len  = wr->sg_list[i].length;
2096                 inl += len;
2097
2098                 if (unlikely(inl > qp->max_inline_data))
2099                         return -ENOMEM;
2100
2101                 if (unlikely(wqe + len > qend)) {
2102                         copy = qend - wqe;
2103                         memcpy(wqe, addr, copy);
2104                         addr += copy;
2105                         len -= copy;
2106                         wqe = mlx5_get_send_wqe(qp, 0);
2107                 }
2108                 memcpy(wqe, addr, len);
2109                 wqe += len;
2110         }
2111
2112         seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
2113
2114         *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
2115
2116         return 0;
2117 }
2118
2119 static u16 prot_field_size(enum ib_signature_type type)
2120 {
2121         switch (type) {
2122         case IB_SIG_TYPE_T10_DIF:
2123                 return MLX5_DIF_SIZE;
2124         default:
2125                 return 0;
2126         }
2127 }
2128
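/*
 * Map a T10-DIF block size in bytes to the device's block-size selector
 * encoding; unsupported sizes map to 0.
 */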
2129 static u8 bs_selector(int block_size)
2130 {
2131         switch (block_size) {
2132         case 512:           return 0x1;
2133         case 520:           return 0x2;
2134         case 4096:          return 0x3;
2135         case 4160:          return 0x4;
2136         case 1073741824:    return 0x5;
2137         default:            return 0;
2138         }
2139 }
2140
2141 static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
2142                               struct mlx5_bsf_inl *inl)
2143 {
2144         /* Valid inline section and allow BSF refresh */
2145         inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
2146                                        MLX5_BSF_REFRESH_DIF);
2147         inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
2148         inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
2149         /* repeating block */
2150         inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
2151         inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
2152                         MLX5_DIF_CRC : MLX5_DIF_IPCS;
2153
2154         if (domain->sig.dif.ref_remap)
2155                 inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
2156
2157         if (domain->sig.dif.app_escape) {
2158                 if (domain->sig.dif.ref_escape)
2159                         inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
2160                 else
2161                         inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
2162         }
2163
2164         inl->dif_app_bitmask_check =
2165                 cpu_to_be16(domain->sig.dif.apptag_check_mask);
2166 }
2167
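/*
 * Build the BSF describing how the device should generate and validate
 * T10-DIF protection information for the memory and wire domains of a
 * signature handover operation.
 */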
2168 static int mlx5_set_bsf(struct ib_mr *sig_mr,
2169                         struct ib_sig_attrs *sig_attrs,
2170                         struct mlx5_bsf *bsf, u32 data_size)
2171 {
2172         struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
2173         struct mlx5_bsf_basic *basic = &bsf->basic;
2174         struct ib_sig_domain *mem = &sig_attrs->mem;
2175         struct ib_sig_domain *wire = &sig_attrs->wire;
2176
2177         memset(bsf, 0, sizeof(*bsf));
2178
2179         /* Basic + Extended + Inline */
2180         basic->bsf_size_sbs = 1 << 7;
2181         /* Input domain check byte mask */
2182         basic->check_byte_mask = sig_attrs->check_mask;
2183         basic->raw_data_size = cpu_to_be32(data_size);
2184
2185         /* Memory domain */
2186         switch (sig_attrs->mem.sig_type) {
2187         case IB_SIG_TYPE_NONE:
2188                 break;
2189         case IB_SIG_TYPE_T10_DIF:
2190                 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
2191                 basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
2192                 mlx5_fill_inl_bsf(mem, &bsf->m_inl);
2193                 break;
2194         default:
2195                 return -EINVAL;
2196         }
2197
2198         /* Wire domain */
2199         switch (sig_attrs->wire.sig_type) {
2200         case IB_SIG_TYPE_NONE:
2201                 break;
2202         case IB_SIG_TYPE_T10_DIF:
2203                 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
2204                     mem->sig_type == wire->sig_type) {
2205                         /* Same block structure */
2206                         basic->bsf_size_sbs |= 1 << 4;
2207                         if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
2208                                 basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
2209                         if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
2210                                 basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
2211                         if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
2212                                 basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
2213                 } else
2214                         basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
2215
2216                 basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
2217                 mlx5_fill_inl_bsf(wire, &bsf->w_inl);
2218                 break;
2219         default:
2220                 return -EINVAL;
2221         }
2222
2223         return 0;
2224 }
2225
2226 static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
2227                                 struct mlx5_ib_qp *qp, void **seg, int *size)
2228 {
2229         struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
2230         struct ib_mr *sig_mr = wr->sig_mr;
2231         struct mlx5_bsf *bsf;
2232         u32 data_len = wr->wr.sg_list->length;
2233         u32 data_key = wr->wr.sg_list->lkey;
2234         u64 data_va = wr->wr.sg_list->addr;
2235         int ret;
2236         int wqe_size;
2237
2238         if (!wr->prot ||
2239             (data_key == wr->prot->lkey &&
2240              data_va == wr->prot->addr &&
2241              data_len == wr->prot->length)) {
2242                 /*
2243                  * Source domain doesn't contain signature information,
2244                  * or data and protection are interleaved in memory, so
2245                  * we need to construct:
2246                  *                  ------------------
2247                  *                 |     data_klm     |
2248                  *                  ------------------
2249                  *                 |       BSF        |
2250                  *                  ------------------
2251                  */
2252                 struct mlx5_klm *data_klm = *seg;
2253
2254                 data_klm->bcount = cpu_to_be32(data_len);
2255                 data_klm->key = cpu_to_be32(data_key);
2256                 data_klm->va = cpu_to_be64(data_va);
2257                 wqe_size = ALIGN(sizeof(*data_klm), 64);
2258         } else {
2259                 /*
2260                  * Source domain contains signature information, so we
2261                  * need to construct a strided block format:
2262                  *               ---------------------------
2263                  *              |     stride_block_ctrl     |
2264                  *               ---------------------------
2265                  *              |          data_klm         |
2266                  *               ---------------------------
2267                  *              |          prot_klm         |
2268                  *               ---------------------------
2269                  *              |             BSF           |
2270                  *               ---------------------------
2271                  */
2272                 struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
2273                 struct mlx5_stride_block_entry *data_sentry;
2274                 struct mlx5_stride_block_entry *prot_sentry;
2275                 u32 prot_key = wr->prot->lkey;
2276                 u64 prot_va = wr->prot->addr;
2277                 u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
2278                 int prot_size;
2279
2280                 sblock_ctrl = *seg;
2281                 data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
2282                 prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
2283
2284                 prot_size = prot_field_size(sig_attrs->mem.sig_type);
2285                 if (!prot_size) {
2286                         pr_err("Bad signature type given, block size: %u\n",
                                    block_size);
2287                         return -EINVAL;
2288                 }
2289                 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
2290                                                             prot_size);
2291                 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
2292                 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
2293                 sblock_ctrl->num_entries = cpu_to_be16(2);
2294
2295                 data_sentry->bcount = cpu_to_be16(block_size);
2296                 data_sentry->key = cpu_to_be32(data_key);
2297                 data_sentry->va = cpu_to_be64(data_va);
2298                 data_sentry->stride = cpu_to_be16(block_size);
2299
2300                 prot_sentry->bcount = cpu_to_be16(prot_size);
2301                 prot_sentry->key = cpu_to_be32(prot_key);
2302                 prot_sentry->va = cpu_to_be64(prot_va);
2303                 prot_sentry->stride = cpu_to_be16(prot_size);
2304
2305                 wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
2306                                  sizeof(*prot_sentry), 64);
2307         }
2308
2309         *seg += wqe_size;
2310         *size += wqe_size / 16;
2311         if (unlikely((*seg == qp->sq.qend)))
2312                 *seg = mlx5_get_send_wqe(qp, 0);
2313
2314         bsf = *seg;
2315         ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
2316         if (ret)
2317                 return -EINVAL;
2318
2319         *seg += sizeof(*bsf);
2320         *size += sizeof(*bsf) / 16;
2321         if (unlikely((*seg == qp->sq.qend)))
2322                 *seg = mlx5_get_send_wqe(qp, 0);
2323
2324         return 0;
2325 }
2326
2327 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
2328                                  struct ib_sig_handover_wr *wr, u32 nelements,
2329                                  u32 length, u32 pdn)
2330 {
2331         struct ib_mr *sig_mr = wr->sig_mr;
2332         u32 sig_key = sig_mr->rkey;
2333         u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
2334
2335         memset(seg, 0, sizeof(*seg));
2336
2337         seg->flags = get_umr_flags(wr->access_flags) |
2338                                    MLX5_ACCESS_MODE_KLM;
2339         seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
2340         seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
2341                                     MLX5_MKEY_BSF_EN | pdn);
2342         seg->len = cpu_to_be64(length);
2343         seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
2344         seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
2345 }
2346
2347 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
2348                                 u32 nelements)
2349 {
2350         memset(umr, 0, sizeof(*umr));
2351
2352         umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
2353         umr->klm_octowords = get_klm_octo(nelements);
2354         umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
2355         umr->mkey_mask = sig_mkey_mask();
2356 }
2357
2359 static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
2360                           void **seg, int *size)
2361 {
2362         struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
2363         struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
2364         u32 pdn = get_pd(qp)->pdn;
2365         u32 klm_oct_size;
2366         int region_len, ret;
2367
2368         if (unlikely(wr->wr.num_sge != 1) ||
2369             unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
2370             unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
2371             unlikely(!sig_mr->sig->sig_status_checked))
2372                 return -EINVAL;
2373
2374         /* length of the protected region, data + protection */
2375         region_len = wr->wr.sg_list->length;
2376         if (wr->prot &&
2377             (wr->prot->lkey != wr->wr.sg_list->lkey  ||
2378              wr->prot->addr != wr->wr.sg_list->addr  ||
2379              wr->prot->length != wr->wr.sg_list->length))
2380                 region_len += wr->prot->length;
2381
2382         /*
2383          * KLM octoword size - if protection was provided
2384          * then we use the strided block format (3 octowords),
2385          * else we use a single KLM (1 octoword)
2386          */
2387         klm_oct_size = wr->prot ? 3 : 1;
2388
2389         set_sig_umr_segment(*seg, klm_oct_size);
2390         *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2391         *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2392         if (unlikely((*seg == qp->sq.qend)))
2393                 *seg = mlx5_get_send_wqe(qp, 0);
2394
2395         set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
2396         *seg += sizeof(struct mlx5_mkey_seg);
2397         *size += sizeof(struct mlx5_mkey_seg) / 16;
2398         if (unlikely((*seg == qp->sq.qend)))
2399                 *seg = mlx5_get_send_wqe(qp, 0);
2400
2401         ret = set_sig_data_segment(wr, qp, seg, size);
2402         if (ret)
2403                 return ret;
2404
2405         sig_mr->sig->sig_status_checked = false;
2406         return 0;
2407 }
2408
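/*
 * Emit a SET_PSV segment that loads a Protection Signature Value with
 * the domain's initial guard/application/reference tags.
 */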
2409 static int set_psv_wr(struct ib_sig_domain *domain,
2410                       u32 psv_idx, void **seg, int *size)
2411 {
2412         struct mlx5_seg_set_psv *psv_seg = *seg;
2413
2414         memset(psv_seg, 0, sizeof(*psv_seg));
2415         psv_seg->psv_num = cpu_to_be32(psv_idx);
2416         switch (domain->sig_type) {
2417         case IB_SIG_TYPE_NONE:
2418                 break;
2419         case IB_SIG_TYPE_T10_DIF:
2420                 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
2421                                                      domain->sig.dif.app_tag);
2422                 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
2423                 break;
2424         default:
2425                 pr_err("Bad signature type (%d) given\n", domain->sig_type);
2426                 return -EINVAL;
2427         }
2428
2429         *seg += sizeof(*psv_seg);
2430         *size += sizeof(*psv_seg) / 16;
2431
2432         return 0;
2433 }
2434
2435 static int set_reg_wr(struct mlx5_ib_qp *qp,
2436                       struct ib_reg_wr *wr,
2437                       void **seg, int *size)
2438 {
2439         struct mlx5_ib_mr *mr = to_mmr(wr->mr);
2440         struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
2441
2442         if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
2443                 mlx5_ib_warn(to_mdev(qp->ibqp.device),
2444                              "Invalid IB_SEND_INLINE send flag\n");
2445                 return -EINVAL;
2446         }
2447
2448         set_reg_umr_seg(*seg, mr);
2449         *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2450         *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2451         if (unlikely((*seg == qp->sq.qend)))
2452                 *seg = mlx5_get_send_wqe(qp, 0);
2453
2454         set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
2455         *seg += sizeof(struct mlx5_mkey_seg);
2456         *size += sizeof(struct mlx5_mkey_seg) / 16;
2457         if (unlikely((*seg == qp->sq.qend)))
2458                 *seg = mlx5_get_send_wqe(qp, 0);
2459
2460         set_reg_data_seg(*seg, mr, pd);
2461         *seg += sizeof(struct mlx5_wqe_data_seg);
2462         *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
2463
2464         return 0;
2465 }
2466
2467 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
2468 {
2469         set_linv_umr_seg(*seg);
2470         *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2471         *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2472         if (unlikely((*seg == qp->sq.qend)))
2473                 *seg = mlx5_get_send_wqe(qp, 0);
2474         set_linv_mkey_seg(*seg);
2475         *seg += sizeof(struct mlx5_mkey_seg);
2476         *size += sizeof(struct mlx5_mkey_seg) / 16;
2477         if (unlikely((*seg == qp->sq.qend)))
2478                 *seg = mlx5_get_send_wqe(qp, 0);
2479 }
2480
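/* Debug helper: hex-dump a posted WQE, following send queue wrap-around. */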
2481 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
2482 {
2483         __be32 *p = NULL;
2484         int tidx = idx;
2485         int i, j;
2486
2487         pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
2488         for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
2489                 if ((i & 0xf) == 0) {
2490                         void *buf = mlx5_get_send_wqe(qp, tidx);
2491                         tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
2492                         p = buf;
2493                         j = 0;
2494                 }
2495                 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
2496                          be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
2497                          be32_to_cpu(p[j + 3]));
2498         }
2499 }
2500
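/*
 * Copy a WQE into the BlueFlame register in 64-byte chunks, wrapping
 * around the send queue buffer as needed.  bytecnt must be a multiple
 * of 64.
 */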
2501 static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
2502                          unsigned bytecnt, struct mlx5_ib_qp *qp)
2503 {
2504         while (bytecnt > 0) {
2505                 __iowrite64_copy(dst++, src++, 8);
2506                 __iowrite64_copy(dst++, src++, 8);
2507                 __iowrite64_copy(dst++, src++, 8);
2508                 __iowrite64_copy(dst++, src++, 8);
2509                 __iowrite64_copy(dst++, src++, 8);
2510                 __iowrite64_copy(dst++, src++, 8);
2511                 __iowrite64_copy(dst++, src++, 8);
2512                 __iowrite64_copy(dst++, src++, 8);
2513                 bytecnt -= 64;
2514                 if (unlikely(src == qp->sq.qend))
2515                         src = mlx5_get_send_wqe(qp, 0);
2516         }
2517 }
2518
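/*
 * Pick the fence mode for a WQE: a fenced local invalidate gets strong
 * ordering; otherwise combine any fence state inherited via qp->fm_cache
 * with the requested IB_SEND_FENCE flag.
 */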
2519 static u8 get_fence(u8 fence, struct ib_send_wr *wr)
2520 {
2521         if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
2522                      wr->send_flags & IB_SEND_FENCE))
2523                 return MLX5_FENCE_MODE_STRONG_ORDERING;
2524
2525         if (unlikely(fence)) {
2526                 if (wr->send_flags & IB_SEND_FENCE)
2527                         return MLX5_FENCE_MODE_SMALL_AND_FENCE;
2528                 else
2529                         return fence;
2530         } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
2531                 return MLX5_FENCE_MODE_FENCE;
2532         }
2533
2534         return 0;
2535 }
2536
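/*
 * Reserve the next send WQE slot and initialize its control segment
 * (immediate data plus completion/solicited flags).  Returns -ENOMEM if
 * the send queue is full; *size is counted in 16-byte units.
 */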
2537 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
2538                      struct mlx5_wqe_ctrl_seg **ctrl,
2539                      struct ib_send_wr *wr, unsigned *idx,
2540                      int *size, int nreq)
2541 {
2542         int err = 0;
2543
2544         if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
2545                 err = -ENOMEM;
2546                 return err;
2547         }
2548
2549         *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
2550         *seg = mlx5_get_send_wqe(qp, *idx);
2551         *ctrl = *seg;
2552         *(uint32_t *)(*seg + 8) = 0;
2553         (*ctrl)->imm = send_ieth(wr);
2554         (*ctrl)->fm_ce_se = qp->sq_signal_bits |
2555                 (wr->send_flags & IB_SEND_SIGNALED ?
2556                  MLX5_WQE_CTRL_CQ_UPDATE : 0) |
2557                 (wr->send_flags & IB_SEND_SOLICITED ?
2558                  MLX5_WQE_CTRL_SOLICITED : 0);
2559
2560         *seg += sizeof(**ctrl);
2561         *size = sizeof(**ctrl) / 16;
2562
2563         return err;
2564 }
2565
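/*
 * Fill in the remaining control segment fields (opcode, WQE index, DS
 * count), record the bookkeeping used by completion processing, and
 * advance cur_post by the number of basic blocks the WQE consumes.
 */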
2566 static void finish_wqe(struct mlx5_ib_qp *qp,
2567                        struct mlx5_wqe_ctrl_seg *ctrl,
2568                        u8 size, unsigned idx, u64 wr_id,
2569                        int nreq, u8 fence, u8 next_fence,
2570                        u32 mlx5_opcode)
2571 {
2572         u8 opmod = 0;
2573
2574         ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
2575                                              mlx5_opcode | ((u32)opmod << 24));
2576         ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
2577         ctrl->fm_ce_se |= fence;
2578         qp->fm_cache = next_fence;
2579         if (unlikely(qp->wq_sig))
2580                 ctrl->signature = wq_sig(ctrl);
2581
2582         qp->sq.wrid[idx] = wr_id;
2583         qp->sq.w_list[idx].opcode = mlx5_opcode;
2584         qp->sq.wqe_head[idx] = qp->sq.head + nreq;
2585         qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
2586         qp->sq.w_list[idx].next = qp->sq.cur_post;
2587 }
2588
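/*
 * Post a chain of send work requests: for each WR, reserve a WQE and emit
 * the transport- and opcode-specific segments, then update the doorbell
 * record and ring the doorbell once for the whole chain.
 */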
2590 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2591                       struct ib_send_wr **bad_wr)
2592 {
2593         struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
2594         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2595         struct mlx5_ib_qp *qp = to_mqp(ibqp);
2596         struct mlx5_ib_mr *mr;
2597         struct mlx5_wqe_data_seg *dpseg;
2598         struct mlx5_wqe_xrc_seg *xrc;
2599         struct mlx5_bf *bf = qp->bf;
2600         int uninitialized_var(size);
2601         void *qend = qp->sq.qend;
2602         unsigned long flags;
2603         unsigned idx;
2604         int err = 0;
2605         int inl = 0;
2606         int num_sge;
2607         void *seg;
2608         int nreq;
2609         int i;
2610         u8 next_fence = 0;
2611         u8 fence;
2612
2613         spin_lock_irqsave(&qp->sq.lock, flags);
2614
2615         for (nreq = 0; wr; nreq++, wr = wr->next) {
2616                 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
2617                         mlx5_ib_warn(dev, "invalid send opcode %d\n", wr->opcode);
2618                         err = -EINVAL;
2619                         *bad_wr = wr;
2620                         goto out;
2621                 }
2622
2623                 fence = qp->fm_cache;
2624                 num_sge = wr->num_sge;
2625                 if (unlikely(num_sge > qp->sq.max_gs)) {
2626                         mlx5_ib_warn(dev, "num_sge (%d) exceeds max_gs (%d)\n",
                                          num_sge, qp->sq.max_gs);
2627                         err = -ENOMEM;
2628                         *bad_wr = wr;
2629                         goto out;
2630                 }
2631
2632                 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
2633                 if (err) {
2634                         mlx5_ib_warn(dev, "send queue overflow\n");
2635                         err = -ENOMEM;
2636                         *bad_wr = wr;
2637                         goto out;
2638                 }
2639
2640                 switch (ibqp->qp_type) {
2641                 case IB_QPT_XRC_INI:
2642                         xrc = seg;
2643                         seg += sizeof(*xrc);
2644                         size += sizeof(*xrc) / 16;
2645                         /* fall through */
2646                 case IB_QPT_RC:
2647                         switch (wr->opcode) {
2648                         case IB_WR_RDMA_READ:
2649                         case IB_WR_RDMA_WRITE:
2650                         case IB_WR_RDMA_WRITE_WITH_IMM:
2651                                 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2652                                               rdma_wr(wr)->rkey);
2653                                 seg += sizeof(struct mlx5_wqe_raddr_seg);
2654                                 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2655                                 break;
2656
2657                         case IB_WR_ATOMIC_CMP_AND_SWP:
2658                         case IB_WR_ATOMIC_FETCH_AND_ADD:
2659                         case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
                             case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
2660                                 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2661                                 err = -ENOSYS;
2662                                 *bad_wr = wr;
2663                                 goto out;
2664
2665                         case IB_WR_LOCAL_INV:
2666                                 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2667                                 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2668                                 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
2669                                 set_linv_wr(qp, &seg, &size);
2670                                 num_sge = 0;
2671                                 break;
2672
2673                         case IB_WR_REG_MR:
2674                                 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2675                                 qp->sq.wr_data[idx] = IB_WR_REG_MR;
2676                                 ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
2677                                 err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
2678                                 if (err) {
2679                                         *bad_wr = wr;
2680                                         goto out;
2681                                 }
2682                                 num_sge = 0;
2683                                 break;
2684
2685                         case IB_WR_REG_SIG_MR:
2686                                 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
2687                                 mr = to_mmr(sig_handover_wr(wr)->sig_mr);
2688
2689                                 ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
2690                                 err = set_sig_umr_wr(wr, qp, &seg, &size);
2691                                 if (err) {
2692                                         mlx5_ib_warn(dev, "\n");
2693                                         *bad_wr = wr;
2694                                         goto out;
2695                                 }
2696
2697                                 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2698                                            nreq, get_fence(fence, wr),
2699                                            next_fence, MLX5_OPCODE_UMR);
2700                                 /*
2701                                  * SET_PSV WQEs are not signaled, and are
2702                                  * solicited only on error
2703                                  */
2704                                 wr->send_flags &= ~IB_SEND_SIGNALED;
2705                                 wr->send_flags |= IB_SEND_SOLICITED;
2706                                 err = begin_wqe(qp, &seg, &ctrl, wr,
2707                                                 &idx, &size, nreq);
2708                                 if (err) {
2709                                         mlx5_ib_warn(dev, "\n");
2710                                         err = -ENOMEM;
2711                                         *bad_wr = wr;
2712                                         goto out;
2713                                 }
2714
2715                                 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
2716                                                  mr->sig->psv_memory.psv_idx, &seg,
2717                                                  &size);
2718                                 if (err) {
2719                                         mlx5_ib_warn(dev, "\n");
2720                                         *bad_wr = wr;
2721                                         goto out;
2722                                 }
2723
2724                                 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2725                                            nreq, get_fence(fence, wr),
2726                                            next_fence, MLX5_OPCODE_SET_PSV);
2727                                 err = begin_wqe(qp, &seg, &ctrl, wr,
2728                                                 &idx, &size, nreq);
2729                                 if (err) {
2730                                         mlx5_ib_warn(dev, "\n");
2731                                         err = -ENOMEM;
2732                                         *bad_wr = wr;
2733                                         goto out;
2734                                 }
2735
2736                                 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2737                                 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
2738                                                  mr->sig->psv_wire.psv_idx, &seg,
2739                                                  &size);
2740                                 if (err) {
2741                                         mlx5_ib_warn(dev, "\n");
2742                                         *bad_wr = wr;
2743                                         goto out;
2744                                 }
2745
2746                                 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2747                                            nreq, get_fence(fence, wr),
2748                                            next_fence, MLX5_OPCODE_SET_PSV);
2749                                 num_sge = 0;
2750                                 goto skip_psv;
2751
2752                         default:
2753                                 break;
2754                         }
2755                         break;
2756
2757                 case IB_QPT_UC:
2758                         switch (wr->opcode) {
2759                         case IB_WR_RDMA_WRITE:
2760                         case IB_WR_RDMA_WRITE_WITH_IMM:
2761                                 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2762                                               rdma_wr(wr)->rkey);
2763                                 seg  += sizeof(struct mlx5_wqe_raddr_seg);
2764                                 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2765                                 break;
2766
2767                         default:
2768                                 break;
2769                         }
2770                         break;
2771
2772                 case IB_QPT_UD:
2773                 case IB_QPT_SMI:
2774                 case IB_QPT_GSI:
2775                         set_datagram_seg(seg, wr);
2776                         seg += sizeof(struct mlx5_wqe_datagram_seg);
2777                         size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2778                         if (unlikely((seg == qend)))
2779                                 seg = mlx5_get_send_wqe(qp, 0);
2780                         break;
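                        /* Editorial note: the send queue is a ring, so after
                         * each segment write the cursor may sit exactly at
                         * qend and must wrap back to the first WQE via
                         * mlx5_get_send_wqe(qp, 0) before anything else is
                         * written.
                         */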
2781
2782                 case MLX5_IB_QPT_REG_UMR:
2783                         if (wr->opcode != MLX5_IB_WR_UMR) {
2784                                 err = -EINVAL;
2785                                 mlx5_ib_warn(dev, "bad opcode\n");
2786                                 goto out;
2787                         }
2788                         qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
2789                         ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
2790                         set_reg_umr_segment(seg, wr);
2791                         seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2792                         size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2793                         if (unlikely(seg == qend))
2794                                 seg = mlx5_get_send_wqe(qp, 0);
2795                         set_reg_mkey_segment(seg, wr);
2796                         seg += sizeof(struct mlx5_mkey_seg);
2797                         size += sizeof(struct mlx5_mkey_seg) / 16;
2798                         if (unlikely(seg == qend))
2799                                 seg = mlx5_get_send_wqe(qp, 0);
2800                         break;
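                        /* Editorial note: a UMR WQE is a umr-ctrl segment
                         * followed by an mkey segment, with ctrl->imm
                         * carrying the mkey under update; each segment write
                         * re-checks for ring wrap as above.
                         */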
2801
2802                 default:
2803                         break;
2804                 }
2805
2806                 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2807                         int uninitialized_var(sz);
2808
2809                         err = set_data_inl_seg(qp, wr, seg, &sz);
2810                         if (unlikely(err)) {
2811                                 mlx5_ib_warn(dev, "failed to set inline data segment\n");
2812                                 *bad_wr = wr;
2813                                 goto out;
2814                         }
2815                         inl = 1;
2816                         size += sz;
2817                 } else {
2818                         dpseg = seg;
2819                         for (i = 0; i < num_sge; i++) {
2820                                 if (unlikely(dpseg == qend)) {
2821                                         seg = mlx5_get_send_wqe(qp, 0);
2822                                         dpseg = seg;
2823                                 }
2824                                 if (likely(wr->sg_list[i].length)) {
2825                                         set_data_ptr_seg(dpseg, wr->sg_list + i);
2826                                         size += sizeof(struct mlx5_wqe_data_seg) / 16;
2827                                         dpseg++;
2828                                 }
2829                         }
2830                 }
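                /* Editorial note: IB_SEND_INLINE copies the payload into the
                 * WQE itself (no lkey/DMA read at send time), while the
                 * scatter/gather path writes one mlx5_wqe_data_seg per
                 * non-empty SGE and silently skips zero-length entries.
                 */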
2831
2832                 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
2833                            get_fence(fence, wr), next_fence,
2834                            mlx5_ib_opcode[wr->opcode]);
2835 skip_psv:
2836                 if (0)  /* debugging dump, deliberately compiled out */
2837                         dump_wqe(qp, idx, size);
2838         }
2839
2840 out:
2841         if (likely(nreq)) {
2842                 qp->sq.head += nreq;
2843
2844                 /* Make sure that descriptors are written before
2845                  * updating doorbell record and ringing the doorbell
2846                  */
2847                 wmb();
2848
2849                 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2850
2851                 /* Make sure doorbell record is visible to the HCA before
2852                  * we hit doorbell */
2853                 wmb();
2854
2855                 if (bf->need_lock)
2856                         spin_lock(&bf->lock);
2857                 else
2858                         __acquire(&bf->lock);
2859
2860                 /* TBD enable WC */
2861                 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2862                         mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
2863                         /* wc_wmb(); */
2864                 } else {
2865                         mlx5_write64((__be32 *)ctrl, bf->reg + bf->offset,
2866                                      MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2867                         /* Make sure doorbells don't leak out of SQ spinlock
2868                          * and reach the HCA out of order.
2869                          */
2870                         mmiowb();
2871                 }
2872                 bf->offset ^= bf->buf_size;
2873                 if (bf->need_lock)
2874                         spin_unlock(&bf->lock);
2875                 else
2876                         __release(&bf->lock);
2877         }
2878
2879         spin_unlock_irqrestore(&qp->sq.lock, flags);
2880
2881         return err;
2882 }
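/* Editorial note on mlx5_ib_post_send(): the two wmb() barriers order
 * (1) descriptor writes before the doorbell-record update and (2) the
 * doorbell record before the MMIO doorbell write, so the HCA never
 * fetches a stale or half-written WQE.  The BlueFlame copy fast path is
 * compiled out ("if (0)" above); every ring currently goes through
 * mlx5_write64().
 */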
2883
2884 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
2885 {
2886         sig->signature = calc_sig(sig, size);
2887 }
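/* Editorial note: when wq_sig is enabled, the first segment of each
 * receive WQE is reserved for this mlx5_rwqe_sig, which is why
 * mlx5_ib_post_recv() below advances "scat" past it before writing the
 * data segments.
 */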
2888
2889 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2890                       struct ib_recv_wr **bad_wr)
2891 {
2892         struct mlx5_ib_qp *qp = to_mqp(ibqp);
2893         struct mlx5_wqe_data_seg *scat;
2894         struct mlx5_rwqe_sig *sig;
2895         unsigned long flags;
2896         int err = 0;
2897         int nreq;
2898         int ind;
2899         int i;
2900
2901         spin_lock_irqsave(&qp->rq.lock, flags);
2902
2903         ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2904
2905         for (nreq = 0; wr; nreq++, wr = wr->next) {
2906                 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2907                         err = -ENOMEM;
2908                         *bad_wr = wr;
2909                         goto out;
2910                 }
2911
2912                 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2913                         err = -EINVAL;
2914                         *bad_wr = wr;
2915                         goto out;
2916                 }
2917
2918                 scat = get_recv_wqe(qp, ind);
2919                 if (qp->wq_sig)
2920                         scat++;
2921
2922                 for (i = 0; i < wr->num_sge; i++)
2923                         set_data_ptr_seg(scat + i, wr->sg_list + i);
2924
2925                 if (i < qp->rq.max_gs) {
2926                         scat[i].byte_count = 0;
2927                         scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
2928                         scat[i].addr       = 0;
2929                 }
2930
2931                 if (qp->wq_sig) {
2932                         sig = (struct mlx5_rwqe_sig *)scat;
2933                         set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
2934                 }
2935
2936                 qp->rq.wrid[ind] = wr->wr_id;
2937
2938                 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
2939         }
2940
2941 out:
2942         if (likely(nreq)) {
2943                 qp->rq.head += nreq;
2944
2945                 /* Make sure that descriptors are written before
2946                  * doorbell record.
2947                  */
2948                 wmb();
2949
2950                 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
2951         }
2952
2953         spin_unlock_irqrestore(&qp->rq.lock, flags);
2954
2955         return err;
2956 }
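/* Editorial note -- an illustrative caller sketch, not part of this file.
 * It assumes a registered MR "mr" and a buffer "buf"/"len" from the
 * caller's context:
 *
 *	struct ib_sge sge = {
 *		.addr   = (u64)(uintptr_t)buf,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = { .wr_id = 1, .sg_list = &sge, .num_sge = 1 };
 *	struct ib_recv_wr *bad_wr;
 *
 *	if (ib_post_recv(qp, &wr, &bad_wr))
 *		pr_err("recv WR %llu not posted\n", bad_wr->wr_id);
 */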
2957
2958 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
2959 {
2960         switch (mlx5_state) {
2961         case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
2962         case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
2963         case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
2964         case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
2965         case MLX5_QP_STATE_SQ_DRAINING:
2966         case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
2967         case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
2968         case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
2969         default:                     return -1;
2970         }
2971 }
2972
2973 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
2974 {
2975         switch (mlx5_mig_state) {
2976         case MLX5_QP_PM_ARMED:          return IB_MIG_ARMED;
2977         case MLX5_QP_PM_REARM:          return IB_MIG_REARM;
2978         case MLX5_QP_PM_MIGRATED:       return IB_MIG_MIGRATED;
2979         default: return -1;
2980         }
2981 }
2982
2983 static int to_ib_qp_access_flags(int mlx5_flags)
2984 {
2985         int ib_flags = 0;
2986
2987         if (mlx5_flags & MLX5_QP_BIT_RRE)
2988                 ib_flags |= IB_ACCESS_REMOTE_READ;
2989         if (mlx5_flags & MLX5_QP_BIT_RWE)
2990                 ib_flags |= IB_ACCESS_REMOTE_WRITE;
2991         if (mlx5_flags & MLX5_QP_BIT_RAE)
2992                 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
2993
2994         return ib_flags;
2995 }
2996
2997 static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2998                                 struct mlx5_qp_path *path)
2999 {
3000         struct mlx5_core_dev *dev = ibdev->mdev;
3001
3002         memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
3003         ib_ah_attr->port_num      = path->port;
3004
3005         if (ib_ah_attr->port_num == 0 ||
3006             ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
3007                 return;
3008
3009         ib_ah_attr->sl = path->sl & 0xf;
3010
3011         ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
3012         ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
3013         ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
3014         ib_ah_attr->ah_flags      = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
3015         if (ib_ah_attr->ah_flags) {
3016                 ib_ah_attr->grh.sgid_index = path->mgid_index;
3017                 ib_ah_attr->grh.hop_limit  = path->hop_limit;
3018                 ib_ah_attr->grh.traffic_class =
3019                         (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
3020                 ib_ah_attr->grh.flow_label =
3021                         be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
3022                 memcpy(ib_ah_attr->grh.dgid.raw,
3023                        path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
3024         }
3025 }
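/* Editorial note: the decoding above mirrors how the path was packed on
 * modify -- static_rate carries a +5 bias in hardware (hence the
 * subtraction), bit 7 of grh_mlid flags a GRH, and tclass (bits 20..27)
 * shares the big-endian tclass_flowlabel word with the 20-bit flow
 * label.
 */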
3026
3027 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
3028                      struct ib_qp_init_attr *qp_init_attr)
3029 {
3030         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3031         struct mlx5_ib_qp *qp = to_mqp(ibqp);
3032         struct mlx5_query_qp_mbox_out *outb;
3033         struct mlx5_qp_context *context;
3034         int mlx5_state;
3035         int err = 0;
3036
3037 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3038         /*
3039          * Wait for any outstanding page faults, in case the user frees memory
3040          * based upon this query's result.
3041          */
3042         flush_workqueue(mlx5_ib_page_fault_wq);
3043 #endif
3044
3045         mutex_lock(&qp->mutex);
3046         outb = kzalloc(sizeof(*outb), GFP_KERNEL);
3047         if (!outb) {
3048                 err = -ENOMEM;
3049                 goto out;
3050         }
3051         context = &outb->ctx;
3052         err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
3053         if (err)
3054                 goto out_free;
3055
3056         mlx5_state = be32_to_cpu(context->flags) >> 28;
3057
3058         qp->state                    = to_ib_qp_state(mlx5_state);
3059         qp_attr->qp_state            = qp->state;
3060         qp_attr->path_mtu            = context->mtu_msgmax >> 5;
3061         qp_attr->path_mig_state      =
3062                 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
3063         qp_attr->qkey                = be32_to_cpu(context->qkey);
3064         qp_attr->rq_psn              = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
3065         qp_attr->sq_psn              = be32_to_cpu(context->next_send_psn) & 0xffffff;
3066         qp_attr->dest_qp_num         = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
3067         qp_attr->qp_access_flags     =
3068                 to_ib_qp_access_flags(be32_to_cpu(context->params2));
3069
3070         if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
3071                 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
3072                 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
3073                 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
3074                 qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
3075         }
3076
3077         qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
3078         qp_attr->port_num = context->pri_path.port;
3079
3080         /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
3081         qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
3082
3083         qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
3084
3085         qp_attr->max_dest_rd_atomic =
3086                 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
3087         qp_attr->min_rnr_timer      =
3088                 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
3089         qp_attr->timeout            = context->pri_path.ackto_lt >> 3;
3090         qp_attr->retry_cnt          = (be32_to_cpu(context->params1) >> 16) & 0x7;
3091         qp_attr->rnr_retry          = (be32_to_cpu(context->params1) >> 13) & 0x7;
3092         qp_attr->alt_timeout        = context->alt_path.ackto_lt >> 3;
3093         qp_attr->cur_qp_state        = qp_attr->qp_state;
3094         qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
3095         qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
3096
3097         if (!ibqp->uobject) {
3098                 qp_attr->cap.max_send_wr  = qp->sq.max_post;
3099                 qp_attr->cap.max_send_sge = qp->sq.max_gs;
3100                 qp_init_attr->qp_context = ibqp->qp_context;
3101         } else {
3102                 qp_attr->cap.max_send_wr  = 0;
3103                 qp_attr->cap.max_send_sge = 0;
3104         }
3105
3106         qp_init_attr->qp_type = ibqp->qp_type;
3107         qp_init_attr->recv_cq = ibqp->recv_cq;
3108         qp_init_attr->send_cq = ibqp->send_cq;
3109         qp_init_attr->srq = ibqp->srq;
3110         qp_attr->cap.max_inline_data = qp->max_inline_data;
3111
3112         qp_init_attr->cap            = qp_attr->cap;
3113
3114         qp_init_attr->create_flags = 0;
3115         if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
3116                 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
3117
3118         qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
3119                 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3120
3121 out_free:
3122         kfree(outb);
3123
3124 out:
3125         mutex_unlock(&qp->mutex);
3126         return err;
3127 }
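/* Editorial note -- an illustrative query, not part of this file:
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PATH_MTU, &init_attr))
 *		pr_info("qp %u: state %d mtu %d\n", qp->qp_num,
 *			attr.qp_state, attr.path_mtu);
 *
 * The mask is effectively advisory here: this implementation fills the
 * whole ib_qp_attr from a single firmware query regardless of
 * qp_attr_mask.
 */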
3128
3129 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3130                                           struct ib_ucontext *context,
3131                                           struct ib_udata *udata)
3132 {
3133         struct mlx5_ib_dev *dev = to_mdev(ibdev);
3134         struct mlx5_ib_xrcd *xrcd;
3135         int err;
3136
3137         if (!MLX5_CAP_GEN(dev->mdev, xrc))
3138                 return ERR_PTR(-ENOSYS);
3139
3140         xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
3141         if (!xrcd)
3142                 return ERR_PTR(-ENOMEM);
3143
3144         err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
3145         if (err) {
3146                 kfree(xrcd);
3147                 return ERR_PTR(err);
3148         }
3149
3150         return &xrcd->ibxrcd;
3151 }
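/* Editorial note: the XRC domain lives in firmware; the host keeps only
 * the xrcdn handle, so deallocation below just releases that handle and
 * frees the small wrapper struct.
 */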
3152
3153 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3154 {
3155         struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
3156         u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
3157         int err;
3158
3159         err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
3160         if (err) {
3161                 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
3162                 return err;
3163         }
3164
3165         kfree(xrcd);
3166
3167         return 0;
3168 }