2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/module.h>
34 #include <rdma/ib_umem.h>
38 /* not supported currently */
39 static int wq_signature;
42 MLX5_IB_ACK_REQ_FREQ = 8,
46 MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83,
47 MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
48 MLX5_IB_LINK_TYPE_IB = 0,
49 MLX5_IB_LINK_TYPE_ETH = 1
53 MLX5_IB_SQ_STRIDE = 6,
54 MLX5_IB_CACHE_LINE_SIZE = 64,
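/* Translation table from IB verbs send opcodes to the mlx5 WQE opcodes
 * programmed into the control segment when a work request is posted.
 */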
57 static const u32 mlx5_ib_opcode[] = {
58 [IB_WR_SEND] = MLX5_OPCODE_SEND,
59 [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
60 [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
61 [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
62 [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
63 [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
64 [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
65 [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
66 [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
67 [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
68 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
69 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
70 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
74 static int is_qp0(enum ib_qp_type qp_type)
76 return qp_type == IB_QPT_SMI;
79 static int is_sqp(enum ib_qp_type qp_type)
81 return is_qp0(qp_type) || is_qp1(qp_type);
84 static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
86 return mlx5_buf_offset(&qp->buf, offset);
89 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
91 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
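/* Send WQEs are addressed in units of the 64-byte basic block
 * (1 << MLX5_IB_SQ_STRIDE == MLX5_SEND_WQE_BB), independent of the
 * actual size of each posted WQE.
 */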
94 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
96 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
100 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
102 * @qp: QP to copy from.
103 * @send: copy from the send queue when non-zero, use the receive queue otherwise.
105 * @wqe_index: index to start copying from. For send work queues, the
106 * wqe_index is in units of MLX5_SEND_WQE_BB.
107 * For receive work queues, it is the index of the work
108 * queue element in the queue.
109 * @buffer: destination buffer.
110 * @length: maximum number of bytes to copy.
112 * Copies at least a single WQE, but may copy more data.
114 * Return: the number of bytes copied, or an error code.
116 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
117 void *buffer, u32 length)
119 struct ib_device *ibdev = qp->ibqp.device;
120 struct mlx5_ib_dev *dev = to_mdev(ibdev);
121 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
124 struct ib_umem *umem = qp->umem;
125 u32 first_copy_length;
129 if (wq->wqe_cnt == 0) {
130 mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
135 offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
136 wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
138 if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
141 if (offset > umem->length ||
142 (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
145 first_copy_length = min_t(u32, offset + length, wq_end) - offset;
146 ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
151 struct mlx5_wqe_ctrl_seg *ctrl = buffer;
152 int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
154 wqe_length = ds * MLX5_WQE_DS_UNITS;
156 wqe_length = 1 << wq->wqe_shift;
159 if (wqe_length <= first_copy_length)
160 return first_copy_length;
162 ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
163 wqe_length - first_copy_length);
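/* Asynchronous QP event handler hooked into mqp->event: translate the
 * firmware event type into an ib_event and hand it to the consumer's
 * event_handler, updating the active port on a path migration.
 */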
170 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
172 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
173 struct ib_event event;
175 if (type == MLX5_EVENT_TYPE_PATH_MIG)
176 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
178 if (ibqp->event_handler) {
179 event.device = ibqp->device;
180 event.element.qp = ibqp;
182 case MLX5_EVENT_TYPE_PATH_MIG:
183 event.event = IB_EVENT_PATH_MIG;
185 case MLX5_EVENT_TYPE_COMM_EST:
186 event.event = IB_EVENT_COMM_EST;
188 case MLX5_EVENT_TYPE_SQ_DRAINED:
189 event.event = IB_EVENT_SQ_DRAINED;
191 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
192 event.event = IB_EVENT_QP_LAST_WQE_REACHED;
194 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
195 event.event = IB_EVENT_QP_FATAL;
197 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
198 event.event = IB_EVENT_PATH_MIG_ERR;
200 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
201 event.event = IB_EVENT_QP_REQ_ERR;
203 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
204 event.event = IB_EVENT_QP_ACCESS_ERR;
207 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
211 ibqp->event_handler(&event, ibqp->qp_context);
215 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
216 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
221 /* Sanity check RQ size before proceeding */
222 if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
228 qp->rq.wqe_shift = 0;
229 cap->max_recv_wr = 0;
230 cap->max_recv_sge = 0;
233 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
234 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
235 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
236 qp->rq.max_post = qp->rq.wqe_cnt;
238 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
239 wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
240 wqe_size = roundup_pow_of_two(wqe_size);
241 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
242 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
243 qp->rq.wqe_cnt = wq_size / wqe_size;
244 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
245 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
247 MLX5_CAP_GEN(dev->mdev,
251 qp->rq.wqe_shift = ilog2(wqe_size);
252 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
253 qp->rq.max_post = qp->rq.wqe_cnt;
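/* Fixed per-WQE overhead in bytes for the given transport (control,
 * remote-address, atomic, datagram or UMR segments); data and inline
 * segments are added on top by calc_send_wqe().
 */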
260 static int sq_overhead(enum ib_qp_type qp_type)
266 size += sizeof(struct mlx5_wqe_xrc_seg);
269 size += sizeof(struct mlx5_wqe_ctrl_seg) +
270 sizeof(struct mlx5_wqe_atomic_seg) +
271 sizeof(struct mlx5_wqe_raddr_seg);
278 size += sizeof(struct mlx5_wqe_ctrl_seg) +
279 sizeof(struct mlx5_wqe_raddr_seg) +
280 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
281 sizeof(struct mlx5_mkey_seg);
287 size += sizeof(struct mlx5_wqe_ctrl_seg) +
288 sizeof(struct mlx5_wqe_datagram_seg);
291 case MLX5_IB_QPT_REG_UMR:
292 size += sizeof(struct mlx5_wqe_ctrl_seg) +
293 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
294 sizeof(struct mlx5_mkey_seg);
304 static int calc_send_wqe(struct ib_qp_init_attr *attr)
309 size = sq_overhead(attr->qp_type);
313 if (attr->cap.max_inline_data) {
314 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
315 attr->cap.max_inline_data;
318 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
319 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
320 ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
321 return MLX5_SIG_WQE_SIZE;
323 return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
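/* Illustrative sizing example (numbers are not taken from the code
 * above): an RC QP with two SGEs and no inline data needs
 * sq_overhead(RC) plus 2 * sizeof(struct mlx5_wqe_data_seg) per WQE,
 * rounded up to a whole number of 64-byte basic blocks; calc_sq_size()
 * then rounds max_send_wr * wqe_size up to a power of two and
 * expresses the result in basic blocks.
 */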
326 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
327 struct mlx5_ib_qp *qp)
332 if (!attr->cap.max_send_wr)
335 wqe_size = calc_send_wqe(attr);
336 mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
340 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
341 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
342 wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
346 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
347 sizeof(struct mlx5_wqe_inline_seg);
348 attr->cap.max_inline_data = qp->max_inline_data;
350 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
351 qp->signature_en = true;
353 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
354 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
355 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
356 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
358 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
361 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
362 qp->sq.max_gs = attr->cap.max_send_sge;
363 qp->sq.max_post = wq_size / wqe_size;
364 attr->cap.max_send_wr = qp->sq.max_post;
369 static int set_user_buf_size(struct mlx5_ib_dev *dev,
370 struct mlx5_ib_qp *qp,
371 struct mlx5_ib_create_qp *ucmd)
373 int desc_sz = 1 << qp->sq.wqe_shift;
375 if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
376 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
377 desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
381 if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
382 mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
383 ucmd->sq_wqe_count);
387 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
389 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
390 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
392 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
396 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
397 (qp->sq.wqe_cnt << 6);
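/* A QP owns a receive queue of its own only when it is not an XRC or
 * UMR QP, no SRQ is attached, and at least one receive WR was
 * requested.
 */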
402 static int qp_has_rq(struct ib_qp_init_attr *attr)
404 if (attr->qp_type == IB_QPT_XRC_INI ||
405 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
406 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
407 !attr->cap.max_recv_wr)
413 static int first_med_uuar(void)
418 static int next_uuar(int n)
422 while (((n % 4) & 2))
428 static int num_med_uuar(struct mlx5_uuar_info *uuari)
432 n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
433 uuari->num_low_latency_uuars - 1;
435 return n >= 0 ? n : 0;
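/* UUAR allocation helpers: next_uuar() skips the blue-flame slots
 * reserved for the fast path, medium-class registers start at
 * first_med_uuar(), and the dedicated low-latency (high-class)
 * registers are handed out from beyond them by first_hi_uuar().
 */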
438 static int max_uuari(struct mlx5_uuar_info *uuari)
440 return uuari->num_uars * 4;
443 static int first_hi_uuar(struct mlx5_uuar_info *uuari)
449 med = num_med_uuar(uuari);
450 for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
459 static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
463 for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
464 if (!test_bit(i, uuari->bitmap)) {
465 set_bit(i, uuari->bitmap);
474 static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
476 int minidx = first_med_uuar();
479 for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
480 if (uuari->count[i] < uuari->count[minidx])
484 uuari->count[minidx]++;
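/* Note: MLX5_IB_LATENCY_CLASS_HIGH denotes the dedicated low-latency
 * (blue-flame) registers, so callers fall back HIGH -> MEDIUM -> LOW
 * when allocation fails.
 */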
488 static int alloc_uuar(struct mlx5_uuar_info *uuari,
489 enum mlx5_ib_latency_class lat)
493 mutex_lock(&uuari->lock);
495 case MLX5_IB_LATENCY_CLASS_LOW:
497 uuari->count[uuarn]++;
500 case MLX5_IB_LATENCY_CLASS_MEDIUM:
504 uuarn = alloc_med_class_uuar(uuari);
507 case MLX5_IB_LATENCY_CLASS_HIGH:
511 uuarn = alloc_high_class_uuar(uuari);
514 case MLX5_IB_LATENCY_CLASS_FAST_PATH:
518 mutex_unlock(&uuari->lock);
523 static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
525 clear_bit(uuarn, uuari->bitmap);
526 --uuari->count[uuarn];
529 static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
531 clear_bit(uuarn, uuari->bitmap);
532 --uuari->count[uuarn];
535 static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
537 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
538 int high_uuar = nuuars - uuari->num_low_latency_uuars;
540 mutex_lock(&uuari->lock);
542 --uuari->count[uuarn];
546 if (uuarn < high_uuar) {
547 free_med_class_uuar(uuari, uuarn);
551 free_high_class_uuar(uuari, uuarn);
554 mutex_unlock(&uuari->lock);
557 static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
560 case IB_QPS_RESET: return MLX5_QP_STATE_RST;
561 case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
562 case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
563 case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
564 case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
565 case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
566 case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
571 static int to_mlx5_st(enum ib_qp_type type)
574 case IB_QPT_RC: return MLX5_QP_ST_RC;
575 case IB_QPT_UC: return MLX5_QP_ST_UC;
576 case IB_QPT_UD: return MLX5_QP_ST_UD;
577 case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR;
579 case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
580 case IB_QPT_SMI: return MLX5_QP_ST_QP0;
581 case IB_QPT_GSI: return MLX5_QP_ST_QP1;
582 case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
583 case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
584 case IB_QPT_RAW_PACKET:
586 default: return -EINVAL;
590 static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
592 return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
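/* create_user_qp(): pin the user-supplied WQ buffer, pick a UUAR for
 * the doorbell, map the user doorbell record and fill the CREATE_QP
 * mailbox (PAS array plus QP context) for the firmware command.
 */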
595 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
596 struct mlx5_ib_qp *qp, struct ib_udata *udata,
597 struct mlx5_create_qp_mbox_in **in,
598 struct mlx5_ib_create_qp_resp *resp, int *inlen)
600 struct mlx5_ib_ucontext *context;
601 struct mlx5_ib_create_qp ucmd;
610 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
612 mlx5_ib_dbg(dev, "copy failed\n");
616 context = to_mucontext(pd->uobject->context);
618 * TBD: should come from the verbs when we have the API
620 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
622 mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
623 mlx5_ib_dbg(dev, "reverting to medium latency\n");
624 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
626 mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
627 mlx5_ib_dbg(dev, "reverting to high latency\n");
628 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
630 mlx5_ib_warn(dev, "uuar allocation failed\n");
636 uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
637 mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
640 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
641 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
643 err = set_user_buf_size(dev, qp, &ucmd);
647 if (ucmd.buf_addr && qp->buf_size) {
648 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
650 if (IS_ERR(qp->umem)) {
651 mlx5_ib_dbg(dev, "umem_get failed\n");
652 err = PTR_ERR(qp->umem);
660 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
662 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
664 mlx5_ib_warn(dev, "bad offset\n");
667 mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
668 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
671 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
672 *in = mlx5_vzalloc(*inlen);
678 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
679 (*in)->ctx.log_pg_sz_remote_qpn =
680 cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
681 (*in)->ctx.params2 = cpu_to_be32(offset << 6);
683 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
684 resp->uuar_index = uuarn;
687 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
689 mlx5_ib_dbg(dev, "map failed\n");
693 err = ib_copy_to_udata(udata, resp, sizeof(*resp));
695 mlx5_ib_dbg(dev, "copy failed\n");
698 qp->create_type = MLX5_QP_USER;
703 mlx5_ib_db_unmap_user(context, &qp->db);
710 ib_umem_release(qp->umem);
713 free_uuar(&context->uuari, uuarn);
717 static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
719 struct mlx5_ib_ucontext *context;
721 context = to_mucontext(pd->uobject->context);
722 mlx5_ib_db_unmap_user(context, &qp->db);
724 ib_umem_release(qp->umem);
725 free_uuar(&context->uuari, qp->uuarn);
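/* create_kernel_qp(): same role as create_user_qp() for in-kernel
 * consumers - the WQ buffer, doorbell record and the wrid/wqe_head
 * bookkeeping arrays are allocated by the driver itself.
 */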
728 static int create_kernel_qp(struct mlx5_ib_dev *dev,
729 struct ib_qp_init_attr *init_attr,
730 struct mlx5_ib_qp *qp,
731 struct mlx5_create_qp_mbox_in **in, int *inlen)
733 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
734 struct mlx5_uuar_info *uuari;
739 uuari = &dev->mdev->priv.uuari;
740 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
743 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
744 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
746 uuarn = alloc_uuar(uuari, lc);
748 mlx5_ib_dbg(dev, "failed to allocate uuar for kernel QP\n");
752 qp->bf = &uuari->bfs[uuarn];
753 uar_index = qp->bf->uar->index;
755 err = calc_sq_size(dev, init_attr, qp);
757 mlx5_ib_dbg(dev, "err %d\n", err);
762 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
763 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
765 err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
767 mlx5_ib_dbg(dev, "err %d\n", err);
771 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
772 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
773 *in = mlx5_vzalloc(*inlen);
778 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
779 (*in)->ctx.log_pg_sz_remote_qpn =
780 cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
781 /* Set "fast registration enabled" for all kernel QPs */
782 (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
783 (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
785 mlx5_fill_page_array(&qp->buf, (*in)->pas);
787 err = mlx5_db_alloc(dev->mdev, &qp->db);
789 mlx5_ib_dbg(dev, "err %d\n", err);
793 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
794 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
795 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
796 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
797 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
799 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
800 !qp->sq.w_list || !qp->sq.wqe_head) {
804 qp->create_type = MLX5_QP_KERNEL;
809 mlx5_db_free(dev->mdev, &qp->db);
810 kfree(qp->sq.wqe_head);
811 kfree(qp->sq.w_list);
813 kfree(qp->sq.wr_data);
820 mlx5_buf_free(dev->mdev, &qp->buf);
823 free_uuar(&dev->mdev->priv.uuari, uuarn);
827 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
829 mlx5_db_free(dev->mdev, &qp->db);
830 kfree(qp->sq.wqe_head);
831 kfree(qp->sq.w_list);
833 kfree(qp->sq.wr_data);
835 mlx5_buf_free(dev->mdev, &qp->buf);
836 free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
839 static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
841 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
842 (attr->qp_type == IB_QPT_XRC_INI))
843 return cpu_to_be32(MLX5_SRQ_RQ);
844 else if (!qp->has_rq)
845 return cpu_to_be32(MLX5_ZERO_LEN_RQ);
847 return cpu_to_be32(MLX5_NON_ZERO_RQ);
850 static int is_connected(enum ib_qp_type qp_type)
852 if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
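/* create_qp_common(): shared creation path for all QP types - size the
 * work queues, delegate buffer setup to create_user_qp() or
 * create_kernel_qp(), build the context and issue the CREATE_QP
 * command.
 */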
858 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
859 struct ib_qp_init_attr *init_attr,
860 struct ib_udata *udata, struct mlx5_ib_qp *qp)
862 struct mlx5_ib_resources *devr = &dev->devr;
863 struct mlx5_core_dev *mdev = dev->mdev;
864 struct mlx5_ib_create_qp_resp resp;
865 struct mlx5_create_qp_mbox_in *in;
866 struct mlx5_ib_create_qp ucmd;
867 int inlen = sizeof(*in);
870 mlx5_ib_odp_create_qp(qp);
872 mutex_init(&qp->mutex);
873 spin_lock_init(&qp->sq.lock);
874 spin_lock_init(&qp->rq.lock);
876 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
877 if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
878 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
881 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
885 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
886 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
888 if (pd && pd->uobject) {
889 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
890 mlx5_ib_dbg(dev, "copy failed\n");
894 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
895 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
897 qp->wq_sig = !!wq_signature;
900 qp->has_rq = qp_has_rq(init_attr);
901 err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
902 qp, (pd && pd->uobject) ? &ucmd : NULL);
904 mlx5_ib_dbg(dev, "err %d\n", err);
911 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
912 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
913 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
914 ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
915 mlx5_ib_dbg(dev, "invalid rq params\n");
918 if (ucmd.sq_wqe_count > max_wqes) {
919 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
920 ucmd.sq_wqe_count, max_wqes);
923 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
925 mlx5_ib_dbg(dev, "err %d\n", err);
927 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
929 mlx5_ib_dbg(dev, "err %d\n", err);
935 in = mlx5_vzalloc(sizeof(*in));
939 qp->create_type = MLX5_QP_EMPTY;
942 if (is_sqp(init_attr->qp_type))
943 qp->port = init_attr->port_num;
945 in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
946 MLX5_QP_PM_MIGRATED << 11);
948 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
949 in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
951 in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
954 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
956 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
957 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
959 if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
963 rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
964 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
967 in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
969 in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
971 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
973 in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
975 in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
979 if (qp->rq.wqe_cnt) {
980 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
981 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
984 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
987 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
989 in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
991 /* Set default resources */
992 switch (init_attr->qp_type) {
994 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
995 in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
996 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
997 in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
1000 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
1001 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
1002 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
1005 if (init_attr->srq) {
1006 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
1007 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
1009 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
1010 in->ctx.rq_type_srqn |=
1011 cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
1015 if (init_attr->send_cq)
1016 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
1018 if (init_attr->recv_cq)
1019 in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
1021 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
1023 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
1025 mlx5_ib_dbg(dev, "create qp failed\n");
1030 /* Hardware wants QPN written in big-endian order (after
1031 * shifting) for send doorbell. Precompute this value to save
1032 * a little bit when posting sends.
1034 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
1036 qp->mqp.event = mlx5_ib_qp_event;
1041 if (qp->create_type == MLX5_QP_USER)
1042 destroy_qp_user(pd, qp);
1043 else if (qp->create_type == MLX5_QP_KERNEL)
1044 destroy_qp_kernel(dev, qp);
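/* Take both CQ locks in a fixed, QP-independent order (by CQN) so two
 * threads destroying QPs that share CQs cannot deadlock; the
 * __acquire()/__release() annotations keep sparse happy for the
 * aliased and NULL CQ cases.
 */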
1050 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
1051 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1055 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1056 spin_lock_irq(&send_cq->lock);
1057 spin_lock_nested(&recv_cq->lock,
1058 SINGLE_DEPTH_NESTING);
1059 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
1060 spin_lock_irq(&send_cq->lock);
1061 __acquire(&recv_cq->lock);
1063 spin_lock_irq(&recv_cq->lock);
1064 spin_lock_nested(&send_cq->lock,
1065 SINGLE_DEPTH_NESTING);
1068 spin_lock_irq(&send_cq->lock);
1069 __acquire(&recv_cq->lock);
1071 } else if (recv_cq) {
1072 spin_lock_irq(&recv_cq->lock);
1073 __acquire(&send_cq->lock);
1075 __acquire(&send_cq->lock);
1076 __acquire(&recv_cq->lock);
1080 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
1081 __releases(&send_cq->lock) __releases(&recv_cq->lock)
1085 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1086 spin_unlock(&recv_cq->lock);
1087 spin_unlock_irq(&send_cq->lock);
1088 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
1089 __release(&recv_cq->lock);
1090 spin_unlock_irq(&send_cq->lock);
1092 spin_unlock(&send_cq->lock);
1093 spin_unlock_irq(&recv_cq->lock);
1096 __release(&recv_cq->lock);
1097 spin_unlock_irq(&send_cq->lock);
1099 } else if (recv_cq) {
1100 __release(&send_cq->lock);
1101 spin_unlock_irq(&recv_cq->lock);
1103 __release(&recv_cq->lock);
1104 __release(&send_cq->lock);
1108 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
1110 return to_mpd(qp->ibqp.pd);
1113 static void get_cqs(struct mlx5_ib_qp *qp,
1114 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
1116 switch (qp->ibqp.qp_type) {
1117 case IB_QPT_XRC_TGT:
1121 case MLX5_IB_QPT_REG_UMR:
1122 case IB_QPT_XRC_INI:
1123 *send_cq = to_mcq(qp->ibqp.send_cq);
1132 case IB_QPT_RAW_IPV6:
1133 case IB_QPT_RAW_ETHERTYPE:
1134 *send_cq = to_mcq(qp->ibqp.send_cq);
1135 *recv_cq = to_mcq(qp->ibqp.recv_cq);
1138 case IB_QPT_RAW_PACKET:
1147 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1149 struct mlx5_ib_cq *send_cq, *recv_cq;
1150 struct mlx5_modify_qp_mbox_in *in;
1153 in = kzalloc(sizeof(*in), GFP_KERNEL);
1157 if (qp->state != IB_QPS_RESET) {
1158 mlx5_ib_qp_disable_pagefaults(qp);
1159 if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
1160 MLX5_QP_STATE_RST, in, 0, &qp->mqp))
1161 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
1165 get_cqs(qp, &send_cq, &recv_cq);
1167 if (qp->create_type == MLX5_QP_KERNEL) {
1168 mlx5_ib_lock_cqs(send_cq, recv_cq);
1169 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1170 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1171 if (send_cq != recv_cq)
1172 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1173 mlx5_ib_unlock_cqs(send_cq, recv_cq);
1176 err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
1178 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
1182 if (qp->create_type == MLX5_QP_KERNEL)
1183 destroy_qp_kernel(dev, qp);
1184 else if (qp->create_type == MLX5_QP_USER)
1185 destroy_qp_user(&get_pd(qp)->ibpd, qp);
1188 static const char *ib_qp_type_str(enum ib_qp_type type)
1192 return "IB_QPT_SMI";
1194 return "IB_QPT_GSI";
1201 case IB_QPT_RAW_IPV6:
1202 return "IB_QPT_RAW_IPV6";
1203 case IB_QPT_RAW_ETHERTYPE:
1204 return "IB_QPT_RAW_ETHERTYPE";
1205 case IB_QPT_XRC_INI:
1206 return "IB_QPT_XRC_INI";
1207 case IB_QPT_XRC_TGT:
1208 return "IB_QPT_XRC_TGT";
1209 case IB_QPT_RAW_PACKET:
1210 return "IB_QPT_RAW_PACKET";
1211 case MLX5_IB_QPT_REG_UMR:
1212 return "MLX5_IB_QPT_REG_UMR";
1215 return "Invalid QP type";
1219 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1220 struct ib_qp_init_attr *init_attr,
1221 struct ib_udata *udata)
1223 struct mlx5_ib_dev *dev;
1224 struct mlx5_ib_qp *qp;
1229 dev = to_mdev(pd->device);
1231 /* being cautious here */
1232 if (init_attr->qp_type != IB_QPT_XRC_TGT &&
1233 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
1234 pr_warn("%s: no PD for transport %s\n", __func__,
1235 ib_qp_type_str(init_attr->qp_type));
1236 return ERR_PTR(-EINVAL);
1238 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1241 switch (init_attr->qp_type) {
1242 case IB_QPT_XRC_TGT:
1243 case IB_QPT_XRC_INI:
1244 if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
1245 mlx5_ib_dbg(dev, "XRC not supported\n");
1246 return ERR_PTR(-ENOSYS);
1248 init_attr->recv_cq = NULL;
1249 if (init_attr->qp_type == IB_QPT_XRC_TGT) {
1250 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1251 init_attr->send_cq = NULL;
1260 case MLX5_IB_QPT_REG_UMR:
1261 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1263 return ERR_PTR(-ENOMEM);
1265 err = create_qp_common(dev, pd, init_attr, udata, qp);
1267 mlx5_ib_dbg(dev, "create_qp_common failed\n");
1269 return ERR_PTR(err);
1272 if (is_qp0(init_attr->qp_type))
1273 qp->ibqp.qp_num = 0;
1274 else if (is_qp1(init_attr->qp_type))
1275 qp->ibqp.qp_num = 1;
1277 qp->ibqp.qp_num = qp->mqp.qpn;
1279 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1280 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
1281 to_mcq(init_attr->send_cq)->mcq.cqn);
1287 case IB_QPT_RAW_IPV6:
1288 case IB_QPT_RAW_ETHERTYPE:
1289 case IB_QPT_RAW_PACKET:
1292 mlx5_ib_dbg(dev, "unsupported qp type %d\n",
1293 init_attr->qp_type);
1294 /* Don't support raw QPs */
1295 return ERR_PTR(-EINVAL);
1301 int mlx5_ib_destroy_qp(struct ib_qp *qp)
1303 struct mlx5_ib_dev *dev = to_mdev(qp->device);
1304 struct mlx5_ib_qp *mqp = to_mqp(qp);
1306 destroy_qp_common(dev, mqp);
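/* Fold the IB access flags and the responder RD-atomic depth into QP
 * context bits (RRE/RAE/RWE); with a zero responder depth only remote
 * writes stay enabled.
 */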
1313 static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
1316 u32 hw_access_flags = 0;
1320 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1321 dest_rd_atomic = attr->max_dest_rd_atomic;
1323 dest_rd_atomic = qp->resp_depth;
1325 if (attr_mask & IB_QP_ACCESS_FLAGS)
1326 access_flags = attr->qp_access_flags;
1328 access_flags = qp->atomic_rd_en;
1330 if (!dest_rd_atomic)
1331 access_flags &= IB_ACCESS_REMOTE_WRITE;
1333 if (access_flags & IB_ACCESS_REMOTE_READ)
1334 hw_access_flags |= MLX5_QP_BIT_RRE;
1335 if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1336 hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
1337 if (access_flags & IB_ACCESS_REMOTE_WRITE)
1338 hw_access_flags |= MLX5_QP_BIT_RWE;
1340 return cpu_to_be32(hw_access_flags);
1344 MLX5_PATH_FLAG_FL = 1 << 0,
1345 MLX5_PATH_FLAG_FREE_AR = 1 << 1,
1346 MLX5_PATH_FLAG_COUNTER = 1 << 2,
1349 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1351 if (rate == IB_RATE_PORT_CURRENT) {
1353 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
1356 while (rate != IB_RATE_2_5_GBPS &&
1357 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
1358 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
1362 return rate + MLX5_STAT_RATE_OFFSET;
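/* Translate an ib_ah_attr (LID, SL, GRH fields, static rate) into a
 * mlx5_qp_path entry, used for both the primary and alternate paths.
 */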
1365 static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1366 struct mlx5_qp_path *path, u8 port, int attr_mask,
1367 u32 path_flags, const struct ib_qp_attr *attr)
1371 path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
1372 path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
1374 if (attr_mask & IB_QP_PKEY_INDEX)
1375 path->pkey_index = attr->pkey_index;
1377 path->grh_mlid = ah->src_path_bits & 0x7f;
1378 path->rlid = cpu_to_be16(ah->dlid);
1380 if (ah->ah_flags & IB_AH_GRH) {
1381 if (ah->grh.sgid_index >=
1382 dev->mdev->port_caps[port - 1].gid_table_len) {
1383 pr_err("sgid_index (%u) too large. max is %d\n",
1385 dev->mdev->port_caps[port - 1].gid_table_len);
1388 path->grh_mlid |= 1 << 7;
1389 path->mgid_index = ah->grh.sgid_index;
1390 path->hop_limit = ah->grh.hop_limit;
1391 path->tclass_flowlabel =
1392 cpu_to_be32((ah->grh.traffic_class << 20) |
1393 (ah->grh.flow_label));
1394 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1397 err = ib_rate_to_mlx5(dev, ah->static_rate);
1400 path->static_rate = err;
1403 if (attr_mask & IB_QP_TIMEOUT)
1404 path->ackto_lt = attr->timeout << 3;
1406 path->sl = ah->sl & 0xf;
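/* opt_mask[cur][new][st]: for each legal state transition and service
 * type, the optional QP context fields the firmware accepts; it is
 * ANDed with the mask derived from the verbs attr_mask below.
 */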
1411 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
1412 [MLX5_QP_STATE_INIT] = {
1413 [MLX5_QP_STATE_INIT] = {
1414 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1415 MLX5_QP_OPTPAR_RAE |
1416 MLX5_QP_OPTPAR_RWE |
1417 MLX5_QP_OPTPAR_PKEY_INDEX |
1418 MLX5_QP_OPTPAR_PRI_PORT,
1419 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1420 MLX5_QP_OPTPAR_PKEY_INDEX |
1421 MLX5_QP_OPTPAR_PRI_PORT,
1422 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1423 MLX5_QP_OPTPAR_Q_KEY |
1424 MLX5_QP_OPTPAR_PRI_PORT,
1426 [MLX5_QP_STATE_RTR] = {
1427 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1428 MLX5_QP_OPTPAR_RRE |
1429 MLX5_QP_OPTPAR_RAE |
1430 MLX5_QP_OPTPAR_RWE |
1431 MLX5_QP_OPTPAR_PKEY_INDEX,
1432 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1433 MLX5_QP_OPTPAR_RWE |
1434 MLX5_QP_OPTPAR_PKEY_INDEX,
1435 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1436 MLX5_QP_OPTPAR_Q_KEY,
1437 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
1438 MLX5_QP_OPTPAR_Q_KEY,
1439 [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1440 MLX5_QP_OPTPAR_RRE |
1441 MLX5_QP_OPTPAR_RAE |
1442 MLX5_QP_OPTPAR_RWE |
1443 MLX5_QP_OPTPAR_PKEY_INDEX,
1446 [MLX5_QP_STATE_RTR] = {
1447 [MLX5_QP_STATE_RTS] = {
1448 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1449 MLX5_QP_OPTPAR_RRE |
1450 MLX5_QP_OPTPAR_RAE |
1451 MLX5_QP_OPTPAR_RWE |
1452 MLX5_QP_OPTPAR_PM_STATE |
1453 MLX5_QP_OPTPAR_RNR_TIMEOUT,
1454 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1455 MLX5_QP_OPTPAR_RWE |
1456 MLX5_QP_OPTPAR_PM_STATE,
1457 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1460 [MLX5_QP_STATE_RTS] = {
1461 [MLX5_QP_STATE_RTS] = {
1462 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1463 MLX5_QP_OPTPAR_RAE |
1464 MLX5_QP_OPTPAR_RWE |
1465 MLX5_QP_OPTPAR_RNR_TIMEOUT |
1466 MLX5_QP_OPTPAR_PM_STATE |
1467 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1468 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1469 MLX5_QP_OPTPAR_PM_STATE |
1470 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1471 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
1472 MLX5_QP_OPTPAR_SRQN |
1473 MLX5_QP_OPTPAR_CQN_RCV,
1476 [MLX5_QP_STATE_SQER] = {
1477 [MLX5_QP_STATE_RTS] = {
1478 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1479 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1480 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
1481 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
1482 MLX5_QP_OPTPAR_RWE |
1483 MLX5_QP_OPTPAR_RAE |
1489 static int ib_nr_to_mlx5_nr(int ib_mask)
1494 case IB_QP_CUR_STATE:
1496 case IB_QP_EN_SQD_ASYNC_NOTIFY:
1498 case IB_QP_ACCESS_FLAGS:
1499 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
1501 case IB_QP_PKEY_INDEX:
1502 return MLX5_QP_OPTPAR_PKEY_INDEX;
1504 return MLX5_QP_OPTPAR_PRI_PORT;
1506 return MLX5_QP_OPTPAR_Q_KEY;
1508 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
1509 MLX5_QP_OPTPAR_PRI_PORT;
1510 case IB_QP_PATH_MTU:
1513 return MLX5_QP_OPTPAR_ACK_TIMEOUT;
1514 case IB_QP_RETRY_CNT:
1515 return MLX5_QP_OPTPAR_RETRY_COUNT;
1516 case IB_QP_RNR_RETRY:
1517 return MLX5_QP_OPTPAR_RNR_RETRY;
1520 case IB_QP_MAX_QP_RD_ATOMIC:
1521 return MLX5_QP_OPTPAR_SRA_MAX;
1522 case IB_QP_ALT_PATH:
1523 return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
1524 case IB_QP_MIN_RNR_TIMER:
1525 return MLX5_QP_OPTPAR_RNR_TIMEOUT;
1528 case IB_QP_MAX_DEST_RD_ATOMIC:
1529 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
1530 MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
1531 case IB_QP_PATH_MIG_STATE:
1532 return MLX5_QP_OPTPAR_PM_STATE;
1535 case IB_QP_DEST_QPN:
1541 static int ib_mask_to_mlx5_opt(int ib_mask)
1546 for (i = 0; i < 8 * sizeof(int); i++) {
1547 if ((1 << i) & ib_mask)
1548 result |= ib_nr_to_mlx5_nr(1 << i);
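/* __mlx5_ib_modify_qp(): build the mlx5_qp_context for the requested
 * transition and post the MODIFY_QP command; page-fault handling is
 * quiesced around transitions into RESET or ERR.
 */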
1554 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1555 const struct ib_qp_attr *attr, int attr_mask,
1556 enum ib_qp_state cur_state, enum ib_qp_state new_state)
1558 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1559 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1560 struct mlx5_ib_cq *send_cq, *recv_cq;
1561 struct mlx5_qp_context *context;
1562 struct mlx5_modify_qp_mbox_in *in;
1563 struct mlx5_ib_pd *pd;
1564 enum mlx5_qp_state mlx5_cur, mlx5_new;
1565 enum mlx5_qp_optpar optpar;
1570 in = kzalloc(sizeof(*in), GFP_KERNEL);
1575 err = to_mlx5_st(ibqp->qp_type);
1579 context->flags = cpu_to_be32(err << 16);
1581 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1582 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1584 switch (attr->path_mig_state) {
1585 case IB_MIG_MIGRATED:
1586 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1589 context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
1592 context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
1597 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1598 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
1599 } else if (ibqp->qp_type == IB_QPT_UD ||
1600 ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
1601 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1602 } else if (attr_mask & IB_QP_PATH_MTU) {
1603 if (attr->path_mtu < IB_MTU_256 ||
1604 attr->path_mtu > IB_MTU_4096) {
1605 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
1609 context->mtu_msgmax = (attr->path_mtu << 5) |
1610 (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
1613 if (attr_mask & IB_QP_DEST_QPN)
1614 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
1616 if (attr_mask & IB_QP_PKEY_INDEX)
1617 context->pri_path.pkey_index = attr->pkey_index;
1619 /* todo implement counter_index functionality */
1621 if (is_sqp(ibqp->qp_type))
1622 context->pri_path.port = qp->port;
1624 if (attr_mask & IB_QP_PORT)
1625 context->pri_path.port = attr->port_num;
1627 if (attr_mask & IB_QP_AV) {
1628 err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
1629 attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
1630 attr_mask, 0, attr);
1635 if (attr_mask & IB_QP_TIMEOUT)
1636 context->pri_path.ackto_lt |= attr->timeout << 3;
1638 if (attr_mask & IB_QP_ALT_PATH) {
1639 err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1640 attr->alt_port_num, attr_mask, 0, attr);
1646 get_cqs(qp, &send_cq, &recv_cq);
1648 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
1649 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
1650 context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
1651 context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
1653 if (attr_mask & IB_QP_RNR_RETRY)
1654 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1656 if (attr_mask & IB_QP_RETRY_CNT)
1657 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1659 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1660 if (attr->max_rd_atomic)
1662 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1665 if (attr_mask & IB_QP_SQ_PSN)
1666 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1668 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1669 if (attr->max_dest_rd_atomic)
1671 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1674 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
1675 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
1677 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1678 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1680 if (attr_mask & IB_QP_RQ_PSN)
1681 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1683 if (attr_mask & IB_QP_QKEY)
1684 context->qkey = cpu_to_be32(attr->qkey);
1686 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1687 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1689 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
1690 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1695 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1696 context->sq_crq_size |= cpu_to_be16(1 << 4);
1699 mlx5_cur = to_mlx5_state(cur_state);
1700 mlx5_new = to_mlx5_state(new_state);
1701 mlx5_st = to_mlx5_st(ibqp->qp_type);
1705 /* If moving to a reset or error state, we must disable page faults on
1706 * this QP and flush all current page faults. Otherwise a stale page
1707 * fault may attempt to work on this QP after it is reset and moved
1708 * again to RTS, and may cause the driver and the device to get out of
1709 * sync. */
1710 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
1711 (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
1712 mlx5_ib_qp_disable_pagefaults(qp);
1714 optpar = ib_mask_to_mlx5_opt(attr_mask);
1715 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1716 in->optparam = cpu_to_be32(optpar);
1717 err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
1718 to_mlx5_state(new_state), in, sqd_event,
1723 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1724 mlx5_ib_qp_enable_pagefaults(qp);
1726 qp->state = new_state;
1728 if (attr_mask & IB_QP_ACCESS_FLAGS)
1729 qp->atomic_rd_en = attr->qp_access_flags;
1730 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1731 qp->resp_depth = attr->max_dest_rd_atomic;
1732 if (attr_mask & IB_QP_PORT)
1733 qp->port = attr->port_num;
1734 if (attr_mask & IB_QP_ALT_PATH)
1735 qp->alt_port = attr->alt_port_num;
1738 * If we moved a kernel QP to RESET, clean up all old CQ
1739 * entries and reinitialize the QP.
1741 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1742 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1743 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1744 if (send_cq != recv_cq)
1745 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1751 qp->sq.cur_post = 0;
1752 qp->sq.last_poll = 0;
1753 qp->db.db[MLX5_RCV_DBR] = 0;
1754 qp->db.db[MLX5_SND_DBR] = 0;
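/* mlx5_ib_modify_qp(): validate the requested transition and attribute
 * ranges against device capabilities, then perform it under the QP
 * mutex via __mlx5_ib_modify_qp().
 */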
1762 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1763 int attr_mask, struct ib_udata *udata)
1765 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1766 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1767 enum ib_qp_state cur_state, new_state;
1771 mutex_lock(&qp->mutex);
1773 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1774 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1776 if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
1777 !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
1778 IB_LINK_LAYER_UNSPECIFIED))
1781 if ((attr_mask & IB_QP_PORT) &&
1782 (attr->port_num == 0 ||
1783 attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
1786 if (attr_mask & IB_QP_PKEY_INDEX) {
1787 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1788 if (attr->pkey_index >=
1789 dev->mdev->port_caps[port - 1].pkey_table_len)
1793 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1794 attr->max_rd_atomic >
1795 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
1798 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1799 attr->max_dest_rd_atomic >
1800 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
1803 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1808 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1811 mutex_unlock(&qp->mutex);
1815 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1817 struct mlx5_ib_cq *cq;
1820 cur = wq->head - wq->tail;
1821 if (likely(cur + nreq < wq->max_post))
1825 spin_lock(&cq->lock);
1826 cur = wq->head - wq->tail;
1827 spin_unlock(&cq->lock);
1829 return cur + nreq >= wq->max_post;
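/* The helpers below each build one segment of a send WQE (remote
 * address, datagram, data pointer, UMR control, mkey, ...); the send
 * path advances its segment pointer and accounts sizes in units of
 * 16 bytes.
 */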
1832 static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1833 u64 remote_addr, u32 rkey)
1835 rseg->raddr = cpu_to_be64(remote_addr);
1836 rseg->rkey = cpu_to_be32(rkey);
1840 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1841 struct ib_send_wr *wr)
1843 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
1844 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
1845 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
1848 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1850 dseg->byte_count = cpu_to_be32(sg->length);
1851 dseg->lkey = cpu_to_be32(sg->lkey);
1852 dseg->addr = cpu_to_be64(sg->addr);
1855 static __be16 get_klm_octo(int npages)
1857 return cpu_to_be16(ALIGN(npages, 8) / 2);
1860 static __be64 frwr_mkey_mask(void)
1864 result = MLX5_MKEY_MASK_LEN |
1865 MLX5_MKEY_MASK_PAGE_SIZE |
1866 MLX5_MKEY_MASK_START_ADDR |
1867 MLX5_MKEY_MASK_EN_RINVAL |
1868 MLX5_MKEY_MASK_KEY |
1874 MLX5_MKEY_MASK_SMALL_FENCE |
1875 MLX5_MKEY_MASK_FREE;
1877 return cpu_to_be64(result);
1880 static __be64 sig_mkey_mask(void)
1884 result = MLX5_MKEY_MASK_LEN |
1885 MLX5_MKEY_MASK_PAGE_SIZE |
1886 MLX5_MKEY_MASK_START_ADDR |
1887 MLX5_MKEY_MASK_EN_SIGERR |
1888 MLX5_MKEY_MASK_EN_RINVAL |
1889 MLX5_MKEY_MASK_KEY |
1894 MLX5_MKEY_MASK_SMALL_FENCE |
1895 MLX5_MKEY_MASK_FREE |
1896 MLX5_MKEY_MASK_BSF_EN;
1898 return cpu_to_be64(result);
1901 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
1902 struct mlx5_ib_mr *mr)
1904 int ndescs = mr->ndescs;
1906 memset(umr, 0, sizeof(*umr));
1907 umr->flags = MLX5_UMR_CHECK_NOT_FREE;
1908 umr->klm_octowords = get_klm_octo(ndescs);
1909 umr->mkey_mask = frwr_mkey_mask();
1912 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
1914 memset(umr, 0, sizeof(*umr));
1915 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1916 umr->flags = 1 << 7;
1919 static __be64 get_umr_reg_mr_mask(void)
1923 result = MLX5_MKEY_MASK_LEN |
1924 MLX5_MKEY_MASK_PAGE_SIZE |
1925 MLX5_MKEY_MASK_START_ADDR |
1929 MLX5_MKEY_MASK_KEY |
1933 MLX5_MKEY_MASK_FREE;
1935 return cpu_to_be64(result);
1938 static __be64 get_umr_unreg_mr_mask(void)
1942 result = MLX5_MKEY_MASK_FREE;
1944 return cpu_to_be64(result);
1947 static __be64 get_umr_update_mtt_mask(void)
1951 result = MLX5_MKEY_MASK_FREE;
1953 return cpu_to_be64(result);
1956 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1957 struct ib_send_wr *wr)
1959 struct mlx5_umr_wr *umrwr = umr_wr(wr);
1961 memset(umr, 0, sizeof(*umr));
1963 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
1964 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
1966 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
1968 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
1969 umr->klm_octowords = get_klm_octo(umrwr->npages);
1970 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
1971 umr->mkey_mask = get_umr_update_mtt_mask();
1972 umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
1973 umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
1975 umr->mkey_mask = get_umr_reg_mr_mask();
1978 umr->mkey_mask = get_umr_unreg_mr_mask();
1982 umr->flags |= MLX5_UMR_INLINE;
1985 static u8 get_umr_flags(int acc)
1987 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
1988 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
1989 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
1990 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
1991 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
1994 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
1995 struct mlx5_ib_mr *mr,
1996 u32 key, int access)
1998 int ndescs = ALIGN(mr->ndescs, 8) >> 1;
2000 memset(seg, 0, sizeof(*seg));
2001 seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT;
2002 seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
2003 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
2004 seg->start_addr = cpu_to_be64(mr->ibmr.iova);
2005 seg->len = cpu_to_be64(mr->ibmr.length);
2006 seg->xlt_oct_size = cpu_to_be32(ndescs);
2007 seg->log2_page_size = ilog2(mr->ibmr.page_size);
2010 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
2012 memset(seg, 0, sizeof(*seg));
2013 seg->status = MLX5_MKEY_STATUS_FREE;
2016 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
2018 struct mlx5_umr_wr *umrwr = umr_wr(wr);
2020 memset(seg, 0, sizeof(*seg));
2021 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
2022 seg->status = MLX5_MKEY_STATUS_FREE;
2026 seg->flags = convert_access(umrwr->access_flags);
2027 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
2028 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
2029 seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
2031 seg->len = cpu_to_be64(umrwr->length);
2032 seg->log2_page_size = umrwr->page_shift;
2033 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
2034 mlx5_mkey_variant(umrwr->mkey));
2037 static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
2038 struct mlx5_ib_mr *mr,
2039 struct mlx5_ib_pd *pd)
2041 int bcount = mr->desc_size * mr->ndescs;
2043 dseg->addr = cpu_to_be64(mr->desc_map);
2044 dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
2045 dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
2048 static __be32 send_ieth(struct ib_send_wr *wr)
2050 switch (wr->opcode) {
2051 case IB_WR_SEND_WITH_IMM:
2052 case IB_WR_RDMA_WRITE_WITH_IMM:
2053 return wr->ex.imm_data;
2055 case IB_WR_SEND_WITH_INV:
2056 return cpu_to_be32(wr->ex.invalidate_rkey);
2063 static u8 calc_sig(void *wqe, int size)
2069 for (i = 0; i < size; i++)
2075 static u8 wq_sig(void *wqe)
2077 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
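/* Copy the SG list inline into the WQE, wrapping back to the start of
 * the send queue buffer whenever the write pointer reaches sq.qend.
 */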
2080 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
2083 struct mlx5_wqe_inline_seg *seg;
2084 void *qend = qp->sq.qend;
2092 wqe += sizeof(*seg);
2093 for (i = 0; i < wr->num_sge; i++) {
2094 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
2095 len = wr->sg_list[i].length;
2098 if (unlikely(inl > qp->max_inline_data))
2101 if (unlikely(wqe + len > qend)) {
2103 memcpy(wqe, addr, copy);
2106 wqe = mlx5_get_send_wqe(qp, 0);
2108 memcpy(wqe, addr, len);
2112 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
2114 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
2119 static u16 prot_field_size(enum ib_signature_type type)
2122 case IB_SIG_TYPE_T10_DIF:
2123 return MLX5_DIF_SIZE;
2129 static u8 bs_selector(int block_size)
2131 switch (block_size) {
2132 case 512: return 0x1;
2133 case 520: return 0x2;
2134 case 4096: return 0x3;
2135 case 4160: return 0x4;
2136 case 1073741824: return 0x5;
2141 static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
2142 struct mlx5_bsf_inl *inl)
2144 /* Valid inline section and allow BSF refresh */
2145 inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
2146 MLX5_BSF_REFRESH_DIF);
2147 inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
2148 inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
2149 /* repeating block */
2150 inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
2151 inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
2152 MLX5_DIF_CRC : MLX5_DIF_IPCS;
2154 if (domain->sig.dif.ref_remap)
2155 inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
2157 if (domain->sig.dif.app_escape) {
2158 if (domain->sig.dif.ref_escape)
2159 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
2161 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
2164 inl->dif_app_bitmask_check =
2165 cpu_to_be16(domain->sig.dif.apptag_check_mask);
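/* Build the byte stream format (BSF) descriptor that tells the device
 * how to generate and check T10-DIF on the memory and wire domains of
 * a signature handover operation.
 */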
2168 static int mlx5_set_bsf(struct ib_mr *sig_mr,
2169 struct ib_sig_attrs *sig_attrs,
2170 struct mlx5_bsf *bsf, u32 data_size)
2172 struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
2173 struct mlx5_bsf_basic *basic = &bsf->basic;
2174 struct ib_sig_domain *mem = &sig_attrs->mem;
2175 struct ib_sig_domain *wire = &sig_attrs->wire;
2177 memset(bsf, 0, sizeof(*bsf));
2179 /* Basic + Extended + Inline */
2180 basic->bsf_size_sbs = 1 << 7;
2181 /* Input domain check byte mask */
2182 basic->check_byte_mask = sig_attrs->check_mask;
2183 basic->raw_data_size = cpu_to_be32(data_size);
2186 switch (sig_attrs->mem.sig_type) {
2187 case IB_SIG_TYPE_NONE:
2189 case IB_SIG_TYPE_T10_DIF:
2190 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
2191 basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
2192 mlx5_fill_inl_bsf(mem, &bsf->m_inl);
2199 switch (sig_attrs->wire.sig_type) {
2200 case IB_SIG_TYPE_NONE:
2202 case IB_SIG_TYPE_T10_DIF:
2203 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
2204 mem->sig_type == wire->sig_type) {
2205 /* Same block structure */
2206 basic->bsf_size_sbs |= 1 << 4;
2207 if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
2208 basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
2209 if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
2210 basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
2211 if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
2212 basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
2214 basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
2216 basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
2217 mlx5_fill_inl_bsf(wire, &bsf->w_inl);
2226 static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
2227 struct mlx5_ib_qp *qp, void **seg, int *size)
2229 struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
2230 struct ib_mr *sig_mr = wr->sig_mr;
2231 struct mlx5_bsf *bsf;
2232 u32 data_len = wr->wr.sg_list->length;
2233 u32 data_key = wr->wr.sg_list->lkey;
2234 u64 data_va = wr->wr.sg_list->addr;
2239 (data_key == wr->prot->lkey &&
2240 data_va == wr->prot->addr &&
2241 data_len == wr->prot->length)) {
2243 * Source domain doesn't contain signature information
2244 * or data and protection are interleaved in memory.
2245 * So we need to construct:
2246 * ------------------
2247 * |    data_klm    |
2248 * ------------------
2249 * |      BSF       |
2250 * ------------------
2252 struct mlx5_klm *data_klm = *seg;
2254 data_klm->bcount = cpu_to_be32(data_len);
2255 data_klm->key = cpu_to_be32(data_key);
2256 data_klm->va = cpu_to_be64(data_va);
2257 wqe_size = ALIGN(sizeof(*data_klm), 64);
2260 * Source domain contains signature information
2261 * So we need to construct a strided block format:
2262 * ---------------------------
2263 * |    stride_block_ctrl    |
2264 * ---------------------------
2265 * |         data_klm        |
2266 * ---------------------------
2267 * |         prot_klm        |
2268 * ---------------------------
2269 * |           BSF           |
2270 * ---------------------------
2272 struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
2273 struct mlx5_stride_block_entry *data_sentry;
2274 struct mlx5_stride_block_entry *prot_sentry;
2275 u32 prot_key = wr->prot->lkey;
2276 u64 prot_va = wr->prot->addr;
2277 u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
2281 data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
2282 prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
2284 prot_size = prot_field_size(sig_attrs->mem.sig_type);
2286 pr_err("Bad block size given: %u\n", block_size);
2289 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
2291 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
2292 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
2293 sblock_ctrl->num_entries = cpu_to_be16(2);
2295 data_sentry->bcount = cpu_to_be16(block_size);
2296 data_sentry->key = cpu_to_be32(data_key);
2297 data_sentry->va = cpu_to_be64(data_va);
2298 data_sentry->stride = cpu_to_be16(block_size);
2300 prot_sentry->bcount = cpu_to_be16(prot_size);
2301 prot_sentry->key = cpu_to_be32(prot_key);
2302 prot_sentry->va = cpu_to_be64(prot_va);
2303 prot_sentry->stride = cpu_to_be16(prot_size);
2305 wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
2306 sizeof(*prot_sentry), 64);
2310 *size += wqe_size / 16;
2311 if (unlikely((*seg == qp->sq.qend)))
2312 *seg = mlx5_get_send_wqe(qp, 0);
2315 ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
2319 *seg += sizeof(*bsf);
2320 *size += sizeof(*bsf) / 16;
2321 if (unlikely((*seg == qp->sq.qend)))
2322 *seg = mlx5_get_send_wqe(qp, 0);
2327 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
2328 struct ib_sig_handover_wr *wr, u32 nelements,
2329 u32 length, u32 pdn)
2331 struct ib_mr *sig_mr = wr->sig_mr;
2332 u32 sig_key = sig_mr->rkey;
2333 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
2335 memset(seg, 0, sizeof(*seg));
2337 seg->flags = get_umr_flags(wr->access_flags) |
2338 MLX5_ACCESS_MODE_KLM;
2339 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
2340 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
2341 MLX5_MKEY_BSF_EN | pdn);
2342 seg->len = cpu_to_be64(length);
2343 seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
2344 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
2347 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
2350 memset(umr, 0, sizeof(*umr));
2352 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
2353 umr->klm_octowords = get_klm_octo(nelements);
2354 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
2355 umr->mkey_mask = sig_mkey_mask();
2359 static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
2360 void **seg, int *size)
2362 struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
2363 struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
2364 u32 pdn = get_pd(qp)->pdn;
2366 int region_len, ret;
2368 if (unlikely(wr->wr.num_sge != 1) ||
2369 unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
2370 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
2371 unlikely(!sig_mr->sig->sig_status_checked))
2374 /* length of the protected region, data + protection */
2375 region_len = wr->wr.sg_list->length;
2377 (wr->prot->lkey != wr->wr.sg_list->lkey ||
2378 wr->prot->addr != wr->wr.sg_list->addr ||
2379 wr->prot->length != wr->wr.sg_list->length))
2380 region_len += wr->prot->length;
2382 /*
2383 * KLM octoword size - if protection was provided
2384 * then we use strided block format (3 octowords),
2385 * else we use single KLM (1 octoword)
2386 */
2387 klm_oct_size = wr->prot ? 3 : 1;
2389 set_sig_umr_segment(*seg, klm_oct_size);
2390 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2391 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2392 if (unlikely((*seg == qp->sq.qend)))
2393 *seg = mlx5_get_send_wqe(qp, 0);
2395 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
2396 *seg += sizeof(struct mlx5_mkey_seg);
2397 *size += sizeof(struct mlx5_mkey_seg) / 16;
2398 if (unlikely((*seg == qp->sq.qend)))
2399 *seg = mlx5_get_send_wqe(qp, 0);
2401 ret = set_sig_data_segment(wr, qp, seg, size);
2402 if (ret)
2403 return ret;
2405 sig_mr->sig->sig_status_checked = false;
2406 return 0;
2407 }
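/*
 * Build a SET_PSV segment, which programs one of the signature MR's PSV
 * (protection signature value) registers with the domain's initial
 * signature values; for T10-DIF these are the block guard, application
 * tag and reference tag.
 */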
2409 static int set_psv_wr(struct ib_sig_domain *domain,
2410 u32 psv_idx, void **seg, int *size)
2412 struct mlx5_seg_set_psv *psv_seg = *seg;
2414 memset(psv_seg, 0, sizeof(*psv_seg));
2415 psv_seg->psv_num = cpu_to_be32(psv_idx);
2416 switch (domain->sig_type) {
2417 case IB_SIG_TYPE_NONE:
2418 break;
2419 case IB_SIG_TYPE_T10_DIF:
2420 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
2421 domain->sig.dif.app_tag);
2422 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
2423 break;
2424 default:
2425 pr_err("Bad signature type given.\n");
2426 return 1;
2427 }
2429 *seg += sizeof(*psv_seg);
2430 *size += sizeof(*psv_seg) / 16;
2432 return 0;
2433 }
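/*
 * Build a fast-registration WQE for IB_WR_REG_MR: a UMR control
 * segment, the new mkey context (key and access flags), and a single
 * data segment pointing at the MR's descriptor list. Inline sends
 * cannot be combined with a registration WQE.
 */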
2435 static int set_reg_wr(struct mlx5_ib_qp *qp,
2436 struct ib_reg_wr *wr,
2437 void **seg, int *size)
2439 struct mlx5_ib_mr *mr = to_mmr(wr->mr);
2440 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
2442 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
2443 mlx5_ib_warn(to_mdev(qp->ibqp.device),
2444 "Invalid IB_SEND_INLINE send flag\n");
2448 set_reg_umr_seg(*seg, mr);
2449 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2450 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2451 if (unlikely((*seg == qp->sq.qend)))
2452 *seg = mlx5_get_send_wqe(qp, 0);
2454 set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
2455 *seg += sizeof(struct mlx5_mkey_seg);
2456 *size += sizeof(struct mlx5_mkey_seg) / 16;
2457 if (unlikely((*seg == qp->sq.qend)))
2458 *seg = mlx5_get_send_wqe(qp, 0);
2460 set_reg_data_seg(*seg, mr, pd);
2461 *seg += sizeof(struct mlx5_wqe_data_seg);
2462 *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
2464 return 0;
2465 }
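/*
 * Build a local-invalidate WQE: a UMR control segment plus an mkey
 * segment that mark the target memory key as free. The key itself is
 * carried in the control segment's immediate field by the caller.
 */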
2467 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
2469 set_linv_umr_seg(*seg);
2470 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2471 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2472 if (unlikely((*seg == qp->sq.qend)))
2473 *seg = mlx5_get_send_wqe(qp, 0);
2474 set_linv_mkey_seg(*seg);
2475 *seg += sizeof(struct mlx5_mkey_seg);
2476 *size += sizeof(struct mlx5_mkey_seg) / 16;
2477 if (unlikely((*seg == qp->sq.qend)))
2478 *seg = mlx5_get_send_wqe(qp, 0);
2479 }
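/*
 * Debug helper: dump a WQE, 16 bytes per line, walking consecutive
 * send queue entries and wrapping at the end of the queue.
 */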
2481 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
2482 {
2483 __be32 *p = NULL;
2484 int tidx = idx;
2485 int i, j;
2487 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
2488 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
2489 if ((i & 0xf) == 0) {
2490 void *buf = mlx5_get_send_wqe(qp, tidx);
2491 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
2492 p = buf;
2493 j = 0;
2494 }
2495 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
2496 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
2497 be32_to_cpu(p[j + 3]));
2498 }
2499 }
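/*
 * Copy a WQE into the BlueFlame buffer 64 bytes at a time, wrapping
 * back to the start of the send queue buffer when the source pointer
 * reaches the end of the queue.
 */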
2501 static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
2502 unsigned bytecnt, struct mlx5_ib_qp *qp)
2504 while (bytecnt > 0) {
2505 __iowrite64_copy(dst++, src++, 8);
2506 __iowrite64_copy(dst++, src++, 8);
2507 __iowrite64_copy(dst++, src++, 8);
2508 __iowrite64_copy(dst++, src++, 8);
2509 __iowrite64_copy(dst++, src++, 8);
2510 __iowrite64_copy(dst++, src++, 8);
2511 __iowrite64_copy(dst++, src++, 8);
2512 __iowrite64_copy(dst++, src++, 8);
2513 bytecnt -= 64;
2514 if (unlikely(src == qp->sq.qend))
2515 src = mlx5_get_send_wqe(qp, 0);
2516 }
2517 }
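/*
 * Choose the fence mode for a WQE's control segment. A fenced local
 * invalidate requests strong ordering; otherwise an explicit fence is
 * combined with any fence mode cached from the previous WQE.
 */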
2519 static u8 get_fence(u8 fence, struct ib_send_wr *wr)
2521 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
2522 wr->send_flags & IB_SEND_FENCE))
2523 return MLX5_FENCE_MODE_STRONG_ORDERING;
2525 if (unlikely(fence)) {
2526 if (wr->send_flags & IB_SEND_FENCE)
2527 return MLX5_FENCE_MODE_SMALL_AND_FENCE;
2528 else
2529 return fence;
2530 } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
2531 return MLX5_FENCE_MODE_FENCE;
2532 } else {
2533 return 0;
2534 }
2535 }
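/*
 * Reserve the next slot in the send queue: fail on overflow, locate the
 * WQE from cur_post, and initialize the control segment's immediate
 * data and completion/solicited flags. *seg and *size are advanced past
 * the control segment so the caller can append further segments.
 */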
2537 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
2538 struct mlx5_wqe_ctrl_seg **ctrl,
2539 struct ib_send_wr *wr, unsigned *idx,
2540 int *size, int nreq)
2541 {
2542 int err = 0;
2544 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
2545 err = -ENOMEM;
2546 return err;
2547 }
2549 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
2550 *seg = mlx5_get_send_wqe(qp, *idx);
2551 *ctrl = *seg;
2552 *(uint32_t *)(*seg + 8) = 0;
2553 (*ctrl)->imm = send_ieth(wr);
2554 (*ctrl)->fm_ce_se = qp->sq_signal_bits |
2555 (wr->send_flags & IB_SEND_SIGNALED ?
2556 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
2557 (wr->send_flags & IB_SEND_SOLICITED ?
2558 MLX5_WQE_CTRL_SOLICITED : 0);
2560 *seg += sizeof(**ctrl);
2561 *size = sizeof(**ctrl) / 16;
2563 return err;
2564 }
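/*
 * Complete a WQE: write the opcode/index and qpn/ds words into the
 * control segment, fold in the fence bits, optionally append a WQE
 * signature, record the bookkeeping needed at completion time (wr_id,
 * opcode, wqe_head) and advance cur_post by the WQE size in basic
 * blocks.
 */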
2566 static void finish_wqe(struct mlx5_ib_qp *qp,
2567 struct mlx5_wqe_ctrl_seg *ctrl,
2568 u8 size, unsigned idx, u64 wr_id,
2569 int nreq, u8 fence, u8 next_fence,
2570 u32 mlx5_opcode)
2571 {
2572 u8 opmod = 0;
2574 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
2575 mlx5_opcode | ((u32)opmod << 24));
2576 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
2577 ctrl->fm_ce_se |= fence;
2578 qp->fm_cache = next_fence;
2579 if (unlikely(qp->wq_sig))
2580 ctrl->signature = wq_sig(ctrl);
2582 qp->sq.wrid[idx] = wr_id;
2583 qp->sq.w_list[idx].opcode = mlx5_opcode;
2584 qp->sq.wqe_head[idx] = qp->sq.head + nreq;
2585 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
2586 qp->sq.w_list[idx].next = qp->sq.cur_post;
2587 }
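/*
 * Post a chain of send work requests. For each request: reserve a WQE
 * with begin_wqe(), append the transport-specific segments (RDMA
 * address, datagram, UMR, signature, ...), then the inline or
 * scatter/gather data segments, and close the WQE with finish_wqe().
 * Once all requests are built, the doorbell record is updated and the
 * doorbell register (or, when enabled, the BlueFlame buffer) is
 * written, with memory barriers keeping the descriptor writes, the
 * doorbell record and the MMIO write in order.
 */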
2590 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2591 struct ib_send_wr **bad_wr)
2593 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
2594 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2595 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2596 struct mlx5_ib_mr *mr;
2597 struct mlx5_wqe_data_seg *dpseg;
2598 struct mlx5_wqe_xrc_seg *xrc;
2599 struct mlx5_bf *bf = qp->bf;
2600 int uninitialized_var(size);
2601 void *qend = qp->sq.qend;
2602 unsigned long flags;
2603 unsigned idx;
2604 int err = 0;
2605 int inl = 0;
2606 int num_sge;
2607 void *seg;
2608 int nreq;
2609 int i;
2610 u8 next_fence = 0;
2611 u8 fence;
2613 spin_lock_irqsave(&qp->sq.lock, flags);
2615 for (nreq = 0; wr; nreq++, wr = wr->next) {
2616 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
2617 mlx5_ib_warn(dev, "\n");
2618 err = -EINVAL;
2619 *bad_wr = wr;
2620 goto out;
2621 }
2623 fence = qp->fm_cache;
2624 num_sge = wr->num_sge;
2625 if (unlikely(num_sge > qp->sq.max_gs)) {
2626 mlx5_ib_warn(dev, "\n");
2627 err = -EINVAL;
2628 *bad_wr = wr;
2629 goto out;
2630 }
2632 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
2633 if (err) {
2634 mlx5_ib_warn(dev, "\n");
2635 err = -ENOMEM;
2636 *bad_wr = wr;
2637 goto out;
2638 }
2640 switch (ibqp->qp_type) {
2641 case IB_QPT_XRC_INI:
2643 seg += sizeof(*xrc);
2644 size += sizeof(*xrc) / 16;
2645 /* fall through */
2646 case IB_QPT_RC:
2647 switch (wr->opcode) {
2648 case IB_WR_RDMA_READ:
2649 case IB_WR_RDMA_WRITE:
2650 case IB_WR_RDMA_WRITE_WITH_IMM:
2651 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2652 rdma_wr(wr)->rkey);
2653 seg += sizeof(struct mlx5_wqe_raddr_seg);
2654 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2655 break;
2657 case IB_WR_ATOMIC_CMP_AND_SWP:
2658 case IB_WR_ATOMIC_FETCH_AND_ADD:
2659 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
2660 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2661 err = -ENOSYS;
2662 *bad_wr = wr;
2663 goto out;
2665 case IB_WR_LOCAL_INV:
2666 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2667 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2668 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
2669 set_linv_wr(qp, &seg, &size);
2670 num_sge = 0;
2671 break;
2673 case IB_WR_REG_MR:
2674 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2675 qp->sq.wr_data[idx] = IB_WR_REG_MR;
2676 ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
2677 err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
2678 if (err) {
2679 *bad_wr = wr;
2680 goto out;
2681 }
2682 num_sge = 0;
2683 break;
2685 case IB_WR_REG_SIG_MR:
2686 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
2687 mr = to_mmr(sig_handover_wr(wr)->sig_mr);
2689 ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
2690 err = set_sig_umr_wr(wr, qp, &seg, &size);
2692 mlx5_ib_warn(dev, "\n");
2693 *bad_wr = wr;
2694 goto out;
2695 }
2697 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2698 nreq, get_fence(fence, wr),
2699 next_fence, MLX5_OPCODE_UMR);
2700 /*
2701 * SET_PSV WQEs are not signaled and solicited
2702 * on error
2703 */
2704 wr->send_flags &= ~IB_SEND_SIGNALED;
2705 wr->send_flags |= IB_SEND_SOLICITED;
2706 err = begin_wqe(qp, &seg, &ctrl, wr,
2707 &idx, &size, nreq);
2708 if (err) {
2709 mlx5_ib_warn(dev, "\n");
2710 err = -ENOMEM;
2711 *bad_wr = wr;
2712 goto out;
2713 }
2715 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
2716 mr->sig->psv_memory.psv_idx, &seg,
2717 &size);
2718 if (err) {
2719 mlx5_ib_warn(dev, "\n");
2720 *bad_wr = wr;
2721 goto out;
2722 }
2724 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2725 nreq, get_fence(fence, wr),
2726 next_fence, MLX5_OPCODE_SET_PSV);
2727 err = begin_wqe(qp, &seg, &ctrl, wr,
2728 &idx, &size, nreq);
2729 if (err) {
2730 mlx5_ib_warn(dev, "\n");
2731 err = -ENOMEM;
2732 *bad_wr = wr;
2733 goto out;
2734 }
2736 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2737 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
2738 mr->sig->psv_wire.psv_idx, &seg,
2739 &size);
2740 if (err) {
2741 mlx5_ib_warn(dev, "\n");
2742 *bad_wr = wr;
2743 goto out;
2744 }
2746 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2747 nreq, get_fence(fence, wr),
2748 next_fence, MLX5_OPCODE_SET_PSV);
2749 num_sge = 0;
2750 goto skip_psv;
2752 default:
2753 break;
2754 }
2755 break;
2757 case IB_QPT_UC:
2758 switch (wr->opcode) {
2759 case IB_WR_RDMA_WRITE:
2760 case IB_WR_RDMA_WRITE_WITH_IMM:
2761 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2762 rdma_wr(wr)->rkey);
2763 seg += sizeof(struct mlx5_wqe_raddr_seg);
2764 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2765 break;
2767 default:
2768 break;
2769 }
2770 break;
2772 case IB_QPT_SMI:
2773 case IB_QPT_GSI:
2774 case IB_QPT_UD:
2775 set_datagram_seg(seg, wr);
2776 seg += sizeof(struct mlx5_wqe_datagram_seg);
2777 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2778 if (unlikely((seg == qend)))
2779 seg = mlx5_get_send_wqe(qp, 0);
2780 break;
2782 case MLX5_IB_QPT_REG_UMR:
2783 if (wr->opcode != MLX5_IB_WR_UMR) {
2784 err = -EINVAL;
2785 mlx5_ib_warn(dev, "bad opcode\n");
2786 goto out;
2787 }
2788 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
2789 ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
2790 set_reg_umr_segment(seg, wr);
2791 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2792 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2793 if (unlikely((seg == qend)))
2794 seg = mlx5_get_send_wqe(qp, 0);
2795 set_reg_mkey_segment(seg, wr);
2796 seg += sizeof(struct mlx5_mkey_seg);
2797 size += sizeof(struct mlx5_mkey_seg) / 16;
2798 if (unlikely((seg == qend)))
2799 seg = mlx5_get_send_wqe(qp, 0);
2800 break;
2802 default:
2803 break;
2804 }
2806 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2807 int uninitialized_var(sz);
2809 err = set_data_inl_seg(qp, wr, seg, &sz);
2810 if (unlikely(err)) {
2811 mlx5_ib_warn(dev, "\n");
2812 *bad_wr = wr;
2813 goto out;
2814 }
2815 inl = 1;
2816 size += sz;
2817 } else {
2818 dpseg = seg;
2819 for (i = 0; i < num_sge; i++) {
2820 if (unlikely(dpseg == qend)) {
2821 seg = mlx5_get_send_wqe(qp, 0);
2822 dpseg = seg;
2823 }
2824 if (likely(wr->sg_list[i].length)) {
2825 set_data_ptr_seg(dpseg, wr->sg_list + i);
2826 size += sizeof(struct mlx5_wqe_data_seg) / 16;
2827 dpseg++;
2828 }
2829 }
2830 }
2832 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
2833 get_fence(fence, wr), next_fence,
2834 mlx5_ib_opcode[wr->opcode]);
2835 skip_psv:
2836 if (0)
2837 dump_wqe(qp, idx, size);
2838 }
2840 out:
2841 if (likely(nreq)) {
2842 qp->sq.head += nreq;
2844 /* Make sure that descriptors are written before
2845 * updating doorbell record and ringing the doorbell
2846 */
2847 wmb();
2849 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2851 /* Make sure doorbell record is visible to the HCA before
2852 * we hit doorbell */
2853 wmb();
2855 if (bf->need_lock)
2856 spin_lock(&bf->lock);
2857 else
2858 __acquire(&bf->lock);
2861 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2862 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
2864 } else {
2865 mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
2866 MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2867 /* Make sure doorbells don't leak out of SQ spinlock
2868 * and reach the HCA out of order.
2869 */
2870 mmiowb();
2871 }
2872 bf->offset ^= bf->buf_size;
2873 if (bf->need_lock)
2874 spin_unlock(&bf->lock);
2875 else
2876 __release(&bf->lock);
2877 }
2879 spin_unlock_irqrestore(&qp->sq.lock, flags);
2881 return err;
2882 }
2884 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
2886 sig->signature = calc_sig(sig, size);
2887 }
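/*
 * Post a chain of receive work requests: for each request, write one
 * data segment per scatter entry, terminate a short list with an entry
 * carrying MLX5_INVALID_LKEY, and add the receive-WQE signature segment
 * when wq_sig is enabled. The doorbell record is updated once after all
 * requests are written, behind a memory barrier.
 */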
2889 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2890 struct ib_recv_wr **bad_wr)
2892 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2893 struct mlx5_wqe_data_seg *scat;
2894 struct mlx5_rwqe_sig *sig;
2895 unsigned long flags;
2896 int err = 0;
2897 int nreq;
2898 int ind;
2899 int i;
2901 spin_lock_irqsave(&qp->rq.lock, flags);
2903 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2905 for (nreq = 0; wr; nreq++, wr = wr->next) {
2906 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2907 err = -ENOMEM;
2908 *bad_wr = wr;
2909 goto out;
2910 }
2912 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2913 err = -EINVAL;
2914 *bad_wr = wr;
2915 goto out;
2916 }
2918 scat = get_recv_wqe(qp, ind);
2919 if (qp->wq_sig)
2920 scat++;
2922 for (i = 0; i < wr->num_sge; i++)
2923 set_data_ptr_seg(scat + i, wr->sg_list + i);
2925 if (i < qp->rq.max_gs) {
2926 scat[i].byte_count = 0;
2927 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
2928 scat[i].addr = 0;
2929 }
2931 if (qp->wq_sig) {
2932 sig = (struct mlx5_rwqe_sig *)scat;
2933 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
2934 }
2936 qp->rq.wrid[ind] = wr->wr_id;
2938 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
2939 }
2941 out:
2942 if (likely(nreq)) {
2943 qp->rq.head += nreq;
2945 /* Make sure that descriptors are written before
2946 * doorbell record.
2947 */
2948 wmb();
2950 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
2951 }
2953 spin_unlock_irqrestore(&qp->rq.lock, flags);
2955 return err;
2956 }
2958 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
2960 switch (mlx5_state) {
2961 case MLX5_QP_STATE_RST: return IB_QPS_RESET;
2962 case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
2963 case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
2964 case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
2965 case MLX5_QP_STATE_SQ_DRAINING:
2966 case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
2967 case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
2968 case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
2969 default: return -1;
2970 }
2971 }
2973 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
2975 switch (mlx5_mig_state) {
2976 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
2977 case MLX5_QP_PM_REARM: return IB_MIG_REARM;
2978 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
2979 default: return -1;
2980 }
2981 }
2983 static int to_ib_qp_access_flags(int mlx5_flags)
2984 {
2985 int ib_flags = 0;
2987 if (mlx5_flags & MLX5_QP_BIT_RRE)
2988 ib_flags |= IB_ACCESS_REMOTE_READ;
2989 if (mlx5_flags & MLX5_QP_BIT_RWE)
2990 ib_flags |= IB_ACCESS_REMOTE_WRITE;
2991 if (mlx5_flags & MLX5_QP_BIT_RAE)
2992 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
2994 return ib_flags;
2995 }
2997 static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2998 struct mlx5_qp_path *path)
3000 struct mlx5_core_dev *dev = ibdev->mdev;
3002 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
3003 ib_ah_attr->port_num = path->port;
3005 if (ib_ah_attr->port_num == 0 ||
3006 ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
3007 return;
3009 ib_ah_attr->sl = path->sl & 0xf;
3011 ib_ah_attr->dlid = be16_to_cpu(path->rlid);
3012 ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
3013 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
3014 ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
3015 if (ib_ah_attr->ah_flags) {
3016 ib_ah_attr->grh.sgid_index = path->mgid_index;
3017 ib_ah_attr->grh.hop_limit = path->hop_limit;
3018 ib_ah_attr->grh.traffic_class =
3019 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
3020 ib_ah_attr->grh.flow_label =
3021 be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
3022 memcpy(ib_ah_attr->grh.dgid.raw,
3023 path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
3024 }
3025 }
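/*
 * Query the QP: read the firmware QP context and translate its fields
 * (state, MTU, PSNs, access flags, address vectors, timeouts and
 * capabilities) into the ib_qp_attr / ib_qp_init_attr layout expected
 * by the verbs layer.
 */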
3027 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
3028 struct ib_qp_init_attr *qp_init_attr)
3030 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3031 struct mlx5_ib_qp *qp = to_mqp(ibqp);
3032 struct mlx5_query_qp_mbox_out *outb;
3033 struct mlx5_qp_context *context;
3034 int mlx5_state;
3035 int err = 0;
3037 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3038 /*
3039 * Wait for any outstanding page faults, in case the user frees memory
3040 * based upon this query's result.
3041 */
3042 flush_workqueue(mlx5_ib_page_fault_wq);
3043 #endif
3045 mutex_lock(&qp->mutex);
3046 outb = kzalloc(sizeof(*outb), GFP_KERNEL);
3047 if (!outb) {
3048 err = -ENOMEM;
3049 goto out;
3050 }
3051 context = &outb->ctx;
3052 err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
3053 if (err)
3054 goto out_free;
3056 mlx5_state = be32_to_cpu(context->flags) >> 28;
3058 qp->state = to_ib_qp_state(mlx5_state);
3059 qp_attr->qp_state = qp->state;
3060 qp_attr->path_mtu = context->mtu_msgmax >> 5;
3061 qp_attr->path_mig_state =
3062 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
3063 qp_attr->qkey = be32_to_cpu(context->qkey);
3064 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
3065 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
3066 qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
3067 qp_attr->qp_access_flags =
3068 to_ib_qp_access_flags(be32_to_cpu(context->params2));
3070 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
3071 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
3072 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
3073 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
3074 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
3077 qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
3078 qp_attr->port_num = context->pri_path.port;
3080 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
3081 qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
3083 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
3085 qp_attr->max_dest_rd_atomic =
3086 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
3087 qp_attr->min_rnr_timer =
3088 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
3089 qp_attr->timeout = context->pri_path.ackto_lt >> 3;
3090 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
3091 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
3092 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
3093 qp_attr->cur_qp_state = qp_attr->qp_state;
3094 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
3095 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
3097 if (!ibqp->uobject) {
3098 qp_attr->cap.max_send_wr = qp->sq.max_post;
3099 qp_attr->cap.max_send_sge = qp->sq.max_gs;
3100 qp_init_attr->qp_context = ibqp->qp_context;
3101 } else {
3102 qp_attr->cap.max_send_wr = 0;
3103 qp_attr->cap.max_send_sge = 0;
3104 }
3106 qp_init_attr->qp_type = ibqp->qp_type;
3107 qp_init_attr->recv_cq = ibqp->recv_cq;
3108 qp_init_attr->send_cq = ibqp->send_cq;
3109 qp_init_attr->srq = ibqp->srq;
3110 qp_attr->cap.max_inline_data = qp->max_inline_data;
3112 qp_init_attr->cap = qp_attr->cap;
3114 qp_init_attr->create_flags = 0;
3115 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
3116 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
3118 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
3119 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3121 out_free:
3122 kfree(outb);
3124 out:
3125 mutex_unlock(&qp->mutex);
3126 return err;
3127 }
3129 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3130 struct ib_ucontext *context,
3131 struct ib_udata *udata)
3133 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3134 struct mlx5_ib_xrcd *xrcd;
3137 if (!MLX5_CAP_GEN(dev->mdev, xrc))
3138 return ERR_PTR(-ENOSYS);
3140 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
3141 if (!xrcd)
3142 return ERR_PTR(-ENOMEM);
3144 err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
3145 if (err) {
3146 kfree(xrcd);
3147 return ERR_PTR(-ENOMEM);
3148 }
3150 return &xrcd->ibxrcd;
3153 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3155 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
3156 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
3157 int err;
3159 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
3161 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);