/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

/* QP to support BF should have bits 6,7 cleared */
#define MLX4_BF_QP_SKIP_MASK	0xc0
#define MLX4_MAX_BF_QP_RANGE	0x40
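
/* Dispatch an asynchronous event to the QP that triggered it.  The
 * refcount taken under qp_table->lock keeps the QP alive while its
 * event handler runs outside the lock.
 */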
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_dbg(dev, "Async event for non-existent QP %08x\n", qpn);
		return;
	}

	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

/* used for INIT/CLOSE port logic */
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
	/* this procedure is called after we already know we are on the master */
	/* qp0 is either the proxy qp0, or the real qp0 */
	u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);

	*proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;

	*real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
		qp->qpn <= dev->phys_caps.base_sqpn + 1;

	return *real_qp0 || *proxy_qp0;
}
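
/* Move a QP between states with the appropriate firmware command.
 * The op[][] table below maps (current state, new state) to that
 * command; a zero entry means the transition is illegal.  Transitions
 * to RESET are special-cased: they are legal from any state and need
 * no mailbox.
 */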
static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			    enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
			    struct mlx4_qp_context *context,
			    enum mlx4_qp_optpar optpar,
			    int sqd_event, struct mlx4_qp *qp, int native)
{
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;
	int real_qp0 = 0;
	int proxy_qp0 = 0;
	u8 port;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
			       MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
			port = (qp->qpn & 1) + 1;
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		}
		return ret;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof *context);

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	ret = mlx4_cmd(dev, mailbox->dma,
		       qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

	if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
		port = (qp->qpn & 1) + 1;
		if (cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    new_state == MLX4_QP_STATE_ERR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		} else if (new_state == MLX4_QP_STATE_RTR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 1;
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context,
		   enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
				optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
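
/* Reserve a contiguous, aligned range of QP numbers.  QPs that will
 * use BlueFlame must have bits 6 and 7 of the QP number clear, so BF
 * requests are capped at MLX4_MAX_BF_QP_RANGE and satisfied from the
 * RAW_ETH zone with MLX4_BF_QP_SKIP_MASK applied.
 */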
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags)
{
	u32 uid;
	int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
		return -ENOMEM;

	uid = MLX4_QP_TABLE_ZONE_GENERAL;
	if (flags & (u8)MLX4_RESERVE_A0_QP) {
		if (bf_qp)
			uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
		else
			uid = MLX4_QP_TABLE_ZONE_RSS;
	}

	*base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
					bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
	if (*base == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			  int *base, u8 flags)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	/* Turn off all unsupported QP allocation flags */
	flags &= dev->caps.alloc_res_qp_mask;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
		set_param_h(&in_param, align);
		err = mlx4_cmd_imm(dev, in_param, &out_param,
				   RES_QP, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*base = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
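
/* Return a reserved QP number range to the zone allocator.  Ranges
 * that fall inside the firmware-reserved (special QP) area are never
 * freed.
 */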
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
		return;
	mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
}

void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, base_qpn);
		set_param_h(&in_param, cnt);
		err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
				  base_qpn, cnt);
	} else
		__mlx4_qp_release_range(dev, base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
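
/* Take a reference on every ICM table that backs this QP number: the
 * QP context itself, the auxiliary (AUXC) and alternate-path (ALTC)
 * contexts, the RDMA read/atomic responder resources (RDMARC) and the
 * cMPT entry.  Each successful mlx4_table_get() maps the backing ICM
 * chunk on first use; on failure, references already taken are
 * dropped in reverse order.
 */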
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
	if (err)
		goto err_put_rdmarc;

	return 0;

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
err_out:
	return err;
}

static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, qpn);
		return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_qp_alloc_icm(dev, qpn, gfp);
}

void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
}

static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, qpn);
		if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
	} else
		__mlx4_qp_free_icm(dev, qpn);
}
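
/* Make a QP number usable as a software QP object: back it with ICM
 * and publish it in the radix tree so mlx4_qp_event() can find it.
 */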
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_qp_alloc_icm(dev, qpn, gfp);
	if (err)
		return err;

	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
				(dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_icm;

	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_icm:
	mlx4_qp_free_icm(dev, qpn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
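
/* Update selected fields of a QP context without a full state
 * transition, via the UPDATE_QP firmware command.  Each attribute
 * sets the matching bit in the primary-address-path or QP mask so the
 * firmware touches only the requested fields.
 */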
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
		   enum mlx4_update_qp_attr attr,
		   struct mlx4_update_qp_params *params)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *cmd;
	u64 pri_addr_path_mask = 0;
	u64 qp_mask = 0;
	int err = 0;

	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cmd = (struct mlx4_update_qp_context *)mailbox->buf;

	if (attr & MLX4_UPDATE_QP_SMAC) {
		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
	}

	if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
		if (!(dev->caps.flags2
		      & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
			mlx4_warn(dev,
				  "Trying to set src check LB, but it isn't supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		pri_addr_path_mask |=
			1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
		if (params->flags &
		    MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
			cmd->qp_context.pri_path.fl |=
				MLX4_FL_ETH_SRC_CHECK_MC_LB;
		}
	}

	if (attr & MLX4_UPDATE_QP_VSD) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
	}

	if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
		cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
	}

	if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
		cmd->qp_context.qos_vport = params->qos_vport;
	}

	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
	cmd->qp_mask = cpu_to_be64(qp_mask);

	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_update_qp);

void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);

static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2
#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1
#define MLX4_QP_TABLE_RAW_ETH_SIZE     256
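
/* Carve the QP number space into allocation zones: a general zone for
 * ordinary QPs, an RSS zone covering the firmware's A0 steering area,
 * and RAW_ETH zone(s) carved out of the latter in which QP numbers
 * keep bits 6 and 7 clear so they remain usable for BlueFlame.
 */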
static int mlx4_create_zones(struct mlx4_dev *dev,
			     u32 reserved_bottom_general,
			     u32 reserved_top_general,
			     u32 reserved_bottom_rss,
			     u32 start_offset_rss,
			     u32 max_table_offset)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
	int bitmap_initialized = 0;
	u32 last_offset;
	int k;
	int err;

	qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);

	if (NULL == qp_table->zones)
		return -ENOMEM;

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);

	if (NULL == bitmap) {
		err = -ENOMEM;
		goto free_zone;
	}

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
			       (1 << 23) - 1, reserved_bottom_general,
			       reserved_top_general);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
				MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
				MLX4_ZONE_USE_RR, 0,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);

	if (err)
		goto free_bitmap;

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
			       reserved_bottom_rss,
			       reserved_bottom_rss - 1,
			       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
			       reserved_bottom_rss - start_offset_rss);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
				MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
				MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
				MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);

	if (err)
		goto free_bitmap;

	last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
	/* We have a single zone for the A0 steering QPs area of the FW. This area
	 * needs to be split into subareas. One set of subareas is for RSS QPs
	 * (in which qp number bits 6 and/or 7 are set); the other set of subareas
	 * is for RAW_ETH QPs, which require that both bits 6 and 7 are zero.
	 * Currently, the values returned by the FW (A0 steering area starting qp number
	 * and A0 steering area size) are such that there are only two subareas -- one
	 * for RSS and one for RAW_ETH.
	 */
	for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]);
	     k++) {
		int size;
		u32 offset = start_offset_rss;
		u32 bf_mask;
		u32 requested_size;

		/* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates
		 * a mask of all LSB bits set until (and not including) the first
		 * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK
		 * is 0xc0, bf_mask will be 0x3f.
		 */
		bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
		requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);

		if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     ((int)(max_table_offset - last_offset)) >=
		     roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
		    (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     !((last_offset + requested_size - 1) &
		       MLX4_BF_QP_SKIP_MASK)))
			size = requested_size;
		else {
			u32 candidate_offset =
				(last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;

			if (last_offset & MLX4_BF_QP_SKIP_MASK)
				last_offset = candidate_offset;

			/* From this point, the BF bits are 0 */

			if (last_offset > max_table_offset) {
				/* need to skip */
				size = -1;
			} else {
				size = min3(max_table_offset - last_offset,
					    bf_mask - (last_offset & bf_mask),
					    requested_size);
				if (size < requested_size) {
					int candidate_size;

					candidate_size = min3(
						max_table_offset - candidate_offset,
						bf_mask - (last_offset & bf_mask),
						requested_size);

					/* We will not take this path if last_offset was
					 * already set above to candidate_offset
					 */
					if (candidate_size > size) {
						last_offset = candidate_offset;
						size = candidate_size;
					}
				}
			}
		}

		if (size > 0) {
			/* mlx4_bitmap_alloc_range will find a contiguous range of "size"
			 * QPs in which both bits 6 and 7 are zero, because we pass it the
			 * MLX4_BF_SKIP_MASK).
			 */
			offset = mlx4_bitmap_alloc_range(
					*bitmap + MLX4_QP_TABLE_ZONE_RSS,
					size, 1,
					MLX4_BF_QP_SKIP_MASK);

			if (offset == (u32)-1) {
				err = -ENOMEM;
				break;
			}

			last_offset = offset + size;

			err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
					       roundup_pow_of_two(size) - 1, 0,
					       roundup_pow_of_two(size) - size);
		} else {
			/* Add an empty bitmap, we'll allocate from different zones (since
			 * at least one is reserved)
			 */
			err = mlx4_bitmap_init(*bitmap + k, 1,
					       MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
					       0);
			mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
		}

		if (err)
			break;

		++bitmap_initialized;

		err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
					MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
					MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
					MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
					offset, qp_table->zones_uids + k);

		if (err)
			break;
	}

	if (err)
		goto free_bitmap;

	qp_table->bitmap_gen = *bitmap;

	return err;

free_bitmap:
	for (k = 0; k < bitmap_initialized; k++)
		mlx4_bitmap_cleanup(*bitmap + k);
	kfree(bitmap);
free_zone:
	mlx4_zone_allocator_destroy(qp_table->zones);
	return err;
}

static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (qp_table->zones) {
		int i;

		for (i = 0;
		     i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]);
		     i++) {
			struct mlx4_bitmap *bitmap =
				mlx4_zone_get_bitmap(qp_table->zones,
						     qp_table->zones_uids[i]);

			mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
			if (NULL == bitmap)
				continue;

			mlx4_bitmap_cleanup(bitmap);
		}
		mlx4_zone_allocator_destroy(qp_table->zones);
		kfree(qp_table->bitmap_gen);
		qp_table->bitmap_gen = NULL;
		qp_table->zones = NULL;
	}
}
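
/* Partition the QP number space at driver init: accumulate the
 * regions that must stay at the bottom, place the aligned special-QP
 * (SQP) block above them, push the remaining reserved regions to the
 * top, and hand everything else to the zone allocator.
 */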
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int k;
	int fixed_reserved_from_bot_rv = 0;
	int bottom_reserved_for_rss_bitmap;
	u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
			dev->caps.dmfs_high_rate_qpn_range;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	/* We reserve 2 extra QPs per port for the special QPs.  The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 *
	 * We also reserve the MSB of the 24-bit QP number to indicate
	 * that a QP is an XRC QP.
	 */
	for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
		fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];

	if (fixed_reserved_from_bot_rv < max_table_offset)
		fixed_reserved_from_bot_rv = max_table_offset;

	/* We reserve at least 1 extra for bitmaps that we don't have enough space for */
	bottom_reserved_for_rss_bitmap =
		roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
	dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);

	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j;
		int last_base = dev->caps.num_qps;

		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
			for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]])
					swap(sort[j], sort[j - 1]);
			}
		}

		for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}
	}

	/* Reserve 8 real SQPs in both native and SRIOV modes.
	 * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
	 * (for all PFs and VFs), and 8 corresponding tunnel QPs.
	 * Each proxy SQP works opposite its own tunnel QP.
	 *
	 * The QPs are arranged as follows:
	 * a. 8 real SQPs
	 * b. All the proxy SQPs (8 per function)
	 * c. All the tunnel QPs (8 per function)
	 */
	reserved_from_bot = mlx4_num_reserved_sqps(dev);
	if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
		mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
		return -EINVAL;
	}

	err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
				bottom_reserved_for_rss_bitmap,
				fixed_reserved_from_bot_rv,
				max_table_offset);

	if (err)
		return err;

	if (mlx4_is_mfunc(dev)) {
		/* for PPF use */
		dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
		dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;

		/* In mfunc, calculate proxy and tunnel qp offsets for the PF here,
		 * since the PF does not call mlx4_slave_caps */
		dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
		dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
		dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
		dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);

		if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
		    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
			err = -ENOMEM;
			goto err_mem;
		}

		for (k = 0; k < dev->caps.num_ports; k++) {
			dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + k;
			dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
			dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
			dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
		}
	}

	err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
	if (err)
		goto err_mem;

	return err;

err_mem:
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
	mlx4_cleanup_qp_zones(dev);
	return err;
}

void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;

	mlx4_CONF_SPECIAL_QP(dev, 0);

	mlx4_cleanup_qp_zones(dev);
}
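
/* Read a QP context back from the firmware (QUERY_QP); the context
 * starts 8 bytes into the mailbox buffer.
 */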
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (!err)
		memcpy(context, mailbox->buf + 8, sizeof *context);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);
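
/* Walk a freshly created QP through RST -> INIT -> RTR -> RTS,
 * reusing the same context and patching only the next-state nibble in
 * flags at each step.
 */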
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
	int err;
	int i;
	enum mlx4_qp_state states[] = {
		MLX4_QP_STATE_RST,
		MLX4_QP_STATE_INIT,
		MLX4_QP_STATE_RTR,
		MLX4_QP_STATE_RTS
	};

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		context->flags &= cpu_to_be32(~(0xf << 28));
		context->flags |= cpu_to_be32(states[i + 1] << 28);
		if (states[i + 1] != MLX4_QP_STATE_RTR)
			/* params2 is big-endian: clear FPP in device byte order */
			context->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
				     context, 0, 0, qp);
		if (err) {
			mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
				 states[i + 1], err);
			return err;
		}

		*qp_state = states[i + 1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);