/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
enum {
        CMD_IF_REV = 5,
};

enum {
        CMD_MODE_POLLING,
        CMD_MODE_EVENTS
};

enum {
        NUM_LONG_LISTS    = 2,
        NUM_MED_LISTS     = 64,
        LONG_LIST_SIZE    = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
                                MLX5_CMD_DATA_BLOCK_SIZE,
        MED_LIST_SIZE     = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};
enum {
        MLX5_CMD_DELIVERY_STAT_OK                       = 0x0,
        MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR               = 0x1,
        MLX5_CMD_DELIVERY_STAT_TOK_ERR                  = 0x2,
        MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR          = 0x3,
        MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR        = 0x4,
        MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR         = 0x5,
        MLX5_CMD_DELIVERY_STAT_FW_ERR                   = 0x6,
        MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR            = 0x7,
        MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR           = 0x8,
        MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR      = 0x9,
        MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR            = 0x10,
};
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
                                           struct mlx5_cmd_msg *in,
                                           struct mlx5_cmd_msg *out,
                                           void *uout, int uout_size,
                                           mlx5_cmd_cbk_t cbk,
                                           void *context, int page_queue)
{
        gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
        struct mlx5_cmd_work_ent *ent;

        ent = kzalloc(sizeof(*ent), alloc_flags);
        if (!ent)
                return ERR_PTR(-ENOMEM);

        ent->in         = in;
        ent->out        = out;
        ent->uout       = uout;
        ent->uout_size  = uout_size;
        ent->callback   = cbk;
        ent->context    = context;
        ent->cmd        = cmd;
        ent->page_queue = page_queue;

        return ent;
}
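
/* Tokens cycle through 1..255; zero is skipped so that a valid descriptor
 * never carries a zero token.
 */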
static u8 alloc_token(struct mlx5_cmd *cmd)
{
        u8 token;

        spin_lock(&cmd->token_lock);
        cmd->token++;
        if (cmd->token == 0)
                cmd->token++;
        token = cmd->token;
        spin_unlock(&cmd->token_lock);

        return token;
}
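
/* Command slots are tracked in cmd->bitmask, where a set bit marks a free
 * slot. The last slot (index max_reg_cmds) is reserved for page commands
 * and is claimed directly by cmd_work_handler(), never by alloc_ent().
 */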
static int alloc_ent(struct mlx5_cmd *cmd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cmd->alloc_lock, flags);
        ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
        if (ret < cmd->max_reg_cmds)
                clear_bit(ret, &cmd->bitmask);
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);

        return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->alloc_lock, flags);
        set_bit(idx, &cmd->bitmask);
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
        return cmd->cmd_buf + (idx << cmd->log_stride);
}
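
/* The command interface protects the descriptor and each mailbox block with
 * 8-bit XOR checksums: a signature byte is written as the complement of the
 * XOR of the covered bytes, so XOR-ing the covered range together with the
 * stored signature must yield 0xff.
 */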
static u8 xor8_buf(void *buf, size_t offset, int len)
{
        u8 *ptr = buf;
        u8 sum = 0;
        int i;
        int end = len + offset;

        for (i = offset; i < end; i++)
                sum ^= ptr[i];

        return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
        size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
        int xor_len = sizeof(*block) - sizeof(block->data) - 1;

        if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
                return -EINVAL;

        if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
                return -EINVAL;

        return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
        int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
        size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

        block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
        block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
        struct mlx5_cmd_mailbox *next = msg->next;
        int size = msg->len;
        int blen = size - min_t(int, sizeof(msg->first.data), size);
        int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
                / MLX5_CMD_DATA_BLOCK_SIZE;
        int i;

        for (i = 0; i < n && next; i++) {
                calc_block_sig(next->buf);
                next = next->next;
        }
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
        ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
        if (csum) {
                calc_chain_sig(ent->in);
                calc_chain_sig(ent->out);
        }
}
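
/* In polling mode, completion is detected by watching the ownership bit of
 * the descriptor: firmware clears CMD_OWNER_HW in status_own when it is
 * done with the command.
 */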
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
        unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
        u8 own;

        do {
                own = ent->lay->status_own;
                if (!(own & CMD_OWNER_HW)) {
                        ent->ret = 0;
                        return;
                }
                usleep_range(5000, 10000);
        } while (time_before(jiffies, poll_end));

        ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
        kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
        struct mlx5_cmd_mailbox *next = ent->out->next;
        int err;
        u8 sig;
        int size = ent->out->len;
        int blen = size - min_t(int, sizeof(ent->out->first.data), size);
        int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
                / MLX5_CMD_DATA_BLOCK_SIZE;
        int i;

        sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
        if (sig != 0xff)
                return -EINVAL;

        for (i = 0; i < n && next; i++) {
                err = verify_block_sig(next->buf);
                if (err)
                        return err;

                next = next->next;
        }

        return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
        __be32 *p = buf;
        int i;

        for (i = 0; i < size; i += 16) {
                pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
                         be32_to_cpu(p[1]), be32_to_cpu(p[2]),
                         be32_to_cpu(p[3]));
                p += 4;
                offset += 16;
        }
        if (!data_only)
                pr_debug("\n");
}

enum {
        MLX5_DRIVER_STATUS_ABORTED = 0xfe,
        MLX5_DRIVER_SYND = 0xbadd00de,
};

static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
                                       u32 *synd, u8 *status)
{
        *synd = 0;
        *status = 0;

        switch (op) {
        case MLX5_CMD_OP_TEARDOWN_HCA:
        case MLX5_CMD_OP_DISABLE_HCA:
        case MLX5_CMD_OP_MANAGE_PAGES:
        case MLX5_CMD_OP_DESTROY_MKEY:
        case MLX5_CMD_OP_DESTROY_EQ:
        case MLX5_CMD_OP_DESTROY_CQ:
        case MLX5_CMD_OP_DESTROY_QP:
        case MLX5_CMD_OP_DESTROY_PSV:
        case MLX5_CMD_OP_DESTROY_SRQ:
        case MLX5_CMD_OP_DESTROY_XRC_SRQ:
        case MLX5_CMD_OP_DESTROY_DCT:
        case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
        case MLX5_CMD_OP_DEALLOC_PD:
        case MLX5_CMD_OP_DEALLOC_UAR:
        case MLX5_CMD_OP_DETTACH_FROM_MCG:
        case MLX5_CMD_OP_DEALLOC_XRCD:
        case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
        case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_DESTROY_TIR:
        case MLX5_CMD_OP_DESTROY_SQ:
        case MLX5_CMD_OP_DESTROY_RQ:
        case MLX5_CMD_OP_DESTROY_RMP:
        case MLX5_CMD_OP_DESTROY_TIS:
        case MLX5_CMD_OP_DESTROY_RQT:
        case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
        case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
        case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
                return MLX5_CMD_STAT_OK;

        case MLX5_CMD_OP_QUERY_HCA_CAP:
        case MLX5_CMD_OP_QUERY_ADAPTER:
        case MLX5_CMD_OP_INIT_HCA:
        case MLX5_CMD_OP_ENABLE_HCA:
        case MLX5_CMD_OP_QUERY_PAGES:
        case MLX5_CMD_OP_SET_HCA_CAP:
        case MLX5_CMD_OP_QUERY_ISSI:
        case MLX5_CMD_OP_SET_ISSI:
        case MLX5_CMD_OP_CREATE_MKEY:
        case MLX5_CMD_OP_QUERY_MKEY:
        case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
        case MLX5_CMD_OP_PAGE_FAULT_RESUME:
        case MLX5_CMD_OP_CREATE_EQ:
        case MLX5_CMD_OP_QUERY_EQ:
        case MLX5_CMD_OP_GEN_EQE:
        case MLX5_CMD_OP_CREATE_CQ:
        case MLX5_CMD_OP_QUERY_CQ:
        case MLX5_CMD_OP_MODIFY_CQ:
        case MLX5_CMD_OP_CREATE_QP:
        case MLX5_CMD_OP_RST2INIT_QP:
        case MLX5_CMD_OP_INIT2RTR_QP:
        case MLX5_CMD_OP_RTR2RTS_QP:
        case MLX5_CMD_OP_RTS2RTS_QP:
        case MLX5_CMD_OP_SQERR2RTS_QP:
        case MLX5_CMD_OP_2ERR_QP:
        case MLX5_CMD_OP_2RST_QP:
        case MLX5_CMD_OP_QUERY_QP:
        case MLX5_CMD_OP_SQD_RTS_QP:
        case MLX5_CMD_OP_INIT2INIT_QP:
        case MLX5_CMD_OP_CREATE_PSV:
        case MLX5_CMD_OP_CREATE_SRQ:
        case MLX5_CMD_OP_QUERY_SRQ:
        case MLX5_CMD_OP_ARM_RQ:
        case MLX5_CMD_OP_CREATE_XRC_SRQ:
        case MLX5_CMD_OP_QUERY_XRC_SRQ:
        case MLX5_CMD_OP_ARM_XRC_SRQ:
        case MLX5_CMD_OP_CREATE_DCT:
        case MLX5_CMD_OP_DRAIN_DCT:
        case MLX5_CMD_OP_QUERY_DCT:
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
        case MLX5_CMD_OP_QUERY_VPORT_STATE:
        case MLX5_CMD_OP_MODIFY_VPORT_STATE:
        case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
        case MLX5_CMD_OP_SET_ROCE_ADDRESS:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
        case MLX5_CMD_OP_ALLOC_PD:
        case MLX5_CMD_OP_ALLOC_UAR:
        case MLX5_CMD_OP_CONFIG_INT_MODERATION:
        case MLX5_CMD_OP_ACCESS_REG:
        case MLX5_CMD_OP_ATTACH_TO_MCG:
        case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
        case MLX5_CMD_OP_MAD_IFC:
        case MLX5_CMD_OP_QUERY_MAD_DEMUX:
        case MLX5_CMD_OP_SET_MAD_DEMUX:
        case MLX5_CMD_OP_NOP:
        case MLX5_CMD_OP_ALLOC_XRCD:
        case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
        case MLX5_CMD_OP_QUERY_CONG_STATUS:
        case MLX5_CMD_OP_MODIFY_CONG_STATUS:
        case MLX5_CMD_OP_QUERY_CONG_PARAMS:
        case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
        case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_CREATE_TIR:
        case MLX5_CMD_OP_MODIFY_TIR:
        case MLX5_CMD_OP_QUERY_TIR:
        case MLX5_CMD_OP_CREATE_SQ:
        case MLX5_CMD_OP_MODIFY_SQ:
        case MLX5_CMD_OP_QUERY_SQ:
        case MLX5_CMD_OP_CREATE_RQ:
        case MLX5_CMD_OP_MODIFY_RQ:
        case MLX5_CMD_OP_QUERY_RQ:
        case MLX5_CMD_OP_CREATE_RMP:
        case MLX5_CMD_OP_MODIFY_RMP:
        case MLX5_CMD_OP_QUERY_RMP:
        case MLX5_CMD_OP_CREATE_TIS:
        case MLX5_CMD_OP_MODIFY_TIS:
        case MLX5_CMD_OP_QUERY_TIS:
        case MLX5_CMD_OP_CREATE_RQT:
        case MLX5_CMD_OP_MODIFY_RQT:
        case MLX5_CMD_OP_QUERY_RQT:
        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
                *status = MLX5_DRIVER_STATUS_ABORTED;
                *synd = MLX5_DRIVER_SYND;
                return -EIO;
        default:
                mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
                return -EINVAL;
        }
}

const char *mlx5_command_str(int command)
{
        switch (command) {
        case MLX5_CMD_OP_QUERY_HCA_CAP:
                return "QUERY_HCA_CAP";

        case MLX5_CMD_OP_SET_HCA_CAP:
                return "SET_HCA_CAP";

        case MLX5_CMD_OP_QUERY_ADAPTER:
                return "QUERY_ADAPTER";

        case MLX5_CMD_OP_INIT_HCA:
                return "INIT_HCA";

        case MLX5_CMD_OP_TEARDOWN_HCA:
                return "TEARDOWN_HCA";

        case MLX5_CMD_OP_ENABLE_HCA:
                return "MLX5_CMD_OP_ENABLE_HCA";

        case MLX5_CMD_OP_DISABLE_HCA:
                return "MLX5_CMD_OP_DISABLE_HCA";

        case MLX5_CMD_OP_QUERY_PAGES:
                return "QUERY_PAGES";

        case MLX5_CMD_OP_MANAGE_PAGES:
                return "MANAGE_PAGES";

        case MLX5_CMD_OP_CREATE_MKEY:
                return "CREATE_MKEY";

        case MLX5_CMD_OP_QUERY_MKEY:
                return "QUERY_MKEY";

        case MLX5_CMD_OP_DESTROY_MKEY:
                return "DESTROY_MKEY";

        case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
                return "QUERY_SPECIAL_CONTEXTS";

        case MLX5_CMD_OP_CREATE_EQ:
                return "CREATE_EQ";

        case MLX5_CMD_OP_DESTROY_EQ:
                return "DESTROY_EQ";

        case MLX5_CMD_OP_QUERY_EQ:
                return "QUERY_EQ";

        case MLX5_CMD_OP_CREATE_CQ:
                return "CREATE_CQ";

        case MLX5_CMD_OP_DESTROY_CQ:
                return "DESTROY_CQ";

        case MLX5_CMD_OP_QUERY_CQ:
                return "QUERY_CQ";

        case MLX5_CMD_OP_MODIFY_CQ:
                return "MODIFY_CQ";

        case MLX5_CMD_OP_CREATE_QP:
                return "CREATE_QP";

        case MLX5_CMD_OP_DESTROY_QP:
                return "DESTROY_QP";

        case MLX5_CMD_OP_RST2INIT_QP:
                return "RST2INIT_QP";

        case MLX5_CMD_OP_INIT2RTR_QP:
                return "INIT2RTR_QP";

        case MLX5_CMD_OP_RTR2RTS_QP:
                return "RTR2RTS_QP";

        case MLX5_CMD_OP_RTS2RTS_QP:
                return "RTS2RTS_QP";

        case MLX5_CMD_OP_SQERR2RTS_QP:
                return "SQERR2RTS_QP";

        case MLX5_CMD_OP_2ERR_QP:
                return "2ERR_QP";

        case MLX5_CMD_OP_2RST_QP:
                return "2RST_QP";

        case MLX5_CMD_OP_QUERY_QP:
                return "QUERY_QP";

        case MLX5_CMD_OP_MAD_IFC:
                return "MAD_IFC";

        case MLX5_CMD_OP_INIT2INIT_QP:
                return "INIT2INIT_QP";

        case MLX5_CMD_OP_CREATE_PSV:
                return "CREATE_PSV";

        case MLX5_CMD_OP_DESTROY_PSV:
                return "DESTROY_PSV";

        case MLX5_CMD_OP_CREATE_SRQ:
                return "CREATE_SRQ";

        case MLX5_CMD_OP_DESTROY_SRQ:
                return "DESTROY_SRQ";

        case MLX5_CMD_OP_QUERY_SRQ:
                return "QUERY_SRQ";

        case MLX5_CMD_OP_ARM_RQ:
                return "ARM_RQ";

        case MLX5_CMD_OP_CREATE_XRC_SRQ:
                return "CREATE_XRC_SRQ";

        case MLX5_CMD_OP_DESTROY_XRC_SRQ:
                return "DESTROY_XRC_SRQ";

        case MLX5_CMD_OP_QUERY_XRC_SRQ:
                return "QUERY_XRC_SRQ";

        case MLX5_CMD_OP_ARM_XRC_SRQ:
                return "ARM_XRC_SRQ";

        case MLX5_CMD_OP_ALLOC_PD:
                return "ALLOC_PD";

        case MLX5_CMD_OP_DEALLOC_PD:
                return "DEALLOC_PD";

        case MLX5_CMD_OP_ALLOC_UAR:
                return "ALLOC_UAR";

        case MLX5_CMD_OP_DEALLOC_UAR:
                return "DEALLOC_UAR";

        case MLX5_CMD_OP_ATTACH_TO_MCG:
                return "ATTACH_TO_MCG";

        case MLX5_CMD_OP_DETTACH_FROM_MCG:
                return "DETTACH_FROM_MCG";

        case MLX5_CMD_OP_ALLOC_XRCD:
                return "ALLOC_XRCD";

        case MLX5_CMD_OP_DEALLOC_XRCD:
                return "DEALLOC_XRCD";

        case MLX5_CMD_OP_ACCESS_REG:
                return "MLX5_CMD_OP_ACCESS_REG";

        default: return "unknown command opcode";
        }
}

static void dump_command(struct mlx5_core_dev *dev,
                         struct mlx5_cmd_work_ent *ent, int input)
{
        u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
        struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
        struct mlx5_cmd_mailbox *next = msg->next;
        int data_only;
        u32 offset = 0;
        int dump_len;

        data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

        if (data_only)
                mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
                                   "dump command data %s(0x%x) %s\n",
                                   mlx5_command_str(op), op,
                                   input ? "INPUT" : "OUTPUT");
        else
                mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
                              mlx5_command_str(op), op,
                              input ? "INPUT" : "OUTPUT");

        if (data_only) {
                if (input) {
                        dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
                        offset += sizeof(ent->lay->in);
                } else {
                        dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
                        offset += sizeof(ent->lay->out);
                }
        } else {
                dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
                offset += sizeof(*ent->lay);
        }

        while (next && offset < msg->len) {
                if (data_only) {
                        dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
                        dump_buf(next->buf, dump_len, 1, offset);
                        offset += MLX5_CMD_DATA_BLOCK_SIZE;
                } else {
                        mlx5_core_dbg(dev, "command block:\n");
                        dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
                        offset += sizeof(struct mlx5_cmd_prot_block);
                }
                next = next->next;
        }

        if (data_only)
                pr_debug("\n");
}
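
/* Runs in workqueue (or caller) context: claims a command slot, builds the
 * HW descriptor from the work entry, rings the doorbell and, in polling
 * mode, waits for ownership to return to software before reporting the
 * completion itself.
 */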
static void cmd_work_handler(struct work_struct *work)
{
        struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
        struct mlx5_cmd *cmd = ent->cmd;
        struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
        struct mlx5_cmd_layout *lay;
        struct semaphore *sem;
        unsigned long flags;

        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
        down(sem);
        if (!ent->page_queue) {
                ent->idx = alloc_ent(cmd);
                if (ent->idx < 0) {
                        mlx5_core_err(dev, "failed to allocate command entry\n");
                        up(sem);
                        return;
                }
        } else {
                ent->idx = cmd->max_reg_cmds;
                spin_lock_irqsave(&cmd->alloc_lock, flags);
                clear_bit(ent->idx, &cmd->bitmask);
                spin_unlock_irqrestore(&cmd->alloc_lock, flags);
        }

        cmd->ent_arr[ent->idx] = ent;
        lay = get_inst(cmd, ent->idx);
        ent->lay = lay;
        memset(lay, 0, sizeof(*lay));
        memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
        ent->op = be32_to_cpu(lay->in[0]) >> 16;
        if (ent->in->next)
                lay->in_ptr = cpu_to_be64(ent->in->next->dma);
        lay->inlen = cpu_to_be32(ent->in->len);
        if (ent->out->next)
                lay->out_ptr = cpu_to_be64(ent->out->next->dma);
        lay->outlen = cpu_to_be32(ent->out->len);
        lay->type = MLX5_PCI_CMD_XPORT;
        lay->token = ent->token;
        lay->status_own = CMD_OWNER_HW;
        set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();

        /* ring doorbell after the descriptor is valid */
        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
        mmiowb();
        /* if not in polling don't use ent after this point */
        if (cmd->mode == CMD_MODE_POLLING) {
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
                rmb();
                mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
        }
}

static const char *deliv_status_to_str(u8 status)
{
        switch (status) {
        case MLX5_CMD_DELIVERY_STAT_OK:
                return "no errors";
        case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
                return "signature error";
        case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
                return "token error";
        case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
                return "bad block number";
        case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
                return "output pointer not aligned to block size";
        case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
                return "input pointer not aligned to block size";
        case MLX5_CMD_DELIVERY_STAT_FW_ERR:
                return "firmware internal error";
        case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
                return "command input length error";
        case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
                return "command output length error";
        case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
                return "reserved fields not cleared";
        case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
                return "bad command descriptor type";
        default:
                return "unknown status code";
        }
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
        struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

        return be16_to_cpu(hdr->opcode);
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
        unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
        struct mlx5_cmd *cmd = &dev->cmd;
        int err;

        if (cmd->mode == CMD_MODE_POLLING) {
                wait_for_completion(&ent->done);
                err = ent->ret;
        } else {
                if (!wait_for_completion_timeout(&ent->done, timeout))
                        err = -ETIMEDOUT;
                else
                        err = ent->ret;
        }
        if (err == -ETIMEDOUT) {
                mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                               mlx5_command_str(msg_to_opcode(ent->in)),
                               msg_to_opcode(ent->in));
        }
        mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
                      err, deliv_status_to_str(ent->status), ent->status);

        return err;
}

static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
{
        return &out->syndrome;
}

static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
{
        return &out->status;
}

/*  Notes:
 *      1. Callback functions may not sleep
 *      2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                           struct mlx5_cmd_msg *out, void *uout, int uout_size,
                           mlx5_cmd_cbk_t callback,
                           void *context, int page_queue, u8 *status,
                           u8 token)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
        struct mlx5_cmd_stats *stats;
        int err = 0;
        s64 ds;
        u16 op;

        if (callback && page_queue)
                return -EINVAL;

        ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
                        page_queue);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        ent->token = token;

        if (!callback)
                init_completion(&ent->done);

        INIT_WORK(&ent->work, cmd_work_handler);
        if (page_queue) {
                cmd_work_handler(&ent->work);
        } else if (!queue_work(cmd->wq, &ent->work)) {
                mlx5_core_warn(dev, "failed to queue work\n");
                err = -ENOMEM;
                goto out_free;
        }

        if (!callback) {
                err = wait_func(dev, ent);
                if (err == -ETIMEDOUT)
                        goto out;

                ds = ent->ts2 - ent->ts1;
                op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
                if (op < ARRAY_SIZE(cmd->stats)) {
                        stats = &cmd->stats[op];
                        spin_lock_irq(&stats->lock);
                        stats->sum += ds;
                        ++stats->n;
                        spin_unlock_irq(&stats->lock);
                }
                mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
                                   "fw exec time for %s is %lld nsec\n",
                                   mlx5_command_str(op), ds);
                *status = ent->status;
                goto out;
        }

        return err;

out_free:
        free_cmd(ent);
out:
        return err;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
                         size_t count, loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        char lbuf[3];
        int err;

        if (!dbg->in_msg || !dbg->out_msg)
                return -ENOMEM;

        if (copy_from_user(lbuf, buf, sizeof(lbuf)))
                return -EFAULT;

        lbuf[sizeof(lbuf) - 1] = 0;

        if (strcmp(lbuf, "go"))
                return -EINVAL;

        err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

        return err ? err : count;
}

static const struct file_operations fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
                            u8 token)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_mailbox *next;
        int copy;

        if (!to || !from)
                return -ENOMEM;

        copy = min_t(int, size, sizeof(to->first.data));
        memcpy(to->first.data, from, copy);
        size -= copy;
        from += copy;

        next = to->next;
        while (size) {
                if (!next) {
                        /* this is a BUG */
                        return -ENOMEM;
                }

                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
                block = next->buf;
                memcpy(block->data, from, copy);
                from += copy;
                size -= copy;
                block->token = token;
                next = next->next;
        }

        return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_mailbox *next;
        int copy;

        if (!to || !from)
                return -ENOMEM;

        copy = min_t(int, size, sizeof(from->first.data));
        memcpy(to, from->first.data, copy);
        size -= copy;
        to += copy;

        next = from->next;
        while (size) {
                if (!next) {
                        /* this is a BUG */
                        return -ENOMEM;
                }

                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
                block = next->buf;

                memcpy(to, block->data, copy);
                to += copy;
                size -= copy;
                next = next->next;
        }

        return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
                                              gfp_t flags)
{
        struct mlx5_cmd_mailbox *mailbox;

        mailbox = kmalloc(sizeof(*mailbox), flags);
        if (!mailbox)
                return ERR_PTR(-ENOMEM);

        mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
                                      &mailbox->dma);
        if (!mailbox->buf) {
                mlx5_core_dbg(dev, "failed allocation\n");
                kfree(mailbox);
                return ERR_PTR(-ENOMEM);
        }
        memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
        mailbox->next = NULL;

        return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
                         struct mlx5_cmd_mailbox *mailbox)
{
        pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
        kfree(mailbox);
}

static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
                                               gfp_t flags, int size,
                                               u8 token)
{
        struct mlx5_cmd_mailbox *tmp, *head = NULL;
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_msg *msg;
        int blen;
        int err;
        int n;
        int i;

        msg = kzalloc(sizeof(*msg), flags);
        if (!msg)
                return ERR_PTR(-ENOMEM);

        blen = size - min_t(int, sizeof(msg->first.data), size);
        n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

        for (i = 0; i < n; i++) {
                tmp = alloc_cmd_box(dev, flags);
                if (IS_ERR(tmp)) {
                        mlx5_core_warn(dev, "failed allocating block\n");
                        err = PTR_ERR(tmp);
                        goto err_alloc;
                }

                block = tmp->buf;
                tmp->next = head;
                block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
                block->block_num = cpu_to_be32(n - i - 1);
                block->token = token;
                head = tmp;
        }
        msg->next = head;
        msg->len = size;
        return msg;

err_alloc:
        while (head) {
                tmp = head->next;
                free_cmd_box(dev, head);
                head = tmp;
        }
        kfree(msg);

        return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
                              struct mlx5_cmd_msg *msg)
{
        struct mlx5_cmd_mailbox *head = msg->next;
        struct mlx5_cmd_mailbox *next;

        while (head) {
                next = head->next;
                free_cmd_box(dev, head);
                head = next;
        }
        kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        void *ptr;
        int err;

        if (*pos != 0)
                return -EINVAL;

        kfree(dbg->in_msg);
        dbg->in_msg = NULL;
        dbg->inlen = 0;

        ptr = kzalloc(count, GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        if (copy_from_user(ptr, buf, count)) {
                err = -EFAULT;
                goto out;
        }
        dbg->in_msg = ptr;
        dbg->inlen = count;

        *pos = count;

        return count;

out:
        kfree(ptr);
        return err;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
                         loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        int copy;

        if (*pos)
                return 0;

        if (!dbg->out_msg)
                return -ENOMEM;

        copy = min_t(int, count, dbg->outlen);
        if (copy_to_user(buf, dbg->out_msg, copy))
                return -EFAULT;

        *pos += copy;

        return copy;
}

static const struct file_operations dfops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = data_write,
        .read   = data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
                           loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        char outlen[8];
        int err;

        if (*pos)
                return 0;

        err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
        if (err < 0)
                return err;

        if (copy_to_user(buf, &outlen, err))
                return -EFAULT;

        *pos += err;

        return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
                            size_t count, loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        char outlen_str[8];
        int outlen;
        void *ptr;
        int err;

        if (*pos != 0 || count > 6)
                return -EINVAL;

        kfree(dbg->out_msg);
        dbg->out_msg = NULL;
        dbg->outlen = 0;

        if (copy_from_user(outlen_str, buf, count))
                return -EFAULT;

        outlen_str[7] = 0;

        err = sscanf(outlen_str, "%d", &outlen);
        if (err < 0)
                return err;

        ptr = kzalloc(outlen, GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        dbg->out_msg = ptr;
        dbg->outlen = outlen;

        *pos = count;

        return count;
}

static const struct file_operations olfops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = outlen_write,
        .read   = outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd *cmd = &dev->cmd;

        snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
                 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

        if (!mlx5_debugfs_root)
                return;

        mlx5_cmdif_debugfs_cleanup(dev);
        debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        int err = -ENOMEM;

        if (!mlx5_debugfs_root)
                return 0;

        dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
        if (!dbg->dbg_root)
                return err;

        dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
                                          dev, &dfops);
        if (!dbg->dbg_in)
                goto err_dbg;

        dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
                                           dev, &dfops);
        if (!dbg->dbg_out)
                goto err_dbg;

        dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
                                              dev, &olfops);
        if (!dbg->dbg_outlen)
                goto err_dbg;

        dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
                                            &dbg->status);
        if (!dbg->dbg_status)
                goto err_dbg;

        dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
        if (!dbg->dbg_run)
                goto err_dbg;

        mlx5_cmdif_debugfs_init(dev);

        return 0;

err_dbg:
        clean_debug_files(dev);
        return err;
}
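
/* Switching between event-driven and polled completions must be done with
 * no command in flight, so both switchers below take every regular-command
 * semaphore plus the page-command semaphore before flipping cmd->mode.
 */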
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        int i;

        for (i = 0; i < cmd->max_reg_cmds; i++)
                down(&cmd->sem);

        down(&cmd->pages_sem);

        flush_workqueue(cmd->wq);

        cmd->mode = CMD_MODE_EVENTS;

        up(&cmd->pages_sem);
        for (i = 0; i < cmd->max_reg_cmds; i++)
                up(&cmd->sem);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        int i;

        for (i = 0; i < cmd->max_reg_cmds; i++)
                down(&cmd->sem);

        down(&cmd->pages_sem);

        flush_workqueue(cmd->wq);
        cmd->mode = CMD_MODE_POLLING;

        up(&cmd->pages_sem);
        for (i = 0; i < cmd->max_reg_cmds; i++)
                up(&cmd->sem);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
        unsigned long flags;

        if (msg->cache) {
                spin_lock_irqsave(&msg->cache->lock, flags);
                list_add_tail(&msg->list, &msg->cache->head);
                spin_unlock_irqrestore(&msg->cache->lock, flags);
        } else {
                mlx5_free_cmd_msg(dev, msg);
        }
}
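
/* Completion handler shared by the EQ path and the polling path. Bit i of
 * @vec corresponds to command slot i; MLX5_TRIGGERED_CMD_COMP marks
 * completions forced by the driver rather than reported by firmware.
 */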
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
        mlx5_cmd_cbk_t callback;
        void *context;
        int err;
        int i;
        s64 ds;
        struct mlx5_cmd_stats *stats;
        unsigned long flags;
        unsigned long vector;

        /* there can be at most 32 command queues */
        vector = vec & 0xffffffff;
        for (i = 0; i < (1 << cmd->log_sz); i++) {
                if (test_bit(i, &vector)) {
                        struct semaphore *sem;

                        ent = cmd->ent_arr[i];
                        if (ent->page_queue)
                                sem = &cmd->pages_sem;
                        else
                                sem = &cmd->sem;
                        ent->ts2 = ktime_get_ns();
                        memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
                        dump_command(dev, ent, 0);
                        if (!ent->ret) {
                                if (!cmd->checksum_disabled)
                                        ent->ret = verify_signature(ent);
                                else
                                        ent->ret = 0;
                                if (vec & MLX5_TRIGGERED_CMD_COMP)
                                        ent->status = MLX5_DRIVER_STATUS_ABORTED;
                                else
                                        ent->status = ent->lay->status_own >> 1;

                                mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
                                              ent->ret, deliv_status_to_str(ent->status), ent->status);
                        }
                        free_ent(cmd, ent->idx);

                        if (ent->callback) {
                                ds = ent->ts2 - ent->ts1;
                                if (ent->op < ARRAY_SIZE(cmd->stats)) {
                                        stats = &cmd->stats[ent->op];
                                        spin_lock_irqsave(&stats->lock, flags);
                                        stats->sum += ds;
                                        ++stats->n;
                                        spin_unlock_irqrestore(&stats->lock, flags);
                                }

                                callback = ent->callback;
                                context = ent->context;
                                err = ent->ret;
                                if (!err)
                                        err = mlx5_copy_from_msg(ent->uout,
                                                                 ent->out,
                                                                 ent->uout_size);

                                mlx5_free_cmd_msg(dev, ent->out);
                                free_msg(dev, ent->in);

                                err = err ? err : ent->status;
                                free_cmd(ent);
                                callback(err, context);
                        } else {
                                complete(&ent->done);
                        }
                        up(sem);
                }
        }
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
        return status ? -1 : 0; /* TBD more meaningful codes */
}
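
/* Serve command messages from the pre-allocated caches when possible: the
 * "med" cache covers sizes up to MED_LIST_SIZE and the "large" cache up to
 * LONG_LIST_SIZE; anything else (or a cache miss) is allocated on demand.
 */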
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
                                      gfp_t gfp)
{
        struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
        struct mlx5_cmd *cmd = &dev->cmd;
        struct cache_ent *ent = NULL;

        if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
                ent = &cmd->cache.large;
        else if (in_size > 16 && in_size <= MED_LIST_SIZE)
                ent = &cmd->cache.med;

        if (ent) {
                spin_lock_irq(&ent->lock);
                if (!list_empty(&ent->head)) {
                        msg = list_entry(ent->head.next, typeof(*msg), list);
                        /* For cached lists, we must explicitly state what is
                         * the real size
                         */
                        msg->len = in_size;
                        list_del(&msg->list);
                }
                spin_unlock_irq(&ent->lock);
        }

        if (IS_ERR(msg))
                msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);

        return msg;
}

static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
{
        return be16_to_cpu(in->opcode);
}

static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
        return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
        struct mlx5_cmd_msg *inb;
        struct mlx5_cmd_msg *outb;
        int pages_queue;
        gfp_t gfp;
        int err;
        u8 status = 0;
        u32 drv_synd;
        u8 token;

        if (pci_channel_offline(dev->pdev) ||
            dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
                *get_synd_ptr(out) = cpu_to_be32(drv_synd);
                *get_status_ptr(out) = status;
                return err;
        }

        pages_queue = is_manage_pages(in);
        gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

        inb = alloc_msg(dev, in_size, gfp);
        if (IS_ERR(inb)) {
                err = PTR_ERR(inb);
                return err;
        }

        token = alloc_token(&dev->cmd);

        err = mlx5_copy_to_msg(inb, in, in_size, token);
        if (err) {
                mlx5_core_warn(dev, "err %d\n", err);
                goto out_in;
        }

        outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
        if (IS_ERR(outb)) {
                err = PTR_ERR(outb);
                goto out_in;
        }

        err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
                              pages_queue, &status, token);
        if (err)
                goto out_out;

        mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
        if (status) {
                err = status_to_err(status);
                goto out_out;
        }

        if (!callback)
                err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
        if (!callback)
                mlx5_free_cmd_msg(dev, outb);

out_in:
        if (!callback)
                free_msg(dev, inb);
        return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size)
{
        return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
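
/* Usage sketch (illustrative only, not part of this file): a typical caller
 * lays out the inbox/outbox with the mlx5_ifc accessors and issues the
 * command synchronously, e.g.:
 *
 *      u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
 *      u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
 *      int err;
 *
 *      MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *      err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *      if (!err)
 *              err = mlx5_cmd_status_to_err_v2(out);
 */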
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
                     void *out, int out_size, mlx5_cmd_cbk_t callback,
                     void *context)
{
        return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_msg *msg;
        struct mlx5_cmd_msg *n;

        list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
                list_del(&msg->list);
                mlx5_free_cmd_msg(dev, msg);
        }

        list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
                list_del(&msg->list);
                mlx5_free_cmd_msg(dev, msg);
        }
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_msg *msg;
        int err;
        int i;

        spin_lock_init(&cmd->cache.large.lock);
        INIT_LIST_HEAD(&cmd->cache.large.head);
        spin_lock_init(&cmd->cache.med.lock);
        INIT_LIST_HEAD(&cmd->cache.med.head);

        for (i = 0; i < NUM_LONG_LISTS; i++) {
                msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
                if (IS_ERR(msg)) {
                        err = PTR_ERR(msg);
                        goto ex_err;
                }
                msg->cache = &cmd->cache.large;
                list_add_tail(&msg->list, &cmd->cache.large.head);
        }

        for (i = 0; i < NUM_MED_LISTS; i++) {
                msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
                if (IS_ERR(msg)) {
                        err = PTR_ERR(msg);
                        goto ex_err;
                }
                msg->cache = &cmd->cache.med;
                list_add_tail(&msg->list, &cmd->cache.med.head);
        }

        return 0;

ex_err:
        destroy_msg_cache(dev);
        return err;
}
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
        struct device *ddev = &dev->pdev->dev;

        cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
                                                 &cmd->alloc_dma, GFP_KERNEL);
        if (!cmd->cmd_alloc_buf)
                return -ENOMEM;

        /* make sure it is aligned to 4K */
        if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
                cmd->cmd_buf = cmd->cmd_alloc_buf;
                cmd->dma = cmd->alloc_dma;
                cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
                return 0;
        }

        dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
                          cmd->alloc_dma);
        cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
                                                 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
                                                 &cmd->alloc_dma, GFP_KERNEL);
        if (!cmd->cmd_alloc_buf)
                return -ENOMEM;

        cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
        cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
        cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
        return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
        struct device *ddev = &dev->pdev->dev;

        dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
                          cmd->alloc_dma);
}

int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
        int size = sizeof(struct mlx5_cmd_prot_block);
        int align = roundup_pow_of_two(size);
        struct mlx5_cmd *cmd = &dev->cmd;
        u32 cmd_h, cmd_l;
        u16 cmd_if_rev;
        int err;
        int i;

        memset(cmd, 0, sizeof(*cmd));
        cmd_if_rev = cmdif_rev(dev);
        if (cmd_if_rev != CMD_IF_REV) {
                dev_err(&dev->pdev->dev,
                        "Driver cmdif rev(%d) differs from firmware's(%d)\n",
                        CMD_IF_REV, cmd_if_rev);
                return -EINVAL;
        }

        cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
        if (!cmd->pool)
                return -ENOMEM;

        err = alloc_cmd_page(dev, cmd);
        if (err)
                goto err_free_pool;

        cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
        cmd->log_sz = cmd_l >> 4 & 0xf;
        cmd->log_stride = cmd_l & 0xf;
        if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
                dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
                        1 << cmd->log_sz);
                err = -EINVAL;
                goto err_free_page;
        }

        if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
                dev_err(&dev->pdev->dev, "command queue size overflow\n");
                err = -EINVAL;
                goto err_free_page;
        }

        cmd->checksum_disabled = 1;
        cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
        cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

        cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
        if (cmd->cmdif_rev > CMD_IF_REV) {
                dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
                        CMD_IF_REV, cmd->cmdif_rev);
                err = -ENOTSUPP;
                goto err_free_page;
        }

        spin_lock_init(&cmd->alloc_lock);
        spin_lock_init(&cmd->token_lock);
        for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
                spin_lock_init(&cmd->stats[i].lock);

        sema_init(&cmd->sem, cmd->max_reg_cmds);
        sema_init(&cmd->pages_sem, 1);

        cmd_h = (u32)((u64)(cmd->dma) >> 32);
        cmd_l = (u32)(cmd->dma);
        if (cmd_l & 0xfff) {
                dev_err(&dev->pdev->dev, "invalid command queue address\n");
                err = -ENOMEM;
                goto err_free_page;
        }

        iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
        iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

        /* Make sure firmware sees the complete address before we proceed */
        wmb();

        mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

        cmd->mode = CMD_MODE_POLLING;

        err = create_msg_cache(dev);
        if (err) {
                dev_err(&dev->pdev->dev, "failed to create command cache\n");
                goto err_free_page;
        }

        set_wqname(dev);
        cmd->wq = create_singlethread_workqueue(cmd->wq_name);
        if (!cmd->wq) {
                dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
                err = -ENOMEM;
                goto err_cache;
        }

        err = create_debugfs_files(dev);
        if (err) {
                err = -ENOMEM;
                goto err_wq;
        }

        return 0;

err_wq:
        destroy_workqueue(cmd->wq);

err_cache:
        destroy_msg_cache(dev);

err_free_page:
        free_cmd_page(dev, cmd);

err_free_pool:
        pci_pool_destroy(cmd->pool);

        return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd *cmd = &dev->cmd;

        clean_debug_files(dev);
        destroy_workqueue(cmd->wq);
        destroy_msg_cache(dev);
        free_cmd_page(dev, cmd);
        pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

static const char *cmd_status_str(u8 status)
{
        switch (status) {
        case MLX5_CMD_STAT_OK:
                return "OK";
        case MLX5_CMD_STAT_INT_ERR:
                return "internal error";
        case MLX5_CMD_STAT_BAD_OP_ERR:
                return "bad operation";
        case MLX5_CMD_STAT_BAD_PARAM_ERR:
                return "bad parameter";
        case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
                return "bad system state";
        case MLX5_CMD_STAT_BAD_RES_ERR:
                return "bad resource";
        case MLX5_CMD_STAT_RES_BUSY:
                return "resource busy";
        case MLX5_CMD_STAT_LIM_ERR:
                return "limits exceeded";
        case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
                return "bad resource state";
        case MLX5_CMD_STAT_IX_ERR:
                return "bad index";
        case MLX5_CMD_STAT_NO_RES_ERR:
                return "no resources";
        case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
                return "bad input length";
        case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
                return "bad output length";
        case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
                return "bad QP state";
        case MLX5_CMD_STAT_BAD_PKT_ERR:
                return "bad packet (discarded)";
        case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
                return "bad size too many outstanding CQEs";
        default:
                return "unknown status";
        }
}

static int cmd_status_to_err(u8 status)
{
        switch (status) {
        case MLX5_CMD_STAT_OK:                          return 0;
        case MLX5_CMD_STAT_INT_ERR:                     return -EIO;
        case MLX5_CMD_STAT_BAD_OP_ERR:                  return -EINVAL;
        case MLX5_CMD_STAT_BAD_PARAM_ERR:               return -EINVAL;
        case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:           return -EIO;
        case MLX5_CMD_STAT_BAD_RES_ERR:                 return -EINVAL;
        case MLX5_CMD_STAT_RES_BUSY:                    return -EBUSY;
        case MLX5_CMD_STAT_LIM_ERR:                     return -ENOMEM;
        case MLX5_CMD_STAT_BAD_RES_STATE_ERR:           return -EINVAL;
        case MLX5_CMD_STAT_IX_ERR:                      return -EINVAL;
        case MLX5_CMD_STAT_NO_RES_ERR:                  return -EAGAIN;
        case MLX5_CMD_STAT_BAD_INP_LEN_ERR:             return -EIO;
        case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:            return -EIO;
        case MLX5_CMD_STAT_BAD_QP_STATE_ERR:            return -EINVAL;
        case MLX5_CMD_STAT_BAD_PKT_ERR:                 return -EINVAL;
        case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:      return -EINVAL;
        default:                                        return -EIO;
        }
}

/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
        if (!hdr->status)
                return 0;

        pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
                cmd_status_str(hdr->status), hdr->status,
                be32_to_cpu(hdr->syndrome));

        return cmd_status_to_err(hdr->status);
}

int mlx5_cmd_status_to_err_v2(void *ptr)
{
        u32 syndrome;
        u8  status;

        status = be32_to_cpu(*(__be32 *)ptr) >> 24;
        if (!status)
                return 0;

        syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

        pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
                cmd_status_str(status), status, syndrome);

        return cmd_status_to_err(status);
}