/**********************************************************************
* Contact: support@cavium.com
*          Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT.  See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information.
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
/** Default configuration
 *  for CN66XX OCTEON Models.
 */
static struct octeon_config default_cn66xx_conf = {
	.card_type = LIO_210SV,
	.card_name = LIO_210SV_NAME,

	/** IQ attributes */
	.iq = {
		.max_iqs = CN6XXX_CFG_IO_QUEUES,
		.pending_list_size =
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type = OCTEON_64BYTE_INSTR,
		.db_min = CN6XXX_DB_MIN,
		.db_timeout = CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq = {
		.max_oqs = CN6XXX_CFG_IO_QUEUES,
		.info_ptr = OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
		.oq_intr_time = CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports = DEFAULT_NUM_NIC_PORTS_66XX,
	.num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,

	/* For ethernet interface 0: Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
		/* SKB size; the buffer size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in up to 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,
		.base_queue = BASE_QUEUE_NOT_REQUESTED,
		.gmx_port_id = 0,
	},

	/* For ethernet interface 1: Port cfg Attributes */
	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
		/* SKB size; the buffer size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in up to 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,
		.base_queue = BASE_QUEUE_NOT_REQUESTED,
		.gmx_port_id = 1,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};
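/* A minimal sketch (not part of the driver) of how the nested defaults above
 * are consumed.  The rest of this file reads them through the CFG_GET_*
 * accessor macros from octeon_config.h rather than dereferencing the struct
 * directly, for example:
 *
 *	struct octeon_config *conf = &default_cn66xx_conf;
 *	u32 tx_descs = CFG_GET_NUM_DEF_TX_DESCS(conf);
 *	u32 rx_descs = CFG_GET_NUM_DEF_RX_DESCS(conf);
 *	u32 rx_buf_size = CFG_GET_DEF_RX_BUF_SIZE(conf);
 *
 * The returned values come straight from the initializers above; the same
 * accessors work for the CN68XX defaults that follow.
 */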
/** Default configuration
 *  for CN68XX OCTEON Model (410NV card).
 */
static struct octeon_config default_cn68xx_conf = {
	.card_type = LIO_410NV,
	.card_name = LIO_410NV_NAME,

	/** IQ attributes */
	.iq = {
		.max_iqs = CN6XXX_CFG_IO_QUEUES,
		.pending_list_size =
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type = OCTEON_64BYTE_INSTR,
		.db_min = CN6XXX_DB_MIN,
		.db_timeout = CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq = {
		.max_oqs = CN6XXX_CFG_IO_QUEUES,
		.info_ptr = OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
		.oq_intr_time = CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX,
	.num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,

	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
		/* SKB size; the buffer size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in up to 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,
		.base_queue = BASE_QUEUE_NOT_REQUESTED,
		.gmx_port_id = 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
		/* SKB size; the buffer size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in up to 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,
		.base_queue = BASE_QUEUE_NOT_REQUESTED,
		.gmx_port_id = 1,
	},

	.nic_if_cfg[2] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
		/* SKB size; the buffer size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in up to 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,
		.base_queue = BASE_QUEUE_NOT_REQUESTED,
		.gmx_port_id = 2,
	},

	.nic_if_cfg[3] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
		/* SKB size; the buffer size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in up to 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,
		.base_queue = BASE_QUEUE_NOT_REQUESTED,
		.gmx_port_id = 3,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};
/** Default configuration
 *  for CN68XX OCTEON Model (210NV card).
 */
static struct octeon_config default_cn68xx_210nv_conf = {
	.card_type = LIO_210NV,
	.card_name = LIO_210NV_NAME,

	/** IQ attributes */
	.iq = {
		.max_iqs = CN6XXX_CFG_IO_QUEUES,
		.pending_list_size =
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type = OCTEON_64BYTE_INSTR,
		.db_min = CN6XXX_DB_MIN,
		.db_timeout = CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq = {
		.max_oqs = CN6XXX_CFG_IO_QUEUES,
		.info_ptr = OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
		.oq_intr_time = CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX_210NV,
	.num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,

	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
		/* SKB size; the buffer size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in up to 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,
		.base_queue = BASE_QUEUE_NOT_REQUESTED,
		.gmx_port_id = 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs = MAX_TXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,
		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs = MAX_RXQS_PER_INTF,
		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,
		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
		/* SKB size; the buffer size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in up to 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,
		.base_queue = BASE_QUEUE_NOT_REQUESTED,
		.gmx_port_id = 1,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};
enum {
	OCTEON_CONFIG_TYPE_DEFAULT = 0,
	NUM_OCTEON_CONFS,
};

static struct octeon_config_ptr {
	u32 conf_type;
} oct_conf_info[MAX_OCTEON_DEVICES] = {
	{ OCTEON_CONFIG_TYPE_DEFAULT },
	{ OCTEON_CONFIG_TYPE_DEFAULT },
	{ OCTEON_CONFIG_TYPE_DEFAULT },
	{ OCTEON_CONFIG_TYPE_DEFAULT },
};

static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
	"BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
	"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
	"DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
	"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
	"INVALID"
};

static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
	"BASE", "NIC", "UNKNOWN"
};

static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
static u32 octeon_device_count;

static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
static void oct_set_config_info(int oct_id, int conf_type)
{
	if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
		conf_type = OCTEON_CONFIG_TYPE_DEFAULT;

	oct_conf_info[oct_id].conf_type = conf_type;
}

void octeon_init_device_list(int conf_type)
{
	int i;

	memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
	for (i = 0; i < MAX_OCTEON_DEVICES; i++)
		oct_set_config_info(i, conf_type);
}

static void *__retrieve_octeon_config_info(struct octeon_device *oct,
					   u16 card_type)
{
	u32 oct_id = oct->octeon_id;
	void *ret = NULL;

	switch (oct_conf_info[oct_id].conf_type) {
	case OCTEON_CONFIG_TYPE_DEFAULT:
		if (oct->chip_id == OCTEON_CN66XX) {
			ret = (void *)&default_cn66xx_conf;
		} else if ((oct->chip_id == OCTEON_CN68XX) &&
			   (card_type == LIO_210NV)) {
			ret = (void *)&default_cn68xx_210nv_conf;
		} else if ((oct->chip_id == OCTEON_CN68XX) &&
			   (card_type == LIO_410NV)) {
			ret = (void *)&default_cn68xx_conf;
		}
		break;
	default:
		break;
	}

	return ret;
}
static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
{
	switch (oct->chip_id) {
	case OCTEON_CN66XX:
	case OCTEON_CN68XX:
		return lio_validate_cn6xxx_config_info(oct, conf);
	default:
		break;
	}

	return 1;
}

void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
{
	void *conf = NULL;

	conf = __retrieve_octeon_config_info(oct, card_type);
	if (!conf)
		return NULL;

	if (__verify_octeon_config_info(oct, conf)) {
		dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
		return NULL;
	}

	return conf;
}

char *lio_get_state_string(atomic_t *state_ptr)
{
	s32 istate = (s32)atomic_read(state_ptr);

	if (istate > OCT_DEV_STATES || istate < 0)
		return oct_dev_state_str[OCT_DEV_STATE_INVALID];

	return oct_dev_state_str[istate];
}

static char *get_oct_app_string(u32 app_mode)
{
	if (app_mode <= CVM_DRV_APP_END)
		return oct_dev_app_str[app_mode - CVM_DRV_APP_START];

	return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
}
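/* Usage sketch (an assumption about the probe path, not code from this file):
 * chip-specific setup asks for a default configuration and only proceeds if
 * it passes validation.
 *
 *	struct octeon_config *conf;
 *
 *	conf = (struct octeon_config *)oct_get_config_info(oct, LIO_210SV);
 *	if (!conf)
 *		return -EINVAL;
 *
 * oct_get_config_info() returns NULL both when no default matches the
 * chip/card combination and when lio_validate_cn6xxx_config_info() rejects
 * the selected configuration.
 */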
int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
			     size_t size)
{
	int ret = 0;
	u8 *p, *buffer;
	u32 crc32_result, image_len, i;
	u64 load_addr;
	struct octeon_firmware_file_header *h;

	if (size < sizeof(struct octeon_firmware_file_header)) {
		dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
			(u32)size,
			(u32)sizeof(struct octeon_firmware_file_header));
		return -EINVAL;
	}

	h = (struct octeon_firmware_file_header *)data;

	if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
		dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
		return -EINVAL;
	}

	crc32_result = crc32(~0, data,
			     sizeof(struct octeon_firmware_file_header) -
			     sizeof(u32)) ^ ~0U;
	if (crc32_result != be32_to_cpu(h->crc32)) {
		dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
			crc32_result, be32_to_cpu(h->crc32));
		return -EINVAL;
	}

	if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
		dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
			LIQUIDIO_VERSION, h->version);
		return -EINVAL;
	}

	if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
		dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
			be32_to_cpu(h->num_images));
		return -EINVAL;
	}

	dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
	snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
		 h->version);

	buffer = kmalloc(size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	memcpy(buffer, data, size);

	p = buffer + sizeof(struct octeon_firmware_file_header);

	/* load all images */
	for (i = 0; i < be32_to_cpu(h->num_images); i++) {
		load_addr = be64_to_cpu(h->desc[i].addr);
		image_len = be32_to_cpu(h->desc[i].len);

		/* validate the image */
		crc32_result = crc32(~0, p, image_len) ^ ~0U;
		if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
			dev_err(&oct->pci_dev->dev,
				"Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
				i, crc32_result,
				be32_to_cpu(h->desc[i].crc32));
			ret = -EINVAL;
			goto done_downloading;
		}

		/* download the image */
		octeon_pci_write_core_mem(oct, load_addr, p, image_len);

		p += image_len;
		dev_dbg(&oct->pci_dev->dev,
			"Downloaded image %d (%d bytes) to address 0x%016llx\n",
			i, image_len, load_addr);
	}

	/* Invoke the bootcmd */
	ret = octeon_console_send_cmd(oct, h->bootcmd, 50);

done_downloading:
	kfree(buffer);

	return ret;
}
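/* Caller sketch (an assumption about the probe path, not code from this
 * file): the firmware image is fetched with the standard kernel firmware
 * loader and handed to octeon_download_firmware() as one flat byte buffer.
 *
 *	const struct firmware *fw;
 *	int ret;
 *
 *	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
 *	if (ret)
 *		return ret;
 *	ret = octeon_download_firmware(oct, fw->data, fw->size);
 *	release_firmware(fw);
 *
 * where fw_name is whatever firmware file name the caller selects.
 */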
void octeon_free_device_mem(struct octeon_device *oct)
{
	u32 i;

	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
		/* could check mask as well */
		vfree(oct->droq[i]);
	}

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
		/* could check mask as well */
		vfree(oct->instr_queue[i]);
	}

	i = oct->octeon_id;
	vfree(oct);
	octeon_device[i] = NULL;
	octeon_device_count--;
}
static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
							u32 priv_size)
{
	struct octeon_device *oct;
	u8 *buf = NULL;
	u32 octdevsize = 0, configsize = 0, size;

	switch (pci_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		configsize = sizeof(struct octeon_cn6xxx);
		break;
	default:
		pr_err("%s: Unknown PCI Device: 0x%x\n",
		       __func__, pci_id);
		return NULL;
	}

	if (configsize & 0x7)
		configsize += (8 - (configsize & 0x7));

	octdevsize = sizeof(struct octeon_device);
	if (octdevsize & 0x7)
		octdevsize += (8 - (octdevsize & 0x7));

	if (priv_size & 0x7)
		priv_size += (8 - (priv_size & 0x7));

	size = octdevsize + priv_size + configsize +
	       (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);

	buf = vmalloc(size);
	if (!buf)
		return NULL;

	memset(buf, 0, size);

	oct = (struct octeon_device *)buf;
	oct->priv = (void *)(buf + octdevsize);
	oct->chip = (void *)(buf + octdevsize + priv_size);
	oct->dispatch.dlist = (struct octeon_dispatch *)
		(buf + octdevsize + priv_size + configsize);

	return oct;
}
struct octeon_device *octeon_allocate_device(u32 pci_id,
					     u32 priv_size)
{
	u32 oct_idx = 0;
	struct octeon_device *oct = NULL;

	for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
		if (!octeon_device[oct_idx])
			break;

	if (oct_idx == MAX_OCTEON_DEVICES)
		return NULL;

	oct = octeon_allocate_device_mem(pci_id, priv_size);
	if (!oct)
		return NULL;

	spin_lock_init(&oct->pci_win_lock);
	spin_lock_init(&oct->mem_access_lock);

	octeon_device_count++;
	octeon_device[oct_idx] = oct;

	oct->octeon_id = oct_idx;
	snprintf(oct->device_name, sizeof(oct->device_name),
		 "LiquidIO%d", (oct->octeon_id));

	return oct;
}
int octeon_setup_instr_queues(struct octeon_device *oct)
{
	u32 i, num_iqs = 0;
	u32 num_descs = 0;

	/* this causes queue 0 to be default queue */
	if (OCTEON_CN6XXX(oct)) {
		num_iqs = 1;
		num_descs =
			CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
	}

	oct->num_iqs = 0;

	for (i = 0; i < num_iqs; i++) {
		oct->instr_queue[i] =
			vmalloc(sizeof(struct octeon_instr_queue));
		if (!oct->instr_queue[i])
			return 1;

		memset(oct->instr_queue[i], 0,
		       sizeof(struct octeon_instr_queue));

		oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
		if (octeon_init_instr_queue(oct, i, num_descs))
			return 1;

		oct->num_iqs++;
	}

	return 0;
}
int octeon_setup_output_queues(struct octeon_device *oct)
{
	u32 i, num_oqs = 0;
	u32 num_descs = 0;
	u32 desc_size = 0;

	/* this causes queue 0 to be default queue */
	if (OCTEON_CN6XXX(oct)) {
		/* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
		num_oqs = 1;
		num_descs =
			CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
		desc_size =
			CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
	}

	oct->num_oqs = 0;

	for (i = 0; i < num_oqs; i++) {
		oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
		if (!oct->droq[i])
			return 1;

		memset(oct->droq[i], 0, sizeof(struct octeon_droq));

		if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
			return 1;

		oct->num_oqs++;
	}

	return 0;
}
void octeon_set_io_queues_off(struct octeon_device *oct)
{
	/* Disable the i/p and o/p queues for this Octeon. */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
}

void octeon_set_droq_pkt_op(struct octeon_device *oct,
			    u32 q_no,
			    u32 enable)
{
	u32 reg_val = 0;

	/* Read the current output-queue enable mask and toggle the bit
	 * for this queue.
	 */
	reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);

	if (enable)
		reg_val = reg_val | (1 << q_no);
	else
		reg_val = reg_val & (~(1 << q_no));

	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
}
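/* Bring-up sketch (an assumption about call ordering, consistent with the
 * helpers above): queues are allocated first and only then enabled per ring,
 * since octeon_set_droq_pkt_op() just read-modify-writes the per-queue bit
 * in CN6XXX_SLI_PKT_OUT_ENB.
 *
 *	if (octeon_setup_instr_queues(oct))
 *		return 1;
 *	if (octeon_setup_output_queues(oct))
 *		return 1;
 *	octeon_set_droq_pkt_op(oct, 0, 1);
 *
 * The last call enables output queue 0; passing 0 as the third argument
 * clears the same bit again.
 */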
int octeon_init_dispatch_list(struct octeon_device *oct)
{
	u32 i;

	oct->dispatch.count = 0;

	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
		oct->dispatch.dlist[i].opcode = 0;
		INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
	}

	for (i = 0; i <= REQTYPE_LAST; i++)
		octeon_register_reqtype_free_fn(oct, i, NULL);

	spin_lock_init(&oct->dispatch.lock);

	return 0;
}
void octeon_delete_dispatch_list(struct octeon_device *oct)
{
	u32 i;
	struct list_head freelist, *temp, *tmp2;

	INIT_LIST_HEAD(&freelist);

	spin_lock_bh(&oct->dispatch.lock);

	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
		struct list_head *dispatch;

		dispatch = &oct->dispatch.dlist[i].list;
		while (dispatch->next != dispatch) {
			temp = dispatch->next;
			list_del(temp);
			list_add_tail(temp, &freelist);
		}

		oct->dispatch.dlist[i].opcode = 0;
	}

	oct->dispatch.count = 0;

	spin_unlock_bh(&oct->dispatch.lock);

	list_for_each_safe(temp, tmp2, &freelist) {
		list_del(temp);
		vfree(temp);
	}
}
octeon_dispatch_fn_t
octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
		    u16 subcode)
{
	u32 idx;
	struct list_head *dispatch;
	octeon_dispatch_fn_t fn = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn = ((struct octeon_dispatch *)
				      dispatch)->dispatch_fn;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);

	return fn;
}
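/* Lookup sketch: the dispatch table is a small hash keyed by the combined
 * opcode/subcode value.  The first registration for an index lives directly
 * in dlist[idx]; later registrations that hash to the same index are kept on
 * dlist[idx].list and found by the list walk above.  In outline:
 *
 *	u16 combined = OPCODE_SUBCODE(opcode, subcode);
 *	u32 idx = combined & OCTEON_OPCODE_MASK;
 *
 * then dlist[idx].dispatch_fn is returned on an exact opcode match, and the
 * collision list is searched otherwise (pseudo-code; the real lookup is
 * octeon_get_dispatch() above).
 */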
/* octeon_register_dispatch_fn
 * Parameters:
 *   oct       - the octeon device.
 *   opcode    - opcode for which driver should call the registered function
 *   subcode   - subcode for which driver should call the registered function
 *   fn        - The function to call when a packet with "opcode" arrives in
 *               octeon output queues.
 *   fn_arg    - The argument to be passed when calling function "fn".
 * Description:
 *   Registers a function and its argument to be called when a packet
 *   arrives in Octeon output queues with "opcode".
 * Returns:
 *   Success: 0
 *   Failure: 1
 * Locks:
 *   No locks are held.
 */
int
octeon_register_dispatch_fn(struct octeon_device *oct,
			    u16 opcode,
			    u16 subcode,
			    octeon_dispatch_fn_t fn, void *fn_arg)
{
	u32 idx;
	octeon_dispatch_fn_t pfn;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&oct->dispatch.lock);
	/* Add dispatch function to first level of lookup table */
	if (oct->dispatch.dlist[idx].opcode == 0) {
		oct->dispatch.dlist[idx].opcode = combined_opcode;
		oct->dispatch.dlist[idx].dispatch_fn = fn;
		oct->dispatch.dlist[idx].arg = fn_arg;
		oct->dispatch.count++;
		spin_unlock_bh(&oct->dispatch.lock);
		return 0;
	}

	spin_unlock_bh(&oct->dispatch.lock);

	/* Check if there was a function already registered for this
	 * opcode/subcode.
	 */
	pfn = octeon_get_dispatch(oct, opcode, subcode);
	if (!pfn) {
		struct octeon_dispatch *dispatch;

		dev_dbg(&oct->pci_dev->dev,
			"Adding opcode to dispatch list linked list\n");
		dispatch = (struct octeon_dispatch *)
			   vmalloc(sizeof(struct octeon_dispatch));
		if (!dispatch) {
			dev_err(&oct->pci_dev->dev,
				"No memory to add dispatch function\n");
			return 1;
		}
		dispatch->opcode = combined_opcode;
		dispatch->dispatch_fn = fn;
		dispatch->arg = fn_arg;

		/* Add dispatch function to linked list of fn ptrs
		 * at the hashed index.
		 */
		spin_lock_bh(&oct->dispatch.lock);
		list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
		oct->dispatch.count++;
		spin_unlock_bh(&oct->dispatch.lock);
	} else {
		dev_err(&oct->pci_dev->dev,
			"Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
			opcode, subcode);
		return 1;
	}

	return 0;
}
/* octeon_unregister_dispatch_fn
 * Parameters:
 *   oct       - the octeon device.
 *   opcode    - driver should unregister the function for this opcode
 *   subcode   - driver should unregister the function for this subcode
 * Description:
 *   Unregister the function set for this opcode+subcode.
 * Returns:
 *   Success: 0
 *   Failure: 1
 * Locks:
 *   No locks are held.
 */
int
octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
			      u16 subcode)
{
	int retval = 0;
	u32 idx;
	struct list_head *dispatch, *dfree = NULL, *tmp2;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&oct->dispatch.lock);

	if (oct->dispatch.count == 0) {
		spin_unlock_bh(&oct->dispatch.lock);
		dev_err(&oct->pci_dev->dev,
			"No dispatch functions registered for this device\n");
		return 1;
	}

	if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
		dispatch = &oct->dispatch.dlist[idx].list;
		if (dispatch->next != dispatch) {
			dispatch = dispatch->next;
			oct->dispatch.dlist[idx].opcode =
				((struct octeon_dispatch *)dispatch)->opcode;
			oct->dispatch.dlist[idx].dispatch_fn =
				((struct octeon_dispatch *)
				 dispatch)->dispatch_fn;
			oct->dispatch.dlist[idx].arg =
				((struct octeon_dispatch *)dispatch)->arg;
			list_del(dispatch);
			dfree = dispatch;
		} else {
			oct->dispatch.dlist[idx].opcode = 0;
			oct->dispatch.dlist[idx].dispatch_fn = NULL;
			oct->dispatch.dlist[idx].arg = NULL;
		}
	} else {
		retval = 1;
		list_for_each_safe(dispatch, tmp2,
				   &oct->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				retval = 0;
				list_del(dispatch);
				dfree = dispatch;
				break;
			}
		}
	}

	if (!retval)
		oct->dispatch.count--;

	spin_unlock_bh(&oct->dispatch.lock);
	vfree(dfree);

	return retval;
}
int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
{
	u32 i;
	char app_name[16];
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	struct octeon_core_setup *cs = NULL;
	u32 num_nic_ports = 0;

	if (OCTEON_CN6XXX(oct))
		num_nic_ports =
			CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));

	if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
		dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
			atomic_read(&oct->status));
		goto core_drv_init_err;
	}

	strncpy(app_name,
		get_oct_app_string(
			(u32)recv_pkt->rh.r_core_drv_init.app_mode),
		sizeof(app_name) - 1);
	oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
	if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
		oct->fw_info.max_nic_ports =
			(u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
		oct->fw_info.num_gmx_ports =
			(u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
	}

	if (oct->fw_info.max_nic_ports < num_nic_ports) {
		dev_err(&oct->pci_dev->dev,
			"Config has more ports than firmware allows (%d > %d).\n",
			num_nic_ports, oct->fw_info.max_nic_ports);
		goto core_drv_init_err;
	}

	oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
	oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;

	atomic_set(&oct->status, OCT_DEV_CORE_OK);

	cs = &core_setup[oct->octeon_id];

	if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
		dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
			(u32)sizeof(*cs),
			recv_pkt->buffer_size[0]);
	}

	memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
	strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
	strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
		OCT_SERIAL_LEN);

	octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));

	oct->boardinfo.major = cs->board_rev_major;
	oct->boardinfo.minor = cs->board_rev_minor;

	dev_info(&oct->pci_dev->dev,
		 "Running %s (%llu Hz)\n",
		 app_name, CVM_CAST64(cs->corefreq));

core_drv_init_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);

	return 0;
}
int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
{
	if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
	    (oct->io_qmask.iq & (1UL << q_no)))
		return oct->instr_queue[q_no]->max_count;

	return -1;
}

int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
{
	if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
	    (oct->io_qmask.oq & (1UL << q_no)))
		return oct->droq[q_no]->max_count;

	return -1;
}
/* Returns the OCTEON-specific configuration used in the host/firmware
 * handshake.
 */
struct octeon_config *octeon_get_conf(struct octeon_device *oct)
{
	struct octeon_config *default_oct_conf = NULL;

	/* check the OCTEON Device model & return the corresponding octeon
	 * configuration
	 */
	if (OCTEON_CN6XXX(oct)) {
		default_oct_conf =
			(struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
	}

	return default_oct_conf;
}
/* The scratch register address is the same on all OCTEON-II and CN70XX models */
#define CNXX_SLI_SCRATCH1   0x3C0

/** Get the octeon device pointer.
 *  @param octeon_id - The id for which the octeon device pointer is required.
 *  @return Success: Octeon device pointer.
 *  @return Failure: NULL.
 */
struct octeon_device *lio_get_device(u32 octeon_id)
{
	if (octeon_id >= MAX_OCTEON_DEVICES)
		return NULL;

	return octeon_device[octeon_id];
}
u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
{
	u64 val64;
	unsigned long flags;
	u32 val32, addrhi;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	/* The windowed read happens when the LSB of the addr is written.
	 * So write MSB first
	 */
	addrhi = (addr >> 32);
	if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
		addrhi |= 0x00060000;
	writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);

	/* Read back to preserve ordering of writes */
	val32 = readl(oct->reg_list.pci_win_rd_addr_hi);

	writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
	val32 = readl(oct->reg_list.pci_win_rd_addr_lo);

	val64 = readq(oct->reg_list.pci_win_rd_data);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);

	return val64;
}
void lio_pci_writeq(struct octeon_device *oct,
		    u64 val,
		    u64 addr)
{
	u32 val32;
	unsigned long flags;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	writeq(addr, oct->reg_list.pci_win_wr_addr);

	/* The write happens when the LSB is written. So write MSB first. */
	writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
	/* Read the MSB to ensure ordering of writes. */
	val32 = readl(oct->reg_list.pci_win_wr_data_hi);

	writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);
}
int octeon_mem_access_ok(struct octeon_device *oct)
{
	u64 access_okay = 0;

	/* Check to make sure a DDR interface is enabled */
	u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);

	access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);

	return access_okay ? 0 : 1;
}
int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
{
	int ret = 1;
	u32 ms;

	if (!timeout)
		return ret;

	while (*timeout == 0)
		schedule_timeout_uninterruptible(HZ / 10);

	for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
	     ms += HZ / 10) {
		ret = octeon_mem_access_ok(oct);

		/* wait 100 ms */
		if (ret)
			schedule_timeout_uninterruptible(HZ / 10);
	}

	return ret;
}
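/* Caller sketch: octeon_wait_for_ddr_init() polls octeon_mem_access_ok()
 * until DDR comes out of reset or the timeout (in ms) expires, e.g.
 *
 *	u32 ddr_timeout = 1000;
 *
 *	if (octeon_wait_for_ddr_init(oct, &ddr_timeout))
 *		return 1;
 *
 * The timeout is read through the pointer on every iteration, so a value of
 * 0 makes the function block until some other context stores a non-zero
 * value there.
 */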
/** Get the octeon id assigned to the octeon device passed as argument.
 *  This function is exported to other modules.
 *  @param dev - octeon device pointer passed as a void *.
 *  @return octeon device id
 */
int lio_get_device_id(void *dev)
{
	struct octeon_device *octeon_dev = (struct octeon_device *)dev;
	u32 i;

	for (i = 0; i < MAX_OCTEON_DEVICES; i++)
		if (octeon_device[i] == octeon_dev)
			return octeon_dev->octeon_id;

	return -1;
}