+static int blocked_fl_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
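+/* Show the bitmap of blocked Free Lists as a comma-separated hex string. */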
+static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int len;
+ const struct adapter *adap = filp->private_data;
+ char *buf;
+ ssize_t size = (adap->sge.egr_sz + 3) / 4 +
+ adap->sge.egr_sz / 32 + 2; /* includes ,/\n/\0 */
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, size, "%*pb\n",
+ adap->sge.egr_sz, adap->sge.blocked_fl);
+ size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
+ t4_free_mem(buf);
+ return size;
+}
+
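+/* Replace the blocked Free List bitmap with one parsed from user space. */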
+static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int err;
+ unsigned long *t;
+ struct adapter *adap = filp->private_data;
+
+ t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
+ if (err) {
+ t4_free_mem(t);
+ return err;
+ }
+
+ bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
+ t4_free_mem(t);
+ return count;
+}
+
+static const struct file_operations blocked_fl_fops = {
+ .owner = THIS_MODULE,
+ .open = blocked_fl_open,
+ .read = blocked_fl_read,
+ .write = blocked_fl_write,
+ .llseek = generic_file_llseek,
+};
+
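+/* Describes one contiguous memory region: [base, limit] plus an index into
+ * the region name table; entries with an out-of-range idx are hidden.
+ */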
+struct mem_desc {
+ unsigned int base;
+ unsigned int limit;
+ unsigned int idx;
+};
+
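+/* Comparator for sort(): order memory regions by ascending base address. */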
+static int mem_desc_cmp(const void *a, const void *b)
+{
+ return ((const struct mem_desc *)a)->base -
+ ((const struct mem_desc *)b)->base;
+}
+
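+/* Print one "name base-limit [size]" line with a human-readable size. */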
+static void mem_region_show(struct seq_file *seq, const char *name,
+ unsigned int from, unsigned int to)
+{
+ char buf[40];
+
+ string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf,
+ sizeof(buf));
+ seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf);
+}
+
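+/* Show how the on-adapter memories (EDC/MC) are populated and how the
+ * various hardware regions are laid out within them.
+ */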
+static int meminfo_show(struct seq_file *seq, void *v)
+{
+ static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
+ "MC0:", "MC1:"};
+ static const char * const region[] = {
+ "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+ "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+ "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+ "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+ "RQUDP region:", "PBL region:", "TXPBL region:",
+ "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+ "On-chip queues:"
+ };
+
+ int i, n;
+ u32 lo, hi, used, alloc;
+ struct mem_desc avail[4];
+ struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
+ struct mem_desc *md = mem;
+ struct adapter *adap = seq->private;
+
+ for (i = 0; i < ARRAY_SIZE(mem); i++) {
+ mem[i].limit = 0;
+ mem[i].idx = i;
+ }
+
+ /* Find and sort the populated memory ranges */
+ i = 0;
+ lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (lo & EDRAM0_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+ avail[i].base = EDRAM0_BASE_G(hi) << 20;
+ avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
+ avail[i].idx = 0;
+ i++;
+ }
+ if (lo & EDRAM1_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+ avail[i].base = EDRAM1_BASE_G(hi) << 20;
+ avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
+ avail[i].idx = 1;
+ i++;
+ }
+
+ if (is_t5(adap->params.chip)) {
+ if (lo & EXT_MEM0_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
+ avail[i].idx = 3;
+ i++;
+ }
+ if (lo & EXT_MEM1_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
+ avail[i].idx = 4;
+ i++;
+ }
+ } else {
+ if (lo & EXT_MEM_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+ avail[i].base = EXT_MEM_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
+ avail[i].idx = 2;
+ i++;
+ }
+ }
+ if (!i) /* no memory available */
+ return 0;
+ sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
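+ /* these regions provide only a base; limits are derived after sorting */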
+ (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
+
+ /* the next few have explicit upper bounds */
+ md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
+ PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
+ md++;
+
+ md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
+ PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
+ md++;
+
+ if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
+ hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
+ md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+ } else {
+ hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+ md->base = t4_read_reg(adap,
+ LE_DB_HASH_TBL_BASE_ADDR_A);
+ }
+ md->limit = 0;
+ } else {
+ md->base = 0;
+ md->idx = ARRAY_SIZE(region); /* hide it */
+ }
+ md++;
+
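+ /* each ULP region is bounded by an LLIMIT/ULIMIT register pair */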
+#define ulp_region(reg) do { \
+ md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
+ (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
+} while (0)
+
+ ulp_region(RX_ISCSI);
+ ulp_region(RX_TDDP);
+ ulp_region(TX_TPT);
+ ulp_region(RX_STAG);
+ ulp_region(RX_RQ);
+ ulp_region(RX_RQUDP);
+ ulp_region(RX_PBL);
+ ulp_region(TX_PBL);
+#undef ulp_region
+ md->base = 0;
+ md->idx = ARRAY_SIZE(region);
+ if (!is_t4(adap->params.chip)) {
+ u32 size = 0;
+ u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
+ u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
+
+ if (is_t5(adap->params.chip)) {
+ if (sge_ctrl & VFIFO_ENABLE_F)
+ size = DBVFIFO_SIZE_G(fifo_size);
+ } else {
+ size = T6_DBVFIFO_SIZE_G(fifo_size);
+ }
+
+ if (size) {
+ md->base = BASEADDR_G(t4_read_reg(adap,
+ SGE_DBVFIFO_BADDR_A));
+ md->limit = md->base + (size << 2) - 1;
+ }
+ }
+
+ md++;
+
+ md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
+ md->limit = 0;
+ md++;
+ md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
+ md->limit = 0;
+ md++;
+
+ md->base = adap->vres.ocq.start;
+ if (adap->vres.ocq.size)
+ md->limit = md->base + adap->vres.ocq.size - 1;
+ else
+ md->idx = ARRAY_SIZE(region); /* hide it */
+ md++;
+
+ /* add any address-space holes, there can be up to 3 */
+ for (n = 0; n < i - 1; n++)
+ if (avail[n].limit < avail[n + 1].base)
+ (md++)->base = avail[n].limit;
+ if (avail[n].limit)
+ (md++)->base = avail[n].limit;
+
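+ /* sort everything collected so far, holes included, by base address */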
+ n = md - mem;
+ sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+ for (lo = 0; lo < i; lo++)
+ mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
+ avail[lo].limit - 1);
+
+ seq_putc(seq, '\n');
+ for (i = 0; i < n; i++) {
+ if (mem[i].idx >= ARRAY_SIZE(region))
+ continue; /* skip holes */
+ if (!mem[i].limit)
+ mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
+ mem_region_show(seq, region[mem[i].idx], mem[i].base,
+ mem[i].limit);
+ }
+
+ seq_putc(seq, '\n');
+ lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
+ hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+ mem_region_show(seq, "uP RAM:", lo, hi);
+
+ lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
+ hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+ mem_region_show(seq, "uP Extmem2:", lo, hi);
+
+ lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
+ seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
+ PMRXMAXPAGE_G(lo),
+ t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
+ (lo & PMRXNUMCHN_F) ? 2 : 1);
+
+ lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
+ hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
+ seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
+ PMTXMAXPAGE_G(lo),
+ hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
+ hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
+ seq_printf(seq, "%u p-structs\n\n",
+ t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
+
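+ /* Rx page usage and allocation for each port's buffer group */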
+ for (i = 0; i < 4; i++) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
+ if (is_t5(adap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ /* For T6 these are MAC buffer groups */
+ seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
+ i, used, alloc);
+ }
+ for (i = 0; i < adap->params.arch.nchan; i++) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(adap,
+ MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
+ if (is_t5(adap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ /* For T6 these are MAC buffer groups */
+ seq_printf(seq,
+ "Loopback %d using %u pages out of %u allocated\n",
+ i, used, alloc);
+ }
+ return 0;
+}
+
+static int meminfo_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, meminfo_show, inode->i_private);
+}
+
+static const struct file_operations meminfo_fops = {
+ .owner = THIS_MODULE,
+ .open = meminfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};