3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2015 Intel Corporation.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
21 * Copyright(c) 2015 Intel Corporation.
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 #include <linux/delay.h>
52 #include <linux/pci.h>
53 #include <linux/vmalloc.h>
59 * QSFP support for hfi driver, using "Two Wire Serial Interface" driver
62 #define I2C_MAX_RETRY 4
65 * Unlocked i2c write. Must hold dd->qsfp_i2c_mutex.
/*
 * __i2c_write - unlocked low-level write on the QSFP "Two Wire Serial
 * Interface" (i2c) bus.  Caller must hold dd->qsfp_i2c_mutex; the
 * locked wrapper is i2c_write().
 *
 * @ppd:      port data (device taken from ppd->dd)
 * @target:   selects which TWSI bus/HFI the transaction targets
 * @i2c_addr: i2c device address on the bus
 * @offset:   register offset within the device
 * @bp:       source buffer
 * @len:      number of bytes to write
 *
 * NOTE(review): several original lines are elided in this view; the
 * error-path returns and the final return value are not visible here.
 */
67 static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
68 int offset, void *bp, int len)
70 struct hfi1_devdata *dd = ppd->dd;
/* Reset first so a previously wedged TWSI bus cannot fail this write. */
74 /* Make sure TWSI bus is in sane state. */
75 ret = hfi1_twsi_reset(dd, target);
77 hfi1_dev_porterr(dd, ppd->port,
78 "I2C interface Reset for write failed\n");
86 ret = hfi1_twsi_blk_wr(dd, target, i2c_addr, offset,
89 /* hfi1_twsi_blk_wr() 1 for error, else 0 */
96 /* Must wait min 20us between qsfp i2c transactions */
/*
 * i2c_write - locked wrapper around __i2c_write().
 *
 * Acquires dd->qsfp_i2c_mutex interruptibly (a pending signal makes
 * mutex_lock_interruptible() fail; the check of that return value is
 * elided in this view), performs the write, then releases the mutex.
 * Returns the result of __i2c_write() on success.
 */
102 int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
105 struct hfi1_devdata *dd = ppd->dd;
108 ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex);
110 ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len);
111 mutex_unlock(&dd->qsfp_i2c_mutex);
118 * Unlocked i2c read. Must hold dd->qsfp_i2c_mutex.
/*
 * __i2c_read - unlocked low-level read on the QSFP i2c bus.
 * Caller must hold dd->qsfp_i2c_mutex; the locked wrapper is i2c_read().
 *
 * @ppd:      port data (device taken from ppd->dd)
 * @target:   selects which TWSI bus/HFI the transaction targets
 * @i2c_addr: i2c device address on the bus
 * @offset:   register offset within the device
 * @bp:       destination buffer
 * @len:      number of bytes to read
 *
 * Some modules fail the very first read after a bus reset, so the first
 * chunk (cnt == 0) is retried up to I2C_MAX_RETRY times before giving up.
 *
 * NOTE(review): loop body and error-path lines are elided in this view;
 * cnt appears to track bytes already read across chunks.
 */
120 static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
121 int offset, void *bp, int len)
123 struct hfi1_devdata *dd = ppd->dd;
124 int ret, cnt, pass = 0;
/* Reset first so a previously wedged TWSI bus cannot fail this read. */
128 /* Make sure TWSI bus is in sane state. */
129 ret = hfi1_twsi_reset(dd, target);
131 hfi1_dev_porterr(dd, ppd->port,
132 "I2C interface Reset for read failed\n");
/* rlen = bytes still outstanding for this chunked transfer */
140 int rlen = len - cnt;
142 ret = hfi1_twsi_blk_rd(dd, target, i2c_addr, offset,
144 /* Some QSFP's fail first try. Retry as experiment */
145 if (ret && cnt == 0 && ++pass < I2C_MAX_RETRY)
148 /* hfi1_twsi_blk_rd() 1 for error, else 0 */
160 dd_dev_err(dd, "I2C interface bus stuck non-idle\n");
/* Report whether the retries ultimately failed or eventually succeeded. */
162 if (pass >= I2C_MAX_RETRY && ret)
163 hfi1_dev_porterr(dd, ppd->port,
164 "I2C failed even retrying\n");
166 hfi1_dev_porterr(dd, ppd->port, "I2C retries: %d\n", pass);
168 /* Must wait min 20us between qsfp i2c transactions */
/*
 * i2c_read - locked wrapper around __i2c_read().
 *
 * Acquires dd->qsfp_i2c_mutex interruptibly (a pending signal makes
 * mutex_lock_interruptible() fail; the check of that return value is
 * elided in this view), performs the read, then releases the mutex.
 * Returns the result of __i2c_read() on success.
 */
174 int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
177 struct hfi1_devdata *dd = ppd->dd;
180 ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex);
182 ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len);
183 mutex_unlock(&dd->qsfp_i2c_mutex);
/*
 * qsfp_write - write @len bytes to QSFP memory starting at flat address
 * @addr, handling the module's page-select mechanism.
 *
 * For each chunk: select the page that @addr falls in by writing the
 * page number to QSFP_PAGE_SELECT_BYTE_OFFS, then write up to the end
 * of that page, advancing until @len bytes are done or an error occurs.
 * Holds qsfp_i2c_mutex across the whole multi-chunk operation so the
 * page selection and data write are atomic with respect to other users.
 *
 * NOTE(review): the loop tail (count/addr advancement) and the final
 * return are elided in this view.
 */
189 int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
198 ret = mutex_lock_interruptible(&ppd->dd->qsfp_i2c_mutex);
202 while (count < len) {
204 * Set the qsfp page based on a zero-based address
205 * and a page size of QSFP_PAGESIZE bytes.
207 page = (u8)(addr / QSFP_PAGESIZE);
209 ret = __i2c_write(ppd, target, QSFP_DEV,
210 QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
215 "can't write QSFP_PAGE_SELECT_BYTE: %d\n", ret);
220 /* truncate write to end of page if crossing page boundary */
221 offset = addr % QSFP_PAGESIZE;
222 nwrite = len - count;
223 if ((offset + nwrite) > QSFP_PAGESIZE)
224 nwrite = QSFP_PAGESIZE - offset;
226 ret = __i2c_write(ppd, target, QSFP_DEV, offset, bp + count,
228 if (ret <= 0) /* stop on error or nothing written */
235 mutex_unlock(&ppd->dd->qsfp_i2c_mutex);
/*
 * qsfp_read - read @len bytes from QSFP memory starting at flat address
 * @addr, handling the module's page-select mechanism.
 *
 * Mirror of qsfp_write(): per chunk, select the page containing @addr
 * via QSFP_PAGE_SELECT_BYTE_OFFS, then read up to the end of that page.
 * Holds qsfp_i2c_mutex across the whole multi-chunk operation so the
 * page selection and data read are atomic with respect to other users.
 *
 * NOTE(review): nread initialization, the loop tail and the final
 * return are elided in this view.
 */
242 int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
251 ret = mutex_lock_interruptible(&ppd->dd->qsfp_i2c_mutex);
255 while (count < len) {
257 * Set the qsfp page based on a zero-based address
258 * and a page size of QSFP_PAGESIZE bytes.
260 page = (u8)(addr / QSFP_PAGESIZE);
261 ret = __i2c_write(ppd, target, QSFP_DEV,
262 QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
267 "can't write QSFP_PAGE_SELECT_BYTE: %d\n", ret);
272 /* truncate read to end of page if crossing page boundary */
273 offset = addr % QSFP_PAGESIZE;
275 if ((offset + nread) > QSFP_PAGESIZE)
276 nread = QSFP_PAGESIZE - offset;
278 ret = __i2c_read(ppd, target, QSFP_DEV, offset, bp + count,
280 if (ret <= 0) /* stop on error or nothing read */
287 mutex_unlock(&ppd->dd->qsfp_i2c_mutex);
295 * This function caches the QSFP memory range in 128 byte chunks.
296 * As an example, the next byte after address 255 is byte 128 from
297 * upper page 01H (if existing) rather than byte 0 from lower page 00H.
/*
 * Reads lower+upper page 00H (256 bytes), validates the identifier
 * byte, then pulls in whichever optional upper pages the module
 * advertises.  On success, marks the cache valid under qsfp_lock; any
 * failure path zeroes the cache again.  NOTE(review): the early-return
 * and goto/bail lines are elided in this view.
 */
299 int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
301 u32 target = ppd->dd->hfi1_id;
304 u8 *cache = &cp->cache[0];
306 /* ensure sane contents on invalid reads, for cable swaps */
307 memset(cache, 0, (QSFP_MAX_NUM_PAGES*128));
308 dd_dev_info(ppd->dd, "%s: called\n", __func__);
/* No module plugged in: nothing to cache. */
309 if (!qsfp_mod_present(ppd)) {
/* Base pages: lower page 00H + upper page 00H, 256 bytes total. */
314 ret = qsfp_read(ppd, target, 0, cache, 256);
317 "%s: Read of pages 00H failed, expected 256, got %d\n",
/* Identifier byte must be 0x0C or 0x0D (QSFP/QSFP+ family). */
322 if (cache[0] != 0x0C && cache[0] != 0x0D)
325 /* Is paging enabled? */
326 if (!(cache[2] & 4)) {
/*
 * Byte 195 flags advertise which optional upper pages exist;
 * each present page is read into the next 128-byte cache slot.
 * NOTE(review): exact bit-to-page mapping (0x80/0x40) taken from
 * the branch comments below — confirm against the QSFP spec.
 */
328 /* Paging enabled, page 03 required */
329 if ((cache[195] & 0xC0) == 0xC0) {
331 ret = qsfp_read(ppd, target, 384, cache + 256, 128);
332 if (ret <= 0 || ret != 128) {
333 dd_dev_info(ppd->dd, "%s: failed\n", __func__);
336 ret = qsfp_read(ppd, target, 640, cache + 384, 128);
337 if (ret <= 0 || ret != 128) {
338 dd_dev_info(ppd->dd, "%s: failed\n", __func__);
341 ret = qsfp_read(ppd, target, 896, cache + 512, 128);
342 if (ret <= 0 || ret != 128) {
343 dd_dev_info(ppd->dd, "%s: failed\n", __func__);
346 } else if ((cache[195] & 0x80) == 0x80) {
347 /* only page 2 and 3 */
348 ret = qsfp_read(ppd, target, 640, cache + 384, 128);
349 if (ret <= 0 || ret != 128) {
350 dd_dev_info(ppd->dd, "%s: failed\n", __func__);
353 ret = qsfp_read(ppd, target, 896, cache + 512, 128);
354 if (ret <= 0 || ret != 128) {
355 dd_dev_info(ppd->dd, "%s: failed\n", __func__);
358 } else if ((cache[195] & 0x40) == 0x40) {
359 /* only page 1 and 3 */
360 ret = qsfp_read(ppd, target, 384, cache + 256, 128);
361 if (ret <= 0 || ret != 128) {
362 dd_dev_info(ppd->dd, "%s: failed\n", __func__);
365 ret = qsfp_read(ppd, target, 896, cache + 512, 128);
366 if (ret <= 0 || ret != 128) {
367 dd_dev_info(ppd->dd, "%s: failed\n", __func__);
372 ret = qsfp_read(ppd, target, 896, cache + 512, 128);
373 if (ret <= 0 || ret != 128) {
374 dd_dev_info(ppd->dd, "%s: failed\n", __func__);
/* Publish the refreshed cache atomically w.r.t. qsfp_lock readers. */
380 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
381 ppd->qsfp_info.cache_valid = 1;
382 ppd->qsfp_info.cache_refresh_required = 0;
383 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
/* Failure path: never leave stale bytes behind for readers. */
388 memset(cache, 0, (QSFP_MAX_NUM_PAGES*128));
/*
 * Human-readable names for the QSFP transmitter-technology code
 * (upper nibble of the byte at QSFP_MOD_TECH_OFFS); indexed 0-15
 * by qsfp_dump() below.
 */
392 const char * const hfi1_qsfp_devtech[16] = {
393 "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
394 "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
395 "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
396 "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
399 #define QSFP_DUMP_CHUNK 16 /* Holds longest string */
400 #define QSFP_DEFAULT_HDR_CNT 224
/*
 * Four fixed-width 4-char power-class strings; presumably indexed as
 * pwr_codes + QSFP_PWR(...) * 4 with a "%.3s" format in qsfp_dump() —
 * confirm against the elided format arguments.
 */
402 static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
/*
 * qsfp_mod_present - return nonzero when a QSFP module is plugged into
 * this port.  Reads the module-present bit from the per-HFI ASIC CSR;
 * the MODPRST_N signal name suggests active-low, hence the negation.
 */
404 int qsfp_mod_present(struct hfi1_pportdata *ppd)
406 struct hfi1_devdata *dd = ppd->dd;
409 reg = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
410 return !(reg & QSFP_HFI0_MODPRST_N);
414 * This function maps QSFP memory addresses in 128 byte chunks in the following
415 * fashion per the CableInfo SMA query definition in the IBA 1.3 spec/OPA Gen 1
417 * For addr 000-127, lower page 00h
418 * For addr 128-255, upper page 00h
419 * For addr 256-383, upper page 01h
420 * For addr 384-511, upper page 02h
421 * For addr 512-639, upper page 03h
423 * For addresses beyond this range, it returns the invalid range of data buffer
425 * For upper pages that are optional, if they are not valid, returns the
426 * particular range of bytes in the data buffer set to 0.
/*
 * Serves CableInfo queries straight from the cached QSFP image.
 * NOTE(review): the error-return statements after each validation are
 * elided in this view.
 */
428 int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
431 struct hfi1_pportdata *ppd;
/* Validate the 1-based port number before indexing dd->pport. */
435 if (port_num > dd->num_pports || port_num < 1) {
436 dd_dev_info(dd, "%s: Invalid port number %d\n",
442 ppd = dd->pport + (port_num - 1);
/* No module, or cache not (yet) refreshed: nothing valid to return. */
443 if (!qsfp_mod_present(ppd)) {
448 if (!ppd->qsfp_info.cache_valid) {
/* Entire request outside the cached range: reject. */
453 if (addr >= (QSFP_MAX_NUM_PAGES * 128)) {
/* Partial overlap: copy the valid prefix, zero-fill the excess tail. */
458 if ((addr + len) > (QSFP_MAX_NUM_PAGES * 128)) {
459 excess_len = (addr + len) - (QSFP_MAX_NUM_PAGES * 128);
460 memcpy(data, &ppd->qsfp_info.cache[addr], (len - excess_len));
461 data += (len - excess_len);
465 memcpy(data, &ppd->qsfp_info.cache[addr], len);
469 memset(data, 0, excess_len);
/*
 * qsfp_dump - format a human-readable summary of the cached QSFP data
 * into @buf (at most @len bytes, via scnprintf), followed by a hex dump
 * of the first QSFP_DEFAULT_HDR_CNT bytes in QSFP_DUMP_CHUNK-byte rows.
 * Only emits the summary when the cache is valid.
 *
 * NOTE(review): this function continues past the end of this view; the
 * loop/function close and the return value are not visible here.
 */
473 int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
475 u8 *cache = &ppd->qsfp_info.cache[0];
476 u8 bin_buff[QSFP_DUMP_CHUNK];
480 u8 *atten = &cache[QSFP_ATTEN_OFFS];
481 u8 *vendor_oui = &cache[QSFP_VOUI_OFFS];
487 if (ppd->qsfp_info.cache_valid) {
/* Cable length is only meaningful for copper (Cu) technologies. */
489 if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
490 sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
492 sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
494 (QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]) * 4));
/* Technology name comes from the upper nibble of the tech byte. */
496 sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n",
498 hfi1_qsfp_devtech[(cache[QSFP_MOD_TECH_OFFS]) >> 4]);
500 sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
501 QSFP_VEND_LEN, &cache[QSFP_VEND_OFFS]);
503 sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
504 QSFP_OUI(vendor_oui));
506 sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
507 QSFP_PN_LEN, &cache[QSFP_PN_OFFS]);
509 sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
510 QSFP_REV_LEN, &cache[QSFP_REV_OFFS]);
/* Attenuation figures likewise only apply to copper cables. */
512 if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
513 sofar += scnprintf(buf + sofar, len - sofar,
515 QSFP_ATTEN_SDR(atten),
516 QSFP_ATTEN_DDR(atten));
518 sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
519 QSFP_SN_LEN, &cache[QSFP_SN_OFFS]);
521 sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
522 QSFP_DATE_LEN, &cache[QSFP_DATE_OFFS]);
524 sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
525 QSFP_LOT_LEN, &cache[QSFP_LOT_OFFS]);
/* Raw hex dump of the header, QSFP_DUMP_CHUNK bytes per output row. */
527 while (bidx < QSFP_DEFAULT_HDR_CNT) {
530 memcpy(bin_buff, &cache[bidx], QSFP_DUMP_CHUNK);
531 for (iidx = 0; iidx < QSFP_DUMP_CHUNK; ++iidx) {
532 sofar += scnprintf(buf + sofar, len-sofar,
533 " %02X", bin_buff[iidx]);
535 sofar += scnprintf(buf + sofar, len - sofar, "\n");
536 bidx += QSFP_DUMP_CHUNK;