/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_l2p_tbl	= 0xea,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};
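/*
 * These opcodes sit in the NVMe vendor-specific admin opcode range; they
 * are defined by the LightNVM host interface rather than by the NVMe
 * specification itself, which is why open-channel support has to be
 * probed by PCI ID (see nvme_nvm_ns_supported() at the bottom of this
 * file).
 */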
/*
 * Vendor-specific command layouts. Each one mirrors the standard 64-byte
 * NVMe submission entry (opcode, flags, command_id, nsid, PRPs) plus
 * LightNVM-specific fields; the individual field definitions are elided
 * in this excerpt, but their sizes are pinned by _nvme_nvm_check_size()
 * below.
 */
struct nvme_nvm_hb_rw {
	/* 64-byte hybrid read/write command; fields elided */
};

struct nvme_nvm_ph_rw {
	/* 64-byte physical read/write command; fields elided */
};

struct nvme_nvm_identity {
	/* 64-byte identity admin command; fields elided */
};

struct nvme_nvm_l2ptbl {
	/* 64-byte get-L2P-table admin command; fields elided */
};

struct nvme_nvm_getbbtbl {
	/* 64-byte get-bad-block-table admin command; fields elided */
};

struct nvme_nvm_setbbtbl {
	/* 64-byte set-bad-block-table admin command; fields elided */
};

struct nvme_nvm_erase_blk {
	/* 64-byte erase command; fields elided */
};
struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_hb_rw hb_rw;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_l2ptbl l2p;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};
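/*
 * Commands are built in a zeroed nvme_nvm_command and handed to the core
 * as a plain struct nvme_command. A sketch of the pattern used by the
 * helpers below:
 *
 *	struct nvme_nvm_command c = {};
 *
 *	c.identity.opcode = nvme_nvm_admin_identity;
 *	c.identity.nsid = cpu_to_le32(ns->ns_id);
 *	ret = nvme_submit_sync_cmd(dev->admin_q,
 *				(struct nvme_command *)&c, buf, buf_len);
 *
 * The cast is only safe because every union member is exactly 64 bytes,
 * which _nvme_nvm_check_size() enforces at build time.
 */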
struct nvme_nvm_id_group {
	/* per-group geometry: media/format type, channel/LUN/plane/block/
	 * page counts, sector sizes and timings; fields elided (960 bytes,
	 * see _nvme_nvm_check_size() below) */
} __packed;

struct nvme_nvm_addr_format {
	/* bit offsets and widths describing the device's PPA address
	 * format; fields elided */
} __packed;

struct nvme_nvm_id {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	/* reserved bytes elided */
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_addr_format ppaf;
	/* reserved bytes elided; the structure is 4096 bytes in total */
	struct nvme_nvm_id_group groups[4];
} __packed;

struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	/* revision and counter fields elided */
	__le32	tblks;
	/* remaining reserved fields elided; 512-byte header in total */
	__u8	blk[0];
};
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
}
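/*
 * Copy the per-configuration-group geometry out of the little-endian
 * identity page into the host-endian nvm_id. The identity page carries
 * at most four groups, hence the min_t() clamp below.
 */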
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *dst;
	int i, end;

	end = min_t(u32, 4, nvm_id->cgrps);

	for (i = 0; i < end; i++) {
		src = &nvme_nvm_id->groups[i];
		dst = &nvm_id->groups[i];

		dst->mtype = src->mtype;
		dst->fmtype = src->fmtype;
		dst->num_ch = src->num_ch;
		dst->num_lun = src->num_lun;
		dst->num_pln = src->num_pln;

		dst->num_pg = le16_to_cpu(src->num_pg);
		dst->num_blk = le16_to_cpu(src->num_blk);
		dst->fpg_sz = le16_to_cpu(src->fpg_sz);
		dst->csecs = le16_to_cpu(src->csecs);
		dst->sos = le16_to_cpu(src->sos);

		dst->trdt = le32_to_cpu(src->trdt);
		dst->trdm = le32_to_cpu(src->trdm);
		dst->tprt = le32_to_cpu(src->tprt);
		dst->tprm = le32_to_cpu(src->tprm);
		dst->tbet = le32_to_cpu(src->tbet);
		dst->tbem = le32_to_cpu(src->tbem);
		dst->mpos = le32_to_cpu(src->mpos);
		dst->mccap = le32_to_cpu(src->mccap);

		dst->cpar = le16_to_cpu(src->cpar);
	}

	return 0;
}
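/*
 * Issue the vendor-specific identity admin command and translate the
 * returned 4096-byte page (size checked at build time above) into the
 * lightnvm core's nvm_id.
 */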
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->ns_id);
	c.identity.chnl_off = 0;

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cgrps = nvme_nvm_id->cgrps;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvme_nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}
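/*
 * The L2P (logical-to-physical) table is fetched in chunks bounded by the
 * admin queue's maximum transfer size. Each entry is a u64, so assuming,
 * for example, a 256-sector limit (256 << 9 = 128 KiB per command), each
 * request returns 131072 / 8 = 16384 entries.
 */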
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
				nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
	u32 nlb_pr_rq = len / sizeof(u64);
	u64 cmd_slba = slba;
	void *entries;
	int ret = 0;

	c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
	c.l2p.nsid = cpu_to_le32(ns->ns_id);
	entries = kmalloc(len, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	while (nlb) {
		u32 cmd_nlb = min(nlb_pr_rq, nlb);

		c.l2p.slba = cpu_to_le64(cmd_slba);
		c.l2p.nlb = cpu_to_le32(cmd_nlb);

		ret = nvme_submit_sync_cmd(dev->admin_q,
				(struct nvme_command *)&c, entries, len);
		if (ret) {
			dev_err(dev->dev, "L2P table transfer failed (%d)\n",
									ret);
			ret = -EIO;
			goto out;
		}

		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
			ret = -EINTR;
			goto out;
		}

		cmd_slba += cmd_nlb;
		nlb -= cmd_nlb;
	}

out:
	kfree(entries);
	return ret;
}
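/*
 * Fetch the bad block table for the blocks addressed by ppa. The returned
 * buffer starts with a fixed 512-byte header ("BBLT" signature, version
 * and block count) followed by one state byte per block, which is why
 * tblsz below is sizeof(struct nvme_nvm_bb_tbl) + nr_blocks.
 */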
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
				int nr_blocks, nvm_bb_update_fn *update_bbtbl,
				void *priv)
{
	struct request_queue *q = nvmdev->q;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(dev->dev, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(dev->dev, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
		ret = -EINVAL;
		dev_err(dev->dev, "bbt unexpected blocks returned (%u!=%u)",
					le32_to_cpu(bb_tbl->tblks), nr_blocks);
		goto out;
	}

	ppa = dev_to_generic_addr(nvmdev, ppa);
	ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);

out:
	kfree(bb_tbl);
	return ret;
}
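/*
 * Mark the blocks starting at rqd->ppa_addr with the given state. Like
 * other NVMe block-count fields, nlb is zero-based, hence nr_pages - 1.
 */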
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
								int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
	c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
	return ret;
}
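/*
 * Translate an nvm_rq into the on-wire 64-byte command. The hybrid
 * read/write variants additionally carry the logical slba taken from the
 * bio, so the device can track the logical-to-physical mapping on its
 * side.
 */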
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
				struct nvme_ns *ns, struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);

	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
						rqd->bio->bi_iter.bi_sector));
}
static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	if (dev->mt && dev->mt->end_io(rqd, error))
		pr_err("nvme: err status: %x result: %lx\n",
				rq->errors, (unsigned long)rq->special);

	kfree(rq->cmd);
	blk_mq_free_request(rq);
}
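/*
 * Wrap the target's bio in a driver-private request: REQ_TYPE_DRV_PRIV
 * makes the NVMe driver submit the prebuilt 64-byte command as-is rather
 * than generating one from the bio. Completion is asynchronous and fans
 * out through nvme_nvm_end_io() above.
 */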
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd) {
		blk_mq_free_request(rq);
		return -ENOMEM;
	}

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->cmd = (unsigned char *)cmd;
	rq->cmd_len = sizeof(struct nvme_nvm_command);
	rq->special = (void *)0;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}
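/*
 * Note that erase is issued synchronously on the per-namespace I/O queue
 * (dev->q), unlike the table commands above, which go to the admin queue.
 */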
static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_command c = {};

	c.erase.opcode = NVM_OP_ERASE;
	c.erase.nsid = cpu_to_le32(ns->ns_id);
	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.erase.length = cpu_to_le16(rqd->nr_pages - 1);

	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
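/*
 * PAGE_SIZE-aligned DMA pools, used by the lightnvm core to allocate the
 * PPA lists that accompany vectored commands (see the ppa_list argument
 * of nvme_nvm_dev_dma_free() below).
 */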
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_dev *dev = ns->dev;

	return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
				dma_addr_t dma_handler)
{
	dma_pool_free(pool, ppa_list, dma_handler);
}
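/*
 * The callback table handed to the lightnvm core: everything above is
 * reached through these hooks once nvm_register() succeeds.
 */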
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.erase_block		= nvme_nvm_erase_block,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,
};
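/*
 * Entry points for the NVMe core. A minimal sketch of the expected call
 * pattern during namespace scanning (caller-side code assumed, not part
 * of this file):
 *
 *	if (nvme_nvm_ns_supported(ns, id) &&
 *	    nvme_nvm_register(ns->queue, disk_name) == 0)
 *		// treat this namespace as an open-channel device
 */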
int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
}

void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
{
	nvm_unregister(disk_name);
}
/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f
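/*
 * There is no standard way to detect an open-channel namespace, so
 * support is inferred from a known PCI vendor/device ID combined with a
 * vendor-specific byte in the identify-namespace data.
 */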
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_dev *dev = ns->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
							id->vs[0] == 0x1)
		return 1;

	/* CNEX Labs - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_WL &&
							id->vs[0] == 0x1)
		return 1;

	return 0;
}