// SPDX-License-Identifier: GPL-2.0+
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
#include <dm/device-internal.h>
#include <linux/compat.h>
#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT		60
#define MAX_PRP_POOL		512
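/*
 * U-Boot submits NVMe commands one at a time and polls for completion,
 * so a queue depth of 2 (the minimum, allowing a single outstanding
 * command) is sufficient for both the admin and I/O queues.
 *
 * MAX_PRP_POOL is the initial PRP list pool size in bytes (512 bytes,
 * i.e. 64 8-byte PRP entries); nvme_setup_prps() enlarges the pool when
 * a transfer needs more entries.
 */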
/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
	struct nvme_command *sq_cmds;
	struct nvme_completion *cqes;
	wait_queue_head_t sq_full;
	unsigned long cmdid_data[];
static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
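/*
 * Build the PRP (Physical Region Page) entries for a data transfer.
 * PRP1 in the command covers the first (possibly unaligned) page of the
 * buffer. If the remainder fits in one more page, PRP2 is simply that
 * page's address; otherwise PRP2 points to a PRP list built in
 * dev->prp_pool, one 8-byte entry per page, where the last entry of each
 * pool page chains to the next pool page.
 */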
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = total_len;
	u32 prps_per_page = (page_size >> 3) - 1;
	length -= (page_size - offset);
		dma_addr += (page_size - offset);
	if (length <= page_size) {
	nprps = DIV_ROUND_UP(length, page_size);
	num_pages = DIV_ROUND_UP(nprps, prps_per_page);
	if (nprps > dev->prp_entry_num) {
		/*
		 * Always increase in increments of pages. It doesn't waste
		 * much memory and reduces the number of allocations.
		 */
		dev->prp_pool = memalign(page_size, num_pages * page_size);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool failed\n");
		dev->prp_entry_num = prps_per_page * num_pages;
	prp_pool = dev->prp_pool;
		if (i == ((page_size >> 3) - 1)) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
			prp_pool += page_size;
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
	*prp2 = (ulong)dev->prp_pool;
	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
			   dev->prp_entry_num * sizeof(u64));
static __le16 nvme_get_cmd_id(void)
	static unsigned short cmdid;
	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
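/*
 * Read the status field of a completion queue entry. The CQ is written
 * by the controller via DMA, so the dcache range covering the entry is
 * invalidated first to make sure the CPU sees the freshly written data.
 */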
static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
	u64 start = (ulong)&nvmeq->cqes[index];
	u64 stop = start + sizeof(struct nvme_completion);
	invalidate_dcache_range(start, stop);
	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq: The queue to use
 * @cmd: The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
	u16 tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
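/*
 * Submit a command and poll for its completion queue entry. Each pass of
 * the controller through the CQ toggles the phase tag in bit 0 of the
 * status field, so a new completion is detected when that bit matches the
 * queue's expected cq_phase. Once the entry is consumed, the head pointer
 * is advanced (wrapping and flipping cq_phase at the end of the queue)
 * and written to the CQ doorbell.
 */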
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	ulong timeout_us = timeout * 100000;
	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);
	start_time = timer_get_us();
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
		if (timeout_us > 0 && (timer_get_us() - start_time)
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		if (++head == nvmeq->q_depth) {
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;
		*result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));
	if (++head == nvmeq->q_depth) {
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;
static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
				    result, ADMIN_TIMEOUT);
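/*
 * Allocate the host memory for one queue pair: the submission queue
 * (an array of 64-byte commands) and the completion queue (16-byte CQEs).
 * Both are page aligned, since the queues are later created as physically
 * contiguous regions, and the queue's doorbell pointer is derived from
 * the queue id and the controller's doorbell stride.
 */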
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	memset(nvmeq, 0, sizeof(*nvmeq));
	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));
	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	dev->queues[qid] = nvmeq;
	free((void *)nvmeq->cqes);
static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
	struct nvme_command c;
	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);
	return nvme_submit_admin_cmd(dev, &c, NULL);
static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
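/*
 * Controller enable/disable handshake: writing CC.EN = 1 starts the
 * controller and CSTS.RDY eventually reads back 1; clearing CC.EN resets
 * it and RDY returns to 0. nvme_wait_ready() polls CSTS for the expected
 * RDY value within the CAP.TO timeout.
 */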
static int nvme_enable_ctrl(struct nvme_dev *dev)
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);
	return nvme_wait_ready(dev, true);
static int nvme_disable_ctrl(struct nvme_dev *dev)
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);
	return nvme_wait_ready(dev, false);
static void nvme_free_queue(struct nvme_queue *nvmeq)
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
	struct nvme_dev *dev = nvmeq->dev;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
	dev->online_queues++;
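/*
 * Bring up the admin queue: disable the controller, allocate (or reuse)
 * the admin queue pair, program the AQA/ASQ/ACQ registers with the queue
 * size and base addresses, set up CC (NVM command set, memory page size,
 * round-robin arbitration, SQ/CQ entry sizes) and re-enable the
 * controller.
 */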
static int nvme_configure_admin_queue(struct nvme_dev *dev)
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	result = nvme_disable_ctrl(dev);
	nvmeq = dev->queues[NVME_ADMIN_Q];
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
	aqa = nvmeq->q_depth - 1;
	dev->page_size = 1 << page_shift;
	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);
	result = nvme_enable_ctrl(dev);
	nvmeq->cq_vector = 0;
	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);
	nvme_free_queues(dev, 0);
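/*
 * Create an I/O queue pair on the controller with the admin Create I/O
 * Completion Queue / Create I/O Submission Queue commands. The qsize
 * fields are zero-based, the queues are flagged as physically contiguous,
 * and each submission queue is bound to the completion queue with the
 * same id. The CQ must be created before the SQ that targets it.
 */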
static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
	return nvme_submit_admin_cmd(dev, &c, NULL);
static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);
	return nvme_submit_admin_cmd(dev, &c, NULL);
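/*
 * Issue an admin Identify command. CNS selects the returned data
 * structure: 0 returns the Identify Namespace data for @nsid, 1 returns
 * the Identify Controller data. The 4 KiB result buffer may cross a page
 * boundary, in which case PRP2 covers the second page.
 */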
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);
	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	length -= (page_size - offset);
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);
	ret = nvme_submit_admin_cmd(dev, &c, NULL);
		invalidate_dcache_range(dma_addr,
					dma_addr + sizeof(struct nvme_id_ctrl));
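/*
 * Get Features / Set Features admin commands. @fid selects the feature
 * (e.g. NVME_FEAT_NUM_QUEUES); dword11 carries the value to set, and the
 * controller returns feature-specific data in completion dword 0, which
 * ends up in *result.
 */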
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
	struct nvme_command c;
	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	/*
	 * TODO: add cache invalidate operation when the size of
	 * the DMA buffer is known
	 */
	return nvme_submit_admin_cmd(dev, &c, result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
	struct nvme_command c;
	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);
	/*
	 * TODO: add cache flush operation when the size of
	 * the DMA buffer is known
	 */
	return nvme_submit_admin_cmd(dev, &c, result);
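/*
 * Create one I/O queue pair on the controller: allocate the completion
 * queue first, then the submission queue that posts to it, and finally
 * initialise the host-side bookkeeping. On failure the queues created so
 * far are deleted again.
 */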
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
	struct nvme_dev *dev = nvmeq->dev;
	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	result = nvme_alloc_sq(dev, qid, nvmeq);
	nvme_init_queue(nvmeq, qid);
	nvme_delete_sq(dev, qid);
	nvme_delete_cq(dev, qid);
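/*
 * Request the number of I/O queues with Set Features / Number of Queues.
 * dword11 encodes the requested counts minus one: submission queues in
 * the low 16 bits and completion queues in the high 16 bits. The
 * controller reports the counts actually allocated (also zero-based) in
 * the result, so the usable queue count is min(sq, cq) + 1.
 */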
static int nvme_set_queue_count(struct nvme_dev *dev, int count)
	u32 q_count = (count - 1) | ((count - 1) << 16);
	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
				   q_count, 0, &result);
	return min(result & 0xffff, result >> 16) + 1;
static void nvme_create_io_queues(struct nvme_dev *dev)
	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
static int nvme_setup_io_queues(struct nvme_dev *dev)
	result = nvme_set_queue_count(dev, nr_io_queues);
	dev->max_qid = nr_io_queues;
	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);
static int nvme_get_info_from_identify(struct nvme_dev *dev)
	struct nvme_id_ctrl *ctrl;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;
	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	dev->max_transfer_shift = (ctrl->mdts + shift);
	/*
	 * Maximum Data Transfer Size (MDTS) field indicates the maximum
	 * data transfer size between the host and the controller. The
	 * host should not submit a command that exceeds this transfer
	 * size. The value is in units of the minimum memory page size
	 * and is reported as a power of two (2^n).
	 *
	 * The spec also says: a value of 0h indicates no restrictions
	 * on transfer size. But in nvme_blk_read/write() below we have
	 * the following algorithm for the maximum number of logical blocks
	 * per transfer:
	 *
	 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	 *
	 * In order for lbas not to overflow, the maximum number is 15,
	 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
	 * Let's use 20, which gives a 1MB maximum transfer size.
	 */
	dev->max_transfer_shift = 20;
int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
	struct nvme_ns *ns = dev_get_priv(udev);
	memcpy(eui64, ns->eui64, sizeof(ns->eui64));
int nvme_scan_namespace(void)
	ret = uclass_get(UCLASS_NVME, &uc);
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
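/*
 * Probe one NVMe block device. The namespace id is derived from the
 * trailing number of the block device name, an Identify Namespace
 * command supplies the LBA format, and the resulting block size and
 * capacity are copied into the blk_desc together with the PCI vendor id
 * and the controller's serial number and firmware revision.
 */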
static int nvme_blk_probe(struct udevice *udev)
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	struct pci_child_platdata *pplat;
	struct nvme_id_ns *id;
	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	memset(ns, 0, sizeof(*ns));
	/* extract the namespace id from the block device name */
	ns->ns_id = trailing_strtol(udev->name) + 1;
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[flbas].ds;
	ns->mode_select_num_blocks = le64_to_cpu(id->nsze);
	ns->mode_select_block_len = 1 << ns->lba_shift;
	list_add(&ns->list, &ndev->namespaces);
	desc->lba = ns->mode_select_num_blocks;
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	pplat = dev_get_parent_platdata(udev->parent);
	sprintf(desc->vendor, "0x%.4x", pplat->vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));
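/*
 * Read or write a range of blocks. The request is split into chunks of
 * at most `lbas` blocks (bounded by the controller's maximum transfer
 * size), a PRP list is set up for each chunk, and the chunks are issued
 * synchronously on the I/O queue. The buffer is flushed from the dcache
 * before submission and invalidated afterwards for DMA coherency.
 */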
static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, bool read)
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;
	flush_dcache_range((unsigned long)buffer,
			   (unsigned long)buffer + total_len);
	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, (ulong)buffer))
		c.rw.slba = cpu_to_le64(slba);
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64((ulong)buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
					      &c, NULL, IO_TIMEOUT);
		temp_len -= (u32)lbas << ns->lba_shift;
		buffer += lbas << ns->lba_shift;
	/* invalidate the range that was actually transferred by DMA */
	invalidate_dcache_range((unsigned long)buffer - (total_len - temp_len),
				(unsigned long)buffer);
	return (total_len - temp_len) >> desc->log2blksz;
static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
U_BOOT_DRIVER(nvme_blk) = {
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto_alloc_size = sizeof(struct nvme_ns),
static int nvme_bind(struct udevice *udev)
	sprintf(name, "nvme#%d", ndev_num++);
	return device_set_name(udev, name);
static int nvme_probe(struct udevice *udev)
	struct nvme_dev *ndev = dev_get_priv(udev);
	ndev->instance = trailing_strtol(udev->name);
	INIT_LIST_HEAD(&ndev->namespaces);
	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
	if (readl(&ndev->bar->csts) == -1) {
		printf("Error: %s: NVMe controller registers not accessible\n",
		       udev->name);
	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
		printf("Error: %s: Out of memory!\n", udev->name);
	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));
	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;
	ret = nvme_configure_admin_queue(ndev);
	/* Allocate after the page size is known */
	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		printf("Error: %s: Out of memory!\n", udev->name);
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;
	ret = nvme_setup_io_queues(ndev);
	nvme_get_info_from_identify(ndev);
	free((void *)ndev->queues);
U_BOOT_DRIVER(nvme) = {
	.priv_auto_alloc_size = sizeof(struct nvme_dev),
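/*
 * Match any PCI device that advertises the NVMe class code
 * (PCI_CLASS_STORAGE_EXPRESS, 0x010802) rather than specific
 * vendor/device IDs, so all standards-compliant controllers bind to
 * this driver.
 */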
struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
U_BOOT_PCI_DEVICE(nvme, nvme_supported);