drivers/nvme/nvme.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2017 NXP Semiconductors
4  * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
5  */
6
7 #include <common.h>
8 #include <dm.h>
9 #include <errno.h>
10 #include <memalign.h>
11 #include <pci.h>
12 #include <dm/device-internal.h>
13 #include "nvme.h"
14
15 #define NVME_Q_DEPTH            2
16 #define NVME_AQ_DEPTH           2
17 #define NVME_SQ_SIZE(depth)     ((depth) * sizeof(struct nvme_command))
18 #define NVME_CQ_SIZE(depth)     ((depth) * sizeof(struct nvme_completion))
19 #define ADMIN_TIMEOUT           60
20 #define IO_TIMEOUT              30
21 #define MAX_PRP_POOL            512
22
23 enum nvme_queue_id {
24         NVME_ADMIN_Q,
25         NVME_IO_Q,
26         NVME_Q_NUM,
27 };
28
29 /*
30  * An NVM Express queue. Each device has at least two (one for admin
31  * commands and one for I/O commands).
32  */
33 struct nvme_queue {
34         struct nvme_dev *dev;
35         struct nvme_command *sq_cmds;
36         struct nvme_completion *cqes;
37         wait_queue_head_t sq_full;
38         u32 __iomem *q_db;
39         u16 q_depth;
40         s16 cq_vector;
41         u16 sq_head;
42         u16 sq_tail;
43         u16 cq_head;
44         u16 qid;
45         u8 cq_phase;
46         u8 cqe_seen;
47         unsigned long cmdid_data[];
48 };
49
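/**
 * nvme_wait_ready() - wait for the controller ready state to change
 *
 * Polls CSTS.RDY until it matches the requested state (set when enabling,
 * clear when disabling), giving up after the timeout advertised in the
 * CAP.TO field (units of 500 ms).
 *
 * @dev:        NVMe controller
 * @enabled:    true to wait for RDY to be set, false to wait for it to clear
 * Return: 0 on success, -ETIME on timeout
 */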
50 static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
51 {
52         u32 bit = enabled ? NVME_CSTS_RDY : 0;
53         int timeout;
54         ulong start;
55
56         /* Timeout field in the CAP register is in 500 millisecond units */
57         timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;
58
59         start = get_timer(0);
60         while (get_timer(start) < timeout) {
61                 if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
62                         return 0;
63         }
64
65         return -ETIME;
66 }
67
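/**
 * nvme_setup_prps() - build the PRP2 entry or PRP list for a transfer
 *
 * PRP1 is expected to point at the (possibly unaligned) start of the buffer.
 * If the rest of the transfer fits within one more memory page, its address
 * is returned directly through @prp2; otherwise a chained PRP list is built
 * in dev->prp_pool (grown as needed) and @prp2 is set to the list address.
 *
 * @dev:        NVMe controller
 * @prp2:       returns the value for the command's PRP2 field
 * @total_len:  transfer length in bytes
 * @dma_addr:   bus address of the data buffer
 * Return: 0 on success, -ENOMEM if the PRP pool cannot be enlarged
 */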
68 static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
69                            int total_len, u64 dma_addr)
70 {
71         u32 page_size = dev->page_size;
72         int offset = dma_addr & (page_size - 1);
73         u64 *prp_pool;
74         int length = total_len;
75         int i, nprps;
76         u32 prps_per_page = (page_size >> 3) - 1;
77         u32 num_pages;
78
79         length -= (page_size - offset);
80
81         if (length <= 0) {
82                 *prp2 = 0;
83                 return 0;
84         }
85
86         if (length)
87                 dma_addr += (page_size - offset);
88
89         if (length <= page_size) {
90                 *prp2 = dma_addr;
91                 return 0;
92         }
93
94         nprps = DIV_ROUND_UP(length, page_size);
95         num_pages = DIV_ROUND_UP(nprps, prps_per_page);
96
97         if (nprps > dev->prp_entry_num) {
98                 free(dev->prp_pool);
99                 /*
100                  * Always increase in increments of pages.  It doesn't waste
101                  * much memory and reduces the number of allocations.
102                  */
103                 dev->prp_pool = memalign(page_size, num_pages * page_size);
104                 if (!dev->prp_pool) {
105                         printf("Error: malloc prp_pool fail\n");
106                         return -ENOMEM;
107                 }
108                 dev->prp_entry_num = prps_per_page * num_pages;
109         }
110
111         prp_pool = dev->prp_pool;
112         i = 0;
113         while (nprps) {
114                 if (i == ((page_size >> 3) - 1)) {
115                         *(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
116                                         page_size);
117                         i = 0;
118                         prp_pool = (u64 *)((ulong)prp_pool + page_size);
119                 }
120                 *(prp_pool + i++) = cpu_to_le64(dma_addr);
121                 dma_addr += page_size;
122                 nprps--;
123         }
124         *prp2 = (ulong)dev->prp_pool;
125
126         return 0;
127 }
128
129 static __le16 nvme_get_cmd_id(void)
130 {
131         static unsigned short cmdid;
132
133         return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
134 }
135
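/**
 * nvme_read_completion_status() - read the status field of a completion entry
 *
 * The completion queue is written by the controller, so the corresponding
 * cache lines are invalidated before the status word is read.
 *
 * @nvmeq:      The queue the completion belongs to
 * @index:      Index of the completion entry
 * Return: the (CPU-endian) status field of the entry
 */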
136 static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
137 {
138         u64 start = (ulong)&nvmeq->cqes[index];
139         u64 stop = start + sizeof(struct nvme_completion);
140
141         invalidate_dcache_range(start, stop);
142
143         return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
144 }
145
146 /**
147  * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
148  *
149  * @nvmeq:      The queue to use
150  * @cmd:        The command to send
151  */
152 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
153 {
154         u16 tail = nvmeq->sq_tail;
155
156         memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
157         flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
158                            (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));
159
160         if (++tail == nvmeq->q_depth)
161                 tail = 0;
162         writel(tail, nvmeq->q_db);
163         nvmeq->sq_tail = tail;
164 }
165
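/**
 * nvme_submit_sync_cmd() - submit a command and busy-wait for its completion
 *
 * Rings the submission queue doorbell and then polls the completion queue,
 * using the phase bit to detect a new entry, until the command completes or
 * the timeout expires. The completion queue head doorbell is updated before
 * returning.
 *
 * @nvmeq:      The queue to submit on
 * @cmd:        The command to send
 * @result:     If non-NULL, receives the command-specific result dword
 * @timeout:    Timeout for the command
 * Return: 0 on success, -ETIMEDOUT on timeout, -EIO if the controller
 *         reported an error status
 */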
166 static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
167                                 struct nvme_command *cmd,
168                                 u32 *result, unsigned timeout)
169 {
170         u16 head = nvmeq->cq_head;
171         u16 phase = nvmeq->cq_phase;
172         u16 status;
173         ulong start_time;
174         ulong timeout_us = timeout * 100000;
175
176         cmd->common.command_id = nvme_get_cmd_id();
177         nvme_submit_cmd(nvmeq, cmd);
178
179         start_time = timer_get_us();
180
181         for (;;) {
182                 status = nvme_read_completion_status(nvmeq, head);
183                 if ((status & 0x01) == phase)
184                         break;
185                 if (timeout_us > 0 && (timer_get_us() - start_time)
186                     >= timeout_us)
187                         return -ETIMEDOUT;
188         }
189
190         status >>= 1;
191         if (status) {
192                 printf("ERROR: status = %x, phase = %d, head = %d\n",
193                        status, phase, head);
194                 status = 0;
195                 if (++head == nvmeq->q_depth) {
196                         head = 0;
197                         phase = !phase;
198                 }
199                 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
200                 nvmeq->cq_head = head;
201                 nvmeq->cq_phase = phase;
202
203                 return -EIO;
204         }
205
206         if (result)
207                 *result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));
208
209         if (++head == nvmeq->q_depth) {
210                 head = 0;
211                 phase = !phase;
212         }
213         writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
214         nvmeq->cq_head = head;
215         nvmeq->cq_phase = phase;
216
217         return status;
218 }
219
220 static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
221                                  u32 *result)
222 {
223         return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
224                                     result, ADMIN_TIMEOUT);
225 }
226
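/**
 * nvme_alloc_queue() - allocate and register a queue pair
 *
 * Allocates the submission and completion rings for queue @qid, initialises
 * the bookkeeping fields and stores the queue in dev->queues[].
 *
 * @dev:        NVMe controller
 * @qid:        Queue identifier (0 is the admin queue)
 * @depth:      Number of entries in each ring
 * Return: pointer to the new queue, or NULL on allocation failure
 */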
227 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
228                                            int qid, int depth)
229 {
230         struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
231         if (!nvmeq)
232                 return NULL;
233         memset(nvmeq, 0, sizeof(*nvmeq));
234
235         nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
236         if (!nvmeq->cqes)
237                 goto free_nvmeq;
238         memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));
239
240         nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
241         if (!nvmeq->sq_cmds)
242                 goto free_queue;
243         memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));
244
245         nvmeq->dev = dev;
246
247         nvmeq->cq_head = 0;
248         nvmeq->cq_phase = 1;
249         nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
250         nvmeq->q_depth = depth;
251         nvmeq->qid = qid;
252         dev->queue_count++;
253         dev->queues[qid] = nvmeq;
254
255         return nvmeq;
256
257  free_queue:
258         free((void *)nvmeq->cqes);
259  free_nvmeq:
260         free(nvmeq);
261
262         return NULL;
263 }
264
265 static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
266 {
267         struct nvme_command c;
268
269         memset(&c, 0, sizeof(c));
270         c.delete_queue.opcode = opcode;
271         c.delete_queue.qid = cpu_to_le16(id);
272
273         return nvme_submit_admin_cmd(dev, &c, NULL);
274 }
275
276 static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
277 {
278         return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
279 }
280
281 static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
282 {
283         return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
284 }
285
286 static int nvme_enable_ctrl(struct nvme_dev *dev)
287 {
288         dev->ctrl_config &= ~NVME_CC_SHN_MASK;
289         dev->ctrl_config |= NVME_CC_ENABLE;
290         writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);
291
292         return nvme_wait_ready(dev, true);
293 }
294
295 static int nvme_disable_ctrl(struct nvme_dev *dev)
296 {
297         dev->ctrl_config &= ~NVME_CC_SHN_MASK;
298         dev->ctrl_config &= ~NVME_CC_ENABLE;
299         writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);
300
301         return nvme_wait_ready(dev, false);
302 }
303
304 static void nvme_free_queue(struct nvme_queue *nvmeq)
305 {
306         free((void *)nvmeq->cqes);
307         free(nvmeq->sq_cmds);
308         free(nvmeq);
309 }
310
311 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
312 {
313         int i;
314
315         for (i = dev->queue_count - 1; i >= lowest; i--) {
316                 struct nvme_queue *nvmeq = dev->queues[i];
317                 dev->queue_count--;
318                 dev->queues[i] = NULL;
319                 nvme_free_queue(nvmeq);
320         }
321 }
322
323 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
324 {
325         struct nvme_dev *dev = nvmeq->dev;
326
327         nvmeq->sq_tail = 0;
328         nvmeq->cq_head = 0;
329         nvmeq->cq_phase = 1;
330         nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
331         memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
332         flush_dcache_range((ulong)nvmeq->cqes,
333                            (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
334         dev->online_queues++;
335 }
336
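/**
 * nvme_configure_admin_queue() - set up the admin queue and enable the device
 *
 * Chooses a memory page size supported by both the host and the controller,
 * disables the controller, programs AQA/ASQ/ACQ with the admin queue
 * attributes and addresses, and re-enables the controller.
 *
 * @dev:        NVMe controller
 * Return: 0 on success, negative error code on failure
 */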
337 static int nvme_configure_admin_queue(struct nvme_dev *dev)
338 {
339         int result;
340         u32 aqa;
341         u64 cap = dev->cap;
342         struct nvme_queue *nvmeq;
343         /* most architectures use 4KB as the page size */
344         unsigned page_shift = 12;
345         unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
346         unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
347
348         if (page_shift < dev_page_min) {
349                 debug("Device minimum page size (%u) too large for host (%u)\n",
350                       1 << dev_page_min, 1 << page_shift);
351                 return -ENODEV;
352         }
353
354         if (page_shift > dev_page_max) {
355                 debug("Device maximum page size (%u) smaller than host (%u)\n",
356                       1 << dev_page_max, 1 << page_shift);
357                 page_shift = dev_page_max;
358         }
359
360         result = nvme_disable_ctrl(dev);
361         if (result < 0)
362                 return result;
363
364         nvmeq = dev->queues[NVME_ADMIN_Q];
365         if (!nvmeq) {
366                 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
367                 if (!nvmeq)
368                         return -ENOMEM;
369         }
370
371         aqa = nvmeq->q_depth - 1;
372         aqa |= aqa << 16;
374
375         dev->page_size = 1 << page_shift;
376
377         dev->ctrl_config = NVME_CC_CSS_NVM;
378         dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
379         dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
380         dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
381
382         writel(aqa, &dev->bar->aqa);
383         nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
384         nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);
385
386         result = nvme_enable_ctrl(dev);
387         if (result)
388                 goto free_nvmeq;
389
390         nvmeq->cq_vector = 0;
391
392         nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);
393
394         return result;
395
396  free_nvmeq:
397         nvme_free_queues(dev, 0);
398
399         return result;
400 }
401
402 static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
403                             struct nvme_queue *nvmeq)
404 {
405         struct nvme_command c;
406         int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
407
408         memset(&c, 0, sizeof(c));
409         c.create_cq.opcode = nvme_admin_create_cq;
410         c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
411         c.create_cq.cqid = cpu_to_le16(qid);
412         c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
413         c.create_cq.cq_flags = cpu_to_le16(flags);
414         c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
415
416         return nvme_submit_admin_cmd(dev, &c, NULL);
417 }
418
419 static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
420                             struct nvme_queue *nvmeq)
421 {
422         struct nvme_command c;
423         int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
424
425         memset(&c, 0, sizeof(c));
426         c.create_sq.opcode = nvme_admin_create_sq;
427         c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
428         c.create_sq.sqid = cpu_to_le16(qid);
429         c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
430         c.create_sq.sq_flags = cpu_to_le16(flags);
431         c.create_sq.cqid = cpu_to_le16(qid);
432
433         return nvme_submit_admin_cmd(dev, &c, NULL);
434 }
435
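/**
 * nvme_identify() - issue an Identify admin command
 *
 * @dev:        NVMe controller
 * @nsid:       Namespace identifier (when namespace data is requested)
 * @cns:        Controller or Namespace Structure value selecting the data
 * @dma_addr:   Bus address of the buffer receiving the identify data
 * Return: 0 on success, non-zero NVMe status or negative error on failure
 */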
436 int nvme_identify(struct nvme_dev *dev, unsigned nsid,
437                   unsigned cns, dma_addr_t dma_addr)
438 {
439         struct nvme_command c;
440         u32 page_size = dev->page_size;
441         int offset = dma_addr & (page_size - 1);
442         int length = sizeof(struct nvme_id_ctrl);
443         int ret;
444
445         memset(&c, 0, sizeof(c));
446         c.identify.opcode = nvme_admin_identify;
447         c.identify.nsid = cpu_to_le32(nsid);
448         c.identify.prp1 = cpu_to_le64(dma_addr);
449
450         length -= (page_size - offset);
451         if (length <= 0) {
452                 c.identify.prp2 = 0;
453         } else {
454                 dma_addr += (page_size - offset);
455                 c.identify.prp2 = cpu_to_le64(dma_addr);
456         }
457
458         c.identify.cns = cpu_to_le32(cns);
459
460         ret = nvme_submit_admin_cmd(dev, &c, NULL);
461         if (!ret)
462                 invalidate_dcache_range(dma_addr,
463                                         dma_addr + sizeof(struct nvme_id_ctrl));
464
465         return ret;
466 }
467
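/**
 * nvme_get_features() - issue a Get Features admin command
 *
 * @dev:        NVMe controller
 * @fid:        Feature identifier
 * @nsid:       Namespace identifier
 * @dma_addr:   Bus address of the optional data buffer
 * @result:     If non-NULL, receives the returned feature value
 * Return: 0 on success, non-zero NVMe status or negative error on failure
 */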
468 int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
469                       dma_addr_t dma_addr, u32 *result)
470 {
471         struct nvme_command c;
472
473         memset(&c, 0, sizeof(c));
474         c.features.opcode = nvme_admin_get_features;
475         c.features.nsid = cpu_to_le32(nsid);
476         c.features.prp1 = cpu_to_le64(dma_addr);
477         c.features.fid = cpu_to_le32(fid);
478
479         /*
480          * TODO: add cache invalidate operation when the size of
481          * the DMA buffer is known
482          */
483
484         return nvme_submit_admin_cmd(dev, &c, result);
485 }
486
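/**
 * nvme_set_features() - issue a Set Features admin command
 *
 * @dev:        NVMe controller
 * @fid:        Feature identifier
 * @dword11:    Feature-specific value (command dword 11)
 * @dma_addr:   Bus address of the optional data buffer
 * @result:     If non-NULL, receives the returned feature value
 * Return: 0 on success, non-zero NVMe status or negative error on failure
 */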
487 int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
488                       dma_addr_t dma_addr, u32 *result)
489 {
490         struct nvme_command c;
491
492         memset(&c, 0, sizeof(c));
493         c.features.opcode = nvme_admin_set_features;
494         c.features.prp1 = cpu_to_le64(dma_addr);
495         c.features.fid = cpu_to_le32(fid);
496         c.features.dword11 = cpu_to_le32(dword11);
497
498         /*
499          * TODO: add cache flush operation when the size of
500          * the DMA buffer is known
501          */
502
503         return nvme_submit_admin_cmd(dev, &c, result);
504 }
505
506 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
507 {
508         struct nvme_dev *dev = nvmeq->dev;
509         int result;
510
511         nvmeq->cq_vector = qid - 1;
512         result = nvme_alloc_cq(dev, qid, nvmeq);
513         if (result < 0)
514                 goto release_cq;
515
516         result = nvme_alloc_sq(dev, qid, nvmeq);
517         if (result < 0)
518                 goto release_sq;
519
520         nvme_init_queue(nvmeq, qid);
521
522         return result;
523
524  release_sq:
525         nvme_delete_sq(dev, qid);
526  release_cq:
527         nvme_delete_cq(dev, qid);
528
529         return result;
530 }
531
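/**
 * nvme_set_queue_count() - request the number of I/O queue pairs
 *
 * Uses the Number of Queues feature to ask the controller for @count I/O
 * submission/completion queue pairs.
 *
 * @dev:        NVMe controller
 * @count:      Requested number of I/O queue pairs
 * Return: the number of queue pairs granted (which may be fewer than
 *         requested), 0 if the feature call returned an error status, or a
 *         negative error code
 */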
532 static int nvme_set_queue_count(struct nvme_dev *dev, int count)
533 {
534         int status;
535         u32 result;
536         u32 q_count = (count - 1) | ((count - 1) << 16);
537
538         status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
539                         q_count, 0, &result);
540
541         if (status < 0)
542                 return status;
543         if (status > 1)
544                 return 0;
545
546         return min(result & 0xffff, result >> 16) + 1;
547 }
548
549 static void nvme_create_io_queues(struct nvme_dev *dev)
550 {
551         unsigned int i;
552
553         for (i = dev->queue_count; i <= dev->max_qid; i++)
554                 if (!nvme_alloc_queue(dev, i, dev->q_depth))
555                         break;
556
557         for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
558                 if (nvme_create_queue(dev->queues[i], i))
559                         break;
560 }
561
562 static int nvme_setup_io_queues(struct nvme_dev *dev)
563 {
564         int nr_io_queues;
565         int result;
566
567         nr_io_queues = 1;
568         result = nvme_set_queue_count(dev, nr_io_queues);
569         if (result <= 0)
570                 return result;
571
572         dev->max_qid = nr_io_queues;
573
574         /* Free previously allocated queues */
575         nvme_free_queues(dev, nr_io_queues + 1);
576         nvme_create_io_queues(dev);
577
578         return 0;
579 }
580
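/**
 * nvme_get_info_from_identify() - cache controller data from Identify
 *
 * Issues Identify Controller and records the namespace count, volatile write
 * cache flag, serial number, model, firmware revision and the maximum
 * transfer size (derived from MDTS) in the nvme_dev structure.
 *
 * @dev:        NVMe controller
 * Return: 0 on success, -EIO if the Identify command fails
 */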
581 static int nvme_get_info_from_identify(struct nvme_dev *dev)
582 {
583         ALLOC_CACHE_ALIGN_BUFFER(char, buf, sizeof(struct nvme_id_ctrl));
584         struct nvme_id_ctrl *ctrl = (struct nvme_id_ctrl *)buf;
585         int ret;
586         int shift = NVME_CAP_MPSMIN(dev->cap) + 12;
587
588         ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
589         if (ret)
590                 return -EIO;
591
592         dev->nn = le32_to_cpu(ctrl->nn);
593         dev->vwc = ctrl->vwc;
594         memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
595         memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
596         memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
597         if (ctrl->mdts) {
598                 dev->max_transfer_shift = (ctrl->mdts + shift);
599         } else {
600                 /*
601                  * Maximum Data Transfer Size (MDTS) field indicates the maximum
602                  * data transfer size between the host and the controller. The
603                  * host should not submit a command that exceeds this transfer
604                  * size. The value is in units of the minimum memory page size
605                  * and is reported as a power of two (2^n).
606                  *
607                  * The spec also says: a value of 0h indicates no restrictions
608                  * on transfer size. But in nvme_blk_read/write() below we have
609                  * the following algorithm for maximum number of logic blocks
610                  * per transfer:
611                  *
612                  * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
613                  *
614                  * In order for lbas not to overflow, the maximum number is 15
615                  * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
616                  * Let's use 20 which provides 1MB size.
617                  */
618                 dev->max_transfer_shift = 20;
619         }
620
621         return 0;
622 }
623
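/**
 * nvme_get_namespace_id() - return the namespace ID and EUI-64 of a device
 *
 * @udev:       NVMe namespace block device
 * @ns_id:      If non-NULL, receives the namespace identifier
 * @eui64:      If non-NULL, receives the 8-byte IEEE Extended Unique Identifier
 * Return: 0 (always succeeds)
 */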
624 int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
625 {
626         struct nvme_ns *ns = dev_get_priv(udev);
627
628         if (ns_id)
629                 *ns_id = ns->ns_id;
630         if (eui64)
631                 memcpy(eui64, ns->eui64, sizeof(ns->eui64));
632
633         return 0;
634 }
635
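/**
 * nvme_scan_namespace() - probe every controller in the NVMe uclass
 *
 * Return: 0 on success, negative error code on failure
 */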
636 int nvme_scan_namespace(void)
637 {
638         struct uclass *uc;
639         struct udevice *dev;
640         int ret;
641
642         ret = uclass_get(UCLASS_NVME, &uc);
643         if (ret)
644                 return ret;
645
646         uclass_foreach_dev(dev, uc) {
647                 ret = device_probe(dev);
648                 if (ret)
649                         return ret;
650         }
651
652         return 0;
653 }
654
655 static int nvme_blk_probe(struct udevice *udev)
656 {
657         struct nvme_dev *ndev = dev_get_priv(udev->parent);
658         struct blk_desc *desc = dev_get_uclass_platdata(udev);
659         struct nvme_ns *ns = dev_get_priv(udev);
660         u8 flbas;
661         ALLOC_CACHE_ALIGN_BUFFER(char, buf, sizeof(struct nvme_id_ns));
662         struct nvme_id_ns *id = (struct nvme_id_ns *)buf;
663         struct pci_child_platdata *pplat;
664
665         memset(ns, 0, sizeof(*ns));
666         ns->dev = ndev;
667         /* extract the namespace id from the block device name */
668         ns->ns_id = trailing_strtol(udev->name) + 1;
669         if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id))
670                 return -EIO;
671
672         memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
673         flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
674         ns->flbas = flbas;
675         ns->lba_shift = id->lbaf[flbas].ds;
676         ns->mode_select_num_blocks = le64_to_cpu(id->nsze);
677         ns->mode_select_block_len = 1 << ns->lba_shift;
678         list_add(&ns->list, &ndev->namespaces);
679
680         desc->lba = ns->mode_select_num_blocks;
681         desc->log2blksz = ns->lba_shift;
682         desc->blksz = 1 << ns->lba_shift;
683         desc->bdev = udev;
684         pplat = dev_get_parent_platdata(udev->parent);
685         sprintf(desc->vendor, "0x%.4x", pplat->vendor);
686         memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
687         memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));
688
689         return 0;
690 }
691
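/**
 * nvme_blk_rw() - common block read/write implementation
 *
 * Splits the request into chunks no larger than the controller's maximum
 * transfer size, sets up PRP entries for each chunk and issues NVMe
 * read/write commands on the I/O queue. Cache maintenance is performed on
 * the data buffer as appropriate for the transfer direction.
 *
 * @udev:       Namespace block device
 * @blknr:      First logical block of the transfer
 * @blkcnt:     Number of logical blocks to transfer
 * @buffer:     Data buffer
 * @read:       true for a read, false for a write
 * Return: the number of blocks actually transferred
 */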
692 static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
693                          lbaint_t blkcnt, void *buffer, bool read)
694 {
695         struct nvme_ns *ns = dev_get_priv(udev);
696         struct nvme_dev *dev = ns->dev;
697         struct nvme_command c;
698         struct blk_desc *desc = dev_get_uclass_platdata(udev);
699         int status;
700         u64 prp2;
701         u64 total_len = blkcnt << desc->log2blksz;
702         u64 temp_len = total_len;
703
704         u64 slba = blknr;
705         u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
706         u64 total_lbas = blkcnt;
707
708         if (!read)
709                 flush_dcache_range((unsigned long)buffer,
710                                    (unsigned long)buffer + total_len);
711
712         c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
713         c.rw.flags = 0;
714         c.rw.nsid = cpu_to_le32(ns->ns_id);
715         c.rw.control = 0;
716         c.rw.dsmgmt = 0;
717         c.rw.reftag = 0;
718         c.rw.apptag = 0;
719         c.rw.appmask = 0;
720         c.rw.metadata = 0;
721
722         while (total_lbas) {
723                 if (total_lbas < lbas) {
724                         lbas = (u16)total_lbas;
725                         total_lbas = 0;
726                 } else {
727                         total_lbas -= lbas;
728                 }
729
730                 if (nvme_setup_prps(dev, &prp2,
731                                     lbas << ns->lba_shift, (ulong)buffer))
732                         return -EIO;
733                 c.rw.slba = cpu_to_le64(slba);
734                 slba += lbas;
735                 c.rw.length = cpu_to_le16(lbas - 1);
736                 c.rw.prp1 = cpu_to_le64((ulong)buffer);
737                 c.rw.prp2 = cpu_to_le64(prp2);
738                 status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
739                                 &c, NULL, IO_TIMEOUT);
740                 if (status)
741                         break;
742                 temp_len -= (u32)lbas << ns->lba_shift;
743                 buffer += lbas << ns->lba_shift;
744         }
745
746         if (read)
747                 invalidate_dcache_range((unsigned long)buffer,
748                                         (unsigned long)buffer + total_len);
749
750         return (total_len - temp_len) >> desc->log2blksz;
751 }
752
753 static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
754                            lbaint_t blkcnt, void *buffer)
755 {
756         return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
757 }
758
759 static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
760                             lbaint_t blkcnt, const void *buffer)
761 {
762         return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
763 }
764
765 static const struct blk_ops nvme_blk_ops = {
766         .read   = nvme_blk_read,
767         .write  = nvme_blk_write,
768 };
769
770 U_BOOT_DRIVER(nvme_blk) = {
771         .name   = "nvme-blk",
772         .id     = UCLASS_BLK,
773         .probe  = nvme_blk_probe,
774         .ops    = &nvme_blk_ops,
775         .priv_auto_alloc_size = sizeof(struct nvme_ns),
776 };
777
778 static int nvme_bind(struct udevice *udev)
779 {
780         static int ndev_num;
781         char name[20];
782
783         sprintf(name, "nvme#%d", ndev_num++);
784
785         return device_set_name(udev, name);
786 }
787
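/**
 * nvme_probe() - driver model probe for an NVMe controller
 *
 * Maps BAR0, reads the controller capabilities, configures the admin queue,
 * allocates the PRP pool, creates the I/O queue pair and caches the Identify
 * Controller data.
 *
 * @udev:       NVMe controller device
 * Return: 0 on success, negative error code on failure
 */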
788 static int nvme_probe(struct udevice *udev)
789 {
790         int ret;
791         struct nvme_dev *ndev = dev_get_priv(udev);
792
793         ndev->instance = trailing_strtol(udev->name);
794
795         INIT_LIST_HEAD(&ndev->namespaces);
796         ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
797                         PCI_REGION_MEM);
798         if (readl(&ndev->bar->csts) == -1) {
799                 ret = -ENODEV;
800                 printf("Error: %s: Cannot access controller\n", udev->name);
801                 goto free_nvme;
802         }
803
804         ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
805         if (!ndev->queues) {
806                 ret = -ENOMEM;
807                 printf("Error: %s: Out of memory!\n", udev->name);
808                 goto free_nvme;
809         }
810         memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));
811
812         ndev->cap = nvme_readq(&ndev->bar->cap);
813         ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
814         ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
815         ndev->dbs = ((void __iomem *)ndev->bar) + 4096;
816
817         ret = nvme_configure_admin_queue(ndev);
818         if (ret)
819                 goto free_queue;
820
821         /* Allocate after the page size is known */
822         ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
823         if (!ndev->prp_pool) {
824                 ret = -ENOMEM;
825                 printf("Error: %s: Out of memory!\n", udev->name);
826                 goto free_queue;
827         }
828         ndev->prp_entry_num = MAX_PRP_POOL >> 3;
829
830         ret = nvme_setup_io_queues(ndev);
831         if (ret)
832                 goto free_queue;
833
834         nvme_get_info_from_identify(ndev);
835
836         return 0;
837
838 free_queue:
839         free((void *)ndev->queues);
840 free_nvme:
841         return ret;
842 }
843
844 U_BOOT_DRIVER(nvme) = {
845         .name   = "nvme",
846         .id     = UCLASS_NVME,
847         .bind   = nvme_bind,
848         .probe  = nvme_probe,
849         .priv_auto_alloc_size = sizeof(struct nvme_dev),
850 };
851
852 struct pci_device_id nvme_supported[] = {
853         { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
854         {}
855 };
856
857 U_BOOT_PCI_DEVICE(nvme, nvme_supported);