Linux-libre 5.3.12-gnu: drivers/nvme/target/io-cmd-file.c (librecmc/linux-libre.git)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC            16
#define NVMET_MIN_MPOOL_OBJ             16

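/*
 * Tear down the file-backed namespace: drain any pending buffered-io
 * work, release the bvec mempool and slab cache, and drop the file
 * reference taken at enable time.
 */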
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
        if (ns->file) {
                if (ns->buffered_io)
                        flush_workqueue(buffered_io_wq);
                mempool_destroy(ns->bvec_pool);
                ns->bvec_pool = NULL;
                kmem_cache_destroy(ns->bvec_cache);
                ns->bvec_cache = NULL;
                fput(ns->file);
                ns->file = NULL;
        }
}

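/*
 * Open the backing file (O_DIRECT unless buffered_io is set), record its
 * size, clamp the exported block size shift to 12 (4K), and set up the
 * bio_vec slab cache plus a mempool so I/O can still make progress under
 * memory pressure.
 */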
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
        int flags = O_RDWR | O_LARGEFILE;
        struct kstat stat;
        int ret;

        if (!ns->buffered_io)
                flags |= O_DIRECT;

        ns->file = filp_open(ns->device_path, flags, 0);
        if (IS_ERR(ns->file)) {
                pr_err("failed to open file %s: (%ld)\n",
                                ns->device_path, PTR_ERR(ns->file));
                return PTR_ERR(ns->file);
        }

        ret = vfs_getattr(&ns->file->f_path,
                        &stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
        if (ret)
                goto err;

        ns->size = stat.size;
        /*
         * i_blkbits can be greater than the universally accepted upper bound,
         * so make sure we export a sane namespace lba_shift.
         */
        ns->blksize_shift = min_t(u8,
                        file_inode(ns->file)->i_blkbits, 12);

        ns->bvec_cache = kmem_cache_create("nvmet-bvec",
                        NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!ns->bvec_cache) {
                ret = -ENOMEM;
                goto err;
        }

        ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
                        mempool_free_slab, ns->bvec_cache);

        if (!ns->bvec_pool) {
                ret = -ENOMEM;
                goto err;
        }

        return ret;
err:
        ns->size = 0;
        ns->blksize_shift = 0;
        nvmet_file_ns_disable(ns);
        return ret;
}

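/* Map a single scatterlist entry onto a bio_vec. */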
static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
        bv->bv_page = sg_page(sg);
        bv->bv_offset = sg->offset;
        bv->bv_len = sg->length;
}

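/*
 * Build an iov_iter over the request's bio_vec array and call the backing
 * file's ->read_iter/->write_iter directly.  FUA writes are made durable
 * by adding IOCB_DSYNC.
 */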
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
                unsigned long nr_segs, size_t count, int ki_flags)
{
        struct kiocb *iocb = &req->f.iocb;
        ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
        struct iov_iter iter;
        int rw;

        if (req->cmd->rw.opcode == nvme_cmd_write) {
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        ki_flags |= IOCB_DSYNC;
                call_iter = req->ns->file->f_op->write_iter;
                rw = WRITE;
        } else {
                call_iter = req->ns->file->f_op->read_iter;
                rw = READ;
        }

        iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

        iocb->ki_pos = pos;
        iocb->ki_filp = req->ns->file;
        iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

        return call_iter(iocb, &iter);
}

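/*
 * Completion handler, called directly on the synchronous paths and as the
 * kiocb ->ki_complete callback for async I/O: free the bio_vec array and
 * complete the request, flagging short transfers as errors.
 */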
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
        struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
        u16 status = NVME_SC_SUCCESS;

        if (req->f.bvec != req->inline_bvec) {
                if (likely(req->f.mpool_alloc == false))
                        kfree(req->f.bvec);
                else
                        mempool_free(req->f.bvec, req->ns->bvec_pool);
        }

        if (unlikely(ret != req->data_len))
                status = errno_to_nvme_status(req, ret);
        nvmet_req_complete(req, status);
}

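/*
 * Fill the bio_vec array from the scatterlist and issue the I/O.  A
 * mempool-backed array holds only NVMET_MAX_MPOOL_BVEC entries, so
 * requests with more segments on that path are issued as a series of
 * synchronous chunks.  Returns true if the request was submitted or
 * completed, false if the caller must retry without IOCB_NOWAIT.
 */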
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
        ssize_t nr_bvec = req->sg_cnt;
        unsigned long bv_cnt = 0;
        bool is_sync = false;
        size_t len = 0, total_len = 0;
        ssize_t ret = 0;
        loff_t pos;
        int i;
        struct scatterlist *sg;

        if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
                is_sync = true;

        pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
        if (unlikely(pos + req->data_len > req->ns->size)) {
                nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
                return true;
        }

        memset(&req->f.iocb, 0, sizeof(struct kiocb));
        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
                len += req->f.bvec[bv_cnt].bv_len;
                total_len += req->f.bvec[bv_cnt].bv_len;
                bv_cnt++;

                WARN_ON_ONCE((nr_bvec - 1) < 0);

                if (unlikely(is_sync) &&
                    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
                        ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
                        if (ret < 0)
                                goto complete;

                        pos += len;
                        bv_cnt = 0;
                        len = 0;
                }
                nr_bvec--;
        }

        if (WARN_ON_ONCE(total_len != req->data_len)) {
                ret = -EIO;
                goto complete;
        }

        if (unlikely(is_sync)) {
                ret = total_len;
                goto complete;
        }

        /*
         * A NULL ki_complete asks for synchronous execution, which we want
         * for the IOCB_NOWAIT case.
         */
        if (!(ki_flags & IOCB_NOWAIT))
                req->f.iocb.ki_complete = nvmet_file_io_done;

        ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

        switch (ret) {
        case -EIOCBQUEUED:
                return true;
        case -EAGAIN:
                if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
                        goto complete;
                return false;
        case -EOPNOTSUPP:
                /*
                 * For file systems that return -EOPNOTSUPP, handle the
                 * IOCB_NOWAIT case separately and retry without
                 * IOCB_NOWAIT.
                 */
                if ((ki_flags & IOCB_NOWAIT))
                        return false;
                break;
        }

complete:
        nvmet_file_io_done(&req->f.iocb, ret, 0);
        return true;
}

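/* Worker for the buffered-io workqueue: execute without IOCB_NOWAIT. */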
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
        queue_work(buffered_io_wq, &req->f.work);
}

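/*
 * Entry point for read/write commands: allocate the bio_vec array
 * (inline, kmalloc, or mempool as a last resort), then dispatch.
 * Buffered I/O is first tried with IOCB_NOWAIT, unless the array came
 * from the mempool, and falls back to the workqueue if it would block;
 * direct I/O is issued inline.
 */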
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
        ssize_t nr_bvec = req->sg_cnt;

        if (!req->sg_cnt || !nr_bvec) {
                nvmet_req_complete(req, 0);
                return;
        }

        if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
                req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
                                GFP_KERNEL);
        else
                req->f.bvec = req->inline_bvec;

        if (unlikely(!req->f.bvec)) {
                /* fallback under memory pressure */
                req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
                req->f.mpool_alloc = true;
        } else
                req->f.mpool_alloc = false;

        if (req->ns->buffered_io) {
                if (likely(!req->f.mpool_alloc) &&
                                nvmet_file_execute_io(req, IOCB_NOWAIT))
                        return;
                nvmet_file_submit_buffered_io(req);
        } else
                nvmet_file_execute_io(req, 0);
}

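/* Datasync the backing file and map the result to an NVMe status code. */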
u16 nvmet_file_flush(struct nvmet_req *req)
{
        return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_flush_work);
        schedule_work(&req->f.work);
}

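/*
 * Deallocate: punch a hole for each DSM range (dsm.nr is 0's based,
 * hence the <= in the loop).  -EOPNOTSUPP from the file system is
 * ignored; any other error aborts the loop with error_slba recorded.
 */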
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
        int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
        struct nvme_dsm_range range;
        loff_t offset, len;
        u16 status = 0;
        int ret;
        int i;

        for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
                status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
                                        sizeof(range));
                if (status)
                        break;

                offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
                len = le32_to_cpu(range.nlb);
                len <<= req->ns->blksize_shift;
                if (offset + len > req->ns->size) {
                        req->error_slba = le64_to_cpu(range.slba);
                        status = errno_to_nvme_status(req, -ENOSPC);
                        break;
                }

                ret = vfs_fallocate(req->ns->file, mode, offset, len);
                if (ret && ret != -EOPNOTSUPP) {
                        req->error_slba = le64_to_cpu(range.slba);
                        status = errno_to_nvme_status(req, ret);
                        break;
                }
        }

        nvmet_req_complete(req, status);
}

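/*
 * Only the Deallocate (AD) attribute is implemented; other DSM
 * attributes complete successfully without doing any work.
 */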
static void nvmet_file_dsm_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        switch (le32_to_cpu(req->cmd->dsm.attributes)) {
        case NVME_DSMGMT_AD:
                nvmet_file_execute_discard(req);
                return;
        case NVME_DSMGMT_IDR:
        case NVME_DSMGMT_IDW:
        default:
                /* Not supported yet */
                nvmet_req_complete(req, 0);
                return;
        }
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_dsm_work);
        schedule_work(&req->f.work);
}

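/*
 * Write Zeroes: write_zeroes->length is a 0's based block count (hence
 * the + 1); zero the range in the backing file via fallocate with
 * FALLOC_FL_ZERO_RANGE.
 */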
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
        struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
        int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
        loff_t offset;
        loff_t len;
        int ret;

        offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
        len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
                        req->ns->blksize_shift);

        if (unlikely(offset + len > req->ns->size)) {
                nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
                return;
        }

        ret = vfs_fallocate(req->ns->file, mode, offset, len);
        nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
        schedule_work(&req->f.work);
}

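/*
 * Decode an I/O command for a file-backed namespace: set the execute
 * handler and expected data length, or fail unknown opcodes with
 * Invalid Opcode and Do Not Retry set.
 */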
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        switch (cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_file_execute_rw;
                req->data_len = nvmet_rw_len(req);
                return 0;
        case nvme_cmd_flush:
                req->execute = nvmet_file_execute_flush;
                req->data_len = 0;
                return 0;
        case nvme_cmd_dsm:
                req->execute = nvmet_file_execute_dsm;
                req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
                        sizeof(struct nvme_dsm_range);
                return 0;
        case nvme_cmd_write_zeroes:
                req->execute = nvmet_file_execute_write_zeroes;
                req->data_len = 0;
                return 0;
        default:
                pr_err("unhandled cmd for file ns %d on qid %d\n",
                                cmd->common.opcode, req->sq->qid);
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
}