/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

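	/*
	 * If the last LBA does not fit in 32 bits, report 0xffffffff so the
	 * initiator knows to issue READ CAPACITY (16) instead (SBC-3).
	 */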
	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	put_unaligned_be32(blocks, &buf[0]);
	put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;

	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	put_unaligned_be64(blocks, &buf[0]);
	put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]);
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		/*
		 * Only override a device's pi_prot_type if no T10-PI is
		 * available, and sess_prot_type has been explicitly enabled.
		 */
		if (!pi_prot_type)
			pi_prot_type = sess->sess_prot_type;

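		/* Byte 12: P_TYPE = pi_prot_type - 1 in bits 3:1, PROT_EN in bit 0 */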
		if (pi_prot_type)
			buf[12] = (pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);

		put_unaligned_be16(lalba, &buf[14]);
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) {
		buf[14] |= 0x80;

		/*
		 * LBPRZ signifies that zeroes will be read back from an LBA after
		 * an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2)
		 */
		if (dev->dev_attrib.unmap_zeroes_data)
			buf[14] |= 0x40;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}

static sense_reason_t
sbc_emulate_startstop(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * See sbc3r36 section 5.25
	 * Immediate bit should be set since there is nothing to complete
	 * POWER CONDITION MODIFIER 0h
	 */
	if (!(cdb[1] & 1) || cdb[2] || cdb[3])
		return TCM_INVALID_CDB_FIELD;

	/*
	 * See sbc3r36 section 5.25
	 * POWER CONDITION 0h START_VALID - process START and LOEJ
	 */
	if (cdb[4] >> 4 & 0xf)
		return TCM_INVALID_CDB_FIELD;

	/*
	 * See sbc3r36 section 5.25
	 * LOEJ 0h - nothing to load or unload
	 * START 1h - we are ready
	 */
	if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
		return TCM_INVALID_CDB_FIELD;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero value is supplied, otherwise
	 * calculate the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if (nolb) {
		ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb);
		if (ret)
			return ret;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return get_unaligned_be16(&cdb[7]);
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[6]);
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[10]);
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[28]);
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return get_unaligned_be24(&cdb[1]) & 0x1fffff;
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[2]);
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	return get_unaligned_be64(&cdb[2]);
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	return get_unaligned_be64(&cdb[12]);
}

static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
	 */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpws) {
			pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
			       " has emulate_tpws disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		cmd->execute_cmd = sbc_execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
	if (ret)
		return ret;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
					   int *post_ret)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;

	return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
					     int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret = TCM_NO_SENSE;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_SENT) {
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
		*post_ret = 1;

		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return ret;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
						 int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
		return TCM_NO_SENSE;
	/*
	 * Handle special case for zero-length COMPARE_AND_WRITE
	 */
	if (!cmd->data_length)
		goto out;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_debug("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		*post_ret = 1;
		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc_array(cmd->t_data_nents, sizeof(*write_sg),
				 GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
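	/*
	 * The data-out buffer carries the verify payload followed by the
	 * write payload, so the write data starts block_size bytes into
	 * the current SGL entry (or one full page further along when
	 * block_size == PAGE_SIZE).
	 */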
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset + block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd, false);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if (rc != 0) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_done() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

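/*
 * Map the RDPROTECT/WRPROTECT CDB field onto a protection operation and the
 * guard/reference tag checks the backend must perform, following the
 * protection information check tables in SBC-3.
 */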
static int
sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
			       protect ? TARGET_PROT_DOUT_PASS :
			       TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
			       protect ? TARGET_PROT_DIN_PASS :
			       TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}

static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;
	int sp_ops = cmd->se_sess->sup_prot_ops;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;
	bool fabric_prot = false;

	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
		if (unlikely(protect &&
		    !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
			pr_err("CDB contains protect bit, but device + fabric do"
			       " not advertise PROTECT=1 feature bit\n");
			return TCM_INVALID_CDB_FIELD;
		}
		if (cmd->prot_pto)
			return TCM_NO_SENSE;
	}

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return TCM_INVALID_CDB_FIELD;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * See if the fabric supports T10-PI, and the session has been
		 * configured to allow export PROTECT=1 feature bit with backend
		 * devices that don't support T10-PI.
		 */
		fabric_prot = is_write ?
			      !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
			      !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));

		if (fabric_prot && cmd->se_sess->sess_prot_type) {
			pi_prot_type = cmd->se_sess->sess_prot_type;
			break;
		}
		if (!protect)
			return TCM_NO_SENSE;
		/* Fallthrough */
	default:
		pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
		       "PROTECT: 0x%02x\n", cdb[0], protect);
		return TCM_INVALID_CDB_FIELD;
	}

	if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
		return TCM_INVALID_CDB_FIELD;

	cmd->prot_type = pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * In case protection information exists over the wire
	 * we modify command data length to describe pure data.
	 * The actual transfer length is data length + protection
	 * length
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return TCM_NO_SENSE;
}

static int
sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
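	/* DPO is bit 4 of CDB byte 1; FUA is bit 3 */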
	if (cdb[1] & 0x10) {
		/* see explanation in spc_emulate_modesense */
		if (!target_check_fua(dev)) {
			pr_err("Got CDB: 0x%02x with DPO bit set, but device"
			       " does not advertise support for DPO\n", cdb[0]);
			return -EINVAL;
		}
	}
	if (cdb[1] & 0x8) {
		if (!target_check_fua(dev)) {
			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
			       " does not advertise support for FUA write\n",
			       cdb[0]);
			return -EINVAL;
		}
		cmd->se_cmd_flags |= SCF_FUA;
	}
	return 0;
}

sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	cmd->protocol_data = ops;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
	case WRITE_VERIFY_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			if (sbc_check_dpofua(dev, cmd, cdb))
				return TCM_INVALID_CDB_FIELD;
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
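			/*
			 * The Block Limits VPD page reports WSNZ=1, so a
			 * zero NUMBER OF LOGICAL BLOCKS is invalid here.
			 */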
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		if (!dev->dev_attrib.emulate_caw) {
			pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n",
					   dev->se_hba->backend->ops->name,
					   config_item_name(&dev->dev_group.cg_item),
					   dev->t10_wwn.unit_serial);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
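		/* ALLOCATION LENGTH is in CDB bytes 10-13 for SERVICE ACTION IN (16) */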
		size = get_unaligned_be32(&cdb[10]);
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpu) {
			pr_err("Got UNMAP, but backend device has"
			       " emulate_tpu disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = sbc_execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
	case VERIFY_16:
		size = 0;
		if (cdb[0] == VERIFY) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case START_STOP:
		size = 0;
		cmd->execute_cmd = sbc_emulate_startstop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
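		/* get_blocks() returns the last addressable LBA, hence the +1 */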
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

static sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

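	/*
	 * Each UNMAP block descriptor is 16 bytes: an 8-byte LBA, a 4-byte
	 * block count, and 4 reserved bytes.
	 */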
	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		if (range) {
			ret = ops->execute_unmap(cmd, lba, range);
			if (ret)
				goto err;
		}

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_pi_tuple *sdt;
	struct scatterlist *dsg = cmd->t_data_sg, *psg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;
	unsigned int block_size = dev->dev_attrib.block_size;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

		for (j = 0; j < psg->length;
				j += sizeof(*sdt)) {
			__u16 crc;
			unsigned int avail;

			if (offset >= dsg->length) {
				offset -= dsg->length;
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
			}

			sdt = paddr + j;
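			/*
			 * A logical block may straddle a data sg entry, so
			 * compute the guard CRC in two steps with
			 * crc_t10dif_update() when needed.
			 */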
			avail = min(block_size, dsg->length - offset);
			crc = crc_t10dif(daddr + offset, avail);
			if (avail < block_size) {
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
				offset = block_size - avail;
				crc = crc_t10dif_update(crc, daddr, offset);
			} else {
				offset += block_size;
			}

			sdt->guard_tag = cpu_to_be16(crc);
			if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (cmd->data_direction == DMA_TO_DEVICE) ?
				 "WRITE" : "READ", (unsigned long long)sector,
				 sdt->guard_tag, sdt->app_tag,
				 be32_to_cpu(sdt->ref_tag));

			sector++;
		}

		kunmap_atomic(daddr - dsg->offset);
		kunmap_atomic(paddr - psg->offset);
	}
}

static sense_reason_t
sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
		  __u16 crc, sector_t sector, unsigned int ei_lba)
{
	__be16 csum;

	if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
		goto check_ref;

	csum = cpu_to_be16(crc);

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
			" csum 0x%04x\n", (unsigned long long)sector,
			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

check_ref:
	if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
		return 0;

	if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
			be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		       struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	if (!sg)
		return;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			kunmap_atomic(addr - sg->offset - offset);

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
		}
		kunmap_atomic(paddr - psg->offset);
	}
}
EXPORT_SYMBOL(sbc_dif_copy_prot);

sense_reason_t
sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
	       unsigned int ei_lba, struct scatterlist *psg, int psg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_pi_tuple *sdt;
	struct scatterlist *dsg = cmd->t_data_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i;
	sense_reason_t rc;
	int dsg_off = 0;
	unsigned int block_size = dev->dev_attrib.block_size;

	for (; psg && sector < start + sectors; psg = sg_next(psg)) {
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

		for (i = psg_off; i < psg->length &&
				sector < start + sectors;
				i += sizeof(*sdt)) {
			__u16 crc;
			unsigned int avail;

			if (dsg_off >= dsg->length) {
				dsg_off -= dsg->length;
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return 0;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
			}

			sdt = paddr + i;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

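			/*
			 * An application tag of all ones (T10_PI_APP_ESCAPE)
			 * means the protection information for this block
			 * shall not be checked, so skip verification.
			 */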
			if (sdt->app_tag == T10_PI_APP_ESCAPE) {
				dsg_off += block_size;
				goto next;
			}

			avail = min(block_size, dsg->length - dsg_off);
			crc = crc_t10dif(daddr + dsg_off, avail);
			if (avail < block_size) {
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return 0;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
				dsg_off = block_size - avail;
				crc = crc_t10dif_update(crc, daddr, dsg_off);
			} else {
				dsg_off += block_size;
			}

			rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
			if (rc) {
				kunmap_atomic(daddr - dsg->offset);
				kunmap_atomic(paddr - psg->offset);
				cmd->bad_sector = sector;
				return rc;
			}
next:
			sector++;
			ei_lba++;
		}

		psg_off = 0;
		kunmap_atomic(daddr - dsg->offset);
		kunmap_atomic(paddr - psg->offset);
	}

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify);