/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}
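/*
 * Example: for a hypothetical backend whose ->get_blocks() reports a
 * last LBA of 0x7fffffff (2^31 blocks of 512 bytes, i.e. a 1 TiB
 * device), the 8-byte payload built above would be:
 *
 *	buf[0..3] = 7f ff ff ff		(RETURNED LOGICAL BLOCK ADDRESS)
 *	buf[4..7] = 00 00 02 00		(BLOCK LENGTH IN BYTES = 512)
 *
 * Devices with a last LBA at or above 0xffffffff report 0xffffffff
 * here, telling the initiator to retry with READ CAPACITY (16).
 */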
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type)
			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}
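/*
 * Example of the protection encoding above: pi_prot_type = 1 (DIF
 * Type 1) yields buf[12] = ((1 - 1) << 1) | 0x1 = 0x01, i.e.
 * P_TYPE = 000b with PROT_EN = 1. With emulate_tpu or emulate_tpws
 * enabled, buf[14] additionally carries the LBPME bit (0x80).
 */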
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);
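/*
 * Worked example for the fallback above: a WRITE_SAME_16 CDB with
 * NUMBER OF LOGICAL BLOCKS set to zero and LOGICAL BLOCK ADDRESS set
 * to 1000, on a device whose ->get_blocks() returns a last LBA of
 * 1999, resolves to 1999 - 1000 + 1 = 1000 sectors: "write same to
 * the end of the device".
 */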
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
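/*
 * Example: a READ_16 CDB carrying LBA 0x0000000100000000 arrives with
 * cdb[2..9] = 00 00 00 01 00 00 00 00, so transport_lba_64() computes
 * __v1 = 0x00000001 and __v2 = 0x00000000, returning
 * ((u64)__v2) | ((u64)__v1 << 32) = 0x0000000100000000.
 */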
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
	 */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}
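/*
 * For reference, the WRITE_SAME flags byte checked above encodes, per
 * SBC-3: bit 4 (0x10) ANCHOR, bit 3 (0x08) UNMAP, bit 2 (0x04) PBDATA
 * and bit 1 (0x02) LBDATA. So a WRITE_SAME (16) with cdb[1] = 0x08
 * takes the execute_write_same_unmap path, while cdb[1] = 0x00 falls
 * through to the plain execute_write_same backend callback.
 */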
static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
					   int *post_ret)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */
	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
					     int *post_ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
		*post_ret = 1;
	}
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
						 int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
		return TCM_NO_SENSE;
	/*
	 * Handle special case for zero-length COMPARE_AND_WRITE
	 */
	if (!cmd->data_length)
		goto out;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset + block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = MSG_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_done() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}
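/*
 * To summarize the COMPARE_AND_WRITE flow assembled from the pieces
 * above:
 *
 *  1) sbc_parse_cdb() sets ->execute_cmd = sbc_compare_and_write and
 *     ->transport_complete_callback = compare_and_write_callback;
 *  2) sbc_compare_and_write() takes ->caw_sem and submits the READ
 *     into cmd->t_bidi_data_sg;
 *  3) compare_and_write_callback() memcmp()s the read payload against
 *     the verify payload and, on a match, re-submits the command as a
 *     WRITE through sbc_execute_rw();
 *  4) compare_and_write_post() releases ->caw_sem once the WRITE
 *     completes (the callback releases it itself on MISCOMPARE or
 *     failure).
 */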
static int
sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
					 TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
					 TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}
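/*
 * Example: a WRITE_10 with WRPROTECT = 001b against a Type 1 formatted
 * device reaches this helper with protect = 1 and is_write = true, so
 * prot_op becomes TARGET_PROT_DOUT_PASS and prot_checks ends up as
 * TARGET_DIF_CHECK_GUARD | TARGET_DIF_CHECK_REFTAG.
 */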
static bool
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;

	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
		return true;

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return false;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
	default:
		return true;
	}

	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
				   is_write, cmd))
		return false;

	cmd->prot_type = dev->dev_attrib.pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * In case protection information exists over the wire
	 * we modify command data length to describe pure data.
	 * The actual transfer length is data length + protection
	 * length
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return true;
}
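/*
 * Example of the length adjustment above: an 8-sector WRITE with
 * WRPROTECT != 0 on a 512-byte block device using 8-byte PI tuples
 * ends up with cmd->data_length = 8 * 512 = 4096 bytes of pure data
 * and cmd->prot_length = 8 * 8 = 64 bytes of protection information
 * carried separately in cmd->t_prot_sg.
 */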
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
	case VERIFY_16:
		size = 0;
		if (cdb[0] == VERIFY) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
			       "(lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);
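/*
 * A minimal sketch of how a backend wires this parser up (the example_*
 * names are hypothetical; see target_core_iblock.c and
 * target_core_file.c for the real in-tree users). The backend supplies
 * its I/O callbacks through struct sbc_ops and delegates all CDB
 * parsing to sbc_parse_cdb():
 *
 *	static struct sbc_ops example_sbc_ops = {
 *		.execute_rw		= example_execute_rw,
 *		.execute_sync_cache	= example_execute_sync_cache,
 *		.execute_write_same	= example_execute_write_same,
 *		.execute_unmap		= example_execute_unmap,
 *	};
 *
 *	static sense_reason_t example_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return sbc_parse_cdb(cmd, &example_sbc_ops);
 *	}
 */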
u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);
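/*
 * For reference, the UNMAP parameter list parsed above is laid out as
 * follows (all fields big-endian, per SBC-3):
 *
 *	bytes 0-1  : UNMAP DATA LENGTH (dl)
 *	bytes 2-3  : UNMAP BLOCK DESCRIPTOR DATA LENGTH (bd_dl)
 *	bytes 4-7  : reserved
 *	bytes 8..  : block descriptors, 16 bytes each:
 *		     bytes 0-7   UNMAP LOGICAL BLOCK ADDRESS
 *		     bytes 8-11  NUMBER OF LOGICAL BLOCKS
 *		     bytes 12-15 reserved
 *
 * e.g. a single-descriptor list unmapping 8 blocks at LBA 0x1000
 * carries dl = 22, bd_dl = 16, and one descriptor with lba = 0x1000
 * and range = 8.
 */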
void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;
			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
						dev->dev_attrib.block_size));
			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			sector++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
}
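/*
 * The 8-byte tuple generated above follows the T10 DIF v1 layout of
 * struct se_dif_v1_tuple:
 *
 *	__be16 guard_tag;	CRC16 (T10-DIF) of the data block
 *	__be16 app_tag;		application tag (0xffff acts as an
 *				escape in the read-verify path below)
 *	__be32 ref_tag;		lower 32 bits of the LBA for Type 1
 *
 * e.g. a Type 1 tuple for sector 0x100000001 carries
 * ref_tag = cpu_to_be32(0x00000001).
 */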
static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
			" csum 0x%04x\n", (unsigned long long)sector,
			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}
static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}
sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}

	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);
static sense_reason_t
__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + sg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}

	return 0;
}
sense_reason_t
sbc_dif_read_strip(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors = cmd->prot_length / dev->prot_length;

	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
				     cmd->t_prot_sg, 0);
}

sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	sense_reason_t rc;

	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
	if (rc)
		return rc;

	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);