Merge patch series "scsi: target: Add WRITE_ATOMIC_16 support"

John Garry <john.g.garry@oracle.com> says:

This is a reposting of Mike's atomic writes support for the SCSI
target.

Again, we are now only supporting target_core_iblock. It's implemented
similarly to UNMAP, where we do not do any emulation and instead pass the
operation to the block layer.

Link: https://patch.msgid.link/20251020103820.2917593-1-john.g.garry@oracle.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Committed by Martin K. Petersen on 2025-11-02 21:41:45 -05:00
8 changed files with 148 additions and 15 deletions

drivers/target/target_core_configfs.c

@@ -578,6 +578,11 @@ DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
DEF_CONFIGFS_ATTRIB_SHOW(submit_type);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_with_boundary);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_boundary);
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
static ssize_t _name##_store(struct config_item *item, const char *page,\
@@ -1300,6 +1305,11 @@ CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR(, alua_support);
CONFIGFS_ATTR(, pgr_support);
CONFIGFS_ATTR(, submit_type);
CONFIGFS_ATTR_RO(, atomic_max_len);
CONFIGFS_ATTR_RO(, atomic_alignment);
CONFIGFS_ATTR_RO(, atomic_granularity);
CONFIGFS_ATTR_RO(, atomic_max_with_boundary);
CONFIGFS_ATTR_RO(, atomic_max_boundary);
/*
* dev_attrib attributes for devices using the target core SBC/SPC
@@ -1343,6 +1353,11 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
&attr_pgr_support,
&attr_emulate_rsoc,
&attr_submit_type,
&attr_atomic_alignment,
&attr_atomic_max_len,
&attr_atomic_granularity,
&attr_atomic_max_with_boundary,
&attr_atomic_max_boundary,
NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
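
For context, each DEF_CONFIGFS_ATTRIB_SHOW() invocation above generates a
one-line show handler for the named attribute, and CONFIGFS_ATTR_RO() then
registers it read-only (the atomic limits are derived from the backing
device, so there is no store handler). Roughly, as a sketch abbreviated from
the macro visible at the top of the hunk:

/* Approximate expansion of DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len):
 * print the u32 attribute into the configfs page buffer.
 */
static ssize_t atomic_max_len_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n",
			to_attrib(item)->atomic_max_len);
}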

drivers/target/target_core_device.c

@@ -840,12 +840,29 @@ free_device:
return NULL;
}
void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
int block_size = bdev_logical_block_size(bdev);
if (!bdev_can_atomic_write(bdev))
return;
attrib->atomic_max_len = queue_atomic_write_max_bytes(q) / block_size;
attrib->atomic_granularity = attrib->atomic_alignment =
queue_atomic_write_unit_min_bytes(q) / block_size;
attrib->atomic_max_with_boundary = 0;
attrib->atomic_max_boundary = 0;
}
EXPORT_SYMBOL_GPL(target_configure_write_atomic_from_bdev);
/*
* Check if the underlying struct block_device supports discard and if yes
* configure the UNMAP parameters.
*/
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct block_device *bdev)
bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
struct block_device *bdev)
{
int block_size = bdev_logical_block_size(bdev);
@@ -863,7 +880,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
bdev_discard_alignment(bdev) / block_size;
return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
EXPORT_SYMBOL(target_configure_unmap_from_bdev);
/*
* Convert from blocksize advertised to the initiator to the 512 byte
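
A quick worked example of the conversion above: the block layer reports its
atomic write limits in bytes, while the SCSI attributes are counted in
logical blocks. On an illustrative device with 512-byte logical blocks where
queue_atomic_write_max_bytes() returns 65536 and
queue_atomic_write_unit_min_bytes() returns 4096, the helper stores
atomic_max_len = 65536 / 512 = 128 blocks and atomic_granularity =
atomic_alignment = 4096 / 512 = 8 blocks. The two boundary fields are
zeroed, i.e. atomic boundaries are not supported.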

drivers/target/target_core_file.c

@@ -92,8 +92,8 @@ static bool fd_configure_unmap(struct se_device *dev)
struct inode *inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode))
return target_configure_unmap_from_queue(&dev->dev_attrib,
I_BDEV(inode));
return target_configure_unmap_from_bdev(&dev->dev_attrib,
I_BDEV(inode));
/* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
dev->dev_attrib.max_unmap_lba_count = 0x2000;

drivers/target/target_core_iblock.c

@@ -84,8 +84,8 @@ static bool iblock_configure_unmap(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
return target_configure_unmap_from_queue(&dev->dev_attrib,
ib_dev->ibd_bd);
return target_configure_unmap_from_bdev(&dev->dev_attrib,
ib_dev->ibd_bd);
}
static int iblock_configure_device(struct se_device *dev)
@@ -152,6 +152,8 @@ static int iblock_configure_device(struct se_device *dev)
if (bdev_nonrot(bd))
dev->dev_attrib.is_nonrot = 1;
target_configure_write_atomic_from_bdev(&dev->dev_attrib, bd);
bi = bdev_get_integrity(bd);
if (!bi)
return 0;
@@ -773,6 +775,9 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
else if (!bdev_write_cache(ib_dev->ibd_bd))
opf |= REQ_FUA;
}
if (cmd->se_cmd_flags & SCF_ATOMIC)
opf |= REQ_ATOMIC;
} else {
opf = REQ_OP_READ;
miter_dir = SG_MITER_FROM_SG;
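
This is the heart of the series: once sbc_parse_cdb() has flagged a command
with SCF_ATOMIC, iblock submits the bio with REQ_ATOMIC set and the block
layer guarantees the write is committed in its entirety or not at all. The
target does no emulation or splitting; a request that violates the
advertised limits is rejected earlier, during CDB parsing.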

drivers/target/target_core_sbc.c

@@ -764,6 +764,49 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
return 0;
}
static sense_reason_t
sbc_check_atomic(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
struct se_dev_attrib *attrib = &dev->dev_attrib;
u16 boundary, transfer_len;
u64 lba;
lba = transport_lba_64(cdb);
boundary = get_unaligned_be16(&cdb[10]);
transfer_len = get_unaligned_be16(&cdb[12]);
if (!attrib->atomic_max_len)
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (boundary) {
if (transfer_len > attrib->atomic_max_with_boundary)
return TCM_INVALID_CDB_FIELD;
if (boundary > attrib->atomic_max_boundary)
return TCM_INVALID_CDB_FIELD;
} else {
if (transfer_len > attrib->atomic_max_len)
return TCM_INVALID_CDB_FIELD;
}
if (attrib->atomic_granularity) {
if (transfer_len % attrib->atomic_granularity)
return TCM_INVALID_CDB_FIELD;
if (boundary && boundary % attrib->atomic_granularity)
return TCM_INVALID_CDB_FIELD;
}
if (dev->dev_attrib.atomic_alignment) {
u64 _lba = lba;
if (do_div(_lba, dev->dev_attrib.atomic_alignment))
return TCM_INVALID_CDB_FIELD;
}
return 0;
}
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
{
@@ -861,6 +904,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
break;
case WRITE_16:
case WRITE_VERIFY_16:
case WRITE_ATOMIC_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
@@ -872,6 +916,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
if (cdb[0] == WRITE_ATOMIC_16) {
cmd->se_cmd_flags |= SCF_ATOMIC;
ret = sbc_check_atomic(dev, cmd, cdb);
if (ret)
return ret;
}
cmd->execute_cmd = sbc_execute_rw;
break;
case VARIABLE_LENGTH_CMD:
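
For reference, sbc_check_atomic() pulls its fields straight out of the
WRITE ATOMIC (16) CDB: the LBA from bytes 2-9, the ATOMIC BOUNDARY from
bytes 10-11 and the TRANSFER LENGTH from bytes 12-13. A minimal sketch of an
initiator-side CDB that this code would parse (lba, boundary and
transfer_len are placeholder values):

	unsigned char cdb[16] = { };

	cdb[0] = WRITE_ATOMIC_16;			/* opcode 0x9c */
	put_unaligned_be64(lba, &cdb[2]);		/* LOGICAL BLOCK ADDRESS */
	put_unaligned_be16(boundary, &cdb[10]);		/* ATOMIC BOUNDARY */
	put_unaligned_be16(transfer_len, &cdb[12]);	/* TRANSFER LENGTH */

A zero ATOMIC BOUNDARY selects the simple case: the whole transfer is one
atomic unit and is checked only against atomic_max_len.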

drivers/target/target_core_spc.c

@@ -521,7 +521,6 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
have_tp = 1;
buf[0] = dev->transport->get_device_type(dev);
buf[3] = have_tp ? 0x3c : 0x10;
/* Set WSNZ to 1 */
buf[4] = 0x01;
@@ -562,11 +561,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
else
put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP.
*/
put_unaligned_be16(12, &buf[2]);
if (!have_tp)
goto max_write_same;
goto try_atomic;
/*
* Set MAXIMUM UNMAP LBA COUNT
@@ -595,9 +593,29 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* MAXIMUM WRITE SAME LENGTH
*/
max_write_same:
put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
put_unaligned_be16(40, &buf[2]);
try_atomic:
/*
* ATOMIC
*/
if (!dev->dev_attrib.atomic_max_len)
goto done;
if (dev->dev_attrib.atomic_max_len < io_max_blocks)
put_unaligned_be32(dev->dev_attrib.atomic_max_len, &buf[44]);
else
put_unaligned_be32(io_max_blocks, &buf[44]);
put_unaligned_be32(dev->dev_attrib.atomic_alignment, &buf[48]);
put_unaligned_be32(dev->dev_attrib.atomic_granularity, &buf[52]);
put_unaligned_be32(dev->dev_attrib.atomic_max_with_boundary, &buf[56]);
put_unaligned_be32(dev->dev_attrib.atomic_max_boundary, &buf[60]);
put_unaligned_be16(60, &buf[2]);
done:
return 0;
}
@@ -1452,6 +1470,24 @@ static const struct target_opcode_descriptor tcm_opcode_write_same32 = {
.update_usage_bits = set_dpofua_usage_bits32,
};
static bool tcm_is_atomic_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
return cmd->se_dev->dev_attrib.atomic_max_len;
}
static struct target_opcode_descriptor tcm_opcode_write_atomic16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_ATOMIC_16,
.cdb_size = 16,
.usage_bits = {WRITE_ATOMIC_16, 0xf8, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
.enabled = tcm_is_atomic_enabled,
.update_usage_bits = set_dpofua_usage_bits,
};
static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
@@ -2008,6 +2044,7 @@ static const struct target_opcode_descriptor *tcm_supported_opcodes[] = {
&tcm_opcode_write16,
&tcm_opcode_write_verify16,
&tcm_opcode_write_same32,
&tcm_opcode_write_atomic16,
&tcm_opcode_compare_write,
&tcm_opcode_read_capacity,
&tcm_opcode_read_capacity16,
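
The new fields land at the offsets the Block Limits VPD page (B0h) defines
for atomic writes: MAXIMUM ATOMIC TRANSFER LENGTH at bytes 44-47, ATOMIC
ALIGNMENT at 48-51, ATOMIC TRANSFER LENGTH GRANULARITY at 52-55, MAXIMUM
ATOMIC TRANSFER LENGTH WITH ATOMIC BOUNDARY at 56-59 and MAXIMUM ATOMIC
BOUNDARY SIZE at 60-63. The advertised maximum is clamped to io_max_blocks
so it never exceeds what a single command can carry, and the page length in
bytes 2-3 is now bumped in stages (12, 40, 60) as each optional group of
fields is filled in, replacing the single up-front buf[3] assignment.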

include/target/target_core_backend.h

@@ -121,8 +121,10 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct block_device *bdev);
bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
struct block_device *bdev);
void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
struct block_device *bdev);
static inline bool target_dev_configured(struct se_device *se_dev)
{
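
With both helpers exported here, a hypothetical block-backed backend would
wire them up the same way the iblock changes above do (a sketch;
example_configure and bdev are illustrative names, not part of the patch):

static void example_configure(struct se_device *dev,
			      struct block_device *bdev)
{
	/* UNMAP setup is optional: returns false if discard is unsupported */
	if (!target_configure_unmap_from_bdev(&dev->dev_attrib, bdev))
		pr_debug("bdev does not support discard\n");

	/* Quietly leaves the atomic attributes at zero if unsupported */
	target_configure_write_atomic_from_bdev(&dev->dev_attrib, bdev);
}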

include/target/target_core_base.h

@@ -158,6 +158,7 @@ enum se_cmd_flags_table {
SCF_TASK_ATTR_SET = (1 << 17),
SCF_TREAT_READ_AS_NORMAL = (1 << 18),
SCF_TASK_ORDERED_SYNC = (1 << 19),
SCF_ATOMIC = (1 << 20),
};
/*
@@ -731,6 +732,11 @@ struct se_dev_attrib {
u32 unmap_granularity;
u32 unmap_granularity_alignment;
u32 max_write_same_len;
u32 atomic_max_len;
u32 atomic_alignment;
u32 atomic_granularity;
u32 atomic_max_with_boundary;
u32 atomic_max_boundary;
u8 submit_type;
struct se_device *da_dev;
struct config_group da_group;
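
All five new se_dev_attrib fields are in units of the device's logical block
size: target_configure_write_atomic_from_bdev() divides the block layer's
byte-based limits by bdev_logical_block_size() before storing them, and
spc_emulate_evpd_b0() passes them to the initiator unchanged, as the Block
Limits page expects logical-block counts.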