Merge patch series "Optimize the hot path in the UFS driver"
Bart Van Assche <bvanassche@acm.org> says:

Hi Martin,

This patch series optimizes the hot path of the UFS driver by making struct
scsi_cmnd and struct ufshcd_lrb adjacent. Making these two data structures
adjacent is realized as follows:

	@@ -9040,6 +9046,7 @@ static const struct scsi_host_template ufshcd_driver_template = {
		.name = UFSHCD,
		.proc_name = UFSHCD,
		.map_queues = ufshcd_map_queues,
	+	.cmd_size = sizeof(struct ufshcd_lrb),
		.init_cmd_priv = ufshcd_init_cmd_priv,
		.queuecommand = ufshcd_queuecommand,
		.mq_poll = ufshcd_poll,

The following changes had to be made prior to making these two data
structures adjacent:

* Add support for driver-internal and reserved commands in the SCSI core.

* Instead of making the reserved command slot (hba->reserved_slot) invisible
  to the SCSI core, let the SCSI core allocate a reserved command.

* Remove all UFS data structure members that are no longer needed because
  struct scsi_cmnd and struct ufshcd_lrb are now adjacent.

* Call ufshcd_init_lrb() from inside the code for queueing a command instead
  of calling this function before I/O starts. This is necessary because
  ufshcd_memory_alloc() allocates fewer instances than the block layer
  allocates requests. See also the following code in the block layer core:

	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
				hctx->numa_node))

  Although the UFS driver could be modified such that ufshcd_init_lrb() is
  called from ufshcd_init_cmd_priv(), realizing this would require moving the
  memory allocations that happen inside ufshcd_memory_alloc() into
  ufshcd_init_cmd_priv(). That would make this patch series even larger.
  Although ufshcd_init_lrb() is called for each command, the benefits of
  reduced indirection and better cache efficiency outweigh the small overhead
  of per-command LRB initialization.

* Call ufshcd_add_scsi_host() before any device management commands are
  submitted. This change is necessary because this patch series makes device
  management command allocation happen when the SCSI host is allocated.

* Allocate as many command slots as the host controller supports. Decrease
  host->cmd_per_lun if necessary once it is clear whether or not the UFS
  device supports fewer command slots than the host controller.

Please consider this patch series for the next merge window.

Thanks,

Bart.

Link: https://patch.msgid.link/20251031204029.2883185-1-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
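The mechanism underlying the series is the standard blk-mq PDU layout: once
.cmd_size is set in the host template, the block layer allocates struct
scsi_cmnd and the driver-private data as one contiguous per-request buffer. A
minimal sketch of the resulting conversions (illustrative only; cmd_to_lrb()
and lrb_to_cmd() are hypothetical helper names, while scsi_cmd_priv() is the
existing core accessor):

	/* Per-request PDU layout with .cmd_size = sizeof(struct ufshcd_lrb):
	 *
	 *	[ struct scsi_cmnd | struct ufshcd_lrb ]
	 *
	 * scsi_cmd_priv() returns the memory immediately following the
	 * scsi_cmnd, so either structure can be reached from the other by
	 * pointer arithmetic instead of an hba->lrb[tag] array lookup.
	 */
	static struct ufshcd_lrb *cmd_to_lrb(struct scsi_cmnd *cmd)
	{
		return scsi_cmd_priv(cmd);	/* == (void *)(cmd + 1) */
	}

	static struct scsi_cmnd *lrb_to_cmd(struct ufshcd_lrb *lrbp)
	{
		return (struct scsi_cmnd *)lrbp - 1;
	}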
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
@@ -231,6 +231,12 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 		goto fail;
 	}
 
+	if (shost->nr_reserved_cmds && !sht->queue_reserved_command) {
+		shost_printk(KERN_ERR, shost,
+			     "nr_reserved_cmds set but no method to queue\n");
+		goto fail;
+	}
+
 	/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
 	shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
 				   shost->can_queue);
@@ -307,6 +313,14 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 	if (error)
 		goto out_del_dev;
 
+	if (shost->nr_reserved_cmds) {
+		shost->pseudo_sdev = scsi_get_pseudo_sdev(shost);
+		if (!shost->pseudo_sdev) {
+			error = -ENOMEM;
+			goto out_del_dev;
+		}
+	}
+
 	scsi_proc_host_add(shost);
 	scsi_autopm_put_host(shost);
 	return error;
@@ -436,6 +450,7 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
 	shost->hostt = sht;
 	shost->this_id = sht->this_id;
 	shost->can_queue = sht->can_queue;
+	shost->nr_reserved_cmds = sht->nr_reserved_cmds;
 	shost->sg_tablesize = sht->sg_tablesize;
 	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
 	shost->cmd_per_lun = sht->cmd_per_lun;
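For a driver, opting in to the reserved-command infrastructure added above is
a two-field contract, enforced by the new scsi_add_host_with_dma() check:
declare nr_reserved_cmds and supply a .queue_reserved_command() callback. A
sketch of such a host template (the mydrv_* names are hypothetical; the
scsi_debug changes further down show the real wiring):

	static const struct scsi_host_template mydrv_template = {
		.name = "mydrv",
		.queuecommand = mydrv_queuecommand,
		/* Called for commands allocated with BLK_MQ_REQ_RESERVED. */
		.queue_reserved_command = mydrv_queue_reserved_command,
		.can_queue = 32,	/* tags for regular I/O */
		.nr_reserved_cmds = 1,	/* extra tag(s) for internal commands */
		.cmd_size = sizeof(struct mydrv_cmd_priv),
	};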
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
@@ -216,6 +216,9 @@ int scsi_device_max_queue_depth(struct scsi_device *sdev)
  */
 int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
 {
+	if (!sdev->budget_map.map)
+		return -EINVAL;
+
 	depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));
 
 	if (depth > 0) {
@@ -255,6 +258,8 @@ EXPORT_SYMBOL(scsi_change_queue_depth);
  */
 int scsi_track_queue_full(struct scsi_device *sdev, int depth)
 {
+	if (!sdev->budget_map.map)
+		return 0;
 
 	/*
 	 * Don't let QUEUE_FULLs on the same
@@ -826,8 +831,11 @@ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
 	spin_lock_irqsave(shost->host_lock, flags);
 	while (list->next != &shost->__devices) {
 		next = list_entry(list->next, struct scsi_device, siblings);
-		/* skip devices that we can't get a reference to */
-		if (!scsi_device_get(next))
+		/*
+		 * Skip pseudo devices and also devices we can't get a
+		 * reference to.
+		 */
+		if (!scsi_device_is_pseudo_dev(next) && !scsi_device_get(next))
 			break;
 		next = NULL;
 		list = list->next;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
@@ -6752,20 +6752,59 @@ static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
 	return false;
 }
 
+struct sdebug_abort_cmd {
+	u32 unique_tag;
+};
+
+enum sdebug_internal_cmd_type {
+	SCSI_DEBUG_ABORT_CMD,
+};
+
+struct sdebug_internal_cmd {
+	enum sdebug_internal_cmd_type type;
+
+	union {
+		struct sdebug_abort_cmd abort_cmd;
+	};
+};
+
+union sdebug_priv {
+	struct sdebug_scsi_cmd cmd;
+	struct sdebug_internal_cmd internal_cmd;
+};
+
 /*
- * Called from scsi_debug_abort() only, which is for timed-out cmd.
+ * Abort SCSI command @cmnd. Only called from scsi_debug_abort(). Although
+ * it would be possible to call scsi_debug_stop_cmnd() directly, an internal
+ * command is allocated and submitted to trigger the reserved command
+ * infrastructure.
 */
static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
{
-	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
-	unsigned long flags;
-	bool res;
+	struct Scsi_Host *shost = cmnd->device->host;
+	struct request *rq = scsi_cmd_to_rq(cmnd);
+	u32 unique_tag = blk_mq_unique_tag(rq);
+	struct sdebug_internal_cmd *internal_cmd;
+	struct scsi_cmnd *abort_cmd;
+	struct request *abort_rq;
+	blk_status_t res;
 
-	spin_lock_irqsave(&sdsc->lock, flags);
-	res = scsi_debug_stop_cmnd(cmnd);
-	spin_unlock_irqrestore(&sdsc->lock, flags);
-
-	return res;
+	abort_cmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
+					  BLK_MQ_REQ_RESERVED);
+	if (!abort_cmd)
+		return false;
+	internal_cmd = scsi_cmd_priv(abort_cmd);
+	*internal_cmd = (struct sdebug_internal_cmd) {
+		.type = SCSI_DEBUG_ABORT_CMD,
+		.abort_cmd = {
+			.unique_tag = unique_tag,
+		},
+	};
+	abort_rq = scsi_cmd_to_rq(abort_cmd);
+	abort_rq->timeout = secs_to_jiffies(3);
+	res = blk_execute_rq(abort_rq, true);
+	scsi_put_internal_cmd(abort_cmd);
+	return res == BLK_STS_OK;
 }
 
 /*
@@ -9220,6 +9259,56 @@ out_handle:
 	return ret;
 }
 
+/* Process @scp, a request to abort a SCSI command by tag. */
+static void scsi_debug_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *scp)
+{
+	struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
+	struct sdebug_abort_cmd *abort_cmd = &internal_cmd->abort_cmd;
+	const u32 unique_tag = abort_cmd->unique_tag;
+	struct scsi_cmnd *to_be_aborted_scmd =
+		scsi_host_find_tag(shost, unique_tag);
+	struct sdebug_scsi_cmd *to_be_aborted_sdsc =
+		scsi_cmd_priv(to_be_aborted_scmd);
+	bool res = false;
+
+	if (!to_be_aborted_scmd) {
+		pr_err("%s: command with tag %#x not found\n", __func__,
+		       unique_tag);
+		return;
+	}
+
+	scoped_guard(spinlock_irqsave, &to_be_aborted_sdsc->lock)
+		res = scsi_debug_stop_cmnd(to_be_aborted_scmd);
+
+	if (res)
+		pr_info("%s: aborted command with tag %#x\n",
+			__func__, unique_tag);
+	else
+		pr_err("%s: failed to abort command with tag %#x\n",
+		       __func__, unique_tag);
+
+	set_host_byte(scp, res ? DID_OK : DID_ERROR);
+}
+
+static int scsi_debug_process_reserved_command(struct Scsi_Host *shost,
+					       struct scsi_cmnd *scp)
+{
+	struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
+
+	switch (internal_cmd->type) {
+	case SCSI_DEBUG_ABORT_CMD:
+		scsi_debug_abort_cmd(shost, scp);
+		break;
+	default:
+		WARN_ON_ONCE(true);
+		set_host_byte(scp, DID_ERROR);
+		break;
+	}
+
+	scsi_done(scp);
+	return 0;
+}
+
 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
 				   struct scsi_cmnd *scp)
 {
@@ -9420,6 +9509,9 @@ static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
 	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
 
+	if (blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd)))
+		return 0;
+
 	spin_lock_init(&sdsc->lock);
 	hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
 		      HRTIMER_MODE_REL_PINNED);
@@ -9439,6 +9531,7 @@ static const struct scsi_host_template sdebug_driver_template = {
 	.sdev_destroy = scsi_debug_sdev_destroy,
 	.ioctl = scsi_debug_ioctl,
 	.queuecommand = scsi_debug_queuecommand,
+	.queue_reserved_command = scsi_debug_process_reserved_command,
 	.change_queue_depth = sdebug_change_qdepth,
 	.map_queues = sdebug_map_queues,
 	.mq_poll = sdebug_blk_mq_poll,
@@ -9448,6 +9541,7 @@ static const struct scsi_host_template sdebug_driver_template = {
 	.eh_bus_reset_handler = scsi_debug_bus_reset,
 	.eh_host_reset_handler = scsi_debug_host_reset,
 	.can_queue = SDEBUG_CANQUEUE,
+	.nr_reserved_cmds = 1,
 	.this_id = 7,
 	.sg_tablesize = SG_MAX_SEGMENTS,
 	.cmd_per_lun = DEF_CMD_PER_LUN,
@@ -9456,7 +9550,7 @@ static const struct scsi_host_template sdebug_driver_template = {
 	.module = THIS_MODULE,
 	.skip_settle_delay = 1,
 	.track_queue_depth = 1,
-	.cmd_size = sizeof(struct sdebug_scsi_cmd),
+	.cmd_size = sizeof(union sdebug_priv),
 	.init_cmd_priv = sdebug_init_cmd_priv,
 	.target_alloc = sdebug_target_alloc,
 	.target_destroy = sdebug_target_destroy,
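The union sdebug_priv above is the pattern that lets one PDU serve both
request classes; which view applies is decided by the tag type rather than by
extra state:

	/* Regular I/O path: the PDU behind the command is a sdebug_scsi_cmd. */
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);

	/* Reserved path: the same bytes are viewed as a sdebug_internal_cmd. */
	struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);

This is also why sdebug_init_cmd_priv() returns early for reserved requests:
their PDU is never used as a sdebug_scsi_cmd, so initializing the spinlock
and hrtimer would be wasted work.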
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
@@ -749,6 +749,9 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
 	const struct scsi_host_template *sht = sdev->host->hostt;
 	struct scsi_device *tmp_sdev;
 
+	if (!sdev->budget_map.map)
+		return;
+
 	if (!sht->track_queue_depth ||
 	    sdev->queue_depth >= sdev->max_queue_depth)
 		return;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
@@ -396,7 +396,8 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
 	if (starget->can_queue > 0)
 		atomic_dec(&starget->target_busy);
 
-	sbitmap_put(&sdev->budget_map, cmd->budget_token);
+	if (sdev->budget_map.map)
+		sbitmap_put(&sdev->budget_map, cmd->budget_token);
 	cmd->budget_token = -1;
 }
 
@@ -1360,6 +1361,9 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 {
 	int token;
 
+	if (!sdev->budget_map.map)
+		return INT_MAX;
+
 	token = sbitmap_get(&sdev->budget_map);
 	if (token < 0)
 		return -1;
@@ -1530,6 +1534,14 @@ static void scsi_complete(struct request *rq)
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 	enum scsi_disposition disposition;
 
+	if (blk_mq_is_reserved_rq(rq)) {
+		/* Only pass-through requests are supported in this code path. */
+		WARN_ON_ONCE(!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)));
+		scsi_mq_uninit_cmd(cmd);
+		__blk_mq_end_request(rq, scsi_result_to_blk_status(cmd->result));
+		return;
+	}
+
 	INIT_LIST_HEAD(&cmd->eh_entry);
 
 	atomic_inc(&cmd->device->iodone_cnt);
@@ -1749,7 +1761,8 @@ static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
 {
 	struct scsi_device *sdev = q->queuedata;
 
-	sbitmap_put(&sdev->budget_map, budget_token);
+	if (sdev->budget_map.map)
+		sbitmap_put(&sdev->budget_map, budget_token);
 }
 
 /*
@@ -1818,25 +1831,31 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	WARN_ON_ONCE(cmd->budget_token < 0);
 
 	/*
-	 * If the device is not in running state we will reject some or all
-	 * commands.
+	 * Bypass the SCSI device, SCSI target and SCSI host checks for
+	 * reserved commands.
 	 */
-	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
-		ret = scsi_device_state_check(sdev, req);
-		if (ret != BLK_STS_OK)
-			goto out_put_budget;
-	}
-
-	ret = BLK_STS_RESOURCE;
-	if (!scsi_target_queue_ready(shost, sdev))
-		goto out_put_budget;
-	if (unlikely(scsi_host_in_recovery(shost))) {
-		if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
-			ret = BLK_STS_OFFLINE;
-		goto out_dec_target_busy;
-	}
-	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
-		goto out_dec_target_busy;
+	if (!blk_mq_is_reserved_rq(req)) {
+		/*
+		 * If the device is not in running state we will reject some or
+		 * all commands.
+		 */
+		if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+			ret = scsi_device_state_check(sdev, req);
+			if (ret != BLK_STS_OK)
+				goto out_put_budget;
+		}
+
+		ret = BLK_STS_RESOURCE;
+		if (!scsi_target_queue_ready(shost, sdev))
+			goto out_put_budget;
+		if (unlikely(scsi_host_in_recovery(shost))) {
+			if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
+				ret = BLK_STS_OFFLINE;
+			goto out_dec_target_busy;
+		}
+		if (!scsi_host_queue_ready(q, shost, sdev, cmd))
+			goto out_dec_target_busy;
+	}
 
 	/*
 	 * Only clear the driver-private command data if the LLD does not supply
@@ -1865,6 +1884,14 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	cmd->submitter = SUBMITTED_BY_BLOCK_LAYER;
 
 	blk_mq_start_request(req);
+	if (blk_mq_is_reserved_rq(req)) {
+		reason = shost->hostt->queue_reserved_command(shost, cmd);
+		if (reason) {
+			ret = BLK_STS_RESOURCE;
+			goto out_put_budget;
+		}
+		return BLK_STS_OK;
+	}
 	reason = scsi_dispatch_cmd(cmd);
 	if (reason) {
 		scsi_set_blocked(cmd, reason);
@@ -2083,7 +2110,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 		tag_set->ops = &scsi_mq_ops_no_commit;
 	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
 	tag_set->nr_maps = shost->nr_maps ? : 1;
-	tag_set->queue_depth = shost->can_queue;
+	tag_set->queue_depth = shost->can_queue + shost->nr_reserved_cmds;
+	tag_set->reserved_tags = shost->nr_reserved_cmds;
 	tag_set->cmd_size = cmd_size;
 	tag_set->numa_node = dev_to_node(shost->dma_dev);
 	if (shost->hostt->tag_alloc_policy_rr)
@@ -2106,6 +2134,44 @@ void scsi_mq_free_tags(struct kref *kref)
 	complete(&shost->tagset_freed);
 }
 
+/**
+ * scsi_get_internal_cmd() - Allocate an internal SCSI command.
+ * @sdev: SCSI device from which to allocate the command
+ * @data_direction: Data direction for the allocated command
+ * @flags: request allocation flags, e.g. BLK_MQ_REQ_RESERVED or
+ *	BLK_MQ_REQ_NOWAIT.
+ *
+ * Allocates a SCSI command for internal LLDD use.
+ */
+struct scsi_cmnd *scsi_get_internal_cmd(struct scsi_device *sdev,
+					enum dma_data_direction data_direction,
+					blk_mq_req_flags_t flags)
+{
+	enum req_op op = data_direction == DMA_TO_DEVICE ? REQ_OP_DRV_OUT :
+		REQ_OP_DRV_IN;
+	struct scsi_cmnd *scmd;
+	struct request *rq;
+
+	rq = scsi_alloc_request(sdev->request_queue, op, flags);
+	if (IS_ERR(rq))
+		return NULL;
+	scmd = blk_mq_rq_to_pdu(rq);
+	scmd->device = sdev;
+
+	return scmd;
+}
+EXPORT_SYMBOL_GPL(scsi_get_internal_cmd);
+
+/**
+ * scsi_put_internal_cmd() - Free an internal SCSI command.
+ * @scmd: SCSI command to be freed
+ */
+void scsi_put_internal_cmd(struct scsi_cmnd *scmd)
+{
+	blk_mq_free_request(blk_mq_rq_from_pdu(scmd));
+}
+EXPORT_SYMBOL_GPL(scsi_put_internal_cmd);
+
 /**
  * scsi_device_from_queue - return sdev associated with a request_queue
  * @q: The request queue to return the sdev from
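Callers are expected to follow the pattern already visible in the scsi_debug
abort path above: allocate from the pseudo device, fill the PDU, execute
synchronously, free. A condensed sketch (the send_internal_cmd() wrapper is
hypothetical; the calls themselves are the ones this patch exports):

	static bool send_internal_cmd(struct Scsi_Host *shost)
	{
		struct scsi_cmnd *scmd;
		struct request *rq;
		blk_status_t status;

		scmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
					     BLK_MQ_REQ_RESERVED);
		if (!scmd)
			return false;

		/* Driver-private payload lives in the PDU behind the command. */
		/* ... initialize scsi_cmd_priv(scmd) here ... */

		rq = scsi_cmd_to_rq(scmd);
		rq->timeout = secs_to_jiffies(3);
		/* Dispatched through the host's .queue_reserved_command(). */
		status = blk_execute_rq(rq, true);
		scsi_put_internal_cmd(scmd);
		return status == BLK_STS_OK;
	}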
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
@@ -135,6 +135,7 @@ extern int scsi_complete_async_scans(void);
 extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
 				   unsigned int, u64, enum scsi_scan_mode);
 extern void scsi_forget_host(struct Scsi_Host *);
+struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *);
 
 /* scsi_sysctl.c */
 #ifdef CONFIG_SYSCTL
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
@@ -347,6 +347,11 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	kref_get(&sdev->host->tagset_refcnt);
 	sdev->request_queue = q;
 
+	scsi_sysfs_device_initialize(sdev);
+
+	if (scsi_device_is_pseudo_dev(sdev))
+		return sdev;
+
 	depth = sdev->host->cmd_per_lun ?: 1;
 
 	/*
@@ -363,8 +368,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 
 	scsi_change_queue_depth(sdev, depth);
 
-	scsi_sysfs_device_initialize(sdev);
-
 	if (shost->hostt->sdev_init) {
 		ret = shost->hostt->sdev_init(sdev);
 		if (ret) {
@@ -1068,6 +1071,11 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
 
 	transport_configure_device(&sdev->sdev_gendev);
 
+	sdev->sdev_bflags = *bflags;
+
+	if (scsi_device_is_pseudo_dev(sdev))
+		return SCSI_SCAN_LUN_PRESENT;
+
 	/*
 	 * No need to freeze the queue as it isn't reachable to anyone else yet.
 	 */
@@ -1113,7 +1121,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
 
 	sdev->max_queue_depth = sdev->queue_depth;
 	WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
-	sdev->sdev_bflags = *bflags;
 
 	/*
 	 * Ok, the device is now all set up, we can
@@ -1212,6 +1219,12 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 	if (!sdev)
 		goto out;
 
+	if (scsi_device_is_pseudo_dev(sdev)) {
+		if (bflagsp)
+			*bflagsp = BLIST_NOLUN;
+		return SCSI_SCAN_LUN_PRESENT;
+	}
+
 	result = kmalloc(result_len, GFP_KERNEL);
 	if (!result)
 		goto out_free_sdev;
@@ -2083,12 +2096,65 @@ void scsi_forget_host(struct Scsi_Host *shost)
 restart:
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_for_each_entry(sdev, &shost->__devices, siblings) {
-		if (sdev->sdev_state == SDEV_DEL)
+		if (scsi_device_is_pseudo_dev(sdev) ||
+		    sdev->sdev_state == SDEV_DEL)
 			continue;
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		__scsi_remove_device(sdev);
 		goto restart;
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
+
+	/*
+	 * Remove the pseudo device last since it may be needed during removal
+	 * of other SCSI devices.
+	 */
+	if (shost->pseudo_sdev)
+		__scsi_remove_device(shost->pseudo_sdev);
 }
 
+/**
+ * scsi_get_pseudo_sdev() - Attach a pseudo SCSI device to a SCSI host
+ * @shost: Host that needs a pseudo SCSI device
+ *
+ * Lock status: None assumed.
+ *
+ * Returns: The scsi_device or NULL
+ *
+ * Notes:
+ *	Attach a single scsi_device to the Scsi_Host. The primary aim for this
+ *	device is to serve as a container from which SCSI commands can be
+ *	allocated. Each SCSI command will carry a command tag allocated by the
+ *	block layer. These SCSI commands can be used by the LLDD to send
+ *	internal or passthrough commands without having to manage tag allocation
+ *	inside the LLDD.
+ */
+struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *shost)
+{
+	struct scsi_device *sdev = NULL;
+	struct scsi_target *starget;
+
+	guard(mutex)(&shost->scan_mutex);
+
+	if (!scsi_host_scan_allowed(shost))
+		goto out;
+
+	starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->max_id);
+	if (!starget)
+		goto out;
+
+	sdev = scsi_alloc_sdev(starget, U64_MAX, NULL);
+	if (!sdev) {
+		scsi_target_reap(starget);
+		goto put_target;
+	}
+
+	sdev->borken = 0;
+
+put_target:
+	/* See also the get_device(dev) call in scsi_alloc_target(). */
+	put_device(&starget->dev);
+
+out:
+	return sdev;
+}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
@@ -1348,6 +1348,9 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 	int error;
 	struct scsi_target *starget = sdev->sdev_target;
 
+	if (WARN_ON_ONCE(scsi_device_is_pseudo_dev(sdev)))
+		return -EINVAL;
+
 	error = scsi_target_add(starget);
 	if (error)
 		return error;
@@ -1455,7 +1458,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
 	kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
 	cancel_work_sync(&sdev->requeue_work);
 
-	if (sdev->host->hostt->sdev_destroy)
+	if (!scsi_device_is_pseudo_dev(sdev) && sdev->host->hostt->sdev_destroy)
 		sdev->host->hostt->sdev_destroy(sdev);
 	transport_destroy_device(dev);
 
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
@@ -134,17 +134,15 @@ unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
 EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
 
 /**
- * ufshcd_mcq_decide_queue_depth - decide the queue depth
+ * ufshcd_get_hba_mac - Maximum number of commands supported by the host
+ * controller.
  * @hba: per adapter instance
  *
- * Return: queue-depth on success, non-zero on error
+ * Return: queue depth on success; negative upon error.
  *
- * MAC - Max. Active Command of the Host Controller (HC)
- * HC wouldn't send more than this commands to the device.
- * Calculates and adjusts the queue depth based on the depth
- * supported by the HC and ufs device.
+ * MAC = Maximum number of Active Commands supported by the Host Controller.
 */
-int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
+int ufshcd_get_hba_mac(struct ufs_hba *hba)
 {
 	int mac;
 
@@ -162,18 +160,7 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
 		mac = hba->vops->get_hba_mac(hba);
 	}
 	if (mac < 0)
-		goto err;
-
-	WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
-	/*
-	 * max. value of bqueuedepth = 256, mac is host dependent.
-	 * It is mandatory for UFS device to define bQueueDepth if
-	 * shared queuing architecture is enabled.
-	 */
-	return min_t(int, mac, hba->dev_info.bqueuedepth);
-
-err:
-	dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+		dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
 	return mac;
 }
 
@@ -307,9 +294,10 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
 				   struct ufs_hw_queue *hwq)
 {
 	struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
-	int tag = ufshcd_mcq_get_tag(hba, cqe);
 
 	if (cqe->command_desc_base_addr) {
+		int tag = ufshcd_mcq_get_tag(hba, cqe);
+
 		ufshcd_compl_one_cqe(hba, tag, cqe);
 		/* After processed the cqe, mark it empty (invalid) entry */
 		cqe->command_desc_base_addr = 0;
@@ -491,9 +479,6 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
 		mutex_init(&hwq->sq_mutex);
 	}
 
-	/* The very first HW queue serves device commands */
-	hba->dev_cmd_queue = &hba->uhq[0];
-
 	host->host_tagset = 1;
 	return 0;
 }
@@ -546,8 +531,9 @@ static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
 */
 int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
 {
-	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
-	struct scsi_cmnd *cmd = lrbp->cmd;
+	struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
+	struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+	struct request *rq = scsi_cmd_to_rq(cmd);
 	struct ufs_hw_queue *hwq;
 	void __iomem *reg, *opr_sqd_base;
 	u32 nexus, id, val;
@@ -556,15 +542,12 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
 	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
 		return -ETIMEDOUT;
 
-	if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
-		if (!cmd)
-			return -EINVAL;
-		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
-		if (!hwq)
-			return 0;
-	} else {
-		hwq = hba->dev_cmd_queue;
-	}
+	if (!cmd)
+		return -EINVAL;
+
+	hwq = ufshcd_mcq_req_to_hwq(hba, rq);
+	if (!hwq)
+		return 0;
 
 	id = hwq->id;
 
@@ -630,7 +613,8 @@ static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
 static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
 				  struct ufs_hw_queue *hwq, int task_tag)
 {
-	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+	struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
+	struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
 	struct utp_transfer_req_desc *utrd;
 	__le64 cmd_desc_base_addr;
 	bool ret = false;
@@ -681,7 +665,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 	struct Scsi_Host *host = cmd->device->host;
 	struct ufs_hba *hba = shost_priv(host);
 	int tag = scsi_cmd_to_rq(cmd)->tag;
-	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+	struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
 	struct ufs_hw_queue *hwq;
 	int err;
 
diff --git a/drivers/ufs/core/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h
@@ -38,10 +38,10 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
 }
 
 static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
-					  struct ufshcd_lrb *lrbp)
+					  struct scsi_cmnd *cmd)
 {
-	struct scsi_cmnd *cmd = lrbp->cmd;
 	const struct bio_crypt_ctx *crypt_ctx = scsi_cmd_to_rq(cmd)->crypt_ctx;
+	struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
 
 	if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt)
 		return hba->vops->fill_crypto_prdt(hba, crypt_ctx,
@@ -51,17 +51,19 @@ static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
 }
 
 static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
-					    struct ufshcd_lrb *lrbp)
+					    struct scsi_cmnd *cmd)
 {
+	struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
 	if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT))
 		return;
 
-	if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx))
+	if (!(scsi_cmd_to_rq(cmd)->crypt_ctx))
 		return;
 
 	/* Zeroize the PRDT because it can contain cryptographic keys. */
 	memzero_explicit(lrbp->ucd_prdt_ptr,
-			 ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd));
+			 ufshcd_sg_entry_size(hba) * scsi_sg_count(cmd));
 }
 
 bool ufshcd_crypto_enable(struct ufs_hba *hba);
@@ -82,13 +84,15 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
 					   struct request_desc_header *h) { }
 
 static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
-					  struct ufshcd_lrb *lrbp)
+					  struct scsi_cmnd *cmd)
 {
 	return 0;
 }
 
 static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
-					    struct ufshcd_lrb *lrbp) { }
+					    struct scsi_cmnd *cmd)
+{
+}
 
 static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
 {
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
@@ -67,7 +67,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
 			  struct cq_entry *cqe);
 int ufshcd_mcq_init(struct ufs_hba *hba);
 void ufshcd_mcq_disable(struct ufs_hba *hba);
-int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
+int ufshcd_get_hba_mac(struct ufs_hba *hba);
 int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
 					   struct request *req);
@@ -77,8 +77,7 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
 int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
 int ufshcd_mcq_abort(struct scsi_cmnd *cmd);
 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
-			     struct ufshcd_lrb *lrbp);
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd);
 
 #define SD_ASCII_STD true
 #define SD_RAW false
@@ -363,6 +362,21 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8
 	return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
 }
 
+/*
+ * Convert a block layer tag into a SCSI command pointer. This function is
+ * called once per I/O completion path and is also called from error paths.
+ */
+static inline struct scsi_cmnd *ufshcd_tag_to_cmd(struct ufs_hba *hba, u32 tag)
+{
+	struct blk_mq_tags *tags = hba->host->tag_set.shared_tags;
+	struct request *rq = blk_mq_tag_to_rq(tags, tag);
+
+	if (WARN_ON_ONCE(!rq))
+		return NULL;
+
+	return blk_mq_rq_to_pdu(rq);
+}
+
 static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
 	__must_hold(&q->sq_lock)
 {
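With ufshcd_tag_to_cmd() in place, the lookup that used to start from the
hba->lrb[] array is inverted, as the ufs-mcq.c hunks above show; condensed:

	/* Before: the LRB array owns the mapping, tag -> lrb -> cmd. */
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	struct scsi_cmnd *cmd = lrbp->cmd;

	/* After: blk-mq owns the mapping, tag -> request -> cmd -> lrb. */
	struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
	struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);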
[File diff suppressed because it is too large]
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
@@ -564,6 +564,10 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
 		     const struct scsi_exec_args *args);
 void scsi_failures_reset_retries(struct scsi_failures *failures);
 
+struct scsi_cmnd *scsi_get_internal_cmd(struct scsi_device *sdev,
+					enum dma_data_direction data_direction,
+					blk_mq_req_flags_t flags);
+void scsi_put_internal_cmd(struct scsi_cmnd *scmd);
 extern void sdev_disable_disk_events(struct scsi_device *sdev);
 extern void sdev_enable_disk_events(struct scsi_device *sdev);
 extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t);
@@ -595,6 +599,22 @@ static inline unsigned int sdev_id(struct scsi_device *sdev)
 #define scmd_id(scmd) sdev_id((scmd)->device)
 #define scmd_channel(scmd) sdev_channel((scmd)->device)
 
+/**
+ * scsi_device_is_pseudo_dev() - Whether a device is a pseudo SCSI device.
+ * @sdev: SCSI device to examine
+ *
+ * A pseudo SCSI device can be used to allocate SCSI commands but does not show
+ * up in sysfs. Additionally, the logical unit information in *@sdev is made up.
+ *
+ * This function tests the LUN number instead of comparing @sdev with
+ * @sdev->host->pseudo_sdev because this function may be called before
+ * @sdev->host->pseudo_sdev has been initialized.
+ */
+static inline bool scsi_device_is_pseudo_dev(struct scsi_device *sdev)
+{
+	return sdev->lun == U64_MAX;
+}
+
 /*
  * checks for positions of the SCSI state machine
 */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
@@ -86,6 +86,12 @@ struct scsi_host_template {
 	 */
 	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
 
+	/*
+	 * Queue a reserved command (BLK_MQ_REQ_RESERVED). The .queuecommand()
+	 * documentation also applies to the .queue_reserved_command() callback.
+	 */
+	int (*queue_reserved_command)(struct Scsi_Host *, struct scsi_cmnd *);
+
 	/*
 	 * The commit_rqs function is used to trigger a hardware
 	 * doorbell after some requests have been queued with
@@ -375,10 +381,19 @@ struct scsi_host_template {
 	/*
 	 * This determines if we will use a non-interrupt driven
 	 * or an interrupt driven scheme. It is set to the maximum number
-	 * of simultaneous commands a single hw queue in HBA will accept.
+	 * of simultaneous commands a single hw queue in HBA will accept
+	 * excluding internal commands.
 	 */
 	int can_queue;
 
+	/*
+	 * This determines how many commands the HBA will set aside
+	 * for internal commands. This number will be added to
+	 * @can_queue to calculate the maximum number of simultaneous
+	 * commands sent to the host.
+	 */
+	int nr_reserved_cmds;
+
 	/*
	 * In many instances, especially where disconnect / reconnect are
 	 * supported, our host also has an ID on the SCSI bus. If this is
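Together with the scsi_mq_setup_tags() change earlier in this series, these
two fields partition the tag space. Illustrative numbers only:

	/*
	 * A host template declaring:
	 *
	 *	.can_queue        = 32,
	 *	.nr_reserved_cmds = 1,
	 *
	 * results in scsi_mq_setup_tags() programming:
	 *
	 *	tag_set->queue_depth   = 33;	// can_queue + nr_reserved_cmds
	 *	tag_set->reserved_tags = 1;	// nr_reserved_cmds
	 *
	 * so 32 tags serve regular I/O while internal commands allocated with
	 * BLK_MQ_REQ_RESERVED draw from the reserved tag instead of competing
	 * with the I/O path.
	 */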
@@ -611,7 +626,17 @@ struct Scsi_Host {
 	unsigned short max_cmd_len;
 
 	int this_id;
+
+	/*
+	 * Number of commands this host can handle at the same time.
+	 * This excludes reserved commands as specified by nr_reserved_cmds.
+	 */
 	int can_queue;
+
+	/*
+	 * Number of reserved commands to allocate, if any.
+	 */
+	unsigned int nr_reserved_cmds;
 	short cmd_per_lun;
 	short unsigned int sg_tablesize;
 	short unsigned int sg_prot_tablesize;
@@ -702,6 +727,12 @@ struct Scsi_Host {
 	/* ldm bits */
 	struct device shost_gendev, shost_dev;
 
+	/*
+	 * A SCSI device structure used for sending internal commands to the
+	 * HBA. There is no corresponding logical unit inside the SCSI device.
+	 */
+	struct scsi_device *pseudo_sdev;
+
 	/*
 	 * Points to the transport data (if any) which is allocated
 	 * separately
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
@@ -161,7 +161,6 @@ struct ufs_pm_lvl_states {
  * @ucd_prdt_dma_addr: PRDT dma address for debug
  * @ucd_rsp_dma_addr: UPIU response dma address for debug
  * @ucd_req_dma_addr: UPIU request dma address for debug
- * @cmd: pointer to SCSI command
  * @scsi_status: SCSI status of the command
  * @command_type: SCSI, UFS, Query.
  * @task_tag: Task tag of the command
@@ -186,11 +185,9 @@ struct ufshcd_lrb {
 	dma_addr_t ucd_rsp_dma_addr;
 	dma_addr_t ucd_prdt_dma_addr;
 
-	struct scsi_cmnd *cmd;
 	int scsi_status;
-
 	int command_type;
 	int task_tag;
 	u8 lun; /* UPIU LUN id field is only 8-bit wide */
 	bool intr_cmd;
 	bool req_abort_skip;
@@ -239,13 +236,11 @@ struct ufs_query {
  * struct ufs_dev_cmd - all assosiated fields with device management commands
  * @type: device management command type - Query, NOP OUT
  * @lock: lock to allow one command at a time
- * @complete: internal commands completion
  * @query: Device management query information
 */
 struct ufs_dev_cmd {
 	enum dev_cmd_type type;
 	struct mutex lock;
-	struct completion complete;
 	struct ufs_query query;
 };
 
@@ -833,7 +828,6 @@ enum ufshcd_mcq_opr {
 * @spm_lvl: desired UFS power management level during system PM.
 * @pm_op_in_progress: whether or not a PM operation is in progress.
 * @ahit: value of Auto-Hibernate Idle Timer register.
- * @lrb: local reference block
 * @outstanding_tasks: Bits representing outstanding task requests
 * @outstanding_lock: Protects @outstanding_reqs.
 * @outstanding_reqs: Bits representing outstanding transfer requests
@@ -842,7 +836,6 @@ enum ufshcd_mcq_opr {
 * @nutrs: Transfer Request Queue depth supported by controller
 * @nortt - Max outstanding RTTs supported by controller
 * @nutmrs: Task Management Queue depth supported by controller
- * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
 * @ufs_version: UFS Version to which controller complies
 * @vops: pointer to variant specific operations
 * @vps: pointer to variant specific parameters
@@ -933,7 +926,6 @@ enum ufshcd_mcq_opr {
 * @res: array of resource info of MCQ registers
 * @mcq_base: Multi circular queue registers base address
 * @uhq: array of supported hardware queues
- * @dev_cmd_queue: Queue for issuing device management commands
 * @mcq_opr: MCQ operation and runtime registers
 * @ufs_rtc_update_work: A work for UFS RTC periodic update
 * @pm_qos_req: PM QoS request handle
@@ -976,8 +968,6 @@ struct ufs_hba {
 	/* Auto-Hibernate Idle Timer register value */
 	u32 ahit;
 
-	struct ufshcd_lrb *lrb;
-
 	unsigned long outstanding_tasks;
 	spinlock_t outstanding_lock;
 	unsigned long outstanding_reqs;
@@ -987,7 +977,6 @@ struct ufs_hba {
 	int nortt;
 	u32 mcq_capabilities;
 	int nutmrs;
-	u32 reserved_slot;
 	u32 ufs_version;
 	const struct ufs_hba_variant_ops *vops;
 	struct ufs_hba_variant_params *vps;
@@ -1105,7 +1094,6 @@ struct ufs_hba {
 	bool mcq_esi_enabled;
 	void __iomem *mcq_base;
 	struct ufs_hw_queue *uhq;
-	struct ufs_hw_queue *dev_cmd_queue;
 	struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX];
 
 	struct delayed_work ufs_rtc_update_work;