Merge patch series "Eight small UFS patches"
Bart Van Assche <bvanassche@acm.org> says:

Hi Martin,

This patch series includes two bug fixes for this development cycle and six
small patches that are intended for the next merge window. If applying the
first two patches only during the current development cycle would be
inconvenient, postponing all patches until the next merge window is fine with
me. Please consider including these patches in the upstream kernel.

Thanks,

Bart.

[mkp: Applied patches #1 and #2 to 6.18/scsi-fixes]

Link: https://patch.msgid.link/20251014200118.3390839-1-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
@@ -568,12 +568,12 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
 
 	id = hwq->id;
 
-	mutex_lock(&hwq->sq_mutex);
+	guard(mutex)(&hwq->sq_mutex);
 
 	/* stop the SQ fetching before working on it */
 	err = ufshcd_mcq_sq_stop(hba, hwq);
 	if (err)
-		goto unlock;
+		return err;
 
 	/* SQCTI = EXT_IID, IID, LUN, Task Tag */
 	nexus = lrbp->lun << 8 | task_tag;
@@ -600,8 +600,6 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
 	if (ufshcd_mcq_sq_start(hba, hwq))
 		err = -ETIMEDOUT;
 
-unlock:
-	mutex_unlock(&hwq->sq_mutex);
 	return err;
 }
 
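The change above leans on the scope-based cleanup helpers in <linux/cleanup.h>: guard(mutex) acquires the mutex and releases it automatically when the surrounding scope is left, so error paths can return directly instead of jumping to an unlock label, which is why the unlock: label and the explicit mutex_unlock() disappear in the second hunk. A minimal sketch of the pattern, using a hypothetical demo_queue structure rather than anything from the patch itself:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

/* Hypothetical structure, for illustration only. */
struct demo_queue {
	struct mutex lock;
	bool stopped;
};

static int demo_stop_queue(struct demo_queue *q, bool timed_out)
{
	guard(mutex)(&q->lock);		/* mutex_unlock() runs on every return path */

	if (timed_out)
		return -ETIMEDOUT;	/* no "goto unlock" needed */

	q->stopped = true;
	return 0;
}
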
@@ -42,7 +42,6 @@
 #define UFS_CMD_TRACE_STRINGS \
 	EM(UFS_CMD_SEND, "send_req") \
 	EM(UFS_CMD_COMP, "complete_rsp") \
-	EM(UFS_DEV_COMP, "dev_complete") \
 	EM(UFS_QUERY_SEND, "query_send") \
 	EM(UFS_QUERY_COMP, "query_complete") \
 	EM(UFS_QUERY_ERR, "query_complete_err") \
@@ -5,7 +5,6 @@
 enum ufs_trace_str_t {
 	UFS_CMD_SEND,
 	UFS_CMD_COMP,
-	UFS_DEV_COMP,
 	UFS_QUERY_SEND,
 	UFS_QUERY_COMP,
 	UFS_QUERY_ERR,
 
@@ -6,6 +6,8 @@
 #include <linux/pm_runtime.h>
 #include <ufs/ufshcd.h>
 
+void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs);
+
 static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
 {
 	return !hba->shutting_down;
 
@@ -2619,7 +2619,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 
 	init_completion(&uic_cmd->done);
 
-	uic_cmd->cmd_active = 1;
+	uic_cmd->cmd_active = true;
 	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
 
 	return 0;
@@ -5582,7 +5582,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 	guard(spinlock_irqsave)(hba->host->host_lock);
 	cmd = hba->active_uic_cmd;
 	if (!cmd)
-		goto unlock;
+		return retval;
 
 	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
 		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
@@ -5591,13 +5591,13 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 		cmd->argument2 |= ufshcd_get_uic_cmd_result(hba);
 		cmd->argument3 = ufshcd_get_dme_attr_val(hba);
 		if (!hba->uic_async_done)
-			cmd->cmd_active = 0;
+			cmd->cmd_active = false;
 		complete(&cmd->done);
 		retval = IRQ_HANDLED;
 	}
 
 	if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) {
-		cmd->cmd_active = 0;
+		cmd->cmd_active = false;
 		complete(hba->uic_async_done);
 		retval = IRQ_HANDLED;
 	}
@@ -5605,7 +5605,6 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 	if (retval == IRQ_HANDLED)
 		ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
 
-unlock:
 	return retval;
 }
 
@@ -78,7 +78,7 @@ struct uic_command {
 	const u32 argument1;
 	u32 argument2;
 	u32 argument3;
-	int cmd_active;
+	bool cmd_active;
 	struct completion done;
 };
 
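The ufshcd_uic_cmd_compl() hunks follow the same pattern with guard(spinlock_irqsave), which drops the host lock and restores the interrupt flags on every return, making the early "return retval" safe and the unlock: label unnecessary. The cmd_active field only ever holds a yes/no state, so the remaining hunks switch it from an int used as 0/1 to bool. A small illustrative sketch, again with hypothetical names rather than the driver's own:

#include <linux/cleanup.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Hypothetical command/host state, for illustration only. */
struct demo_cmd {
	bool active;			/* bool instead of an int used as 0/1 */
};

struct demo_host {
	spinlock_t lock;
	struct demo_cmd *active_cmd;
};

static irqreturn_t demo_irq_handler(struct demo_host *host)
{
	irqreturn_t retval = IRQ_NONE;

	/* Lock released and IRQ flags restored on every return path. */
	guard(spinlock_irqsave)(&host->lock);

	if (!host->active_cmd)
		return retval;		/* early return, no unlock label */

	host->active_cmd->active = false;
	retval = IRQ_HANDLED;
	return retval;
}
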
@@ -1297,7 +1297,6 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
 
 void ufshcd_enable_irq(struct ufs_hba *hba);
 void ufshcd_disable_irq(struct ufs_hba *hba);
-void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs);
 int ufshcd_alloc_host(struct device *, struct ufs_hba **);
 int ufshcd_hba_enable(struct ufs_hba *hba);
 int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
 
@@ -83,12 +83,14 @@ enum {
 };
 
 enum {
+	/* Submission Queue (SQ) Configuration Registers */
 	REG_SQATTR = 0x0,
 	REG_SQLBA = 0x4,
 	REG_SQUBA = 0x8,
 	REG_SQDAO = 0xC,
 	REG_SQISAO = 0x10,
 
+	/* Completion Queue (CQ) Configuration Registers */
 	REG_CQATTR = 0x20,
 	REG_CQLBA = 0x24,
 	REG_CQUBA = 0x28,
@@ -96,6 +98,7 @@ enum {
 	REG_CQISAO = 0x30,
 };
 
+/* Operation and Runtime Registers - Submission Queues and Completion Queues */
 enum {
 	REG_SQHP = 0x0,
 	REG_SQTP = 0x4,
 
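The register enums above only name offsets within a queue's configuration and operation/runtime blocks; a driver still has to combine them with a per-queue base address. A hedged sketch of how such offsets are typically consumed — the demo_mcq structure and its cfg_base and qcfg_stride fields below are assumptions made for illustration, not taken from the patch:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical MCQ context, for illustration only. */
struct demo_mcq {
	void __iomem *cfg_base;	/* start of the per-queue config blocks */
	u32 qcfg_stride;	/* byte distance between two queues' blocks */
};

/* Read the SQ attribute register of queue @qid (illustrative only). */
static u32 demo_read_sqattr(struct demo_mcq *mcq, u32 qid)
{
	return readl(mcq->cfg_base + qid * mcq->qcfg_stride + REG_SQATTR);
}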