
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "Usual driver updates (ufs, lpfc, target, qla2xxx) plus assorted
  cleanups and fixes including the WQ_PERCPU series.

  The biggest core change is the new allocation of pseudo-devices which
  allow the sending of internal commands to a given SCSI target"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (147 commits)
  scsi: MAINTAINERS: Add the UFS include directory
  scsi: scsi_debug: Support injecting unaligned write errors
  scsi: qla2xxx: Fix improper freeing of purex item
  scsi: ufs: rockchip: Fix compile error without CONFIG_GPIOLIB
  scsi: ufs: rockchip: Reset controller on PRE_CHANGE of hce enable notify
  scsi: ufs: core: Use scsi_device_busy()
  scsi: ufs: core: Fix single doorbell mode support
  scsi: pm80xx: Add WQ_PERCPU to alloc_workqueue() users
  scsi: target: Add WQ_PERCPU to alloc_workqueue() users
  scsi: qedi: Add WQ_PERCPU to alloc_workqueue() users
  scsi: target: ibmvscsi: Add WQ_PERCPU to alloc_workqueue() users
  scsi: qedf: Add WQ_PERCPU to alloc_workqueue() users
  scsi: bnx2fc: Add WQ_PERCPU to alloc_workqueue() users
  scsi: be2iscsi: Add WQ_PERCPU to alloc_workqueue() users
  scsi: message: fusion: Add WQ_PERCPU to alloc_workqueue() users
  scsi: lpfc: WQ_PERCPU added to alloc_workqueue() users
  scsi: scsi_transport_fc: WQ_PERCPU added to alloc_workqueue users()
  scsi: scsi_dh_alua: WQ_PERCPU added to alloc_workqueue() users
  scsi: qla2xxx: WQ_PERCPU added to alloc_workqueue() users
  scsi: target: sbp: Replace use of system_unbound_wq with system_dfl_wq
  ...
Linus Torvalds
2025-12-05 19:56:50 -08:00
115 changed files with 4921 additions and 1442 deletions
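
For readers skimming the shortlog: the WQ_PERCPU series converts alloc_workqueue() callers to pass WQ_PERCPU explicitly wherever they relied on the historical per-CPU default, as the hunks below show for fusion, be2iscsi, bnx2fc, lpfc and others. A minimal sketch of the pattern, assuming a hypothetical module (demo_wq and the module scaffolding are illustrative, not from this merge):

// Hedged sketch: the conversion keeps behavior identical; it only
// spells out the per-CPU placement that used to be implicit.
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int __init demo_init(void)
{
	/* Before the series: alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0); */
	demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	return demo_wq ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("WQ_PERCPU conversion sketch");
MODULE_LICENSE("GPL");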

View File

@@ -8,8 +8,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: MediaTek Universal Flash Storage (UFS) M-PHY
 
 maintainers:
-  - Stanley Chu <stanley.chu@mediatek.com>
   - Chunfeng Yun <chunfeng.yun@mediatek.com>
+  - Peter Wang <peter.wang@mediatek.com>
+  - Chaotian Jing <chaotian.jing@mediatek.com>
 
 description: |
   UFS M-PHY nodes are defined to describe on-chip UFS M-PHY hardware macro.

View File

@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/amd,versal2-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AMD Versal Gen 2 UFS Host Controller
+
+maintainers:
+  - Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
+
+allOf:
+  - $ref: ufs-common.yaml
+
+properties:
+  compatible:
+    const: amd,versal2-ufs
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: core
+
+  power-domains:
+    maxItems: 1
+
+  resets:
+    maxItems: 2
+
+  reset-names:
+    items:
+      - const: host
+      - const: phy
+
+required:
+  - reg
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    ufs@f10b0000 {
+        compatible = "amd,versal2-ufs";
+        reg = <0xf10b0000 0x1000>;
+        clocks = <&ufs_core_clk>;
+        clock-names = "core";
+        resets = <&scmi_reset 4>, <&scmi_reset 35>;
+        reset-names = "host", "phy";
+        interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+        freq-table-hz = <0 0>;
+    };

View File

@@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Mediatek Universal Flash Storage (UFS) Controller
 
 maintainers:
-  - Stanley Chu <stanley.chu@mediatek.com>
+  - Peter Wang <peter.wang@mediatek.com>
+  - Chaotian Jing <chaotian.jing@mediatek.com>
 
 properties:
   compatible:

View File

@@ -88,7 +88,6 @@ allOf:
             - const: ice_core_clk
         reg:
           minItems: 2
-          maxItems: 2
         reg-names:
           minItems: 2
       required:
@@ -117,7 +116,6 @@ allOf:
             - const: tx_lane0_sync_clk
             - const: rx_lane0_sync_clk
         reg:
-          minItems: 1
           maxItems: 1
         reg-names:
           maxItems: 1
@@ -147,7 +145,6 @@ allOf:
             - const: ice_core_clk
         reg:
           minItems: 2
-          maxItems: 2
         reg-names:
           minItems: 2
       required:

View File

@@ -61,6 +61,9 @@ properties:
   phy-names:
     const: ufs-phy
 
+  power-domains:
+    maxItems: 1
+
   samsung,sysreg:
     $ref: /schemas/types.yaml#/definitions/phandle-array
     items:

View File

@@ -23313,6 +23313,7 @@ F: drivers/scsi/
 F: drivers/ufs/
 F: include/scsi/
 F: include/uapi/scsi/
+F: include/ufs/
 
 SCSI TAPE DRIVER
 M: Kai Mäkisara <Kai.Makisara@kolumbus.fi>
@@ -26628,6 +26629,14 @@ S: Supported
 F: Documentation/devicetree/bindings/ufs/
 F: Documentation/scsi/ufs.rst
 F: drivers/ufs/core/
+F: include/ufs/
+
+UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER AMD VERSAL2
+M: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
+M: Ajay Neeli <ajay.neeli@amd.com>
+S: Maintained
+F: Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml
+F: drivers/ufs/host/ufs-amd-versal2.c
 
 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
 M: Pedro Sousa <pedrom.sousa@synopsys.com>
@@ -26645,6 +26654,7 @@ F: drivers/ufs/host/ufs-exynos*
 
 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER MEDIATEK HOOKS
 M: Peter Wang <peter.wang@mediatek.com>
+M: Chaotian Jing <chaotian.jing@mediatek.com>
 R: Stanley Jhu <chu.stanley@gmail.com>
 L: linux-scsi@vger.kernel.org
 L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)

View File

@@ -245,6 +245,73 @@ void ata_acpi_bind_dev(struct ata_device *dev)
 			     ata_acpi_dev_uevent);
 }
 
+/**
+ * ata_acpi_dev_manage_restart - if the disk should be stopped (spun down) on
+ * system restart.
+ * @dev: target ATA device
+ *
+ * RETURNS:
+ * true if the disk should be stopped, otherwise false.
+ */
+bool ata_acpi_dev_manage_restart(struct ata_device *dev)
+{
+	struct device *tdev;
+
+	/*
+	 * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+	 * ata_device instead of the ata_port.
+	 */
+	if (dev->link->ap->flags & ATA_FLAG_ACPI_SATA)
+		tdev = &dev->tdev;
+	else
+		tdev = &dev->link->ap->tdev;
+
+	if (!is_acpi_device_node(tdev->fwnode))
+		return false;
+
+	return acpi_bus_power_manageable(ACPI_HANDLE(tdev));
+}
+
+/**
+ * ata_acpi_port_power_on - set the power state of the ata port to D0
+ * @ap: target ATA port
+ *
+ * This function is called at the beginning of ata_port_probe().
+ */
+void ata_acpi_port_power_on(struct ata_port *ap)
+{
+	acpi_handle handle;
+	int i;
+
+	/*
+	 * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+	 * ata_device instead of the ata_port.
+	 */
+	if (ap->flags & ATA_FLAG_ACPI_SATA) {
+		for (i = 0; i < ATA_MAX_DEVICES; i++) {
+			struct ata_device *dev = &ap->link.device[i];
+
+			if (!is_acpi_device_node(dev->tdev.fwnode))
+				continue;
+			handle = ACPI_HANDLE(&dev->tdev);
+			if (!acpi_bus_power_manageable(handle))
+				continue;
+			if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+				ata_dev_err(dev,
+					"acpi: failed to set power state to D0\n");
+		}
+		return;
+	}
+
+	if (!is_acpi_device_node(ap->tdev.fwnode))
+		return;
+	handle = ACPI_HANDLE(&ap->tdev);
+	if (!acpi_bus_power_manageable(handle))
+		return;
+	if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+		ata_port_err(ap, "acpi: failed to set power state to D0\n");
+}
+
 /**
  * ata_acpi_dissociate - dissociate ATA host from ACPI objects
  * @host: target ATA host

View File

@@ -5915,6 +5915,8 @@ void ata_port_probe(struct ata_port *ap)
 	struct ata_eh_info *ehi = &ap->link.eh_info;
 	unsigned long flags;
 
+	ata_acpi_port_power_on(ap);
+
 	/* kick EH for boot probing */
 	spin_lock_irqsave(ap->lock, flags);

View File

@@ -1102,6 +1102,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
 		 */
 		sdev->manage_runtime_start_stop = 1;
 		sdev->manage_shutdown = 1;
+		sdev->manage_restart = ata_acpi_dev_manage_restart(dev);
 		sdev->force_runtime_start_on_system_start = 1;
 	}

View File

@@ -130,6 +130,8 @@ extern void ata_acpi_on_disable(struct ata_device *dev);
 extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
 extern void ata_acpi_bind_port(struct ata_port *ap);
 extern void ata_acpi_bind_dev(struct ata_device *dev);
+extern void ata_acpi_port_power_on(struct ata_port *ap);
+extern bool ata_acpi_dev_manage_restart(struct ata_device *dev);
 extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
 #else
 static inline void ata_acpi_dissociate(struct ata_host *host) { }
@@ -140,6 +142,8 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
 				      pm_message_t state) { }
 static inline void ata_acpi_bind_port(struct ata_port *ap) {}
 static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
+static inline void ata_acpi_port_power_on(struct ata_port *ap) {}
+static inline bool ata_acpi_dev_manage_restart(struct ata_device *dev) { return 0; }
 #endif
 
 /* libata-scsi.c */

View File

@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for Xilinx firmwares
 
-obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o
 obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o

View File

@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Firmware Layer for UFS APIs
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/module.h>
+
+/* Register Node IDs */
+#define PM_REGNODE_PMC_IOU_SLCR		0x30000002	/* PMC IOU SLCR */
+#define PM_REGNODE_EFUSE_CACHE		0x30000003	/* EFUSE Cache */
+
+/* Register Offsets for PMC IOU SLCR */
+#define SRAM_CSR_OFFSET			0x104C	/* SRAM Control and Status */
+#define TXRX_CFGRDY_OFFSET		0x1054	/* M-PHY TX-RX Config ready */
+
+/* Masks for SRAM Control and Status Register */
+#define SRAM_CSR_INIT_DONE_MASK		BIT(0)	/* SRAM initialization done */
+#define SRAM_CSR_EXT_LD_DONE_MASK	BIT(1)	/* SRAM External load done */
+#define SRAM_CSR_BYPASS_MASK		BIT(2)	/* Bypass SRAM interface */
+
+/* Mask to check M-PHY TX-RX configuration readiness */
+#define TX_RX_CFG_RDY_MASK		GENMASK(3, 0)
+
+/* Register Offsets for EFUSE Cache */
+#define UFS_CAL_1_OFFSET		0xBE8	/* UFS Calibration Value */
+
+/**
+ * zynqmp_pm_is_mphy_tx_rx_config_ready - check M-PHY TX-RX config readiness
+ * @is_ready: Store output status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
+{
+	u32 regval;
+	int ret;
+
+	if (!is_ready)
+		return -EINVAL;
+
+	ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, TXRX_CFGRDY_OFFSET, &regval);
+	if (ret)
+		return ret;
+
+	regval &= TX_RX_CFG_RDY_MASK;
+	if (regval)
+		*is_ready = true;
+	else
+		*is_ready = false;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_mphy_tx_rx_config_ready);
+
+/**
+ * zynqmp_pm_is_sram_init_done - check SRAM initialization
+ * @is_done: Store output status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_sram_init_done(bool *is_done)
+{
+	u32 regval;
+	int ret;
+
+	if (!is_done)
+		return -EINVAL;
+
+	ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &regval);
+	if (ret)
+		return ret;
+
+	regval &= SRAM_CSR_INIT_DONE_MASK;
+	if (regval)
+		*is_done = true;
+	else
+		*is_done = false;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_sram_init_done);
+
+/**
+ * zynqmp_pm_set_sram_bypass - Set SRAM bypass Control
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_sram_bypass(void)
+{
+	u32 sram_csr;
+	int ret;
+
+	ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &sram_csr);
+	if (ret)
+		return ret;
+
+	sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK;
+	sram_csr |= SRAM_CSR_BYPASS_MASK;
+
+	return zynqmp_pm_sec_mask_write_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET,
+					    GENMASK(2, 1), sram_csr);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sram_bypass);
+
+/**
+ * zynqmp_pm_get_ufs_calibration_values - Read UFS calibration values
+ * @val: Store the calibration value
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_get_ufs_calibration_values(u32 *val)
+{
+	return zynqmp_pm_sec_read_reg(PM_REGNODE_EFUSE_CACHE, UFS_CAL_1_OFFSET, val);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_ufs_calibration_values);
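
A sketch of how a host driver is likely to consume these exports — the obvious candidate being the new drivers/ufs/host/ufs-amd-versal2.c listed in MAINTAINERS below, which is not part of this excerpt; the demo function is hypothetical and uses only the helpers defined above:

#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>

static int demo_versal2_phy_setup(void)
{
	bool ready = false;
	u32 cal;
	int ret;

	/* Check that the M-PHY TX/RX lanes reported configuration ready. */
	ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&ready);
	if (ret)
		return ret;
	if (!ready)
		return -EBUSY;

	/* Fetch the eFUSE calibration word meant for the M-PHY. */
	ret = zynqmp_pm_get_ufs_calibration_values(&cal);
	if (ret)
		return ret;

	/* (Hypothetical) program 'cal' into the PHY registers here. */
	return 0;
}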

View File

@@ -1653,6 +1653,52 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
 	return zynqmp_pm_invoke_fn(PM_IOCTL, payload, 3, 0, IOCTL_GET_FEATURE_CONFIG, id);
 }
 
+/**
+ * zynqmp_pm_sec_read_reg - PM call to securely read from given offset
+ *			    of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @ret_value: Output data read from the given offset after
+ *	       firmware access policy is successfully enforced
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	u32 count = 1;
+	int ret;
+
+	if (!ret_value)
+		return -EINVAL;
+
+	ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 4, node_id, IOCTL_READ_REG,
+				  offset, count);
+
+	*ret_value = ret_payload[1];
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_read_reg);
+
+/**
+ * zynqmp_pm_sec_mask_write_reg - PM call to securely write to given offset
+ *				  of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @mask: Mask to be used
+ * @value: Value to be written
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, u32 mask,
+				 u32 value)
+{
+	return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 5, node_id, IOCTL_MASK_WRITE_REG,
+				   offset, mask, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_mask_write_reg);
+
 /**
  * zynqmp_pm_set_sd_config - PM call to set value of SD config registers
  * @node: SD node ID

View File

@@ -1857,7 +1857,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
 	ioc->reset_work_q =
-		alloc_workqueue("mpt_poll_%d", WQ_MEM_RECLAIM, 0, ioc->id);
+		alloc_workqueue("mpt_poll_%d", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
+				ioc->id);
 	if (!ioc->reset_work_q) {
 		printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
 		    ioc->name);
@@ -1984,7 +1985,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_LIST_HEAD(&ioc->fw_event_list);
 	spin_lock_init(&ioc->fw_event_lock);
-	ioc->fw_event_q = alloc_workqueue("mpt/%d", WQ_MEM_RECLAIM, 0, ioc->id);
+	ioc->fw_event_q = alloc_workqueue("mpt/%d",
+					  WQ_MEM_RECLAIM | WQ_PERCPU, 0,
+					  ioc->id);
 	if (!ioc->fw_event_q) {
 		printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
 		    ioc->name);

View File

@@ -106,7 +106,7 @@ config PHANTOM
 config RPMB
 	tristate "RPMB partition interface"
-	depends on MMC
+	depends on MMC || SCSI_UFSHCD
 	help
 	  Unified RPMB unit interface for RPMB capable devices such as eMMC and
 	  UFS. Provides interface for in-kernel security controllers to access

View File

@@ -242,7 +242,7 @@ static int aac_queuecommand(struct Scsi_Host *shost,
 {
 	aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL;
-	return aac_scsi_cmd(cmd) ? FAILED : 0;
+	return aac_scsi_cmd(cmd) ? SCSI_MLQUEUE_HOST_BUSY : 0;
 }
 
 /**

View File

@@ -2401,8 +2401,7 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
 	struct asc_board *boardp = shost_priv(s);
 
 	printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
-	printk(" host_busy %d, host_no %d,\n",
-	       scsi_host_busy(s), s->host_no);
+	printk(" host_no %d,\n", s->host_no);
 
 	printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
 	       (ulong)s->base, (ulong)s->io_port, boardp->irq);

View File

@@ -882,6 +882,9 @@ static void asd_pci_remove(struct pci_dev *dev)
 	asd_disable_ints(asd_ha);
 
+	/* Ensure all scheduled tasklets complete before freeing resources */
+	tasklet_kill(&asd_ha->seq.dl_tasklet);
+
 	asd_remove_dev_attrs(asd_ha);
 
 	/* XXX more here as needed */

View File

@@ -5633,7 +5633,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
 
-	phba->wq = alloc_workqueue("beiscsi_%02x_wq", WQ_MEM_RECLAIM, 1,
+	phba->wq = alloc_workqueue("beiscsi_%02x_wq",
+				   WQ_MEM_RECLAIM | WQ_PERCPU, 1,
 				   phba->shost->host_no);
 	if (!phba->wq) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,

View File

@@ -2695,7 +2695,7 @@ static int __init bnx2fc_mod_init(void)
 	if (rc)
 		goto detach_ft;
 
-	bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
+	bnx2fc_wq = alloc_workqueue("bnx2fc", WQ_PERCPU, 0);
 	if (!bnx2fc_wq) {
 		rc = -ENOMEM;
 		goto release_bt;

View File

@@ -1300,7 +1300,7 @@ static int __init alua_init(void)
 {
 	int r;
 
-	kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
+	kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!kaluad_wq)
 		return -ENOMEM;

View File

@@ -2438,7 +2438,7 @@ static int __init fcoe_init(void)
 	unsigned int cpu;
 	int rc = 0;
 
-	fcoe_wq = alloc_workqueue("fcoe", 0, 0);
+	fcoe_wq = alloc_workqueue("fcoe", WQ_PERCPU, 0);
 	if (!fcoe_wq)
 		return -ENOMEM;

View File

@@ -134,7 +134,6 @@ int fnic_get_vnic_config(struct fnic *fnic)
 				 c->luns_per_tgt));
 
 	c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
-	c->intr_timer_type = c->intr_timer_type;
 
 	/* for older firmware, GET_CONFIG will not return anything */
 	if (c->wq_copy_count == 0)

View File

@@ -231,6 +231,12 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 		goto fail;
 	}
 
+	if (shost->nr_reserved_cmds && !sht->queue_reserved_command) {
+		shost_printk(KERN_ERR, shost,
+			     "nr_reserved_cmds set but no method to queue\n");
+		goto fail;
+	}
+
 	/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
 	shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
 				   shost->can_queue);
@@ -307,6 +313,14 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 	if (error)
 		goto out_del_dev;
 
+	if (shost->nr_reserved_cmds) {
+		shost->pseudo_sdev = scsi_get_pseudo_sdev(shost);
+		if (!shost->pseudo_sdev) {
+			error = -ENOMEM;
+			goto out_del_dev;
+		}
+	}
+
 	scsi_proc_host_add(shost);
 	scsi_autopm_put_host(shost);
 	return error;
@@ -436,6 +450,7 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
 	shost->hostt = sht;
 	shost->this_id = sht->this_id;
 	shost->can_queue = sht->can_queue;
+	shost->nr_reserved_cmds = sht->nr_reserved_cmds;
 	shost->sg_tablesize = sht->sg_tablesize;
 	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
 	shost->cmd_per_lun = sht->cmd_per_lun;
@@ -604,8 +619,8 @@ static bool scsi_host_check_in_flight(struct request *rq, void *data)
 }
 
 /**
- * scsi_host_busy - Return the host busy counter
- * @shost:	Pointer to Scsi_Host to inc.
+ * scsi_host_busy - Return the count of in-flight commands
+ * @shost:	Pointer to Scsi_Host
 **/
 int scsi_host_busy(struct Scsi_Host *shost)
 {
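
This file carries the core change called out in the pull message: a host that declares reserved commands now gets a pseudo scsi_device (shost->pseudo_sdev) through which internal commands can be sent to the target. A sketch of the driver-side opt-in, assuming a hypothetical template; only fields visible in these hunks are used, and the queue_reserved_command() method's signature is not part of this excerpt, so it is referenced only in a comment:

#include <scsi/scsi_host.h>

static const struct scsi_host_template demo_sht = {
	.name		  = "demo",
	.can_queue	  = 32,
	/*
	 * Reserve tags for internal (driver-generated) commands. Per the
	 * check added to scsi_add_host_with_dma() above, a template that
	 * sets nr_reserved_cmds must also provide queue_reserved_command()
	 * (signature not shown in this excerpt, so omitted here).
	 */
	.nr_reserved_cmds = 2,
};

scsi_host_alloc() copies nr_reserved_cmds onto the Scsi_Host, and scsi_add_host_with_dma() then allocates the pseudo device, failing the host add with -ENOMEM if that allocation fails.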

View File

@@ -3533,7 +3533,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
 	init_completion(&vscsi->wait_idle);
 	init_completion(&vscsi->unconfig);
 
-	vscsi->work_q = alloc_workqueue("ibmvscsis%s", WQ_MEM_RECLAIM, 1,
+	vscsi->work_q = alloc_workqueue("ibmvscsis%s",
+					WQ_MEM_RECLAIM | WQ_PERCPU, 1,
 					dev_name(&vdev->dev));
 	if (!vscsi->work_q) {
 		rc = -ENOMEM;

View File

@@ -85,15 +85,17 @@ struct isci_tmf {
 	struct completion *complete;
 	enum sas_protocol proto;
+	unsigned char lun[8];
+	u16 io_tag;
+	enum isci_tmf_function_codes tmf_code;
+	int status;
+
+	/* Must be last --ends in a flexible-array member. */
 	union {
 		struct ssp_response_iu resp_iu;
 		struct dev_to_host_fis d2h_fis;
 		u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
 	} resp;
-	unsigned char lun[8];
-	u16 io_tag;
-	enum isci_tmf_function_codes tmf_code;
-	int status;
 };
 
 static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)

View File

@@ -311,7 +311,6 @@ struct lpfc_defer_flogi_acc {
 	u16 rx_id;
 	u16 ox_id;
 	struct lpfc_nodelist *ndlp;
-
 };
 
 #define LPFC_VMID_TIMER   300	/* timer interval in seconds */
@@ -634,6 +633,7 @@ struct lpfc_vport {
 #define FC_CT_RSPN_ID		0x8	 /* RSPN_ID accepted by switch */
 #define FC_CT_RFT_ID		0x10	 /* RFT_ID accepted by switch */
 #define FC_CT_RPRT_DEFER	0x20	 /* Defer issuing FDMI RPRT */
+#define FC_CT_RSPNI_PNI		0x40	 /* RSPNI_PNI accepted by switch */
 
 	struct list_head fc_nodes;
 	spinlock_t fc_nodes_list_lock; /* spinlock for fc_nodes list */
@@ -1078,6 +1078,8 @@ struct lpfc_hba {
 	uint32_t nport_event_cnt;	/* timestamp for nlplist entry */
 
+	unsigned long pni;	/* 64-bit Platform Name Identifier */
+
 	uint8_t  wwnn[8];
 	uint8_t  wwpn[8];
 	uint32_t RandomData[7];

View File

@@ -1742,6 +1742,28 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	return;
 }
 
+static void
+lpfc_cmpl_ct_cmd_rspni_pni(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			   struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport;
+	struct lpfc_dmabuf *outp;
+	struct lpfc_sli_ct_request *ctrsp;
+	u32 ulp_status;
+
+	vport = cmdiocb->vport;
+	ulp_status = get_job_ulpstatus(phba, rspiocb);
+
+	if (ulp_status == IOSTAT_SUCCESS) {
+		outp = cmdiocb->rsp_dmabuf;
+		ctrsp = (struct lpfc_sli_ct_request *)outp->virt;
+		if (be16_to_cpu(ctrsp->CommandResponse.bits.CmdRsp) ==
+		    SLI_CT_RESPONSE_FS_ACC)
+			vport->ct_flags |= FC_CT_RSPNI_PNI;
+	}
+
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+}
+
 static void
 lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		       struct lpfc_iocbq *rspiocb)
@@ -1956,6 +1978,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
 		bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
 	else if (cmdcode == SLI_CTNS_RSNN_NN)
 		bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RSPNI_PNI)
+		bpl->tus.f.bdeSize = RSPNI_REQUEST_SZ;
 	else if (cmdcode == SLI_CTNS_DA_ID)
 		bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ;
 	else if (cmdcode == SLI_CTNS_RFF_ID)
@@ -2077,6 +2101,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
 			       CtReq->un.rsnn.symbname, size);
 		cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
 		break;
+	case SLI_CTNS_RSPNI_PNI:
+		vport->ct_flags &= ~FC_CT_RSPNI_PNI;
+		CtReq->CommandResponse.bits.CmdRsp =
+			cpu_to_be16(SLI_CTNS_RSPNI_PNI);
+		CtReq->un.rspni.pni = cpu_to_be64(phba->pni);
+		scnprintf(CtReq->un.rspni.symbname,
+			  sizeof(CtReq->un.rspni.symbname), "OS Host Name::%s",
+			  phba->os_host_name);
+		CtReq->un.rspni.len = strnlen(CtReq->un.rspni.symbname,
+					      sizeof(CtReq->un.rspni.symbname));
+		cmpl = lpfc_cmpl_ct_cmd_rspni_pni;
+		break;
 	case SLI_CTNS_DA_ID:
 		/* Implement DA_ID Nameserver request */
 		CtReq->CommandResponse.bits.CmdRsp =

View File

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * Broadcom refers to Broadcom Inc. and/or its subsidiaries.       *
  * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -208,6 +208,7 @@ enum lpfc_nlp_flag {
 				       NPR list */
 	NLP_RM_DFLT_RPI      = 26,  /* need to remove leftover dflt RPI */
 	NLP_NODEV_REMOVE     = 27,  /* Defer removal till discovery ends */
+	NLP_FLOGI_DFR_ACC    = 28,  /* FLOGI LS_ACC was Deferred */
 	NLP_SC_REQ           = 29,  /* Target requires authentication */
 	NLP_FIRSTBURST       = 30,  /* Target supports FirstBurst */
 	NLP_RPI_REGISTERED   = 31   /* nlp_rpi is valid */

View File

@@ -650,8 +650,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
 	if (sp->cls3.classValid)
 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
-	if (sp->cls4.classValid)
-		ndlp->nlp_class_sup |= FC_COS_CLASS4;
 	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
 			     sp->cmn.bbRcvSizeLsb;
@@ -934,10 +932,15 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	/* Check to see if link went down during discovery */
 	if (lpfc_els_chk_latt(vport)) {
 		/* One additional decrement on node reference count to
-		 * trigger the release of the node
+		 * trigger the release of the node. Make sure the ndlp
+		 * is marked NLP_DROPPED.
 		 */
-		if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+		if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) &&
+		    !test_bit(NLP_DROPPED, &ndlp->nlp_flag) &&
+		    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+			set_bit(NLP_DROPPED, &ndlp->nlp_flag);
 			lpfc_nlp_put(ndlp);
+		}
 		goto out;
 	}
@@ -995,9 +998,10 @@ stop_rr_fcf_flogi:
 					IOERR_LOOP_OPEN_FAILURE)))
 			lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
 				      "2858 FLOGI Status:x%x/x%x TMO"
-				      ":x%x Data x%lx x%x\n",
+				      ":x%x Data x%lx x%x x%lx x%x\n",
 				      ulp_status, ulp_word4, tmo,
-				      phba->hba_flag, phba->fcf.fcf_flag);
+				      phba->hba_flag, phba->fcf.fcf_flag,
+				      ndlp->nlp_flag, ndlp->fc4_xpt_flags);
 
 		/* Check for retry */
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
@@ -1015,14 +1019,17 @@ stop_rr_fcf_flogi:
 		 * reference to trigger node release.
 		 */
 		if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) &&
-		    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+		    !test_bit(NLP_DROPPED, &ndlp->nlp_flag) &&
+		    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
+			set_bit(NLP_DROPPED, &ndlp->nlp_flag);
 			lpfc_nlp_put(ndlp);
+		}
 
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
 				 "0150 FLOGI Status:x%x/x%x "
-				 "xri x%x TMO:x%x refcnt %d\n",
+				 "xri x%x iotag x%x TMO:x%x refcnt %d\n",
 				 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
-				 tmo, kref_read(&ndlp->kref));
+				 cmdiocb->iotag, tmo, kref_read(&ndlp->kref));
 
 		/* If this is not a loop open failure, bail out */
 		if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
@@ -1279,6 +1286,19 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	uint32_t tmo, did;
 	int rc;
 
+	/* It's possible for lpfc to reissue a FLOGI on an ndlp that is marked
+	 * NLP_DROPPED. This happens when the FLOGI completed with the XB bit
+	 * set causing lpfc to reference the ndlp until the XRI_ABORTED CQE is
+	 * issued. The time window for the XRI_ABORTED CQE can be as much as
+	 * 2*2*RA_TOV allowing for ndlp reuse of this type when the link is
+	 * cycling quickly. When true, restore the initial reference and remove
+	 * the NLP_DROPPED flag as lpfc is retrying.
+	 */
+	if (test_and_clear_bit(NLP_DROPPED, &ndlp->nlp_flag)) {
+		if (!lpfc_nlp_get(ndlp))
+			return 1;
+	}
+
 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_FLOGI);
@@ -1334,6 +1354,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		/* Can't do SLI4 class2 without support sequence coalescing */
 		sp->cls2.classValid = 0;
 		sp->cls2.seqDelivery = 0;
+
+		/* Fill out Auxiliary Parameter Data */
+		if (phba->pni) {
+			sp->aux.flags =
+				AUX_PARM_DATA_VALID | AUX_PARM_PNI_VALID;
+			sp->aux.pni = cpu_to_be64(phba->pni);
+			sp->aux.npiv_cnt = cpu_to_be16(phba->max_vpi - 1);
+		}
 	} else {
 		/* Historical, setting sequential-delivery bit for SLI3 */
 		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
@@ -1413,11 +1441,12 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 					phba->defer_flogi_acc.ox_id;
 		}
 
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
-				 " ox_id: x%x, hba_flag x%lx\n",
-				 phba->defer_flogi_acc.rx_id,
-				 phba->defer_flogi_acc.ox_id, phba->hba_flag);
+		/* The LS_ACC completion needs to drop the initial reference.
+		 * This is a special case for Pt2Pt because both FLOGIs need
+		 * to complete and lpfc defers the LS_ACC when the remote
+		 * FLOGI arrives before the driver's FLOGI.
+		 */
+		set_bit(NLP_FLOGI_DFR_ACC, &ndlp->nlp_flag);
 
 		/* Send deferred FLOGI ACC */
 		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
@@ -1433,6 +1462,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			phba->defer_flogi_acc.ndlp = NULL;
 		}
 
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
+				 " ox_id: x%x, ndlp x%px hba_flag x%lx\n",
+				 phba->defer_flogi_acc.rx_id,
+				 phba->defer_flogi_acc.ox_id,
+				 phba->defer_flogi_acc.ndlp,
+				 phba->hba_flag);
+
 		vport->fc_myDID = did;
 	}
@@ -2248,7 +2285,8 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 	sp->cmn.valid_vendor_ver_level = 0;
 	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
-	sp->cmn.bbRcvSizeMsb &= 0xF;
+	if (!test_bit(FC_PT2PT, &vport->fc_flag))
+		sp->cmn.bbRcvSizeMsb &= 0xF;
 
 	/* Check if the destination port supports VMID */
 	ndlp->vmid_support = 0;
@@ -2367,7 +2405,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		mode = KERN_INFO;
 
 	/* Warn PRLI status */
-	lpfc_printf_vlog(vport, mode, LOG_ELS,
+	lpfc_vlog_msg(vport, mode, LOG_ELS,
 		      "2754 PRLI DID:%06X Status:x%x/x%x, "
 		      "data: x%x x%x x%lx\n",
 		      ndlp->nlp_DID, ulp_status,
@@ -3024,6 +3062,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				 ndlp->nlp_DID, ulp_status,
 				 ulp_word4);
 
+		/* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */
 		if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
 			skip_recovery = 1;
 	}
@@ -3262,7 +3301,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
 		return -ENOMEM;
 	}
 	rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
-			  (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
+			  (u8 *)&ns_ndlp->fc_sparam, mbox, fc_ndlp->nlp_rpi);
 	if (rc) {
 		rc = -EACCES;
 		goto out;
@@ -3306,7 +3345,8 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
  *
  * This routine is a generic completion callback function for Discovery ELS cmd.
  * Currently used by the ELS command issuing routines for the ELS State Change
- * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
+ * Request (SCR), lpfc_issue_els_scr(), Exchange Diagnostic Capabilities (EDC),
+ * lpfc_issue_els_edc() and the ELS RDF, lpfc_issue_els_rdf().
  * These commands will be retried once only for ELS timeout errors.
  **/
 static void
@@ -3379,11 +3419,21 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
 		return;
 	}
 
 	if (ulp_status) {
 		/* ELS discovery cmd completes with error */
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
 				 "4203 ELS cmd x%x error: x%x x%X\n", cmd,
 				 ulp_status, ulp_word4);
+
+		/* In the case where the ELS cmd completes with an error and
+		 * the node does not have RPI registered, the node is
+		 * outstanding and should put its initial reference.
+		 */
+		if ((cmd == ELS_CMD_SCR || cmd == ELS_CMD_RDF) &&
+		    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
+		    !test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+			lpfc_nlp_put(ndlp);
+
 		goto out;
 	}
@@ -3452,6 +3502,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	struct lpfc_nodelist *ndlp;
+	bool node_created = false;
 
 	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
@@ -3461,21 +3512,21 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
 		if (!ndlp)
 			return 1;
 		lpfc_enqueue_node(vport, ndlp);
+		node_created = true;
 	}
 
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_SCR);
 	if (!elsiocb)
-		return 1;
+		goto out_node_created;
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
 		if (rc) {
-			lpfc_els_free_iocb(phba, elsiocb);
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
 					 "0937 %s: Failed to reg fc node, rc %d\n",
 					 __func__, rc);
-			return 1;
+			goto out_free_iocb;
 		}
 	}
 	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
@@ -3494,23 +3545,27 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
 	phba->fc_stat.elsXmitSCR++;
 	elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
 	elsiocb->ndlp = lpfc_nlp_get(ndlp);
-	if (!elsiocb->ndlp) {
-		lpfc_els_free_iocb(phba, elsiocb);
-		return 1;
-	}
+	if (!elsiocb->ndlp)
+		goto out_free_iocb;
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 			      "Issue SCR: did:x%x refcnt %d",
 			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-	if (rc == IOCB_ERROR) {
-		lpfc_els_free_iocb(phba, elsiocb);
-		lpfc_nlp_put(ndlp);
-		return 1;
-	}
+	if (rc == IOCB_ERROR)
+		goto out_iocb_error;
 
 	return 0;
+
+out_iocb_error:
+	lpfc_nlp_put(ndlp);
+out_free_iocb:
+	lpfc_els_free_iocb(phba, elsiocb);
+out_node_created:
+	if (node_created)
+		lpfc_nlp_put(ndlp);
+	return 1;
 }
@@ -3597,8 +3652,8 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
 	}
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
-			      "Issue RSCN: did:x%x",
-			      ndlp->nlp_DID, 0, 0);
+			      "Issue RSCN: did:x%x refcnt %d",
+			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
@@ -3705,10 +3760,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 		lpfc_nlp_put(ndlp);
 		return 1;
 	}
-	/* This will cause the callback-function lpfc_cmpl_els_cmd to
-	 * trigger the release of the node.
-	 */
+	/* Don't release reference count as RDF is likely outstanding */
 	return 0;
 }
@@ -3726,7 +3778,12 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
  *
  * Return code
  *   0 - Successfully issued rdf command
- *   1 - Failed to issue rdf command
+ * < 0 - Failed to issue rdf command
+ * -EACCES - RDF not required for NPIV_PORT
+ * -ENODEV - No fabric controller device available
+ * -ENOMEM - No available memory
+ * -EIO - The mailbox failed to complete successfully.
+ *
 **/
 int
 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
@@ -3737,25 +3794,30 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
 	struct lpfc_nodelist *ndlp;
 	uint16_t cmdsize;
 	int rc;
+	bool node_created = false;
+	int err;
 
 	cmdsize = sizeof(*prdf);
 
+	/* RDF ELS is not required on an NPIV VN_Port. */
+	if (vport->port_type == LPFC_NPIV_PORT)
+		return -EACCES;
+
 	ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
 	if (!ndlp) {
 		ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
 		if (!ndlp)
 			return -ENODEV;
 		lpfc_enqueue_node(vport, ndlp);
+		node_created = true;
 	}
 
-	/* RDF ELS is not required on an NPIV VN_Port. */
-	if (vport->port_type == LPFC_NPIV_PORT)
-		return -EACCES;
-
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_RDF);
-	if (!elsiocb)
-		return -ENOMEM;
+	if (!elsiocb) {
+		err = -ENOMEM;
+		goto out_node_created;
+	}
 
 	/* Configure the payload for the supported FPIN events. */
 	prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
@@ -3781,8 +3843,8 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
 	elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
 	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 	if (!elsiocb->ndlp) {
-		lpfc_els_free_iocb(phba, elsiocb);
-		return -EIO;
+		err = -EIO;
+		goto out_free_iocb;
 	}
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -3791,11 +3853,19 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
 	if (rc == IOCB_ERROR) {
-		lpfc_els_free_iocb(phba, elsiocb);
-		lpfc_nlp_put(ndlp);
-		return -EIO;
+		err = -EIO;
+		goto out_iocb_error;
 	}
 	return 0;
+
+out_iocb_error:
+	lpfc_nlp_put(ndlp);
+out_free_iocb:
+	lpfc_els_free_iocb(phba, elsiocb);
+out_node_created:
+	if (node_created)
+		lpfc_nlp_put(ndlp);
+	return err;
 }
@@ -3816,19 +3886,23 @@ static int
 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		 struct lpfc_nodelist *ndlp)
 {
+	int rc;
+
+	rc = lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL);
 	/* Send LS_ACC */
-	if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
+	if (rc) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
-				 "1623 Failed to RDF_ACC from x%x for x%x\n",
-				 ndlp->nlp_DID, vport->fc_myDID);
+				 "1623 Failed to RDF_ACC from x%x for x%x Data: %d\n",
+				 ndlp->nlp_DID, vport->fc_myDID, rc);
 		return -EIO;
 	}
 
+	rc = lpfc_issue_els_rdf(vport, 0);
 	/* Issue new RDF for reregistering */
-	if (lpfc_issue_els_rdf(vport, 0)) {
+	if (rc) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
-				 "2623 Failed to re register RDF for x%x\n",
-				 vport->fc_myDID);
+				 "2623 Failed to re register RDF for x%x Data: %d\n",
+				 vport->fc_myDID, rc);
 		return -EIO;
 	}
@@ -4299,7 +4373,7 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
 		/* The additional lpfc_nlp_put will cause the following
-		 * lpfc_els_free_iocb routine to trigger the rlease of
+		 * lpfc_els_free_iocb routine to trigger the release of
 		 * the node.
 		 */
 		lpfc_els_free_iocb(phba, elsiocb);
@@ -5127,7 +5201,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
 {
 	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
 
-	/* The I/O iocb is complete. Clear the node and first dmbuf */
+	/* The I/O iocb is complete. Clear the node and first dmabuf */
 	elsiocb->ndlp = NULL;
 
 	/* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
@@ -5160,14 +5234,12 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
 		} else {
 			buf_ptr1 = elsiocb->cmd_dmabuf;
 			lpfc_els_free_data(phba, buf_ptr1);
-			elsiocb->cmd_dmabuf = NULL;
 		}
 	}
 
 	if (elsiocb->bpl_dmabuf) {
 		buf_ptr = elsiocb->bpl_dmabuf;
 		lpfc_els_free_bpl(phba, buf_ptr);
-		elsiocb->bpl_dmabuf = NULL;
 	}
 	lpfc_sli_release_iocbq(phba, elsiocb);
 	return 0;
@@ -5305,11 +5377,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	IOCB_t  *irsp;
 	LPFC_MBOXQ_t *mbox = NULL;
 	u32 ulp_status, ulp_word4, tmo, did, iotag;
+	u32 cmd;
 
 	if (!vport) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
 				"3177 null vport in ELS rsp\n");
-		goto out;
+		goto release;
 	}
 	if (cmdiocb->context_un.mbox)
 		mbox = cmdiocb->context_un.mbox;
@@ -5419,7 +5492,7 @@ out:
 	 * these conditions because it doesn't need the login.
 	 */
 	if (phba->sli_rev == LPFC_SLI_REV4 &&
-	    vport && vport->port_type == LPFC_NPIV_PORT &&
+	    vport->port_type == LPFC_NPIV_PORT &&
 	    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
 		if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
 		    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
@@ -5435,6 +5508,27 @@ out:
 		}
 	}
 
+	/* The driver's unsolicited deferred FLOGI ACC in Pt2Pt needs to
+	 * release the initial reference because the put after the free_iocb
+	 * call removes only the reference from the defer logic. This FLOGI
+	 * is never registered with the SCSI transport.
+	 */
+	if (test_bit(FC_PT2PT, &vport->fc_flag) &&
+	    test_and_clear_bit(NLP_FLOGI_DFR_ACC, &ndlp->nlp_flag)) {
+		lpfc_printf_vlog(vport, KERN_INFO,
+				 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+				 "3357 Pt2Pt Defer FLOGI ACC ndlp x%px, "
+				 "nflags x%lx, fc_flag x%lx\n",
+				 ndlp, ndlp->nlp_flag,
+				 vport->fc_flag);
+		cmd = *((u32 *)cmdiocb->cmd_dmabuf->virt);
+		if (cmd == ELS_CMD_ACC) {
+			if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+				lpfc_nlp_put(ndlp);
+		}
+	}
+
+release:
 	/* Release the originating I/O reference. */
 	lpfc_els_free_iocb(phba, cmdiocb);
 	lpfc_nlp_put(ndlp);
@@ -5569,7 +5663,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 		sp->cls1.classValid = 0;
 		sp->cls2.classValid = 0;
 		sp->cls3.classValid = 0;
-		sp->cls4.classValid = 0;
 
 		/* Copy our worldwide names */
 		memcpy(&sp->portName, &vport->fc_sparam.portName,
@@ -5583,7 +5676,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 			sp->cmn.valid_vendor_ver_level = 0;
 			memset(sp->un.vendorVersion, 0,
 			       sizeof(sp->un.vendorVersion));
-			sp->cmn.bbRcvSizeMsb &= 0xF;
+			if (!test_bit(FC_PT2PT, &vport->fc_flag))
+				sp->cmn.bbRcvSizeMsb &= 0xF;
 
 			/* If our firmware supports this feature, convey that
 			 * info to the target using the vendor specific field.
@@ -8402,13 +8496,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 			       &wqe->xmit_els_rsp.wqe_com);
 
 	vport->fc_myDID = did;
 
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "3344 Deferring FLOGI ACC: rx_id: x%x,"
-			 " ox_id: x%x, hba_flag x%lx\n",
-			 phba->defer_flogi_acc.rx_id,
-			 phba->defer_flogi_acc.ox_id, phba->hba_flag);
-
 	phba->defer_flogi_acc.flag = true;
 
 	/* This nlp_get is paired with nlp_puts that reset the
@@ -8417,6 +8504,14 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 	 * processed or cancelled.
 	 */
 	phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp);
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "3344 Deferring FLOGI ACC: rx_id: x%x,"
+			 " ox_id: x%x, ndlp x%px, hba_flag x%lx\n",
+			 phba->defer_flogi_acc.rx_id,
+			 phba->defer_flogi_acc.ox_id,
+			 phba->defer_flogi_acc.ndlp,
+			 phba->hba_flag);
+
 	return 0;
 }
@@ -8734,7 +8829,7 @@ reject_out:
  * @cmdiocb: pointer to lpfc command iocb data structure.
  * @ndlp: pointer to a node-list data structure.
  *
- * This routine processes Read Timout Value (RTV) IOCB received as an
+ * This routine processes Read Timeout Value (RTV) IOCB received as an
 * ELS unsolicited event. It first checks the remote port state. If the
 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
@@ -10357,11 +10452,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	 * Do not process any unsolicited ELS commands
 	 * if the ndlp is in DEV_LOSS
 	 */
-	if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) {
-		if (newnode)
-			lpfc_nlp_put(ndlp);
+	if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag))
 		goto dropit;
-	}
 
 	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 	if (!elsiocb->ndlp)
@@ -10843,7 +10935,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
 	/*
 	 * The different unsolicited event handlers would tell us
-	 * if they are done with "mp" by setting cmd_dmabuf to NULL.
+	 * if they are done with "mp" by setting cmd_dmabuf/bpl_dmabuf to NULL.
 	 */
 	if (elsiocb->cmd_dmabuf) {
 		lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
@@ -11423,6 +11515,13 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		sp->cls2.seqDelivery = 1;
 		sp->cls3.seqDelivery = 1;
 
+		/* Fill out Auxiliary Parameter Data */
+		if (phba->pni) {
+			sp->aux.flags =
+				AUX_PARM_DATA_VALID | AUX_PARM_PNI_VALID;
+			sp->aux.pni = cpu_to_be64(phba->pni);
+		}
+
 		pcmd += sizeof(uint32_t); /* CSP Word 2 */
 		pcmd += sizeof(uint32_t); /* CSP Word 3 */
 		pcmd += sizeof(uint32_t); /* CSP Word 4 */

View File

@@ -424,6 +424,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
 			    struct lpfc_nodelist *ndlp)
 {
 	if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) {
+		clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
 		lpfc_nlp_get(ndlp);
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
 				 "8438 Devloss timeout reversed on DID x%x "
@@ -566,7 +567,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 		return fcf_inuse;
 	}
 
-	lpfc_nlp_put(ndlp);
+	if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+		lpfc_nlp_put(ndlp);
 	return fcf_inuse;
 }
@@ -4371,6 +4373,8 @@ out:
 		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
 		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
 		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+		if (phba->pni)
+			lpfc_ns_cmd(vport, SLI_CTNS_RSPNI_PNI, 0, 0);
 		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
 		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||

View File

@@ -168,6 +168,11 @@ struct lpfc_sli_ct_request {
uint8_t len; uint8_t len;
uint8_t symbname[255]; uint8_t symbname[255];
} rspn; } rspn;
struct rspni { /* For RSPNI_PNI requests */
__be64 pni;
u8 len;
u8 symbname[255];
} rspni;
struct gff { struct gff {
uint32_t PortId; uint32_t PortId;
} gff; } gff;
@@ -213,6 +218,8 @@ struct lpfc_sli_ct_request {
sizeof(struct da_id)) sizeof(struct da_id))
#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \ #define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rspn)) sizeof(struct rspn))
#define RSPNI_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rspni))
/* /*
* FsType Definitions * FsType Definitions
@@ -309,6 +316,7 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_RIP_NN 0x0235 #define SLI_CTNS_RIP_NN 0x0235
#define SLI_CTNS_RIPA_NN 0x0236 #define SLI_CTNS_RIPA_NN 0x0236
#define SLI_CTNS_RSNN_NN 0x0239 #define SLI_CTNS_RSNN_NN 0x0239
#define SLI_CTNS_RSPNI_PNI 0x0240
#define SLI_CTNS_DA_ID 0x0300 #define SLI_CTNS_DA_ID 0x0300
/* /*
@@ -512,6 +520,21 @@ struct class_parms {
uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */ uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
}; };
enum aux_parm_flags {
AUX_PARM_PNI_VALID = 0x20, /* FC Word 0, bit 29 */
AUX_PARM_DATA_VALID = 0x40, /* FC Word 0, bit 30 */
};
struct aux_parm {
u8 flags; /* FC Word 0, bit 31:24 */
u8 ext_feat[3]; /* FC Word 0, bit 23:0 */
__be64 pni; /* FC Word 1 and 2, platform name identifier */
__be16 rsvd; /* FC Word 3, bit 31:16 */
__be16 npiv_cnt; /* FC Word 3, bit 15:0 */
} __packed;
struct serv_parm { /* Structure is in Big Endian format */ struct serv_parm { /* Structure is in Big Endian format */
struct csp cmn; struct csp cmn;
struct lpfc_name portName; struct lpfc_name portName;
@@ -519,7 +542,7 @@ struct serv_parm {	/* Structure is in Big Endian format */
 	struct class_parms cls1;
 	struct class_parms cls2;
 	struct class_parms cls3;
-	struct class_parms cls4;
+	struct aux_parm aux;
 	union {
 		uint8_t vendorVersion[16];
 		struct {

View File

@@ -3057,12 +3057,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
 	lpfc_vmid_vport_cleanup(vport);

 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-		if (ndlp->nlp_DID == Fabric_Cntl_DID &&
-		    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
-			lpfc_nlp_put(ndlp);
-			continue;
-		}
-
 		/* Fabric Ports not in UNMAPPED state are cleaned up in the
 		 * DEVICE_RM event.
 		 */
@@ -7950,7 +7944,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	/* Allocate all driver workqueues here */

 	/* The lpfc_wq workqueue for deferred irq use */
-	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!phba->wq)
 		return -ENOMEM;
@@ -9082,9 +9076,9 @@ lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
 		vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
 	}

-	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 			 "6077 Setup FDMI mask: hba x%x port x%x\n",
 			 vport->fdmi_hba_mask, vport->fdmi_port_mask);
 }

 /**

View File

@@ -432,8 +432,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
 	if (sp->cls3.classValid)
 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
-	if (sp->cls4.classValid)
-		ndlp->nlp_class_sup |= FC_COS_CLASS4;
 	ndlp->nlp_maxframe =
 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

 	/* if already logged in, do implicit logout */
@@ -452,18 +450,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	 */
 	if (!(ndlp->nlp_type & NLP_FABRIC) &&
 	    !(phba->nvmet_support)) {
-		/* Clear ndlp info, since follow up PRLI may have
-		 * updated ndlp information
-		 */
-		ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
-		ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
-		ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-		ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
-		clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag);
-
-		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
-				 ndlp, NULL);
-		return 1;
+		break;
 	}
 	if (nlp_portwwn != 0 &&
 	    nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
@@ -485,7 +472,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 		break;
 	}

+	/* Clear ndlp info, since follow up processes may have
+	 * updated ndlp information
+	 */
 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
 	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
@@ -1426,8 +1415,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
 	if (sp->cls3.classValid)
 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
-	if (sp->cls4.classValid)
-		ndlp->nlp_class_sup |= FC_COS_CLASS4;
 	ndlp->nlp_maxframe =
 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

View File

@@ -27,6 +27,8 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/lockdep.h>
+#include <linux/dmi.h>
+#include <linux/of.h>

 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -8446,6 +8448,70 @@ lpfc_set_host_tm(struct lpfc_hba *phba)
 	return rc;
 }

+/**
+ * lpfc_get_platform_uuid - Attempts to extract a platform uuid
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine attempts to first read SMBIOS DMI data for the System
+ * Information structure offset 08h called System UUID. Else, no platform
+ * UUID will be advertised.
+ **/
+static void
+lpfc_get_platform_uuid(struct lpfc_hba *phba)
+{
+	int rc;
+	const char *uuid;
+	char pni[17] = {0};	/* 16 characters + '\0' */
+	bool is_ff = true, is_00 = true;
+	u8 i;
+
+	/* First attempt SMBIOS DMI */
+	uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
+	if (uuid) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2088 SMBIOS UUID %s\n",
+				uuid);
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2099 Could not extract UUID\n");
+	}
+
+	if (uuid && uuid_is_valid(uuid)) {
+		/* Generate PNI from UUID format.
+		 *
+		 * 1.) Extract lower 64 bits from UUID format.
+		 * 2.) Set 3h for NAA Locally Assigned Name Identifier format.
+		 *
+		 * e.g. xxxxxxxx-xxxx-xxxx-yyyy-yyyyyyyyyyyy
+		 *
+		 * extract the yyyy-yyyyyyyyyyyy portion
+		 * final PNI 3yyyyyyyyyyyyyyy
+		 */
+		scnprintf(pni, sizeof(pni), "3%c%c%c%s",
+			  uuid[20], uuid[21], uuid[22], &uuid[24]);
+
+		/* Sanitize the converted PNI */
+		for (i = 1; i < 16 && (is_ff || is_00); i++) {
+			if (pni[i] != '0')
+				is_00 = false;
+			if (pni[i] != 'f' && pni[i] != 'F')
+				is_ff = false;
+		}
+
+		/* Convert from char* to unsigned long */
+		rc = kstrtoul(pni, 16, &phba->pni);
+		if (!rc && !is_ff && !is_00) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2100 PNI 0x%016lx\n", phba->pni);
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2101 PNI %s generation status %d\n",
+					pni, rc);
+			phba->pni = 0;
+		}
+	}
+}
+
 /**
  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
  * @phba: Pointer to HBA context object.
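Editor's note: to make the PNI derivation above concrete, here is a worked example with a made-up UUID (illustrative values only, not taken from the patch):

/*
 * Hypothetical DMI UUID:  12345678-9abc-def0-89ab-cdef01234567
 *
 * uuid[20..22] = "9ab" and &uuid[24] = "cdef01234567", so scnprintf()
 * produces "3" + "9ab" + "cdef01234567" = "39abcdef01234567" (uuid[19]
 * is dropped in favor of the NAA 3h Locally Assigned nibble), and
 * kstrtoul(pni, 16, &phba->pni) yields phba->pni = 0x39abcdef01234567.
 */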
@@ -8529,6 +8595,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
 	}

+	/* Obtain platform UUID, only for SLI4 FC adapters */
+	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag))
+		lpfc_get_platform_uuid(phba);
+
 	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
 	    LPFC_DCBX_CEE_MODE)
 		set_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
@@ -19858,13 +19928,15 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
 }

 /**
- * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
+ * lpfc_sli4_resume_rpi - Resume traffic relative to an RPI
  * @ndlp: pointer to lpfc nodelist data structure.
  * @cmpl: completion call-back.
  * @iocbq: data to load as mbox ctx_u information
  *
- * This routine is invoked to remove the memory region that
- * provided rpi via a bitmask.
+ * Return codes
+ *	0 - successful
+ *	-ENOMEM - No available memory
+ *	-EIO - The mailbox failed to complete successfully.
  **/
 int
 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
@@ -19894,7 +19966,6 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
 		return -EIO;
 	}

-	/* Post all rpi memory regions to the port. */
 	lpfc_resume_rpi(mboxq, ndlp);
 	if (cmpl) {
 		mboxq->mbox_cmpl = cmpl;

View File

@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/

-#define LPFC_DRIVER_VERSION "14.4.0.11"
+#define LPFC_DRIVER_VERSION "14.4.0.12"
 #define LPFC_DRIVER_NAME		"lpfc"

 /* Used for SLI 2/3 */

View File

@@ -1150,9 +1150,13 @@ typedef struct LOG_BLOCK_SPAN_INFO {
 } LD_SPAN_INFO, *PLD_SPAN_INFO;

 struct MR_FW_RAID_MAP_ALL {
-	struct MR_FW_RAID_MAP raidMap;
-	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
+	/* Must be last --ends in a flexible-array member. */
+	TRAILING_OVERLAP(struct MR_FW_RAID_MAP, raidMap, ldSpanMap,
+		struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
+	);
 } __attribute__ ((packed));
+static_assert(offsetof(struct MR_FW_RAID_MAP_ALL, raidMap.ldSpanMap) ==
+	      offsetof(struct MR_FW_RAID_MAP_ALL, ldSpanMap));

 struct MR_DRV_RAID_MAP {
 	/* total size of this structure, including this field.
@@ -1194,10 +1198,13 @@ struct MR_DRV_RAID_MAP {
 	 * And it is mainly for code re-use purpose.
 	 */
 struct MR_DRV_RAID_MAP_ALL {
-	struct MR_DRV_RAID_MAP raidMap;
-	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
+	/* Must be last --ends in a flexible-array member. */
+	TRAILING_OVERLAP(struct MR_DRV_RAID_MAP, raidMap, ldSpanMap,
+		struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
+	);
 } __packed;
+static_assert(offsetof(struct MR_DRV_RAID_MAP_ALL, raidMap.ldSpanMap) ==
+	      offsetof(struct MR_DRV_RAID_MAP_ALL, ldSpanMap));
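Editor's note for readers who have not met the helper: TRAILING_OVERLAP() packs a struct that ends in a flexible-array member together with a fixed-size trailing array by overlaying both in an anonymous union. A rough sketch of what the first use above expands to (an assumption drawn from include/linux/stddef.h; the real macro is the authority):

/*
 * Both views share storage; the padding array lines the trailing members
 * up with the flexible-array member - exactly what the static_assert()s
 * in this hunk verify.
 */
union {
	struct MR_FW_RAID_MAP raidMap;
	struct {
		u8 __offset_to_ldSpanMap[offsetof(struct MR_FW_RAID_MAP, ldSpanMap)];
		struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
	};
};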

View File

@@ -1534,7 +1534,7 @@ static int __init pm8001_init(void)
 	if (pm8001_use_tasklet && !pm8001_use_msix)
 		pm8001_use_tasklet = false;

-	pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
+	pm8001_wq = alloc_workqueue("pm80xx", WQ_PERCPU, 0);
 	if (!pm8001_wq)
 		goto err;

View File

@@ -3374,7 +3374,8 @@ retry_probe:
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
 		  qedf->io_mempool);

-	qedf->link_update_wq = alloc_workqueue("qedf_%u_link", WQ_MEM_RECLAIM,
+	qedf->link_update_wq = alloc_workqueue("qedf_%u_link",
+					       WQ_MEM_RECLAIM | WQ_PERCPU,
 					       1, qedf->lport->host->host_no);
 	INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
 	INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
@@ -3585,7 +3586,8 @@ retry_probe:
 	ether_addr_copy(params.ll2_mac_address, qedf->mac);

 	/* Start LL2 processing thread */
-	qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2", WQ_MEM_RECLAIM, 1,
+	qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2",
+					    WQ_MEM_RECLAIM | WQ_PERCPU, 1,
 					    host->host_no);
 	if (!qedf->ll2_recv_wq) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
@@ -3628,7 +3630,8 @@ retry_probe:
 	}

 	qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer",
-			WQ_MEM_RECLAIM, 1, qedf->lport->host->host_no);
+			WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+			qedf->lport->host->host_no);
 	if (!qedf->timer_work_queue) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
 			 "workqueue.\n");
@@ -3641,7 +3644,8 @@ retry_probe:
 		sprintf(host_buf, "qedf_%u_dpc",
 			qedf->lport->host->host_no);
 		qedf->dpc_wq =
-			alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, host_buf);
+			alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+					host_buf);
 	}
 	INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
@@ -4177,7 +4181,8 @@ static int __init qedf_init(void)
 		goto err3;
 	}

-	qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, "qedf_io_wq");
+	qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+				     "qedf_io_wq");
 	if (!qedf_io_wq) {
 		QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
 		goto err4;

View File

@@ -2768,7 +2768,7 @@ retry_probe:
 	}

 	qedi->offload_thread = alloc_workqueue("qedi_ofld%d",
-					       WQ_MEM_RECLAIM,
+					       WQ_MEM_RECLAIM | WQ_PERCPU,
 					       1, qedi->shost->host_no);
 	if (!qedi->offload_thread) {
 		QEDI_ERR(&qedi->dbg_ctx,

View File

@@ -2799,7 +2799,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp, dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd)); cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
dprintk(2, " bus %i, target %i, lun %i\n", dprintk(2, " bus %i, target %i, lun %llu\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE); qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
@@ -2871,7 +2871,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 			remseg--;
 		}
 		dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
-			"command packet data - b %i, t %i, l %i \n",
+			"command packet data - b %i, t %i, l %llu\n",
 			SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
 			SCSI_LUN_32(cmd));
 		qla1280_dump_buffer(5, (char *)pkt,
@@ -2929,14 +2929,14 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 				remseg -= cnt;
 				dprintk(5, "qla1280_64bit_start_scsi: "
 					"continuation packet data - b %i, t "
-					"%i, l %i \n", SCSI_BUS_32(cmd),
+					"%i, l %llu\n", SCSI_BUS_32(cmd),
 					SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
 				qla1280_dump_buffer(5, (char *)pkt,
 						    REQUEST_ENTRY_SIZE);
 			}
 		} else {	/* No data transfer */
 			dprintk(5, "qla1280_64bit_start_scsi: No data, command "
-				"packet data - b %i, t %i, l %i \n",
+				"packet data - b %i, t %i, l %llu\n",
 				SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
 			qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
 		}
@@ -3655,7 +3655,7 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
dprintk(2, "qla1280_status_entry: Check " dprintk(2, "qla1280_status_entry: Check "
"condition Sense data, b %i, t %i, " "condition Sense data, b %i, t %i, "
"l %i\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), "l %llu\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
SCSI_LUN_32(cmd)); SCSI_LUN_32(cmd));
if (sense_sz) if (sense_sz)
qla1280_dump_buffer(2, qla1280_dump_buffer(2,
@@ -3955,7 +3955,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
 	sp = scsi_cmd_priv(cmd);
 	printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
-	printk("  chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
+	printk("  chan=%d, target = 0x%02x, lun = 0x%02llx, cmd_len = 0x%02x\n",
 	       SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
 	       CMD_CDBLEN(cmd));
 	printk(" CDB = ");
@@ -3976,29 +3976,6 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
printk(" underflow size = 0x%x, direction=0x%x\n", printk(" underflow size = 0x%x, direction=0x%x\n",
cmd->underflow, cmd->sc_data_direction); cmd->underflow, cmd->sc_data_direction);
} }
/**************************************************************************
* ql1280_dump_device
*
**************************************************************************/
static void
ql1280_dump_device(struct scsi_qla_host *ha)
{
struct scsi_cmnd *cp;
struct srb *sp;
int i;
printk(KERN_DEBUG "Outstanding Commands on controller:\n");
for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
if ((sp = ha->outstanding_cmds[i]) == NULL)
continue;
if ((cp = sp->cmd) == NULL)
continue;
qla1280_print_scsi_cmd(1, cp);
}
}
#endif #endif

View File

@@ -54,10 +54,11 @@
  * | Misc                         |       0xd303       | 0xd031-0xd0ff	|
  * |                              |                    | 0xd101-0xd1fe	|
  * |                              |                    | 0xd214-0xd2fe	|
- * | Target Mode		   |	  0xe081       |		|
+ * | Target Mode		   |	  0xe089       |		|
  * | Target Mode Management	   |	  0xf09b       | 0xf002		|
  * |                              |                    | 0xf046-0xf049 |
  * | Target Mode Task Management  |	  0x1000d      |		|
+ * | Target Mode SRR		   |	  0x11038      |		|
  * ----------------------------------------------------------------------
  */

View File

@@ -3503,7 +3503,6 @@ struct isp_operations {
 #define QLA_MSIX_RSP_Q			0x01
 #define QLA_ATIO_VECTOR			0x02
 #define QLA_MSIX_QPAIR_MULTIQ_RSP_Q	0x03
-#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS	0x04

 #define QLA_MIDX_DEFAULT	0
 #define QLA_MIDX_RSP_Q		1

View File

@@ -766,7 +766,7 @@ extern int qla2x00_dfs_remove(scsi_qla_host_t *);
/* Globa function prototypes for multi-q */ /* Globa function prototypes for multi-q */
extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *, extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
struct qla_msix_entry *, int); struct qla_msix_entry *);
extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,

View File

@@ -4369,6 +4369,7 @@ enable_82xx_npiv:
 			ha->max_npiv_vports =
 			    MIN_MULTI_ID_FABRIC - 1;
 		}
+		qlt_config_nvram_with_fw_version(vha);
 		qla2x00_get_resource_cnts(vha);

 		qla_init_iocb_limit(vha);

View File

@@ -4467,32 +4467,6 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }

-irqreturn_t
-qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
-{
-	struct qla_hw_data *ha;
-	struct qla_qpair *qpair;
-	struct device_reg_24xx __iomem *reg;
-	unsigned long flags;
-
-	qpair = dev_id;
-	if (!qpair) {
-		ql_log(ql_log_info, NULL, 0x505b,
-		    "%s: NULL response queue pointer.\n", __func__);
-		return IRQ_NONE;
-	}
-	ha = qpair->hw;
-	reg = &ha->iobase->isp24;
-
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-	queue_work(ha->wq, &qpair->q_work);
-
-	return IRQ_HANDLED;
-}
-
 /* Interrupt handling helpers. */

 struct qla_init_msix_entry {
@@ -4505,7 +4479,6 @@ static const struct qla_init_msix_entry msix_entries[] = {
{ "rsp_q", qla24xx_msix_rsp_q }, { "rsp_q", qla24xx_msix_rsp_q },
{ "atio_q", qla83xx_msix_atio_q }, { "atio_q", qla83xx_msix_atio_q },
{ "qpair_multiq", qla2xxx_msix_rsp_q }, { "qpair_multiq", qla2xxx_msix_rsp_q },
{ "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
}; };
static const struct qla_init_msix_entry qla82xx_msix_entries[] = { static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
@@ -4792,9 +4765,10 @@ free_irqs:
 }

 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
-	struct qla_msix_entry *msix, int vector_type)
+	struct qla_msix_entry *msix)
 {
-	const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
+	const struct qla_init_msix_entry *intr =
+		&msix_entries[QLA_MSIX_QPAIR_MULTIQ_RSP_Q];
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	int ret;

View File

@@ -253,6 +253,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		/* Issue set host interrupt command to send cmd out. */
 		ha->flags.mbox_int = 0;
 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+		reinit_completion(&ha->mbx_intr_comp);

 		/* Unlock mbx registers and wait for interrupt */
 		ql_dbg(ql_dbg_mbx, vha, 0x100f,
@@ -279,6 +280,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"cmd=%x Timeout.\n", command); "cmd=%x Timeout.\n", command);
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
reinit_completion(&ha->mbx_intr_comp);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (chip_reset != ha->chip_reset) { if (chip_reset != ha->chip_reset) {

View File

@@ -899,9 +899,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
 	    rsp->options, rsp->id, rsp->rsp_q_in,
 	    rsp->rsp_q_out);

-	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
-	    ha->flags.disable_msix_handshake ?
-	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
+	ret = qla25xx_request_irq(ha, qpair, qpair->msix);
 	if (ret)
 		goto que_failed;

View File

@@ -1292,7 +1292,7 @@ void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
 		a.reason = FCNVME_RJT_RC_LOGIC;
 		a.explanation = FCNVME_RJT_EXP_NONE;
 		xmt_reject = true;
-		kfree(item);
+		qla24xx_free_purex_item(item);
 		goto out;
 	}

View File

@@ -1862,12 +1862,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
 	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
 		sp = req->outstanding_cmds[cnt];
 		if (sp) {
-			if (qla2x00_chip_is_down(vha)) {
-				req->outstanding_cmds[cnt] = NULL;
-				sp->done(sp, res);
-				continue;
-			}
-
 			switch (sp->cmd_type) {
 			case TYPE_SRB:
 				qla2x00_abort_srb(qp, sp, res, &flags);
@@ -1881,10 +1875,26 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
 					continue;
 				}
 				cmd = (struct qla_tgt_cmd *)sp;
-				cmd->aborted = 1;
+
+				if (cmd->sg_mapped)
+					qlt_unmap_sg(vha, cmd);
+
+				if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+					cmd->aborted = 1;
+					cmd->write_data_transferred = 0;
+					cmd->state = QLA_TGT_STATE_DATA_IN;
+					ha->tgt.tgt_ops->handle_data(cmd);
+				} else {
+					ha->tgt.tgt_ops->free_cmd(cmd);
+				}
 				break;
 			case TYPE_TGT_TMCMD:
-				/* Skip task management functions. */
+				/*
+				 * Currently, only ABTS response gets on the
+				 * outstanding_cmds[]
+				 */
+				qlt_free_ul_mcmd(ha,
+				    (struct qla_tgt_mgmt_cmd *) sp);
 				break;
 			default:
 				break;
@@ -3397,7 +3407,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (unlikely(!ha->wq)) { if (unlikely(!ha->wq)) {
ret = -ENOMEM; ret = -ENOMEM;
goto probe_failed; goto probe_failed;
@@ -3444,13 +3454,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ha->mqenable = 0;

 	if (ha->mqenable) {
-		bool startit = false;
-
-		if (QLA_TGT_MODE_ENABLED())
-			startit = false;
-
-		if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
-			startit = true;
+		bool startit = !!(host->active_mode & MODE_INITIATOR);

 		/* Create start of day qpairs for Block MQ */
 		for (i = 0; i < ha->max_qpairs; i++)
@@ -5280,7 +5284,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport)
 	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
 	spin_unlock_irqrestore(&fcport->vha->work_lock, flags);

-	queue_work(system_unbound_wq, &fcport->reg_work);
+	queue_work(system_dfl_wq, &fcport->reg_work);
 }

 static
@@ -7244,6 +7248,7 @@ qla2xxx_wake_dpc(struct scsi_qla_host *vha)
 	if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
 		wake_up_process(t);
 }
+EXPORT_SYMBOL(qla2xxx_wake_dpc);

 /*
  * qla2x00_rst_aen

File diff suppressed because it is too large

View File

@@ -184,6 +184,7 @@ struct nack_to_isp {
 #define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM	0x9

 #define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL		0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_INVALID_OX_ID_RX_ID	0x17
 #define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA	0x2a

 #define NOTIFY_ACK_SUCCESS	0x01
@@ -686,6 +687,8 @@ struct qla_tgt_func_tmpl {
 	int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t,
 		uint32_t);
 	struct qla_tgt_cmd *(*get_cmd)(struct fc_port *);
+	int (*get_cmd_ref)(struct qla_tgt_cmd *cmd);
+	void (*put_cmd_ref)(struct qla_tgt_cmd *cmd);
 	void (*rel_cmd)(struct qla_tgt_cmd *);
 	void (*free_cmd)(struct qla_tgt_cmd *);
 	void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
@@ -754,6 +757,7 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
 #define QLA_TGT_STATE_NEED_DATA	1 /* target needs data to continue */
 #define QLA_TGT_STATE_DATA_IN		2 /* Data arrived + target processing */
 #define QLA_TGT_STATE_PROCESSED	3 /* target done processing */
+#define QLA_TGT_STATE_DONE		4 /* cmd being freed */

 /* ATIO task_codes field */
 #define ATIO_SIMPLE_QUEUE	0
@@ -822,18 +826,26 @@ struct qla_tgt {
 	int notify_ack_expected;
 	int abts_resp_expected;
 	int modify_lun_expected;
+
+	spinlock_t srr_lock;
+	struct list_head srr_list;
+	struct work_struct srr_work;
+
 	atomic_t tgt_global_resets_count;
 	struct list_head tgt_list_entry;
 };

 struct qla_tgt_sess_op {
 	struct scsi_qla_host *vha;
 	uint32_t chip_reset;
-	struct atio_from_isp atio;
 	struct work_struct work;
 	struct list_head cmd_list;
 	bool aborted;
 	struct rsp_que *rsp;
+
+	struct atio_from_isp atio;
+	/* DO NOT ADD ANYTHING ELSE HERE - atio must be last member */
 };

 enum trace_flags {
@@ -858,6 +870,7 @@ enum trace_flags {
 	TRC_DATA_IN = BIT_18,
 	TRC_ABORT = BIT_19,
 	TRC_DIF_ERR = BIT_20,
+	TRC_SRR_IMM = BIT_21,
 };

 struct qla_tgt_cmd {
@@ -876,25 +889,36 @@ struct qla_tgt_cmd {
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];

+	spinlock_t cmd_lock;
+
+	/* to save extra sess dereferences */
 	unsigned int conf_compl_supported:1;
 	unsigned int sg_mapped:1;
+	/* Call qlt_free_sg() if set. */
+	unsigned int free_sg:1;
 	unsigned int write_data_transferred:1;
+	/* Set if the SCSI status was sent successfully. */
+	unsigned int rsp_sent:1;
 	unsigned int q_full:1;
 	unsigned int term_exchg:1;
 	unsigned int cmd_sent_to_fw:1;
 	unsigned int cmd_in_wq:1;
 	unsigned int edif:1;
-	/*
-	 * This variable may be set from outside the LIO and I/O completion
-	 * callback functions. Do not declare this member variable as a
-	 * bitfield to avoid a read-modify-write operation when this variable
-	 * is set.
-	 */
-	unsigned int aborted;
+	/* Set if a SRR was rejected. */
+	unsigned int srr_failed:1;
+	/* Set if the exchange has been terminated. */
+	unsigned int sent_term_exchg:1;
+	/*
+	 * Set if sent_term_exchg is set, or if the cmd was aborted by a TMR,
+	 * or if some other error prevents normal processing of the command.
+	 */
+	unsigned int aborted:1;
+
+	struct qla_tgt_srr *srr;

 	struct scatterlist *sg;	/* cmd data buffer SG vector */
 	int sg_cnt;		/* SG segments count */
 	int bufflen;		/* cmd buffer length */
@@ -925,13 +949,23 @@ struct qla_tgt_cmd {
 	uint8_t scsi_status, sense_key, asc, ascq;

 	struct crc_context *ctx;
-	const uint8_t *cdb;
+	uint8_t *cdb;
 	uint64_t lba;
+	int cdb_len;
 	uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
 	uint32_t a_ref_tag, e_ref_tag;
 #define DIF_BUNDL_DMA_VALID 1
 	uint16_t prot_flags;

+	unsigned long jiffies_at_term_exchg;
+	/*
+	 * jiffies64 when qlt_rdy_to_xfer() or qlt_xmit_response() first
+	 * called, or 0 when not in those states. Used to limit the number of
+	 * SRR retries.
+	 */
+	uint64_t jiffies_at_hw_st_entry;
+
 	uint64_t jiffies_at_alloc;
 	uint64_t jiffies_at_free;
@@ -965,6 +999,7 @@ struct qla_tgt_mgmt_cmd {
 	unsigned int flags;
 #define QLA24XX_MGMT_SEND_NACK	BIT_0
 #define QLA24XX_MGMT_ABORT_IO_ATTR_VALID BIT_1
+#define QLA24XX_MGMT_LLD_OWNED	BIT_2
 	uint32_t reset_count;
 	struct work_struct work;
 	uint64_t unpacked_lun;
@@ -993,6 +1028,45 @@ struct qla_tgt_prm {
 	uint16_t tot_dsds;
 };

+/*
+ * SRR (Sequence Retransmission Request) - resend or re-receive some or all
+ * data or status to recover from a transient I/O error.
+ */
+struct qla_tgt_srr {
+	/*
+	 * Copy of immediate notify SRR message received from hw; valid only if
+	 * imm_ntfy_recvd is true.
+	 */
+	struct imm_ntfy_from_isp imm_ntfy;
+	struct list_head srr_list_entry;
+	/* The command affected by this SRR, or NULL if not yet determined. */
+	struct qla_tgt_cmd *cmd;
+	/* Used to detect if the HBA has been reset since receiving the SRR. */
+	uint32_t reset_count;
+	/*
+	 * The hardware sends two messages for each SRR - an immediate notify
+	 * and a CTIO with CTIO_SRR_RECEIVED status. These keep track of which
+	 * messages have been received. The SRR can be processed once both of
+	 * these are true.
+	 */
+	bool imm_ntfy_recvd;
+	bool ctio_recvd;
+	/*
+	 * This is set to true if the affected command was aborted (cmd may be
+	 * set to NULL), in which case the immediate notify exchange also needs
+	 * to be aborted.
+	 */
+	bool aborted;
+	/* This is set to true to force the SRR to be rejected. */
+	bool reject;
+};
+
 /* Check for Switch reserved address */
 #define IS_SW_RESV_ADDR(_s_id) \
 	((_s_id.b.domain == 0xff) && ((_s_id.b.area & 0xf0) == 0xf0))
@@ -1047,6 +1121,20 @@ static inline uint32_t sid_to_key(const be_id_t s_id)
 	    s_id.al_pa;
 }

+/*
+ * Free the scatterlist allocated by qlt_set_data_offset(). Call this only if
+ * cmd->free_sg is set.
+ */
+static inline void qlt_free_sg(struct qla_tgt_cmd *cmd)
+{
+	/*
+	 * The scatterlist may be chained to the original scatterlist, but we
+	 * only need to free the first segment here since that is the only part
+	 * allocated by qlt_set_data_offset().
+	 */
+	kfree(cmd->sg);
+}
+
 /*
  * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
  */
@@ -1055,9 +1143,14 @@ extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *,
 extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
 extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
 extern int qlt_abort_cmd(struct qla_tgt_cmd *);
+void qlt_srr_abort(struct qla_tgt_cmd *cmd, bool reject);
+void qlt_send_term_exchange(struct qla_qpair *qpair,
+	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked);
 extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+void qlt_free_ul_mcmd(struct qla_hw_data *ha, struct qla_tgt_mgmt_cmd *mcmd);
 extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd);
 extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
 extern void qlt_enable_vha(struct scsi_qla_host *);
 extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
@@ -1073,6 +1166,7 @@ extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
 	struct init_cb_81xx *);
 extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
 	struct nvram_81xx *);
+void qlt_config_nvram_with_fw_version(struct scsi_qla_host *vha);
 extern void qlt_modify_vp_config(struct scsi_qla_host *,
 	struct vp_config_entry_24xx *);
 extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);

View File

@@ -291,6 +291,16 @@ static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess)
 	return cmd;
 }

+static int tcm_qla2xxx_get_cmd_ref(struct qla_tgt_cmd *cmd)
+{
+	return target_get_sess_cmd(&cmd->se_cmd, true);
+}
+
+static void tcm_qla2xxx_put_cmd_ref(struct qla_tgt_cmd *cmd)
+{
+	target_put_sess_cmd(&cmd->se_cmd);
+}
+
 static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
 {
 	target_free_tag(cmd->sess->se_sess, &cmd->se_cmd);
@@ -303,6 +313,8 @@ static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
  */
 static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
 {
+	cmd->state = QLA_TGT_STATE_DONE;
+
 	cmd->qpair->tgt_counters.core_qla_free_cmd++;
 	cmd->cmd_in_wq = 1;
@@ -529,6 +541,9 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 		if (cmd->se_cmd.pi_err)
 			transport_generic_request_failure(&cmd->se_cmd,
 				cmd->se_cmd.pi_err);
+		else if (cmd->srr_failed)
+			transport_generic_request_failure(&cmd->se_cmd,
+				TCM_SNACK_REJECTED);
 		else
 			transport_generic_request_failure(&cmd->se_cmd,
 				TCM_CHECK_CONDITION_ABORT_CMD);
@@ -1524,6 +1539,8 @@ static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
 	.handle_data		= tcm_qla2xxx_handle_data,
 	.handle_tmr		= tcm_qla2xxx_handle_tmr,
 	.get_cmd		= tcm_qla2xxx_get_cmd,
+	.get_cmd_ref		= tcm_qla2xxx_get_cmd_ref,
+	.put_cmd_ref		= tcm_qla2xxx_put_cmd_ref,
 	.rel_cmd		= tcm_qla2xxx_rel_cmd,
 	.free_cmd		= tcm_qla2xxx_free_cmd,
 	.free_mcmd		= tcm_qla2xxx_free_mcmd,

View File

@@ -1016,7 +1016,7 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
 	uint32_t crash_record_size = 0;

 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
-	memset(&mbox_sts, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));

 	/* Get size of crash record. */
 	mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
@@ -1099,7 +1099,7 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
 	/* Get Crash Record. */
 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
-	memset(&mbox_sts, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));

 	mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
 	mbox_cmd[2] = LSDW(event_log_dma);

View File

@@ -216,6 +216,9 @@ int scsi_device_max_queue_depth(struct scsi_device *sdev)
  */
 int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
 {
+	if (!sdev->budget_map.map)
+		return -EINVAL;
+
 	depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));

 	if (depth > 0) {
@@ -255,6 +258,8 @@ EXPORT_SYMBOL(scsi_change_queue_depth);
  */
 int scsi_track_queue_full(struct scsi_device *sdev, int depth)
 {
+	if (!sdev->budget_map.map)
+		return 0;

 	/*
 	 * Don't let QUEUE_FULLs on the same
@@ -826,8 +831,11 @@ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
 	spin_lock_irqsave(shost->host_lock, flags);
 	while (list->next != &shost->__devices) {
 		next = list_entry(list->next, struct scsi_device, siblings);
-		/* skip devices that we can't get a reference to */
-		if (!scsi_device_get(next))
+		/*
+		 * Skip pseudo devices and also devices we can't get a
+		 * reference to.
+		 */
+		if (!scsi_device_is_pseudo_dev(next) && !scsi_device_get(next))
 			break;
 		next = NULL;
 		list = list->next;

View File

@@ -230,6 +230,7 @@ struct tape_block {
 #define SDEBUG_OPT_NO_CDB_NOISE	0x4000
 #define SDEBUG_OPT_HOST_BUSY		0x8000
 #define SDEBUG_OPT_CMD_ABORT		0x10000
+#define SDEBUG_OPT_UNALIGNED_WRITE	0x20000
 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
 			      SDEBUG_OPT_RESET_NOISE)
 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
@@ -237,7 +238,8 @@ struct tape_block {
 			      SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
 			      SDEBUG_OPT_SHORT_TRANSFER | \
 			      SDEBUG_OPT_HOST_BUSY | \
-			      SDEBUG_OPT_CMD_ABORT)
+			      SDEBUG_OPT_CMD_ABORT | \
+			      SDEBUG_OPT_UNALIGNED_WRITE)
 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
@@ -2961,11 +2963,11 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
 	int target_dev_id;
 	int target = scp->device->id;
 	unsigned char *ap;
-	unsigned char *arr __free(kfree);
 	unsigned char *cmd = scp->cmnd;
 	bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
+	unsigned char *arr __free(kfree) = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);

-	arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
 	if (!arr)
 		return -ENOMEM;
 	dbd = !!(cmd[1] & 0x8);	/* disable block descriptors */
@@ -4932,6 +4934,14 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	u8 *cmd = scp->cmnd;
 	bool meta_data_locked = false;

+	if (unlikely(sdebug_opts & SDEBUG_OPT_UNALIGNED_WRITE &&
+		     atomic_read(&sdeb_inject_pending))) {
+		atomic_set(&sdeb_inject_pending, 0);
+		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
+				UNALIGNED_WRITE_ASCQ);
+		return check_condition_result;
+	}
+
 	switch (cmd[0]) {
 	case WRITE_16:
 		ei_lba = 0;
@@ -6752,20 +6762,59 @@ static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
 	return false;
 }

+struct sdebug_abort_cmd {
+	u32 unique_tag;
+};
+
+enum sdebug_internal_cmd_type {
+	SCSI_DEBUG_ABORT_CMD,
+};
+
+struct sdebug_internal_cmd {
+	enum sdebug_internal_cmd_type type;
+	union {
+		struct sdebug_abort_cmd abort_cmd;
+	};
+};
+
+union sdebug_priv {
+	struct sdebug_scsi_cmd cmd;
+	struct sdebug_internal_cmd internal_cmd;
+};
+
 /*
- * Called from scsi_debug_abort() only, which is for timed-out cmd.
+ * Abort SCSI command @cmnd. Only called from scsi_debug_abort(). Although
+ * it would be possible to call scsi_debug_stop_cmnd() directly, an internal
+ * command is allocated and submitted to trigger the reserved command
+ * infrastructure.
  */
 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
 {
-	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
-	unsigned long flags;
-	bool res;
+	struct Scsi_Host *shost = cmnd->device->host;
+	struct request *rq = scsi_cmd_to_rq(cmnd);
+	u32 unique_tag = blk_mq_unique_tag(rq);
+	struct sdebug_internal_cmd *internal_cmd;
+	struct scsi_cmnd *abort_cmd;
+	struct request *abort_rq;
+	blk_status_t res;

-	spin_lock_irqsave(&sdsc->lock, flags);
-	res = scsi_debug_stop_cmnd(cmnd);
-	spin_unlock_irqrestore(&sdsc->lock, flags);
+	abort_cmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
+					  BLK_MQ_REQ_RESERVED);
+	if (!abort_cmd)
+		return false;
+	internal_cmd = scsi_cmd_priv(abort_cmd);
+	*internal_cmd = (struct sdebug_internal_cmd) {
+		.type = SCSI_DEBUG_ABORT_CMD,
+		.abort_cmd = {
+			.unique_tag = unique_tag,
+		},
+	};
+	abort_rq = scsi_cmd_to_rq(abort_cmd);
+	abort_rq->timeout = secs_to_jiffies(3);
+	res = blk_execute_rq(abort_rq, true);
+	scsi_put_internal_cmd(abort_cmd);

-	return res;
+	return res == BLK_STS_OK;
 }

 /*
@@ -9220,6 +9269,56 @@ out_handle:
 	return ret;
 }

+/* Process @scp, a request to abort a SCSI command by tag. */
+static void scsi_debug_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *scp)
+{
+	struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
+	struct sdebug_abort_cmd *abort_cmd = &internal_cmd->abort_cmd;
+	const u32 unique_tag = abort_cmd->unique_tag;
+	struct scsi_cmnd *to_be_aborted_scmd =
+		scsi_host_find_tag(shost, unique_tag);
+	struct sdebug_scsi_cmd *to_be_aborted_sdsc =
+		scsi_cmd_priv(to_be_aborted_scmd);
+	bool res = false;
+
+	if (!to_be_aborted_scmd) {
+		pr_err("%s: command with tag %#x not found\n", __func__,
+		       unique_tag);
+		return;
+	}
+	scoped_guard(spinlock_irqsave, &to_be_aborted_sdsc->lock)
+		res = scsi_debug_stop_cmnd(to_be_aborted_scmd);
+	if (res)
+		pr_info("%s: aborted command with tag %#x\n",
+			__func__, unique_tag);
+	else
+		pr_err("%s: failed to abort command with tag %#x\n",
+		       __func__, unique_tag);
+
+	set_host_byte(scp, res ? DID_OK : DID_ERROR);
+}
+
+static int scsi_debug_process_reserved_command(struct Scsi_Host *shost,
+					       struct scsi_cmnd *scp)
+{
+	struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
+
+	switch (internal_cmd->type) {
+	case SCSI_DEBUG_ABORT_CMD:
+		scsi_debug_abort_cmd(shost, scp);
+		break;
+	default:
+		WARN_ON_ONCE(true);
+		set_host_byte(scp, DID_ERROR);
+		break;
+	}
+	scsi_done(scp);
+
+	return 0;
+}
+
 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
 				   struct scsi_cmnd *scp)
 {
@@ -9420,6 +9519,9 @@ static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
 	struct sdebug_defer *sd_dp = &sdsc->sd_dp;

+	if (blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd)))
+		return 0;
+
 	spin_lock_init(&sdsc->lock);
 	hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
 		      HRTIMER_MODE_REL_PINNED);
@@ -9439,6 +9541,7 @@ static const struct scsi_host_template sdebug_driver_template = {
 	.sdev_destroy =		scsi_debug_sdev_destroy,
 	.ioctl =		scsi_debug_ioctl,
 	.queuecommand =		scsi_debug_queuecommand,
+	.queue_reserved_command = scsi_debug_process_reserved_command,
 	.change_queue_depth =	sdebug_change_qdepth,
 	.map_queues =		sdebug_map_queues,
 	.mq_poll =		sdebug_blk_mq_poll,
@@ -9448,6 +9551,7 @@ static const struct scsi_host_template sdebug_driver_template = {
 	.eh_bus_reset_handler = scsi_debug_bus_reset,
 	.eh_host_reset_handler = scsi_debug_host_reset,
 	.can_queue =		SDEBUG_CANQUEUE,
+	.nr_reserved_cmds =	1,
 	.this_id =		7,
 	.sg_tablesize =		SG_MAX_SEGMENTS,
 	.cmd_per_lun =		DEF_CMD_PER_LUN,
@@ -9456,7 +9560,7 @@ static const struct scsi_host_template sdebug_driver_template = {
 	.module =		THIS_MODULE,
 	.skip_settle_delay =	1,
 	.track_queue_depth =	1,
-	.cmd_size =		sizeof(struct sdebug_scsi_cmd),
+	.cmd_size =		sizeof(union sdebug_priv),
 	.init_cmd_priv =	sdebug_init_cmd_priv,
 	.target_alloc =		sdebug_target_alloc,
 	.target_destroy =	sdebug_target_destroy,

View File

@@ -749,6 +749,9 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
 	const struct scsi_host_template *sht = sdev->host->hostt;
 	struct scsi_device *tmp_sdev;

+	if (!sdev->budget_map.map)
+		return;
+
 	if (!sht->track_queue_depth ||
 	    sdev->queue_depth >= sdev->max_queue_depth)
 		return;

View File

@@ -396,7 +396,8 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
 	if (starget->can_queue > 0)
 		atomic_dec(&starget->target_busy);

-	sbitmap_put(&sdev->budget_map, cmd->budget_token);
+	if (sdev->budget_map.map)
+		sbitmap_put(&sdev->budget_map, cmd->budget_token);
 	cmd->budget_token = -1;
 }
@@ -1360,6 +1361,9 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 {
 	int token;

+	if (!sdev->budget_map.map)
+		return INT_MAX;
+
 	token = sbitmap_get(&sdev->budget_map);
 	if (token < 0)
 		return -1;
@@ -1530,6 +1534,14 @@ static void scsi_complete(struct request *rq)
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 	enum scsi_disposition disposition;

+	if (blk_mq_is_reserved_rq(rq)) {
+		/* Only pass-through requests are supported in this code path. */
+		WARN_ON_ONCE(!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)));
+		scsi_mq_uninit_cmd(cmd);
+		__blk_mq_end_request(rq, scsi_result_to_blk_status(cmd->result));
+		return;
+	}
+
 	INIT_LIST_HEAD(&cmd->eh_entry);

 	atomic_inc(&cmd->device->iodone_cnt);
@@ -1749,7 +1761,8 @@ static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
 {
 	struct scsi_device *sdev = q->queuedata;

-	sbitmap_put(&sdev->budget_map, budget_token);
+	if (sdev->budget_map.map)
+		sbitmap_put(&sdev->budget_map, budget_token);
 }

 /*
@@ -1818,25 +1831,31 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	WARN_ON_ONCE(cmd->budget_token < 0);

 	/*
-	 * If the device is not in running state we will reject some or all
-	 * commands.
+	 * Bypass the SCSI device, SCSI target and SCSI host checks for
+	 * reserved commands.
 	 */
-	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
-		ret = scsi_device_state_check(sdev, req);
-		if (ret != BLK_STS_OK)
-			goto out_put_budget;
-	}
+	if (!blk_mq_is_reserved_rq(req)) {
+		/*
+		 * If the device is not in running state we will reject some or
+		 * all commands.
+		 */
+		if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+			ret = scsi_device_state_check(sdev, req);
+			if (ret != BLK_STS_OK)
+				goto out_put_budget;
+		}

-	ret = BLK_STS_RESOURCE;
-	if (!scsi_target_queue_ready(shost, sdev))
-		goto out_put_budget;
-	if (unlikely(scsi_host_in_recovery(shost))) {
-		if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
-			ret = BLK_STS_OFFLINE;
-		goto out_dec_target_busy;
+		ret = BLK_STS_RESOURCE;
+		if (!scsi_target_queue_ready(shost, sdev))
+			goto out_put_budget;
+		if (unlikely(scsi_host_in_recovery(shost))) {
+			if (cmd->flags & SCMD_FAIL_IF_RECOVERING)
+				ret = BLK_STS_OFFLINE;
+			goto out_dec_target_busy;
+		}
+		if (!scsi_host_queue_ready(q, shost, sdev, cmd))
+			goto out_dec_target_busy;
 	}
-	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
-		goto out_dec_target_busy;

 	/*
 	 * Only clear the driver-private command data if the LLD does not supply
@@ -1865,6 +1884,14 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	cmd->submitter = SUBMITTED_BY_BLOCK_LAYER;

 	blk_mq_start_request(req);
+
+	if (blk_mq_is_reserved_rq(req)) {
+		reason = shost->hostt->queue_reserved_command(shost, cmd);
+		if (reason) {
+			ret = BLK_STS_RESOURCE;
+			goto out_put_budget;
+		}
+		return BLK_STS_OK;
+	}
+
 	reason = scsi_dispatch_cmd(cmd);
 	if (reason) {
 		scsi_set_blocked(cmd, reason);
@@ -2083,7 +2110,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 		tag_set->ops = &scsi_mq_ops_no_commit;
 	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
 	tag_set->nr_maps = shost->nr_maps ? : 1;
-	tag_set->queue_depth = shost->can_queue;
+	tag_set->queue_depth = shost->can_queue + shost->nr_reserved_cmds;
+	tag_set->reserved_tags = shost->nr_reserved_cmds;
 	tag_set->cmd_size = cmd_size;
 	tag_set->numa_node = dev_to_node(shost->dma_dev);
 	if (shost->hostt->tag_alloc_policy_rr)
@@ -2106,6 +2134,44 @@ void scsi_mq_free_tags(struct kref *kref)
 	complete(&shost->tagset_freed);
 }

+/**
+ * scsi_get_internal_cmd() - Allocate an internal SCSI command.
+ * @sdev: SCSI device from which to allocate the command
+ * @data_direction: Data direction for the allocated command
+ * @flags: request allocation flags, e.g. BLK_MQ_REQ_RESERVED or
+ *	BLK_MQ_REQ_NOWAIT.
+ *
+ * Allocates a SCSI command for internal LLDD use.
+ */
+struct scsi_cmnd *scsi_get_internal_cmd(struct scsi_device *sdev,
+					enum dma_data_direction data_direction,
+					blk_mq_req_flags_t flags)
+{
+	enum req_op op = data_direction == DMA_TO_DEVICE ? REQ_OP_DRV_OUT :
+			 REQ_OP_DRV_IN;
+	struct scsi_cmnd *scmd;
+	struct request *rq;
+
+	rq = scsi_alloc_request(sdev->request_queue, op, flags);
+	if (IS_ERR(rq))
+		return NULL;
+	scmd = blk_mq_rq_to_pdu(rq);
+	scmd->device = sdev;
+
+	return scmd;
+}
+EXPORT_SYMBOL_GPL(scsi_get_internal_cmd);
+
+/**
+ * scsi_put_internal_cmd() - Free an internal SCSI command.
+ * @scmd: SCSI command to be freed
+ */
+void scsi_put_internal_cmd(struct scsi_cmnd *scmd)
+{
+	blk_mq_free_request(blk_mq_rq_from_pdu(scmd));
+}
+EXPORT_SYMBOL_GPL(scsi_put_internal_cmd);
+
 /**
  * scsi_device_from_queue - return sdev associated with a request_queue
  * @q: The request queue to return the sdev from
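Editor's note: a minimal caller sketch for the new pair, modeled on the scsi_debug abort path earlier in this section (hypothetical LLDD code; names such as sdev are assumed to be in scope):

/* Allocate a reserved internal command, run it synchronously, free it. */
struct scsi_cmnd *scmd;
struct request *rq;

scmd = scsi_get_internal_cmd(sdev, DMA_NONE, BLK_MQ_REQ_RESERVED);
if (!scmd)
	return -ENOMEM;		/* no reserved tag free */
rq = scsi_cmd_to_rq(scmd);
rq->timeout = 10 * HZ;		/* example timeout */
blk_execute_rq(rq, true);	/* dispatched via ->queue_reserved_command() */
scsi_put_internal_cmd(scmd);	/* returns the reserved tag */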

View File

@@ -26,9 +26,9 @@ static void scsi_log_release_buffer(char *bufptr)
 	kfree(bufptr);
 }

-static inline const char *scmd_name(const struct scsi_cmnd *scmd)
+static inline const char *scmd_name(struct scsi_cmnd *scmd)
 {
-	struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
+	const struct request *rq = scsi_cmd_to_rq(scmd);

 	if (!rq->q || !rq->q->disk)
 		return NULL;
@@ -80,8 +80,8 @@ void sdev_prefix_printk(const char *level, const struct scsi_device *sdev,
 }
 EXPORT_SYMBOL(sdev_prefix_printk);

-void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
-		 const char *fmt, ...)
+void scmd_printk(const char *level, struct scsi_cmnd *scmd, const char *fmt,
+		 ...)
 {
 	va_list args;
 	char *logbuf;
@@ -94,7 +94,7 @@ void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
 	if (!logbuf)
 		return;
 	off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
-				 scsi_cmd_to_rq((struct scsi_cmnd *)scmd)->tag);
+				 scsi_cmd_to_rq(scmd)->tag);
 	if (off < logbuf_len) {
 		va_start(args, fmt);
 		off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
@@ -371,16 +371,15 @@ void __scsi_print_sense(const struct scsi_device *sdev, const char *name,
 EXPORT_SYMBOL(__scsi_print_sense);

 /* Normalize and print sense buffer in SCSI command */
-void scsi_print_sense(const struct scsi_cmnd *cmd)
+void scsi_print_sense(struct scsi_cmnd *cmd)
 {
 	scsi_log_print_sense(cmd->device, scmd_name(cmd),
-			     scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag,
-			     cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+			     scsi_cmd_to_rq(cmd)->tag, cmd->sense_buffer,
+			     SCSI_SENSE_BUFFERSIZE);
 }
 EXPORT_SYMBOL(scsi_print_sense);

-void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
-		       int disposition)
+void scsi_print_result(struct scsi_cmnd *cmd, const char *msg, int disposition)
 {
 	char *logbuf;
 	size_t off, logbuf_len;
@@ -393,7 +392,7 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
return; return;
off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd), off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd),
scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag); scsi_cmd_to_rq(cmd)->tag);
if (off >= logbuf_len) if (off >= logbuf_len)
goto out_printk; goto out_printk;
@@ -205,7 +205,6 @@ static int scsi_runtime_idle(struct device *dev)
/* Insert hooks here for targets, hosts, and transport classes */ /* Insert hooks here for targets, hosts, and transport classes */
if (scsi_is_sdev_device(dev)) { if (scsi_is_sdev_device(dev)) {
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev); pm_runtime_autosuspend(dev);
return -EBUSY; return -EBUSY;
} }
@@ -135,6 +135,7 @@ extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, u64, enum scsi_scan_mode); unsigned int, u64, enum scsi_scan_mode);
extern void scsi_forget_host(struct Scsi_Host *); extern void scsi_forget_host(struct Scsi_Host *);
struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *);
/* scsi_sysctl.c */ /* scsi_sysctl.c */
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
@@ -347,6 +347,11 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kref_get(&sdev->host->tagset_refcnt); kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q; sdev->request_queue = q;
scsi_sysfs_device_initialize(sdev);
if (scsi_device_is_pseudo_dev(sdev))
return sdev;
depth = sdev->host->cmd_per_lun ?: 1; depth = sdev->host->cmd_per_lun ?: 1;
/* /*
@@ -363,8 +368,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
scsi_change_queue_depth(sdev, depth); scsi_change_queue_depth(sdev, depth);
scsi_sysfs_device_initialize(sdev);
if (shost->hostt->sdev_init) { if (shost->hostt->sdev_init) {
ret = shost->hostt->sdev_init(sdev); ret = shost->hostt->sdev_init(sdev);
if (ret) { if (ret) {
@@ -1068,6 +1071,11 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
transport_configure_device(&sdev->sdev_gendev); transport_configure_device(&sdev->sdev_gendev);
sdev->sdev_bflags = *bflags;
if (scsi_device_is_pseudo_dev(sdev))
return SCSI_SCAN_LUN_PRESENT;
/* /*
* No need to freeze the queue as it isn't reachable to anyone else yet. * No need to freeze the queue as it isn't reachable to anyone else yet.
*/ */
@@ -1113,7 +1121,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->max_queue_depth = sdev->queue_depth; sdev->max_queue_depth = sdev->queue_depth;
WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth); WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
sdev->sdev_bflags = *bflags;
/* /*
* Ok, the device is now all set up, we can * Ok, the device is now all set up, we can
@@ -1212,6 +1219,12 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
if (!sdev) if (!sdev)
goto out; goto out;
if (scsi_device_is_pseudo_dev(sdev)) {
if (bflagsp)
*bflagsp = BLIST_NOLUN;
return SCSI_SCAN_LUN_PRESENT;
}
result = kmalloc(result_len, GFP_KERNEL); result = kmalloc(result_len, GFP_KERNEL);
if (!result) if (!result)
goto out_free_sdev; goto out_free_sdev;
@@ -2083,12 +2096,65 @@ void scsi_forget_host(struct Scsi_Host *shost)
restart: restart:
spin_lock_irqsave(shost->host_lock, flags); spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(sdev, &shost->__devices, siblings) { list_for_each_entry(sdev, &shost->__devices, siblings) {
if (sdev->sdev_state == SDEV_DEL) if (scsi_device_is_pseudo_dev(sdev) ||
sdev->sdev_state == SDEV_DEL)
continue; continue;
spin_unlock_irqrestore(shost->host_lock, flags); spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_device(sdev); __scsi_remove_device(sdev);
goto restart; goto restart;
} }
spin_unlock_irqrestore(shost->host_lock, flags); spin_unlock_irqrestore(shost->host_lock, flags);
/*
* Remove the pseudo device last since it may be needed during removal
* of other SCSI devices.
*/
if (shost->pseudo_sdev)
__scsi_remove_device(shost->pseudo_sdev);
} }
/**
* scsi_get_pseudo_sdev() - Attach a pseudo SCSI device to a SCSI host
* @shost: Host that needs a pseudo SCSI device
*
* Lock status: None assumed.
*
* Returns: The scsi_device or NULL
*
* Notes:
 * Attach a single scsi_device to the Scsi_Host. The primary purpose of this
 * device is to serve as a container from which SCSI commands can be
* allocated. Each SCSI command will carry a command tag allocated by the
* block layer. These SCSI commands can be used by the LLDD to send
* internal or passthrough commands without having to manage tag allocation
* inside the LLDD.
*/
struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *shost)
{
struct scsi_device *sdev = NULL;
struct scsi_target *starget;
guard(mutex)(&shost->scan_mutex);
if (!scsi_host_scan_allowed(shost))
goto out;
starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->max_id);
if (!starget)
goto out;
sdev = scsi_alloc_sdev(starget, U64_MAX, NULL);
if (!sdev) {
scsi_target_reap(starget);
goto put_target;
}
sdev->borken = 0;
put_target:
/* See also the get_device(dev) call in scsi_alloc_target(). */
put_device(&starget->dev);
out:
return sdev;
}
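Paired with scsi_get_internal_cmd() earlier in this series, the expected pattern is roughly the following sketch (assuming the caller stores the result in shost->pseudo_sdev, the field scsi_forget_host() checks above):

/* Sketch: obtain the pseudo device once during host initialization. */
static int example_host_init(struct Scsi_Host *shost)
{
	shost->pseudo_sdev = scsi_get_pseudo_sdev(shost);
	if (!shost->pseudo_sdev)
		return -ENODEV;

	/* Internal commands can now be allocated against shost->pseudo_sdev. */
	return 0;
}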
@@ -605,68 +605,6 @@ sdev_show_##field (struct device *dev, struct device_attribute *attr, \
sdev_show_function(field, format_string) \ sdev_show_function(field, format_string) \
static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL); static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
/*
* sdev_rw_attr: create a function and attribute variable for a
* read/write field.
*/
#define sdev_rw_attr(field, format_string) \
sdev_show_function(field, format_string) \
\
static ssize_t \
sdev_store_##field (struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct scsi_device *sdev; \
sdev = to_scsi_device(dev); \
sscanf (buf, format_string, &sdev->field); \
return count; \
} \
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
/* Currently we don't export bit fields, but we might in future,
* so leave this code in */
#if 0
/*
* sdev_rd_attr: create a function and attribute variable for a
* read/write bit field.
*/
#define sdev_rw_attr_bit(field) \
sdev_show_function(field, "%d\n") \
\
static ssize_t \
sdev_store_##field (struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
int ret; \
struct scsi_device *sdev; \
ret = scsi_sdev_check_buf_bit(buf); \
if (ret >= 0) { \
sdev = to_scsi_device(dev); \
sdev->field = ret; \
ret = count; \
} \
return ret; \
} \
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
/*
* scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
* else return -EINVAL.
*/
static int scsi_sdev_check_buf_bit(const char *buf)
{
if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
if (buf[0] == '1')
return 1;
else if (buf[0] == '0')
return 0;
else
return -EINVAL;
} else
return -EINVAL;
}
#endif
/* /*
* Create the actual show/store functions and data structures. * Create the actual show/store functions and data structures.
*/ */
@@ -710,10 +648,14 @@ static ssize_t
sdev_store_timeout (struct device *dev, struct device_attribute *attr, sdev_store_timeout (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count) const char *buf, size_t count)
{ {
struct scsi_device *sdev; struct scsi_device *sdev = to_scsi_device(dev);
int timeout; int ret, timeout;
sdev = to_scsi_device(dev);
sscanf (buf, "%d\n", &timeout); ret = kstrtoint(buf, 0, &timeout);
if (ret)
return ret;
if (timeout <= 0)
return -EINVAL;
blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
return count; return count;
} }
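Note the behavioral tightening: kstrtoint() makes a malformed string fail the write with an error, where the old sscanf() silently ignored parse failures, and non-positive timeouts are now rejected before they reach blk_queue_rq_timeout().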
@@ -1406,6 +1348,9 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
int error; int error;
struct scsi_target *starget = sdev->sdev_target; struct scsi_target *starget = sdev->sdev_target;
if (WARN_ON_ONCE(scsi_device_is_pseudo_dev(sdev)))
return -EINVAL;
error = scsi_target_add(starget); error = scsi_target_add(starget);
if (error) if (error)
return error; return error;
@@ -1513,7 +1458,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags); kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work); cancel_work_sync(&sdev->requeue_work);
if (sdev->host->hostt->sdev_destroy) if (!scsi_device_is_pseudo_dev(sdev) && sdev->host->hostt->sdev_destroy)
sdev->host->hostt->sdev_destroy(sdev); sdev->host->hostt->sdev_destroy(sdev);
transport_destroy_device(dev); transport_destroy_device(dev);
@@ -441,7 +441,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
fc_host->next_vport_number = 0; fc_host->next_vport_number = 0;
fc_host->npiv_vports_inuse = 0; fc_host->npiv_vports_inuse = 0;
fc_host->work_q = alloc_workqueue("fc_wq_%d", 0, 0, shost->host_no); fc_host->work_q = alloc_workqueue("fc_wq_%d", WQ_PERCPU, 0,
shost->host_no);
if (!fc_host->work_q) if (!fc_host->work_q)
return -ENOMEM; return -ENOMEM;
@@ -3088,7 +3089,7 @@ fc_remote_port_create(struct Scsi_Host *shost, int channel,
spin_unlock_irqrestore(shost->host_lock, flags); spin_unlock_irqrestore(shost->host_lock, flags);
rport->devloss_work_q = alloc_workqueue("fc_dl_%d_%d", 0, 0, rport->devloss_work_q = alloc_workqueue("fc_dl_%d_%d", WQ_PERCPU, 0,
shost->host_no, rport->number); shost->host_no, rport->number);
if (!rport->devloss_work_q) { if (!rport->devloss_work_q) {
printk(KERN_ERR "FC Remote Port alloc_workqueue failed\n"); printk(KERN_ERR "FC Remote Port alloc_workqueue failed\n");
@@ -3961,7 +3961,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
list_del_init(&session->sess_list); list_del_init(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags); spin_unlock_irqrestore(&sesslock, flags);
queue_work(system_unbound_wq, &session->destroy_work); queue_work(system_dfl_wq, &session->destroy_work);
} }
break; break;
case ISCSI_UEVENT_UNBIND_SESSION: case ISCSI_UEVENT_UNBIND_SESSION:
@@ -318,6 +318,35 @@ static ssize_t manage_shutdown_store(struct device *dev,
} }
static DEVICE_ATTR_RW(manage_shutdown); static DEVICE_ATTR_RW(manage_shutdown);
static ssize_t manage_restart_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
return sysfs_emit(buf, "%u\n", sdp->manage_restart);
}
static ssize_t manage_restart_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
bool v;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (kstrtobool(buf, &v))
return -EINVAL;
sdp->manage_restart = v;
return count;
}
static DEVICE_ATTR_RW(manage_restart);
static ssize_t static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{ {
@@ -654,6 +683,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_manage_system_start_stop.attr, &dev_attr_manage_system_start_stop.attr,
&dev_attr_manage_runtime_start_stop.attr, &dev_attr_manage_runtime_start_stop.attr,
&dev_attr_manage_shutdown.attr, &dev_attr_manage_shutdown.attr,
&dev_attr_manage_restart.attr,
&dev_attr_protection_type.attr, &dev_attr_protection_type.attr,
&dev_attr_protection_mode.attr, &dev_attr_protection_mode.attr,
&dev_attr_app_tag_own.attr, &dev_attr_app_tag_own.attr,
@@ -4177,7 +4207,9 @@ static void sd_shutdown(struct device *dev)
(system_state == SYSTEM_POWER_OFF && (system_state == SYSTEM_POWER_OFF &&
sdkp->device->manage_shutdown) || sdkp->device->manage_shutdown) ||
(system_state == SYSTEM_RUNNING && (system_state == SYSTEM_RUNNING &&
sdkp->device->manage_runtime_start_stop)) { sdkp->device->manage_runtime_start_stop) ||
(system_state == SYSTEM_RESTART &&
sdkp->device->manage_restart)) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0); sd_start_stop_device(sdkp, 0);
} }
@@ -133,6 +133,7 @@ static int sim710_probe_common(struct device *dev, unsigned long base_addr,
out_put_host: out_put_host:
scsi_host_put(host); scsi_host_put(host);
out_release: out_release:
ioport_unmap(hostdata->base);
release_region(base_addr, 64); release_region(base_addr, 64);
out_free: out_free:
kfree(hostdata); kfree(hostdata);
@@ -148,6 +149,7 @@ static int sim710_device_remove(struct device *dev)
scsi_remove_host(host); scsi_remove_host(host);
NCR_700_release(host); NCR_700_release(host);
ioport_unmap(hostdata->base);
kfree(hostdata); kfree(hostdata);
free_irq(host->irq, host); free_irq(host->irq, host);
release_region(host->base, 64); release_region(host->base, 64);
@@ -34,11 +34,11 @@
#define BUILD_TIMESTAMP #define BUILD_TIMESTAMP
#endif #endif
#define DRIVER_VERSION "2.1.34-035" #define DRIVER_VERSION "2.1.36-026"
#define DRIVER_MAJOR 2 #define DRIVER_MAJOR 2
#define DRIVER_MINOR 1 #define DRIVER_MINOR 1
#define DRIVER_RELEASE 34 #define DRIVER_RELEASE 36
#define DRIVER_REVISION 35 #define DRIVER_REVISION 26
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \ #define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")" DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -5555,14 +5555,25 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd); pqi_scsi_done(scmd);
} }
/*
 * Adjust the timeout value sent to the firmware for physical devices:
 * subtract 3 seconds from any timeout of 8 seconds or more.
*
* This provides the firmware with additional time to attempt early recovery
* before the OS-level timeout occurs.
*/
#define ADJUST_SECS_TIMEOUT_VALUE(tv) (((tv) >= 8) ? ((tv) - 3) : (tv))
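A few worked values for the macro (illustrative):

/*
 * ADJUST_SECS_TIMEOUT_VALUE(30) == 27  - default 30s timeout, 3s of headroom
 * ADJUST_SECS_TIMEOUT_VALUE(8)  == 5   - smallest value that is adjusted
 * ADJUST_SECS_TIMEOUT_VALUE(7)  == 7   - shorter timeouts pass through as-is
 */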
static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group, bool io_high_prio) struct pqi_queue_group *queue_group, bool io_high_prio)
{ {
int rc; int rc;
u32 timeout;
size_t cdb_length; size_t cdb_length;
struct pqi_io_request *io_request; struct pqi_io_request *io_request;
struct pqi_raid_path_request *request; struct pqi_raid_path_request *request;
struct request *rq;
io_request = pqi_alloc_io_request(ctrl_info, scmd); io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request) if (!io_request)
@@ -5634,6 +5645,12 @@ static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
return SCSI_MLQUEUE_HOST_BUSY; return SCSI_MLQUEUE_HOST_BUSY;
} }
if (device->is_physical_device) {
rq = scsi_cmd_to_rq(scmd);
timeout = rq->timeout / HZ;
put_unaligned_le32(ADJUST_SECS_TIMEOUT_VALUE(timeout), &request->timeout);
}
pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
return 0; return 0;
@@ -6410,10 +6427,22 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev
static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode) static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
{ {
unsigned long flags;
int rc; int rc;
mutex_lock(&ctrl_info->lun_reset_mutex); mutex_lock(&ctrl_info->lun_reset_mutex);
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
if (pqi_find_scsi_dev(ctrl_info, device->bus, device->target, device->lun) == NULL) {
dev_warn(&ctrl_info->pci_dev->dev,
"skipping reset of scsi %d:%d:%d:%u, device has been removed\n",
ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
mutex_unlock(&ctrl_info->lun_reset_mutex);
return 0;
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
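This guard closes a race with hot removal: if the device is no longer on the controller's device list, the handler now returns success after a warning instead of issuing a LUN reset to a device that has already been removed.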
dev_err(&ctrl_info->pci_dev->dev, dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n", "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
@@ -6594,7 +6623,9 @@ static void pqi_sdev_destroy(struct scsi_device *sdev)
{ {
struct pqi_ctrl_info *ctrl_info; struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device; struct pqi_scsi_dev *device;
struct pqi_tmf_work *tmf_work;
int mutex_acquired; int mutex_acquired;
unsigned int lun;
unsigned long flags; unsigned long flags;
ctrl_info = shost_to_hba(sdev->host); ctrl_info = shost_to_hba(sdev->host);
@@ -6621,8 +6652,13 @@ static void pqi_sdev_destroy(struct scsi_device *sdev)
mutex_unlock(&ctrl_info->scan_mutex); mutex_unlock(&ctrl_info->scan_mutex);
for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
cancel_work_sync(&tmf_work->work_struct);
mutex_lock(&ctrl_info->lun_reset_mutex);
pqi_dev_info(ctrl_info, "removed", device); pqi_dev_info(ctrl_info, "removed", device);
pqi_free_device(device); pqi_free_device(device);
mutex_unlock(&ctrl_info->lun_reset_mutex);
} }
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
@@ -8936,7 +8972,8 @@ static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS) if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
goto out; goto out;
host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL); host_memory_descriptor->host_chunk_virt_address =
kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
if (!host_memory_descriptor->host_chunk_virt_address) if (!host_memory_descriptor->host_chunk_virt_address)
goto out; goto out;
@@ -10108,6 +10145,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x207d, 0x4240) 0x207d, 0x4240)
}, },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x207d, 0x4840)
},
{ {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADVANTECH, 0x8312) PCI_VENDOR_ID_ADVANTECH, 0x8312)
@@ -3526,8 +3526,64 @@ static int partition_tape(struct scsi_tape *STp, int size)
out: out:
return result; return result;
} }
/*
* Handles any extra state needed for ioctls which are not st-specific.
 * Called with the scsi_tape lock held; the lock is released before returning.
*/
static long st_common_ioctl(struct scsi_tape *STp, struct st_modedef *STm,
struct file *file, unsigned int cmd_in,
unsigned long arg)
{
int i, retval = 0;
if (!STm->defined) {
retval = -ENXIO;
goto out;
}
switch (cmd_in) {
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
case SCSI_IOCTL_GET_PCI:
break;
case SG_IO:
case SCSI_IOCTL_SEND_COMMAND:
case CDROM_SEND_PACKET:
if (!capable(CAP_SYS_RAWIO)) {
retval = -EPERM;
goto out;
}
fallthrough;
default:
if ((i = flush_buffer(STp, 0)) < 0) {
retval = i;
goto out;
} else { /* flush_buffer succeeds */
if (STp->can_partitions) {
i = switch_partition(STp);
if (i < 0) {
retval = i;
goto out;
}
}
}
}
mutex_unlock(&STp->lock);
retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE,
cmd_in, (void __user *)arg);
if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
/* unload */
STp->rew_at_close = 0;
STp->ready = ST_NO_TAPE;
}
return retval;
out:
mutex_unlock(&STp->lock);
return retval;
}
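With this helper in place, st_ioctl() below keeps only the tape-specific MTIOCTOP/MTIOCGET/MTIOCPOS handling for itself and routes every other ioctl here, so buffer flushing and partition switching always happen before a generic SCSI ioctl runs, and STp->lock is released on every exit path.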
/* The ioctl command */ /* The ioctl command */
static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
@@ -3565,6 +3621,15 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
if (retval) if (retval)
goto out; goto out;
switch (cmd_in) {
case MTIOCPOS:
case MTIOCGET:
case MTIOCTOP:
break;
default:
return st_common_ioctl(STp, STm, file, cmd_in, arg);
}
cmd_type = _IOC_TYPE(cmd_in); cmd_type = _IOC_TYPE(cmd_in);
cmd_nr = _IOC_NR(cmd_in); cmd_nr = _IOC_NR(cmd_in);
@@ -3876,29 +3941,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
} }
mt_pos.mt_blkno = blk; mt_pos.mt_blkno = blk;
retval = put_user_mtpos(p, &mt_pos); retval = put_user_mtpos(p, &mt_pos);
goto out;
} }
mutex_unlock(&STp->lock);
switch (cmd_in) {
case SG_IO:
case SCSI_IOCTL_SEND_COMMAND:
case CDROM_SEND_PACKET:
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
break;
default:
break;
}
retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE, cmd_in, p);
if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
/* unload */
STp->rew_at_close = 0;
STp->ready = ST_NO_TAPE;
}
return retval;
out: out:
mutex_unlock(&STp->lock); mutex_unlock(&STp->lock);
return retval; return retval;
@@ -1844,6 +1844,7 @@ out_release_regions:
out_scsi_host_put: out_scsi_host_put:
scsi_host_put(host); scsi_host_put(host);
out_disable: out_disable:
unregister_reboot_notifier(&stex_notifier);
pci_disable_device(pdev); pci_disable_device(pdev);
return err; return err;
@@ -730,7 +730,7 @@ static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n", pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
agent->orb_pointer); agent->orb_pointer);
queue_work(system_unbound_wq, &agent->work); queue_work(system_dfl_wq, &agent->work);
return RCODE_COMPLETE; return RCODE_COMPLETE;
@@ -764,7 +764,7 @@ static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
pr_debug("tgt_agent DOORBELL\n"); pr_debug("tgt_agent DOORBELL\n");
queue_work(system_unbound_wq, &agent->work); queue_work(system_dfl_wq, &agent->work);
return RCODE_COMPLETE; return RCODE_COMPLETE;
@@ -990,7 +990,7 @@ static void tgt_agent_fetch_work(struct work_struct *work)
if (tgt_agent_check_active(agent) && !doorbell) { if (tgt_agent_check_active(agent) && !doorbell) {
INIT_WORK(&req->work, tgt_agent_process_work); INIT_WORK(&req->work, tgt_agent_process_work);
queue_work(system_unbound_wq, &req->work); queue_work(system_dfl_wq, &req->work);
} else { } else {
/* don't process this request, just check next_ORB */ /* don't process this request, just check next_ORB */
sbp_free_request(req); sbp_free_request(req);
@@ -1618,7 +1618,7 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
agent->orb_offset = sbp2_pointer_to_addr(ptr); agent->orb_offset = sbp2_pointer_to_addr(ptr);
agent->request = req; agent->request = req;
queue_work(system_unbound_wq, &agent->work); queue_work(system_dfl_wq, &agent->work);
rcode = RCODE_COMPLETE; rcode = RCODE_COMPLETE;
} else if (tcode == TCODE_READ_BLOCK_REQUEST) { } else if (tcode == TCODE_READ_BLOCK_REQUEST) {
addr_to_sbp2_pointer(agent->orb_offset, ptr); addr_to_sbp2_pointer(agent->orb_offset, ptr);
@@ -578,6 +578,11 @@ DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len); DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc); DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
DEF_CONFIGFS_ATTRIB_SHOW(submit_type); DEF_CONFIGFS_ATTRIB_SHOW(submit_type);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_with_boundary);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_boundary);
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \ #define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
static ssize_t _name##_store(struct config_item *item, const char *page,\ static ssize_t _name##_store(struct config_item *item, const char *page,\
@@ -1300,6 +1305,11 @@ CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR(, alua_support); CONFIGFS_ATTR(, alua_support);
CONFIGFS_ATTR(, pgr_support); CONFIGFS_ATTR(, pgr_support);
CONFIGFS_ATTR(, submit_type); CONFIGFS_ATTR(, submit_type);
CONFIGFS_ATTR_RO(, atomic_max_len);
CONFIGFS_ATTR_RO(, atomic_alignment);
CONFIGFS_ATTR_RO(, atomic_granularity);
CONFIGFS_ATTR_RO(, atomic_max_with_boundary);
CONFIGFS_ATTR_RO(, atomic_max_boundary);
/* /*
* dev_attrib attributes for devices using the target core SBC/SPC * dev_attrib attributes for devices using the target core SBC/SPC
@@ -1343,6 +1353,11 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
&attr_pgr_support, &attr_pgr_support,
&attr_emulate_rsoc, &attr_emulate_rsoc,
&attr_submit_type, &attr_submit_type,
&attr_atomic_alignment,
&attr_atomic_max_len,
&attr_atomic_granularity,
&attr_atomic_max_with_boundary,
&attr_atomic_max_boundary,
NULL, NULL,
}; };
EXPORT_SYMBOL(sbc_attrib_attrs); EXPORT_SYMBOL(sbc_attrib_attrs);
@@ -2758,33 +2773,24 @@ static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
static ssize_t target_lu_gp_members_show(struct config_item *item, char *page) static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{ {
struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
struct se_device *dev;
struct se_hba *hba;
struct t10_alua_lu_gp_member *lu_gp_mem; struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0, cur_len; const char *const end = page + PAGE_SIZE;
unsigned char buf[LU_GROUP_NAME_BUF] = { }; char *cur = page;
spin_lock(&lu_gp->lu_gp_lock); spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev; struct se_device *dev = lu_gp_mem->lu_gp_mem_dev;
hba = dev->se_hba; struct se_hba *hba = dev->se_hba;
cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", cur += scnprintf(cur, end - cur, "%s/%s\n",
config_item_name(&hba->hba_group.cg_item), config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item)); config_item_name(&dev->dev_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */ if (WARN_ON_ONCE(cur >= end))
if ((cur_len + len) > PAGE_SIZE || cur_len > LU_GROUP_NAME_BUF) {
pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n");
break; break;
}
memcpy(page+len, buf, cur_len);
len += cur_len;
} }
spin_unlock(&lu_gp->lu_gp_lock); spin_unlock(&lu_gp->lu_gp_lock);
return len; return cur - page;
} }
CONFIGFS_ATTR(target_lu_gp_, lu_gp_id); CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
@@ -814,6 +814,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT; dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;
/* Skip allocating lun_stats since we can't export them. */
xcopy_lun = &dev->xcopy_lun; xcopy_lun = &dev->xcopy_lun;
rcu_assign_pointer(xcopy_lun->lun_se_dev, dev); rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
init_completion(&xcopy_lun->lun_shutdown_comp); init_completion(&xcopy_lun->lun_shutdown_comp);
@@ -840,12 +841,29 @@ free_device:
return NULL; return NULL;
} }
void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
int block_size = bdev_logical_block_size(bdev);
if (!bdev_can_atomic_write(bdev))
return;
attrib->atomic_max_len = queue_atomic_write_max_bytes(q) / block_size;
attrib->atomic_granularity = attrib->atomic_alignment =
queue_atomic_write_unit_min_bytes(q) / block_size;
attrib->atomic_max_with_boundary = 0;
attrib->atomic_max_boundary = 0;
}
EXPORT_SYMBOL_GPL(target_configure_write_atomic_from_bdev);
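To make the unit conversion concrete, a device with assumed queue limits would export the following (numbers are illustrative, not from any real device):

/*
 * Assumed: block_size = 512, queue_atomic_write_max_bytes(q) = 64 KiB,
 * queue_atomic_write_unit_min_bytes(q) = 4 KiB. Then:
 *   atomic_max_len     = 65536 / 512 = 128 logical blocks
 *   atomic_alignment   =  4096 / 512 =   8 logical blocks
 *   atomic_granularity =  4096 / 512 =   8 logical blocks
 */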
/* /*
* Check if the underlying struct block_device supports discard and if yes * Check if the underlying struct block_device supports discard and if yes
* configure the UNMAP parameters. * configure the UNMAP parameters.
*/ */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
struct block_device *bdev) struct block_device *bdev)
{ {
int block_size = bdev_logical_block_size(bdev); int block_size = bdev_logical_block_size(bdev);
@@ -863,7 +881,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
bdev_discard_alignment(bdev) / block_size; bdev_discard_alignment(bdev) / block_size;
return true; return true;
} }
EXPORT_SYMBOL(target_configure_unmap_from_queue); EXPORT_SYMBOL(target_configure_unmap_from_bdev);
/* /*
* Convert from blocksize advertised to the initiator to the 512 byte * Convert from blocksize advertised to the initiator to the 512 byte
@@ -697,7 +697,7 @@ static void target_fabric_port_release(struct config_item *item)
struct se_lun *lun = container_of(to_config_group(item), struct se_lun *lun = container_of(to_config_group(item),
struct se_lun, lun_group); struct se_lun, lun_group);
kfree_rcu(lun, rcu_head); call_rcu(&lun->rcu_head, target_tpg_free_lun);
} }
static struct configfs_item_operations target_fabric_port_item_ops = { static struct configfs_item_operations target_fabric_port_item_ops = {
@@ -92,8 +92,8 @@ static bool fd_configure_unmap(struct se_device *dev)
struct inode *inode = file->f_mapping->host; struct inode *inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) if (S_ISBLK(inode->i_mode))
return target_configure_unmap_from_queue(&dev->dev_attrib, return target_configure_unmap_from_bdev(&dev->dev_attrib,
I_BDEV(inode)); I_BDEV(inode));
/* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */ /* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
dev->dev_attrib.max_unmap_lba_count = 0x2000; dev->dev_attrib.max_unmap_lba_count = 0x2000;
@@ -84,8 +84,8 @@ static bool iblock_configure_unmap(struct se_device *dev)
{ {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev); struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
return target_configure_unmap_from_queue(&dev->dev_attrib, return target_configure_unmap_from_bdev(&dev->dev_attrib,
ib_dev->ibd_bd); ib_dev->ibd_bd);
} }
static int iblock_configure_device(struct se_device *dev) static int iblock_configure_device(struct se_device *dev)
@@ -152,6 +152,8 @@ static int iblock_configure_device(struct se_device *dev)
if (bdev_nonrot(bd)) if (bdev_nonrot(bd))
dev->dev_attrib.is_nonrot = 1; dev->dev_attrib.is_nonrot = 1;
target_configure_write_atomic_from_bdev(&dev->dev_attrib, bd);
bi = bdev_get_integrity(bd); bi = bdev_get_integrity(bd);
if (!bi) if (!bi)
return 0; return 0;
@@ -773,6 +775,9 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
else if (!bdev_write_cache(ib_dev->ibd_bd)) else if (!bdev_write_cache(ib_dev->ibd_bd))
opf |= REQ_FUA; opf |= REQ_FUA;
} }
if (cmd->se_cmd_flags & SCF_ATOMIC)
opf |= REQ_ATOMIC;
} else { } else {
opf = REQ_OP_READ; opf = REQ_OP_READ;
miter_dir = SG_MITER_FROM_SG; miter_dir = SG_MITER_FROM_SG;
@@ -125,6 +125,7 @@ void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
struct se_lun *); struct se_lun *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64); struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
void target_tpg_free_lun(struct rcu_head *head);
int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
bool, struct se_device *); bool, struct se_device *);
void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *); void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
@@ -764,6 +764,49 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
return 0; return 0;
} }
static sense_reason_t
sbc_check_atomic(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
struct se_dev_attrib *attrib = &dev->dev_attrib;
u16 boundary, transfer_len;
u64 lba;
lba = transport_lba_64(cdb);
boundary = get_unaligned_be16(&cdb[10]);
transfer_len = get_unaligned_be16(&cdb[12]);
if (!attrib->atomic_max_len)
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (boundary) {
if (transfer_len > attrib->atomic_max_with_boundary)
return TCM_INVALID_CDB_FIELD;
if (boundary > attrib->atomic_max_boundary)
return TCM_INVALID_CDB_FIELD;
} else {
if (transfer_len > attrib->atomic_max_len)
return TCM_INVALID_CDB_FIELD;
}
if (attrib->atomic_granularity) {
if (transfer_len % attrib->atomic_granularity)
return TCM_INVALID_CDB_FIELD;
if (boundary && boundary % attrib->atomic_granularity)
return TCM_INVALID_CDB_FIELD;
}
if (dev->dev_attrib.atomic_alignment) {
u64 _lba = lba;
if (do_div(_lba, dev->dev_attrib.atomic_alignment))
return TCM_INVALID_CDB_FIELD;
}
return 0;
}
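For orientation, the offsets consumed above follow the WRITE ATOMIC (16) CDB layout; a hypothetical builder for such a CDB (the helper name is invented) would be:

/* Sketch: fill the WRITE ATOMIC (16) fields checked by sbc_check_atomic(). */
static void example_fill_write_atomic16(u8 *cdb, u64 lba, u16 boundary,
					u16 transfer_len)
{
	cdb[0] = WRITE_ATOMIC_16;
	put_unaligned_be64(lba, &cdb[2]);	/* LOGICAL BLOCK ADDRESS */
	put_unaligned_be16(boundary, &cdb[10]);	/* ATOMIC BOUNDARY */
	put_unaligned_be16(transfer_len, &cdb[12]); /* TRANSFER LENGTH */
}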
sense_reason_t sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops) sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
{ {
@@ -861,6 +904,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
break; break;
case WRITE_16: case WRITE_16:
case WRITE_VERIFY_16: case WRITE_VERIFY_16:
case WRITE_ATOMIC_16:
sectors = transport_get_sectors_16(cdb); sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb); cmd->t_task_lba = transport_lba_64(cdb);
@@ -872,6 +916,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
return ret; return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
if (cdb[0] == WRITE_ATOMIC_16) {
cmd->se_cmd_flags |= SCF_ATOMIC;
ret = sbc_check_atomic(dev, cmd, cdb);
if (ret)
return ret;
}
cmd->execute_cmd = sbc_execute_rw; cmd->execute_cmd = sbc_execute_rw;
break; break;
case VARIABLE_LENGTH_CMD: case VARIABLE_LENGTH_CMD:
@@ -521,7 +521,6 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
have_tp = 1; have_tp = 1;
buf[0] = dev->transport->get_device_type(dev); buf[0] = dev->transport->get_device_type(dev);
buf[3] = have_tp ? 0x3c : 0x10;
/* Set WSNZ to 1 */ /* Set WSNZ to 1 */
buf[4] = 0x01; buf[4] = 0x01;
@@ -562,11 +561,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
else else
put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
/* put_unaligned_be16(12, &buf[2]);
* Exit now if we don't support TP.
*/
if (!have_tp) if (!have_tp)
goto max_write_same; goto try_atomic;
/* /*
* Set MAXIMUM UNMAP LBA COUNT * Set MAXIMUM UNMAP LBA COUNT
@@ -595,9 +593,29 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/* /*
* MAXIMUM WRITE SAME LENGTH * MAXIMUM WRITE SAME LENGTH
*/ */
max_write_same:
put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]); put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
put_unaligned_be16(40, &buf[2]);
try_atomic:
/*
* ATOMIC
*/
if (!dev->dev_attrib.atomic_max_len)
goto done;
if (dev->dev_attrib.atomic_max_len < io_max_blocks)
put_unaligned_be32(dev->dev_attrib.atomic_max_len, &buf[44]);
else
put_unaligned_be32(io_max_blocks, &buf[44]);
put_unaligned_be32(dev->dev_attrib.atomic_alignment, &buf[48]);
put_unaligned_be32(dev->dev_attrib.atomic_granularity, &buf[52]);
put_unaligned_be32(dev->dev_attrib.atomic_max_with_boundary, &buf[56]);
put_unaligned_be32(dev->dev_attrib.atomic_max_boundary, &buf[60]);
put_unaligned_be16(60, &buf[2]);
done:
return 0; return 0;
} }
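The bytes written above are the atomic-write fields of the Block Limits VPD page (0xB0), summarized here for orientation:

/*
 * buf[44..47]  MAXIMUM ATOMIC TRANSFER LENGTH (capped at io_max_blocks)
 * buf[48..51]  ATOMIC ALIGNMENT
 * buf[52..55]  ATOMIC TRANSFER LENGTH GRANULARITY
 * buf[56..59]  MAXIMUM ATOMIC TRANSFER LENGTH WITH ATOMIC BOUNDARY
 * buf[60..63]  MAXIMUM ATOMIC BOUNDARY SIZE
 * buf[2..3]    page length, raised to 60 when atomic limits are reported
 */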
@@ -1452,6 +1470,24 @@ static const struct target_opcode_descriptor tcm_opcode_write_same32 = {
.update_usage_bits = set_dpofua_usage_bits32, .update_usage_bits = set_dpofua_usage_bits32,
}; };
static bool tcm_is_atomic_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
return cmd->se_dev->dev_attrib.atomic_max_len;
}
static struct target_opcode_descriptor tcm_opcode_write_atomic16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_ATOMIC_16,
.cdb_size = 16,
.usage_bits = {WRITE_ATOMIC_16, 0xf8, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff,
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
.enabled = tcm_is_atomic_enabled,
.update_usage_bits = set_dpofua_usage_bits,
};
static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr, static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd) struct se_cmd *cmd)
{ {
@@ -2008,6 +2044,7 @@ static const struct target_opcode_descriptor *tcm_supported_opcodes[] = {
&tcm_opcode_write16, &tcm_opcode_write16,
&tcm_opcode_write_verify16, &tcm_opcode_write_verify16,
&tcm_opcode_write_same32, &tcm_opcode_write_same32,
&tcm_opcode_write_atomic16,
&tcm_opcode_compare_write, &tcm_opcode_compare_write,
&tcm_opcode_read_capacity, &tcm_opcode_read_capacity,
&tcm_opcode_read_capacity16, &tcm_opcode_read_capacity16,
@@ -276,56 +276,39 @@ static ssize_t target_stat_lu_state_bit_show(struct config_item *item,
return snprintf(page, PAGE_SIZE, "exposed\n"); return snprintf(page, PAGE_SIZE, "exposed\n");
} }
static ssize_t target_stat_lu_num_cmds_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = to_stat_lu_dev(item);
	struct se_dev_io_stats *stats;
	unsigned int cpu;
	u32 cmds = 0;

	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(dev->stats, cpu);
		cmds += stats->total_cmds;
	}

	/* scsiLuNumCommands */
	return snprintf(page, PAGE_SIZE, "%u\n", cmds);
}

static ssize_t target_stat_lu_read_mbytes_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = to_stat_lu_dev(item);
	struct se_dev_io_stats *stats;
	unsigned int cpu;
	u32 bytes = 0;

	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(dev->stats, cpu);
		bytes += stats->read_bytes;
	}

	/* scsiLuReadMegaBytes */
	return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
}

static ssize_t target_stat_lu_write_mbytes_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = to_stat_lu_dev(item);
	struct se_dev_io_stats *stats;
	unsigned int cpu;
	u32 bytes = 0;

	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(dev->stats, cpu);
		bytes += stats->write_bytes;
	}

	/* scsiLuWrittenMegaBytes */
	return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
}

#define per_cpu_stat_snprintf(stats_struct, prefix, field, shift)		\
static ssize_t									\
per_cpu_stat_##prefix##_snprintf(struct stats_struct __percpu *per_cpu_stats,	\
				 char *page)					\
{										\
	struct stats_struct *stats;						\
	unsigned int cpu;							\
	u64 sum = 0;								\
										\
	for_each_possible_cpu(cpu) {						\
		stats = per_cpu_ptr(per_cpu_stats, cpu);			\
		sum += stats->field;						\
	}									\
										\
	return snprintf(page, PAGE_SIZE, "%llu\n", sum >> shift);		\
}

#define lu_show_per_cpu_stat(prefix, field, shift)				\
per_cpu_stat_snprintf(se_dev_io_stats, prefix, field, shift);			\
static ssize_t									\
target_stat_##prefix##_show(struct config_item *item, char *page)		\
{										\
	struct se_device *dev = to_stat_lu_dev(item);				\
										\
	return per_cpu_stat_##prefix##_snprintf(dev->stats, page);		\
}

/* scsiLuNumCommands */
lu_show_per_cpu_stat(lu_num_cmds, total_cmds, 0);
/* scsiLuReadMegaBytes */
lu_show_per_cpu_stat(lu_read_mbytes, read_bytes, 20);
/* scsiLuWrittenMegaBytes */
lu_show_per_cpu_stat(lu_write_mbytes, write_bytes, 20);
static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page) static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page)
{ {
@@ -623,53 +606,30 @@ static ssize_t target_stat_tgt_port_port_index_show(struct config_item *item,
return ret; return ret;
} }
static ssize_t target_stat_tgt_port_in_cmds_show(struct config_item *item,
		char *page)
{
	struct se_lun *lun = to_stat_tgt_port(item);
	struct se_device *dev;
	ssize_t ret = -ENODEV;

	rcu_read_lock();
	dev = rcu_dereference(lun->lun_se_dev);
	if (dev)
		ret = snprintf(page, PAGE_SIZE, "%lu\n",
			       atomic_long_read(&lun->lun_stats.cmd_pdus));
	rcu_read_unlock();
	return ret;
}

static ssize_t target_stat_tgt_port_write_mbytes_show(struct config_item *item,
		char *page)
{
	struct se_lun *lun = to_stat_tgt_port(item);
	struct se_device *dev;
	ssize_t ret = -ENODEV;

	rcu_read_lock();
	dev = rcu_dereference(lun->lun_se_dev);
	if (dev)
		ret = snprintf(page, PAGE_SIZE, "%u\n",
			       (u32)(atomic_long_read(&lun->lun_stats.rx_data_octets) >> 20));
	rcu_read_unlock();
	return ret;
}

static ssize_t target_stat_tgt_port_read_mbytes_show(struct config_item *item,
		char *page)
{
	struct se_lun *lun = to_stat_tgt_port(item);
	struct se_device *dev;
	ssize_t ret = -ENODEV;

	rcu_read_lock();
	dev = rcu_dereference(lun->lun_se_dev);
	if (dev)
		ret = snprintf(page, PAGE_SIZE, "%u\n",
			       (u32)(atomic_long_read(&lun->lun_stats.tx_data_octets) >> 20));
	rcu_read_unlock();
	return ret;
}

#define tgt_port_show_per_cpu_stat(prefix, field, shift)			\
per_cpu_stat_snprintf(scsi_port_stats, prefix, field, shift);			\
static ssize_t									\
target_stat_##prefix##_show(struct config_item *item, char *page)		\
{										\
	struct se_lun *lun = to_stat_tgt_port(item);				\
	struct se_device *dev;							\
	int ret;								\
										\
	rcu_read_lock();							\
	dev = rcu_dereference(lun->lun_se_dev);					\
	if (!dev) {								\
		rcu_read_unlock();						\
		return -ENODEV;							\
	}									\
										\
	ret = per_cpu_stat_##prefix##_snprintf(lun->lun_stats, page);		\
	rcu_read_unlock();							\
	return ret;								\
}

tgt_port_show_per_cpu_stat(tgt_port_in_cmds, cmd_pdus, 0);
tgt_port_show_per_cpu_stat(tgt_port_write_mbytes, rx_data_octets, 20);
tgt_port_show_per_cpu_stat(tgt_port_read_mbytes, tx_data_octets, 20);
static ssize_t target_stat_tgt_port_hs_in_cmds_show(struct config_item *item, static ssize_t target_stat_tgt_port_hs_in_cmds_show(struct config_item *item,
char *page) char *page)
@@ -1035,92 +995,34 @@ static ssize_t target_stat_auth_att_count_show(struct config_item *item,
return ret; return ret;
} }
static ssize_t target_stat_auth_num_cmds_show(struct config_item *item,
		char *page)
{
	struct se_lun_acl *lacl = auth_to_lacl(item);
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	struct se_dev_entry_io_stats *stats;
	struct se_dev_entry *deve;
	unsigned int cpu;
	ssize_t ret;
	u32 cmds = 0;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (!deve) {
		rcu_read_unlock();
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(deve->stats, cpu);
		cmds += stats->total_cmds;
	}

	/* scsiAuthIntrOutCommands */
	ret = snprintf(page, PAGE_SIZE, "%u\n", cmds);
	rcu_read_unlock();
	return ret;
}

static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item,
		char *page)
{
	struct se_lun_acl *lacl = auth_to_lacl(item);
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	struct se_dev_entry_io_stats *stats;
	struct se_dev_entry *deve;
	unsigned int cpu;
	ssize_t ret;
	u32 bytes = 0;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (!deve) {
		rcu_read_unlock();
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(deve->stats, cpu);
		bytes += stats->read_bytes;
	}

	/* scsiAuthIntrReadMegaBytes */
	ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
	rcu_read_unlock();
	return ret;
}

static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item,
		char *page)
{
	struct se_lun_acl *lacl = auth_to_lacl(item);
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	struct se_dev_entry_io_stats *stats;
	struct se_dev_entry *deve;
	unsigned int cpu;
	ssize_t ret;
	u32 bytes = 0;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (!deve) {
		rcu_read_unlock();
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(deve->stats, cpu);
		bytes += stats->write_bytes;
	}

	/* scsiAuthIntrWrittenMegaBytes */
	ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
	rcu_read_unlock();
	return ret;
}

#define auth_show_per_cpu_stat(prefix, field, shift)				\
per_cpu_stat_snprintf(se_dev_entry_io_stats, prefix, field, shift);		\
static ssize_t									\
target_stat_##prefix##_show(struct config_item *item, char *page)		\
{										\
	struct se_lun_acl *lacl = auth_to_lacl(item);				\
	struct se_node_acl *nacl = lacl->se_lun_nacl;				\
	struct se_dev_entry *deve;						\
	int ret;								\
										\
	rcu_read_lock();							\
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);			\
	if (!deve) {								\
		rcu_read_unlock();						\
		return -ENODEV;							\
	}									\
										\
	ret = per_cpu_stat_##prefix##_snprintf(deve->stats, page);		\
	rcu_read_unlock();							\
	return ret;								\
}

/* scsiAuthIntrOutCommands */
auth_show_per_cpu_stat(auth_num_cmds, total_cmds, 0);
/* scsiAuthIntrReadMegaBytes */
auth_show_per_cpu_stat(auth_read_mbytes, read_bytes, 20);
/* scsiAuthIntrWrittenMegaBytes */
auth_show_per_cpu_stat(auth_write_mbytes, write_bytes, 20);
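Each invocation expands into a pair of functions; auth_show_per_cpu_stat(auth_read_mbytes, read_bytes, 20), for example, generates roughly the helper below plus a target_stat_auth_read_mbytes_show() wrapper that looks up the deve under rcu_read_lock() and calls it:

/* Approximate expansion (whitespace normalized, for illustration only): */
static ssize_t
per_cpu_stat_auth_read_mbytes_snprintf(struct se_dev_entry_io_stats __percpu *per_cpu_stats,
				       char *page)
{
	struct se_dev_entry_io_stats *stats;
	unsigned int cpu;
	u64 sum = 0;

	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(per_cpu_stats, cpu);
		sum += stats->read_bytes;
	}

	return snprintf(page, PAGE_SIZE, "%llu\n", sum >> 20); /* bytes -> MiB */
}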
static ssize_t target_stat_auth_hs_num_cmds_show(struct config_item *item, static ssize_t target_stat_auth_hs_num_cmds_show(struct config_item *item,
char *page) char *page)
@@ -548,7 +548,7 @@ int core_tpg_register(
ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0, ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
true, g_lun0_dev); true, g_lun0_dev);
if (ret < 0) { if (ret < 0) {
kfree(se_tpg->tpg_virt_lun0); target_tpg_free_lun(&se_tpg->tpg_virt_lun0->rcu_head);
return ret; return ret;
} }
} }
@@ -595,7 +595,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
if (se_tpg->proto_id >= 0) { if (se_tpg->proto_id >= 0) {
core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0); core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head); call_rcu(&se_tpg->tpg_virt_lun0->rcu_head, target_tpg_free_lun);
} }
target_tpg_deregister_rtpi(se_tpg); target_tpg_deregister_rtpi(se_tpg);
@@ -615,6 +615,13 @@ struct se_lun *core_tpg_alloc_lun(
pr_err("Unable to allocate se_lun memory\n"); pr_err("Unable to allocate se_lun memory\n");
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
lun->lun_stats = alloc_percpu(struct scsi_port_stats);
if (!lun->lun_stats) {
pr_err("Unable to allocate se_lun stats memory\n");
goto free_lun;
}
lun->unpacked_lun = unpacked_lun; lun->unpacked_lun = unpacked_lun;
atomic_set(&lun->lun_acl_count, 0); atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp); init_completion(&lun->lun_shutdown_comp);
@@ -628,6 +635,18 @@ struct se_lun *core_tpg_alloc_lun(
lun->lun_tpg = tpg; lun->lun_tpg = tpg;
return lun; return lun;
free_lun:
kfree(lun);
return ERR_PTR(-ENOMEM);
}
void target_tpg_free_lun(struct rcu_head *head)
{
struct se_lun *lun = container_of(head, struct se_lun, rcu_head);
free_percpu(lun->lun_stats);
kfree(lun);
} }
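The switch from kfree_rcu() to call_rcu() with an explicit callback is needed because lun->lun_stats is a separate per-cpu allocation: it must be released with free_percpu() before the se_lun itself is freed, which a plain kfree_rcu() cannot do.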
int core_tpg_add_lun( int core_tpg_add_lun(
@@ -126,12 +126,12 @@ int init_se_kmem_caches(void)
} }
target_completion_wq = alloc_workqueue("target_completion", target_completion_wq = alloc_workqueue("target_completion",
WQ_MEM_RECLAIM, 0); WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!target_completion_wq) if (!target_completion_wq)
goto out_free_lba_map_mem_cache; goto out_free_lba_map_mem_cache;
target_submission_wq = alloc_workqueue("target_submission", target_submission_wq = alloc_workqueue("target_submission",
WQ_MEM_RECLAIM, 0); WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!target_submission_wq) if (!target_submission_wq)
goto out_free_completion_wq; goto out_free_completion_wq;
@@ -1571,7 +1571,12 @@ target_cmd_parse_cdb(struct se_cmd *cmd)
return ret; return ret;
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); /*
* If this is the xcopy_lun then we won't have lun_stats since we
* can't export them.
*/
if (cmd->se_lun->lun_stats)
this_cpu_inc(cmd->se_lun->lun_stats->cmd_pdus);
return 0; return 0;
} }
EXPORT_SYMBOL(target_cmd_parse_cdb); EXPORT_SYMBOL(target_cmd_parse_cdb);
@@ -2597,8 +2602,9 @@ queue_rsp:
!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
goto queue_status; goto queue_status;
atomic_long_add(cmd->data_length, if (cmd->se_lun->lun_stats)
&cmd->se_lun->lun_stats.tx_data_octets); this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
cmd->data_length);
/* /*
* Perform READ_STRIP of PI using software emulation when * Perform READ_STRIP of PI using software emulation when
* backend had PI enabled, if the transport will not be * backend had PI enabled, if the transport will not be
@@ -2621,14 +2627,16 @@ queue_rsp:
goto queue_full; goto queue_full;
break; break;
case DMA_TO_DEVICE: case DMA_TO_DEVICE:
atomic_long_add(cmd->data_length, if (cmd->se_lun->lun_stats)
&cmd->se_lun->lun_stats.rx_data_octets); this_cpu_add(cmd->se_lun->lun_stats->rx_data_octets,
cmd->data_length);
/* /*
* Check if we need to send READ payload for BIDI-COMMAND * Check if we need to send READ payload for BIDI-COMMAND
*/ */
if (cmd->se_cmd_flags & SCF_BIDI) { if (cmd->se_cmd_flags & SCF_BIDI) {
atomic_long_add(cmd->data_length, if (cmd->se_lun->lun_stats)
&cmd->se_lun->lun_stats.tx_data_octets); this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
cmd->data_length);
ret = cmd->se_tfo->queue_data_in(cmd); ret = cmd->se_tfo->queue_data_in(cmd);
if (ret) if (ret)
goto queue_full; goto queue_full;
@@ -462,7 +462,7 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
int target_xcopy_setup_pt(void) int target_xcopy_setup_pt(void)
{ {
xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0); xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!xcopy_wq) { if (!xcopy_wq) {
pr_err("Unable to allocate xcopy_wq\n"); pr_err("Unable to allocate xcopy_wq\n");
return -ENOMEM; return -ENOMEM;
@@ -250,7 +250,7 @@ static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
tpg->lport_wwn = ft_wwn; tpg->lport_wwn = ft_wwn;
INIT_LIST_HEAD(&tpg->lun_list); INIT_LIST_HEAD(&tpg->lun_list);
wq = alloc_workqueue("tcm_fc", 0, 1); wq = alloc_workqueue("tcm_fc", WQ_PERCPU, 1);
if (!wq) { if (!wq) {
kfree(tpg); kfree(tpg);
return NULL; return NULL;
@@ -2,6 +2,7 @@
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
ufshcd-core-$(CONFIG_RPMB) += ufs-rpmb.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
@@ -134,17 +134,15 @@ unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr); EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
/**
 * ufshcd_mcq_decide_queue_depth - decide the queue depth
 * @hba: per adapter instance
 *
 * Return: queue-depth on success, non-zero on error
 *
 * MAC - Max. Active Command of the Host Controller (HC)
 * HC wouldn't send more than this commands to the device.
 * Calculates and adjusts the queue depth based on the depth
 * supported by the HC and ufs device.
 */
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)

/**
 * ufshcd_get_hba_mac - Maximum number of commands supported by the host
 * controller.
 * @hba: per adapter instance
 *
 * Return: queue depth on success; negative upon error.
 *
 * MAC = Maximum number of Active Commands supported by the Host Controller.
 */
int ufshcd_get_hba_mac(struct ufs_hba *hba)
{
	int mac;
@@ -162,18 +160,7 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
mac = hba->vops->get_hba_mac(hba); mac = hba->vops->get_hba_mac(hba);
} }
if (mac < 0) if (mac < 0)
goto err; dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
/*
* max. value of bqueuedepth = 256, mac is host dependent.
* It is mandatory for UFS device to define bQueueDepth if
* shared queuing architecture is enabled.
*/
return min_t(int, mac, hba->dev_info.bqueuedepth);
err:
dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
return mac; return mac;
} }
@@ -307,9 +294,10 @@ static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
struct ufs_hw_queue *hwq) struct ufs_hw_queue *hwq)
{ {
struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq); struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
int tag = ufshcd_mcq_get_tag(hba, cqe);
if (cqe->command_desc_base_addr) { if (cqe->command_desc_base_addr) {
int tag = ufshcd_mcq_get_tag(hba, cqe);
ufshcd_compl_one_cqe(hba, tag, cqe); ufshcd_compl_one_cqe(hba, tag, cqe);
/* After processed the cqe, mark it empty (invalid) entry */ /* After processed the cqe, mark it empty (invalid) entry */
cqe->command_desc_base_addr = 0; cqe->command_desc_base_addr = 0;
@@ -491,9 +479,6 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
mutex_init(&hwq->sq_mutex); mutex_init(&hwq->sq_mutex);
} }
/* The very first HW queue serves device commands */
hba->dev_cmd_queue = &hba->uhq[0];
host->host_tagset = 1; host->host_tagset = 1;
return 0; return 0;
} }
@@ -546,8 +531,9 @@ static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
*/ */
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{ {
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
struct scsi_cmnd *cmd = lrbp->cmd; struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct request *rq = scsi_cmd_to_rq(cmd);
struct ufs_hw_queue *hwq; struct ufs_hw_queue *hwq;
void __iomem *reg, *opr_sqd_base; void __iomem *reg, *opr_sqd_base;
u32 nexus, id, val; u32 nexus, id, val;
@@ -556,24 +542,21 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return -ETIMEDOUT; return -ETIMEDOUT;
if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) { if (!cmd)
if (!cmd) return -EINVAL;
return -EINVAL;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); hwq = ufshcd_mcq_req_to_hwq(hba, rq);
if (!hwq) if (!hwq)
return 0; return 0;
} else {
hwq = hba->dev_cmd_queue;
}
id = hwq->id; id = hwq->id;
mutex_lock(&hwq->sq_mutex); guard(mutex)(&hwq->sq_mutex);
/* stop the SQ fetching before working on it */ /* stop the SQ fetching before working on it */
err = ufshcd_mcq_sq_stop(hba, hwq); err = ufshcd_mcq_sq_stop(hba, hwq);
if (err) if (err)
goto unlock; return err;
/* SQCTI = EXT_IID, IID, LUN, Task Tag */ /* SQCTI = EXT_IID, IID, LUN, Task Tag */
nexus = lrbp->lun << 8 | task_tag; nexus = lrbp->lun << 8 | task_tag;
@@ -600,8 +583,6 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
if (ufshcd_mcq_sq_start(hba, hwq)) if (ufshcd_mcq_sq_start(hba, hwq))
err = -ETIMEDOUT; err = -ETIMEDOUT;
unlock:
mutex_unlock(&hwq->sq_mutex);
return err; return err;
} }
@@ -632,7 +613,8 @@ static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba, static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
struct ufs_hw_queue *hwq, int task_tag) struct ufs_hw_queue *hwq, int task_tag)
{ {
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct utp_transfer_req_desc *utrd; struct utp_transfer_req_desc *utrd;
__le64 cmd_desc_base_addr; __le64 cmd_desc_base_addr;
bool ret = false; bool ret = false;
@@ -683,7 +665,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
struct Scsi_Host *host = cmd->device->host; struct Scsi_Host *host = cmd->device->host;
struct ufs_hba *hba = shost_priv(host); struct ufs_hba *hba = shost_priv(host);
int tag = scsi_cmd_to_rq(cmd)->tag; int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
struct ufs_hw_queue *hwq; struct ufs_hw_queue *hwq;
int err; int err;
drivers/ufs/core/ufs-rpmb.c (new file, 254 lines)
@@ -0,0 +1,254 @@
// SPDX-License-Identifier: GPL-2.0
/*
* UFS OP-TEE based RPMB Driver
*
* Copyright (C) 2025 Micron Technology, Inc.
* Copyright (C) 2025 Qualcomm Technologies, Inc.
*
* Authors:
* Bean Huo <beanhuo@micron.com>
* Can Guo <can.guo@oss.qualcomm.com>
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rpmb.h>
#include <linux/string.h>
#include <linux/list.h>
#include <ufs/ufshcd.h>
#include <linux/unaligned.h>
#include "ufshcd-priv.h"
#define UFS_RPMB_SEC_PROTOCOL 0xEC /* JEDEC UFS application */
#define UFS_RPMB_SEC_PROTOCOL_ID 0x01 /* JEDEC UFS RPMB protocol ID, CDB byte3 */
static const struct bus_type ufs_rpmb_bus_type = {
.name = "ufs_rpmb",
};
/* UFS RPMB device structure */
struct ufs_rpmb_dev {
u8 region_id;
struct device dev;
struct rpmb_dev *rdev;
struct ufs_hba *hba;
struct list_head node;
};
static int ufs_sec_submit(struct ufs_hba *hba, u16 spsp, void *buffer, size_t len, bool send)
{
struct scsi_device *sdev = hba->ufs_rpmb_wlun;
u8 cdb[12] = { };
cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
cdb[1] = UFS_RPMB_SEC_PROTOCOL;
put_unaligned_be16(spsp, &cdb[2]);
put_unaligned_be32(len, &cdb[6]);
return scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
buffer, len, /*timeout=*/30 * HZ, 0, NULL);
}
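For reference (not in the driver source): the 12-byte CDB that ufs_sec_submit() assembles for a region-0 request carrying one 512-byte RPMB frame. The opcodes are the standard SPC SECURITY PROTOCOL codes; SPSP carries the region number in its upper byte and the JEDEC RPMB protocol ID in its lower byte.

u8 cdb[12] = {
	[0] = 0xB5,			/* SECURITY PROTOCOL OUT (0xA2 for IN) */
	[1] = 0xEC,			/* UFS_RPMB_SEC_PROTOCOL */
	[2] = 0x00, [3] = 0x01,		/* SPSP = region 0 << 8 | protocol ID 0x01 */
	[6] = 0x00, [7] = 0x00,
	[8] = 0x02, [9] = 0x00,		/* transfer length 512, big-endian */
};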
/* UFS RPMB route frames implementation */
static int ufs_rpmb_route_frames(struct device *dev, u8 *req, unsigned int req_len, u8 *resp,
unsigned int resp_len)
{
struct ufs_rpmb_dev *ufs_rpmb = dev_get_drvdata(dev);
struct rpmb_frame *frm_out = (struct rpmb_frame *)req;
bool need_result_read = true;
u16 req_type, protocol_id;
struct ufs_hba *hba;
int ret;
if (!ufs_rpmb) {
dev_err(dev, "Missing driver data\n");
return -ENODEV;
}
hba = ufs_rpmb->hba;
req_type = be16_to_cpu(frm_out->req_resp);
switch (req_type) {
case RPMB_PROGRAM_KEY:
if (req_len != sizeof(struct rpmb_frame) || resp_len != sizeof(struct rpmb_frame))
return -EINVAL;
break;
case RPMB_GET_WRITE_COUNTER:
if (req_len != sizeof(struct rpmb_frame) || resp_len != sizeof(struct rpmb_frame))
return -EINVAL;
need_result_read = false;
break;
case RPMB_WRITE_DATA:
if (req_len % sizeof(struct rpmb_frame) || resp_len != sizeof(struct rpmb_frame))
return -EINVAL;
break;
case RPMB_READ_DATA:
if (req_len != sizeof(struct rpmb_frame) || resp_len % sizeof(struct rpmb_frame))
return -EINVAL;
need_result_read = false;
break;
default:
dev_err(dev, "Unknown request type=0x%04x\n", req_type);
return -EINVAL;
}
protocol_id = ufs_rpmb->region_id << 8 | UFS_RPMB_SEC_PROTOCOL_ID;
ret = ufs_sec_submit(hba, protocol_id, req, req_len, true);
if (ret) {
dev_err(dev, "Command failed with ret=%d\n", ret);
return ret;
}
if (need_result_read) {
struct rpmb_frame *frm_resp = (struct rpmb_frame *)resp;
memset(frm_resp, 0, sizeof(*frm_resp));
frm_resp->req_resp = cpu_to_be16(RPMB_RESULT_READ);
ret = ufs_sec_submit(hba, protocol_id, resp, resp_len, true);
if (ret) {
dev_err(dev, "Result read request failed with ret=%d\n", ret);
return ret;
}
}
if (!ret) {
ret = ufs_sec_submit(hba, protocol_id, resp, resp_len, false);
if (ret)
dev_err(dev, "Response read failed with ret=%d\n", ret);
}
return ret;
}
static void ufs_rpmb_device_release(struct device *dev)
{
struct ufs_rpmb_dev *ufs_rpmb = dev_get_drvdata(dev);
rpmb_dev_unregister(ufs_rpmb->rdev);
}
/* UFS RPMB device registration */
int ufs_rpmb_probe(struct ufs_hba *hba)
{
struct ufs_rpmb_dev *ufs_rpmb, *it, *tmp;
struct rpmb_dev *rdev;
char *cid = NULL;
int region;
u32 cap;
int ret;
if (!hba->ufs_rpmb_wlun || hba->dev_info.b_advanced_rpmb_en) {
dev_info(hba->dev, "Skip OP-TEE RPMB registration\n");
return -ENODEV;
}
/* Check if device_id is available */
if (!hba->dev_info.device_id) {
dev_err(hba->dev, "UFS Device ID not available\n");
return -EINVAL;
}
INIT_LIST_HEAD(&hba->rpmbs);
struct rpmb_descr descr = {
.type = RPMB_TYPE_UFS,
.route_frames = ufs_rpmb_route_frames,
.reliable_wr_count = hba->dev_info.rpmb_io_size,
};
for (region = 0; region < ARRAY_SIZE(hba->dev_info.rpmb_region_size); region++) {
cap = hba->dev_info.rpmb_region_size[region];
if (!cap)
continue;
ufs_rpmb = devm_kzalloc(hba->dev, sizeof(*ufs_rpmb), GFP_KERNEL);
if (!ufs_rpmb) {
ret = -ENOMEM;
goto err_out;
}
ufs_rpmb->hba = hba;
ufs_rpmb->dev.parent = &hba->ufs_rpmb_wlun->sdev_gendev;
ufs_rpmb->dev.bus = &ufs_rpmb_bus_type;
ufs_rpmb->dev.release = ufs_rpmb_device_release;
dev_set_name(&ufs_rpmb->dev, "ufs_rpmb%d", region);
/* Set driver data BEFORE device_register */
dev_set_drvdata(&ufs_rpmb->dev, ufs_rpmb);
ret = device_register(&ufs_rpmb->dev);
if (ret) {
dev_err(hba->dev, "Failed to register UFS RPMB device %d\n", region);
put_device(&ufs_rpmb->dev);
goto err_out;
}
/* Create unique ID by appending region number to device_id */
cid = kasprintf(GFP_KERNEL, "%s-R%d", hba->dev_info.device_id, region);
if (!cid) {
device_unregister(&ufs_rpmb->dev);
ret = -ENOMEM;
goto err_out;
}
descr.dev_id = cid;
descr.dev_id_len = strlen(cid);
descr.capacity = cap;
/* Register RPMB device */
rdev = rpmb_dev_register(&ufs_rpmb->dev, &descr);
if (IS_ERR(rdev)) {
dev_err(hba->dev, "Failed to register UFS RPMB device.\n");
device_unregister(&ufs_rpmb->dev);
ret = PTR_ERR(rdev);
goto err_out;
}
kfree(cid);
cid = NULL;
ufs_rpmb->rdev = rdev;
ufs_rpmb->region_id = region;
list_add_tail(&ufs_rpmb->node, &hba->rpmbs);
dev_info(hba->dev, "UFS RPMB region %d registered (capacity=%u)\n", region, cap);
}
return 0;
err_out:
kfree(cid);
list_for_each_entry_safe(it, tmp, &hba->rpmbs, node) {
list_del(&it->node);
device_unregister(&it->dev);
}
return ret;
}
/* UFS RPMB remove handler */
void ufs_rpmb_remove(struct ufs_hba *hba)
{
struct ufs_rpmb_dev *ufs_rpmb, *tmp;
if (list_empty(&hba->rpmbs))
return;
/* Remove all registered RPMB devices */
list_for_each_entry_safe(ufs_rpmb, tmp, &hba->rpmbs, node) {
dev_info(hba->dev, "Removing UFS RPMB region %d\n", ufs_rpmb->region_id);
/* Remove from list first */
list_del(&ufs_rpmb->node);
/* Unregister device */
device_unregister(&ufs_rpmb->dev);
}
dev_info(hba->dev, "All UFS RPMB devices unregistered\n");
}
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("OP-TEE UFS RPMB driver");

View File

@@ -235,7 +235,7 @@ static int ufshcd_ahit_to_us(u32 ahit)
 }
 
 /* Convert microseconds to Auto-Hibernate Idle Timer register value */
-static u32 ufshcd_us_to_ahit(unsigned int timer)
+u32 ufshcd_us_to_ahit(unsigned int timer)
 {
 	unsigned int scale;
@@ -245,6 +245,7 @@ static u32 ufshcd_us_to_ahit(unsigned int timer)
 	return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
 	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
 }
+EXPORT_SYMBOL_GPL(ufshcd_us_to_ahit);
 
 static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg)
 {
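Not part of the diff: a worked example of the conversion as a standalone program. It assumes the UFSHCI field layout from ufshci.h -- a 10-bit timer value in bits 9:0 and a scale exponent in bits 12:10, each scale step multiplying the unit by 10.

#include <stdio.h>

#define AHIT_TIMER_MAX		0x3FF	/* UFSHCI_AHIBERN8_TIMER_MASK, bits 9:0 */
#define AHIT_SCALE_SHIFT	10	/* UFSHCI_AHIBERN8_SCALE_MASK, bits 12:10 */
#define AHIT_SCALE_FACTOR	10	/* UFSHCI_AHIBERN8_SCALE_FACTOR */

static unsigned int us_to_ahit(unsigned int timer)
{
	unsigned int scale;

	for (scale = 0; timer > AHIT_TIMER_MAX; ++scale)
		timer /= AHIT_SCALE_FACTOR;

	return (scale << AHIT_SCALE_SHIFT) | timer;
}

int main(void)
{
	/* 10000 us -> timer 1000, scale 1 (10 us units): prints 0x7e8 */
	printf("0x%x\n", us_to_ahit(10000));
	return 0;
}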

View File

@@ -105,7 +105,7 @@ static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *j
 	if (dir != DMA_NONE) {
 		payload = &job->request_payload;
-		if (!payload || !payload->payload_len || !payload->sg_cnt)
+		if (!payload->payload_len || !payload->sg_cnt)
 			return -EINVAL;
 
 		sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);

View File

@@ -42,7 +42,6 @@
 #define UFS_CMD_TRACE_STRINGS \
 	EM(UFS_CMD_SEND,	"send_req") \
 	EM(UFS_CMD_COMP,	"complete_rsp") \
-	EM(UFS_DEV_COMP,	"dev_complete") \
 	EM(UFS_QUERY_SEND,	"query_send") \
 	EM(UFS_QUERY_COMP,	"query_complete") \
 	EM(UFS_QUERY_ERR,	"query_complete_err") \

View File

@@ -5,7 +5,6 @@
 enum ufs_trace_str_t {
 	UFS_CMD_SEND,
 	UFS_CMD_COMP,
-	UFS_DEV_COMP,
 	UFS_QUERY_SEND,
 	UFS_QUERY_COMP,
 	UFS_QUERY_ERR,

View File

@@ -38,10 +38,10 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
 }
 
 static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
-					  struct ufshcd_lrb *lrbp)
+					  struct scsi_cmnd *cmd)
 {
-	struct scsi_cmnd *cmd = lrbp->cmd;
 	const struct bio_crypt_ctx *crypt_ctx = scsi_cmd_to_rq(cmd)->crypt_ctx;
+	struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
 
 	if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt)
 		return hba->vops->fill_crypto_prdt(hba, crypt_ctx,
@@ -51,17 +51,19 @@ static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
 }
 
 static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
-					    struct ufshcd_lrb *lrbp)
+					    struct scsi_cmnd *cmd)
 {
+	struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
+
 	if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT))
 		return;
 
-	if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx))
+	if (!(scsi_cmd_to_rq(cmd)->crypt_ctx))
 		return;
 
 	/* Zeroize the PRDT because it can contain cryptographic keys. */
 	memzero_explicit(lrbp->ucd_prdt_ptr,
-			 ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd));
+			 ufshcd_sg_entry_size(hba) * scsi_sg_count(cmd));
 }
 
 bool ufshcd_crypto_enable(struct ufs_hba *hba);
@@ -82,13 +84,15 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
 					   struct request_desc_header *h) { }
 
 static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
-					  struct ufshcd_lrb *lrbp)
+					  struct scsi_cmnd *cmd)
 {
 	return 0;
 }
 
 static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
-					    struct ufshcd_lrb *lrbp) { }
+					    struct scsi_cmnd *cmd)
+{
+}
 
 static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
 {

View File

@@ -6,6 +6,8 @@
 #include <linux/pm_runtime.h>
 #include <ufs/ufshcd.h>
 
+void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs);
+
 static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
 {
 	return !hba->shutting_down;
@@ -65,7 +67,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
 			  struct cq_entry *cqe);
 int ufshcd_mcq_init(struct ufs_hba *hba);
 void ufshcd_mcq_disable(struct ufs_hba *hba);
-int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
+int ufshcd_get_hba_mac(struct ufs_hba *hba);
 int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
 					   struct request *req);
@@ -75,14 +77,19 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
 int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
 int ufshcd_mcq_abort(struct scsi_cmnd *cmd);
 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
-			     struct ufshcd_lrb *lrbp);
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd);
 
-#define SD_ASCII_STD true
-#define SD_RAW false
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
-			    u8 **buf, bool ascii);
+/**
+ * enum ufs_descr_fmt - UFS string descriptor format
+ * @SD_RAW: Raw UTF-16 format
+ * @SD_ASCII_STD: Convert to null-terminated ASCII string
+ */
+enum ufs_descr_fmt {
+	SD_RAW = 0,
+	SD_ASCII_STD = 1,
+};
+
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, u8 **buf, enum ufs_descr_fmt fmt);
 
 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
 int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
@@ -361,6 +368,26 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8
 	return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
 }
 
+/*
+ * Convert a block layer tag into a SCSI command pointer. This function is
+ * called once per I/O completion path and is also called from error paths.
+ */
+static inline struct scsi_cmnd *ufshcd_tag_to_cmd(struct ufs_hba *hba, u32 tag)
+{
+	/*
+	 * Host-wide tags are enabled in MCQ mode only. See also the
+	 * host->host_tagset assignment in ufs-mcq.c.
+	 */
+	struct blk_mq_tags *tags = hba->host->tag_set.shared_tags ?:
+		hba->host->tag_set.tags[0];
+	struct request *rq = blk_mq_tag_to_rq(tags, tag);
+
+	if (WARN_ON_ONCE(!rq))
+		return NULL;
+
+	return blk_mq_rq_to_pdu(rq);
+}
+
 static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
 	__must_hold(&q->sq_lock)
 {
@@ -411,4 +438,17 @@ static inline u32 ufshcd_mcq_get_sq_head_slot(struct ufs_hw_queue *q)
 	return val / sizeof(struct utp_transfer_req_desc);
 }
 
+#if IS_ENABLED(CONFIG_RPMB)
+int ufs_rpmb_probe(struct ufs_hba *hba);
+void ufs_rpmb_remove(struct ufs_hba *hba);
+#else
+static inline int ufs_rpmb_probe(struct ufs_hba *hba)
+{
+	return 0;
+}
+static inline void ufs_rpmb_remove(struct ufs_hba *hba)
+{
+}
+#endif
+
 #endif /* _UFSHCD_PRIV_H_ */
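Not part of the diff: a sketch of how the helper pair above is intended to be used on completion and error paths, in place of the old &hba->lrb[tag] array indexing. handle_tag() here is a hypothetical caller; the real call sites are in ufshcd.c and ufs-mcq.c.

static void handle_tag(struct ufs_hba *hba, u32 tag)
{
	struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, tag);
	struct ufshcd_lrb *lrbp;

	if (!cmd)	/* tag is not bound to an outstanding request */
		return;

	lrbp = scsi_cmd_priv(cmd);	/* the LRB lives in the command PDU now */
	/* ... inspect lrbp->lun, lrbp->ucd_rsp_ptr, ... */
}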

File diff suppressed because it is too large

View File

@@ -154,3 +154,16 @@ config SCSI_UFS_ROCKCHIP
 	  Select this if you have UFS controller on Rockchip chipset.
 
 	  If unsure, say N.
+
+config SCSI_UFS_AMD_VERSAL2
+	tristate "AMD Versal Gen 2 UFS controller platform driver"
+	depends on SCSI_UFSHCD_PLATFORM && (ARCH_ZYNQMP || COMPILE_TEST)
+	help
+	  This selects the AMD Versal Gen 2 specific additions on top of
+	  the UFSHCD DWC and UFSHCD platform driver. UFS host on AMD
+	  Versal Gen 2 needs some vendor specific configurations like PHY
+	  and vendor specific register accesses before accessing the
+	  hardware.
+
+	  Select this if you have UFS controller on AMD Versal Gen 2 SoC.
+
+	  If unsure, say N.

View File

@@ -13,3 +13,4 @@ obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
 obj-$(CONFIG_SCSI_UFS_ROCKCHIP) += ufs-rockchip.o
 obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o
 obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
+obj-$(CONFIG_SCSI_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o

View File

@@ -15,18 +15,26 @@
 #define TI_UFS_SS_RST_N_PCS	BIT(0)
 #define TI_UFS_SS_CLK_26MHZ	BIT(4)
 
+struct ti_j721e_ufs {
+	void __iomem *regbase;
+	u32 reg;
+};
+
 static int ti_j721e_ufs_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	struct ti_j721e_ufs *ufs;
 	unsigned long clk_rate;
-	void __iomem *regbase;
 	struct clk *clk;
-	u32 reg = 0;
 	int ret;
 
-	regbase = devm_platform_ioremap_resource(pdev, 0);
-	if (IS_ERR(regbase))
-		return PTR_ERR(regbase);
+	ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
+	if (!ufs)
+		return -ENOMEM;
+
+	ufs->regbase = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(ufs->regbase))
+		return PTR_ERR(ufs->regbase);
 
 	pm_runtime_enable(dev);
 	ret = pm_runtime_resume_and_get(dev);
@@ -42,12 +50,14 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
 	}
 	clk_rate = clk_get_rate(clk);
 	if (clk_rate == 26000000)
-		reg |= TI_UFS_SS_CLK_26MHZ;
+		ufs->reg |= TI_UFS_SS_CLK_26MHZ;
 	devm_clk_put(dev, clk);
 
 	/* Take UFS slave device out of reset */
-	reg |= TI_UFS_SS_RST_N_PCS;
-	writel(reg, regbase + TI_UFS_SS_CTRL);
+	ufs->reg |= TI_UFS_SS_RST_N_PCS;
+	writel(ufs->reg, ufs->regbase + TI_UFS_SS_CTRL);
+
+	dev_set_drvdata(dev, ufs);
 
 	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
 				   dev);
@@ -72,6 +82,16 @@ static void ti_j721e_ufs_remove(struct platform_device *pdev)
 	pm_runtime_disable(&pdev->dev);
 }
 
+static int ti_j721e_ufs_resume(struct device *dev)
+{
+	struct ti_j721e_ufs *ufs = dev_get_drvdata(dev);
+
+	writel(ufs->reg, ufs->regbase + TI_UFS_SS_CTRL);
+
+	return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ti_j721e_ufs_pm_ops, NULL, ti_j721e_ufs_resume);
+
 static const struct of_device_id ti_j721e_ufs_of_match[] = {
 	{
 		.compatible = "ti,j721e-ufs",
@@ -87,6 +107,7 @@ static struct platform_driver ti_j721e_ufs_driver = {
 	.driver = {
 		.name = "ti-j721e-ufs",
 		.of_match_table = ti_j721e_ufs_of_match,
+		.pm = pm_sleep_ptr(&ti_j721e_ufs_pm_ops),
 	},
 };
 module_platform_driver(ti_j721e_ufs_driver);

View File

@@ -0,0 +1,564 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2025 Advanced Micro Devices, Inc.
*
* Authors: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <ufs/unipro.h>
#include "ufshcd-dwc.h"
#include "ufshcd-pltfrm.h"
#include "ufshci-dwc.h"
/* PHY modes */
#define UFSHCD_DWC_PHY_MODE_ROM 0
#define MPHY_FAST_RX_AFE_CAL BIT(2)
#define MPHY_FW_CALIB_CFG_VAL BIT(8)
#define MPHY_RX_OVRD_EN BIT(3)
#define MPHY_RX_OVRD_VAL BIT(2)
#define MPHY_RX_ACK_MASK BIT(0)
#define TIMEOUT_MICROSEC 1000000
struct ufs_versal2_host {
struct ufs_hba *hba;
struct reset_control *rstc;
struct reset_control *rstphy;
u32 phy_mode;
unsigned long host_clk;
u8 attcompval0;
u8 attcompval1;
u8 ctlecompval0;
u8 ctlecompval1;
};
static int ufs_versal2_phy_reg_write(struct ufs_hba *hba, u32 addr, u32 val)
{
static struct ufshcd_dme_attr_val phy_write_attrs[] = {
{ UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGWRLSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGWRMSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGRDWRSEL), 1, DME_LOCAL },
{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
};
phy_write_attrs[0].mib_val = (u8)addr;
phy_write_attrs[1].mib_val = (u8)(addr >> 8);
phy_write_attrs[2].mib_val = (u8)val;
phy_write_attrs[3].mib_val = (u8)(val >> 8);
return ufshcd_dwc_dme_set_attrs(hba, phy_write_attrs, ARRAY_SIZE(phy_write_attrs));
}
static int ufs_versal2_phy_reg_read(struct ufs_hba *hba, u32 addr, u32 *val)
{
u32 mib_val;
int ret;
static struct ufshcd_dme_attr_val phy_read_attrs[] = {
{ UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGRDWRSEL), 0, DME_LOCAL },
{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
};
phy_read_attrs[0].mib_val = (u8)addr;
phy_read_attrs[1].mib_val = (u8)(addr >> 8);
ret = ufshcd_dwc_dme_set_attrs(hba, phy_read_attrs, ARRAY_SIZE(phy_read_attrs));
if (ret)
return ret;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDLSB), &mib_val);
if (ret)
return ret;
*val = mib_val;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDMSB), &mib_val);
if (ret)
return ret;
*val |= (mib_val << 8);
return 0;
}
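The two helpers above tunnel M-PHY register accesses through the DME CBCREG* attributes. The read-modify-write pattern they enable, used throughout ufs_versal2_setup_phy() below, could be factored like this (sketch only, not in the driver):

static int ufs_versal2_phy_set_bits(struct ufs_hba *hba, u32 addr, u32 bits)
{
	u32 reg;
	int ret;

	ret = ufs_versal2_phy_reg_read(hba, addr, &reg);
	if (ret)
		return ret;

	return ufs_versal2_phy_reg_write(hba, addr, reg | bits);
}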
static int ufs_versal2_enable_phy(struct ufs_hba *hba)
{
u32 offset, reg;
int ret;
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0);
if (ret)
return ret;
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
if (ret)
return ret;
/* Check Tx/Rx FSM states */
for (offset = 0; offset < 2; offset++) {
u32 time_left, mibsel;
time_left = TIMEOUT_MICROSEC;
mibsel = UIC_ARG_MIB_SEL(MTX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(offset));
do {
ret = ufshcd_dme_get(hba, mibsel, &reg);
if (ret)
return ret;
if (reg == TX_STATE_HIBERN8 || reg == TX_STATE_SLEEP ||
reg == TX_STATE_LSBURST)
break;
time_left--;
usleep_range(1, 5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "Invalid Tx FSM state.\n");
return -ETIMEDOUT;
}
time_left = TIMEOUT_MICROSEC;
mibsel = UIC_ARG_MIB_SEL(MRX_FSM_STATE, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(offset));
do {
ret = ufshcd_dme_get(hba, mibsel, &reg);
if (ret)
return ret;
if (reg == RX_STATE_HIBERN8 || reg == RX_STATE_SLEEP ||
reg == RX_STATE_LSBURST)
break;
time_left--;
usleep_range(1, 5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "Invalid Rx FSM state.\n");
return -ETIMEDOUT;
}
}
return 0;
}
static int ufs_versal2_setup_phy(struct ufs_hba *hba)
{
struct ufs_versal2_host *host = ufshcd_get_variant(hba);
int ret;
u32 reg;
/* Bypass RX-AFE offset calibrations (ATT/CTLE) */
ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(0), &reg);
if (ret)
return ret;
reg |= MPHY_FAST_RX_AFE_CAL;
ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(0), reg);
if (ret)
return ret;
ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(1), &reg);
if (ret)
return ret;
reg |= MPHY_FAST_RX_AFE_CAL;
ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(1), reg);
if (ret)
return ret;
/* Program ATT and CTLE compensation values */
if (host->attcompval0) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(0), host->attcompval0);
if (ret)
return ret;
}
if (host->attcompval1) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(1), host->attcompval1);
if (ret)
return ret;
}
if (host->ctlecompval0) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(0), host->ctlecompval0);
if (ret)
return ret;
}
if (host->ctlecompval1) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(1), host->ctlecompval1);
if (ret)
return ret;
}
ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(0), &reg);
if (ret)
return ret;
reg |= MPHY_FW_CALIB_CFG_VAL;
ret = ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(0), reg);
if (ret)
return ret;
ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(1), &reg);
if (ret)
return ret;
reg |= MPHY_FW_CALIB_CFG_VAL;
return ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(1), reg);
}
static int ufs_versal2_phy_init(struct ufs_hba *hba)
{
struct ufs_versal2_host *host = ufshcd_get_variant(hba);
u32 time_left;
bool is_ready;
int ret;
static const struct ufshcd_dme_attr_val rmmi_attrs[] = {
{ UIC_ARG_MIB(CBREFCLKCTRL2), CBREFREFCLK_GATE_OVR_EN, DME_LOCAL },
{ UIC_ARG_MIB(CBCRCTRL), 1, DME_LOCAL },
{ UIC_ARG_MIB(CBC10DIRECTCONF2), 1, DME_LOCAL },
{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
};
/* Wait for Tx/Rx config_rdy */
time_left = TIMEOUT_MICROSEC;
do {
time_left--;
ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&is_ready);
if (ret)
return ret;
if (!is_ready)
break;
usleep_range(1, 5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "Tx/Rx configuration signal busy.\n");
return -ETIMEDOUT;
}
ret = ufshcd_dwc_dme_set_attrs(hba, rmmi_attrs, ARRAY_SIZE(rmmi_attrs));
if (ret)
return ret;
ret = reset_control_deassert(host->rstphy);
if (ret) {
dev_err(hba->dev, "ufsphy reset deassert failed, err = %d\n", ret);
return ret;
}
/* Wait for SRAM init done */
time_left = TIMEOUT_MICROSEC;
do {
time_left--;
ret = zynqmp_pm_is_sram_init_done(&is_ready);
if (ret)
return ret;
if (is_ready)
break;
usleep_range(1, 5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "SRAM initialization failed.\n");
return -ETIMEDOUT;
}
ret = ufs_versal2_setup_phy(hba);
if (ret)
return ret;
return ufs_versal2_enable_phy(hba);
}
static int ufs_versal2_init(struct ufs_hba *hba)
{
struct ufs_versal2_host *host;
struct device *dev = hba->dev;
struct ufs_clk_info *clki;
int ret;
u32 cal;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->hba = hba;
ufshcd_set_variant(hba, host);
host->phy_mode = UFSHCD_DWC_PHY_MODE_ROM;
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core"))
host->host_clk = clk_get_rate(clki->clk);
}
host->rstc = devm_reset_control_get_exclusive(dev, "host");
if (IS_ERR(host->rstc)) {
dev_err(dev, "failed to get reset ctrl: host\n");
return PTR_ERR(host->rstc);
}
host->rstphy = devm_reset_control_get_exclusive(dev, "phy");
if (IS_ERR(host->rstphy)) {
dev_err(dev, "failed to get reset ctrl: phy\n");
return PTR_ERR(host->rstphy);
}
ret = reset_control_assert(host->rstc);
if (ret) {
dev_err(hba->dev, "host reset assert failed, err = %d\n", ret);
return ret;
}
ret = reset_control_assert(host->rstphy);
if (ret) {
dev_err(hba->dev, "phy reset assert failed, err = %d\n", ret);
return ret;
}
ret = zynqmp_pm_set_sram_bypass();
if (ret) {
dev_err(dev, "Bypass SRAM interface failed, err = %d\n", ret);
return ret;
}
ret = reset_control_deassert(host->rstc);
if (ret)
dev_err(hba->dev, "host reset deassert failed, err = %d\n", ret);
ret = zynqmp_pm_get_ufs_calibration_values(&cal);
if (ret) {
dev_err(dev, "failed to read calibration values\n");
return ret;
}
host->attcompval0 = (u8)cal;
host->attcompval1 = (u8)(cal >> 8);
host->ctlecompval0 = (u8)(cal >> 16);
host->ctlecompval1 = (u8)(cal >> 24);
hba->quirks |= UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING;
return 0;
}
static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int ret = 0;
if (status == PRE_CHANGE) {
ret = ufs_versal2_phy_init(hba);
if (ret)
dev_err(hba->dev, "Phy init failed (%d)\n", ret);
}
return ret;
}
static int ufs_versal2_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_versal2_host *host = ufshcd_get_variant(hba);
int ret = 0;
switch (status) {
case PRE_CHANGE:
if (host->host_clk)
ufshcd_writel(hba, host->host_clk / 1000000, DWC_UFS_REG_HCLKDIV);
break;
case POST_CHANGE:
ret = ufshcd_dwc_link_startup_notify(hba, status);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_req)
{
u32 time_left, reg, lane;
int ret;
for (lane = 0; lane < activelanes; lane++) {
time_left = TIMEOUT_MICROSEC;
ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
if (ret)
return ret;
reg |= MPHY_RX_OVRD_EN;
if (rx_req)
reg |= MPHY_RX_OVRD_VAL;
else
reg &= ~MPHY_RX_OVRD_VAL;
ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
if (ret)
return ret;
do {
ret = ufs_versal2_phy_reg_read(hba, RX_PCS_OUT(lane), &reg);
if (ret)
return ret;
reg &= MPHY_RX_ACK_MASK;
if (reg == rx_req)
break;
time_left--;
usleep_range(1, 5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "Invalid Rx Ack value.\n");
return -ETIMEDOUT;
}
}
return 0;
}
static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status,
const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_versal2_host *host = ufshcd_get_variant(hba);
u32 lane, reg, rate = 0;
int ret = 0;
if (status == PRE_CHANGE) {
memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr));
/* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */
if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 &&
!host->ctlecompval1) {
dev_req_params->pwr_rx = SLOW_MODE;
dev_req_params->pwr_tx = SLOW_MODE;
return 0;
}
if (dev_req_params->pwr_rx == SLOW_MODE || dev_req_params->pwr_rx == SLOWAUTO_MODE)
return 0;
if (dev_req_params->hs_rate == PA_HS_MODE_B)
rate = 1;
/* Select the rate */
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), rate);
if (ret)
return ret;
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
if (ret)
return ret;
ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 1);
if (ret)
return ret;
ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 0);
if (ret)
return ret;
/* Remove rx_req override */
for (lane = 0; lane < dev_req_params->lane_tx; lane++) {
ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
if (ret)
return ret;
reg &= ~MPHY_RX_OVRD_EN;
ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
if (ret)
return ret;
}
if (dev_req_params->lane_tx == UFS_LANE_2 && dev_req_params->lane_rx == UFS_LANE_2)
ret = ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
}
return ret;
}
static struct ufs_hba_variant_ops ufs_versal2_hba_vops = {
.name = "ufs-versal2-pltfm",
.init = ufs_versal2_init,
.link_startup_notify = ufs_versal2_link_startup_notify,
.hce_enable_notify = ufs_versal2_hce_enable_notify,
.pwr_change_notify = ufs_versal2_pwr_change_notify,
};
static const struct of_device_id ufs_versal2_pltfm_match[] = {
{
.compatible = "amd,versal2-ufs",
.data = &ufs_versal2_hba_vops,
},
{ },
};
MODULE_DEVICE_TABLE(of, ufs_versal2_pltfm_match);
static int ufs_versal2_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
/* Perform generic probe */
ret = ufshcd_pltfrm_init(pdev, &ufs_versal2_hba_vops);
if (ret)
dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", ret);
return ret;
}
static void ufs_versal2_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
}
static const struct dev_pm_ops ufs_versal2_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
};
static struct platform_driver ufs_versal2_pltfm = {
.probe = ufs_versal2_probe,
.remove = ufs_versal2_remove,
.driver = {
.name = "ufshcd-versal2",
.pm = &ufs_versal2_pm_ops,
.of_match_table = of_match_ptr(ufs_versal2_pltfm_match),
},
};
module_platform_driver(ufs_versal2_pltfm);
MODULE_AUTHOR("Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>");
MODULE_DESCRIPTION("AMD Versal Gen 2 UFS Host Controller driver");
MODULE_LICENSE("GPL");

View File

@@ -41,8 +41,7 @@ static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
 static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
 	{ .wmanufacturerid = UFS_ANY_VENDOR,
 	  .model = UFS_ANY_MODEL,
-	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
-		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
+	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
 	  .model = "H9HQ21AFAMZDAR",
 	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
@@ -280,12 +279,21 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
 			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
 			      REG_UFS_XOUFS_CTRL);
 
+		if (host->legacy_ip_ver)
+			return 0;
+
 		/* DDR_EN setting */
 		if (host->ip_ver >= IP_VER_MT6989) {
 			ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
 				    0x453000, REG_UFS_MMIO_OPT_CTRL_0);
 		}
+
+		if (host->ip_ver >= IP_VER_MT6991_A0) {
+			/* Enable multi-rtt */
+			ufshcd_rmwl(hba, MRTT_EN, MRTT_EN, REG_UFS_MMIO_OPT_CTRL_0);
+			/* Enable random performance improvement */
+			ufshcd_rmwl(hba, RDN_PFM_IMPV_DIS, 0, REG_UFS_MMIO_OPT_CTRL_0);
+		}
 	}
 
 	return 0;
@@ -405,7 +413,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
 {
 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
+	if (!host->legacy_ip_ver && host->ip_ver >= IP_VER_MT6983) {
 		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
 		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
 		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
@@ -422,6 +430,7 @@ static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
 	u64 timeout, time_checked;
 	u32 val, sm;
 	bool wait_idle;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
 	/* cannot use plain ktime_get() in suspend */
 	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
@@ -432,8 +441,13 @@ static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
 	do {
 		time_checked = ktime_get_mono_fast_ns();
-		ufs_mtk_dbg_sel(hba);
-		val = ufshcd_readl(hba, REG_UFS_PROBE);
+		if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
+			ufs_mtk_dbg_sel(hba);
+			val = ufshcd_readl(hba, REG_UFS_PROBE);
+		} else {
+			val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+			val = val >> 16;
+		}
 
 		sm = val & 0x1f;
@@ -465,13 +479,20 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
 {
 	ktime_t timeout, time_checked;
 	u32 val;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
 	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
 	do {
 		time_checked = ktime_get();
-		ufs_mtk_dbg_sel(hba);
-		val = ufshcd_readl(hba, REG_UFS_PROBE);
-		val = val >> 28;
+
+		if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
+			ufs_mtk_dbg_sel(hba);
+			val = ufshcd_readl(hba, REG_UFS_PROBE);
+			val = val >> 28;
+		} else {
+			val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+			val = val >> 24;
+		}
 
 		if (val == state)
 			return 0;
@@ -1109,18 +1130,6 @@ static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
 	}
 }
 
-/* Convert microseconds to Auto-Hibernate Idle Timer register value */
-static u32 ufs_mtk_us_to_ahit(unsigned int timer)
-{
-	unsigned int scale;
-
-	for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
-		timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
-
-	return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
-	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
-}
-
 static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
 {
 	unsigned int us;
@@ -1143,7 +1152,7 @@ static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
 			break;
 		}
 
-		hba->ahit = ufs_mtk_us_to_ahit(us);
+		hba->ahit = ufshcd_us_to_ahit(us);
 	}
 
 	ufs_mtk_setup_clk_gating(hba);
@@ -1332,6 +1341,36 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
 	return true;
 }
 
+static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba)
+{
+	int i;
+	u32 value;
+	u32 cnt, att, min;
+	struct attr_min {
+		u32 attr;
+		u32 min_value;
+	} pa_min_sync_length[] = {
+		{PA_TXHSG1SYNCLENGTH, 0x48},
+		{PA_TXHSG2SYNCLENGTH, 0x48},
+		{PA_TXHSG3SYNCLENGTH, 0x48},
+		{PA_TXHSG4SYNCLENGTH, 0x48},
+		{PA_TXHSG5SYNCLENGTH, 0x48}
+	};
+
+	cnt = sizeof(pa_min_sync_length) / sizeof(struct attr_min);
+	for (i = 0; i < cnt; i++) {
+		att = pa_min_sync_length[i].attr;
+		min = pa_min_sync_length[i].min_value;
+		ufshcd_dme_get(hba, UIC_ARG_MIB(att), &value);
+		if (value < min)
+			ufshcd_dme_set(hba, UIC_ARG_MIB(att), min);
+
+		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(att), &value);
+		if (value < min)
+			ufshcd_dme_peer_set(hba, UIC_ARG_MIB(att), min);
+	}
+}
+
 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
 				  const struct ufs_pa_layer_attr *dev_max_params,
 				  struct ufs_pa_layer_attr *dev_req_params)
@@ -1355,6 +1394,8 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
 	}
 
 	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
+		ufs_mtk_adjust_sync_length(hba);
+
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
@@ -1619,14 +1660,26 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
 {
 	int err;
+	u32 val;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
 	err = ufshcd_hba_enable(hba);
 	if (err)
 		return err;
 
 	err = ufs_mtk_unipro_set_lpm(hba, false);
-	if (err)
+	if (err) {
+		if (host->ip_ver < IP_VER_MT6899) {
+			ufs_mtk_dbg_sel(hba);
+			val = ufshcd_readl(hba, REG_UFS_PROBE);
+		} else {
+			val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+		}
+		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val);
+		val = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val);
 		return err;
+	}
 
 	err = ufshcd_uic_hibern8_exit(hba);
 	if (err)
@@ -1744,6 +1797,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 {
 	int err;
 	struct arm_smccc_res res;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
 	if (status == PRE_CHANGE) {
 		if (ufshcd_is_auto_hibern8_supported(hba))
@@ -1773,6 +1827,15 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 		ufs_mtk_sram_pwr_ctrl(false, res);
 
+	/* Release pm_qos/clk if in scale-up mode during suspend */
+	if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) {
+		ufshcd_pm_qos_update(hba, false);
+		_ufs_mtk_clk_scale(hba, false);
+	} else if ((!ufshcd_is_clkscaling_supported(hba) &&
+		   hba->pwr_info.gear_rx >= UFS_HS_G5)) {
+		_ufs_mtk_clk_scale(hba, false);
+	}
+
 	return 0;
 fail:
 	/*
@@ -1788,6 +1851,7 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
 	int err;
 	struct arm_smccc_res res;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
 	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
 		ufs_mtk_dev_vreg_set_lpm(hba, false);
@@ -1798,6 +1862,15 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (err)
 		goto fail;
 
+	/* Request pm_qos/clk if in scale-up mode after resume */
+	if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) {
+		ufshcd_pm_qos_update(hba, true);
+		_ufs_mtk_clk_scale(hba, true);
+	} else if ((!ufshcd_is_clkscaling_supported(hba) &&
+		   hba->pwr_info.gear_rx >= UFS_HS_G5)) {
+		_ufs_mtk_clk_scale(hba, true);
+	}
+
 	if (ufshcd_is_link_hibern8(hba)) {
 		err = ufs_mtk_link_set_hpm(hba);
 		if (err)
@@ -1889,15 +1962,13 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
 {
 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
 
-	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
-	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc) {
 		hba->vreg_info.vcc->always_on = true;
 		/*
 		 * VCC will be kept always-on thus we don't
-		 * need any delay during regulator operations
+		 * need any delay before putting device's VCC in LPM mode.
 		 */
-		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
-			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+		hba->dev_quirks &= ~UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM;
 	}
 
 	ufs_mtk_vreg_fix_vcc(hba);
@@ -2373,6 +2444,11 @@ static int ufs_mtk_system_suspend(struct device *dev)
 	struct arm_smccc_res res;
 	int ret;
 
+	if (hba->shutting_down) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	ret = ufshcd_system_suspend(dev);
 	if (ret)
 		goto out;

View File

@@ -20,6 +20,9 @@
 #define MCQ_MULTI_INTR_EN	BIT(2)
 #define MCQ_CMB_INTR_EN		BIT(3)
 #define MCQ_AH8			BIT(4)
+#define MON_EN			BIT(5)
+#define MRTT_EN			BIT(25)
+#define RDN_PFM_IMPV_DIS	BIT(28)
 
 #define MCQ_INTR_EN_MSK		(MCQ_MULTI_INTR_EN | MCQ_CMB_INTR_EN)
@@ -28,6 +31,7 @@
  */
 #define REG_UFS_XOUFS_CTRL	0x140
 #define REG_UFS_REFCLK_CTRL	0x144
+#define REG_UFS_UFS_MMIO_OTSD_CTRL	0x14C
 #define REG_UFS_MMIO_OPT_CTRL_0	0x160
 #define REG_UFS_EXTREG		0x2100
 #define REG_UFS_MPHYCTRL	0x2200

Some files were not shown because too many files have changed in this diff.