
Merge patch series "ufs: Add support for AMD Versal Gen2 UFS"

Ajay Neeli <ajay.neeli@amd.com> says:

This patch series adds support for the UFS driver on the AMD Versal
Gen 2 SoC.  It includes:

 - Device tree bindings and driver implementation.

 - Secure read support for retrieving UFS calibration values.

The UFS host driver is based upon the Synopsys DesignWare (DWC) UFS
architecture, utilizing the existing UFSHCD_DWC and UFSHCD_PLATFORM
drivers.

Link: https://patch.msgid.link/20251021113003.13650-1-ajay.neeli@amd.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Committed by Martin K. Petersen, 2025-10-29 23:03:15 -04:00
12 changed files with 912 additions and 1 deletion
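
As an illustration of the calibration flow described in the cover letter, the sketch below is not part of the series: the struct and function names are invented for the example. It shows how the packed 32-bit word returned by the new secure-read helper is split into per-lane ATT/CTLE compensation bytes, mirroring what ufs_versal2_init() in the driver further down does.

/*
 * Illustrative sketch only: fetch the packed calibration word through the
 * new firmware helper and split it into per-lane ATT/CTLE compensation
 * bytes, as ufs_versal2_init() does later in this series.
 */
#include <linux/firmware/xlnx-zynqmp-ufs.h>
#include <linux/types.h>

struct versal2_cal {			/* hypothetical container for the example */
	u8 att0, att1, ctle0, ctle1;
};

static int versal2_read_calibration(struct versal2_cal *c)
{
	u32 cal;
	int ret;

	ret = zynqmp_pm_get_ufs_calibration_values(&cal);
	if (ret)
		return ret;

	c->att0  = (u8)cal;		/* ATT compensation, lane 0 */
	c->att1  = (u8)(cal >> 8);	/* ATT compensation, lane 1 */
	c->ctle0 = (u8)(cal >> 16);	/* CTLE compensation, lane 0 */
	c->ctle1 = (u8)(cal >> 24);	/* CTLE compensation, lane 1 */
	return 0;
}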

@@ -0,0 +1,61 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/ufs/amd,versal2-ufs.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: AMD Versal Gen 2 UFS Host Controller

maintainers:
  - Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>

allOf:
  - $ref: ufs-common.yaml

properties:
  compatible:
    const: amd,versal2-ufs

  reg:
    maxItems: 1

  clocks:
    maxItems: 1

  clock-names:
    items:
      - const: core

  power-domains:
    maxItems: 1

  resets:
    maxItems: 2

  reset-names:
    items:
      - const: host
      - const: phy

required:
  - reg
  - clocks
  - clock-names
  - resets
  - reset-names

unevaluatedProperties: false

examples:
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    ufs@f10b0000 {
        compatible = "amd,versal2-ufs";
        reg = <0xf10b0000 0x1000>;
        clocks = <&ufs_core_clk>;
        clock-names = "core";
        resets = <&scmi_reset 4>, <&scmi_reset 35>;
        reset-names = "host", "phy";
        interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
        freq-table-hz = <0 0>;
    };

@@ -26339,6 +26339,13 @@ F: Documentation/devicetree/bindings/ufs/
F: Documentation/scsi/ufs.rst
F: drivers/ufs/core/

UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER AMD VERSAL2
M: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
M: Ajay Neeli <ajay.neeli@amd.com>
S: Maintained
F: Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml
F: drivers/ufs/host/ufs-amd-versal2.c

UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
M: Pedro Sousa <pedrom.sousa@synopsys.com>
L: linux-scsi@vger.kernel.org

@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Xilinx firmwares

obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o
obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o

@@ -0,0 +1,118 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Firmware Layer for UFS APIs
*
* Copyright (C) 2025 Advanced Micro Devices, Inc.
*/
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/module.h>
/* Register Node IDs */
#define PM_REGNODE_PMC_IOU_SLCR 0x30000002 /* PMC IOU SLCR */
#define PM_REGNODE_EFUSE_CACHE 0x30000003 /* EFUSE Cache */
/* Register Offsets for PMC IOU SLCR */
#define SRAM_CSR_OFFSET 0x104C /* SRAM Control and Status */
#define TXRX_CFGRDY_OFFSET 0x1054 /* M-PHY TX-RX Config ready */
/* Masks for SRAM Control and Status Register */
#define SRAM_CSR_INIT_DONE_MASK BIT(0) /* SRAM initialization done */
#define SRAM_CSR_EXT_LD_DONE_MASK BIT(1) /* SRAM External load done */
#define SRAM_CSR_BYPASS_MASK BIT(2) /* Bypass SRAM interface */
/* Mask to check M-PHY TX-RX configuration readiness */
#define TX_RX_CFG_RDY_MASK GENMASK(3, 0)
/* Register Offsets for EFUSE Cache */
#define UFS_CAL_1_OFFSET 0xBE8 /* UFS Calibration Value */
/**
* zynqmp_pm_is_mphy_tx_rx_config_ready - check M-PHY TX-RX config readiness
* @is_ready: Store output status (true/false)
*
* Return: Returns 0 on success or error value on failure.
*/
int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
{
u32 regval;
int ret;
if (!is_ready)
return -EINVAL;
ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, TXRX_CFGRDY_OFFSET, &regval);
if (ret)
return ret;
regval &= TX_RX_CFG_RDY_MASK;
if (regval)
*is_ready = true;
else
*is_ready = false;
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_is_mphy_tx_rx_config_ready);
/**
* zynqmp_pm_is_sram_init_done - check SRAM initialization
* @is_done: Store output status (true/false)
*
* Return: Returns 0 on success or error value on failure.
*/
int zynqmp_pm_is_sram_init_done(bool *is_done)
{
u32 regval;
int ret;
if (!is_done)
return -EINVAL;
ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &regval);
if (ret)
return ret;
regval &= SRAM_CSR_INIT_DONE_MASK;
if (regval)
*is_done = true;
else
*is_done = false;
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_is_sram_init_done);
/**
* zynqmp_pm_set_sram_bypass - Set SRAM bypass Control
*
* Return: Returns 0 on success or error value on failure.
*/
int zynqmp_pm_set_sram_bypass(void)
{
u32 sram_csr;
int ret;
ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &sram_csr);
if (ret)
return ret;
sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK;
sram_csr |= SRAM_CSR_BYPASS_MASK;
return zynqmp_pm_sec_mask_write_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET,
GENMASK(2, 1), sram_csr);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_sram_bypass);
/**
* zynqmp_pm_get_ufs_calibration_values - Read UFS calibration values
* @val: Store the calibration value
*
* Return: Returns 0 on success or error value on failure.
*/
int zynqmp_pm_get_ufs_calibration_values(u32 *val)
{
return zynqmp_pm_sec_read_reg(PM_REGNODE_EFUSE_CACHE, UFS_CAL_1_OFFSET, val);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_ufs_calibration_values);

@@ -1616,6 +1616,52 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
return zynqmp_pm_invoke_fn(PM_IOCTL, payload, 3, 0, IOCTL_GET_FEATURE_CONFIG, id);
}
/**
* zynqmp_pm_sec_read_reg - PM call to securely read from given offset
* of the node
* @node_id: Node Id of the device
* @offset: Offset to be used (20-bit)
* @ret_value: Output data read from the given offset after
* firmware access policy is successfully enforced
*
* Return: Returns 0 on success or error value on failure
*/
int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
u32 count = 1;
int ret;
if (!ret_value)
return -EINVAL;
ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 4, node_id, IOCTL_READ_REG,
offset, count);
*ret_value = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_sec_read_reg);
/**
* zynqmp_pm_sec_mask_write_reg - PM call to securely write to given offset
* of the node
* @node_id: Node Id of the device
* @offset: Offset to be used (20-bit)
* @mask: Mask to be used
* @value: Value to be written
*
* Return: Returns 0 on success or error value on failure
*/
int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, u32 mask,
u32 value)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 5, node_id, IOCTL_MASK_WRITE_REG,
offset, mask, value);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_sec_mask_write_reg);
/**
* zynqmp_pm_set_sd_config - PM call to set value of SD config registers
* @node: SD node ID
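
A brief usage sketch of the new secure register interface follows. It is illustrative only: the function and its parameters are hypothetical, and it simply shows the read-modify-write pattern that the UFS firmware layer above applies to the SRAM control register.

/*
 * Hypothetical example, not part of the patch: set bits in a
 * firmware-mediated register using the secure read/mask-write pair
 * declared in include/linux/firmware/xlnx-zynqmp.h.
 */
#include <linux/firmware/xlnx-zynqmp.h>

static int example_sec_set_bits(u32 node_id, u32 offset, u32 mask)
{
	u32 val;
	int ret;

	ret = zynqmp_pm_sec_read_reg(node_id, offset, &val);
	if (ret)
		return ret;

	val |= mask;
	/* The mask selects the bits to update, as in zynqmp_pm_set_sram_bypass() above. */
	return zynqmp_pm_sec_mask_write_reg(node_id, offset, mask, val);
}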

@@ -154,3 +154,16 @@ config SCSI_UFS_ROCKCHIP
Select this if you have UFS controller on Rockchip chipset.
If unsure, say N.
config SCSI_UFS_AMD_VERSAL2
tristate "AMD Versal Gen 2 UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && (ARCH_ZYNQMP || COMPILE_TEST)
help
This selects the AMD Versal Gen 2 specific additions on top of
the UFSHCD DWC and UFSHCD platform driver. UFS host on AMD
Versal Gen 2 needs some vendor specific configurations like PHY
and vendor specific register accesses before accessing the
hardware.
Select this if you have UFS controller on AMD Versal Gen 2 SoC.
If unsure, say N.

@@ -13,3 +13,4 @@ obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
obj-$(CONFIG_SCSI_UFS_ROCKCHIP) += ufs-rockchip.o
obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
obj-$(CONFIG_SCSI_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o

@@ -0,0 +1,564 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2025 Advanced Micro Devices, Inc.
*
* Authors: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <ufs/unipro.h>
#include "ufshcd-dwc.h"
#include "ufshcd-pltfrm.h"
#include "ufshci-dwc.h"
/* PHY modes */
#define UFSHCD_DWC_PHY_MODE_ROM 0
#define MPHY_FAST_RX_AFE_CAL BIT(2)
#define MPHY_FW_CALIB_CFG_VAL BIT(8)
#define MPHY_RX_OVRD_EN BIT(3)
#define MPHY_RX_OVRD_VAL BIT(2)
#define MPHY_RX_ACK_MASK BIT(0)
#define TIMEOUT_MICROSEC 1000000
struct ufs_versal2_host {
struct ufs_hba *hba;
struct reset_control *rstc;
struct reset_control *rstphy;
u32 phy_mode;
unsigned long host_clk;
u8 attcompval0;
u8 attcompval1;
u8 ctlecompval0;
u8 ctlecompval1;
};
static int ufs_versal2_phy_reg_write(struct ufs_hba *hba, u32 addr, u32 val)
{
static struct ufshcd_dme_attr_val phy_write_attrs[] = {
{ UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGWRLSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGWRMSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGRDWRSEL), 1, DME_LOCAL },
{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
};
phy_write_attrs[0].mib_val = (u8)addr;
phy_write_attrs[1].mib_val = (u8)(addr >> 8);
phy_write_attrs[2].mib_val = (u8)val;
phy_write_attrs[3].mib_val = (u8)(val >> 8);
return ufshcd_dwc_dme_set_attrs(hba, phy_write_attrs, ARRAY_SIZE(phy_write_attrs));
}
static int ufs_versal2_phy_reg_read(struct ufs_hba *hba, u32 addr, u32 *val)
{
u32 mib_val;
int ret;
static struct ufshcd_dme_attr_val phy_read_attrs[] = {
{ UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
{ UIC_ARG_MIB(CBCREGRDWRSEL), 0, DME_LOCAL },
{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
};
phy_read_attrs[0].mib_val = (u8)addr;
phy_read_attrs[1].mib_val = (u8)(addr >> 8);
ret = ufshcd_dwc_dme_set_attrs(hba, phy_read_attrs, ARRAY_SIZE(phy_read_attrs));
if (ret)
return ret;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDLSB), &mib_val);
if (ret)
return ret;
*val = mib_val;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDMSB), &mib_val);
if (ret)
return ret;
*val |= (mib_val << 8);
return 0;
}
static int ufs_versal2_enable_phy(struct ufs_hba *hba)
{
u32 offset, reg;
int ret;
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0);
if (ret)
return ret;
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
if (ret)
return ret;
/* Check Tx/Rx FSM states */
for (offset = 0; offset < 2; offset++) {
u32 time_left, mibsel;
time_left = TIMEOUT_MICROSEC;
mibsel = UIC_ARG_MIB_SEL(MTX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(offset));
do {
ret = ufshcd_dme_get(hba, mibsel, &reg);
if (ret)
return ret;
if (reg == TX_STATE_HIBERN8 || reg == TX_STATE_SLEEP ||
reg == TX_STATE_LSBURST)
break;
time_left--;
usleep_range(1, 5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "Invalid Tx FSM state.\n");
return -ETIMEDOUT;
}
time_left = TIMEOUT_MICROSEC;
mibsel = UIC_ARG_MIB_SEL(MRX_FSM_STATE, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(offset));
do {
ret = ufshcd_dme_get(hba, mibsel, &reg);
if (ret)
return ret;
if (reg == RX_STATE_HIBERN8 || reg == RX_STATE_SLEEP ||
reg == RX_STATE_LSBURST)
break;
time_left--;
usleep_range(1, 5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "Invalid Rx FSM state.\n");
return -ETIMEDOUT;
}
}
return 0;
}
static int ufs_versal2_setup_phy(struct ufs_hba *hba)
{
struct ufs_versal2_host *host = ufshcd_get_variant(hba);
int ret;
u32 reg;
/* Bypass RX-AFE offset calibrations (ATT/CTLE) */
ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(0), &reg);
if (ret)
return ret;
reg |= MPHY_FAST_RX_AFE_CAL;
ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(0), reg);
if (ret)
return ret;
ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(1), &reg);
if (ret)
return ret;
reg |= MPHY_FAST_RX_AFE_CAL;
ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(1), reg);
if (ret)
return ret;
/* Program ATT and CTLE compensation values */
if (host->attcompval0) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(0), host->attcompval0);
if (ret)
return ret;
}
if (host->attcompval1) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(1), host->attcompval1);
if (ret)
return ret;
}
if (host->ctlecompval0) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(0), host->ctlecompval0);
if (ret)
return ret;
}
if (host->ctlecompval1) {
ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(1), host->ctlecompval1);
if (ret)
return ret;
}
ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(0), &reg);
if (ret)
return ret;
reg |= MPHY_FW_CALIB_CFG_VAL;
ret = ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(0), reg);
if (ret)
return ret;
ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(1), &reg);
if (ret)
return ret;
reg |= MPHY_FW_CALIB_CFG_VAL;
return ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(1), reg);
}
static int ufs_versal2_phy_init(struct ufs_hba *hba)
{
struct ufs_versal2_host *host = ufshcd_get_variant(hba);
u32 time_left;
bool is_ready;
int ret;
static const struct ufshcd_dme_attr_val rmmi_attrs[] = {
{ UIC_ARG_MIB(CBREFCLKCTRL2), CBREFREFCLK_GATE_OVR_EN, DME_LOCAL },
{ UIC_ARG_MIB(CBCRCTRL), 1, DME_LOCAL },
{ UIC_ARG_MIB(CBC10DIRECTCONF2), 1, DME_LOCAL },
{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
};
/* Wait for Tx/Rx config_rdy */
time_left = TIMEOUT_MICROSEC;
do {
time_left--;
ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&is_ready);
if (ret)
return ret;
if (!is_ready)
break;
usleep_range(1, 5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "Tx/Rx configuration signal busy.\n");
return -ETIMEDOUT;
}
ret = ufshcd_dwc_dme_set_attrs(hba, rmmi_attrs, ARRAY_SIZE(rmmi_attrs));
if (ret)
return ret;
ret = reset_control_deassert(host->rstphy);
if (ret) {
dev_err(hba->dev, "ufsphy reset deassert failed, err = %d\n", ret);
return ret;
}
/* Wait for SRAM init done */
time_left = TIMEOUT_MICROSEC;
do {
time_left--;
ret = zynqmp_pm_is_sram_init_done(&is_ready);
if (ret)
return ret;
if (is_ready)
break;
usleep_range(1, 5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "SRAM initialization failed.\n");
return -ETIMEDOUT;
}
ret = ufs_versal2_setup_phy(hba);
if (ret)
return ret;
return ufs_versal2_enable_phy(hba);
}
static int ufs_versal2_init(struct ufs_hba *hba)
{
struct ufs_versal2_host *host;
struct device *dev = hba->dev;
struct ufs_clk_info *clki;
int ret;
u32 cal;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->hba = hba;
ufshcd_set_variant(hba, host);
host->phy_mode = UFSHCD_DWC_PHY_MODE_ROM;
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core"))
host->host_clk = clk_get_rate(clki->clk);
}
host->rstc = devm_reset_control_get_exclusive(dev, "host");
if (IS_ERR(host->rstc)) {
dev_err(dev, "failed to get reset ctrl: host\n");
return PTR_ERR(host->rstc);
}
host->rstphy = devm_reset_control_get_exclusive(dev, "phy");
if (IS_ERR(host->rstphy)) {
dev_err(dev, "failed to get reset ctrl: phy\n");
return PTR_ERR(host->rstphy);
}
ret = reset_control_assert(host->rstc);
if (ret) {
dev_err(hba->dev, "host reset assert failed, err = %d\n", ret);
return ret;
}
ret = reset_control_assert(host->rstphy);
if (ret) {
dev_err(hba->dev, "phy reset assert failed, err = %d\n", ret);
return ret;
}
ret = zynqmp_pm_set_sram_bypass();
if (ret) {
dev_err(dev, "Bypass SRAM interface failed, err = %d\n", ret);
return ret;
}
ret = reset_control_deassert(host->rstc);
if (ret)
dev_err(hba->dev, "host reset deassert failed, err = %d\n", ret);
ret = zynqmp_pm_get_ufs_calibration_values(&cal);
if (ret) {
dev_err(dev, "failed to read calibration values\n");
return ret;
}
host->attcompval0 = (u8)cal;
host->attcompval1 = (u8)(cal >> 8);
host->ctlecompval0 = (u8)(cal >> 16);
host->ctlecompval1 = (u8)(cal >> 24);
hba->quirks |= UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING;
return 0;
}
static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int ret = 0;
if (status == PRE_CHANGE) {
ret = ufs_versal2_phy_init(hba);
if (ret)
dev_err(hba->dev, "Phy init failed (%d)\n", ret);
}
return ret;
}
static int ufs_versal2_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_versal2_host *host = ufshcd_get_variant(hba);
int ret = 0;
switch (status) {
case PRE_CHANGE:
if (host->host_clk)
ufshcd_writel(hba, host->host_clk / 1000000, DWC_UFS_REG_HCLKDIV);
break;
case POST_CHANGE:
ret = ufshcd_dwc_link_startup_notify(hba, status);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_req)
{
u32 time_left, reg, lane;
int ret;
for (lane = 0; lane < activelanes; lane++) {
time_left = TIMEOUT_MICROSEC;
ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
if (ret)
return ret;
reg |= MPHY_RX_OVRD_EN;
if (rx_req)
reg |= MPHY_RX_OVRD_VAL;
else
reg &= ~MPHY_RX_OVRD_VAL;
ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
if (ret)
return ret;
do {
ret = ufs_versal2_phy_reg_read(hba, RX_PCS_OUT(lane), &reg);
if (ret)
return ret;
reg &= MPHY_RX_ACK_MASK;
if (reg == rx_req)
break;
time_left--;
usleep_range(1, 5);
} while (time_left);
if (!time_left) {
dev_err(hba->dev, "Invalid Rx Ack value.\n");
return -ETIMEDOUT;
}
}
return 0;
}
static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status,
const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_versal2_host *host = ufshcd_get_variant(hba);
u32 lane, reg, rate = 0;
int ret = 0;
if (status == PRE_CHANGE) {
memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr));
/* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */
if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 &&
!host->ctlecompval1) {
dev_req_params->pwr_rx = SLOW_MODE;
dev_req_params->pwr_tx = SLOW_MODE;
return 0;
}
if (dev_req_params->pwr_rx == SLOW_MODE || dev_req_params->pwr_rx == SLOWAUTO_MODE)
return 0;
if (dev_req_params->hs_rate == PA_HS_MODE_B)
rate = 1;
/* Select the rate */
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), rate);
if (ret)
return ret;
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
if (ret)
return ret;
ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 1);
if (ret)
return ret;
ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 0);
if (ret)
return ret;
/* Remove rx_req override */
for (lane = 0; lane < dev_req_params->lane_tx; lane++) {
ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
if (ret)
return ret;
reg &= ~MPHY_RX_OVRD_EN;
ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
if (ret)
return ret;
}
if (dev_req_params->lane_tx == UFS_LANE_2 && dev_req_params->lane_rx == UFS_LANE_2)
ret = ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
}
return ret;
}
static struct ufs_hba_variant_ops ufs_versal2_hba_vops = {
.name = "ufs-versal2-pltfm",
.init = ufs_versal2_init,
.link_startup_notify = ufs_versal2_link_startup_notify,
.hce_enable_notify = ufs_versal2_hce_enable_notify,
.pwr_change_notify = ufs_versal2_pwr_change_notify,
};
static const struct of_device_id ufs_versal2_pltfm_match[] = {
{
.compatible = "amd,versal2-ufs",
.data = &ufs_versal2_hba_vops,
},
{ },
};
MODULE_DEVICE_TABLE(of, ufs_versal2_pltfm_match);
static int ufs_versal2_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
/* Perform generic probe */
ret = ufshcd_pltfrm_init(pdev, &ufs_versal2_hba_vops);
if (ret)
dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", ret);
return ret;
}
static void ufs_versal2_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
}
static const struct dev_pm_ops ufs_versal2_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
};
static struct platform_driver ufs_versal2_pltfm = {
.probe = ufs_versal2_probe,
.remove = ufs_versal2_remove,
.driver = {
.name = "ufshcd-versal2",
.pm = &ufs_versal2_pm_ops,
.of_match_table = of_match_ptr(ufs_versal2_pltfm_match),
},
};
module_platform_driver(ufs_versal2_pltfm);
MODULE_AUTHOR("Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>");
MODULE_DESCRIPTION("AMD Versal Gen 2 UFS Host Controller driver");
MODULE_LICENSE("GPL");

@@ -12,6 +12,52 @@
#include <ufs/ufshcd.h>
/* RMMI Attributes */
#define CBREFCLKCTRL2 0x8132
#define CBCRCTRL 0x811F
#define CBC10DIRECTCONF2 0x810E
#define CBRATESEL 0x8114
#define CBCREGADDRLSB 0x8116
#define CBCREGADDRMSB 0x8117
#define CBCREGWRLSB 0x8118
#define CBCREGWRMSB 0x8119
#define CBCREGRDLSB 0x811A
#define CBCREGRDMSB 0x811B
#define CBCREGRDWRSEL 0x811C
#define CBREFREFCLK_GATE_OVR_EN BIT(7)
/* M-PHY Attributes */
#define MTX_FSM_STATE 0x41
#define MRX_FSM_STATE 0xC1
/* M-PHY registers */
#define RX_OVRD_IN_1(n) (0x3006 + ((n) * 0x100))
#define RX_PCS_OUT(n) (0x300F + ((n) * 0x100))
#define FAST_FLAGS(n) (0x401C + ((n) * 0x100))
#define RX_AFE_ATT_IDAC(n) (0x4000 + ((n) * 0x100))
#define RX_AFE_CTLE_IDAC(n) (0x4001 + ((n) * 0x100))
#define FW_CALIB_CCFG(n) (0x404D + ((n) * 0x100))
/* Tx/Rx FSM state */
enum rx_fsm_state {
RX_STATE_DISABLED = 0,
RX_STATE_HIBERN8 = 1,
RX_STATE_SLEEP = 2,
RX_STATE_STALL = 3,
RX_STATE_LSBURST = 4,
RX_STATE_HSBURST = 5,
};
enum tx_fsm_state {
TX_STATE_DISABLED = 0,
TX_STATE_HIBERN8 = 1,
TX_STATE_SLEEP = 2,
TX_STATE_STALL = 3,
TX_STATE_LSBURST = 4,
TX_STATE_HSBURST = 5,
};
struct ufshcd_dme_attr_val {
u32 attr_sel;
u32 mib_val;

@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Firmware layer for UFS APIs.
*
* Copyright (c) 2025 Advanced Micro Devices, Inc.
*/
#ifndef __FIRMWARE_XLNX_ZYNQMP_UFS_H__
#define __FIRMWARE_XLNX_ZYNQMP_UFS_H__
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready);
int zynqmp_pm_is_sram_init_done(bool *is_done);
int zynqmp_pm_set_sram_bypass(void);
int zynqmp_pm_get_ufs_calibration_values(u32 *val);
#else
static inline int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
{
return -ENODEV;
}
static inline int zynqmp_pm_is_sram_init_done(bool *is_done)
{
return -ENODEV;
}
static inline int zynqmp_pm_set_sram_bypass(void)
{
return -ENODEV;
}
static inline int zynqmp_pm_get_ufs_calibration_values(u32 *val)
{
return -ENODEV;
}
#endif
#endif /* __FIRMWARE_XLNX_ZYNQMP_UFS_H__ */
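
Because the header provides -ENODEV stubs when CONFIG_ZYNQMP_FIRMWARE is not reachable, a consumer can call the helpers unconditionally and just propagate the error. A minimal, hypothetical caller sketch (names invented for illustration):

/*
 * Hypothetical caller, for illustration only: it works both with and
 * without the Xilinx firmware driver; the stub versions above simply
 * return -ENODEV.
 */
#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp-ufs.h>
#include <linux/types.h>

static int example_check_sram_ready(void)
{
	bool done = false;
	int ret;

	ret = zynqmp_pm_is_sram_init_done(&done);
	if (ret)
		return ret;	/* -ENODEV when firmware support is absent */

	return done ? 0 : -EBUSY;
}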

@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp-ufs.h>

#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -241,6 +242,7 @@ enum pm_ioctl_id {
IOCTL_GET_FEATURE_CONFIG = 27,
/* IOCTL for Secure Read/Write Interface */
IOCTL_READ_REG = 28,
IOCTL_MASK_WRITE_REG = 29,
/* Dynamic SD/GEM configuration */
IOCTL_SET_SD_CONFIG = 30,
IOCTL_SET_GEM_CONFIG = 31,
@@ -619,6 +621,9 @@ int zynqmp_pm_feature(const u32 api_id);
int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value);
int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset,
u32 mask, u32 value);
int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset);
int zynqmp_pm_force_pwrdwn(const u32 target,
const enum zynqmp_pm_request_ack ack);
@@ -916,6 +921,17 @@ static inline int zynqmp_pm_request_wake(const u32 node,
return -ENODEV;
}
static inline int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
{
return -ENODEV;
}
static inline int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset,
u32 mask, u32 value)
{
return -ENODEV;
}
static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
{
return -ENODEV;

@@ -179,6 +179,7 @@
#define VS_POWERSTATE 0xD083
#define VS_MPHYCFGUPDT 0xD085
#define VS_DEBUGOMC 0xD09E
#define VS_MPHYDISABLE 0xD0C1

#define PA_GRANULARITY_MIN_VAL 1
#define PA_GRANULARITY_MAX_VAL 6