From d3eba21c71708746672587f1de2cc33e6a10d61a Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:43 -0700 Subject: [PATCH 01/12] scsi: ufs: core: Introduce a new ufshcd vops negotiate_pwr_mode() Most vendor specific implementations of vops pwr_change_notify(PRE_CHANGE) are fulfilling two things at once: - Vendor specific target power mode negotiation - Vendor specific power mode change preparation When TX Equalization is added into consideration, before power mode change to a target power mode, TX Equalization Training (EQTR) needs to be done for that target power mode. In addition, UFSHCI spec requires starting TX EQTR from HS-G1 (the most reliable High Speed Gear). Adding TX EQTR before pwr_change_notify(PRE_CHANGE) is not applicable because we don't know the negotiated power mode yet. Adding TX EQTR post pwr_change_notify(PRE_CHANGE) is inappropriate because pwr_change_notify(PRE_CHANGE) has finished preparation for a power mode change to negotiated power mode, yet we are changing power mode to HS-G1 for TX EQTR. Add a new vops negotiate_pwr_mode() so that vendor specific power mode negotiation can be fulfilled in its vendor specific implementations. Later on, TX EQTR can be added post vops negotiate_pwr_mode() and before vops pwr_change_notify(PRE_CHANGE). Reviewed-by: Bean Huo Reviewed-by: Bart Van Assche Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-2-can.guo@oss.qualcomm.com Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufshcd-priv.h | 14 +++++- drivers/ufs/core/ufshcd.c | 70 ++++++++++++++++++++++++------ drivers/ufs/host/ufs-amd-versal2.c | 3 -- drivers/ufs/host/ufs-exynos.c | 34 +++++++-------- drivers/ufs/host/ufs-hisi.c | 23 +++++----- drivers/ufs/host/ufs-mediatek.c | 40 ++++++++--------- drivers/ufs/host/ufs-qcom.c | 24 +++++----- drivers/ufs/host/ufs-sprd.c | 3 -- drivers/ufs/host/ufshcd-pci.c | 6 +-- include/ufs/ufshcd.h | 17 +++++--- 10 files changed, 143 insertions(+), 91 deletions(-) diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index 37c32071e754..f1cec1cd01d2 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -167,14 +167,24 @@ static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba, return 0; } +static inline int ufshcd_vops_negotiate_pwr_mode(struct ufs_hba *hba, + const struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + if (hba->vops && hba->vops->negotiate_pwr_mode) + return hba->vops->negotiate_pwr_mode(hba, dev_max_params, + dev_req_params); + + return -ENOTSUPP; +} + static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, - const struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { if (hba->vops && hba->vops->pwr_change_notify) return hba->vops->pwr_change_notify(hba, status, - dev_max_params, dev_req_params); + dev_req_params); return -ENOTSUPP; } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 847b55789bb8..33bbdd940b06 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -336,8 +336,6 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, bool scale_up); static irqreturn_t ufshcd_intr(int irq, void *__hba); -static int ufshcd_change_power_mode(struct ufs_hba *hba, - struct ufs_pa_layer_attr *pwr_mode); 
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, @@ -4663,8 +4661,26 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) return 0; } -static int ufshcd_change_power_mode(struct ufs_hba *hba, - struct ufs_pa_layer_attr *pwr_mode) +/** + * ufshcd_dme_change_power_mode() - UniPro DME Power Mode change sequence + * @hba: per-adapter instance + * @pwr_mode: pointer to the target power mode (gear/lane) attributes + * + * This function handles the low-level DME (Device Management Entity) + * configuration required to transition the UFS link to a new power mode. It + * performs the following steps: + * 1. Checks if the requested mode matches the current state. + * 2. Sets M-PHY and UniPro attributes including Gear (PA_RXGEAR/TXGEAR), + * Lanes, Termination, and HS Series (PA_HSSERIES). + * 3. Configures default UniPro timeout values (DL_FC0, etc.) unless + * explicitly skipped via quirks. + * 4. Triggers the actual hardware mode change via ufshcd_uic_change_pwr_mode(). + * 5. Updates the HBA's cached power information on success. + * + * Return: 0 on success, non-zero error code on failure. + */ +static int ufshcd_dme_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode) { int ret; @@ -4748,6 +4764,34 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba, return ret; } +/** + * ufshcd_change_power_mode() - Change UFS Link Power Mode + * @hba: per-adapter instance + * @pwr_mode: pointer to the target power mode (gear/lane) attributes + * + * This function handles the high-level sequence for changing the UFS link + * power mode. It triggers vendor-specific pre-change notification, + * executes the DME (Device Management Entity) power mode change sequence, + * and, upon success, triggers vendor-specific post-change notification. + * + * Return: 0 on success, non-zero error code on failure. 
+ */ +int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode) +{ + int ret; + + ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, pwr_mode); + + ret = ufshcd_dme_change_power_mode(hba, pwr_mode); + + if (!ret) + ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, pwr_mode); + + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_change_power_mode); + /** * ufshcd_config_pwr_mode - configure a new power mode * @hba: per-adapter instance @@ -4761,19 +4805,17 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr final_params = { 0 }; int ret; - ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, - desired_pwr_mode, &final_params); + ret = ufshcd_vops_negotiate_pwr_mode(hba, desired_pwr_mode, + &final_params); + if (ret) { + if (ret != -ENOTSUPP) + dev_err(hba->dev, "Failed to negotiate power mode: %d, use desired as is\n", + ret); - if (ret) memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); + } - ret = ufshcd_change_power_mode(hba, &final_params); - - if (!ret) - ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, - &final_params); - - return ret; + return ufshcd_change_power_mode(hba, &final_params); } EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); diff --git a/drivers/ufs/host/ufs-amd-versal2.c b/drivers/ufs/host/ufs-amd-versal2.c index 6c454ae8a9c8..2154d6286817 100644 --- a/drivers/ufs/host/ufs-amd-versal2.c +++ b/drivers/ufs/host/ufs-amd-versal2.c @@ -443,7 +443,6 @@ static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_ } static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, - const struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { struct ufs_versal2_host *host = ufshcd_get_variant(hba); @@ -451,8 +450,6 @@ static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_ch int ret = 0; if (status == PRE_CHANGE) { - memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr)); - /* If 
it is not a calibrated part, switch PWRMODE to SLOW_MODE */ if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 && !host->ctlecompval1) { diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 76fee3a79c77..77a6c8e44485 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -818,12 +818,10 @@ static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba) } static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, - const struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); struct phy *generic_phy = ufs->phy; - struct ufs_host_params host_params; int ret; if (!dev_req_params) { @@ -832,18 +830,6 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, goto out; } - ufshcd_init_host_params(&host_params); - - /* This driver only support symmetric gear setting e.g. hs_tx_gear == hs_rx_gear */ - host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba); - host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba); - - ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); - if (ret) { - pr_err("%s: failed to determine capabilities\n", __func__); - goto out; - } - if (ufs->drv_data->pre_pwr_change) ufs->drv_data->pre_pwr_change(ufs, dev_req_params); @@ -1677,17 +1663,30 @@ static int exynos_ufs_link_startup_notify(struct ufs_hba *hba, return ret; } +static int exynos_ufs_negotiate_pwr_mode(struct ufs_hba *hba, + const struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct ufs_host_params host_params; + + ufshcd_init_host_params(&host_params); + + /* This driver only support symmetric gear setting e.g. 
hs_tx_gear == hs_rx_gear */ + host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba); + host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba); + + return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); +} + static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, - const struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { int ret = 0; switch (status) { case PRE_CHANGE: - ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params, - dev_req_params); + ret = exynos_ufs_pre_pwr_mode(hba, dev_req_params); break; case POST_CHANGE: ret = exynos_ufs_post_pwr_mode(hba, dev_req_params); @@ -2015,6 +2014,7 @@ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = { .exit = exynos_ufs_exit, .hce_enable_notify = exynos_ufs_hce_enable_notify, .link_startup_notify = exynos_ufs_link_startup_notify, + .negotiate_pwr_mode = exynos_ufs_negotiate_pwr_mode, .pwr_change_notify = exynos_ufs_pwr_change_notify, .setup_clocks = exynos_ufs_setup_clocks, .setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req, diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c index 6f2e6bf31225..993e20ac211d 100644 --- a/drivers/ufs/host/ufs-hisi.c +++ b/drivers/ufs/host/ufs-hisi.c @@ -298,6 +298,17 @@ static void ufs_hisi_set_dev_cap(struct ufs_host_params *host_params) ufshcd_init_host_params(host_params); } +static int ufs_hisi_negotiate_pwr_mode(struct ufs_hba *hba, + const struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct ufs_host_params host_params; + + ufs_hisi_set_dev_cap(&host_params); + + return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); +} + static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) { struct ufs_hisi_host *host = ufshcd_get_variant(hba); @@ -362,10 +373,8 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba, enum 
ufs_notify_change_status status, - const struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { - struct ufs_host_params host_params; int ret = 0; if (!dev_req_params) { @@ -377,14 +386,6 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba, switch (status) { case PRE_CHANGE: - ufs_hisi_set_dev_cap(&host_params); - ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); - if (ret) { - dev_err(hba->dev, - "%s: failed to determine capabilities\n", __func__); - goto out; - } - ufs_hisi_pwr_change_pre_change(hba); break; case POST_CHANGE: @@ -543,6 +544,7 @@ static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = { .name = "hi3660", .init = ufs_hi3660_init, .link_startup_notify = ufs_hisi_link_startup_notify, + .negotiate_pwr_mode = ufs_hisi_negotiate_pwr_mode, .pwr_change_notify = ufs_hisi_pwr_change_notify, .suspend = ufs_hisi_suspend, .resume = ufs_hisi_resume, @@ -552,6 +554,7 @@ static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = { .name = "hi3670", .init = ufs_hi3670_init, .link_startup_notify = ufs_hisi_link_startup_notify, + .negotiate_pwr_mode = ufs_hisi_negotiate_pwr_mode, .pwr_change_notify = ufs_hisi_pwr_change_notify, .suspend = ufs_hisi_suspend, .resume = ufs_hisi_resume, diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index b3daaa07e925..2ad7ea855798 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -1317,6 +1317,23 @@ static int ufs_mtk_init(struct ufs_hba *hba) return err; } +static int ufs_mtk_negotiate_pwr_mode(struct ufs_hba *hba, + const struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct ufs_host_params host_params; + + ufshcd_init_host_params(&host_params); + host_params.hs_rx_gear = UFS_HS_G5; + host_params.hs_tx_gear = UFS_HS_G5; + + if (dev_max_params->pwr_rx == SLOW_MODE || + dev_max_params->pwr_tx == SLOW_MODE) + host_params.desired_working_mode = 
UFS_PWM_MODE; + + return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); +} + static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba, struct ufs_pa_layer_attr *dev_req_params) { @@ -1372,26 +1389,10 @@ static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba) } static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, - const struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - struct ufs_host_params host_params; - int ret; - - ufshcd_init_host_params(&host_params); - host_params.hs_rx_gear = UFS_HS_G5; - host_params.hs_tx_gear = UFS_HS_G5; - - if (dev_max_params->pwr_rx == SLOW_MODE || - dev_max_params->pwr_tx == SLOW_MODE) - host_params.desired_working_mode = UFS_PWM_MODE; - - ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); - if (ret) { - pr_info("%s: failed to determine capabilities\n", - __func__); - } + int ret = 0; if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) { ufs_mtk_adjust_sync_length(hba); @@ -1503,7 +1504,6 @@ static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba) static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status stage, - const struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { int ret = 0; @@ -1515,8 +1515,7 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba, reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); ufs_mtk_auto_hibern8_disable(hba); } - ret = ufs_mtk_pre_pwr_change(hba, dev_max_params, - dev_req_params); + ret = ufs_mtk_pre_pwr_change(hba, dev_req_params); break; case POST_CHANGE: if (ufshcd_is_auto_hibern8_supported(hba)) @@ -2318,6 +2317,7 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = { .setup_clocks = ufs_mtk_setup_clocks, .hce_enable_notify = ufs_mtk_hce_enable_notify, .link_startup_notify = ufs_mtk_link_startup_notify, + .negotiate_pwr_mode = ufs_mtk_negotiate_pwr_mode, 
.pwr_change_notify = ufs_mtk_pwr_change_notify, .apply_dev_quirks = ufs_mtk_apply_dev_quirks, .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks, diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 375fd24ba458..cdc769886e82 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -966,13 +966,21 @@ static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_l } } -static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, - enum ufs_notify_change_status status, - const struct ufs_pa_layer_attr *dev_max_params, - struct ufs_pa_layer_attr *dev_req_params) +static int ufs_qcom_negotiate_pwr_mode(struct ufs_hba *hba, + const struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct ufs_host_params *host_params = &host->host_params; + + return ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params); +} + +static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); int ret = 0; if (!dev_req_params) { @@ -982,13 +990,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, switch (status) { case PRE_CHANGE: - ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params); - if (ret) { - dev_err(hba->dev, "%s: failed to determine capabilities\n", - __func__); - return ret; - } - /* * During UFS driver probe, always update the PHY gear to match the negotiated * gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled, @@ -2341,6 +2342,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .setup_clocks = ufs_qcom_setup_clocks, .hce_enable_notify = ufs_qcom_hce_enable_notify, .link_startup_notify = ufs_qcom_link_startup_notify, + .negotiate_pwr_mode = ufs_qcom_negotiate_pwr_mode, .pwr_change_notify = 
ufs_qcom_pwr_change_notify, .apply_dev_quirks = ufs_qcom_apply_dev_quirks, .fixup_dev_quirks = ufs_qcom_fixup_dev_quirks, diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c index 65bd8fb96b99..a5e8c591bead 100644 --- a/drivers/ufs/host/ufs-sprd.c +++ b/drivers/ufs/host/ufs-sprd.c @@ -161,14 +161,11 @@ static int ufs_sprd_common_init(struct ufs_hba *hba) static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, - const struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { struct ufs_sprd_host *host = ufshcd_get_variant(hba); if (status == PRE_CHANGE) { - memcpy(dev_req_params, dev_max_params, - sizeof(struct ufs_pa_layer_attr)); if (host->unipro_ver >= UFS_UNIPRO_VER_1_8) ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx, PA_INITIAL_ADAPT); diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c index 5f65dfad1a71..8a4f2381a32e 100644 --- a/drivers/ufs/host/ufshcd-pci.c +++ b/drivers/ufs/host/ufshcd-pci.c @@ -145,7 +145,7 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes) pwr_info.lane_rx = lanes; pwr_info.lane_tx = lanes; - ret = ufshcd_config_pwr_mode(hba, &pwr_info); + ret = ufshcd_change_power_mode(hba, &pwr_info); if (ret) dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n", __func__, lanes, ret); @@ -154,17 +154,15 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes) static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, - const struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { int err = 0; switch (status) { case PRE_CHANGE: - if (ufshcd_is_hs_mode(dev_max_params) && + if (ufshcd_is_hs_mode(dev_req_params) && (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2)) ufs_intel_set_lanes(hba, 2); - memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params)); break; case POST_CHANGE: if (ufshcd_is_hs_mode(dev_req_params)) { diff 
--git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 8563b6648976..51c2555bea73 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -302,11 +302,10 @@ struct ufs_pwr_mode_info { * variant specific Uni-Pro initialization. * @link_startup_notify: called before and after Link startup is carried out * to allow variant specific Uni-Pro initialization. + * @negotiate_pwr_mode: called to negotiate power mode. * @pwr_change_notify: called before and after a power mode change * is carried out to allow vendor spesific capabilities - * to be set. PRE_CHANGE can modify final_params based - * on desired_pwr_mode, but POST_CHANGE must not alter - * the final_params parameter + * to be set. * @setup_xfer_req: called before any transfer request is issued * to set some things * @setup_task_mgmt: called before any task management request is issued @@ -347,10 +346,12 @@ struct ufs_hba_variant_ops { enum ufs_notify_change_status); int (*link_startup_notify)(struct ufs_hba *, enum ufs_notify_change_status); - int (*pwr_change_notify)(struct ufs_hba *, - enum ufs_notify_change_status status, - const struct ufs_pa_layer_attr *desired_pwr_mode, - struct ufs_pa_layer_attr *final_params); + int (*negotiate_pwr_mode)(struct ufs_hba *hba, + const struct ufs_pa_layer_attr *desired_pwr_mode, + struct ufs_pa_layer_attr *final_params); + int (*pwr_change_notify)(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *final_params); void (*setup_xfer_req)(struct ufs_hba *hba, int tag, bool is_scsi_cmd); void (*setup_task_mgmt)(struct ufs_hba *, int, u8); @@ -1361,6 +1362,8 @@ extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set, u32 mib_val, u8 peer); extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, u32 *mib_val, u8 peer); +extern int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode); extern int ufshcd_config_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr 
*desired_pwr_mode); extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode); From c91c83671642d6140b703e999e2aff2d7ad57c74 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:44 -0700 Subject: [PATCH 02/12] scsi: ufs: core: Pass force_pmc to ufshcd_config_pwr_mode() as a parameter Currently, callers must manually toggle hba->force_pmc before and after calling ufshcd_config_pwr_mode() to force a Power Mode change. Introduce enum ufshcd_pmc_policy and refactor ufshcd_config_pwr_mode() to accept pmc_policy as a parameter to force a Power Mode change. Reviewed-by: Bart Van Assche Reviewed-by: Bean Huo Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-3-can.guo@oss.qualcomm.com Signed-off-by: Martin K. Petersen --- drivers/ufs/core/ufshcd.c | 35 ++++++++++++++++++++++------------- drivers/ufs/host/ufshcd-pci.c | 3 ++- include/ufs/ufshcd.h | 19 +++++++++++++++---- 3 files changed, 39 insertions(+), 18 deletions(-) diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 33bbdd940b06..44faab8b1770 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1408,7 +1408,8 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up config_pwr_mode: /* check if the power mode needs to be changed or not? 
*/ - ret = ufshcd_config_pwr_mode(hba, &new_pwr_info); + ret = ufshcd_config_pwr_mode(hba, &new_pwr_info, + UFSHCD_PMC_POLICY_DONT_FORCE); if (ret) dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", __func__, ret, @@ -4249,7 +4250,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, pwr_mode_change = true; } if (pwr_mode_change) { - ret = ufshcd_change_power_mode(hba, &temp_pwr_info); + ret = ufshcd_change_power_mode(hba, &temp_pwr_info, + UFSHCD_PMC_POLICY_DONT_FORCE); if (ret) goto out; } @@ -4273,7 +4275,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) && pwr_mode_change) - ufshcd_change_power_mode(hba, &orig_pwr_info); + ufshcd_change_power_mode(hba, &orig_pwr_info, + UFSHCD_PMC_POLICY_DONT_FORCE); out: return ret; } @@ -4665,6 +4668,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) * ufshcd_dme_change_power_mode() - UniPro DME Power Mode change sequence * @hba: per-adapter instance * @pwr_mode: pointer to the target power mode (gear/lane) attributes + * @pmc_policy: Power Mode change policy * * This function handles the low-level DME (Device Management Entity) * configuration required to transition the UFS link to a new power mode. It @@ -4680,12 +4684,13 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) * Return: 0 on success, non-zero error code on failure. 
*/ static int ufshcd_dme_change_power_mode(struct ufs_hba *hba, - struct ufs_pa_layer_attr *pwr_mode) + struct ufs_pa_layer_attr *pwr_mode, + enum ufshcd_pmc_policy pmc_policy) { int ret; /* if already configured to the requested pwr_mode */ - if (!hba->force_pmc && + if (pmc_policy == UFSHCD_PMC_POLICY_DONT_FORCE && pwr_mode->gear_rx == hba->pwr_info.gear_rx && pwr_mode->gear_tx == hba->pwr_info.gear_tx && pwr_mode->lane_rx == hba->pwr_info.lane_rx && @@ -4768,6 +4773,7 @@ static int ufshcd_dme_change_power_mode(struct ufs_hba *hba, * ufshcd_change_power_mode() - Change UFS Link Power Mode * @hba: per-adapter instance * @pwr_mode: pointer to the target power mode (gear/lane) attributes + * @pmc_policy: Power Mode change policy * * This function handles the high-level sequence for changing the UFS link * power mode. It triggers vendor-specific pre-change notification, @@ -4777,13 +4783,14 @@ static int ufshcd_dme_change_power_mode(struct ufs_hba *hba, * Return: 0 on success, non-zero error code on failure. */ int ufshcd_change_power_mode(struct ufs_hba *hba, - struct ufs_pa_layer_attr *pwr_mode) + struct ufs_pa_layer_attr *pwr_mode, + enum ufshcd_pmc_policy pmc_policy) { int ret; ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, pwr_mode); - ret = ufshcd_dme_change_power_mode(hba, pwr_mode); + ret = ufshcd_dme_change_power_mode(hba, pwr_mode, pmc_policy); if (!ret) ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, pwr_mode); @@ -4796,11 +4803,13 @@ EXPORT_SYMBOL_GPL(ufshcd_change_power_mode); * ufshcd_config_pwr_mode - configure a new power mode * @hba: per-adapter instance * @desired_pwr_mode: desired power configuration + * @pmc_policy: Power Mode change policy * * Return: 0 upon success; < 0 upon failure. 
*/ int ufshcd_config_pwr_mode(struct ufs_hba *hba, - struct ufs_pa_layer_attr *desired_pwr_mode) + struct ufs_pa_layer_attr *desired_pwr_mode, + enum ufshcd_pmc_policy pmc_policy) { struct ufs_pa_layer_attr final_params = { 0 }; int ret; @@ -4815,7 +4824,7 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba, memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); } - return ufshcd_change_power_mode(hba, &final_params); + return ufshcd_change_power_mode(hba, &final_params, pmc_policy); } EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); @@ -6872,14 +6881,13 @@ static void ufshcd_err_handler(struct work_struct *work) * are sent via bsg and/or sysfs. */ down_write(&hba->clk_scaling_lock); - hba->force_pmc = true; - pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); + pmc_err = ufshcd_config_pwr_mode(hba, &hba->pwr_info, + UFSHCD_PMC_POLICY_FORCE); if (pmc_err) { needs_reset = true; dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n", __func__, pmc_err); } - hba->force_pmc = false; ufshcd_print_pwr_info(hba); up_write(&hba->clk_scaling_lock); spin_lock_irqsave(hba->host->host_lock, flags); @@ -9154,7 +9162,8 @@ static int ufshcd_post_device_init(struct ufs_hba *hba) if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) ufshcd_set_dev_ref_clk(hba); /* Gear up to HS gear. 
*/ - ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info, + UFSHCD_PMC_POLICY_DONT_FORCE); if (ret) { dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", __func__, ret); diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c index 8a4f2381a32e..38d458711c99 100644 --- a/drivers/ufs/host/ufshcd-pci.c +++ b/drivers/ufs/host/ufshcd-pci.c @@ -145,7 +145,8 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes) pwr_info.lane_rx = lanes; pwr_info.lane_tx = lanes; - ret = ufshcd_change_power_mode(hba, &pwr_info); + ret = ufshcd_change_power_mode(hba, &pwr_info, + UFSHCD_PMC_POLICY_DONT_FORCE); if (ret) dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n", __func__, lanes, ret); diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 51c2555bea73..16facaee3e77 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -529,6 +529,17 @@ enum ufshcd_state { UFSHCD_STATE_ERROR, }; +/** + * enum ufshcd_pmc_policy - Power Mode change policy + * @UFSHCD_PMC_POLICY_DONT_FORCE: Do not force a Power Mode change. + * @UFSHCD_PMC_POLICY_FORCE: Force a Power Mode change even if current Power + * Mode is same as target Power Mode. 
+ */ +enum ufshcd_pmc_policy { + UFSHCD_PMC_POLICY_DONT_FORCE, + UFSHCD_PMC_POLICY_FORCE, +}; + enum ufshcd_quirks { /* Interrupt aggregation support is broken */ UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0, @@ -882,7 +893,6 @@ enum ufshcd_mcq_opr { * @saved_uic_err: sticky UIC error mask * @ufs_stats: various error counters * @force_reset: flag to force eh_work perform a full reset - * @force_pmc: flag to force a power mode change * @silence_err_logs: flag to silence error logs * @dev_cmd: ufs device management command information * @last_dme_cmd_tstamp: time stamp of the last completed DME command @@ -1036,7 +1046,6 @@ struct ufs_hba { u32 saved_uic_err; struct ufs_stats ufs_stats; bool force_reset; - bool force_pmc; bool silence_err_logs; /* Device management request data */ @@ -1363,9 +1372,11 @@ extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, u32 *mib_val, u8 peer); extern int ufshcd_change_power_mode(struct ufs_hba *hba, - struct ufs_pa_layer_attr *pwr_mode); + struct ufs_pa_layer_attr *pwr_mode, + enum ufshcd_pmc_policy pmc_policy); extern int ufshcd_config_pwr_mode(struct ufs_hba *hba, - struct ufs_pa_layer_attr *desired_pwr_mode); + struct ufs_pa_layer_attr *desired_pwr_mode, + enum ufshcd_pmc_policy pmc_policy); extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode); /* UIC command interfaces for DME primitives */ From 6669ab18c2238299a685ada1acea1c7d4f1da1d5 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:45 -0700 Subject: [PATCH 03/12] scsi: ufs: core: Add UFS_HS_G6 and UFS_HS_GEAR_MAX to enum ufs_hs_gear_tag Add UFS_HS_G6 to enum ufs_hs_gear_tag. In addition, add UFS_HS_GEAR_MAX to enum ufs_hs_gear_tag to facilitate iteration over valid High Speed Gears. 
Reviewed-by: Bart Van Assche Reviewed-by: Bean Huo Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-4-can.guo@oss.qualcomm.com Signed-off-by: Martin K. Petersen --- include/ufs/unipro.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h index 59de737490ca..71a5f643400c 100644 --- a/include/ufs/unipro.h +++ b/include/ufs/unipro.h @@ -233,7 +233,9 @@ enum ufs_hs_gear_tag { UFS_HS_G2, /* HS Gear 2 */ UFS_HS_G3, /* HS Gear 3 */ UFS_HS_G4, /* HS Gear 4 */ - UFS_HS_G5 /* HS Gear 5 */ + UFS_HS_G5, /* HS Gear 5 */ + UFS_HS_G6, /* HS Gear 6 */ + UFS_HS_GEAR_MAX = UFS_HS_G6, }; enum ufs_lanes { From 03e5d38e2f985d8d0b0a60508c0b422f664808e3 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:46 -0700 Subject: [PATCH 04/12] scsi: ufs: core: Add support for TX Equalization MIPI Unipro3.0 introduced PA_TxEQGnSetting and PA_PreCodeEn attributes for TX Equalization and Pre-Coding. It is Host Software's responsibility to configure these attributes for both host and device before initiating Power Mode Change to High-Speed Gears. MIPI Unipro3.0 also introduced TX Equalization Training (EQTR) to identify optimal TX Equalization settings for use by both Host's and Device's UniPro. TX EQTR shall be initiated from the most reliable High-Speed Gear (HS-G1) targeting High-Speed Gears (HS-G4 to HS-G6). Implement TX Equalization configuration and TX EQTR procedure as defined in UFSHCI v5.0 specification. The TX EQTR procedure determines the optimal TX Equalization settings by iterating through all possible PreShoot and DeEmphasis combinations and selecting the best combinations for both Host and Device based on Figure of Merit (FOM) evaluation. Reviewed-by: Bean Huo Reviewed-by: Bart Van Assche Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-5-can.guo@oss.qualcomm.com Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/Makefile | 2 +- drivers/ufs/core/ufs-txeq.c | 1186 ++++++++++++++++++++++++++++++++ drivers/ufs/core/ufshcd-priv.h | 38 + drivers/ufs/core/ufshcd.c | 50 +- include/ufs/ufshcd.h | 135 ++++ include/ufs/unipro.h | 114 ++- 6 files changed, 1516 insertions(+), 9 deletions(-) create mode 100644 drivers/ufs/core/ufs-txeq.c diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile index 51e1867e524e..ce7d16d2cf35 100644 --- a/drivers/ufs/core/Makefile +++ b/drivers/ufs/core/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o -ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o +ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o ufs-txeq.o ufshcd-core-$(CONFIG_RPMB) += ufs-rpmb.o ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o diff --git a/drivers/ufs/core/ufs-txeq.c b/drivers/ufs/core/ufs-txeq.c new file mode 100644 index 000000000000..04f9f1ffa43e --- /dev/null +++ b/drivers/ufs/core/ufs-txeq.c @@ -0,0 +1,1186 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2026 Qualcomm Technologies, Inc. 
+ * + * Author: + * Can Guo + */ + +#include +#include +#include +#include +#include +#include +#include "ufshcd-priv.h" + +static bool use_adaptive_txeq; +module_param(use_adaptive_txeq, bool, 0644); +MODULE_PARM_DESC(use_adaptive_txeq, "Find and apply optimal TX Equalization settings before changing Power Mode (default: false)"); + +static int txeq_gear_set(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, UFS_HS_G1, UFS_HS_GEAR_MAX); +} + +static const struct kernel_param_ops txeq_gear_ops = { + .set = txeq_gear_set, + .get = param_get_uint, +}; + +static unsigned int adaptive_txeq_gear = UFS_HS_G6; +module_param_cb(adaptive_txeq_gear, &txeq_gear_ops, &adaptive_txeq_gear, 0644); +MODULE_PARM_DESC(adaptive_txeq_gear, "For HS-Gear[n] and above, adaptive txeq shall be used"); + +static bool use_txeq_presets; +module_param(use_txeq_presets, bool, 0644); +MODULE_PARM_DESC(use_txeq_presets, "Use only the 8 TX Equalization Presets (pre-defined Pre-Shoot & De-Emphasis combinations) for TX EQTR (default: false)"); + +static bool txeq_presets_selected[UFS_TX_EQ_PRESET_MAX] = {[0 ... (UFS_TX_EQ_PRESET_MAX - 1)] = 1}; +module_param_array(txeq_presets_selected, bool, NULL, 0644); +MODULE_PARM_DESC(txeq_presets_selected, "Use only the selected Presets out of the 8 TX Equalization Presets for TX EQTR"); + +/* + * ufs_tx_eq_preset - Table of minimum required list of presets. + * + * A HS-G6 capable M-TX shall support the presets defined in M-PHY v6.0 spec. 
+ * Preset Pre-Shoot(dB) De-Emphasis(dB) + * P0 0.0 0.0 + * P1 0.0 0.8 + * P2 0.0 1.6 + * P3 0.8 0.0 + * P4 1.6 0.0 + * P5 0.8 0.8 + * P6 0.8 1.6 + * P7 1.6 0.8 + */ +static const struct __ufs_tx_eq_preset { + u8 preshoot; + u8 deemphasis; +} ufs_tx_eq_preset[UFS_TX_EQ_PRESET_MAX] = { + [UFS_TX_EQ_PRESET_P0] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_0P0}, + [UFS_TX_EQ_PRESET_P1] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_0P8}, + [UFS_TX_EQ_PRESET_P2] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_1P6}, + [UFS_TX_EQ_PRESET_P3] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_0P0}, + [UFS_TX_EQ_PRESET_P4] = {UFS_TX_HS_PRESHOOT_DB_1P6, UFS_TX_HS_DEEMPHASIS_DB_0P0}, + [UFS_TX_EQ_PRESET_P5] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_0P8}, + [UFS_TX_EQ_PRESET_P6] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_1P6}, + [UFS_TX_EQ_PRESET_P7] = {UFS_TX_HS_PRESHOOT_DB_1P6, UFS_TX_HS_DEEMPHASIS_DB_0P8}, +}; + +/* + * pa_peer_rx_adapt_initial - Table of UniPro PA_PeerRxHSGnAdaptInitial + * attribute IDs for High Speed (HS) Gears. + * + * This table maps HS Gears to their respective UniPro PA_PeerRxHSGnAdaptInitial + * attribute IDs. Entries for Gears 1-3 are 0 (unsupported). + */ +static const u32 pa_peer_rx_adapt_initial[UFS_HS_GEAR_MAX] = { + 0, + 0, + 0, + PA_PEERRXHSG4ADAPTINITIAL, + PA_PEERRXHSG5ADAPTINITIAL, + PA_PEERRXHSG6ADAPTINITIALL0L3 +}; + +/* + * rx_adapt_initial_cap - Table of M-PHY RX_HS_Gn_ADAPT_INITIAL_Capability + * attribute IDs for High Speed (HS) Gears. + * + * This table maps HS Gears to their respective M-PHY + * RX_HS_Gn_ADAPT_INITIAL_Capability attribute IDs. Entries for Gears 1-3 are 0 + * (unsupported). + */ +static const u32 rx_adapt_initial_cap[UFS_HS_GEAR_MAX] = { + 0, + 0, + 0, + RX_HS_G4_ADAPT_INITIAL_CAP, + RX_HS_G5_ADAPT_INITIAL_CAP, + RX_HS_G6_ADAPT_INITIAL_CAP +}; + +/* + * pa_tx_eq_setting - Table of UniPro PA_TxEQGnSetting attribute IDs for High + * Speed (HS) Gears. 
+ * + * This table maps HS Gears to their respective UniPro PA_TxEQGnSetting + * attribute IDs. + */ +static const u32 pa_tx_eq_setting[UFS_HS_GEAR_MAX] = { + PA_TXEQG1SETTING, + PA_TXEQG2SETTING, + PA_TXEQG3SETTING, + PA_TXEQG4SETTING, + PA_TXEQG5SETTING, + PA_TXEQG6SETTING +}; + +/** + * ufshcd_configure_precoding - Configure Pre-Coding for all active lanes + * @hba: per adapter instance + * @params: TX EQ parameters data structure + * + * Bit[7] in RX_FOM indicates that the receiver needs to enable Pre-Coding when + * set. Pre-Coding must be enabled on both the transmitter and receiver to + * ensure proper operation. + * + * Returns 0 on success, non-zero error code otherwise + */ +static int ufshcd_configure_precoding(struct ufs_hba *hba, + struct ufshcd_tx_eq_params *params) +{ + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; + u32 local_precode_en = 0; + u32 peer_precode_en = 0; + int lane, ret; + + /* Enable Pre-Coding for Host's TX & Device's RX pair */ + for (lane = 0; lane < pwr_info->lane_tx; lane++) { + if (params->host[lane].precode_en) { + local_precode_en |= PRECODEEN_TX_BIT(lane); + peer_precode_en |= PRECODEEN_RX_BIT(lane); + } + } + + /* Enable Pre-Coding for Device's TX & Host's RX pair */ + for (lane = 0; lane < pwr_info->lane_rx; lane++) { + if (params->device[lane].precode_en) { + peer_precode_en |= PRECODEEN_TX_BIT(lane); + local_precode_en |= PRECODEEN_RX_BIT(lane); + } + } + + if (!local_precode_en && !peer_precode_en) { + dev_dbg(hba->dev, "Pre-Coding is not required for Host and Device\n"); + return 0; + } + + /* Set local PA_PreCodeEn */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PRECODEEN), local_precode_en); + if (ret) { + dev_err(hba->dev, "Failed to set local PA_PreCodeEn: %d\n", ret); + return ret; + } + + /* Set peer PA_PreCodeEn */ + ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_PRECODEEN), peer_precode_en); + if (ret) { + dev_err(hba->dev, "Failed to set peer PA_PreCodeEn: %d\n", ret); + return ret; + } + + 
dev_dbg(hba->dev, "Local PA_PreCodeEn: 0x%02x, Peer PA_PreCodeEn: 0x%02x\n", + local_precode_en, peer_precode_en); + + return 0; +} + +void ufshcd_print_tx_eq_params(struct ufs_hba *hba) +{ + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; + struct ufshcd_tx_eq_params *params; + u32 gear = hba->pwr_info.gear_tx; + int lane; + + if (!ufshcd_is_tx_eq_supported(hba)) + return; + + if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) + return; + + params = &hba->tx_eq_params[gear - 1]; + if (!params->is_valid || !params->is_applied) + return; + + for (lane = 0; lane < pwr_info->lane_tx; lane++) + dev_dbg(hba->dev, "Host TX Lane %d: PreShoot %u, DeEmphasis %u, FOM %u, PreCodeEn %d\n", + lane, params->host[lane].preshoot, + params->host[lane].deemphasis, + params->host[lane].fom_val, + params->host[lane].precode_en); + + for (lane = 0; lane < pwr_info->lane_rx; lane++) + dev_dbg(hba->dev, "Device TX Lane %d: PreShoot %u, DeEmphasis %u, FOM %u, PreCodeEn %d\n", + lane, params->device[lane].preshoot, + params->device[lane].deemphasis, + params->device[lane].fom_val, + params->device[lane].precode_en); +} + +static inline u32 +ufshcd_compose_tx_eq_setting(struct ufshcd_tx_eq_settings *settings, + int num_lanes) +{ + u32 setting = 0; + int lane; + + for (lane = 0; lane < num_lanes; lane++, settings++) { + setting |= TX_HS_PRESHOOT_BITS(lane, settings->preshoot); + setting |= TX_HS_DEEMPHASIS_BITS(lane, settings->deemphasis); + } + + return setting; +} + +/** + * ufshcd_apply_tx_eq_settings - Apply TX Equalization settings for target gear + * @hba: per adapter instance + * @params: TX EQ parameters data structure + * @gear: target gear + * + * Returns 0 on success, negative error code otherwise + */ +static int ufshcd_apply_tx_eq_settings(struct ufs_hba *hba, + struct ufshcd_tx_eq_params *params, + u32 gear) +{ + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; + u32 setting; + int ret; + + /* Compose settings for Host's TX Lanes */ + setting = 
ufshcd_compose_tx_eq_setting(params->host, pwr_info->lane_tx);
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(pa_tx_eq_setting[gear - 1]), setting);
+	if (ret)
+		return ret;
+
+	/* Compose settings for Device's TX Lanes */
+	setting = ufshcd_compose_tx_eq_setting(params->device, pwr_info->lane_rx);
+	ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(pa_tx_eq_setting[gear - 1]), setting);
+	if (ret)
+		return ret;
+
+	/* Configure Pre-Coding */
+	if (gear >= UFS_HS_G6) {
+		ret = ufshcd_configure_precoding(hba, params);
+		if (ret) {
+			dev_err(hba->dev, "Failed to configure pre-coding: %d\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ufshcd_evaluate_tx_eqtr_fom - Evaluate TX EQTR FOM results
+ * @hba: per adapter instance
+ * @pwr_mode: target power mode containing gear and rate information
+ * @eqtr_data: TX EQTR data structure
+ * @h_iter: host TX EQTR iterator data structure
+ * @d_iter: device TX EQTR iterator data structure
+ *
+ * Evaluate TX EQTR FOM results, update host and device TX EQTR data accordingly
+ * if FOM has been improved compared to previous iteration, and record TX EQTR
+ * FOM results.
+ */ +static void ufshcd_evaluate_tx_eqtr_fom(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode, + struct ufshcd_tx_eqtr_data *eqtr_data, + struct tx_eqtr_iter *h_iter, + struct tx_eqtr_iter *d_iter) +{ + u8 preshoot, deemphasis, fom_value; + bool precode_en; + int lane; + + for (lane = 0; h_iter->is_updated && lane < pwr_mode->lane_tx; lane++) { + preshoot = h_iter->preshoot; + deemphasis = h_iter->deemphasis; + fom_value = h_iter->fom[lane] & RX_FOM_VALUE_MASK; + precode_en = h_iter->fom[lane] & RX_FOM_PRECODING_EN_BIT; + + /* Record host TX EQTR FOM */ + eqtr_data->host_fom[lane][preshoot][deemphasis] = h_iter->fom[lane]; + + /* Check if FOM has been improved for host's TX Lanes */ + if (fom_value > eqtr_data->host[lane].fom_val) { + eqtr_data->host[lane].preshoot = preshoot; + eqtr_data->host[lane].deemphasis = deemphasis; + eqtr_data->host[lane].fom_val = fom_value; + eqtr_data->host[lane].precode_en = precode_en; + } + + dev_dbg(hba->dev, "TX EQTR: Host TX Lane %d: PreShoot %u, DeEmphasis %u, FOM value %u, PreCodeEn %d\n", + lane, preshoot, deemphasis, fom_value, precode_en); + } + + for (lane = 0; d_iter->is_updated && lane < pwr_mode->lane_rx; lane++) { + preshoot = d_iter->preshoot; + deemphasis = d_iter->deemphasis; + fom_value = d_iter->fom[lane] & RX_FOM_VALUE_MASK; + precode_en = d_iter->fom[lane] & RX_FOM_PRECODING_EN_BIT; + + /* Record device TX EQTR FOM */ + eqtr_data->device_fom[lane][preshoot][deemphasis] = d_iter->fom[lane]; + + /* Check if FOM has been improved for Device's TX Lanes */ + if (fom_value > eqtr_data->device[lane].fom_val) { + eqtr_data->device[lane].preshoot = preshoot; + eqtr_data->device[lane].deemphasis = deemphasis; + eqtr_data->device[lane].fom_val = fom_value; + eqtr_data->device[lane].precode_en = precode_en; + } + + dev_dbg(hba->dev, "TX EQTR: Device TX Lane %d: PreShoot %u, DeEmphasis %u, FOM value %u, PreCodeEn %d\n", + lane, preshoot, deemphasis, fom_value, precode_en); + } +} + +/** + * ufshcd_get_rx_fom - Get 
Figure of Merit (FOM) for both sides + * @hba: per adapter instance + * @pwr_mode: target power mode containing gear and rate information + * @h_iter: host TX EQTR iterator data structure + * @d_iter: device TX EQTR iterator data structure + * + * Returns 0 on success, negative error code otherwise + */ +static int ufshcd_get_rx_fom(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode, + struct tx_eqtr_iter *h_iter, + struct tx_eqtr_iter *d_iter) +{ + int lane, ret; + u32 fom; + + /* Get FOM of host's TX lanes from device's RX_FOM. */ + for (lane = 0; lane < pwr_mode->lane_tx; lane++) { + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB_SEL(RX_FOM, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), + &fom); + if (ret) + return ret; + + h_iter->fom[lane] = (u8)fom; + } + + /* Get FOM of device's TX lanes from host's RX_FOM. */ + for (lane = 0; lane < pwr_mode->lane_rx; lane++) { + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_FOM, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), + &fom); + if (ret) + return ret; + + d_iter->fom[lane] = (u8)fom; + } + + ret = ufshcd_vops_get_rx_fom(hba, pwr_mode, h_iter, d_iter); + if (ret) + dev_err(hba->dev, "Failed to get FOM via vops: %d\n", ret); + + return ret; +} + +static bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis) +{ + int i; + + for (i = 0; i < UFS_TX_EQ_PRESET_MAX; i++) { + if (!txeq_presets_selected[i]) + continue; + + if (preshoot == ufs_tx_eq_preset[i].preshoot && + deemphasis == ufs_tx_eq_preset[i].deemphasis) + return true; + } + + return false; +} + +/** + * tx_eqtr_iter_try_update - Try to update a TX EQTR iterator + * @iter: TX EQTR iterator data structure + * @preshoot: PreShoot value + * @deemphasis: DeEmphasis value + * + * This function validates whether the provided PreShoot and DeEmphasis + * combination can be used or not. If yes, it updates the TX EQTR iterator with + * the provided PreShoot and DeEmphasis, it also sets the is_updated flag + * to indicate the iterator has been updated. 
+ */
+static void tx_eqtr_iter_try_update(struct tx_eqtr_iter *iter,
+				    u8 preshoot, u8 deemphasis)
+{
+	if (!test_bit(preshoot, &iter->preshoot_bitmap) ||
+	    !test_bit(deemphasis, &iter->deemphasis_bitmap) ||
+	    (use_txeq_presets && !ufshcd_is_txeq_preset_selected(preshoot, deemphasis))) {
+		iter->is_updated = false;
+		return;
+	}
+
+	iter->preshoot = preshoot;
+	iter->deemphasis = deemphasis;
+	iter->is_updated = true;
+}
+
+/**
+ * tx_eqtr_iter_update() - Update host and device TX EQTR iterators
+ * @preshoot: PreShoot value
+ * @deemphasis: DeEmphasis value
+ * @h_iter: Host TX EQTR iterator data structure
+ * @d_iter: Device TX EQTR iterator data structure
+ *
+ * Updates host and device TX Equalization training iterators with the
+ * provided PreShoot and DeEmphasis.
+ *
+ * Return: true if host and/or device TX Equalization training iterator has
+ * been updated to the provided PreShoot and DeEmphasis, false otherwise.
+ */
+static bool tx_eqtr_iter_update(u8 preshoot, u8 deemphasis,
+				struct tx_eqtr_iter *h_iter,
+				struct tx_eqtr_iter *d_iter)
+{
+	tx_eqtr_iter_try_update(h_iter, preshoot, deemphasis);
+	tx_eqtr_iter_try_update(d_iter, preshoot, deemphasis);
+
+	return h_iter->is_updated || d_iter->is_updated;
+}
+
+/**
+ * ufshcd_tx_eqtr_iter_init - Initialize host and device TX EQTR iterators
+ * @hba: per adapter instance
+ * @h_iter: host TX EQTR iterator data structure
+ * @d_iter: device TX EQTR iterator data structure
+ *
+ * This function initializes the TX EQTR iterator structures for both host and
+ * device by reading their TX equalization capabilities. The capabilities are
+ * cached in the hba structure to avoid redundant DME operations in subsequent
+ * calls. In the TX EQTR procedure, the iterator structures are updated by
+ * tx_eqtr_iter_update() to systematically iterate through supported TX
+ * Equalization setting combinations.
+ * + * Returns 0 on success, negative error code otherwise + */ +static int ufshcd_tx_eqtr_iter_init(struct ufs_hba *hba, + struct tx_eqtr_iter *h_iter, + struct tx_eqtr_iter *d_iter) +{ + u32 cap; + int ret; + + if (!hba->host_preshoot_cap) { + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(TX_HS_PRESHOOT_SETTING_CAP), &cap); + if (ret) + return ret; + + hba->host_preshoot_cap = cap & TX_EQTR_CAP_MASK; + } + + if (!hba->host_deemphasis_cap) { + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(TX_HS_DEEMPHASIS_SETTING_CAP), &cap); + if (ret) + return ret; + + hba->host_deemphasis_cap = cap & TX_EQTR_CAP_MASK; + } + + if (!hba->device_preshoot_cap) { + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(TX_HS_PRESHOOT_SETTING_CAP), &cap); + if (ret) + return ret; + + hba->device_preshoot_cap = cap & TX_EQTR_CAP_MASK; + } + + if (!hba->device_deemphasis_cap) { + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(TX_HS_DEEMPHASIS_SETTING_CAP), &cap); + if (ret) + return ret; + + hba->device_deemphasis_cap = cap & TX_EQTR_CAP_MASK; + } + + /* + * Support PreShoot & DeEmphasis of value 0 is mandatory, hence they are + * not reflected in PreShoot/DeEmphasis capabilities. Left shift the + * capability bitmap by 1 and set bit[0] to reflect value 0 is + * supported, such that test_bit() can be used later for convenience. 
+ */ + h_iter->preshoot_bitmap = (hba->host_preshoot_cap << 0x1) | 0x1; + h_iter->deemphasis_bitmap = (hba->host_deemphasis_cap << 0x1) | 0x1; + d_iter->preshoot_bitmap = (hba->device_preshoot_cap << 0x1) | 0x1; + d_iter->deemphasis_bitmap = (hba->device_deemphasis_cap << 0x1) | 0x1; + + return 0; +} + +/** + * adapt_cap_to_t_adapt - Calculate TAdapt from adapt capability + * @adapt_cap: Adapt capability + * + * For NRZ: + * IF (ADAPT_range = FINE) + * TADAPT = 650 x (ADAPT_length + 1) + * ELSE (IF ADAPT_range = COARSE) + * TADAPT = 650 x 2^ADAPT_length + * + * Returns calculated TAdapt value in term of Unit Intervals (UI) + */ +static inline u64 adapt_cap_to_t_adapt(u32 adapt_cap) +{ + u64 tadapt; + u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK; + + if (!IS_ADAPT_RANGE_COARSE(adapt_cap)) + tadapt = TADAPT_FACTOR * (adapt_length + 1); + else + tadapt = TADAPT_FACTOR * (1 << adapt_length); + + return tadapt; +} + +/** + * adapt_cap_to_t_adapt_l0l3 - Calculate TAdapt_L0_L3 from adapt capability + * @adapt_cap: Adapt capability + * + * For PAM-4: + * IF (ADAPT_range = FINE) + * TADAPT_L0_L3 = 2^9 x ADAPT_length + * ELSE IF (ADAPT_range = COARSE) + * TADAPT_L0_L3 = 2^9 x (2^ADAPT_length) + * + * Returns calculated TAdapt value in term of Unit Intervals (UI) + */ +static inline u64 adapt_cap_to_t_adapt_l0l3(u32 adapt_cap) +{ + u64 tadapt; + u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK; + + if (!IS_ADAPT_RANGE_COARSE(adapt_cap)) + tadapt = TADAPT_L0L3_FACTOR * adapt_length; + else + tadapt = TADAPT_L0L3_FACTOR * (1 << adapt_length); + + return tadapt; +} + +/** + * adapt_cap_to_t_adapt_l0l1l2l3 - Calculate TAdapt_L0_L1_L2_L3 from adapt capability + * @adapt_cap: Adapt capability + * + * For PAM-4: + * IF (ADAPT_range_L0_L1_L2_L3 = FINE) + * TADAPT_L0_L1_L2_L3 = 2^15 x (ADAPT_length_L0_L1_L2_L3 + 1) + * ELSE IF (ADAPT_range_L0_L1_L2_L3 = COARSE) + * TADAPT_L0_L1_L2_L3 = 2^15 x 2^ADAPT_length_L0_L1_L2_L3 + * + * Returns calculated TAdapt value in term of Unit 
Intervals (UI)
+ */
+static inline u64 adapt_cap_to_t_adapt_l0l1l2l3(u32 adapt_cap)
+{
+	u64 tadapt;
+	u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK;
+
+	if (!IS_ADAPT_RANGE_COARSE(adapt_cap))
+		tadapt = TADAPT_L0L1L2L3_FACTOR * (adapt_length + 1);
+	else
+		tadapt = TADAPT_L0L1L2L3_FACTOR * (1 << adapt_length);
+
+	return tadapt;
+}
+
+/**
+ * ufshcd_setup_tx_eqtr_adapt_length - Setup TX adapt length for EQTR
+ * @hba: per adapter instance
+ * @params: TX EQ parameters data structure
+ * @gear: target gear for EQTR
+ *
+ * This function determines and configures the proper TX adapt length (TAdapt)
+ * for the TX EQTR procedure based on the target gear and RX adapt capabilities
+ * of both host and device.
+ *
+ * Guidelines from MIPI UniPro v3.0 spec - select the minimum Adapt Length for
+ * the Equalization Training procedure based on the following conditions:
+ *
+ * If the target High-Speed Gear n is HS-G4 or HS-G5:
+ * PA_TxAdaptLength_EQTR[7:0] >= Max (10us, RX_HS_Gn_ADAPT_INITIAL_Capability,
+ * PA_PeerRxHsGnAdaptInitial)
+ * PA_TxAdaptLength_EQTR[7:0] shall be shorter than PACP_REQUEST_TIMER (10ms)
+ * PA_TxAdaptLength_EQTR[15:8] is not relevant for HS-G4 and HS-G5. This field
+ * is set to 255 (reserved value).
+ *
+ * If the target High-Speed Gear n is HS-G6:
+ * PA_TxAdaptLength_EQTR >= 10us
+ * PA_TxAdaptLength_EQTR[7:0] >= Max (RX_HS_G6_ADAPT_INITIAL_Capability,
+ * PA_PeerRxHsG6AdaptInitialL0L3)
+ * PA_TxAdaptLength_EQTR[15:8] >= Max (RX_HS_G6_ADAPT_INITIAL_L0_L1_L2_L3_Capability,
+ * PA_PeerRxHsG6AdaptInitialL0L1L2L3)
+ * PA_TxAdaptLength_EQTR shall be shorter than PACP_REQUEST_TIMER value of 10ms.
+ *
+ * Since adapt capabilities encode both range (fine/coarse) and length values,
+ * direct comparison is not possible.
This function converts adapt capabilities
+ * to actual time durations in Unit Intervals (UI) using the Adapt time
+ * calculation formula in M-PHY v6.0 spec (Table 8), then selects the maximum
+ * to ensure both host and device use adequate TX adapt length.
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int ufshcd_setup_tx_eqtr_adapt_length(struct ufs_hba *hba,
+					     struct ufshcd_tx_eq_params *params,
+					     u32 gear)
+{
+	u32 adapt_eqtr;
+	int ret;
+
+	if (gear == UFS_HS_G4 || gear == UFS_HS_G5) {
+		u64 t_adapt, t_adapt_local, t_adapt_peer;
+		u32 adapt_cap_local, adapt_cap_peer, adapt_length;
+
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(rx_adapt_initial_cap[gear - 1],
+					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+					&adapt_cap_local);
+		if (ret)
+			return ret;
+
+		if (adapt_cap_local > ADAPT_LENGTH_MAX) {
+			dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n",
+				gear, adapt_cap_local);
+			return -EINVAL;
+		}
+
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(pa_peer_rx_adapt_initial[gear - 1]),
+				     &adapt_cap_peer);
+		if (ret)
+			return ret;
+
+		if (adapt_cap_peer > ADAPT_LENGTH_MAX) {
+			dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n",
+				gear, adapt_cap_peer);
+			return -EINVAL;
+		}
+
+		t_adapt_local = adapt_cap_to_t_adapt(adapt_cap_local);
+		t_adapt_peer = adapt_cap_to_t_adapt(adapt_cap_peer);
+		t_adapt = max(t_adapt_local, t_adapt_peer);
+
+		dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n",
+			gear, adapt_cap_local);
+		dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n",
+			gear, adapt_cap_peer);
+		dev_dbg(hba->dev, "t_adapt_local = %llu UI, t_adapt_peer = %llu UI\n",
+			t_adapt_local, t_adapt_peer);
+		dev_dbg(hba->dev, "TAdapt %llu UI selected for TX EQTR\n",
+			t_adapt);
+
+		adapt_length = (t_adapt_local >= t_adapt_peer) ?
+ adapt_cap_local : adapt_cap_peer; + + if (gear == UFS_HS_G4 && t_adapt < TX_EQTR_HS_G4_MIN_T_ADAPT) { + dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n", + t_adapt, gear, TX_EQTR_HS_G4_ADAPT_DEFAULT); + adapt_length = TX_EQTR_HS_G4_ADAPT_DEFAULT; + } else if (gear == UFS_HS_G5 && t_adapt < TX_EQTR_HS_G5_MIN_T_ADAPT) { + dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n", + t_adapt, gear, TX_EQTR_HS_G5_ADAPT_DEFAULT); + adapt_length = TX_EQTR_HS_G5_ADAPT_DEFAULT; + } + + adapt_eqtr = adapt_length | + (TX_EQTR_ADAPT_RESERVED << TX_EQTR_ADAPT_LENGTH_L0L1L2L3_SHIFT); + } else if (gear == UFS_HS_G6) { + u64 t_adapt, t_adapt_l0l3, t_adapt_l0l3_local, t_adapt_l0l3_peer; + u64 t_adapt_l0l1l2l3, t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer; + u32 adapt_l0l3_cap_local, adapt_l0l3_cap_peer, adapt_length_l0l3; + u32 adapt_l0l1l2l3_cap_local, adapt_l0l1l2l3_cap_peer, adapt_length_l0l1l2l3; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(rx_adapt_initial_cap[gear - 1], + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &adapt_l0l3_cap_local); + if (ret) + return ret; + + if (adapt_l0l3_cap_local > ADAPT_L0L3_LENGTH_MAX) { + dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n", + gear, adapt_l0l3_cap_local); + return -EINVAL; + } + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(pa_peer_rx_adapt_initial[gear - 1]), + &adapt_l0l3_cap_peer); + if (ret) + return ret; + + if (adapt_l0l3_cap_peer > ADAPT_L0L3_LENGTH_MAX) { + dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n", + gear, adapt_l0l3_cap_peer); + return -EINVAL; + } + + t_adapt_l0l3_local = adapt_cap_to_t_adapt_l0l3(adapt_l0l3_cap_local); + t_adapt_l0l3_peer = adapt_cap_to_t_adapt_l0l3(adapt_l0l3_cap_peer); + + dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n", + gear, adapt_l0l3_cap_local); + dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n", + gear, adapt_l0l3_cap_peer); + 
dev_dbg(hba->dev, "t_adapt_l0l3_local = %llu UI, t_adapt_l0l3_peer = %llu UI\n", + t_adapt_l0l3_local, t_adapt_l0l3_peer); + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_HS_G6_ADAPT_INITIAL_L0L1L2L3_CAP, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &adapt_l0l1l2l3_cap_local); + if (ret) + return ret; + + if (adapt_l0l1l2l3_cap_local > ADAPT_L0L1L2L3_LENGTH_MAX) { + dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP (0x%x) exceeds MAX\n", + gear, adapt_l0l1l2l3_cap_local); + return -EINVAL; + } + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3), + &adapt_l0l1l2l3_cap_peer); + if (ret) + return ret; + + if (adapt_l0l1l2l3_cap_peer > ADAPT_L0L1L2L3_LENGTH_MAX) { + dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP (0x%x) exceeds MAX\n", + gear, adapt_l0l1l2l3_cap_peer); + return -EINVAL; + } + + t_adapt_l0l1l2l3_local = adapt_cap_to_t_adapt_l0l1l2l3(adapt_l0l1l2l3_cap_local); + t_adapt_l0l1l2l3_peer = adapt_cap_to_t_adapt_l0l1l2l3(adapt_l0l1l2l3_cap_peer); + + dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP = 0x%x\n", + gear, adapt_l0l1l2l3_cap_local); + dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP = 0x%x\n", + gear, adapt_l0l1l2l3_cap_peer); + dev_dbg(hba->dev, "t_adapt_l0l1l2l3_local = %llu UI, t_adapt_l0l1l2l3_peer = %llu UI\n", + t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer); + + t_adapt_l0l1l2l3 = max(t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer); + t_adapt_l0l3 = max(t_adapt_l0l3_local, t_adapt_l0l3_peer); + t_adapt = t_adapt_l0l3 + t_adapt_l0l1l2l3; + + dev_dbg(hba->dev, "TAdapt %llu PAM-4 UI selected for TX EQTR\n", + t_adapt); + + adapt_length_l0l3 = (t_adapt_l0l3_local >= t_adapt_l0l3_peer) ? + adapt_l0l3_cap_local : adapt_l0l3_cap_peer; + adapt_length_l0l1l2l3 = (t_adapt_l0l1l2l3_local >= t_adapt_l0l1l2l3_peer) ? 
+ adapt_l0l1l2l3_cap_local : adapt_l0l1l2l3_cap_peer; + + if (t_adapt < TX_EQTR_HS_G6_MIN_T_ADAPT) { + dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n", + t_adapt, gear, TX_EQTR_HS_G6_ADAPT_DEFAULT); + adapt_length_l0l3 = TX_EQTR_HS_G6_ADAPT_DEFAULT; + } + + adapt_eqtr = adapt_length_l0l3 | + (adapt_length_l0l1l2l3 << TX_EQTR_ADAPT_LENGTH_L0L1L2L3_SHIFT); + } else { + return -EINVAL; + } + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXADAPTLENGTH_EQTR), adapt_eqtr); + if (ret) + dev_err(hba->dev, "Failed to set adapt length for TX EQTR: %d\n", ret); + else + dev_dbg(hba->dev, "PA_TXADAPTLENGTH_EQTR configured to 0x%08x\n", adapt_eqtr); + + return ret; +} + +/** + * ufshcd_compose_tx_eqtr_setting - Compose TX EQTR setting + * @iter: TX EQTR iterator data structure + * @num_lanes: number of active lanes + * + * Returns composed TX EQTR setting, same setting is used for all active lanes + */ +static inline u32 ufshcd_compose_tx_eqtr_setting(struct tx_eqtr_iter *iter, + int num_lanes) +{ + u32 setting = 0; + int lane; + + for (lane = 0; lane < num_lanes; lane++) { + setting |= TX_HS_PRESHOOT_BITS(lane, iter->preshoot); + setting |= TX_HS_DEEMPHASIS_BITS(lane, iter->deemphasis); + } + + return setting; +} + +/** + * ufshcd_apply_tx_eqtr_settings - Apply TX EQTR setting + * @hba: per adapter instance + * @pwr_mode: target power mode containing gear and rate information + * @h_iter: host TX EQTR iterator data structure + * @d_iter: device TX EQTR iterator data structure + * + * Returns 0 on success, negative error code otherwise + */ +static int ufshcd_apply_tx_eqtr_settings(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode, + struct tx_eqtr_iter *h_iter, + struct tx_eqtr_iter *d_iter) +{ + u32 setting; + int ret; + + setting = ufshcd_compose_tx_eqtr_setting(h_iter, pwr_mode->lane_tx); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQTRSETTING), setting); + if (ret) + return ret; + + setting = 
ufshcd_compose_tx_eqtr_setting(d_iter, pwr_mode->lane_rx); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERTXEQTRSETTING), setting); + if (ret) + return ret; + + ret = ufshcd_vops_apply_tx_eqtr_settings(hba, pwr_mode, h_iter, d_iter); + + return ret; +} + +/** + * ufshcd_update_tx_eq_params - Update TX Equalization params + * @params: TX EQ parameters data structure + * @eqtr_data: TX EQTR data structure + * + * Update TX Equalization params using results from TX EQTR data. + */ +static inline void +ufshcd_update_tx_eq_params(struct ufshcd_tx_eq_params *params, + struct ufshcd_tx_eqtr_data *eqtr_data) +{ + struct ufshcd_tx_eqtr_record *rec = params->eqtr_record; + + memcpy(params->host, eqtr_data->host, sizeof(params->host)); + memcpy(params->device, eqtr_data->device, sizeof(params->device)); + + if (!rec) + return; + + memcpy(rec->host_fom, eqtr_data->host_fom, sizeof(rec->host_fom)); + memcpy(rec->device_fom, eqtr_data->device_fom, sizeof(rec->device_fom)); + rec->last_record_ts = ktime_get(); + rec->last_record_index++; +} + +/** + * __ufshcd_tx_eqtr - TX Equalization Training (EQTR) procedure + * @hba: per adapter instance + * @params: TX EQ parameters data structure + * @pwr_mode: target power mode containing gear and rate information + * + * This function implements the complete TX EQTR procedure as defined in UFSHCI + * v5.0 specification. It iterates through all possible combinations of PreShoot + * and DeEmphasis settings to find the optimal TX Equalization settings for all + * active lanes. 
+ * + * Returns 0 on success, negative error code otherwise + */ +static int __ufshcd_tx_eqtr(struct ufs_hba *hba, + struct ufshcd_tx_eq_params *params, + struct ufs_pa_layer_attr *pwr_mode) +{ + struct ufshcd_tx_eqtr_data *eqtr_data __free(kfree) = + kzalloc(sizeof(*eqtr_data), GFP_KERNEL); + struct tx_eqtr_iter h_iter = {}; + struct tx_eqtr_iter d_iter = {}; + u32 gear = pwr_mode->gear_tx; + u8 preshoot, deemphasis; + ktime_t start; + int ret; + + if (!eqtr_data) + return -ENOMEM; + + dev_info(hba->dev, "Start TX EQTR procedure for HS-G%u, Rate-%s, RX Lanes: %u, TX Lanes: %u\n", + gear, ufs_hs_rate_to_str(pwr_mode->hs_rate), + pwr_mode->lane_rx, pwr_mode->lane_tx); + + start = ktime_get(); + + /* Step 1 - Determine the TX Adapt Length for EQTR */ + ret = ufshcd_setup_tx_eqtr_adapt_length(hba, params, gear); + if (ret) { + dev_err(hba->dev, "Failed to setup TX EQTR Adaptation length: %d\n", ret); + return ret; + } + + /* Step 2 - Determine TX Equalization setting capabilities */ + ret = ufshcd_tx_eqtr_iter_init(hba, &h_iter, &d_iter); + if (ret) { + dev_err(hba->dev, "Failed to init TX EQTR data: %d\n", ret); + return ret; + } + + /* TX EQTR main loop */ + for (preshoot = 0; preshoot < TX_HS_NUM_PRESHOOT; preshoot++) { + for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++) { + if (!tx_eqtr_iter_update(preshoot, deemphasis, &h_iter, &d_iter)) + continue; + + /* Step 3 - Apply TX EQTR settings */ + ret = ufshcd_apply_tx_eqtr_settings(hba, pwr_mode, &h_iter, &d_iter); + if (ret) { + dev_err(hba->dev, "Failed to apply TX EQTR settings (PreShoot %u, DeEmphasis %u): %d\n", + preshoot, deemphasis, ret); + return ret; + } + + /* Step 4 - Trigger UIC TX EQTR */ + ret = ufshcd_uic_tx_eqtr(hba, gear); + if (ret) { + dev_err(hba->dev, "Failed to trigger UIC TX EQTR for target gear %u: %d\n", + gear, ret); + return ret; + } + + /* Step 5 - Get FOM */ + ret = ufshcd_get_rx_fom(hba, pwr_mode, &h_iter, &d_iter); + if (ret) { + dev_err(hba->dev, "Failed to get 
RX_FOM: %d\n", + ret); + return ret; + } + + ufshcd_evaluate_tx_eqtr_fom(hba, pwr_mode, eqtr_data, &h_iter, &d_iter); + } + } + + dev_info(hba->dev, "TX EQTR procedure completed! Time elapsed: %llu ms\n", + ktime_to_ms(ktime_sub(ktime_get(), start))); + + ufshcd_update_tx_eq_params(params, eqtr_data); + + return ret; +} + +/** + * ufshcd_tx_eqtr_prepare - Prepare UFS link for TX EQTR procedure + * @hba: per adapter instance + * @pwr_mode: target power mode containing gear and rate + * + * This function prepares the UFS link for TX Equalization Training (EQTR) by + * establishing the proper initial conditions required by the EQTR procedure. + * It ensures that EQTR starts from the most reliable Power Mode (HS-G1) with + * all connected lanes activated and sets host TX HS Adapt Type to INITIAL. + * + * Returns 0 on successful preparation, negative error code on failure + */ +static int ufshcd_tx_eqtr_prepare(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode) +{ + struct ufs_pa_layer_attr pwr_mode_hs_g1 = { + /* TX EQTR shall be initiated from the most reliable HS-G1 */ + .gear_rx = UFS_HS_G1, + .gear_tx = UFS_HS_G1, + .lane_rx = pwr_mode->lane_rx, + .lane_tx = pwr_mode->lane_tx, + .pwr_rx = FAST_MODE, + .pwr_tx = FAST_MODE, + /* Use the target power mode's HS rate */ + .hs_rate = pwr_mode->hs_rate, + }; + u32 rate = pwr_mode->hs_rate; + int ret; + + /* Change power mode to HS-G1, activate all connected lanes. 
*/ + ret = ufshcd_change_power_mode(hba, &pwr_mode_hs_g1, + UFSHCD_PMC_POLICY_DONT_FORCE); + if (ret) { + dev_err(hba->dev, "TX EQTR: Failed to change power mode to HS-G1, Rate-%s: %d\n", + ufs_hs_rate_to_str(rate), ret); + return ret; + } + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), + PA_INITIAL_ADAPT); + if (ret) + dev_err(hba->dev, "TX EQTR: Failed to set Host Adapt type to INITIAL: %d\n", + ret); + + return ret; +} + +static void ufshcd_tx_eqtr_unprepare(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode) +{ + int err; + + if (pwr_mode->pwr_rx == SLOWAUTO_MODE || pwr_mode->hs_rate == 0) + return; + + err = ufshcd_change_power_mode(hba, pwr_mode, + UFSHCD_PMC_POLICY_DONT_FORCE); + if (err) + dev_err(hba->dev, "%s: Failed to restore Power Mode: %d\n", + __func__, err); +} + +/** + * ufshcd_tx_eqtr - Perform TX EQTR procedures with vops callbacks + * @hba: per adapter instance + * @params: TX EQ parameters data structure to populate + * @pwr_mode: target power mode containing gear and rate information + * + * This is the main entry point for performing TX Equalization Training (EQTR) + * procedure as defined in UFSHCI v5.0 specification. It serves as a wrapper + * around __ufshcd_tx_eqtr() to provide vops support through the variant + * operations framework. 
+ * + * Returns 0 on success, negative error code on failure + */ +static int ufshcd_tx_eqtr(struct ufs_hba *hba, + struct ufshcd_tx_eq_params *params, + struct ufs_pa_layer_attr *pwr_mode) +{ + struct ufs_pa_layer_attr old_pwr_info; + int ret; + + if (!params->eqtr_record) { + params->eqtr_record = devm_kzalloc(hba->dev, + sizeof(*params->eqtr_record), + GFP_KERNEL); + if (!params->eqtr_record) + return -ENOMEM; + } + + memcpy(&old_pwr_info, &hba->pwr_info, sizeof(struct ufs_pa_layer_attr)); + + ret = ufshcd_tx_eqtr_prepare(hba, pwr_mode); + if (ret) { + dev_err(hba->dev, "Failed to prepare TX EQTR: %d\n", ret); + goto out; + } + + ret = ufshcd_vops_tx_eqtr_notify(hba, PRE_CHANGE, pwr_mode); + if (ret) + goto out; + + ret = __ufshcd_tx_eqtr(hba, params, pwr_mode); + if (ret) + goto out; + + ret = ufshcd_vops_tx_eqtr_notify(hba, POST_CHANGE, pwr_mode); + +out: + if (ret) + ufshcd_tx_eqtr_unprepare(hba, &old_pwr_info); + + return ret; +} + +/** + * ufshcd_config_tx_eq_settings - Configure TX Equalization settings + * @hba: per adapter instance + * @pwr_mode: target power mode containing gear and rate information + * + * This function finds and sets the TX Equalization settings for the given + * target power mode. 
+ * + * Returns 0 on success, error code otherwise + */ +int ufshcd_config_tx_eq_settings(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode) +{ + struct ufshcd_tx_eq_params *params; + u32 gear, rate; + + if (!ufshcd_is_tx_eq_supported(hba) || !use_adaptive_txeq) + return 0; + + if (!hba->max_pwr_info.is_valid) { + dev_err(hba->dev, "Max power info is invalid\n"); + return -EINVAL; + } + + if (!pwr_mode) { + dev_err(hba->dev, "Target power mode is NULL\n"); + return -EINVAL; + } + + gear = pwr_mode->gear_tx; + rate = pwr_mode->hs_rate; + + if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) { + dev_err(hba->dev, "Invalid HS-Gear (%u) for TX Equalization\n", + gear); + return -EINVAL; + } else if (gear < max_t(u32, adaptive_txeq_gear, UFS_HS_G4)) { + /* TX EQTR is supported for HS-G4 and higher Gears */ + return 0; + } + + if (rate != PA_HS_MODE_A && rate != PA_HS_MODE_B) { + dev_err(hba->dev, "Invalid HS-Rate (%u) for TX Equalization\n", + rate); + return -EINVAL; + } + + params = &hba->tx_eq_params[gear - 1]; + if (!params->is_valid) { + int ret; + + ret = ufshcd_tx_eqtr(hba, params, pwr_mode); + if (ret) { + dev_err(hba->dev, "Failed to train TX Equalization for HS-G%u, Rate-%s: %d\n", + gear, ufs_hs_rate_to_str(rate), ret); + return ret; + } + + /* Mark TX Equalization settings as valid */ + params->is_valid = true; + params->is_applied = false; + } + + if (params->is_valid && !params->is_applied) { + int ret; + + ret = ufshcd_apply_tx_eq_settings(hba, params, gear); + if (ret) { + dev_err(hba->dev, "Failed to apply TX Equalization settings for HS-G%u, Rate-%s: %d\n", + gear, ufs_hs_rate_to_str(rate), ret); + return ret; + } + + params->is_applied = true; + } + + return 0; +} + +/** + * ufshcd_apply_valid_tx_eq_settings - Apply valid TX Equalization settings + * @hba: per-adapter instance + * + * This function iterates through all supported High-Speed (HS) gears and + * applies valid TX Equalization settings to both Host and Device. 
+ */ +void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba) +{ + struct ufshcd_tx_eq_params *params; + int gear, err; + + if (!ufshcd_is_tx_eq_supported(hba)) + return; + + if (!hba->max_pwr_info.is_valid) { + dev_err(hba->dev, "Max power info is invalid, cannot apply TX Equalization settings\n"); + return; + } + + for (gear = UFS_HS_G1; gear <= UFS_HS_GEAR_MAX; gear++) { + params = &hba->tx_eq_params[gear - 1]; + + if (params->is_valid) { + err = ufshcd_apply_tx_eq_settings(hba, params, gear); + if (err) { + params->is_applied = false; + dev_err(hba->dev, "Failed to apply TX Equalization settings for HS-G%u: %d\n", + gear, err); + } else { + params->is_applied = true; + } + } + } +} diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index f1cec1cd01d2..c164caa9a825 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -103,6 +103,12 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable); int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id); +int ufshcd_uic_tx_eqtr(struct ufs_hba *hba, int gear); +void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba); +int ufshcd_config_tx_eq_settings(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode); +void ufshcd_print_tx_eq_params(struct ufs_hba *hba); + /* Wrapper functions for safely calling variant operations */ static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) { @@ -297,6 +303,38 @@ static inline u32 ufshcd_vops_freq_to_gear_speed(struct ufs_hba *hba, unsigned l return 0; } +static inline int ufshcd_vops_get_rx_fom(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode, + struct tx_eqtr_iter *h_iter, + struct tx_eqtr_iter *d_iter) +{ + if (hba->vops && hba->vops->get_rx_fom) + return hba->vops->get_rx_fom(hba, pwr_mode, h_iter, d_iter); + + return 0; +} + +static inline int ufshcd_vops_apply_tx_eqtr_settings(struct ufs_hba *hba, + struct 
ufs_pa_layer_attr *pwr_mode, + struct tx_eqtr_iter *h_iter, + struct tx_eqtr_iter *d_iter) +{ + if (hba->vops && hba->vops->apply_tx_eqtr_settings) + return hba->vops->apply_tx_eqtr_settings(hba, pwr_mode, h_iter, d_iter); + + return 0; +} + +static inline int ufshcd_vops_tx_eqtr_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *pwr_mode) +{ + if (hba->vops && hba->vops->tx_eqtr_notify) + return hba->vops->tx_eqtr_notify(hba, status, pwr_mode); + + return 0; +} + extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[]; /** diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 44faab8b1770..d78723dea951 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -4343,16 +4343,18 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) ret = __ufshcd_send_uic_cmd(hba, cmd); if (ret) { dev_err(hba->dev, - "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", - cmd->command, cmd->argument3, ret); + "pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) uic error %d\n", + cmd->command, UIC_GET_ATTR_ID(cmd->argument1), + cmd->argument3, ret); goto out; } if (!wait_for_completion_timeout(hba->uic_async_done, msecs_to_jiffies(uic_cmd_timeout))) { dev_err(hba->dev, - "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", - cmd->command, cmd->argument3); + "pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) completion timeout\n", + cmd->command, UIC_GET_ATTR_ID(cmd->argument1), + cmd->argument3); if (!cmd->cmd_active) { dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n", @@ -4368,14 +4370,16 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) status = ufshcd_get_upmcrs(hba); if (status != PWR_LOCAL) { dev_err(hba->dev, - "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n", - cmd->command, status); + "pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) failed, host upmcrs:0x%x\n", + cmd->command, 
UIC_GET_ATTR_ID(cmd->argument1), + cmd->argument3, status); ret = (status != PWR_OK) ? status : -1; } out: if (ret) { ufshcd_print_host_state(hba); ufshcd_print_pwr_info(hba); + ufshcd_print_tx_eq_params(hba); ufshcd_print_evt_hist(hba); } @@ -4401,6 +4405,29 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) return ret; } +/** + * ufshcd_uic_tx_eqtr - Perform UIC TX Equalization Training + * @hba: per adapter instance + * @gear: target gear for EQTR + * + * Returns 0 on success, negative error code otherwise + */ +int ufshcd_uic_tx_eqtr(struct ufs_hba *hba, int gear) +{ + struct uic_command uic_cmd = { + .command = UIC_CMD_DME_SET, + .argument1 = UIC_ARG_MIB(PA_EQTR_GEAR), + .argument3 = gear, + }; + int ret; + + ufshcd_hold(hba); + ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); + ufshcd_release(hba); + + return ret; +} + /** * ufshcd_send_bsg_uic_cmd - Send UIC commands requested via BSG layer and retrieve the result * @hba: per adapter instance @@ -4824,6 +4851,12 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba, memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); } + ret = ufshcd_config_tx_eq_settings(hba, &final_params); + if (ret) + dev_warn(hba->dev, "Failed to configure TX Equalization for HS-G%u, Rate-%s: %d\n", + final_params.gear_tx, + ufs_hs_rate_to_str(final_params.hs_rate), ret); + return ufshcd_change_power_mode(hba, &final_params, pmc_policy); } EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); @@ -6823,6 +6856,7 @@ static void ufshcd_err_handler(struct work_struct *work) spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_print_host_state(hba); ufshcd_print_pwr_info(hba); + ufshcd_print_tx_eq_params(hba); ufshcd_print_evt_hist(hba); ufshcd_print_tmrs(hba, hba->outstanding_tasks); ufshcd_print_trs_all(hba, pr_prdt); @@ -7086,6 +7120,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); ufshcd_print_pwr_info(hba); + 
ufshcd_print_tx_eq_params(hba); } ufshcd_schedule_eh_work(hba); retval |= IRQ_HANDLED; @@ -7867,6 +7902,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) ufshcd_print_evt_hist(hba); ufshcd_print_host_state(hba); ufshcd_print_pwr_info(hba); + ufshcd_print_tx_eq_params(hba); ufshcd_print_tr(hba, cmd, true); } else { ufshcd_print_tr(hba, cmd, false); @@ -8844,6 +8880,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba) if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME) ufshcd_quirk_override_pa_h8time(hba); + + ufshcd_apply_valid_tx_eq_settings(hba); } static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 16facaee3e77..35b1288327d0 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -287,6 +287,84 @@ struct ufs_pwr_mode_info { struct ufs_pa_layer_attr info; }; +#define UFS_MAX_LANES 2 + +/** + * struct tx_eqtr_iter - TX Equalization Training iterator + * @preshoot_bitmap: PreShoot bitmap + * @deemphasis_bitmap: DeEmphasis bitmap + * @preshoot: PreShoot value + * @deemphasis: DeEmphasis value + * @fom: Figure-of-Merit read out from RX_FOM + * @is_updated: Flag to indicate if updated since previous iteration + */ +struct tx_eqtr_iter { + unsigned long preshoot_bitmap; + unsigned long deemphasis_bitmap; + u8 preshoot; + u8 deemphasis; + u8 fom[UFS_MAX_LANES]; + bool is_updated; +}; + +/** + * struct ufshcd_tx_eq_settings - TX Equalization settings + * @preshoot: PreShoot value + * @deemphasis: DeEmphasis value + * @fom_val: Figure-of-Merit value read out from RX_FOM (Bit[6:0]) + * @precode_en: Flag to indicate whether need to enable pre-coding + */ +struct ufshcd_tx_eq_settings { + u8 preshoot; + u8 deemphasis; + u8 fom_val; + bool precode_en; +}; + +/** + * struct ufshcd_tx_eqtr_data - Data used during TX Equalization Training procedure + * @host: Optimal TX EQ settings identified for host TX Lanes during TX EQTR + * @device: Optimal TX EQ settings identified for device TX 
Lanes during TX EQTR + * @host_fom: Host TX EQTR FOM record + * @device_fom: Device TX EQTR FOM record + */ +struct ufshcd_tx_eqtr_data { + struct ufshcd_tx_eq_settings host[UFS_MAX_LANES]; + struct ufshcd_tx_eq_settings device[UFS_MAX_LANES]; + u8 host_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; + u8 device_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; +}; + +/** + * struct ufshcd_tx_eqtr_record - TX Equalization Training record + * @host_fom: Host TX EQTR FOM record + * @device_fom: Device TX EQTR FOM record + * @last_record_ts: Timestamp of the most recent TX EQTR record + * @last_record_index: Index of the most recent TX EQTR record + */ +struct ufshcd_tx_eqtr_record { + u8 host_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; + u8 device_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; + ktime_t last_record_ts; + u16 last_record_index; +}; + +/** + * struct ufshcd_tx_eq_params - TX Equalization parameters structure + * @host: TX EQ settings for host TX Lanes + * @device: TX EQ settings for device TX Lanes + * @eqtr_record: Pointer to TX EQTR record + * @is_valid: True if parameter contains valid TX Equalization settings + * @is_applied: True if settings have been applied to UniPro of both sides + */ +struct ufshcd_tx_eq_params { + struct ufshcd_tx_eq_settings host[UFS_MAX_LANES]; + struct ufshcd_tx_eq_settings device[UFS_MAX_LANES]; + struct ufshcd_tx_eqtr_record *eqtr_record; + bool is_valid; + bool is_applied; +}; + /** * struct ufs_hba_variant_ops - variant specific callbacks * @name: variant name @@ -330,6 +408,11 @@ struct ufs_pwr_mode_info { * @config_esi: called to config Event Specific Interrupt * @config_scsi_dev: called to configure SCSI device parameters * @freq_to_gear_speed: called to map clock frequency to the max supported gear speed + * @apply_tx_eqtr_settings: called to apply settings for TX Equalization + * Training settings. 
+ * @get_rx_fom: called to get Figure of Merit (FOM) value. + * @tx_eqtr_notify: called before and after TX Equalization Training procedure + * to allow platform vendor specific configs to take place. */ struct ufs_hba_variant_ops { const char *name; @@ -381,6 +464,17 @@ struct ufs_hba_variant_ops { int (*config_esi)(struct ufs_hba *hba); void (*config_scsi_dev)(struct scsi_device *sdev); u32 (*freq_to_gear_speed)(struct ufs_hba *hba, unsigned long freq); + int (*get_rx_fom)(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode, + struct tx_eqtr_iter *h_iter, + struct tx_eqtr_iter *d_iter); + int (*apply_tx_eqtr_settings)(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode, + struct tx_eqtr_iter *h_iter, + struct tx_eqtr_iter *d_iter); + int (*tx_eqtr_notify)(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *pwr_mode); }; /* clock gating state */ @@ -779,6 +873,13 @@ enum ufshcd_caps { * WriteBooster when scaling the clock down. */ UFSHCD_CAP_WB_WITH_CLK_SCALING = 1 << 12, + + /* + * This capability allows the host controller driver to apply TX + * Equalization settings discovered from UFS attributes, variant + * specific operations and TX Equalization Training procedure. + */ + UFSHCD_CAP_TX_EQUALIZATION = 1 << 13, }; struct ufs_hba_variant_params { @@ -955,6 +1056,15 @@ enum ufshcd_mcq_opr { * @dev_lvl_exception_count: count of device level exceptions since last reset * @dev_lvl_exception_id: vendor specific information about the device level exception event. 
* @rpmbs: list of OP-TEE RPMB devices (one per RPMB region) + * @host_preshoot_cap: a bitfield to indicate supported PreShoot dBs of host's TX lanes, cache of + * host M-PHY TX_HS_PreShoot_Setting_Capability Attribute (ID 0x15) + * @host_deemphasis_cap: a bitfield to indicate supported DeEmphasis dBs of host's TX lanes, cache + * of host M-PHY TX_HS_DeEmphasis_Setting_Capability Attribute (ID 0x12) + * @device_preshoot_cap: a bitfield to indicate supported PreShoot dBs of device's TX lanes, cache + * of device M-PHY TX_HS_PreShoot_Setting_Capability Attribute (ID 0x15) + * @device_deemphasis_cap: a bitfield to indicate supported DeEmphasis dBs of device's TX lanes, + * cache of device M-PHY TX_HS_DeEmphasis_Setting_Capability Attribute (ID 0x12) + * @tx_eq_params: TX Equalization settings */ struct ufs_hba { void __iomem *mmio_base; @@ -1128,6 +1238,12 @@ struct ufs_hba { u64 dev_lvl_exception_id; u32 vcc_off_delay_us; struct list_head rpmbs; + + u8 host_preshoot_cap; + u8 host_deemphasis_cap; + u8 device_preshoot_cap; + u8 device_deemphasis_cap; + struct ufshcd_tx_eq_params tx_eq_params[UFS_HS_GEAR_MAX]; }; /** @@ -1272,6 +1388,13 @@ static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba) return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING; } +static inline bool ufshcd_is_tx_eq_supported(struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_TX_EQUALIZATION && + hba->ufs_version >= ufshci_version(5, 0) && + hba->dev_info.wspecversion >= 0x500; +} + #define ufsmcq_writel(hba, val, reg) \ writel((val), (hba)->mcq_base + (reg)) #define ufsmcq_readl(hba, reg) \ @@ -1287,6 +1410,18 @@ static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba) #define ufshcd_readl(hba, reg) \ readl((hba)->mmio_base + (reg)) +static inline const char *ufs_hs_rate_to_str(enum ufs_hs_gear_rate rate) +{ + switch (rate) { + case PA_HS_MODE_A: + return "A"; + case PA_HS_MODE_B: + return "B"; + default: + return "Unknown"; + } +} + /** * ufshcd_rmwl - perform 
read/modify/write for a controller register * @hba: per adapter instance diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h index 71a5f643400c..4aa592130b4e 100644 --- a/include/ufs/unipro.h +++ b/include/ufs/unipro.h @@ -10,6 +10,8 @@ * M-TX Configuration Attributes */ #define TX_HIBERN8TIME_CAPABILITY 0x000F +#define TX_HS_DEEMPHASIS_SETTING_CAP 0x0012 +#define TX_HS_PRESHOOT_SETTING_CAP 0x0015 #define TX_MODE 0x0021 #define TX_HSRATE_SERIES 0x0022 #define TX_HSGEAR 0x0023 @@ -38,6 +40,9 @@ /* * M-RX Configuration Attributes */ +#define RX_HS_G5_ADAPT_INITIAL_CAP 0x0074 +#define RX_HS_G6_ADAPT_INITIAL_CAP 0x007B +#define RX_HS_G6_ADAPT_INITIAL_L0L1L2L3_CAP 0x007D #define RX_HS_G1_SYNC_LENGTH_CAP 0x008B #define RX_HS_G1_PREP_LENGTH_CAP 0x008C #define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F @@ -50,6 +55,7 @@ #define RX_HIBERN8TIME_CAP 0x0092 #define RX_ADV_HIBERN8TIME_CAP 0x0099 #define RX_ADV_MIN_ACTIVATETIME_CAP 0x009A +#define RX_HS_G4_ADAPT_INITIAL_CAP 0x009F #define RX_MODE 0x00A1 #define RX_HSRATE_SERIES 0x00A2 #define RX_HSGEAR 0x00A3 @@ -64,6 +70,7 @@ #define CFGRXCDR8 0x00BA #define CFGRXOVR8 0x00BD #define CFGRXOVR6 0x00BF +#define RX_FOM 0x00C2 #define RXDIRECTCTRL2 0x00C7 #define CFGRXOVR4 0x00E9 #define RX_REFCLKFREQ 0x00EB @@ -73,7 +80,6 @@ #define ENARXDIRECTCFG3 0x00F3 #define ENARXDIRECTCFG2 0x00F4 - #define is_mphy_tx_attr(attr) (attr < RX_MODE) #define RX_ADV_FINE_GRAN_STEP(x) ((((x) & 0x3) << 1) | 0x1) #define SYNC_LEN_FINE(x) ((x) & 0x3F) @@ -99,6 +105,18 @@ #define UNIPRO_CB_OFFSET(x) (0x8000 | x) +#define ADAPT_LENGTH_MASK 0x7F +#define ADAPT_RANGE_BIT BIT(7) +#define IS_ADAPT_RANGE_COARSE(x) ((x) & ADAPT_RANGE_BIT) + +/* Adapt definitions */ +#define ADAPT_LENGTH_MAX 0x91 +#define ADAPT_L0L3_LENGTH_MAX 0x90 +#define ADAPT_L0L1L2L3_LENGTH_MAX 0x8C +#define TADAPT_FACTOR 650 +#define TADAPT_L0L3_FACTOR (1 << 9) +#define TADAPT_L0L1L2L3_FACTOR (1 << 15) + /* * PHY Adapter attributes */ @@ -164,10 +182,26 @@ #define PA_PACPERRORCOUNT 0x15C1 
#define PA_PHYTESTCONTROL 0x15C2 #define PA_TXHSG4SYNCLENGTH 0x15D0 +#define PA_PEERRXHSG4ADAPTINITIAL 0x15D3 #define PA_TXHSADAPTTYPE 0x15D4 #define PA_TXHSG5SYNCLENGTH 0x15D6 +#define PA_PEERRXHSG5ADAPTINITIAL 0x15D9 +#define PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3 0x15DE +#define PA_PEERRXHSG6ADAPTINITIALL0L3 0x15DF +#define PA_PEERRXHSG6ADAPTINITIALL0L1L2L3 0x15E0 +#define PA_TXEQG1SETTING 0x15E1 +#define PA_TXEQG2SETTING 0x15E2 +#define PA_TXEQG3SETTING 0x15E3 +#define PA_TXEQG4SETTING 0x15E4 +#define PA_TXEQG5SETTING 0x15E5 +#define PA_TXEQG6SETTING 0x15E6 +#define PA_TXEQTRSETTING 0x15E7 +#define PA_PEERTXEQTRSETTING 0x15E8 +#define PA_PRECODEEN 0x15E9 +#define PA_EQTR_GEAR 0x15EA +#define PA_TXADAPTLENGTH_EQTR 0x15EB -/* Adpat type for PA_TXHSADAPTTYPE attribute */ +/* Adapt type for PA_TXHSADAPTTYPE attribute */ #define PA_REFRESH_ADAPT 0x00 #define PA_INITIAL_ADAPT 0x01 #define PA_NO_ADAPT 0x03 @@ -187,6 +221,82 @@ /* PHY Adapter Protocol Constants */ #define PA_MAXDATALANES 4 +/* + * TX EQTR's minimum TAdapt should not be less than 10us. 
+ * This value is rounded up into the nearest Unit Intervals (UI) + */ +#define TX_EQTR_HS_G4_MIN_T_ADAPT 166400 +#define TX_EQTR_HS_G5_MIN_T_ADAPT 332800 +#define TX_EQTR_HS_G6_MIN_T_ADAPT 262144 + +#define TX_EQTR_HS_G4_ADAPT_DEFAULT 0x88 +#define TX_EQTR_HS_G5_ADAPT_DEFAULT 0x89 +#define TX_EQTR_HS_G6_ADAPT_DEFAULT 0x89 + +#define TX_EQTR_CAP_MASK 0x7F + +#define TX_EQTR_ADAPT_LENGTH_L0L1L2L3_SHIFT 8 +#define TX_EQTR_ADAPT_RESERVED 0xFF + +#define TX_HS_NUM_PRESHOOT 8 +#define TX_HS_NUM_DEEMPHASIS 8 +#define TX_HS_PRESHOOT_SHIFT 4 +#define TX_HS_DEEMPHASIS_SHIFT 4 +#define TX_HS_PRESHOOT_OFFSET 0 +#define TX_HS_DEEMPHASIS_OFFSET 16 + +#define TX_HS_PRESHOOT_LANE_SHIFT(lane) \ + (TX_HS_PRESHOOT_OFFSET + (lane) * TX_HS_PRESHOOT_SHIFT) +#define TX_HS_DEEMPHASIS_LANE_SHIFT(lane) \ + (TX_HS_DEEMPHASIS_OFFSET + (lane) * TX_HS_DEEMPHASIS_SHIFT) + +#define TX_HS_PRESHOOT_BITS(lane, val) \ + ((val) << TX_HS_PRESHOOT_LANE_SHIFT(lane)) +#define TX_HS_DEEMPHASIS_BITS(lane, val) \ + ((val) << TX_HS_DEEMPHASIS_LANE_SHIFT(lane)) + +#define RX_FOM_VALUE_MASK 0x7F +#define RX_FOM_PRECODING_EN_BIT BIT(7) + +#define PRECODEEN_TX_OFFSET 0 +#define PRECODEEN_RX_OFFSET 4 +#define PRECODEEN_TX_BIT(lane) (1 << (PRECODEEN_TX_OFFSET + (lane))) +#define PRECODEEN_RX_BIT(lane) (1 << (PRECODEEN_RX_OFFSET + (lane))) + +enum ufs_tx_eq_preset { + UFS_TX_EQ_PRESET_P0, + UFS_TX_EQ_PRESET_P1, + UFS_TX_EQ_PRESET_P2, + UFS_TX_EQ_PRESET_P3, + UFS_TX_EQ_PRESET_P4, + UFS_TX_EQ_PRESET_P5, + UFS_TX_EQ_PRESET_P6, + UFS_TX_EQ_PRESET_P7, + UFS_TX_EQ_PRESET_MAX, +}; + +enum ufs_tx_hs_preshoot { + UFS_TX_HS_PRESHOOT_DB_0P0, + UFS_TX_HS_PRESHOOT_DB_0P4, + UFS_TX_HS_PRESHOOT_DB_0P8, + UFS_TX_HS_PRESHOOT_DB_1P2, + UFS_TX_HS_PRESHOOT_DB_1P6, + UFS_TX_HS_PRESHOOT_DB_2P5, + UFS_TX_HS_PRESHOOT_DB_3P5, + UFS_TX_HS_PRESHOOT_DB_4P7, +}; + +enum ufs_tx_hs_deemphasis { + UFS_TX_HS_DEEMPHASIS_DB_0P0, + UFS_TX_HS_DEEMPHASIS_DB_0P8, + UFS_TX_HS_DEEMPHASIS_DB_1P6, + UFS_TX_HS_DEEMPHASIS_DB_2P5, + 
UFS_TX_HS_DEEMPHASIS_DB_3P5, + UFS_TX_HS_DEEMPHASIS_DB_4P7, + UFS_TX_HS_DEEMPHASIS_DB_6P0, + UFS_TX_HS_DEEMPHASIS_DB_7P6, +}; + #define DL_FC0ProtectionTimeOutVal_Default 8191 #define DL_TC0ReplayTimeOutVal_Default 65535 #define DL_AFC0ReqTimeOutVal_Default 32767 From 10c40143f369a601cc54dd39777843e000b730a6 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:47 -0700 Subject: [PATCH 05/12] scsi: ufs: core: Add debugfs entries for TX Equalization params Add debugfs support for UFS TX Equalization and UFS TX Equalization Training (EQTR) to facilitate runtime inspection of link quality. These entries allow developers to monitor and optimize TX Equalization parameters and EQTR records during live operation. The debugfs entries are organized on a per-gear basis under the HBA's debugfs root. Since TX EQTR is only defined for High Speed Gear 4 (HS-G4) and above, EQTR-related entries are explicitly excluded for HS-G1 through HS-G3 to avoid exposing unsupported attributes. The ufshcd's debugfs folder structure will look like below: /sys/kernel/debug/ufshcd/*ufs*/ |--tx_eq_hs_gear1/ | |--device_tx_eq_params | |--host_tx_eq_params |--tx_eq_hs_gear2/ |--tx_eq_hs_gear3/ |--tx_eq_hs_gear4/ |--tx_eq_hs_gear5/ |--tx_eq_hs_gear6/ |--device_tx_eq_params |--device_tx_eqtr_record |--host_tx_eq_params |--host_tx_eqtr_record Reviewed-by: Bart Van Assche Reviewed-by: Bean Huo Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-6-can.guo@oss.qualcomm.com Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufs-debugfs.c | 229 +++++++++++++++++++++++++++++++++ drivers/ufs/core/ufs-txeq.c | 7 +- drivers/ufs/core/ufshcd-priv.h | 2 + 3 files changed, 237 insertions(+), 1 deletion(-) diff --git a/drivers/ufs/core/ufs-debugfs.c b/drivers/ufs/core/ufs-debugfs.c index e3baed6c70bd..831758b45163 100644 --- a/drivers/ufs/core/ufs-debugfs.c +++ b/drivers/ufs/core/ufs-debugfs.c @@ -209,6 +209,204 @@ static const struct ufs_debugfs_attr ufs_attrs[] = { { } }; +static int ufs_tx_eq_params_show(struct seq_file *s, void *data) +{ + const char *file_name = s->file->f_path.dentry->d_name.name; + u32 gear = (u32)(uintptr_t)s->file->f_inode->i_private; + struct ufs_hba *hba = hba_from_file(s->file); + struct ufshcd_tx_eq_settings *settings; + struct ufs_pa_layer_attr *pwr_info; + struct ufshcd_tx_eq_params *params; + u32 rate = hba->pwr_info.hs_rate; + u32 num_lanes; + int lane; + + if (!ufshcd_is_tx_eq_supported(hba)) + return -EOPNOTSUPP; + + if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) { + seq_printf(s, "Invalid gear selected: %u\n", gear); + return 0; + } + + if (!hba->max_pwr_info.is_valid) { + seq_puts(s, "Max power info is invalid\n"); + return 0; + } + + pwr_info = &hba->max_pwr_info.info; + params = &hba->tx_eq_params[gear - 1]; + if (!params->is_valid) { + seq_printf(s, "TX EQ params are invalid for HS-G%u, Rate-%s\n", + gear, ufs_hs_rate_to_str(rate)); + return 0; + } + + if (strcmp(file_name, "host_tx_eq_params") == 0) { + settings = params->host; + num_lanes = pwr_info->lane_tx; + seq_printf(s, "Host TX EQ PreShoot Cap: 0x%02x, DeEmphasis Cap: 0x%02x\n", + hba->host_preshoot_cap, hba->host_deemphasis_cap); + } else if (strcmp(file_name, "device_tx_eq_params") == 0) { + settings = params->device; + num_lanes = pwr_info->lane_rx; + seq_printf(s, "Device TX EQ PreShoot Cap: 0x%02x, DeEmphasis Cap: 0x%02x\n", + hba->device_preshoot_cap, hba->device_deemphasis_cap); + } else { + return -ENOENT; + } + + seq_printf(s, "TX EQ setting for HS-G%u, 
Rate-%s:\n", gear, + ufs_hs_rate_to_str(rate)); + for (lane = 0; lane < num_lanes; lane++) + seq_printf(s, "TX Lane %d - PreShoot: %d, DeEmphasis: %d, Pre-Coding %senabled\n", + lane, settings[lane].preshoot, + settings[lane].deemphasis, + settings[lane].precode_en ? "" : "not "); + + return 0; +} + +static int ufs_tx_eq_params_open(struct inode *inode, struct file *file) +{ + return single_open(file, ufs_tx_eq_params_show, inode->i_private); +} + +static const struct file_operations ufs_tx_eq_params_fops = { + .owner = THIS_MODULE, + .open = ufs_tx_eq_params_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct ufs_debugfs_attr ufs_tx_eq_attrs[] = { + { "host_tx_eq_params", 0400, &ufs_tx_eq_params_fops }, + { "device_tx_eq_params", 0400, &ufs_tx_eq_params_fops }, + { } +}; + +static int ufs_tx_eqtr_record_show(struct seq_file *s, void *data) +{ + const char *file_name = s->file->f_path.dentry->d_name.name; + u8 (*fom_array)[TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; + u32 gear = (u32)(uintptr_t)s->file->f_inode->i_private; + unsigned long preshoot_bitmap, deemphasis_bitmap; + struct ufs_hba *hba = hba_from_file(s->file); + struct ufs_pa_layer_attr *pwr_info; + struct ufshcd_tx_eq_params *params; + struct ufshcd_tx_eqtr_record *rec; + u32 rate = hba->pwr_info.hs_rate; + u8 preshoot, deemphasis; + u32 num_lanes; + char name[32]; + int lane; + + if (!ufshcd_is_tx_eq_supported(hba)) + return -EOPNOTSUPP; + + if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) { + seq_printf(s, "Invalid gear selected: %u\n", gear); + return 0; + } + + if (!hba->max_pwr_info.is_valid) { + seq_puts(s, "Max power info is invalid\n"); + return 0; + } + + pwr_info = &hba->max_pwr_info.info; + params = &hba->tx_eq_params[gear - 1]; + if (!params->is_valid) { + seq_printf(s, "TX EQ params are invalid for HS-G%u, Rate-%s\n", + gear, ufs_hs_rate_to_str(rate)); + return 0; + } + + rec = params->eqtr_record; + if (!rec || !rec->last_record_index) { + 
seq_printf(s, "No TX EQTR records found for HS-G%u, Rate-%s.\n", + gear, ufs_hs_rate_to_str(rate)); + return 0; + } + + if (strcmp(file_name, "host_tx_eqtr_record") == 0) { + preshoot_bitmap = (hba->host_preshoot_cap << 0x1) | 0x1; + deemphasis_bitmap = (hba->host_deemphasis_cap << 0x1) | 0x1; + num_lanes = pwr_info->lane_tx; + fom_array = rec->host_fom; + snprintf(name, sizeof(name), "%s", "Host"); + } else if (strcmp(file_name, "device_tx_eqtr_record") == 0) { + preshoot_bitmap = (hba->device_preshoot_cap << 0x1) | 0x1; + deemphasis_bitmap = (hba->device_deemphasis_cap << 0x1) | 0x1; + num_lanes = pwr_info->lane_rx; + fom_array = rec->device_fom; + snprintf(name, sizeof(name), "%s", "Device"); + } else { + return -ENOENT; + } + + seq_printf(s, "%s TX EQTR record summary -\n", name); + seq_printf(s, "Target Power Mode: HS-G%u, Rate-%s\n", gear, + ufs_hs_rate_to_str(rate)); + seq_printf(s, "Most recent record index: %d\n", + rec->last_record_index); + seq_printf(s, "Most recent record timestamp: %llu us\n", + ktime_to_us(rec->last_record_ts)); + + for (lane = 0; lane < num_lanes; lane++) { + seq_printf(s, "\nTX Lane %d FOM - %s\n", lane, "PreShoot\\DeEmphasis"); + seq_puts(s, "\\"); + /* Print DeEmphasis header as X-axis. */ + for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++) + seq_printf(s, "%8d%s", deemphasis, " "); + seq_puts(s, "\n"); + /* Print matrix rows with PreShoot as Y-axis. 
*/ + for (preshoot = 0; preshoot < TX_HS_NUM_PRESHOOT; preshoot++) { + seq_printf(s, "%d", preshoot); + for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++) { + if (test_bit(preshoot, &preshoot_bitmap) && + test_bit(deemphasis, &deemphasis_bitmap)) { + u8 fom = fom_array[lane][preshoot][deemphasis]; + u8 fom_val = fom & RX_FOM_VALUE_MASK; + bool precode_en = fom & RX_FOM_PRECODING_EN_BIT; + + if (ufshcd_is_txeq_presets_used(hba) && + !ufshcd_is_txeq_preset_selected(preshoot, deemphasis)) + seq_printf(s, "%8s%s", "-", " "); + else + seq_printf(s, "%8u%s", fom_val, + precode_en ? "*" : " "); + } else { + seq_printf(s, "%8s%s", "x", " "); + } + } + seq_puts(s, "\n"); + } + } + + return 0; +} + +static int ufs_tx_eqtr_record_open(struct inode *inode, struct file *file) +{ + return single_open(file, ufs_tx_eqtr_record_show, inode->i_private); +} + +static const struct file_operations ufs_tx_eqtr_record_fops = { + .owner = THIS_MODULE, + .open = ufs_tx_eqtr_record_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct ufs_debugfs_attr ufs_tx_eqtr_attrs[] = { + { "host_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops }, + { "device_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops }, + { } +}; + void ufs_debugfs_hba_init(struct ufs_hba *hba) { const struct ufs_debugfs_attr *attr; @@ -230,6 +428,37 @@ void ufs_debugfs_hba_init(struct ufs_hba *hba) hba, &ee_usr_mask_fops); debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root, &hba->debugfs_ee_rate_limit_ms); + + if (!(hba->caps & UFSHCD_CAP_TX_EQUALIZATION)) + return; + + for (u32 gear = UFS_HS_G1; gear <= UFS_HS_GEAR_MAX; gear++) { + struct dentry *txeq_dir; + char name[32]; + + snprintf(name, sizeof(name), "tx_eq_hs_gear%d", gear); + txeq_dir = debugfs_create_dir(name, hba->debugfs_root); + if (IS_ERR_OR_NULL(txeq_dir)) + return; + + d_inode(txeq_dir)->i_private = hba; + + /* Create files for TX Equalization parameters */ + for (attr = 
ufs_tx_eq_attrs; attr->name; attr++) + debugfs_create_file(attr->name, attr->mode, txeq_dir, + (void *)(uintptr_t)gear, + attr->fops); + + /* TX EQTR is supported for HS-G4 and higher Gears */ + if (gear < UFS_HS_G4) + continue; + + /* Create files for TX EQTR related attributes */ + for (attr = ufs_tx_eqtr_attrs; attr->name; attr++) + debugfs_create_file(attr->name, attr->mode, txeq_dir, + (void *)(uintptr_t)gear, + attr->fops); + } } void ufs_debugfs_hba_exit(struct ufs_hba *hba) diff --git a/drivers/ufs/core/ufs-txeq.c b/drivers/ufs/core/ufs-txeq.c index 04f9f1ffa43e..b68a7af78290 100644 --- a/drivers/ufs/core/ufs-txeq.c +++ b/drivers/ufs/core/ufs-txeq.c @@ -375,7 +375,12 @@ static int ufshcd_get_rx_fom(struct ufs_hba *hba, return ret; } -static bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis) +bool ufshcd_is_txeq_presets_used(struct ufs_hba *hba) +{ + return use_txeq_presets; +} + +bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis) { int i; diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index c164caa9a825..4c008230e8d6 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -108,6 +108,8 @@ void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba); int ufshcd_config_tx_eq_settings(struct ufs_hba *hba, struct ufs_pa_layer_attr *pwr_mode); void ufshcd_print_tx_eq_params(struct ufs_hba *hba); +bool ufshcd_is_txeq_presets_used(struct ufs_hba *hba); +bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis); /* Wrapper functions for safely calling variant operations */ static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) From dc5dcac5327832bffc1971b1445553823bdebc08 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:48 -0700 Subject: [PATCH 06/12] scsi: ufs: core: Add helpers to pause and resume command processing In preparation for supporting TX Equalization refreshing, introduce helper functions to safely pause and resume command processing. 
ufshcd_pause_command_processing() ensures the host is in a quiescent state by stopping the block layer tagset, acquiring the necessary locks (scan_mutex and clk_scaling_lock), and waiting for any in-flight commands to complete within a specified timeout. ufshcd_resume_command_processing() restores the host to its previous operational state by reversing these steps in the correct order. Reviewed-by: Bean Huo Reviewed-by: Bart Van Assche Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-7-can.guo@oss.qualcomm.com Signed-off-by: Martin K. Petersen --- drivers/ufs/core/ufshcd-priv.h | 2 ++ drivers/ufs/core/ufshcd.c | 42 ++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index 4c008230e8d6..45904e5746b2 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -78,6 +78,8 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag); int ufshcd_mcq_abort(struct scsi_cmnd *cmd); int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag); void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd); +int ufshcd_pause_command_processing(struct ufs_hba *hba, u64 timeout_us); +void ufshcd_resume_command_processing(struct ufs_hba *hba); /** * enum ufs_descr_fmt - UFS string descriptor format diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index d78723dea951..39e6d12e347a 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1363,6 +1363,48 @@ static int ufshcd_wait_for_pending_cmds(struct ufs_hba *hba, return ret; } +/** + * ufshcd_pause_command_processing - Pause command processing + * @hba: per-adapter instance + * @timeout_us: timeout in microseconds to wait for pending commands to finish + * + * This function stops new command submissions and waits for existing commands + * to complete. 
+ * + * Return: 0 on success, %-EBUSY if commands did not finish within @timeout_us. + * On failure, all acquired locks are released and the tagset is unquiesced. + */ +int ufshcd_pause_command_processing(struct ufs_hba *hba, u64 timeout_us) +{ + int ret = 0; + + mutex_lock(&hba->host->scan_mutex); + blk_mq_quiesce_tagset(&hba->host->tag_set); + down_write(&hba->clk_scaling_lock); + + if (ufshcd_wait_for_pending_cmds(hba, timeout_us)) { + ret = -EBUSY; + up_write(&hba->clk_scaling_lock); + blk_mq_unquiesce_tagset(&hba->host->tag_set); + mutex_unlock(&hba->host->scan_mutex); + } + + return ret; +} + +/** + * ufshcd_resume_command_processing - Resume command processing + * @hba: per-adapter instance + * + * This function resumes command submissions. + */ +void ufshcd_resume_command_processing(struct ufs_hba *hba) +{ + up_write(&hba->clk_scaling_lock); + blk_mq_unquiesce_tagset(&hba->host->tag_set); + mutex_unlock(&hba->host->scan_mutex); +} + /** * ufshcd_scale_gear - scale up/down UFS gear * @hba: per adapter instance From adbabdcf0db0f929e642f95d7528dce0f6bd3a11 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:49 -0700 Subject: [PATCH 07/12] scsi: ufs: core: Add support to retrain TX Equalization via debugfs Drastic environmental changes, such as significant temperature shifts, can impact link signal integrity. In such cases, retraining TX Equalization is necessary to compensate for these environmental changes. Add a debugfs entry, 'tx_eq_ctrl', to allow userspace to manually trigger the TX Equalization training (EQTR) procedure and apply the identified optimal settings on the fly. These entries are created on a per-gear basis for High Speed Gear 4 (HS-G4) and above, as TX EQTR is not supported for lower gears. The 'tx_eq_ctrl' entry currently accepts the 'retrain' command to initiate the procedure. The interface is designed to be scalable to support additional commands in the future. 
Reading the 'tx_eq_ctrl' entry provides a usage hint to the user, ensuring the interface is self-documenting. The ufshcd's debugfs folder structure will look like below: /sys/kernel/debug/ufshcd/*ufs*/ |--tx_eq_hs_gear1/ | |--device_tx_eq_params | |--host_tx_eq_params |--tx_eq_hs_gear2/ |--tx_eq_hs_gear3/ |--tx_eq_hs_gear4/ |--tx_eq_hs_gear5/ |--tx_eq_hs_gear6/ |--device_tx_eq_params |--device_tx_eqtr_record |--host_tx_eq_params |--host_tx_eqtr_record |--tx_eq_ctrl Reviewed-by: Bean Huo Reviewed-by: Bart Van Assche Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-8-can.guo@oss.qualcomm.com Signed-off-by: Martin K. Petersen --- drivers/ufs/core/ufs-debugfs.c | 61 ++++++++++++++++++ drivers/ufs/core/ufs-txeq.c | 110 +++++++++++++++++++++++++++++++-- drivers/ufs/core/ufshcd-priv.h | 5 +- drivers/ufs/core/ufshcd.c | 7 +-- include/ufs/ufshcd.h | 2 + 5 files changed, 175 insertions(+), 10 deletions(-) diff --git a/drivers/ufs/core/ufs-debugfs.c b/drivers/ufs/core/ufs-debugfs.c index 831758b45163..e3dd81d6fe82 100644 --- a/drivers/ufs/core/ufs-debugfs.c +++ b/drivers/ufs/core/ufs-debugfs.c @@ -401,9 +401,70 @@ static const struct file_operations ufs_tx_eqtr_record_fops = { .release = single_release, }; +static ssize_t ufs_tx_eq_ctrl_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + u32 gear = (u32)(uintptr_t)file->f_inode->i_private; + struct ufs_hba *hba = hba_from_file(file); + char kbuf[32]; + int ret; + + if (count >= sizeof(kbuf)) + return -EINVAL; + + if (copy_from_user(kbuf, buf, count)) + return -EFAULT; + + if (!ufshcd_is_tx_eq_supported(hba)) + return -EOPNOTSUPP; + + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || + !hba->max_pwr_info.is_valid) + return -EBUSY; + + if (!hba->ufs_device_wlun) + return -ENODEV; + + kbuf[count] = '\0'; + + if (sysfs_streq(kbuf, "retrain")) { + ret = ufs_debugfs_get_user_access(hba); + if (ret) + return ret; + ret = ufshcd_retrain_tx_eq(hba, 
gear); + ufs_debugfs_put_user_access(hba); + } else { + /* Unknown operation */ + return -EINVAL; + } + + return ret ? ret : count; +} + +static int ufs_tx_eq_ctrl_show(struct seq_file *s, void *data) +{ + seq_puts(s, "write 'retrain' to retrain TX Equalization settings\n"); + return 0; +} + +static int ufs_tx_eq_ctrl_open(struct inode *inode, struct file *file) +{ + return single_open(file, ufs_tx_eq_ctrl_show, inode->i_private); +} + +static const struct file_operations ufs_tx_eq_ctrl_fops = { + .owner = THIS_MODULE, + .open = ufs_tx_eq_ctrl_open, + .read = seq_read, + .llseek = seq_lseek, + .write = ufs_tx_eq_ctrl_write, + .release = single_release, +}; + static const struct ufs_debugfs_attr ufs_tx_eqtr_attrs[] = { { "host_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops }, { "device_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops }, + { "tx_eq_ctrl", 0600, &ufs_tx_eq_ctrl_fops }, { } }; diff --git a/drivers/ufs/core/ufs-txeq.c b/drivers/ufs/core/ufs-txeq.c index b68a7af78290..3a879c644faa 100644 --- a/drivers/ufs/core/ufs-txeq.c +++ b/drivers/ufs/core/ufs-txeq.c @@ -628,9 +628,15 @@ static int ufshcd_setup_tx_eqtr_adapt_length(struct ufs_hba *hba, struct ufshcd_tx_eq_params *params, u32 gear) { + struct ufshcd_tx_eqtr_record *rec = params->eqtr_record; u32 adapt_eqtr; int ret; + if (rec && rec->saved_adapt_eqtr) { + adapt_eqtr = rec->saved_adapt_eqtr; + goto set_adapt_eqtr; + } + if (gear == UFS_HS_G4 || gear == UFS_HS_G5) { u64 t_adapt, t_adapt_local, t_adapt_peer; u32 adapt_cap_local, adapt_cap_peer, adapt_length; @@ -782,6 +788,10 @@ static int ufshcd_setup_tx_eqtr_adapt_length(struct ufs_hba *hba, return -EINVAL; } + if (rec) + rec->saved_adapt_eqtr = (u16)adapt_eqtr; + +set_adapt_eqtr: ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXADAPTLENGTH_EQTR), adapt_eqtr); if (ret) dev_err(hba->dev, "Failed to set adapt length for TX EQTR: %d\n", ret); @@ -847,16 +857,33 @@ static int ufshcd_apply_tx_eqtr_settings(struct ufs_hba *hba, /** * ufshcd_update_tx_eq_params - 
Update TX Equalization params * @params: TX EQ parameters data structure + * @pwr_mode: target power mode containing gear and rate * @eqtr_data: TX EQTR data structure * - * Update TX Equalization params using results from TX EQTR data. + * Update TX Equalization params using results from TX EQTR data. Check also + * the TX EQTR FOM value for each TX lane in the TX EQTR data. If a TX lane got + * a FOM value of 0, restore the TX Equalization settings from the last known + * valid TX Equalization params for that specific TX lane. */ static inline void ufshcd_update_tx_eq_params(struct ufshcd_tx_eq_params *params, + struct ufs_pa_layer_attr *pwr_mode, struct ufshcd_tx_eqtr_data *eqtr_data) { struct ufshcd_tx_eqtr_record *rec = params->eqtr_record; + if (params->is_valid) { + int lane; + + for (lane = 0; lane < pwr_mode->lane_tx; lane++) + if (eqtr_data->host[lane].fom_val == 0) + eqtr_data->host[lane] = params->host[lane]; + + for (lane = 0; lane < pwr_mode->lane_rx; lane++) + if (eqtr_data->device[lane].fom_val == 0) + eqtr_data->device[lane] = params->device[lane]; + } + memcpy(params->host, eqtr_data->host, sizeof(params->host)); memcpy(params->device, eqtr_data->device, sizeof(params->device)); @@ -955,7 +982,7 @@ static int __ufshcd_tx_eqtr(struct ufs_hba *hba, dev_info(hba->dev, "TX EQTR procedure completed! Time elapsed: %llu ms\n", ktime_to_ms(ktime_sub(ktime_get(), start))); - ufshcd_update_tx_eq_params(params, eqtr_data); + ufshcd_update_tx_eq_params(params, pwr_mode, eqtr_data); return ret; } @@ -1079,6 +1106,7 @@ static int ufshcd_tx_eqtr(struct ufs_hba *hba, * ufshcd_config_tx_eq_settings - Configure TX Equalization settings * @hba: per adapter instance * @pwr_mode: target power mode containing gear and rate information + * @force_tx_eqtr: execute the TX EQTR procedure * * This function finds and sets the TX Equalization settings for the given * target power mode. 
@@ -1086,7 +1114,8 @@ static int ufshcd_tx_eqtr(struct ufs_hba *hba, * Returns 0 on success, error code otherwise */ int ufshcd_config_tx_eq_settings(struct ufs_hba *hba, - struct ufs_pa_layer_attr *pwr_mode) + struct ufs_pa_layer_attr *pwr_mode, + bool force_tx_eqtr) { struct ufshcd_tx_eq_params *params; u32 gear, rate; @@ -1123,7 +1152,7 @@ int ufshcd_config_tx_eq_settings(struct ufs_hba *hba, } params = &hba->tx_eq_params[gear - 1]; - if (!params->is_valid) { + if (!params->is_valid || force_tx_eqtr) { int ret; ret = ufshcd_tx_eqtr(hba, params, pwr_mode); @@ -1189,3 +1218,76 @@ void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba) } } } + +/** + * ufshcd_retrain_tx_eq - Retrain TX Equalization and apply new settings + * @hba: per-adapter instance + * @gear: target High-Speed (HS) gear for retraining + * + * This function initiates a refresh of the TX Equalization settings for a + * specific HS gear. It scales the clocks to maximum frequency, negotiates the + * power mode with the device, retrains TX EQ and applies new TX EQ settings + * by conducting a Power Mode change. 
+ * + * Returns 0 on success, non-zero error code otherwise + */ +int ufshcd_retrain_tx_eq(struct ufs_hba *hba, u32 gear) +{ + struct ufs_pa_layer_attr new_pwr_info, final_params = {}; + int ret; + + if (!ufshcd_is_tx_eq_supported(hba) || !use_adaptive_txeq) + return -EOPNOTSUPP; + + if (gear < adaptive_txeq_gear) + return -ERANGE; + + ufshcd_hold(hba); + + ret = ufshcd_pause_command_processing(hba, 1 * USEC_PER_SEC); + if (ret) { + ufshcd_release(hba); + return ret; + } + + /* scale up clocks to max frequency before TX EQTR */ + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_scale_clks(hba, ULONG_MAX, true); + + new_pwr_info = hba->pwr_info; + new_pwr_info.gear_tx = gear; + new_pwr_info.gear_rx = gear; + + ret = ufshcd_vops_negotiate_pwr_mode(hba, &new_pwr_info, &final_params); + if (ret) + memcpy(&final_params, &new_pwr_info, sizeof(final_params)); + + if (final_params.gear_tx != gear) { + dev_err(hba->dev, "Negotiated Gear (%u) does not match target Gear (%u)\n", + final_params.gear_tx, gear); + ret = -EINVAL; + goto out; + } + + ret = ufshcd_config_tx_eq_settings(hba, &final_params, true); + if (ret) { + dev_err(hba->dev, "Failed to config TX Equalization for HS-G%u, Rate-%s: %d\n", + final_params.gear_tx, + ufs_hs_rate_to_str(final_params.hs_rate), ret); + goto out; + } + + /* Change Power Mode to apply the new TX EQ settings */ + ret = ufshcd_change_power_mode(hba, &final_params, + UFSHCD_PMC_POLICY_FORCE); + if (ret) + dev_err(hba->dev, "%s: Failed to change Power Mode to HS-G%u, Rate-%s: %d\n", + __func__, final_params.gear_tx, + ufs_hs_rate_to_str(final_params.hs_rate), ret); + +out: + ufshcd_resume_command_processing(hba); + ufshcd_release(hba); + + return ret; +} diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index 45904e5746b2..d296f00c099d 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -80,6 +80,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag); void 
ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd); int ufshcd_pause_command_processing(struct ufs_hba *hba, u64 timeout_us); void ufshcd_resume_command_processing(struct ufs_hba *hba); +int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, bool scale_up); /** * enum ufs_descr_fmt - UFS string descriptor format @@ -108,10 +109,12 @@ int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id); int ufshcd_uic_tx_eqtr(struct ufs_hba *hba, int gear); void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba); int ufshcd_config_tx_eq_settings(struct ufs_hba *hba, - struct ufs_pa_layer_attr *pwr_mode); + struct ufs_pa_layer_attr *pwr_mode, + bool force_tx_eqtr); void ufshcd_print_tx_eq_params(struct ufs_hba *hba); bool ufshcd_is_txeq_presets_used(struct ufs_hba *hba); bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis); +int ufshcd_retrain_tx_eq(struct ufs_hba *hba, u32 gear); /* Wrapper functions for safely calling variant operations */ static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 39e6d12e347a..2e8255b3d883 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -333,8 +333,6 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); static void ufshcd_resume_clkscaling(struct ufs_hba *hba); static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); -static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, - bool scale_up); static irqreturn_t ufshcd_intr(int irq, void *__hba); static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); @@ -1209,8 +1207,7 @@ static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq) * * Return: 0 if successful; < 0 upon failure. 
*/ -static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, - bool scale_up) +int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, bool scale_up) { int ret = 0; ktime_t start = ktime_get(); @@ -4893,7 +4890,7 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba, memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); } - ret = ufshcd_config_tx_eq_settings(hba, &final_params); + ret = ufshcd_config_tx_eq_settings(hba, &final_params, false); if (ret) dev_warn(hba->dev, "Failed to configure TX Equalization for HS-G%u, Rate-%s: %d\n", final_params.gear_tx, diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 35b1288327d0..bc9e48e89db4 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -341,12 +341,14 @@ struct ufshcd_tx_eqtr_data { * @device_fom: Device TX EQTR FOM record * @last_record_ts: Timestamp of the most recent TX EQTR record * @last_record_index: Index of the most recent TX EQTR record + * @saved_adapt_eqtr: Saved Adaptation length setting for TX EQTR */ struct ufshcd_tx_eqtr_record { u8 host_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; u8 device_fom[UFS_MAX_LANES][TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS]; ktime_t last_record_ts; u16 last_record_index; + u16 saved_adapt_eqtr; }; /** From 53c94067efa252e20f813c6eb714dc1cddf06aaa Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:50 -0700 Subject: [PATCH 08/12] scsi: ufs: ufs-qcom: Fixup PAM-4 TX L0_L1_L2_L3 adaptation pattern length If HS-G6 Power Mode change handshake is successful and outbound data Lanes are expected to transmit ADAPT, M-TX Lanes shall be configured as if (Adapt Type == REFRESH) TX_HS_ADAPT_LENGTH_L0_L1_L2_L3 = PA_PeerRxHsG6AdaptRefreshL0L1L2L3. else if (Adapt Type == INITIAL) TX_HS_ADAPT_LENGTH_L0_L1_L2_L3 = PA_PeerRxHsG6AdaptInitialL0L1L2L3. 
On some platforms, the ADAPT_L0_L1_L2_L3 duration on Host TX Lanes is only a half of theoretical ADAPT_L0_L1_L2_L3 duration TADAPT_L0_L1_L2_L3 (in PAM-4 UI) calculated from TX_HS_ADAPT_LENGTH_L0_L1_L2_L3. For such platforms, the workaround is to double the ADAPT_L0_L1_L2_L3 duration by uplifting TX_HS_ADAPT_LENGTH_L0_L1_L2_L3. UniPro initializes TX_HS_ADAPT_LENGTH_L0_L1_L2_L3 during HS-G6 Power Mode change handshake, it would be too late for SW to update TX_HS_ADAPT_LENGTH_L0_L1_L2_L3 post HS-G6 Power Mode change. Update PA_PeerRxHsG6AdaptRefreshL0L1L2L3 and PA_PeerRxHsG6AdaptInitialL0L1L2L3 post Link Startup and before HS-G6 Power Mode change, so that the UniPro would use the updated value during HS-G6 Power Mode change handshake. Reviewed-by: Bean Huo Reviewed-by: Bart Van Assche Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-9-can.guo@oss.qualcomm.com Signed-off-by: Martin K. Petersen --- drivers/ufs/host/ufs-qcom.c | 178 ++++++++++++++++++++++++++++++++++++ 1 file changed, 178 insertions(+) diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index cdc769886e82..b94fe93b830e 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -1069,10 +1069,188 @@ static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba) dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err); } +/** + * ufs_qcom_double_t_adapt_l0l1l2l3 - Create a new adapt that doubles the + * adaptation duration TADAPT_L0_L1_L2_L3 derived from the old adapt. 
+ * + * @old_adapt: Original ADAPT_L0_L1_L2_L3 capability + * + * ADAPT_length_L0_L1_L2_L3 formula from M-PHY spec: + * if (ADAPT_range_L0_L1_L2_L3 == COARSE) { + * ADAPT_length_L0_L1_L2_L3 = [0, 12] + * ADAPT_L0_L1_L2_L3 = 215 x 2^ADAPT_length_L0_L1_L2_L3 + * } else if (ADAPT_range_L0_L1_L2_L3 == FINE) { + * ADAPT_length_L0_L1_L2_L3 = [0, 127] + * TADAPT_L0_L1_L2_L3 = 215 x (ADAPT_length_L0_L1_L2_L3 + 1) + * } + * + * To double the adaptation duration TADAPT_L0_L1_L2_L3: + * 1. If adapt range is COARSE (1'b1), new adapt = old adapt + 1. + * 2. If adapt range is FINE (1'b0): + * a) If old adapt length is < 64, (new adapt + 1) = 2 * (old adapt + 1). + * b) If old adapt length is >= 64, set new adapt to 0x88 using COARSE + * range, because new adapt get from equation in a) shall exceed 127. + * + * Examples: + * ADAPT_range_L0_L1_L2_L3 | ADAPT_length_L0_L1_L2_L3 | TADAPT_L0_L1_L2_L3 (PAM-4 UI) + * 0 3 131072 + * 0 7 262144 + * 0 63 2097152 + * 0 64 2129920 + * 0 127 4194304 + * 1 8 8388608 + * 1 9 16777216 + * 1 10 33554432 + * 1 11 67108864 + * 1 12 134217728 + * + * Return: new adapt. 
+ */ +static u32 ufs_qcom_double_t_adapt_l0l1l2l3(u32 old_adapt) +{ + u32 adapt_length = old_adapt & ADAPT_LENGTH_MASK; + u32 new_adapt; + + if (IS_ADAPT_RANGE_COARSE(old_adapt)) { + new_adapt = (adapt_length + 1) | ADAPT_RANGE_BIT; + } else { + if (adapt_length < 64) + new_adapt = (adapt_length << 1) + 1; + else + /* + * 0x88 is the very coarse Adapt value which is two + * times of the largest fine Adapt value (0x7F) + */ + new_adapt = 0x88; + } + + return new_adapt; +} + +static void ufs_qcom_limit_max_gear(struct ufs_hba *hba, + enum ufs_hs_gear_tag gear) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; + struct ufs_host_params *host_params = &host->host_params; + + host_params->hs_tx_gear = gear; + host_params->hs_rx_gear = gear; + pwr_info->gear_tx = gear; + pwr_info->gear_rx = gear; + + dev_warn(hba->dev, "Limited max gear of host and device to HS-G%d\n", gear); +} + +static void ufs_qcom_fixup_tx_adapt_l0l1l2l3(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; + struct ufs_host_params *host_params = &host->host_params; + u32 old_adapt, new_adapt, actual_adapt; + bool limit_speed = false; + int err; + + if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1 || + host_params->hs_tx_gear <= UFS_HS_G5 || + pwr_info->gear_tx <= UFS_HS_G5) + return; + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3), &old_adapt); + if (err) + goto out; + + if (old_adapt > ADAPT_L0L1L2L3_LENGTH_MAX) { + dev_err(hba->dev, "PA_PeerRxHsG6AdaptInitialL0L1L2L3 value (0x%x) exceeds MAX\n", + old_adapt); + err = -ERANGE; + goto out; + } + + new_adapt = ufs_qcom_double_t_adapt_l0l1l2l3(old_adapt); + dev_dbg(hba->dev, "Original PA_PeerRxHsG6AdaptInitialL0L1L2L3 = 0x%x, new value = 0x%x\n", + old_adapt, new_adapt); + + /* + * 0x8C is the max possible value allowed by UniPro v3.0 spec, some HWs + * 
can accept 0x8D but some cannot. + */ + if (new_adapt <= ADAPT_L0L1L2L3_LENGTH_MAX || + (new_adapt == ADAPT_L0L1L2L3_LENGTH_MAX + 1 && host->hw_ver.minor == 0x1)) { + err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3), + new_adapt); + if (err) + goto out; + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3), + &actual_adapt); + if (err) + goto out; + + if (actual_adapt != new_adapt) { + limit_speed = true; + dev_warn(hba->dev, "PA_PeerRxHsG6AdaptInitialL0L1L2L3 0x%x, expect 0x%x\n", + actual_adapt, new_adapt); + } + } else { + limit_speed = true; + dev_warn(hba->dev, "New PA_PeerRxHsG6AdaptInitialL0L1L2L3 (0x%x) is too large!\n", + new_adapt); + } + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3), &old_adapt); + if (err) + goto out; + + if (old_adapt > ADAPT_L0L1L2L3_LENGTH_MAX) { + dev_err(hba->dev, "PA_PeerRxHsG6AdaptRefreshL0L1L2L3 value (0x%x) exceeds MAX\n", + old_adapt); + err = -ERANGE; + goto out; + } + + new_adapt = ufs_qcom_double_t_adapt_l0l1l2l3(old_adapt); + dev_dbg(hba->dev, "Original PA_PeerRxHsG6AdaptRefreshL0L1L2L3 = 0x%x, new value = 0x%x\n", + old_adapt, new_adapt); + + /* + * 0x8C is the max possible value allowed by UniPro v3.0 spec, some HWs + * can accept 0x8D but some cannot. 
+ */ + if (new_adapt <= ADAPT_L0L1L2L3_LENGTH_MAX || + (new_adapt == ADAPT_L0L1L2L3_LENGTH_MAX + 1 && host->hw_ver.minor == 0x1)) { + err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3), + new_adapt); + if (err) + goto out; + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3), + &actual_adapt); + if (err) + goto out; + + if (actual_adapt != new_adapt) { + limit_speed = true; + dev_warn(hba->dev, "PA_PeerRxHsG6AdaptRefreshL0L1L2L3 0x%x, expect 0x%x\n", + new_adapt, actual_adapt); + } + } else { + limit_speed = true; + dev_warn(hba->dev, "New PA_PeerRxHsG6AdaptRefreshL0L1L2L3 (0x%x) is too large!\n", + new_adapt); + } + +out: + if (limit_speed || err) + ufs_qcom_limit_max_gear(hba, UFS_HS_G5); +} + static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) { int err = 0; + ufs_qcom_fixup_tx_adapt_l0l1l2l3(hba); + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); From 385b95893e799885ec54a4ec2e240b1d814205be Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:51 -0700 Subject: [PATCH 09/12] scsi: ufs: ufs-qcom: Implement vops tx_eqtr_notify() On some platforms, HW does not support triggering TX EQTR from the most reliable High-Speed (HS) Gear (HS Gear1), but only allows to trigger TX EQTR for the target HS Gear from the same HS Gear. To work around the HW limitation, implement vops tx_eqtr_notify() to change Power Mode to the target TX EQTR HS Gear prior to TX EQTR procedure and change Power Mode back to HS Gear1 (the most reliable gear) post TX EQTR procedure. Reviewed-by: Bean Huo Reviewed-by: Bart Van Assche Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-10-can.guo@oss.qualcomm.com Signed-off-by: Martin K. 
Petersen --- drivers/ufs/host/ufs-qcom.c | 41 +++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index b94fe93b830e..eac5e95e740b 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -2505,6 +2505,46 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq) return min_t(u32, gear, hba->max_pwr_info.info.gear_rx); } +static int ufs_qcom_tx_eqtr_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *pwr_mode) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_pa_layer_attr pwr_mode_hs_g1 = { + .gear_rx = UFS_HS_G1, + .gear_tx = UFS_HS_G1, + .lane_rx = pwr_mode->lane_rx, + .lane_tx = pwr_mode->lane_tx, + .pwr_rx = FAST_MODE, + .pwr_tx = FAST_MODE, + .hs_rate = pwr_mode->hs_rate, + }; + u32 gear = pwr_mode->gear_tx; + u32 rate = pwr_mode->hs_rate; + int ret; + + if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1) + return 0; + + if (status == PRE_CHANGE) { + /* PMC to target HS Gear. */ + ret = ufshcd_change_power_mode(hba, pwr_mode, + UFSHCD_PMC_POLICY_DONT_FORCE); + if (ret) + dev_err(hba->dev, "%s: Failed to PMC to target HS-G%u, Rate-%s: %d\n", + __func__, gear, ufs_hs_rate_to_str(rate), ret); + } else { + /* PMC back to HS-G1. 
*/ + ret = ufshcd_change_power_mode(hba, &pwr_mode_hs_g1, + UFSHCD_PMC_POLICY_DONT_FORCE); + if (ret) + dev_err(hba->dev, "%s: Failed to PMC to HS-G1, Rate-%s: %d\n", + __func__, ufs_hs_rate_to_str(rate), ret); + } + + return ret; +} + /* * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations * @@ -2535,6 +2575,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs, .config_esi = ufs_qcom_config_esi, .freq_to_gear_speed = ufs_qcom_freq_to_gear_speed, + .tx_eqtr_notify = ufs_qcom_tx_eqtr_notify, }; static const struct ufs_hba_variant_ops ufs_hba_qcom_sa8255p_vops = { From 26605db7604deb18cf004cf3ad51e72e5d9b7add Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:52 -0700 Subject: [PATCH 10/12] scsi: ufs: ufs-qcom: Implement vops get_rx_fom() On some platforms, host's M-PHY RX_FOM Attribute always reads 0, meaning SW cannot rely on Figure of Merit (FOM) to identify the optimal TX Equalization settings for device's TX Lanes. Implement the vops ufs_qcom_get_rx_fom() such that SW can utilize the UFS Eye Opening Monitor (EOM) to evaluate the TX Equalization settings for device's TX Lanes. Reviewed-by: Bean Huo Reviewed-by: Bart Van Assche Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-11-can.guo@oss.qualcomm.com Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufs-txeq.c | 6 +- drivers/ufs/host/ufs-qcom.c | 312 ++++++++++++++++++++++++++++++++++++ drivers/ufs/host/ufs-qcom.h | 40 +++++ include/ufs/ufshcd.h | 3 + include/ufs/unipro.h | 25 +++ 5 files changed, 383 insertions(+), 3 deletions(-) diff --git a/drivers/ufs/core/ufs-txeq.c b/drivers/ufs/core/ufs-txeq.c index 3a879c644faa..b2dc89124353 100644 --- a/drivers/ufs/core/ufs-txeq.c +++ b/drivers/ufs/core/ufs-txeq.c @@ -232,9 +232,8 @@ ufshcd_compose_tx_eq_setting(struct ufshcd_tx_eq_settings *settings, * * Returns 0 on success, negative error code otherwise */ -static int ufshcd_apply_tx_eq_settings(struct ufs_hba *hba, - struct ufshcd_tx_eq_params *params, - u32 gear) +int ufshcd_apply_tx_eq_settings(struct ufs_hba *hba, + struct ufshcd_tx_eq_params *params, u32 gear) { struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; u32 setting; @@ -263,6 +262,7 @@ static int ufshcd_apply_tx_eq_settings(struct ufs_hba *hba, return 0; } +EXPORT_SYMBOL_GPL(ufshcd_apply_tx_eq_settings); /** * ufshcd_evaluate_tx_eqtr_fom - Evaluate TX EQTR FOM results diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index eac5e95e740b..a0314cb55c7f 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -2505,6 +2505,317 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq) return min_t(u32, gear, hba->max_pwr_info.info.gear_rx); } +static int ufs_qcom_host_eom_config(struct ufs_hba *hba, int lane, + const struct ufs_eom_coord *eom_coord, + u32 target_test_count) +{ + enum ufs_eom_eye_mask eye_mask = eom_coord->eye_mask; + int v_step = eom_coord->v_step; + int t_step = eom_coord->t_step; + u32 volt_step, timing_step; + int ret; + + if (abs(v_step) > UFS_QCOM_EOM_VOLTAGE_STEPS_MAX) { + dev_err(hba->dev, "Invalid EOM Voltage Step: %d\n", v_step); + return -ERANGE; + } + + if (abs(t_step) > UFS_QCOM_EOM_TIMING_STEPS_MAX) { + dev_err(hba->dev, "Invalid EOM Timing Step: %d\n", t_step); + return 
-ERANGE; + } + + if (v_step < 0) + volt_step = RX_EYEMON_NEGATIVE_STEP_BIT | (u32)(-v_step); + else + volt_step = (u32)v_step; + + if (t_step < 0) + timing_step = RX_EYEMON_NEGATIVE_STEP_BIT | (u32)(-t_step); + else + timing_step = (u32)t_step; + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ENABLE, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), + BIT(eye_mask) | RX_EYEMON_EXTENDED_VRANGE_BIT); + if (ret) { + dev_err(hba->dev, "Failed to enable Host EOM on Lane %d: %d\n", + lane, ret); + return ret; + } + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TIMING_STEPS, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), + timing_step); + if (ret) { + dev_err(hba->dev, "Failed to set Host EOM timing step on Lane %d: %d\n", + lane, ret); + return ret; + } + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_VOLTAGE_STEPS, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), + volt_step); + if (ret) { + dev_err(hba->dev, "Failed to set Host EOM voltage step on Lane %d: %d\n", + lane, ret); + return ret; + } + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TARGET_TEST_COUNT, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), + target_test_count); + if (ret) + dev_err(hba->dev, "Failed to set Host EOM target test count on Lane %d: %d\n", + lane, ret); + + return ret; +} + +static int ufs_qcom_host_eom_may_stop(struct ufs_hba *hba, int lane, + u32 target_test_count, u32 *err_count) +{ + u32 start, tested_count, error_count; + int ret; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_START, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), + &start); + if (ret) { + dev_err(hba->dev, "Failed to get Host EOM start status on Lane %d: %d\n", + lane, ret); + return ret; + } + + if (start & 0x1) + return -EAGAIN; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TESTED_COUNT, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), + &tested_count); + if (ret) { + dev_err(hba->dev, "Failed to get Host EOM tested count on Lane %d: %d\n", + lane, ret); + return ret; + } + + ret = ufshcd_dme_get(hba, 
UIC_ARG_MIB_SEL(RX_EYEMON_ERROR_COUNT, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), + &error_count); + if (ret) { + dev_err(hba->dev, "Failed to get Host EOM error count on Lane %d: %d\n", + lane, ret); + return ret; + } + + /* EOM can stop */ + if ((tested_count >= target_test_count - 3) || error_count > 0) { + *err_count = error_count; + + /* Disable EOM */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ENABLE, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)), + 0x0); + if (ret) { + dev_err(hba->dev, "Failed to disable Host EOM on Lane %d: %d\n", + lane, ret); + return ret; + } + } else { + return -EAGAIN; + } + + return 0; +} + +static int ufs_qcom_host_eom_scan(struct ufs_hba *hba, int num_lanes, + const struct ufs_eom_coord *eom_coord, + u32 target_test_count, u32 *err_count) +{ + bool eom_stopped[PA_MAXDATALANES] = { 0 }; + int lane, ret; + u32 setting; + + if (!err_count || !eom_coord) + return -EINVAL; + + if (target_test_count < UFS_QCOM_EOM_TARGET_TEST_COUNT_MIN) { + dev_err(hba->dev, "Target test count (%u) too small for Host EOM\n", + target_test_count); + return -ERANGE; + } + + for (lane = 0; lane < num_lanes; lane++) { + ret = ufs_qcom_host_eom_config(hba, lane, eom_coord, + target_test_count); + if (ret) { + dev_err(hba->dev, "Failed to config Host RX EOM: %d\n", ret); + return ret; + } + } + + /* + * Trigger a PACP_PWR_req to kick start EOM, but not to really change + * the Power Mode. + */ + ret = ufshcd_uic_change_pwr_mode(hba, FAST_MODE << 4 | FAST_MODE); + if (ret) { + dev_err(hba->dev, "Failed to change power mode to kick start Host EOM: %d\n", + ret); + return ret; + } + +more_burst: + /* Create burst on Host RX Lane. 
*/ + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &setting); + + for (lane = 0; lane < num_lanes; lane++) { + if (eom_stopped[lane]) + continue; + + ret = ufs_qcom_host_eom_may_stop(hba, lane, target_test_count, + &err_count[lane]); + if (!ret) { + eom_stopped[lane] = true; + } else if (ret == -EAGAIN) { + /* Need more burst to excercise EOM */ + goto more_burst; + } else { + dev_err(hba->dev, "Failed to stop Host EOM: %d\n", ret); + return ret; + } + + dev_dbg(hba->dev, "Host RX Lane %d EOM, v_step %d, t_step %d, error count %u\n", + lane, eom_coord->v_step, eom_coord->t_step, + err_count[lane]); + } + + return 0; +} + +static int ufs_qcom_host_sw_rx_fom(struct ufs_hba *hba, int num_lanes, u32 *fom) +{ + const struct ufs_eom_coord *eom_coord = sw_rx_fom_eom_coords_g6; + u32 eom_err_count[PA_MAXDATALANES] = { 0 }; + u32 curr_ahit; + int lane, i, ret; + + if (!fom) + return -EINVAL; + + /* Stop the auto hibernate idle timer */ + curr_ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); + if (curr_ahit) + ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER); + + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), PA_NO_ADAPT); + if (ret) { + dev_err(hba->dev, "Failed to select NO_ADAPT before starting Host EOM: %d\n", ret); + goto out; + } + + for (i = 0; i < SW_RX_FOM_EOM_COORDS; i++, eom_coord++) { + ret = ufs_qcom_host_eom_scan(hba, num_lanes, eom_coord, + UFS_QCOM_EOM_TARGET_TEST_COUNT_G6, + eom_err_count); + if (ret) { + dev_err(hba->dev, "Failed to run Host EOM scan: %d\n", ret); + break; + } + + for (lane = 0; lane < num_lanes; lane++) { + /* Bad coordinates have no weights */ + if (eom_err_count[lane]) + continue; + fom[lane] += SW_RX_FOM_EOM_COORDS_WEIGHT; + } + } + +out: + /* Restore the auto hibernate idle timer */ + if (curr_ahit) + ufshcd_writel(hba, curr_ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); + + return ret; +} + +static int ufs_qcom_get_rx_fom(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode, + struct tx_eqtr_iter *h_iter, + 
struct tx_eqtr_iter *d_iter) +{ + struct ufshcd_tx_eq_params *params __free(kfree) = + kzalloc(sizeof(*params), GFP_KERNEL); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_pa_layer_attr old_pwr_info; + u32 fom[PA_MAXDATALANES] = { 0 }; + u32 gear = pwr_mode->gear_tx; + u32 rate = pwr_mode->hs_rate; + int lane, ret; + + if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1 || + gear <= UFS_HS_G5 || !d_iter || !d_iter->is_updated) + return 0; + + if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) + return -ERANGE; + + if (!params) + return -ENOMEM; + + memcpy(&old_pwr_info, &hba->pwr_info, sizeof(struct ufs_pa_layer_attr)); + + memcpy(params, &hba->tx_eq_params[gear - 1], sizeof(struct ufshcd_tx_eq_params)); + for (lane = 0; lane < pwr_mode->lane_rx; lane++) { + params->device[lane].preshoot = d_iter->preshoot; + params->device[lane].deemphasis = d_iter->deemphasis; + } + + /* Use TX EQTR settings as Device's TX Equalization settings. */ + ret = ufshcd_apply_tx_eq_settings(hba, params, gear); + if (ret) { + dev_err(hba->dev, "%s: Failed to apply TX EQ settings for HS-G%u: %d\n", + __func__, gear, ret); + return ret; + } + + /* Force PMC to target HS Gear to use new TX Equalization settings. */ + ret = ufshcd_change_power_mode(hba, pwr_mode, UFSHCD_PMC_POLICY_FORCE); + if (ret) { + dev_err(hba->dev, "%s: Failed to change power mode to HS-G%u, Rate-%s: %d\n", + __func__, gear, ufs_hs_rate_to_str(rate), ret); + return ret; + } + + ret = ufs_qcom_host_sw_rx_fom(hba, pwr_mode->lane_rx, fom); + if (ret) { + dev_err(hba->dev, "Failed to get SW FOM of TX (PreShoot: %u, DeEmphasis: %u): %d\n", + d_iter->preshoot, d_iter->deemphasis, ret); + return ret; + } + + /* Restore Device's TX Equalization settings. */ + ret = ufshcd_apply_tx_eq_settings(hba, &hba->tx_eq_params[gear - 1], gear); + if (ret) { + dev_err(hba->dev, "%s: Failed to apply TX EQ settings for HS-G%u: %d\n", + __func__, gear, ret); + return ret; + } + + /* Restore Power Mode. 
*/ + ret = ufshcd_change_power_mode(hba, &old_pwr_info, UFSHCD_PMC_POLICY_FORCE); + if (ret) { + dev_err(hba->dev, "%s: Failed to restore power mode to HS-G%u: %d\n", + __func__, old_pwr_info.gear_tx, ret); + return ret; + } + + for (lane = 0; lane < pwr_mode->lane_rx; lane++) + d_iter->fom[lane] = fom[lane]; + + return 0; +} + static int ufs_qcom_tx_eqtr_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, struct ufs_pa_layer_attr *pwr_mode) @@ -2575,6 +2886,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs, .config_esi = ufs_qcom_config_esi, .freq_to_gear_speed = ufs_qcom_freq_to_gear_speed, + .get_rx_fom = ufs_qcom_get_rx_fom, .tx_eqtr_notify = ufs_qcom_tx_eqtr_notify, }; diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h index 1111ab34da01..7183d6b2c8bb 100644 --- a/drivers/ufs/host/ufs-qcom.h +++ b/drivers/ufs/host/ufs-qcom.h @@ -33,6 +33,46 @@ #define DL_VS_CLK_CFG_MASK GENMASK(9, 0) #define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9) +#define UFS_QCOM_EOM_VOLTAGE_STEPS_MAX 127 +#define UFS_QCOM_EOM_TIMING_STEPS_MAX 63 +#define UFS_QCOM_EOM_TARGET_TEST_COUNT_MIN 8 +#define UFS_QCOM_EOM_TARGET_TEST_COUNT_G6 0x3F + +#define SW_RX_FOM_EOM_COORDS 23 +#define SW_RX_FOM_EOM_COORDS_WEIGHT (127 / SW_RX_FOM_EOM_COORDS) + +struct ufs_eom_coord { + int t_step; + int v_step; + u8 eye_mask; +}; + +static const struct ufs_eom_coord sw_rx_fom_eom_coords_g6[SW_RX_FOM_EOM_COORDS] = { + [0] = { -2, -15, UFS_EOM_EYE_MASK_M }, + [1] = { 0, -15, UFS_EOM_EYE_MASK_M }, + [2] = { 2, -15, UFS_EOM_EYE_MASK_M }, + [3] = { -4, -10, UFS_EOM_EYE_MASK_M }, + [4] = { -2, -10, UFS_EOM_EYE_MASK_M }, + [5] = { 0, -10, UFS_EOM_EYE_MASK_M }, + [6] = { 2, -10, UFS_EOM_EYE_MASK_M }, + [7] = { 4, -10, UFS_EOM_EYE_MASK_M }, + [8] = { -6, 0, UFS_EOM_EYE_MASK_M }, + [9] = { -4, 0, UFS_EOM_EYE_MASK_M }, + [10] = { -2, 0, UFS_EOM_EYE_MASK_M }, + [11] = { 0, 0, UFS_EOM_EYE_MASK_M }, + [12] = { 2, 0, 
UFS_EOM_EYE_MASK_M }, + [13] = { 4, 0, UFS_EOM_EYE_MASK_M }, + [14] = { 6, 0, UFS_EOM_EYE_MASK_M }, + [15] = { -4, 10, UFS_EOM_EYE_MASK_M }, + [16] = { -2, 10, UFS_EOM_EYE_MASK_M }, + [17] = { 0, 10, UFS_EOM_EYE_MASK_M }, + [18] = { 2, 10, UFS_EOM_EYE_MASK_M }, + [19] = { 4, 10, UFS_EOM_EYE_MASK_M }, + [20] = { -2, 15, UFS_EOM_EYE_MASK_M }, + [21] = { 0, 15, UFS_EOM_EYE_MASK_M }, + [22] = { 2, 15, UFS_EOM_EYE_MASK_M }, +}; + /* Qualcomm MCQ Configuration */ #define UFS_QCOM_MCQCAP_QCFGPTR 224 /* 0xE0 in hex */ #define UFS_QCOM_MCQ_CONFIG_OFFSET (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) /* 0x1C000 */ diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index bc9e48e89db4..be15b6247303 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -1515,6 +1515,9 @@ extern int ufshcd_config_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *desired_pwr_mode, enum ufshcd_pmc_policy pmc_policy); extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode); +extern int ufshcd_apply_tx_eq_settings(struct ufs_hba *hba, + struct ufshcd_tx_eq_params *params, + u32 gear); /* UIC command interfaces for DME primitives */ #define DME_LOCAL 0 diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h index 4aa592130b4e..f849a2a101ae 100644 --- a/include/ufs/unipro.h +++ b/include/ufs/unipro.h @@ -32,6 +32,8 @@ #define TX_LCC_SEQUENCER 0x0032 #define TX_MIN_ACTIVATETIME 0x0033 #define TX_PWM_G6_G7_SYNC_LENGTH 0x0034 +#define TX_HS_DEEMPHASIS_SETTING 0x0037 +#define TX_HS_PRESHOOT_SETTING 0x003B #define TX_REFCLKFREQ 0x00EB #define TX_CFGCLKFREQVAL 0x00EC #define CFGEXTRATTR 0x00F0 @@ -76,10 +78,27 @@ #define RX_REFCLKFREQ 0x00EB #define RX_CFGCLKFREQVAL 0x00EC #define CFGWIDEINLN 0x00F0 +#define RX_EYEMON_CAP 0x00F1 +#define RX_EYEMON_TIMING_MAX_STEPS_CAP 0x00F2 +#define RX_EYEMON_TIMING_MAX_OFFSET_CAP 0x00F3 +#define RX_EYEMON_VOLTAGE_MAX_STEPS_CAP 0x00F4 +#define RX_EYEMON_VOLTAGE_MAX_OFFSET_CAP 0x00F5 +#define RX_EYEMON_ENABLE 0x00F6 +#define RX_EYEMON_TIMING_STEPS 
0x00F7 +#define RX_EYEMON_VOLTAGE_STEPS 0x00F8 +#define RX_EYEMON_TARGET_TEST_COUNT 0x00F9 +#define RX_EYEMON_TESTED_COUNT 0x00FA +#define RX_EYEMON_ERROR_COUNT 0x00FB +#define RX_EYEMON_START 0x00FC +#define RX_EYEMON_EXTENDED_ERROR_COUNT 0x00FD + #define ENARXDIRECTCFG4 0x00F2 #define ENARXDIRECTCFG3 0x00F3 #define ENARXDIRECTCFG2 0x00F4 +#define RX_EYEMON_NEGATIVE_STEP_BIT BIT(6) +#define RX_EYEMON_EXTENDED_VRANGE_BIT BIT(6) + #define is_mphy_tx_attr(attr) (attr < RX_MODE) #define RX_ADV_FINE_GRAN_STEP(x) ((((x) & 0x3) << 1) | 0x1) #define SYNC_LEN_FINE(x) ((x) & 0x3F) @@ -297,6 +316,12 @@ enum ufs_tx_hs_deemphasis { UFS_TX_HS_DEEMPHASIS_DB_7P6, }; +enum ufs_eom_eye_mask { + UFS_EOM_EYE_MASK_M, + UFS_EOM_EYE_MASK_L, + UFS_EOM_EYE_MASK_U, +}; + #define DL_FC0ProtectionTimeOutVal_Default 8191 #define DL_TC0ReplayTimeOutVal_Default 65535 #define DL_AFC0ReqTimeOutVal_Default 32767 From 16cbdc8308776270d76340cd52ac63ac4fbf9968 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:53 -0700 Subject: [PATCH 11/12] scsi: ufs: ufs-qcom: Implement vops apply_tx_eqtr_settings() On some platforms, when Host Software triggers TX Equalization Training, HW does not take TX EQTR settings programmed in PA_TxEQTRSetting, instead HW takes TX EQTR settings from PA_TxEQG1Setting. Implement vops apply_tx_eqtr_settings() to work around it by programming TX EQTR settings to PA_TxEQG1Setting during TX EQTR procedure. Reviewed-by: Bean Huo Reviewed-by: Bart Van Assche Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-12-can.guo@oss.qualcomm.com Signed-off-by: Martin K. 
Petersen --- drivers/ufs/host/ufs-qcom.c | 31 +++++++++++++++++++++++++++++++ drivers/ufs/host/ufs-qcom.h | 2 ++ 2 files changed, 33 insertions(+) diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index a0314cb55c7f..9abdeeee81f7 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -2816,6 +2816,26 @@ static int ufs_qcom_get_rx_fom(struct ufs_hba *hba, return 0; } +static int ufs_qcom_apply_tx_eqtr_settings(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode, + struct tx_eqtr_iter *h_iter, + struct tx_eqtr_iter *d_iter) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + u32 setting = 0; + int lane; + + if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1) + return 0; + + for (lane = 0; lane < pwr_mode->lane_tx; lane++) { + setting |= TX_HS_PRESHOOT_BITS(lane, h_iter->preshoot); + setting |= TX_HS_DEEMPHASIS_BITS(lane, h_iter->deemphasis); + } + + return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQG1SETTING), setting); +} + static int ufs_qcom_tx_eqtr_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, struct ufs_pa_layer_attr *pwr_mode) @@ -2838,6 +2858,11 @@ static int ufs_qcom_tx_eqtr_notify(struct ufs_hba *hba, return 0; if (status == PRE_CHANGE) { + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXEQG1SETTING), + &host->saved_tx_eq_g1_setting); + if (ret) + return ret; + /* PMC to target HS Gear. */ ret = ufshcd_change_power_mode(hba, pwr_mode, UFSHCD_PMC_POLICY_DONT_FORCE); @@ -2845,6 +2870,11 @@ static int ufs_qcom_tx_eqtr_notify(struct ufs_hba *hba, dev_err(hba->dev, "%s: Failed to PMC to target HS-G%u, Rate-%s: %d\n", __func__, gear, ufs_hs_rate_to_str(rate), ret); } else { + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQG1SETTING), + host->saved_tx_eq_g1_setting); + if (ret) + return ret; + /* PMC back to HS-G1. 
*/ ret = ufshcd_change_power_mode(hba, &pwr_mode_hs_g1, UFSHCD_PMC_POLICY_DONT_FORCE); @@ -2887,6 +2917,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .config_esi = ufs_qcom_config_esi, .freq_to_gear_speed = ufs_qcom_freq_to_gear_speed, .get_rx_fom = ufs_qcom_get_rx_fom, + .apply_tx_eqtr_settings = ufs_qcom_apply_tx_eqtr_settings, .tx_eqtr_notify = ufs_qcom_tx_eqtr_notify, }; diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h index 7183d6b2c8bb..5d083331a7f4 100644 --- a/drivers/ufs/host/ufs-qcom.h +++ b/drivers/ufs/host/ufs-qcom.h @@ -348,6 +348,8 @@ struct ufs_qcom_host { u32 phy_gear; bool esi_enabled; + + u32 saved_tx_eq_g1_setting; }; struct ufs_qcom_drvdata { From 57b7943fd87f086a3497ecbecc502b7418ed4ab8 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 25 Mar 2026 08:21:54 -0700 Subject: [PATCH 12/12] scsi: ufs: ufs-qcom: Enable TX Equalization Enable TX Equalization for hosts with HW version 0x7 and onwards. Reviewed-by: Bean Huo Reviewed-by: Bart Van Assche Signed-off-by: Can Guo Reviewed-by: Peter Wang Link: https://patch.msgid.link/20260325152154.1604082-13-can.guo@oss.qualcomm.com Signed-off-by: Martin K. 
Petersen --- drivers/ufs/host/ufs-qcom.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 9abdeeee81f7..5a58ffef3d27 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -1384,6 +1384,8 @@ static void ufs_qcom_set_host_caps(struct ufs_hba *hba) static void ufs_qcom_set_caps(struct ufs_hba *hba) { + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING; hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; @@ -1391,6 +1393,9 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba) hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE; hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; + if (host->hw_ver.major >= 0x7) + hba->caps |= UFSHCD_CAP_TX_EQUALIZATION; + ufs_qcom_set_host_caps(hba); }