diff --git a/.clang-format b/.clang-format index f371a13b4d19..ecb5035a3d9d 100644 --- a/.clang-format +++ b/.clang-format @@ -167,7 +167,7 @@ ForEachMacros: - 'drm_connector_for_each_possible_encoder' - 'drm_exec_for_each_locked_object' - 'drm_exec_for_each_locked_object_reverse' - - 'drm_for_each_bridge_in_chain' + - 'drm_for_each_bridge_in_chain_scoped' - 'drm_for_each_connector_iter' - 'drm_for_each_crtc' - 'drm_for_each_crtc_reverse' diff --git a/.mailmap b/.mailmap index 59254c22847f..ccd7d32c3931 100644 --- a/.mailmap +++ b/.mailmap @@ -174,6 +174,7 @@ Carlos Bilbao Changbin Du Chao Yu Chao Yu +Chen-Yu Tsai Chester Lin Chris Chiu Chris Chiu diff --git a/Documentation/ABI/stable/sysfs-driver-qaic b/Documentation/ABI/stable/sysfs-driver-qaic new file mode 100644 index 000000000000..c767a93342b3 --- /dev/null +++ b/Documentation/ABI/stable/sysfs-driver-qaic @@ -0,0 +1,19 @@ +What: /sys/bus/pci/drivers/qaic/XXXX:XX:XX.X/accel/accel/dbc_state +Date: October 2025 +KernelVersion: 6.19 +Contact: Jeff Hugo +Description: Represents the current state of DMA Bridge channel (DBC). Below are the possible + states: + + =================== ========================================================== + IDLE (0) DBC is free and can be activated + ASSIGNED (1) DBC is activated and a workload is running on device + BEFORE_SHUTDOWN (2) Sub-system associated with this workload has crashed and + it will shutdown soon + AFTER_SHUTDOWN (3) Sub-system associated with this workload has crashed and + it has shutdown + BEFORE_POWER_UP (4) Sub-system associated with this workload is shutdown and + it will be powered up soon + AFTER_POWER_UP (5) Sub-system associated with this workload is now powered up + =================== ========================================================== +Users: Any userspace application or clients interested in DBC state. diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-sriov b/Documentation/ABI/testing/sysfs-driver-intel-xe-sriov new file mode 100644 index 000000000000..2fd7e9b7bacc --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-sriov @@ -0,0 +1,159 @@ +What: /sys/bus/pci/drivers/xe/.../sriov_admin/ +Date: October 2025 +KernelVersion: 6.19 +Contact: intel-xe@lists.freedesktop.org +Description: + This directory appears for the particular Intel Xe device when: + + - device supports SR-IOV, and + - device is a Physical Function (PF), and + - driver support for the SR-IOV PF is enabled on given device. + + This directory is used as a root for all attributes required to + manage both Physical Function (PF) and Virtual Functions (VFs). + + +What: /sys/bus/pci/drivers/xe/.../sriov_admin/pf/ +Date: October 2025 +KernelVersion: 6.19 +Contact: intel-xe@lists.freedesktop.org +Description: + This directory holds attributes related to the SR-IOV Physical + Function (PF). + + +What: /sys/bus/pci/drivers/xe/.../sriov_admin/vf1/ +What: /sys/bus/pci/drivers/xe/.../sriov_admin/vf2/ +What: /sys/bus/pci/drivers/xe/.../sriov_admin/vf/ +Date: October 2025 +KernelVersion: 6.19 +Contact: intel-xe@lists.freedesktop.org +Description: + These directories hold attributes related to the SR-IOV Virtual + Functions (VFs). + + Note that the VF number is 1-based as described in PCI SR-IOV + specification as the Xe driver follows that naming schema. + + There could be "vf1", "vf2" and so on, up to "vf", where + matches the value of the "sriov_totalvfs" attribute. 
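For illustration only, and not part of the ABI entries above: a minimal userspace sketch that enumerates the per-function directories under sriov_admin as described. The PCI address in the path is a made-up placeholder; the real location depends on where the Xe PF is enumerated on a given system::

	/* Hypothetical example; adjust the PCI address for the actual PF. */
	#include <dirent.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *base = "/sys/bus/pci/drivers/xe/0000:03:00.0/sriov_admin";
		struct dirent *de;
		DIR *dir = opendir(base);

		if (!dir) {
			perror("opendir");
			return 1;
		}
		while ((de = readdir(dir)) != NULL) {
			/* Expect "pf" plus one "vfN" entry per enabled-capable VF. */
			if (!strcmp(de->d_name, "pf") || !strncmp(de->d_name, "vf", 2))
				printf("%s/%s\n", base, de->d_name);
		}
		closedir(dir);
		return 0;
	}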
+ + +What: /sys/bus/pci/drivers/xe/.../sriov_admin/pf/profile/exec_quantum_ms +What: /sys/bus/pci/drivers/xe/.../sriov_admin/pf/profile/preempt_timeout_us +What: /sys/bus/pci/drivers/xe/.../sriov_admin/pf/profile/sched_priority +What: /sys/bus/pci/drivers/xe/.../sriov_admin/vf/profile/exec_quantum_ms +What: /sys/bus/pci/drivers/xe/.../sriov_admin/vf/profile/preempt_timeout_us +What: /sys/bus/pci/drivers/xe/.../sriov_admin/vf/profile/sched_priority +Date: October 2025 +KernelVersion: 6.19 +Contact: intel-xe@lists.freedesktop.org +Description: + These files expose scheduling parameters for the PF and its VFs, and + are visible only on Intel Xe platforms that use time-sliced GPU sharing. + They can be changed even if VFs are enabled and running, and reflect the + settings of all tiles/GTs assigned to the given function. + + exec_quantum_ms: (RW) unsigned integer + The GT execution quantum (EQ) in [ms] for the given function. + Actual quantum value might be aligned per HW/FW requirements. + + Default is 0 (unlimited). + + preempt_timeout_us: (RW) unsigned integer + The GT preemption timeout in [us] of the given function. + Actual timeout value might be aligned per HW/FW requirements. + + Default is 0 (unlimited). + + sched_priority: (RW/RO) string + The GT scheduling priority of the given function. + + "low" - the function will be scheduled on the GPU for its EQ/PT + only if the function has any work already submitted. + + "normal" - the function will be scheduled on the GPU for its EQ/PT + irrespective of whether it has submitted any work or not. + + "high" - the function will be scheduled on the GPU for its EQ/PT + in the next time-slice after the current one completes + and the function has work submitted. + + Default is "low". + + When read, this file will display the current and available + scheduling priorities. The currently active priority level will + be enclosed in square brackets, like: + + [low] normal high + + This file can be read-only if changing the priority is not + supported. + + Writes to these attributes may fail with errors like: + -EINVAL if provided input is malformed or not recognized, + -EPERM if change is not applicable on given HW/FW, + -EIO if FW refuses to change the provisioning. + + Reads from these attributes may fail with: + -EUCLEAN if value is not consistent across all tiles/GTs. + + +What: /sys/bus/pci/drivers/xe/.../sriov_admin/.bulk_profile/exec_quantum_ms +What: /sys/bus/pci/drivers/xe/.../sriov_admin/.bulk_profile/preempt_timeout_us +What: /sys/bus/pci/drivers/xe/.../sriov_admin/.bulk_profile/sched_priority +Date: October 2025 +KernelVersion: 6.19 +Contact: intel-xe@lists.freedesktop.org +Description: + These files allow bulk reconfiguration of the scheduling parameters + of the PF or VFs and are available only on Intel Xe platforms that use + time-sliced GPU sharing. These scheduling parameters + can be changed even if VFs are enabled and running. + + exec_quantum_ms: (WO) unsigned integer + The GT execution quantum (EQ) in [ms] to be applied to all functions. + See sriov_admin/{pf,vf}/profile/exec_quantum_ms for more details. + + preempt_timeout_us: (WO) unsigned integer + The GT preemption timeout (PT) in [us] to be applied to all functions. + See sriov_admin/{pf,vf}/profile/preempt_timeout_us for more details. + + sched_priority: (RW/RO) string + The GT scheduling priority to be applied to all functions. + See sriov_admin/{pf,vf}/profile/sched_priority for more details.
+ + Writes to these attributes may fail with errors like: + -EINVAL if provided input is malformed or not recognized, + -EPERM if change is not applicable on given HW/FW, + -EIO if FW refuses to change the provisioning. + + +What: /sys/bus/pci/drivers/xe/.../sriov_admin/vf/stop +Date: October 2025 +KernelVersion: 6.19 +Contact: intel-xe@lists.freedesktop.org +Description: + This file allows controlling the scheduling of the VF on Intel Xe GPU + platforms. It allows implementing a custom policy mechanism in case VFs + are misbehaving or triggering adverse events above defined thresholds. + + stop: (WO) bool + All GT executions of the given function shall be immediately stopped. + To allow scheduling this VF again, the VF FLR must be triggered. + + Writes to this attribute may fail with errors like: + -EINVAL if provided input is malformed or not recognized, + -EPERM if change is not applicable on given HW/FW, + -EIO if FW refuses to change the scheduling. + + +What: /sys/bus/pci/drivers/xe/.../sriov_admin/pf/device +What: /sys/bus/pci/drivers/xe/.../sriov_admin/vf/device +Date: October 2025 +KernelVersion: 6.19 +Contact: intel-xe@lists.freedesktop.org +Description: + These are symlinks to the underlying PCI device entry representing the + given Xe SR-IOV function. For the PF, this link is always present. + For VFs, this link is present only for currently enabled VFs. diff --git a/Documentation/accel/qaic/aic100.rst b/Documentation/accel/qaic/aic100.rst index 273da6192fb3..41331cf580b1 100644 --- a/Documentation/accel/qaic/aic100.rst +++ b/Documentation/accel/qaic/aic100.rst @@ -487,8 +487,8 @@ one user crashes, the fallout of that should be limited to that workload and not impact other workloads. SSR accomplishes this. If a particular workload crashes, QSM notifies the host via the QAIC_SSR MHI -channel. This notification identifies the workload by it's assigned DBC. A -multi-stage recovery process is then used to cleanup both sides, and get the +channel. This notification identifies the workload by its assigned DBC. A +multi-stage recovery process is then used to cleanup both sides, and gets the DBC/NSPs into a working state. When SSR occurs, any state in the workload is lost. Any inputs that were in @@ -496,6 +496,27 @@ process, or queued by not yet serviced, are lost. The loaded artifacts will remain in on-card DDR, but the host will need to re-activate the workload if it desires to recover the workload. +When SSR occurs for a specific NSP, the assigned DBC goes through the +following state transitions in order: + +DBC_STATE_BEFORE_SHUTDOWN + Indicates that the affected NSP was found in an unrecoverable error + condition. +DBC_STATE_AFTER_SHUTDOWN + Indicates that the NSP is under reset. +DBC_STATE_BEFORE_POWER_UP + Indicates that the NSP's debug information has been collected, and is + ready to be collected by the host (if desired). At that stage the NSP + is restarted by QSM. +DBC_STATE_AFTER_POWER_UP + Indicates that the NSP has been restarted, is fully operational, and is + in the idle state. + +SSR also has an optional crashdump collection feature. If enabled, the host can +collect the memory dump for the crashed NSP and dump it to user space via +the dev_coredump subsystem. The host can also decline the crashdump collection +request from the device.
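The DBC states above are exposed to userspace through the dbc_state attribute documented in Documentation/ABI/stable/sysfs-driver-qaic. As a purely illustrative sketch, assuming the attribute reads back as the numeric value listed in that table and using a placeholder device path, a monitoring tool could map the value to a name like this::

	/* Illustrative only; the device path below is a placeholder. */
	#include <stdio.h>

	static const char * const dbc_states[] = {
		"IDLE", "ASSIGNED", "BEFORE_SHUTDOWN",
		"AFTER_SHUTDOWN", "BEFORE_POWER_UP", "AFTER_POWER_UP",
	};

	int main(void)
	{
		const char *path =
			"/sys/bus/pci/drivers/qaic/0000:01:00.0/accel/accel0/dbc_state";
		FILE *f = fopen(path, "r");
		int state;

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fscanf(f, "%d", &state) == 1 && state >= 0 && state <= 5)
			printf("DBC state: %s (%d)\n", dbc_states[state], state);
		fclose(f);
		return 0;
	}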
+ Reliability, Accessibility, Serviceability (RAS) ================================================ diff --git a/Documentation/accel/qaic/qaic.rst b/Documentation/accel/qaic/qaic.rst index 018d6cc173d7..ef27e262cb91 100644 --- a/Documentation/accel/qaic/qaic.rst +++ b/Documentation/accel/qaic/qaic.rst @@ -36,7 +36,7 @@ polling mode and reenables the IRQ line. This mitigation in QAIC is very effective. The same lprnet usecase that generates 100k IRQs per second (per /proc/interrupts) is reduced to roughly 64 IRQs over 5 minutes while keeping the host system stable, and having the same -workload throughput performance (within run to run noise variation). +workload throughput performance (within run-to-run noise variation). Single MSI Mode --------------- @@ -49,7 +49,7 @@ useful to be able to fall back to a single MSI when needed. To support this fallback, we allow the case where only one MSI is able to be allocated, and share that one MSI between MHI and the DBCs. The device detects when only one MSI has been configured and directs the interrupts for the DBCs -to the interrupt normally used for MHI. Unfortunately this means that the +to the interrupt normally used for MHI. Unfortunately, this means that the interrupt handlers for every DBC and MHI wake up for every interrupt that arrives; however, the DBC threaded irq handlers only are started when work to be done is detected (MHI will always start its threaded handler). @@ -62,9 +62,9 @@ never disabled, allowing each new entry to the FIFO to trigger a new interrupt. Neural Network Control (NNC) Protocol ===================================== -The implementation of NNC is split between the KMD (QAIC) and UMD. In general +The implementation of NNC is split between the KMD (QAIC) and UMD. In general, QAIC understands how to encode/decode NNC wire protocol, and elements of the -protocol which require kernel space knowledge to process (for example, mapping +protocol which requires kernel space knowledge to process (for example, mapping host memory to device IOVAs). QAIC understands the structure of a message, and all of the transactions. QAIC does not understand commands (the payload of a passthrough transaction). 
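To make the Single MSI Mode fallback described above concrete, here is a minimal sketch of that allocation flow written against the generic PCI API; the helper name and the QAIC_NUM_MSI constant are hypothetical and do not reflect the actual driver code::

	#include <linux/pci.h>

	/* QAIC_NUM_MSI is a hypothetical stand-in for the full vector count. */
	static int qaic_try_alloc_irqs(struct pci_dev *pdev, bool *single_msi)
	{
		int nvec;

		/* Ask for the full complement: one vector for MHI plus the DBCs. */
		nvec = pci_alloc_irq_vectors(pdev, QAIC_NUM_MSI, QAIC_NUM_MSI,
					     PCI_IRQ_MSI);
		if (nvec < 0) {
			/* Fall back to a single MSI shared between MHI and all DBCs. */
			nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
			if (nvec < 0)
				return nvec;
			*single_msi = true;
		}
		return 0;
	}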
diff --git a/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml b/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml index 05442d437755..6211ab8bbb0e 100644 --- a/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml +++ b/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml @@ -49,6 +49,10 @@ properties: $ref: /schemas/graph.yaml#/properties/port description: HDMI output port + port@2: + $ref: /schemas/graph.yaml#/properties/port + description: Parallel audio input port + required: - port@0 - port@1 @@ -98,5 +102,13 @@ examples: remote-endpoint = <&hdmi0_con>; }; }; + + port@2 { + reg = <2>; + + endpoint { + remote-endpoint = <&pai_to_hdmi_tx>; + }; + }; }; }; diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml index ba644c30dcf4..17d1f97ce8c2 100644 --- a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml +++ b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml @@ -19,6 +19,7 @@ properties: compatible: enum: - ite,it66121 + - ite,it66122 - ite,it6610 reg: diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml index c167795c63f6..b95f10edd3a2 100644 --- a/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml +++ b/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml @@ -14,6 +14,9 @@ description: | R-Car Gen4 SoCs. The encoder can operate in either DSI or CSI-2 mode, with up to four data lanes. +allOf: + - $ref: /schemas/display/dsi-controller.yaml# + properties: compatible: enum: @@ -80,14 +83,14 @@ required: - resets - ports -additionalProperties: false +unevaluatedProperties: false examples: - | #include #include - dsi0: dsi-encoder@fed80000 { + dsi@fed80000 { compatible = "renesas,r8a779a0-dsi-csi2-tx"; reg = <0xfed80000 0x10000>; power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; @@ -117,4 +120,51 @@ examples: }; }; }; + + - | + #include + #include + + dsi@fed80000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,r8a779g0-dsi-csi2-tx"; + reg = <0xfed80000 0x10000>; + clocks = <&cpg CPG_MOD 415>, + <&cpg CPG_CORE R8A779G0_CLK_DSIEXT>, + <&cpg CPG_CORE R8A779G0_CLK_DSIREF>; + clock-names = "fck", "dsi", "pll"; + power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>; + resets = <&cpg 415>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + }; + + port@1 { + reg = <1>; + + dsi0port1_out: endpoint { + remote-endpoint = <&panel_in>; + data-lanes = <1 2>; + }; + }; + }; + + panel@0 { + reg = <0>; + compatible = "raspberrypi,dsi-7inch", "ilitek,ili9881c"; + power-supply = <&vcc_lcd_reg>; + + port { + panel_in: endpoint { + remote-endpoint = <&dsi0port1_out>; + }; + }; + }; + }; ... 
diff --git a/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml b/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml index 9ef587d46506..20c7e0a77802 100644 --- a/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml +++ b/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml @@ -27,7 +27,9 @@ properties: - const: adi,adv7123 - enum: - adi,adv7123 + - asl-tek,cs5263 - dumb-vga-dac + - parade,ps185hdm - radxa,ra620 - realtek,rtd2171 - ti,opa362 diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8mp-hdmi-pai.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8mp-hdmi-pai.yaml new file mode 100644 index 000000000000..4f99682a308d --- /dev/null +++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8mp-hdmi-pai.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/imx/fsl,imx8mp-hdmi-pai.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale i.MX8MP HDMI Parallel Audio Interface + +maintainers: + - Shengjiu Wang + +description: + The HDMI TX Parallel Audio Interface (HTX_PAI) is a bridge between the + Audio Subsystem to the HDMI TX Controller. + +properties: + compatible: + const: fsl,imx8mp-hdmi-pai + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-names: + const: apb + + power-domains: + maxItems: 1 + + port: + $ref: /schemas/graph.yaml#/properties/port + description: Output to the HDMI TX controller. + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - power-domains + - port + +additionalProperties: false + +examples: + - | + #include + #include + + audio-bridge@32fc4800 { + compatible = "fsl,imx8mp-hdmi-pai"; + reg = <0x32fc4800 0x800>; + interrupt-parent = <&irqsteer_hdmi>; + interrupts = <14>; + clocks = <&clk IMX8MP_CLK_HDMI_APB>; + clock-names = "apb"; + power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_PAI>; + + port { + pai_to_hdmi_tx: endpoint { + remote-endpoint = <&hdmi_tx_from_pai>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml index aeb4e4f36044..ebda78db87a6 100644 --- a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml +++ b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml @@ -18,6 +18,7 @@ properties: compatible: oneOf: - enum: + - qcom,glymur-dp - qcom,sa8775p-dp - qcom,sc7180-dp - qcom,sc7280-dp @@ -31,6 +32,11 @@ properties: - qcom,sm8650-dp - qcom,x1e80100-dp + - items: + - enum: + - qcom,qcs8300-dp + - const: qcom,sa8775p-dp + - items: - enum: - qcom,sm6350-dp @@ -53,6 +59,12 @@ properties: - qcom,sm8550-dp - const: qcom,sm8350-dp + - items: + - enum: + - qcom,sm6150-dp + - const: qcom,sm8150-dp + - const: qcom,sm8350-dp + - items: - enum: - qcom,sm8750-dp @@ -195,9 +207,11 @@ allOf: compatible: contains: enum: + - qcom,glymur-dp - qcom,sa8775p-dp - qcom,x1e80100-dp then: + $ref: /schemas/sound/dai-common.yaml# oneOf: - required: - aux-bus @@ -239,6 +253,7 @@ allOf: enum: # these platforms support 2 streams MST on some interfaces, # others are SST only + - qcom,glymur-dp - qcom,sc8280xp-dp - qcom,x1e80100-dp then: @@ -295,7 +310,7 @@ allOf: minItems: 6 maxItems: 8 -additionalProperties: false +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/display/msm/gmu.yaml b/Documentation/devicetree/bindings/display/msm/gmu.yaml 
index afc187935744..e32056ae0f5d 100644 --- a/Documentation/devicetree/bindings/display/msm/gmu.yaml +++ b/Documentation/devicetree/bindings/display/msm/gmu.yaml @@ -21,7 +21,7 @@ properties: compatible: oneOf: - items: - - pattern: '^qcom,adreno-gmu-[67][0-9][0-9]\.[0-9]$' + - pattern: '^qcom,adreno-gmu-[6-8][0-9][0-9]\.[0-9]$' - const: qcom,adreno-gmu - items: - pattern: '^qcom,adreno-gmu-x[1-9][0-9][0-9]\.[0-9]$' @@ -299,6 +299,64 @@ allOf: required: - qcom,qmp + - if: + properties: + compatible: + contains: + const: qcom,adreno-gmu-840.1 + then: + properties: + reg: + items: + - description: Core GMU registers + reg-names: + items: + - const: gmu + clocks: + items: + - description: GPU AHB clock + - description: GMU clock + - description: GPU CX clock + - description: GPU MEMNOC clock + - description: GMU HUB clock + clock-names: + items: + - const: ahb + - const: gmu + - const: cxo + - const: memnoc + - const: hub + + - if: + properties: + compatible: + contains: + const: qcom,adreno-gmu-x285.1 + then: + properties: + reg: + items: + - description: Core GMU registers + reg-names: + items: + - const: gmu + clocks: + items: + - description: GPU AHB clock + - description: GMU clock + - description: GPU CX clock + - description: GPU MEMNOC clock + - description: GMU HUB clock + - description: GMU RSCC HUB clock + clock-names: + items: + - const: ahb + - const: gmu + - const: cxo + - const: memnoc + - const: hub + - const: rscc + - if: properties: compatible: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,glymur-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,glymur-mdss.yaml new file mode 100644 index 000000000000..2329ed96e6cb --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,glymur-mdss.yaml @@ -0,0 +1,264 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,glymur-mdss.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Glymur Display MDSS + +maintainers: + - Abel Vesa + +description: + Glymur MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like + DPU display controller, DP interfaces, etc. 
+ +$ref: /schemas/display/msm/mdss-common.yaml# + +properties: + compatible: + const: qcom,glymur-mdss + + clocks: + items: + - description: Display AHB + - description: Display hf AXI + - description: Display core + + iommus: + maxItems: 1 + + interconnects: + items: + - description: Interconnect path from mdp0 port to the data bus + - description: Interconnect path from CPU to the reg bus + + interconnect-names: + items: + - const: mdp0-mem + - const: cpu-cfg + +patternProperties: + "^display-controller@[0-9a-f]+$": + type: object + additionalProperties: true + properties: + compatible: + const: qcom,glymur-dpu + + "^displayport-controller@[0-9a-f]+$": + type: object + additionalProperties: true + properties: + compatible: + const: qcom,glymur-dp + + "^phy@[0-9a-f]+$": + type: object + additionalProperties: true + properties: + compatible: + const: qcom,glymur-dp-phy + +required: + - compatible + +unevaluatedProperties: false + +examples: + - | + #include + #include + #include + #include + #include + #include + + display-subsystem@ae00000 { + compatible = "qcom,glymur-mdss"; + reg = <0x0ae00000 0x1000>; + reg-names = "mdss"; + + interrupts = ; + + clocks = <&dispcc_ahb_clk>, + <&gcc_disp_hf_axi_clk>, + <&dispcc_mdp_clk>; + clock-names = "bus", "nrt_bus", "core"; + + interconnects = <&mmss_noc MASTER_MDP QCOM_ICC_TAG_ALWAYS + &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>, + <&hsc_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY + &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>; + interconnect-names = "mdp0-mem", + "cpu-cfg"; + + resets = <&disp_cc_mdss_core_bcr>; + + power-domains = <&mdss_gdsc>; + + iommus = <&apps_smmu 0x1c00 0x2>; + + interrupt-controller; + #interrupt-cells = <1>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + display-controller@ae01000 { + compatible = "qcom,glymur-dpu"; + reg = <0x0ae01000 0x8f000>, + <0x0aeb0000 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&gcc_axi_clk>, + <&dispcc_ahb_clk>, + <&dispcc_mdp_lut_clk>, + <&dispcc_mdp_clk>, + <&dispcc_mdp_vsync_clk>; + clock-names = "nrt_bus", + "iface", + "lut", + "core", + "vsync"; + + assigned-clocks = <&dispcc_mdp_vsync_clk>; + assigned-clock-rates = <19200000>; + + operating-points-v2 = <&mdp_opp_table>; + power-domains = <&rpmhpd RPMHPD_MMCX>; + + interrupt-parent = <&mdss>; + interrupts = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dpu_intf1_out: endpoint { + remote-endpoint = <&dsi0_in>; + }; + }; + + port@1 { + reg = <1>; + dpu_intf2_out: endpoint { + remote-endpoint = <&dsi1_in>; + }; + }; + }; + + mdp_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-200000000 { + opp-hz = /bits/ 64 <200000000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-325000000 { + opp-hz = /bits/ 64 <325000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-375000000 { + opp-hz = /bits/ 64 <375000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-514000000 { + opp-hz = /bits/ 64 <514000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; + }; + + displayport-controller@ae90000 { + compatible = "qcom,glymur-dp"; + reg = <0xae90000 0x200>, + <0xae90200 0x200>, + <0xae90400 0x600>, + <0xae91000 0x400>, + <0xae91400 0x400>; + + interrupt-parent = <&mdss>; + interrupts = <12>; + + clocks = <&dispcc_mdss_ahb_clk>, + <&dispcc_dptx0_aux_clk>, + <&dispcc_dptx0_link_clk>, + <&dispcc_dptx0_link_intf_clk>, + <&dispcc_dptx0_pixel0_clk>, + <&dispcc_dptx0_pixel1_clk>; + clock-names = "core_iface", + "core_aux", + "ctrl_link", + 
"ctrl_link_iface", + "stream_pixel", + "stream_1_pixel"; + + assigned-clocks = <&dispcc_mdss_dptx0_link_clk_src>, + <&dispcc_mdss_dptx0_pixel0_clk_src>, + <&dispcc_mdss_dptx0_pixel1_clk_src>; + assigned-clock-parents = <&usb_1_ss0_qmpphy QMP_USB43DP_DP_LINK_CLK>, + <&usb_1_ss0_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>, + <&usb_1_ss0_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>; + + operating-points-v2 = <&mdss_dp0_opp_table>; + + power-domains = <&rpmhpd RPMHPD_MMCX>; + + phys = <&usb_1_ss0_qmpphy QMP_USB43DP_DP_PHY>; + phy-names = "dp"; + + #sound-dai-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + mdss_dp0_in: endpoint { + remote-endpoint = <&mdss_intf0_out>; + }; + }; + + port@1 { + reg = <1>; + + mdss_dp0_out: endpoint { + }; + }; + }; + + mdss_dp0_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-160000000 { + opp-hz = /bits/ 64 <160000000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-270000000 { + opp-hz = /bits/ 64 <270000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-540000000 { + opp-hz = /bits/ 64 <540000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-810000000 { + opp-hz = /bits/ 64 <810000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/display/msm/qcom,qcs8300-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,qcs8300-mdss.yaml new file mode 100644 index 000000000000..e96baaae9ba9 --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,qcs8300-mdss.yaml @@ -0,0 +1,286 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,qcs8300-mdss.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Technologies, Inc. QCS8300 Display MDSS + +maintainers: + - Yongxing Mou + +description: + QCS8300 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like + DPU display controller, DP interfaces and EDP etc. 
+ +$ref: /schemas/display/msm/mdss-common.yaml# + +properties: + compatible: + const: qcom,qcs8300-mdss + + clocks: + items: + - description: Display AHB + - description: Display hf AXI + - description: Display core + + iommus: + maxItems: 1 + + interconnects: + maxItems: 3 + + interconnect-names: + maxItems: 3 + +patternProperties: + "^display-controller@[0-9a-f]+$": + type: object + additionalProperties: true + + properties: + compatible: + contains: + const: qcom,qcs8300-dpu + + "^displayport-controller@[0-9a-f]+$": + type: object + additionalProperties: true + + properties: + compatible: + contains: + const: qcom,qcs8300-dp + + "^phy@[0-9a-f]+$": + type: object + additionalProperties: true + properties: + compatible: + contains: + const: qcom,qcs8300-edp-phy + +required: + - compatible + +unevaluatedProperties: false + +examples: + - | + #include + #include + #include + #include + #include + #include + #include + + mdss: display-subsystem@ae00000 { + compatible = "qcom,qcs8300-mdss"; + reg = <0x0ae00000 0x1000>; + reg-names = "mdss"; + + interconnects = <&mmss_noc MASTER_MDP0 QCOM_ICC_TAG_ACTIVE_ONLY + &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>, + <&mmss_noc MASTER_MDP1 QCOM_ICC_TAG_ACTIVE_ONLY + &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ACTIVE_ONLY>, + <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY + &config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>; + interconnect-names = "mdp0-mem", + "mdp1-mem", + "cpu-cfg"; + + resets = <&dispcc_core_bcr>; + power-domains = <&dispcc_gdsc>; + + clocks = <&dispcc_ahb_clk>, + <&gcc GCC_DISP_HF_AXI_CLK>, + <&dispcc_mdp_clk>; + + interrupts = ; + interrupt-controller; + #interrupt-cells = <1>; + + iommus = <&apps_smmu 0x1000 0x402>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + display-controller@ae01000 { + compatible = "qcom,qcs8300-dpu", "qcom,sa8775p-dpu"; + reg = <0x0ae01000 0x8f000>, + <0x0aeb0000 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&gcc GCC_DISP_HF_AXI_CLK>, + <&dispcc0 MDSS_DISP_CC_MDSS_AHB_CLK>, + <&dispcc0 MDSS_DISP_CC_MDSS_MDP_LUT_CLK>, + <&dispcc0 MDSS_DISP_CC_MDSS_MDP_CLK>, + <&dispcc0 MDSS_DISP_CC_MDSS_VSYNC_CLK>; + clock-names = "nrt_bus", + "iface", + "lut", + "core", + "vsync"; + + assigned-clocks = <&dispcc0 MDSS_DISP_CC_MDSS_VSYNC_CLK>; + assigned-clock-rates = <19200000>; + operating-points-v2 = <&mdp_opp_table>; + power-domains = <&rpmhpd RPMHPD_MMCX>; + + interrupt-parent = <&mdss>; + interrupts = <0>; + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + + dpu_intf0_out: endpoint { + remote-endpoint = <&mdss_dp0_in>; + }; + }; + }; + + mdp_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-375000000 { + opp-hz = /bits/ 64 <375000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-500000000 { + opp-hz = /bits/ 64 <500000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + + opp-575000000 { + opp-hz = /bits/ 64 <575000000>; + required-opps = <&rpmhpd_opp_turbo>; + }; + + opp-650000000 { + opp-hz = /bits/ 64 <650000000>; + required-opps = <&rpmhpd_opp_turbo_l1>; + }; + }; + }; + + mdss_dp0_phy: phy@aec2a00 { + compatible = "qcom,qcs8300-edp-phy", "qcom,sa8775p-edp-phy"; + + reg = <0x0aec2a00 0x200>, + <0x0aec2200 0xd0>, + <0x0aec2600 0xd0>, + <0x0aec2000 0x1c8>; + + clocks = <&dispcc MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK>, + <&dispcc MDSS_DISP_CC_MDSS_AHB_CLK>; + clock-names = "aux", + "cfg_ahb"; + + #clock-cells = <1>; + #phy-cells = <0>; + + vdda-phy-supply = <&vreg_l1c>; + vdda-pll-supply = <&vreg_l4a>; + }; + + displayport-controller@af54000 { + 
compatible = "qcom,qcs8300-dp", "qcom,sa8775p-dp"; + + pinctrl-0 = <&dp_hot_plug_det>; + pinctrl-names = "default"; + + reg = <0xaf54000 0x104>, + <0xaf54200 0x0c0>, + <0xaf55000 0x770>, + <0xaf56000 0x09c>, + <0xaf57000 0x09c>, + <0xaf58000 0x09c>, + <0xaf59000 0x09c>, + <0xaf5a000 0x23c>, + <0xaf5b000 0x23c>; + + interrupt-parent = <&mdss>; + interrupts = <12>; + clocks = <&dispcc0 MDSS_DISP_CC_MDSS_AHB_CLK>, + <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK>, + <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK>, + <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK>, + <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK>, + <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK>, + <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK>, + <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK>; + clock-names = "core_iface", + "core_aux", + "ctrl_link", + "ctrl_link_iface", + "stream_pixel", + "stream_1_pixel", + "stream_2_pixel", + "stream_3_pixel"; + assigned-clocks = <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC>, + <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC>, + <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC>, + <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC>, + <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC>; + assigned-clock-parents = <&mdss_dp0_phy 0>, + <&mdss_dp0_phy 1>, + <&mdss_dp0_phy 1>, + <&mdss_dp0_phy 1>; + phys = <&mdss_dp0_phy>; + phy-names = "dp"; + operating-points-v2 = <&dp_opp_table>; + power-domains = <&rpmhpd RPMHPD_MMCX>; + + #sound-dai-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + mdss_dp0_in: endpoint { + remote-endpoint = <&dpu_intf0_out>; + }; + }; + + port@1 { + reg = <1>; + + mdss_dp_out: endpoint { }; + }; + }; + + dp_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-160000000 { + opp-hz = /bits/ 64 <160000000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-270000000 { + opp-hz = /bits/ 64 <270000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-540000000 { + opp-hz = /bits/ 64 <540000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-810000000 { + opp-hz = /bits/ 64 <810000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; + }; + }; +... 
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm6150-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm6150-mdss.yaml index 9ac24f99d3ad..46e9335f849f 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm6150-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm6150-mdss.yaml @@ -51,6 +51,14 @@ patternProperties: compatible: const: qcom,sm6150-dpu + "^displayport-controller@[0-9a-f]+$": + type: object + additionalProperties: true + properties: + compatible: + contains: + const: qcom,sm6150-dp + "^dsi@[0-9a-f]+$": type: object additionalProperties: true @@ -130,35 +138,37 @@ examples: #size-cells = <0>; port@0 { - reg = <0>; - dpu_intf0_out: endpoint { - }; + reg = <0>; + + dpu_intf0_out: endpoint { + }; }; port@1 { - reg = <1>; - dpu_intf1_out: endpoint { - remote-endpoint = <&mdss_dsi0_in>; - }; + reg = <1>; + + dpu_intf1_out: endpoint { + remote-endpoint = <&mdss_dsi0_in>; + }; }; }; mdp_opp_table: opp-table { compatible = "operating-points-v2"; - opp-19200000 { - opp-hz = /bits/ 64 <19200000>; - required-opps = <&rpmhpd_opp_low_svs>; + opp-192000000 { + opp-hz = /bits/ 64 <192000000>; + required-opps = <&rpmhpd_opp_low_svs>; }; - opp-25600000 { - opp-hz = /bits/ 64 <25600000>; - required-opps = <&rpmhpd_opp_svs>; + opp-256000000 { + opp-hz = /bits/ 64 <256000000>; + required-opps = <&rpmhpd_opp_svs>; }; opp-307200000 { - opp-hz = /bits/ 64 <307200000>; - required-opps = <&rpmhpd_opp_nom>; + opp-hz = /bits/ 64 <307200000>; + required-opps = <&rpmhpd_opp_nom>; }; }; }; diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml index 0a46120dd868..fe296e3186d0 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml @@ -13,11 +13,17 @@ $ref: /schemas/display/msm/dpu-common.yaml# properties: compatible: - enum: - - qcom,sa8775p-dpu - - qcom,sm8650-dpu - - qcom,sm8750-dpu - - qcom,x1e80100-dpu + oneOf: + - enum: + - qcom,glymur-dpu + - qcom,sa8775p-dpu + - qcom,sm8650-dpu + - qcom,sm8750-dpu + - qcom,x1e80100-dpu + - items: + - enum: + - qcom,qcs8300-dpu + - const: qcom,sa8775p-dpu reg: items: diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,il79900a.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,il79900a.yaml new file mode 100644 index 000000000000..02f7fb1f16dc --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/ilitek,il79900a.yaml @@ -0,0 +1,68 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/ilitek,il79900a.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Ilitek IL79900a based MIPI-DSI panels + +maintainers: + - Langyan Ye + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + items: + - enum: + - tianma,tl121bvms07-00 + - const: ilitek,il79900a + + reg: + maxItems: 1 + description: DSI virtual channel used by the panel + + enable-gpios: + maxItems: 1 + description: GPIO specifier for the enable pin + + avdd-supply: + description: Positive analog voltage supply (AVDD) + + avee-supply: + description: Negative analog voltage supply (AVEE) + + pp1800-supply: + description: 1.8V logic voltage supply + + backlight: true + +required: + - compatible + - reg + - enable-gpios + - avdd-supply + - avee-supply + - pp1800-supply + +additionalProperties: false + +examples: + - | + dsi { + 
#address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "tianma,tl121bvms07-00", "ilitek,il79900a"; + reg = <0>; + enable-gpios = <&pio 25 0>; + avdd-supply = <®_avdd>; + avee-supply = <®_avee>; + pp1800-supply = <®_pp1800>; + backlight = <&backlight>; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml index 434cc6af9c95..d979701a00a8 100644 --- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml +++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml @@ -20,9 +20,11 @@ properties: - bananapi,lhr050h41 - bestar,bsd1218-a101kl68 - feixin,k101-im2byl02 + - raspberrypi,dsi-5inch - raspberrypi,dsi-7inch - startek,kd050hdfia020 - tdo,tl050hdv35 + - wanchanglong,w552946aaa - wanchanglong,w552946aba - const: ilitek,ili9881c @@ -30,6 +32,7 @@ properties: maxItems: 1 backlight: true + port: true power-supply: true reset-gpios: true rotation: true diff --git a/Documentation/devicetree/bindings/display/panel/lg,ld070wx3-sl01.yaml b/Documentation/devicetree/bindings/display/panel/lg,ld070wx3-sl01.yaml new file mode 100644 index 000000000000..0f0b9079f199 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/lg,ld070wx3-sl01.yaml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/lg,ld070wx3-sl01.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: LG Corporation 7" WXGA TFT LCD panel + +maintainers: + - Svyatoslav Ryhel + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + items: + - const: lg,ld070wx3-sl01 + + reg: + maxItems: 1 + + vdd-supply: true + vcc-supply: true + + backlight: true + port: true + +required: + - compatible + - vdd-supply + - vcc-supply + +additionalProperties: false + +examples: + - | + #include + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "lg,ld070wx3-sl01"; + reg = <0>; + + vdd-supply = <&vdd_3v3_lcd>; + vcc-supply = <&vcc_1v8_lcd>; + + backlight = <&backlight>; + + port { + endpoint { + remote-endpoint = <&dsi0_out>; + }; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml b/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml index 4388d5375851..dbc01e640895 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml @@ -59,6 +59,8 @@ properties: # Jenson Display BL-JT60050-01A 7" WSVGA (1024x600) color TFT LCD LVDS panel - jenson,bl-jt60050-01a - tbs,a711-panel + # Winstar WF70A8SYJHLNGA 7" WSVGA (1024x600) color TFT LCD LVDS panel + - winstar,wf70a8syjhlnga - const: panel-lvds diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml index 9b92a05791cc..8d668979b62d 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml @@ -19,6 +19,9 @@ description: | If the panel is more advanced a dedicated binding file is required. 
+allOf: + - $ref: panel-common.yaml# + properties: compatible: @@ -42,8 +45,6 @@ properties: - kingdisplay,kd097d04 # LG ACX467AKM-7 4.95" 1080×1920 LCD Panel - lg,acx467akm-7 - # LG Corporation 7" WXGA TFT LCD panel - - lg,ld070wx3-sl01 # LG Corporation 5" HD TFT LCD panel - lg,lh500wx1-sd03 # Lincoln LCD197 5" 1080x1920 LCD panel @@ -56,10 +57,6 @@ properties: - panasonic,vvx10f034n00 # Samsung s6e3fa7 1080x2220 based AMS559NK06 AMOLED panel - samsung,s6e3fa7-ams559nk06 - # Samsung s6e3fc2x01 1080x2340 AMOLED panel - - samsung,s6e3fc2x01 - # Samsung sofef00 1080x2280 AMOLED panel - - samsung,sofef00 # Shangai Top Display Optoelectronics 7" TL070WSH30 1024x600 TFT LCD panel - tdo,tl070wsh30 @@ -72,31 +69,12 @@ properties: reset-gpios: true port: true power-supply: true - vddio-supply: true - -allOf: - - $ref: panel-common.yaml# - - if: - properties: - compatible: - enum: - - samsung,s6e3fc2x01 - - samsung,sofef00 - then: - properties: - power-supply: false - required: - - vddio-supply - else: - properties: - vddio-supply: false - required: - - power-supply additionalProperties: false required: - compatible + - power-supply - reg examples: diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index 2017428d8828..24e277b19094 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -184,6 +184,8 @@ properties: - innolux,n156bge-l21 # Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel - innolux,zj070na-01p + # JuTouch Technology Co.. 10" JT101TM023 WXGA (1280 x 800) LVDS panel + - jutouch,jt101tm023 # Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel - koe,tx14d24vm1bpa # Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel @@ -268,6 +270,8 @@ properties: - qiaodian,qd43003c0-40 # Shenzhen QiShenglong Industrialist Co., Ltd. Gopher 2b 4.3" 480(RGB)x272 TFT LCD panel - qishenglong,gopher2b-lcd + # Raystar Optronics, Inc. RFF500F-AWH-DNN 5.0" TFT 840x480 + - raystar,rff500f-awh-dnn # Rocktech Displays Ltd. RK101II01D-CT 10.1" TFT 1280x800 - rocktech,rk101ii01d-ct # Rocktech Display Ltd. 
RK070ER9427 800(RGB)x480 TFT LCD panel @@ -276,6 +280,8 @@ properties: - rocktech,rk043fn48h # Samsung Electronics 10.1" WXGA (1280x800) TFT LCD panel - samsung,ltl101al01 + # Samsung Electronics 10.6" FWXGA (1366x768) TFT LCD panel + - samsung,ltl106al01 # Samsung Electronics 10.1" WSVGA TFT LCD panel - samsung,ltn101nt05 # Satoz SAT050AT40H12R2 5.0" WVGA TFT LCD panel diff --git a/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml b/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml index 04f86e0cbac9..694037301583 100644 --- a/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml +++ b/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml @@ -9,6 +9,9 @@ title: Ronbo RB070D30 DSI Display Panel maintainers: - Maxime Ripard +allOf: + - $ref: panel-common.yaml# + properties: compatible: const: ronbo,rb070d30 @@ -20,10 +23,6 @@ properties: description: GPIO used for the power pin maxItems: 1 - reset-gpios: - description: GPIO used for the reset pin - maxItems: 1 - shlr-gpios: description: GPIO used for the shlr pin (horizontal flip) maxItems: 1 @@ -35,10 +34,6 @@ properties: vcc-lcd-supply: description: Power regulator - backlight: - description: Backlight used by the panel - $ref: /schemas/types.yaml#/definitions/phandle - required: - compatible - power-gpios @@ -47,5 +42,6 @@ required: - shlr-gpios - updn-gpios - vcc-lcd-supply + - port -additionalProperties: false +unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml index ccb574caed28..f1723e910252 100644 --- a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml +++ b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml @@ -33,6 +33,8 @@ properties: - samsung,atna45dc02 # Samsung 15.6" 3K (2880x1620 pixels) eDP AMOLED panel - samsung,atna56ac03 + # Samsung 16.0" 3K (2880x1800 pixels) eDP AMOLED panel + - samsung,atna60cl08 - const: samsung,atna33xc20 enable-gpios: true diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e3fc2x01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e3fc2x01.yaml new file mode 100644 index 000000000000..d48354fb52ea --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e3fc2x01.yaml @@ -0,0 +1,81 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/samsung,s6e3fc2x01.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Samsung S6E3FC2X01 AMOLED DDIC + +description: The S6E3FC2X01 is display driver IC with connected panel. 
+ +maintainers: + - David Heidelberg + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + items: + - enum: + # Samsung 6.41 inch, 1080x2340 pixels, 19.5:9 ratio + - samsung,s6e3fc2x01-ams641rw + - const: samsung,s6e3fc2x01 + + reg: + maxItems: 1 + + reset-gpios: true + + port: true + + vddio-supply: + description: VDD regulator + + vci-supply: + description: VCI regulator + + poc-supply: + description: POC regulator + +required: + - compatible + - reset-gpios + - vddio-supply + - vci-supply + - poc-supply + +unevaluatedProperties: false + +examples: + - | + #include + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "samsung,s6e3fc2x01-ams641rw", "samsung,s6e3fc2x01"; + reg = <0>; + + vddio-supply = <&vreg_l14a_1p88>; + vci-supply = <&s2dos05_buck1>; + poc-supply = <&s2dos05_ldo1>; + + te-gpios = <&tlmm 10 GPIO_ACTIVE_HIGH>; + reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>; + + pinctrl-0 = <&sde_dsi_active &sde_te_active_sleep>; + pinctrl-1 = <&sde_dsi_suspend &sde_te_active_sleep>; + pinctrl-names = "default", "sleep"; + + port { + panel_in: endpoint { + remote-endpoint = <&mdss_dsi0_out>; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/panel/samsung,sofef00.yaml b/Documentation/devicetree/bindings/display/panel/samsung,sofef00.yaml new file mode 100644 index 000000000000..eeee3cac72e3 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/samsung,sofef00.yaml @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/samsung,sofef00.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Samsung SOFEF00 AMOLED DDIC + +description: The SOFEF00 is display driver IC with connected panel. + +maintainers: + - David Heidelberg + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + items: + - enum: + # Samsung 6.01 inch, 1080x2160 pixels, 18:9 ratio + - samsung,sofef00-ams601nt22 + # Samsung 6.28 inch, 1080x2280 pixels, 19:9 ratio + - samsung,sofef00-ams628nw01 + - const: samsung,sofef00 + + reg: + maxItems: 1 + + poc-supply: + description: POC regulator + + vci-supply: + description: VCI regulator + + vddio-supply: + description: VDD regulator + +required: + - compatible + - reset-gpios + - poc-supply + - vci-supply + - vddio-supply + +unevaluatedProperties: false + +examples: + - | + #include + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "samsung,sofef00-ams628nw01", "samsung,sofef00"; + reg = <0>; + + vddio-supply = <&vreg_l14a_1p88>; + vci-supply = <&s2dos05_buck1>; + poc-supply = <&s2dos05_ldo1>; + + te-gpios = <&tlmm 10 GPIO_ACTIVE_HIGH>; + reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>; + + pinctrl-0 = <&panel_active>; + pinctrl-1 = <&panel_suspend>; + pinctrl-names = "default", "sleep"; + + port { + panel_in: endpoint { + remote-endpoint = <&mdss_dsi0_out>; + }; + }; + }; + }; + +... 
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq079l1sx01.yaml b/Documentation/devicetree/bindings/display/panel/sharp,lq079l1sx01.yaml new file mode 100644 index 000000000000..08a35ebbbb3c --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/sharp,lq079l1sx01.yaml @@ -0,0 +1,99 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/sharp,lq079l1sx01.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Sharp Microelectronics 7.9" WQXGA TFT LCD panel + +maintainers: + - Svyatoslav Ryhel + +description: > + This panel requires a dual-channel DSI host to operate and it supports + only left-right split mode, where each channel drives the left or right + half of the screen and only video mode. + + Each of the DSI channels controls a separate DSI peripheral. + The peripheral driven by the first link (DSI-LINK1), left one, is + considered the primary peripheral and controls the device. + +allOf: + - $ref: panel-common-dual.yaml# + +properties: + compatible: + const: sharp,lq079l1sx01 + + reg: + maxItems: 1 + + avdd-supply: + description: regulator that supplies the analog voltage + + vddio-supply: + description: regulator that supplies the I/O voltage + + vsp-supply: + description: positive boost supply regulator + + vsn-supply: + description: negative boost supply regulator + + reset-gpios: + maxItems: 1 + + backlight: true + ports: true + +required: + - compatible + - reg + - avdd-supply + - vddio-supply + - ports + +additionalProperties: false + +examples: + - | + #include + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "sharp,lq079l1sx01"; + reg = <0>; + + reset-gpios = <&gpio 59 GPIO_ACTIVE_LOW>; + + avdd-supply = <&avdd_lcd>; + vddio-supply = <&vdd_lcd_io>; + vsp-supply = <&vsp_5v5_lcd>; + vsn-supply = <&vsn_5v5_lcd>; + + backlight = <&backlight>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + panel_in0: endpoint { + remote-endpoint = <&dsi0_out>; + }; + }; + + port@1 { + reg = <1>; + panel_in1: endpoint { + remote-endpoint = <&dsi1_out>; + }; + }; + }; + }; + }; +... 
diff --git a/Documentation/devicetree/bindings/display/panel/synaptics,td4300-panel.yaml b/Documentation/devicetree/bindings/display/panel/synaptics,td4300-panel.yaml new file mode 100644 index 000000000000..152d94367130 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/synaptics,td4300-panel.yaml @@ -0,0 +1,89 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/synaptics,td4300-panel.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Synaptics TDDI Display Panel Controller + +maintainers: + - Kaustabh Chakraborty + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + enum: + - syna,td4101-panel + - syna,td4300-panel + + reg: + maxItems: 1 + + vio-supply: + description: core I/O voltage supply + + vsn-supply: + description: negative voltage supply for analog circuits + + vsp-supply: + description: positive voltage supply for analog circuits + + backlight-gpios: + maxItems: 1 + description: backlight enable GPIO + + reset-gpios: true + width-mm: true + height-mm: true + panel-timing: true + +required: + - compatible + - reg + - width-mm + - height-mm + - panel-timing + +additionalProperties: false + +examples: + - | + #include + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "syna,td4300-panel"; + reg = <0>; + + vio-supply = <&panel_vio_reg>; + vsn-supply = <&panel_vsn_reg>; + vsp-supply = <&panel_vsp_reg>; + + backlight-gpios = <&gpd3 5 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpd3 4 GPIO_ACTIVE_LOW>; + + width-mm = <68>; + height-mm = <121>; + + panel-timing { + clock-frequency = <144389520>; + + hactive = <1080>; + hsync-len = <4>; + hfront-porch = <120>; + hback-porch = <32>; + + vactive = <1920>; + vsync-len = <2>; + vfront-porch = <21>; + vback-porch = <4>; + }; + }; + }; + +... 
diff --git a/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml b/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml index 1e32d14b6edb..2cc66dcef870 100644 --- a/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml +++ b/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml @@ -25,6 +25,9 @@ properties: - enum: - renesas,r9a07g054-du # RZ/V2L - const: renesas,r9a07g044-du # RZ/G2L fallback + - items: + - const: renesas,r9a09g056-du # RZ/V2N + - const: renesas,r9a09g057-du # RZ/V2H(P) fallback reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml index c59df3c1a3f7..632b48bfabb9 100644 --- a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml @@ -17,6 +17,7 @@ properties: - rockchip,px30-mipi-dsi - rockchip,rk3128-mipi-dsi - rockchip,rk3288-mipi-dsi + - rockchip,rk3368-mipi-dsi - rockchip,rk3399-mipi-dsi - rockchip,rk3568-mipi-dsi - rockchip,rv1126-mipi-dsi @@ -73,6 +74,7 @@ allOf: enum: - rockchip,px30-mipi-dsi - rockchip,rk3128-mipi-dsi + - rockchip,rk3368-mipi-dsi - rockchip,rk3568-mipi-dsi - rockchip,rv1126-mipi-dsi diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3588-dw-hdmi-qp.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3588-dw-hdmi-qp.yaml index 96b4b088eebe..d649808c59da 100644 --- a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3588-dw-hdmi-qp.yaml +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3588-dw-hdmi-qp.yaml @@ -113,6 +113,14 @@ properties: description: Additional HDMI QP related data is accessed through VO GRF regs. + frl-enable-gpios: + description: + Optional GPIO line to be asserted when operating in HDMI 2.1 FRL mode and + deasserted for HDMI 1.4/2.0 TMDS. It can be used to control external + voltage bias for HDMI data lines. When not present the HDMI encoder will + operate in TMDS mode only. 
+ maxItems: 1 + required: - compatible - reg @@ -132,8 +140,10 @@ unevaluatedProperties: false examples: - | #include + #include #include #include + #include #include #include @@ -164,6 +174,7 @@ examples: rockchip,grf = <&sys_grf>; rockchip,vo-grf = <&vo1_grf>; #sound-dai-cells = <0>; + frl-enable-gpios = <&gpio4 RK_PB1 GPIO_ACTIVE_LOW>; ports { #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml index a5b4e0021758..bee9faf1d3f8 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml +++ b/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml @@ -18,6 +18,8 @@ properties: oneOf: - items: - enum: + - mediatek,mt8196-mali + - nxp,imx95-mali # G310 - rockchip,rk3588-mali - const: arm,mali-valhall-csf # Mali Valhall GPU model/revision is fully discoverable @@ -44,7 +46,9 @@ properties: minItems: 1 items: - const: core - - const: coregroup + - enum: + - coregroup + - stacks - const: stacks mali-supply: true @@ -91,7 +95,6 @@ required: - interrupts - interrupt-names - clocks - - mali-supply additionalProperties: false @@ -108,6 +111,29 @@ allOf: power-domains: maxItems: 1 power-domain-names: false + required: + - mali-supply + - if: + properties: + compatible: + contains: + const: mediatek,mt8196-mali + then: + properties: + mali-supply: false + sram-supply: false + operating-points-v2: false + power-domains: + maxItems: 1 + power-domain-names: false + clocks: + maxItems: 2 + clock-names: + items: + - const: core + - const: stacks + required: + - power-domains examples: - | @@ -143,5 +169,17 @@ examples: }; }; }; + - | + gpu@48000000 { + compatible = "mediatek,mt8196-mali", "arm,mali-valhall-csf"; + reg = <0x48000000 0x480000>; + clocks = <&gpufreq 0>, <&gpufreq 1>; + clock-names = "core", "stacks"; + interrupts = , + , + ; + interrupt-names = "job", "mmu", "gpu"; + power-domains = <&gpufreq>; + }; ... 
diff --git a/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml b/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml index c87d7bece0ec..225a6e1b7fcd 100644 --- a/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml +++ b/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml @@ -13,6 +13,16 @@ maintainers: properties: compatible: oneOf: + - items: + - enum: + - renesas,r8a7796-gpu + - renesas,r8a77961-gpu + - const: img,img-gx6250 + - const: img,img-rogue + - items: + - const: renesas,r8a77965-gpu + - const: img,img-ge7800 + - const: img,img-rogue - items: - enum: - ti,am62-gpu @@ -82,6 +92,33 @@ required: additionalProperties: false allOf: + - if: + properties: + compatible: + contains: + enum: + - ti,am62-gpu + - ti,j721s2-gpu + then: + properties: + clocks: + maxItems: 1 + + - if: + properties: + compatible: + contains: + enum: + - img,img-ge7800 + - img,img-gx6250 + - thead,th1520-gpu + then: + properties: + clocks: + minItems: 3 + clock-names: + minItems: 3 + - if: properties: compatible: @@ -90,14 +127,31 @@ allOf: then: properties: power-domains: - items: - - description: Power domain A + maxItems: 1 power-domain-names: maxItems: 1 required: - power-domains - power-domain-names + - if: + properties: + compatible: + contains: + enum: + - img,img-bxs-4-64 + - img,img-ge7800 + - img,img-gx6250 + then: + properties: + power-domains: + minItems: 2 + power-domain-names: + minItems: 2 + required: + - power-domains + - power-domain-names + - if: properties: compatible: @@ -105,10 +159,6 @@ allOf: const: thead,th1520-gpu then: properties: - clocks: - minItems: 3 - clock-names: - minItems: 3 power-domains: items: - description: The single, unified power domain for the GPU on the @@ -117,35 +167,6 @@ allOf: required: - power-domains - - if: - properties: - compatible: - contains: - const: img,img-bxs-4-64 - then: - properties: - power-domains: - items: - - description: Power domain A - - description: Power domain B - power-domain-names: - minItems: 2 - required: - - power-domains - - power-domain-names - - - if: - properties: - compatible: - contains: - enum: - - ti,am62-gpu - - ti,j721s2-gpu - then: - properties: - clocks: - maxItems: 1 - examples: - | #include diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml index 89495f094d52..c9efdd1a6d1c 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml +++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml @@ -89,6 +89,8 @@ properties: - description: Qcom Adreno GPUs implementing "qcom,smmu-500" and "arm,mmu-500" items: - enum: + - qcom,glymur-smmu-500 + - qcom,kaanapali-smmu-500 - qcom,milos-smmu-500 - qcom,qcm2290-smmu-500 - qcom,qcs615-smmu-500 diff --git a/Documentation/devicetree/bindings/npu/arm,ethos.yaml b/Documentation/devicetree/bindings/npu/arm,ethos.yaml new file mode 100644 index 000000000000..716c4997f976 --- /dev/null +++ b/Documentation/devicetree/bindings/npu/arm,ethos.yaml @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/npu/arm,ethos.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Arm Ethos U65/U85 + +maintainers: + - Rob Herring + +description: > + The Arm Ethos-U NPUs are designed for IoT inference applications. 
The NPUs + can accelerate 8-bit and 16-bit integer quantized networks: + + Transformer networks (U85 only) + Convolutional Neural Networks (CNN) + Recurrent Neural Networks (RNN) + + Further documentation is available here: + + U65 TRM: https://developer.arm.com/documentation/102023/ + U85 TRM: https://developer.arm.com/documentation/102685/ + +properties: + compatible: + oneOf: + - items: + - enum: + - fsl,imx93-npu + - const: arm,ethos-u65 + - items: + - {} + - const: arm,ethos-u85 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 2 + + clock-names: + items: + - const: core + - const: apb + + power-domains: + maxItems: 1 + + sram: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + - clocks + +additionalProperties: false + +examples: + - | + #include + #include + #include + + npu@4a900000 { + compatible = "fsl,imx93-npu", "arm,ethos-u65"; + reg = <0x4a900000 0x1000>; + interrupts = ; + power-domains = <&mlmix>; + clocks = <&clk IMX93_CLK_ML>, <&clk IMX93_CLK_ML_APB>; + clock-names = "core", "apb"; + sram = <&sram>; + }; +... diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index beab81e462d2..f2ae0c538e48 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -176,6 +176,8 @@ patternProperties: description: All Sensors Corporation "^asix,.*": description: ASIX Electronics Corporation + "^asl-tek,.*": + description: ASL Xiamen Technology Co., Ltd. "^aspeed,.*": description: ASPEED Technology Inc. "^asrock,.*": @@ -835,6 +837,8 @@ patternProperties: description: JOZ BV "^jty,.*": description: JTY + "^jutouch,.*": + description: JuTouch Technology Co., Ltd. "^kam,.*": description: Kamstrup A/S "^karo,.*": @@ -1323,6 +1327,8 @@ patternProperties: description: Raumfeld GmbH "^raydium,.*": description: Raydium Semiconductor Corp. + "^raystar,.*": + description: Raystar Optronics, Inc. "^rda,.*": description: Unisoc Communications, Inc. "^realtek,.*": diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst index 5139705089f2..781129f78b06 100644 --- a/Documentation/gpu/drm-kms-helpers.rst +++ b/Documentation/gpu/drm-kms-helpers.rst @@ -92,6 +92,18 @@ GEM Atomic Helper Reference .. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c :export: +VBLANK Helper Reference +----------------------- + +.. kernel-doc:: drivers/gpu/drm/drm_vblank_helper.c + :doc: overview + +.. kernel-doc:: include/drm/drm_vblank_helper.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/drm_vblank_helper.c + :export: + Simple KMS Helper Reference =========================== diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst index abfe220764e1..2292e65f044c 100644 --- a/Documentation/gpu/drm-kms.rst +++ b/Documentation/gpu/drm-kms.rst @@ -413,6 +413,21 @@ Plane Panic Functions Reference .. kernel-doc:: drivers/gpu/drm/drm_panic.c :export: +Colorop Abstraction +=================== + +.. kernel-doc:: drivers/gpu/drm/drm_colorop.c + :doc: overview + +Colorop Functions Reference +--------------------------- + +.. kernel-doc:: include/drm/drm_colorop.h + :internal: + +.. 
kernel-doc:: drivers/gpu/drm/drm_colorop.c + :export: + Display Modes Function Reference ================================ diff --git a/Documentation/gpu/nova/core/todo.rst b/Documentation/gpu/nova/core/todo.rst index 0972cb905f7a..35cc7c31d423 100644 --- a/Documentation/gpu/nova/core/todo.rst +++ b/Documentation/gpu/nova/core/todo.rst @@ -44,25 +44,6 @@ automatically generates the corresponding mappings between a value and a number. | Complexity: Beginner | Link: https://docs.rs/num/latest/num/trait.FromPrimitive.html -Conversion from byte slices for types implementing FromBytes [TRSM] -------------------------------------------------------------------- - -We retrieve several structures from byte streams coming from the BIOS or loaded -firmware. At the moment converting the bytes slice into the proper type require -an inelegant `unsafe` operation; this will go away once `FromBytes` implements -a proper `from_bytes` method. - -| Complexity: Beginner - -CoherentAllocation improvements [COHA] --------------------------------------- - -`CoherentAllocation` needs a safe way to write into the allocation, and to -obtain slices within the allocation. - -| Complexity: Beginner -| Contact: Abdiel Janulgue - Generic register abstraction [REGA] ----------------------------------- @@ -153,17 +134,6 @@ A `num` core kernel module is being designed to provide these operations. | Complexity: Intermediate | Contact: Alexandre Courbot -Delay / Sleep abstractions [DLAY] ---------------------------------- - -Rust abstractions for the kernel's delay() and sleep() functions. - -FUJITA Tomonori plans to work on abstractions for read_poll_timeout_atomic() -(and friends) [1]. - -| Complexity: Beginner -| Link: https://lore.kernel.org/netdev/20250228.080550.354359820929821928.fujita.tomonori@gmail.com/ [1] - IRQ abstractions ---------------- diff --git a/Documentation/gpu/rfc/color_pipeline.rst b/Documentation/gpu/rfc/color_pipeline.rst new file mode 100644 index 000000000000..cd1cc2d0f988 --- /dev/null +++ b/Documentation/gpu/rfc/color_pipeline.rst @@ -0,0 +1,378 @@ +.. SPDX-License-Identifier: GPL-2.0 + +======================== +Linux Color Pipeline API +======================== + +What problem are we solving? +============================ + +We would like to support pre- and post-blending complex color +transformations in display controller hardware in order to allow for +HW-supported HDR use-cases, as well as to provide support to +color-managed applications, such as video or image editors. + +It is possible to support an HDR output on HW supporting the Colorspace +and HDR Metadata drm_connector properties, but that requires the +compositor or application to render and compose the content into one +final buffer intended for display. Doing so is costly. + +Most modern display HW offers various 1D LUTs, 3D LUTs, matrices, and other +operations to support color transformations. These operations are often +implemented in fixed-function HW and therefore much more power efficient than +performing similar operations via shaders or CPU. + +We would like to make use of this HW functionality to support complex color +transformations with no, or minimal, CPU or shader load. The switch between HW +fixed-function blocks and shaders/CPU must be seamless, with no visible +difference when a fallback to shaders/CPU is necessary at any time. + + +How are other OSes solving this problem? +======================================== + +The most widely supported use-cases regard HDR content, whether video or +gaming.
+ +Most OSes will specify the source content format (color gamut, encoding transfer +function, and other metadata, such as max and average light levels) to a driver. +Drivers will then program their fixed-function HW accordingly to map from a +source content buffer's space to a display's space. + +When fixed-function HW is not available, the compositor will assemble a shader to +ask the GPU to perform the transformation from the source content format to the +display's format. + +A compositor's mapping function and a driver's mapping function are usually +entirely separate concepts. On OSes where a HW vendor has no insight into +closed-source compositor code, such a vendor will tune their color management +code to visually match the compositor's. On other OSes, where both mapping +functions are open to an implementer, they will ensure both mappings match. + +This results in mapping algorithm lock-in, meaning that no-one alone can +experiment with or introduce new mapping algorithms and achieve +consistent results regardless of which implementation path is taken. + +Why is Linux different? +======================= + +Unlike other OSes, where there is one compositor for one or more drivers, on +Linux we have a many-to-many relationship. Many compositors; many drivers. +In addition, each compositor vendor or community has their own view of how +color management should be done. This is what makes Linux so beautiful. + +This means that a HW vendor can no longer tune their driver to one +compositor, as tuning it to one could make it look fairly different from +another compositor's color mapping. + +We need a better solution. + + +Descriptive API +=============== + +An API that describes the source and destination colorspaces is a descriptive +API. It describes the input and output color spaces but does not describe +how precisely they should be mapped. Such a mapping includes many minute +design decisions that can greatly affect the look of the final result. + +It is not feasible to describe such a mapping with enough detail to ensure the +same result from each implementation. In fact, these mappings are a very active +research area. + + +Prescriptive API +================ + +A prescriptive API does not describe the source and destination colorspaces. It +instead prescribes a recipe for how to manipulate pixel values to arrive at the +desired outcome. + +This recipe is generally an ordered list of straight-forward operations, +with clear mathematical definitions, such as 1D LUTs, 3D LUTs, matrices, +or other operations that can be described in a precise manner. + + +The Color Pipeline API +====================== + +HW color management pipelines can significantly differ between HW +vendors in terms of availability, ordering, and capabilities of HW +blocks. This makes a common definition of color management blocks and +their ordering nigh impossible. Instead we are defining an API that +allows user space to discover the HW capabilities in a generic manner, +agnostic of specific drivers and hardware. + + +drm_colorop Object +================== + +To support the definition of color pipelines we define the DRM core +object type drm_colorop. Individual drm_colorop objects will be chained +via the NEXT property of a drm_colorop to constitute a color pipeline. +Each drm_colorop object is unique, i.e., even if multiple color +pipelines have the same operation they won't share the same drm_colorop +object to describe that operation.
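As an illustration of how a client follows these chains, a minimal userspace sketch that walks a color pipeline via the NEXT property, starting from the object ID that a plane's COLOR_PIPELINE enum entry points at, might look like the following. It assumes the property names used in this document ("TYPE", "NEXT") and looks colorops up with DRM_MODE_OBJECT_ANY, since dedicated libdrm helpers or a colorop object-type constant may not exist yet::

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Follow NEXT from the first colorop; NEXT == 0 terminates the chain. */
    static void walk_color_pipeline(int fd, uint32_t first_colorop_id)
    {
            uint32_t id = first_colorop_id;

            while (id != 0) {
                    drmModeObjectProperties *props;
                    uint32_t next = 0;

                    props = drmModeObjectGetProperties(fd, id, DRM_MODE_OBJECT_ANY);
                    if (!props)
                            break;

                    for (uint32_t i = 0; i < props->count_props; i++) {
                            drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

                            if (!prop)
                                    continue;
                            if (!strcmp(prop->name, "TYPE"))
                                    printf("colorop %u: TYPE enum value %" PRIu64 "\n",
                                           id, (uint64_t)props->prop_values[i]);
                            else if (!strcmp(prop->name, "NEXT"))
                                    next = (uint32_t)props->prop_values[i];
                            drmModeFreeProperty(prop);
                    }

                    drmModeFreeObjectProperties(props);
                    id = next;
            }
    }

A client would perform such a walk once per COLOR_PIPELINE enum value during discovery, comparing each colorop's TYPE (and per-type properties such as SIZE or CURVE_1D_TYPE) against the operations it needs.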
+ +Note that drivers are not expected to map drm_colorop objects statically +to specific HW blocks. The mapping of drm_colorop objects is entirely a +driver-internal detail and can be as dynamic or static as a driver needs +it to be. See more in the Driver Implementation Guide section below. + +Each drm_colorop has three core properties: + +TYPE: An enumeration property, defining the type of transformation, such as +* enumerated curve +* custom (uniform) 1D LUT +* 3x3 matrix +* 3x4 matrix +* 3D LUT +* etc. + +Depending on the type of transformation other properties will describe +more details. + +BYPASS: A boolean property that can be used to easily put a block into +bypass mode. The BYPASS property is not mandatory for a colorop, as long +as the entire pipeline can get bypassed by setting the COLOR_PIPELINE on +a plane to '0'. + +NEXT: The ID of the next drm_colorop in a color pipeline, or 0 if this +drm_colorop is the last in the chain. + +An example of a drm_colorop object might look like one of these:: + + /* 1D enumerated curve */ + Color operation 42 + ├─ "TYPE": immutable enum {1D enumerated curve, 1D LUT, 3x3 matrix, 3x4 matrix, 3D LUT, etc.} = 1D enumerated curve + ├─ "BYPASS": bool {true, false} + ├─ "CURVE_1D_TYPE": enum {sRGB EOTF, sRGB inverse EOTF, PQ EOTF, PQ inverse EOTF, …} + └─ "NEXT": immutable color operation ID = 43 + + /* custom 4k entry 1D LUT */ + Color operation 52 + ├─ "TYPE": immutable enum {1D enumerated curve, 1D LUT, 3x3 matrix, 3x4 matrix, 3D LUT, etc.} = 1D LUT + ├─ "BYPASS": bool {true, false} + ├─ "SIZE": immutable range = 4096 + ├─ "DATA": blob + └─ "NEXT": immutable color operation ID = 0 + + /* 17^3 3D LUT */ + Color operation 72 + ├─ "TYPE": immutable enum {1D enumerated curve, 1D LUT, 3x3 matrix, 3x4 matrix, 3D LUT, etc.} = 3D LUT + ├─ "BYPASS": bool {true, false} + ├─ "SIZE": immutable range = 17 + ├─ "DATA": blob + └─ "NEXT": immutable color operation ID = 73 + +drm_colorop extensibility +------------------------- + +Unlike existing DRM core objects, like &drm_plane, drm_colorop is not +extensible. This simplifies implementations and keeps all functionality +for managing &drm_colorop objects in the DRM core. + +If there is a need one may introduce a simple &drm_colorop_funcs +function table in the future, for example to support an IN_FORMATS +property on a &drm_colorop. + +If a driver requires the ability to create a driver-specific colorop +object they will need to add &drm_colorop func table support with +support for the usual functions, like destroy, atomic_duplicate_state, +and atomic_destroy_state. + + +COLOR_PIPELINE Plane Property +============================= + +Color Pipelines are created by a driver and advertised via a new +COLOR_PIPELINE enum property on each plane. Values of the property +always include object id 0, which is the default and means all color +processing is disabled. Additional values will be the object IDs of the +first drm_colorop in a pipeline. A driver can create and advertise none, +one, or more possible color pipelines. A DRM client will select a color +pipeline by setting the COLOR PIPELINE to the respective value. + +NOTE: Many DRM clients will set enumeration properties via the string +value, often hard-coding it. Since this enumeration is generated based +on the colorop object IDs it is important to perform the Color Pipeline +Discovery, described below, instead of hard-coding color pipeline +assignment. Drivers might generate the enum strings dynamically. 
+Hard-coded strings might only work for specific drivers on a specific +pieces of HW. Color Pipeline Discovery can work universally, as long as +drivers implement the required color operations. + +The COLOR_PIPELINE property is only exposed when the +DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set. Drivers shall ignore any +existing pre-blend color operations when this cap is set, such as +COLOR_RANGE and COLOR_ENCODING. If drivers want to support COLOR_RANGE +or COLOR_ENCODING functionality when the color pipeline client cap is +set, they are expected to expose colorops in the pipeline to allow for +the appropriate color transformation. + +Setting of the COLOR_PIPELINE plane property or drm_colorop properties +is only allowed for userspace that sets this client cap. + +An example of a COLOR_PIPELINE property on a plane might look like this:: + + Plane 10 + ├─ "TYPE": immutable enum {Overlay, Primary, Cursor} = Primary + ├─ … + └─ "COLOR_PIPELINE": enum {0, 42, 52} = 0 + + +Color Pipeline Discovery +======================== + +A DRM client wanting color management on a drm_plane will: + +1. Get the COLOR_PIPELINE property of the plane +2. iterate all COLOR_PIPELINE enum values +3. for each enum value walk the color pipeline (via the NEXT pointers) + and see if the available color operations are suitable for the + desired color management operations + +If userspace encounters an unknown or unsuitable color operation during +discovery it does not need to reject the entire color pipeline outright, +as long as the unknown or unsuitable colorop has a "BYPASS" property. +Drivers will ensure that a bypassed block does not have any effect. + +An example of chained properties to define an AMD pre-blending color +pipeline might look like this:: + + Plane 10 + ├─ "TYPE" (immutable) = Primary + └─ "COLOR_PIPELINE": enum {0, 44} = 0 + + Color operation 44 + ├─ "TYPE" (immutable) = 1D enumerated curve + ├─ "BYPASS": bool + ├─ "CURVE_1D_TYPE": enum {sRGB EOTF, PQ EOTF} = sRGB EOTF + └─ "NEXT" (immutable) = 45 + + Color operation 45 + ├─ "TYPE" (immutable) = 3x4 Matrix + ├─ "BYPASS": bool + ├─ "DATA": blob + └─ "NEXT" (immutable) = 46 + + Color operation 46 + ├─ "TYPE" (immutable) = 1D enumerated curve + ├─ "BYPASS": bool + ├─ "CURVE_1D_TYPE": enum {sRGB Inverse EOTF, PQ Inverse EOTF} = sRGB EOTF + └─ "NEXT" (immutable) = 47 + + Color operation 47 + ├─ "TYPE" (immutable) = 1D LUT + ├─ "SIZE": immutable range = 4096 + ├─ "DATA": blob + └─ "NEXT" (immutable) = 48 + + Color operation 48 + ├─ "TYPE" (immutable) = 3D LUT + ├─ "DATA": blob + └─ "NEXT" (immutable) = 49 + + Color operation 49 + ├─ "TYPE" (immutable) = 1D enumerated curve + ├─ "BYPASS": bool + ├─ "CURVE_1D_TYPE": enum {sRGB EOTF, PQ EOTF} = sRGB EOTF + └─ "NEXT" (immutable) = 0 + + +Color Pipeline Programming +========================== + +Once a DRM client has found a suitable pipeline it will: + +1. Set the COLOR_PIPELINE enum value to the one pointing at the first + drm_colorop object of the desired pipeline +2. Set the properties for all drm_colorop objects in the pipeline to the + desired values, setting BYPASS to true for unused drm_colorop blocks, + and false for enabled drm_colorop blocks +3. 
Perform (TEST_ONLY or not) atomic commit with all the other KMS + states it wishes to change + +To configure the pipeline for an HDR10 PQ plane and blending in linear +space, a compositor might perform an atomic commit with the following +property values:: + + Plane 10 + └─ "COLOR_PIPELINE" = 42 + + Color operation 42 + └─ "BYPASS" = true + + Color operation 44 + └─ "BYPASS" = true + + Color operation 45 + └─ "BYPASS" = true + + Color operation 46 + └─ "BYPASS" = true + + Color operation 47 + ├─ "DATA" = Gamut mapping + tone mapping + night mode + └─ "BYPASS" = false + + Color operation 48 + ├─ "CURVE_1D_TYPE" = PQ EOTF + └─ "BYPASS" = false + + +Driver Implementer's Guide +========================== + +What does this all mean for driver implementations? As noted above the +colorops can map to HW directly but don't need to do so. Here are some +suggestions on how to think about creating your color pipelines: + +- Try to expose pipelines that use already defined colorops, even if + your hardware pipeline is split differently. This allows existing + userspace to immediately take advantage of the hardware. + +- Additionally, try to expose your actual hardware blocks as colorops. + Define new colorop types where you believe it can offer significant + benefits if userspace learns to program them. + +- Avoid defining new colorops for compound operations with very narrow + scope. If you have a hardware block for a special operation that + cannot be split further, you can expose that as a new colorop type. + However, try to not define colorops for "use cases", especially if + they require you to combine multiple hardware blocks. + +- Design new colorops as prescriptive, not descriptive; by the + mathematical formula, not by the assumed input and output. + +A defined colorop type must be deterministic. The exact behavior of the +colorop must be documented entirely, whether via a mathematical formula +or some other description. Its operation can depend only on its +properties and input and nothing else, allowed error tolerance +notwithstanding. + + +Driver Forward/Backward Compatibility +===================================== + +As this is uAPI drivers can't regress color pipelines that have been +introduced for a given HW generation. New HW generations are free to +abandon color pipelines advertised for previous generations. +Nevertheless, it can be beneficial to carry support for existing color +pipelines forward as those will likely already have support in DRM +clients. + +Introducing new colorops to a pipeline is fine, as long as they can be +bypassed or are purely informational. DRM clients implementing support +for the pipeline can always skip unknown properties as long as they can +be confident that doing so will not cause unexpected results. + +If a new colorop doesn't fall into one of the above categories +(bypassable or informational) the modified pipeline would be unusable +for user space. In this case a new pipeline should be defined. + + +References +========== + +1. https://lore.kernel.org/dri-devel/QMers3awXvNCQlyhWdTtsPwkp5ie9bze_hD5nAccFW7a_RXlWjYB7MoUW_8CKLT2bSQwIXVi5H6VULYIxCdgvryZoAoJnC5lZgyK1QWn488=@emersion.fr/ \ No newline at end of file diff --git a/Documentation/gpu/rfc/index.rst b/Documentation/gpu/rfc/index.rst index 396e535377fb..ef19b0ba2a3e 100644 --- a/Documentation/gpu/rfc/index.rst +++ b/Documentation/gpu/rfc/index.rst @@ -35,3 +35,6 @@ host such documentation: .. toctree:: i915_vm_bind.rst + +.. 
toctree:: + color_pipeline.rst \ No newline at end of file diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index b5f58b4274b1..9013ced318cb 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -623,6 +623,43 @@ Contact: Thomas Zimmermann , Simona Vetter Level: Advanced +Implement a new DUMB_CREATE2 ioctl +---------------------------------- + +The current DUMB_CREATE ioctl is not well defined. Instead of a pixel and +framebuffer format, it only accepts a color mode of vague semantics. Assuming +a linear framebuffer, the color mode gives an idea of the supported pixel +format. But userspace effectively has to guess the correct values. It really +only works reliably with framebuffers in XRGB8888. Userspace has begun to +workaround these limitations by computing arbitrary format's buffer sizes and +calculating their sizes in terms of XRGB8888 pixels. + +One possible solution is a new ioctl DUMB_CREATE2. It should accept a DRM +format and a format modifier to resolve the color mode's ambiguity. As +framebuffers can be multi-planar, the new ioctl has to return the buffer size, +pitch and GEM handle for each individual color plane. + +In the first step, the new ioctl can be limited to the current features of +the existing DUMB_CREATE. Individual drivers can then be extended to support +multi-planar formats. Rockchip might require this and would be a good candidate. + +It might also be helpful to userspace to query information about the size of +a potential buffer, if allocated. Userspace would supply geometry and format; +the kernel would return minimal allocation sizes and scanline pitch. There is +interest to allocate that memory from another device and provide it to the +DRM driver (say via dma-buf). + +Another requested feature is the ability to allocate a buffer by size, without +format. Accelators use this for their buffer allocation and it could likely be +generalized. + +In addition to the kernel implementation, there must be user-space support +for the new ioctl. There's code in Mesa that might be able to use the new +call. + +Contact: Thomas Zimmermann + +Level: Advanced Better Testing ============== diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst index 8a8b1002931f..1e79e62a6bc4 100644 --- a/Documentation/gpu/vkms.rst +++ b/Documentation/gpu/vkms.rst @@ -51,6 +51,97 @@ To disable the driver, use :: sudo modprobe -r vkms +Configuring With Configfs +========================= + +It is possible to create and configure multiple VKMS instances via configfs. + +Start by mounting configfs and loading VKMS:: + + sudo mount -t configfs none /config + sudo modprobe vkms + +Once VKMS is loaded, ``/config/vkms`` is created automatically. Each directory +under ``/config/vkms`` represents a VKMS instance, create a new one:: + + sudo mkdir /config/vkms/my-vkms + +By default, the instance is disabled:: + + cat /config/vkms/my-vkms/enabled + 0 + +And directories are created for each configurable item of the display pipeline:: + + tree /config/vkms/my-vkms + ├── connectors + ├── crtcs + ├── enabled + ├── encoders + └── planes + +To add items to the display pipeline, create one or more directories under the +available paths. 
+ +Start by creating one or more planes:: + + sudo mkdir /config/vkms/my-vkms/planes/plane0 + +Planes have 1 configurable attribute: + +- type: Plane type: 0 overlay, 1 primary, 2 cursor (same values as those + exposed by the "type" property of a plane) + +Continue by creating one or more CRTCs:: + + sudo mkdir /config/vkms/my-vkms/crtcs/crtc0 + +CRTCs have 1 configurable attribute: + +- writeback: Enable or disable writeback connector support by writing 1 or 0 + +Next, create one or more encoders:: + + sudo mkdir /config/vkms/my-vkms/encoders/encoder0 + +Last but not least, create one or more connectors:: + + sudo mkdir /config/vkms/my-vkms/connectors/connector0 + +Connectors have 1 configurable attribute: + +- status: Connection status: 1 connected, 2 disconnected, 3 unknown (same values + as those exposed by the "status" property of a connector) + +To finish the configuration, link the different pipeline items:: + + sudo ln -s /config/vkms/my-vkms/crtcs/crtc0 /config/vkms/my-vkms/planes/plane0/possible_crtcs + sudo ln -s /config/vkms/my-vkms/crtcs/crtc0 /config/vkms/my-vkms/encoders/encoder0/possible_crtcs + sudo ln -s /config/vkms/my-vkms/encoders/encoder0 /config/vkms/my-vkms/connectors/connector0/possible_encoders + +Since at least one primary plane is required, make sure to set the right type:: + + echo "1" | sudo tee /config/vkms/my-vkms/planes/plane0/type + +Once you are done configuring the VKMS instance, enable it:: + + echo "1" | sudo tee /config/vkms/my-vkms/enabled + +Finally, you can remove the VKMS instance disabling it:: + + echo "0" | sudo tee /config/vkms/my-vkms/enabled + +And removing the top level directory and its subdirectories:: + + sudo rm /config/vkms/my-vkms/planes/*/possible_crtcs/* + sudo rm /config/vkms/my-vkms/encoders/*/possible_crtcs/* + sudo rm /config/vkms/my-vkms/connectors/*/possible_encoders/* + sudo rmdir /config/vkms/my-vkms/planes/* + sudo rmdir /config/vkms/my-vkms/crtcs/* + sudo rmdir /config/vkms/my-vkms/encoders/* + sudo rmdir /config/vkms/my-vkms/connectors/* + sudo rmdir /config/vkms/my-vkms + Testing With IGT ================ @@ -68,26 +159,23 @@ To return to graphical mode, do:: sudo systemctl isolate graphical.target -Once you are in text only mode, you can run tests using the --device switch -or IGT_DEVICE variable to specify the device filter for the driver we want -to test. IGT_DEVICE can also be used with the run-test.sh script to run the +Once you are in text only mode, you can run tests using the IGT_FORCE_DRIVER +variable to specify the device filter for the driver we want to test. 
+IGT_FORCE_DRIVER can also be used with the run-tests.sh script to run the tests for a specific driver:: - sudo ./build/tests/ --device "sys:/sys/devices/platform/vkms" - sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/ - sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./scripts/run-tests.sh -t + sudo IGT_FORCE_DRIVER="vkms" ./build/tests/ + sudo IGT_FORCE_DRIVER="vkms" ./scripts/run-tests.sh -t For example, to test the functionality of the writeback library, we can run the kms_writeback test:: - sudo ./build/tests/kms_writeback --device "sys:/sys/devices/platform/vkms" - sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/kms_writeback - sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./scripts/run-tests.sh -t kms_writeback + sudo IGT_FORCE_DRIVER="vkms" ./build/tests/kms_writeback + sudo IGT_FORCE_DRIVER="vkms" ./scripts/run-tests.sh -t kms_writeback You can also run subtests if you do not want to run the entire test:: - sudo ./build/tests/kms_flip --run-subtest basic-plain-flip --device "sys:/sys/devices/platform/vkms" - sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/kms_flip --run-subtest basic-plain-flip + sudo IGT_FORCE_DRIVER="vkms" ./build/tests/kms_flip --run-subtest basic-plain-flip Testing With KUnit ================== @@ -147,21 +235,14 @@ Runtime Configuration --------------------- We want to be able to reconfigure vkms instance without having to reload the -module. Use/Test-cases: +module through configfs. Use/Test-cases: - Hotplug/hotremove connectors on the fly (to be able to test DP MST handling of compositors). -- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of - them first). - - Change output configuration: Plug/unplug screens, change EDID, allow changing the refresh rate. -The currently proposed solution is to expose vkms configuration through -configfs. All existing module options should be supported through configfs -too. - Writeback support ----------------- diff --git a/Documentation/gpu/xe/index.rst b/Documentation/gpu/xe/index.rst index 88b22fad880e..bc432c95d1a3 100644 --- a/Documentation/gpu/xe/index.rst +++ b/Documentation/gpu/xe/index.rst @@ -14,6 +14,7 @@ DG2, etc is provided to prototype the driver. xe_mm xe_map xe_migrate + xe_exec_queue xe_cs xe_pm xe_gt_freq diff --git a/Documentation/gpu/xe/xe_exec_queue.rst b/Documentation/gpu/xe/xe_exec_queue.rst new file mode 100644 index 000000000000..6076569e311c --- /dev/null +++ b/Documentation/gpu/xe/xe_exec_queue.rst @@ -0,0 +1,20 @@ +.. SPDX-License-Identifier: (GPL-2.0+ OR MIT) + +=============== +Execution Queue +=============== + +.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue.c + :doc: Execution Queue + +Internal API +============ + +.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue_types.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue.c + :internal: diff --git a/Documentation/gpu/xe/xe_gt_freq.rst b/Documentation/gpu/xe/xe_gt_freq.rst index c0811200e327..182d6aabeee1 100644 --- a/Documentation/gpu/xe/xe_gt_freq.rst +++ b/Documentation/gpu/xe/xe_gt_freq.rst @@ -7,6 +7,9 @@ Xe GT Frequency Management .. kernel-doc:: drivers/gpu/drm/xe/xe_gt_freq.c :doc: Xe GT Frequency Management +.. 
kernel-doc:: drivers/gpu/drm/xe/xe_gt_throttle.c + :doc: Xe GT Throttle + Internal API ============ diff --git a/Documentation/userspace-api/dma-buf-heaps.rst b/Documentation/userspace-api/dma-buf-heaps.rst index 1dfe5e7acd5a..05445c83b79a 100644 --- a/Documentation/userspace-api/dma-buf-heaps.rst +++ b/Documentation/userspace-api/dma-buf-heaps.rst @@ -16,13 +16,52 @@ following heaps: - The ``system`` heap allocates virtually contiguous, cacheable, buffers. - - The ``cma`` heap allocates physically contiguous, cacheable, - buffers. Only present if a CMA region is present. Such a region is - usually created either through the kernel commandline through the - ``cma`` parameter, a memory region Device-Tree node with the - ``linux,cma-default`` property set, or through the ``CMA_SIZE_MBYTES`` or - ``CMA_SIZE_PERCENTAGE`` Kconfig options. The heap's name in devtmpfs is - ``default_cma_region``. For backwards compatibility, when the - ``DMABUF_HEAPS_CMA_LEGACY`` Kconfig option is set, a duplicate node is - created following legacy naming conventions; the legacy name might be - ``reserved``, ``linux,cma``, or ``default-pool``. + - The ``default_cma_region`` heap allocates physically contiguous, + cacheable, buffers. Only present if a CMA region is present. Such a + region is usually created either through the kernel commandline + through the ``cma`` parameter, a memory region Device-Tree node with + the ``linux,cma-default`` property set, or through the + ``CMA_SIZE_MBYTES`` or ``CMA_SIZE_PERCENTAGE`` Kconfig options. Prior + to Linux 6.17, its name wasn't stable and could be called + ``reserved``, ``linux,cma``, or ``default-pool``, depending on the + platform. + + - A heap will be created for each reusable region in the device tree + with the ``shared-dma-pool`` compatible, using the full device tree + node name as its name. The buffer semantics are identical to + ``default-cma-region``. + +Naming Convention +================= + +``dma-buf`` heap names should meet a number of constraints: + +- The name must be stable, and must not change from one version to the next. + Userspace identifies heaps by their name, so if the names ever change, we + would be likely to introduce regressions. + +- The name must describe the memory region the heap will allocate from, and + must uniquely identify it in a given platform. Since userspace applications + use the heap name as the discriminant, they must be able to tell reliably + which heap they want to use if there are multiple heaps. + +- The name must not mention implementation details, such as the allocator. The + heap driver will change over time, and implementation details when it was + introduced might not be relevant in the future. + +- The name should describe properties of the buffers that would be allocated. + Doing so will make heap identification easier for userspace. Such properties + are: + + - ``contiguous`` for physically contiguous buffers; + + - ``protected`` for encrypted buffers not accessible to the OS; + +- The name may describe intended usage. Doing so will make heap identification + easier for userspace applications and users. + +For example, assuming a platform with a reserved memory region located +at the RAM address 0x42000000, intended to allocate video framebuffers, +physically contiguous, and backed by the CMA kernel allocator, good +names would be ``memory@42000000-contiguous`` or ``video@42000000``, but +``cma-video`` wouldn't.
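Since userspace picks a heap purely by name, allocation is a matter of opening the matching device node under /dev/dma_heap and issuing the standard ioctl from <linux/dma-heap.h>. A minimal sketch (the heap name used here is the ``default_cma_region`` heap described above)::

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/dma-heap.h>

    /* Returns a dma-buf fd on success, -1 on failure. */
    static int alloc_from_heap(const char *heap_name, __u64 len)
    {
            struct dma_heap_allocation_data alloc = {
                    .len = len,
                    .fd_flags = O_RDWR | O_CLOEXEC,
            };
            char path[128];
            int heap_fd, ret;

            snprintf(path, sizeof(path), "/dev/dma_heap/%s", heap_name);
            heap_fd = open(path, O_RDONLY | O_CLOEXEC);
            if (heap_fd < 0)
                    return -1;

            ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc);
            close(heap_fd);
            if (ret < 0)
                    return -1;

            return alloc.fd;
    }

    /* Usage: int buf_fd = alloc_from_heap("default_cma_region", 4 << 20); */

The returned file descriptor is a regular dma-buf that can be mmap()ed or imported by a DRM driver; the only platform-specific knowledge the application needs is the heap name, which is exactly what the naming rules above try to keep stable.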
diff --git a/MAINTAINERS b/MAINTAINERS index 8ecd74ffab8a..b5a009627516 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1081,7 +1081,7 @@ M: Austin Zheng M: Jun Lei S: Supported F: drivers/gpu/drm/amd/display/dc/dml/ -F: drivers/gpu/drm/amd/display/dc/dml2/ +F: drivers/gpu/drm/amd/display/dc/dml2_0/ AMD FAM15H PROCESSOR POWER MONITORING DRIVER M: Huang Rui @@ -2022,6 +2022,15 @@ F: arch/arm64/include/asm/arch_timer.h F: drivers/clocksource/arm_arch_timer.c F: drivers/clocksource/arm_arch_timer_mmio.c +ARM ETHOS-U NPU DRIVER +M: Rob Herring (Arm) +M: Tomeu Vizoso +L: dri-devel@lists.freedesktop.org +S: Supported +T: git https://gitlab.freedesktop.org/drm/misc/kernel.git +F: drivers/accel/ethosu/ +F: include/uapi/drm/ethosu_accel.h + ARM GENERIC INTERRUPT CONTROLLER DRIVERS M: Marc Zyngier L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -2097,7 +2106,8 @@ F: drivers/gpu/drm/arm/display/komeda/ ARM MALI PANFROST DRM DRIVER M: Boris Brezillon M: Rob Herring -R: Steven Price +M: Steven Price +M: Adrián Larumbe L: dri-devel@lists.freedesktop.org S: Supported T: git https://gitlab.freedesktop.org/drm/misc/kernel.git @@ -2317,7 +2327,7 @@ S: Maintained F: drivers/clk/sunxi/ ARM/Allwinner sunXi SoC support -M: Chen-Yu Tsai +M: Chen-Yu Tsai M: Jernej Skrabec M: Samuel Holland L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -7331,6 +7341,7 @@ F: Documentation/userspace-api/dma-buf-alloc-exchange.rst F: drivers/dma-buf/ F: include/linux/*fence.h F: include/linux/dma-buf.h +F: include/linux/dma-buf/ F: include/linux/dma-resv.h K: \bdma_(?:buf|fence|resv)\b @@ -7654,8 +7665,7 @@ F: drivers/accel/ F: include/drm/drm_accel.h DRM DRIVER FOR ALLWINNER DE2 AND DE3 ENGINE -M: Maxime Ripard -M: Chen-Yu Tsai +M: Chen-Yu Tsai R: Jernej Skrabec L: dri-devel@lists.freedesktop.org S: Supported @@ -7764,7 +7774,8 @@ F: Documentation/devicetree/bindings/display/panel/panel-edp.yaml F: drivers/gpu/drm/panel/panel-edp.c DRM DRIVER FOR GENERIC USB DISPLAY -S: Orphan +M: Ruben Wauters +S: Maintained W: https://github.com/notro/gud/wiki T: git https://gitlab.freedesktop.org/drm/misc/kernel.git F: drivers/gpu/drm/gud/ @@ -7886,6 +7897,7 @@ DRM DRIVER for Qualcomm Adreno GPUs M: Rob Clark R: Sean Paul R: Konrad Dybcio +R: Akhil P Oommen L: linux-arm-msm@vger.kernel.org L: dri-devel@lists.freedesktop.org L: freedreno@lists.freedesktop.org @@ -7905,7 +7917,7 @@ DRM DRIVER for Qualcomm display hardware M: Rob Clark M: Dmitry Baryshkov R: Abhinav Kumar -R: Jessica Zhang +R: Jessica Zhang R: Sean Paul R: Marijn Suijten L: linux-arm-msm@vger.kernel.org @@ -8074,12 +8086,25 @@ S: Maintained F: Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml F: drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c +DRM DRIVER FOR SAMSUNG S6E3FC2X01 DDIC +M: David Heidelberg +S: Maintained +F: Documentation/devicetree/bindings/display/panel/samsung,s6e3fc2x01.yaml +F: drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c + DRM DRIVER FOR SAMSUNG S6E3HA8 PANELS M: Dzmitry Sankouski S: Maintained F: Documentation/devicetree/bindings/display/panel/samsung,s6e3ha8.yaml F: drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c +DRM DRIVER FOR SAMSUNG SOFEF00 DDIC +M: David Heidelberg +M: Casey Connolly +S: Maintained +F: Documentation/devicetree/bindings/display/panel/samsung,sofef00.yaml +F: drivers/gpu/drm/panel/panel-samsung-sofef00.c + DRM DRIVER FOR SHARP MEMORY LCD M: Alex Lanzano S: Maintained @@ -8264,12 +8289,12 @@ S: Supported W: https://drm.pages.freedesktop.org/maintainer-tools/drm-rust.html 
T: git https://gitlab.freedesktop.org/drm/rust/kernel.git F: drivers/gpu/drm/nova/ +F: drivers/gpu/drm/tyr/ F: drivers/gpu/nova-core/ F: rust/kernel/drm/ DRM DRIVERS FOR ALLWINNER A10 -M: Maxime Ripard -M: Chen-Yu Tsai +M: Chen-Yu Tsai L: dri-devel@lists.freedesktop.org S: Supported T: git https://gitlab.freedesktop.org/drm/misc/kernel.git @@ -8596,6 +8621,7 @@ S: Supported T: git https://gitlab.freedesktop.org/drm/misc/kernel.git F: drivers/gpu/drm/scheduler/ F: include/drm/gpu_scheduler.h +F: include/drm/spsc_queue.h DRM GPUVM M: Danilo Krummrich @@ -8618,7 +8644,7 @@ F: drivers/gpu/drm/clients/drm_log.c DRM PANEL DRIVERS M: Neil Armstrong -R: Jessica Zhang +R: Jessica Zhang L: dri-devel@lists.freedesktop.org S: Maintained T: git https://gitlab.freedesktop.org/drm/misc/kernel.git @@ -27832,7 +27858,7 @@ F: drivers/acpi/pmic/intel_pmic_xpower.c N: axp288 X-POWERS MULTIFUNCTION PMIC DEVICE DRIVERS -M: Chen-Yu Tsai +M: Chen-Yu Tsai L: linux-kernel@vger.kernel.org S: Maintained N: axp[128] diff --git a/arch/arm/boot/dts/st/stih410.dtsi b/arch/arm/boot/dts/st/stih410.dtsi index d56343f44fda..07da9b48ccac 100644 --- a/arch/arm/boot/dts/st/stih410.dtsi +++ b/arch/arm/boot/dts/st/stih410.dtsi @@ -34,6 +34,41 @@ usb2_picophy2: phy3 { status = "disabled"; }; + display-subsystem { + compatible = "st,sti-display-subsystem"; + ports = <&compositor>, <&hqvdp>, <&tvout>, <&sti_hdmi>; + + assigned-clocks = <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>, + <&clk_s_c0_pll1 0>, + <&clk_s_c0_flexgen CLK_COMPO_DVP>, + <&clk_s_c0_flexgen CLK_MAIN_DISP>, + <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>, + <&clk_s_d2_flexgen CLK_PIX_AUX_DISP>, + <&clk_s_d2_flexgen CLK_PIX_GDP1>, + <&clk_s_d2_flexgen CLK_PIX_GDP2>, + <&clk_s_d2_flexgen CLK_PIX_GDP3>, + <&clk_s_d2_flexgen CLK_PIX_GDP4>; + + assigned-clock-parents = <0>, + <0>, + <0>, + <&clk_s_c0_pll1 0>, + <&clk_s_c0_pll1 0>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 0>; + + assigned-clock-rates = <297000000>, + <297000000>, + <0>, + <400000000>, + <400000000>; + }; + soc { ohci0: usb@9a03c00 { compatible = "st,st-ohci-300x"; @@ -99,153 +134,176 @@ ehci1: usb@9a83e00 { status = "disabled"; }; - sti-display-subsystem@0 { - compatible = "st,sti-display-subsystem"; - #address-cells = <1>; - #size-cells = <1>; + compositor: display-controller@9d11000 { + compatible = "st,stih407-compositor"; + reg = <0x9d11000 0x1000>; - reg = <0 0>; - assigned-clocks = <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>, - <&clk_s_c0_pll1 0>, - <&clk_s_c0_flexgen CLK_COMPO_DVP>, - <&clk_s_c0_flexgen CLK_MAIN_DISP>, - <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>, - <&clk_s_d2_flexgen CLK_PIX_AUX_DISP>, - <&clk_s_d2_flexgen CLK_PIX_GDP1>, - <&clk_s_d2_flexgen CLK_PIX_GDP2>, - <&clk_s_d2_flexgen CLK_PIX_GDP3>, - <&clk_s_d2_flexgen CLK_PIX_GDP4>; + clock-names = "compo_main", + "compo_aux", + "pix_main", + "pix_aux", + "pix_gdp1", + "pix_gdp2", + "pix_gdp3", + "pix_gdp4", + "main_parent", + "aux_parent"; - assigned-clock-parents = <0>, - <0>, - <0>, - <&clk_s_c0_pll1 0>, - <&clk_s_c0_pll1 0>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>, - <&clk_s_d2_quadfs 0>, + clocks = <&clk_s_c0_flexgen CLK_COMPO_DVP>, + <&clk_s_c0_flexgen CLK_COMPO_DVP>, + <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>, + <&clk_s_d2_flexgen CLK_PIX_AUX_DISP>, + <&clk_s_d2_flexgen CLK_PIX_GDP1>, + <&clk_s_d2_flexgen CLK_PIX_GDP2>, + <&clk_s_d2_flexgen CLK_PIX_GDP3>, + <&clk_s_d2_flexgen CLK_PIX_GDP4>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>; + 
+ reset-names = "compo-main", "compo-aux"; + resets = <&softreset STIH407_COMPO_SOFTRESET>, + <&softreset STIH407_COMPO_SOFTRESET>; + st,vtg = <&vtg_main>, <&vtg_aux>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + compo_main_out: endpoint { + remote-endpoint = <&tvout_in0>; + }; + }; + + port@1 { + reg = <1>; + compo_aux_out: endpoint { + remote-endpoint = <&tvout_in1>; + }; + }; + }; + }; + + tvout: encoder@8d08000 { + compatible = "st,stih407-tvout"; + reg = <0x8d08000 0x1000>; + reg-names = "tvout-reg"; + reset-names = "tvout"; + resets = <&softreset STIH407_HDTVOUT_SOFTRESET>; + assigned-clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, + <&clk_s_d2_flexgen CLK_TMDS_HDMI>, + <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, + <&clk_s_d0_flexgen CLK_PCM_0>, + <&clk_s_d2_flexgen CLK_PIX_HDDAC>, + <&clk_s_d2_flexgen CLK_HDDAC>; + + assigned-clock-parents = <&clk_s_d2_quadfs 0>, + <&clk_tmdsout_hdmi>, <&clk_s_d2_quadfs 0>, + <&clk_s_d0_quadfs 0>, <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 0>; - assigned-clock-rates = <297000000>, - <297000000>, - <0>, - <400000000>, - <400000000>; - - ranges; - - sti-compositor@9d11000 { - compatible = "st,stih407-compositor"; - reg = <0x9d11000 0x1000>; - - clock-names = "compo_main", - "compo_aux", - "pix_main", - "pix_aux", - "pix_gdp1", - "pix_gdp2", - "pix_gdp3", - "pix_gdp4", - "main_parent", - "aux_parent"; - - clocks = <&clk_s_c0_flexgen CLK_COMPO_DVP>, - <&clk_s_c0_flexgen CLK_COMPO_DVP>, - <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>, - <&clk_s_d2_flexgen CLK_PIX_AUX_DISP>, - <&clk_s_d2_flexgen CLK_PIX_GDP1>, - <&clk_s_d2_flexgen CLK_PIX_GDP2>, - <&clk_s_d2_flexgen CLK_PIX_GDP3>, - <&clk_s_d2_flexgen CLK_PIX_GDP4>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>; - - reset-names = "compo-main", "compo-aux"; - resets = <&softreset STIH407_COMPO_SOFTRESET>, - <&softreset STIH407_COMPO_SOFTRESET>; - st,vtg = <&vtg_main>, <&vtg_aux>; - }; - - sti-tvout@8d08000 { - compatible = "st,stih407-tvout"; - reg = <0x8d08000 0x1000>; - reg-names = "tvout-reg"; - reset-names = "tvout"; - resets = <&softreset STIH407_HDTVOUT_SOFTRESET>; + ports { #address-cells = <1>; - #size-cells = <1>; - assigned-clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, - <&clk_s_d2_flexgen CLK_TMDS_HDMI>, - <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, - <&clk_s_d0_flexgen CLK_PCM_0>, - <&clk_s_d2_flexgen CLK_PIX_HDDAC>, - <&clk_s_d2_flexgen CLK_HDDAC>; + #size-cells = <0>; - assigned-clock-parents = <&clk_s_d2_quadfs 0>, - <&clk_tmdsout_hdmi>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d0_quadfs 0>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 0>; + port@0 { + reg = <0>; + tvout_in0: endpoint { + remote-endpoint = <&compo_main_out>; + }; + }; + + port@1 { + reg = <1>; + tvout_in1: endpoint { + remote-endpoint = <&compo_aux_out>; + }; + }; + + port@2 { + reg = <2>; + tvout_out0: endpoint { + remote-endpoint = <&hdmi_in>; + }; + }; + + port@3 { + reg = <3>; + tvout_out1: endpoint { + remote-endpoint = <&hda_in>; + }; + }; }; + }; - sti_hdmi: sti-hdmi@8d04000 { - compatible = "st,stih407-hdmi"; - reg = <0x8d04000 0x1000>; - reg-names = "hdmi-reg"; - #sound-dai-cells = <0>; - interrupts = ; - interrupt-names = "irq"; - clock-names = "pix", - "tmds", - "phy", - "audio", - "main_parent", - "aux_parent"; + sti_hdmi: hdmi@8d04000 { + compatible = "st,stih407-hdmi"; + reg = <0x8d04000 0x1000>; + reg-names = "hdmi-reg"; + #sound-dai-cells = <0>; + interrupts = ; + interrupt-names = "irq"; + clock-names = "pix", + "tmds", + "phy", + "audio", + "main_parent", + "aux_parent"; - clocks = <&clk_s_d2_flexgen 
CLK_PIX_HDMI>, - <&clk_s_d2_flexgen CLK_TMDS_HDMI>, - <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, - <&clk_s_d0_flexgen CLK_PCM_0>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>; + clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, + <&clk_s_d2_flexgen CLK_TMDS_HDMI>, + <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, + <&clk_s_d0_flexgen CLK_PCM_0>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>; - hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; - reset-names = "hdmi"; - resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; - ddc = <&hdmiddc>; + hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; + reset-names = "hdmi"; + resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; + ddc = <&hdmiddc>; + + port { + hdmi_in: endpoint { + remote-endpoint = <&tvout_out0>; + }; }; + }; - sti-hda@8d02000 { - compatible = "st,stih407-hda"; - status = "disabled"; - reg = <0x8d02000 0x400>, <0x92b0120 0x4>; - reg-names = "hda-reg", "video-dacs-ctrl"; - clock-names = "pix", - "hddac", - "main_parent", - "aux_parent"; - clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, - <&clk_s_d2_flexgen CLK_HDDAC>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>; - }; + analog@8d02000 { + compatible = "st,stih407-hda"; + status = "disabled"; + reg = <0x8d02000 0x400>, <0x92b0120 0x4>; + reg-names = "hda-reg", "video-dacs-ctrl"; + clock-names = "pix", + "hddac", + "main_parent", + "aux_parent"; + clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, + <&clk_s_d2_flexgen CLK_HDDAC>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>; - sti-hqvdp@9c00000 { - compatible = "st,stih407-hqvdp"; - reg = <0x9C00000 0x100000>; - clock-names = "hqvdp", "pix_main"; - clocks = <&clk_s_c0_flexgen CLK_MAIN_DISP>, - <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>; - reset-names = "hqvdp"; - resets = <&softreset STIH407_HDQVDP_SOFTRESET>; - st,vtg = <&vtg_main>; + port { + hda_in: endpoint { + remote-endpoint = <&tvout_out1>; + }; }; }; + hqvdp: plane@9c00000 { + compatible = "st,stih407-hqvdp"; + reg = <0x9C00000 0x100000>; + clock-names = "hqvdp", "pix_main"; + clocks = <&clk_s_c0_flexgen CLK_MAIN_DISP>, + <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>; + reset-names = "hqvdp"; + resets = <&softreset STIH407_HDQVDP_SOFTRESET>; + st,vtg = <&vtg_main>; + }; + bdisp0:bdisp@9f10000 { compatible = "st,stih407-bdisp"; reg = <0x9f10000 0x1000>; diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi index 6da961eb3fe5..75b74077c10f 100644 --- a/arch/arm64/boot/dts/freescale/imx95.dtsi +++ b/arch/arm64/boot/dts/freescale/imx95.dtsi @@ -250,6 +250,28 @@ dummy: clock-dummy { clock-output-names = "dummy"; }; + gpu_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-500000000 { + opp-hz = /bits/ 64 <500000000>; + opp-hz-real = /bits/ 64 <500000000>; + opp-microvolt = <920000>; + }; + + opp-800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-hz-real = /bits/ 64 <800000000>; + opp-microvolt = <920000>; + }; + + opp-1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-hz-real = /bits/ 64 <1000000000>; + opp-microvolt = <920000>; + }; + }; + clk_ext1: clock-ext1 { compatible = "fixed-clock"; #clock-cells = <0>; @@ -2139,6 +2161,21 @@ netc_emdio: mdio@0,0 { }; }; + gpu: gpu@4d900000 { + compatible = "nxp,imx95-mali", "arm,mali-valhall-csf"; + reg = <0 0x4d900000 0 0x480000>; + clocks = <&scmi_clk IMX95_CLK_GPU>, <&scmi_clk IMX95_CLK_GPUAPB>; + clock-names = "core", "coregroup"; + interrupts = , + , + ; + interrupt-names = "job", "mmu", "gpu"; + operating-points-v2 = <&gpu_opp_table>; + power-domains = <&scmi_devpd IMX95_PD_GPU>; + #cooling-cells = <2>; + 
dynamic-power-coefficient = <1013>; + }; + ddr-pmu@4e090dc0 { compatible = "fsl,imx95-ddr-pmu", "fsl,imx93-ddr-pmu"; reg = <0x0 0x4e090dc0 0x0 0x200>; diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig index bb01cebc42bf..bdf48ccafcf2 100644 --- a/drivers/accel/Kconfig +++ b/drivers/accel/Kconfig @@ -25,6 +25,7 @@ menuconfig DRM_ACCEL and debugfs). source "drivers/accel/amdxdna/Kconfig" +source "drivers/accel/ethosu/Kconfig" source "drivers/accel/habanalabs/Kconfig" source "drivers/accel/ivpu/Kconfig" source "drivers/accel/qaic/Kconfig" diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile index ffc3fa588666..1d3a7251b950 100644 --- a/drivers/accel/Makefile +++ b/drivers/accel/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/ +obj-$(CONFIG_DRM_ACCEL_ARM_ETHOSU) += ethosu/ obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/ obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/ obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/ diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile index 6797dac65efa..6344aaf523fa 100644 --- a/drivers/accel/amdxdna/Makefile +++ b/drivers/accel/amdxdna/Makefile @@ -14,6 +14,7 @@ amdxdna-y := \ amdxdna_mailbox.o \ amdxdna_mailbox_helper.o \ amdxdna_pci_drv.o \ + amdxdna_pm.o \ amdxdna_sysfs.o \ amdxdna_ubuf.o \ npu1_regs.o \ diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO index ad8ac6e315b6..0e4bbebeaedf 100644 --- a/drivers/accel/amdxdna/TODO +++ b/drivers/accel/amdxdna/TODO @@ -1,2 +1 @@ - Add debugfs support -- Add debug BO support diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c index e9f9b1fa5dc1..42d876a427c5 100644 --- a/drivers/accel/amdxdna/aie2_ctx.c +++ b/drivers/accel/amdxdna/aie2_ctx.c @@ -21,6 +21,7 @@ #include "amdxdna_gem.h" #include "amdxdna_mailbox.h" #include "amdxdna_pci_drv.h" +#include "amdxdna_pm.h" static bool force_cmdlist; module_param(force_cmdlist, bool, 0600); @@ -88,7 +89,7 @@ static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hw goto out; } - ret = aie2_config_cu(hwctx); + ret = aie2_config_cu(hwctx, NULL); if (ret) { XDNA_ERR(xdna, "Config cu failed, ret %d", ret); goto out; @@ -167,14 +168,11 @@ static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg) int aie2_hwctx_resume(struct amdxdna_client *client) { - struct amdxdna_dev *xdna = client->xdna; - /* * The resume path cannot guarantee that mailbox channel can be * regenerated. If this happen, when submit message to this * mailbox channel, error will return. 
*/ - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb); } @@ -184,12 +182,13 @@ aie2_sched_notify(struct amdxdna_sched_job *job) struct dma_fence *fence = job->fence; trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq); + + amdxdna_pm_suspend_put(job->hwctx->client->xdna); job->hwctx->priv->completed++; dma_fence_signal(fence); up(&job->hwctx->priv->job_sem); job->job_done = true; - dma_fence_put(fence); mmput_async(job->mm); aie2_job_put(job); } @@ -204,10 +203,13 @@ aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size) cmd_abo = job->cmd_bo; - if (unlikely(!data)) + if (unlikely(job->job_timeout)) { + amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT); + ret = -EINVAL; goto out; + } - if (unlikely(size != sizeof(u32))) { + if (unlikely(!data) || unlikely(size != sizeof(u32))) { amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT); ret = -EINVAL; goto out; @@ -226,11 +228,10 @@ aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size) } static int -aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size) +aie2_sched_drvcmd_resp_handler(void *handle, void __iomem *data, size_t size) { struct amdxdna_sched_job *job = handle; int ret = 0; - u32 status; if (unlikely(!data)) goto out; @@ -240,8 +241,7 @@ aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size) goto out; } - status = readl(data); - XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status); + job->drv_cmd->result = readl(data); out: aie2_sched_notify(job); @@ -260,6 +260,13 @@ aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size) int ret = 0; cmd_abo = job->cmd_bo; + + if (unlikely(job->job_timeout)) { + amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT); + ret = -EINVAL; + goto out; + } + if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) { amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT); ret = -EINVAL; @@ -314,8 +321,18 @@ aie2_sched_job_run(struct drm_sched_job *sched_job) kref_get(&job->refcnt); fence = dma_fence_get(job->fence); - if (unlikely(!cmd_abo)) { - ret = aie2_sync_bo(hwctx, job, aie2_sched_nocmd_resp_handler); + if (job->drv_cmd) { + switch (job->drv_cmd->opcode) { + case SYNC_DEBUG_BO: + ret = aie2_sync_bo(hwctx, job, aie2_sched_drvcmd_resp_handler); + break; + case ATTACH_DEBUG_BO: + ret = aie2_config_debug_bo(hwctx, job, aie2_sched_drvcmd_resp_handler); + break; + default: + ret = -EINVAL; + break; + } goto out; } @@ -362,6 +379,7 @@ aie2_sched_job_timedout(struct drm_sched_job *sched_job) xdna = hwctx->client->xdna; trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq); + job->job_timeout = true; mutex_lock(&xdna->dev_lock); aie2_hwctx_stop(xdna, hwctx, sched_job); @@ -531,13 +549,12 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) .num_rqs = DRM_SCHED_PRIORITY_COUNT, .credit_limit = HWCTX_MAX_CMDS, .timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT), - .name = hwctx->name, + .name = "amdxdna_js", .dev = xdna->ddev.dev, }; struct drm_gpu_scheduler *sched; struct amdxdna_hwctx_priv *priv; struct amdxdna_gem_obj *heap; - struct amdxdna_dev_hdl *ndev; int i, ret; priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL); @@ -610,10 +627,14 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) goto free_entity; } + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto free_col_list; + ret = aie2_alloc_resource(hwctx); if (ret) { XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret); - goto free_col_list; + 
goto suspend_put; } ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id, @@ -628,10 +649,9 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret); goto release_resource; } + amdxdna_pm_suspend_put(xdna); hwctx->status = HWCTX_STAT_INIT; - ndev = xdna->dev_handle; - ndev->hwctx_num++; init_waitqueue_head(&priv->job_free_wq); XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name); @@ -640,6 +660,8 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) release_resource: aie2_release_resource(hwctx); +suspend_put: + amdxdna_pm_suspend_put(xdna); free_col_list: kfree(hwctx->col_list); free_entity: @@ -662,26 +684,25 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx) { - struct amdxdna_dev_hdl *ndev; struct amdxdna_dev *xdna; int idx; xdna = hwctx->client->xdna; - ndev = xdna->dev_handle; - ndev->hwctx_num--; XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq); - drm_sched_entity_destroy(&hwctx->priv->entity); - aie2_hwctx_wait_for_idle(hwctx); /* Request fw to destroy hwctx and cancel the rest pending requests */ aie2_release_resource(hwctx); + mutex_unlock(&xdna->dev_lock); + drm_sched_entity_destroy(&hwctx->priv->entity); + /* Wait for all submitted jobs to be completed or canceled */ wait_event(hwctx->priv->job_free_wq, atomic64_read(&hwctx->job_submit_cnt) == atomic64_read(&hwctx->job_free_cnt)); + mutex_lock(&xdna->dev_lock); drm_sched_fini(&hwctx->priv->sched); aie2_ctx_syncobj_destroy(hwctx); @@ -697,6 +718,14 @@ void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx) kfree(hwctx->cus); } +static int aie2_config_cu_resp_handler(void *handle, void __iomem *data, size_t size) +{ + struct amdxdna_hwctx *hwctx = handle; + + amdxdna_pm_suspend_put(hwctx->client->xdna); + return 0; +} + static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size) { struct amdxdna_hwctx_param_config_cu *config = buf; @@ -728,10 +757,14 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size if (!hwctx->cus) return -ENOMEM; - ret = aie2_config_cu(hwctx); + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto free_cus; + + ret = aie2_config_cu(hwctx, aie2_config_cu_resp_handler); if (ret) { XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret); - goto free_cus; + goto pm_suspend_put; } wmb(); /* To avoid locking in command submit when check status */ @@ -739,12 +772,82 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size return 0; +pm_suspend_put: + amdxdna_pm_suspend_put(xdna); free_cus: kfree(hwctx->cus); hwctx->cus = NULL; return ret; } +static void aie2_cmd_wait(struct amdxdna_hwctx *hwctx, u64 seq) +{ + struct dma_fence *out_fence = aie2_cmd_get_out_fence(hwctx, seq); + + if (!out_fence) { + XDNA_ERR(hwctx->client->xdna, "Failed to get fence"); + return; + } + + dma_fence_wait_timeout(out_fence, false, MAX_SCHEDULE_TIMEOUT); + dma_fence_put(out_fence); +} + +static int aie2_hwctx_cfg_debug_bo(struct amdxdna_hwctx *hwctx, u32 bo_hdl, + bool attach) +{ + struct amdxdna_client *client = hwctx->client; + struct amdxdna_dev *xdna = client->xdna; + struct amdxdna_drv_cmd cmd = { 0 }; + struct amdxdna_gem_obj *abo; + u64 seq; + int ret; + + abo = amdxdna_gem_get_obj(client, bo_hdl, AMDXDNA_BO_DEV); + if (!abo) { + XDNA_ERR(xdna, "Get bo %d failed", bo_hdl); + return -EINVAL; + } + + if (attach) { + if (abo->assigned_hwctx != AMDXDNA_INVALID_CTX_HANDLE) { + ret = -EBUSY; + goto put_obj; + } + cmd.opcode = 
ATTACH_DEBUG_BO; + } else { + if (abo->assigned_hwctx != hwctx->id) { + ret = -EINVAL; + goto put_obj; + } + cmd.opcode = DETACH_DEBUG_BO; + } + + ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE, + &bo_hdl, 1, hwctx->id, &seq); + if (ret) { + XDNA_ERR(xdna, "Submit command failed"); + goto put_obj; + } + + aie2_cmd_wait(hwctx, seq); + if (cmd.result) { + XDNA_ERR(xdna, "Response failure 0x%x", cmd.result); + goto put_obj; + } + + if (attach) + abo->assigned_hwctx = hwctx->id; + else + abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE; + + XDNA_DBG(xdna, "Config debug BO %d to %s", bo_hdl, hwctx->name); + +put_obj: + amdxdna_gem_put_obj(abo); + return ret; +} + int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size) { struct amdxdna_dev *xdna = hwctx->client->xdna; @@ -754,14 +857,40 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu case DRM_AMDXDNA_HWCTX_CONFIG_CU: return aie2_hwctx_cu_config(hwctx, buf, size); case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF: + return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, true); case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF: - return -EOPNOTSUPP; + return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, false); default: XDNA_DBG(xdna, "Not supported type %d", type); return -EOPNOTSUPP; } } +int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl) +{ + struct amdxdna_client *client = hwctx->client; + struct amdxdna_dev *xdna = client->xdna; + struct amdxdna_drv_cmd cmd = { 0 }; + u64 seq; + int ret; + + cmd.opcode = SYNC_DEBUG_BO; + ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE, + &debug_bo_hdl, 1, hwctx->id, &seq); + if (ret) { + XDNA_ERR(xdna, "Submit command failed"); + return ret; + } + + aie2_cmd_wait(hwctx, seq); + if (cmd.result) { + XDNA_ERR(xdna, "Response failure 0x%x", cmd.result); + return -EINVAL; + } + + return 0; +} + static int aie2_populate_range(struct amdxdna_gem_obj *abo) { struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); @@ -862,11 +991,15 @@ int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, goto free_chain; } + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto cleanup_job; + retry: ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx); if (ret) { XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret); - goto cleanup_job; + goto suspend_put; } for (i = 0; i < job->bo_cnt; i++) { @@ -874,7 +1007,7 @@ int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, if (ret) { XDNA_WARN(xdna, "Failed to reserve fences %d", ret); drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx); - goto cleanup_job; + goto suspend_put; } } @@ -889,12 +1022,12 @@ int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); } else if (time_after(jiffies, timeout)) { ret = -ETIME; - goto cleanup_job; + goto suspend_put; } ret = aie2_populate_range(abo); if (ret) - goto cleanup_job; + goto suspend_put; goto retry; } } @@ -920,6 +1053,8 @@ int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, return 0; +suspend_put: + amdxdna_pm_suspend_put(xdna); cleanup_job: drm_sched_job_cleanup(&job->base); free_chain: diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c index 5ee905632a39..d452008ec4f4 100644 --- a/drivers/accel/amdxdna/aie2_error.c +++ b/drivers/accel/amdxdna/aie2_error.c @@ -13,6 +13,7 @@ #include "aie2_msg_priv.h" #include "aie2_pci.h" +#include 
"amdxdna_error.h" #include "amdxdna_mailbox.h" #include "amdxdna_pci_drv.h" @@ -46,6 +47,7 @@ enum aie_module_type { AIE_MEM_MOD = 0, AIE_CORE_MOD, AIE_PL_MOD, + AIE_UNKNOWN_MOD, }; enum aie_error_category { @@ -143,6 +145,31 @@ static const struct aie_event_category aie_ml_shim_tile_event_cat[] = { EVENT_CATEGORY(74U, AIE_ERROR_LOCK), }; +static const enum amdxdna_error_num aie_cat_err_num_map[] = { + [AIE_ERROR_SATURATION] = AMDXDNA_ERROR_NUM_AIE_SATURATION, + [AIE_ERROR_FP] = AMDXDNA_ERROR_NUM_AIE_FP, + [AIE_ERROR_STREAM] = AMDXDNA_ERROR_NUM_AIE_STREAM, + [AIE_ERROR_ACCESS] = AMDXDNA_ERROR_NUM_AIE_ACCESS, + [AIE_ERROR_BUS] = AMDXDNA_ERROR_NUM_AIE_BUS, + [AIE_ERROR_INSTRUCTION] = AMDXDNA_ERROR_NUM_AIE_INSTRUCTION, + [AIE_ERROR_ECC] = AMDXDNA_ERROR_NUM_AIE_ECC, + [AIE_ERROR_LOCK] = AMDXDNA_ERROR_NUM_AIE_LOCK, + [AIE_ERROR_DMA] = AMDXDNA_ERROR_NUM_AIE_DMA, + [AIE_ERROR_MEM_PARITY] = AMDXDNA_ERROR_NUM_AIE_MEM_PARITY, + [AIE_ERROR_UNKNOWN] = AMDXDNA_ERROR_NUM_UNKNOWN, +}; + +static_assert(ARRAY_SIZE(aie_cat_err_num_map) == AIE_ERROR_UNKNOWN + 1); + +static const enum amdxdna_error_module aie_err_mod_map[] = { + [AIE_MEM_MOD] = AMDXDNA_ERROR_MODULE_AIE_MEMORY, + [AIE_CORE_MOD] = AMDXDNA_ERROR_MODULE_AIE_CORE, + [AIE_PL_MOD] = AMDXDNA_ERROR_MODULE_AIE_PL, + [AIE_UNKNOWN_MOD] = AMDXDNA_ERROR_MODULE_UNKNOWN, +}; + +static_assert(ARRAY_SIZE(aie_err_mod_map) == AIE_UNKNOWN_MOD + 1); + static enum aie_error_category aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type) { @@ -176,12 +203,40 @@ aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type) if (event_id != lut[i].event_id) continue; + if (lut[i].category > AIE_ERROR_UNKNOWN) + return AIE_ERROR_UNKNOWN; + return lut[i].category; } return AIE_ERROR_UNKNOWN; } +static void aie2_update_last_async_error(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err) +{ + struct aie_error *errs = err_info; + enum amdxdna_error_module err_mod; + enum aie_error_category aie_err; + enum amdxdna_error_num err_num; + struct aie_error *last_err; + + last_err = &errs[num_err - 1]; + if (last_err->mod_type >= AIE_UNKNOWN_MOD) { + err_num = aie_cat_err_num_map[AIE_ERROR_UNKNOWN]; + err_mod = aie_err_mod_map[AIE_UNKNOWN_MOD]; + } else { + aie_err = aie_get_error_category(last_err->row, + last_err->event_id, + last_err->mod_type); + err_num = aie_cat_err_num_map[aie_err]; + err_mod = aie_err_mod_map[last_err->mod_type]; + } + + ndev->last_async_err.err_code = AMDXDNA_ERROR_ENCODE(err_num, err_mod); + ndev->last_async_err.ts_us = ktime_to_us(ktime_get_real()); + ndev->last_async_err.ex_err_code = AMDXDNA_EXTRA_ERR_ENCODE(last_err->row, last_err->col); +} + static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err) { struct aie_error *errs = err_info; @@ -264,29 +319,14 @@ static void aie2_error_worker(struct work_struct *err_work) } mutex_lock(&xdna->dev_lock); + aie2_update_last_async_error(e->ndev, info->payload, info->err_cnt); + /* Re-sent this event to firmware */ if (aie2_error_event_send(e)) XDNA_WARN(xdna, "Unable to register async event"); mutex_unlock(&xdna->dev_lock); } -int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev) -{ - struct amdxdna_dev *xdna = ndev->xdna; - struct async_event *e; - int i, ret; - - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - for (i = 0; i < ndev->async_events->event_cnt; i++) { - e = &ndev->async_events->event[i]; - ret = aie2_error_event_send(e); - if (ret) - return ret; - } - - return 0; -} - void 
aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev) { struct amdxdna_dev *xdna = ndev->xdna; @@ -341,6 +381,10 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev) e->size = ASYNC_BUF_SIZE; e->resp.status = MAX_AIE2_STATUS_CODE; INIT_WORK(&e->work, aie2_error_worker); + + ret = aie2_error_event_send(e); + if (ret) + goto free_wq; } ndev->async_events = events; @@ -349,6 +393,8 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev) events->event_cnt, events->size); return 0; +free_wq: + destroy_workqueue(events->wq); free_buf: dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf, events->addr, DMA_FROM_DEVICE); @@ -356,3 +402,18 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev) kfree(events); return ret; } + +int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev, struct amdxdna_drm_get_array *args) +{ + struct amdxdna_dev *xdna = ndev->xdna; + + drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); + + args->num_element = 1; + args->element_size = sizeof(ndev->last_async_err); + if (copy_to_user(u64_to_user_ptr(args->buffer), + &ndev->last_async_err, args->element_size)) + return -EFAULT; + + return 0; +} diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c index 9caad083543d..d493bb1c3360 100644 --- a/drivers/accel/amdxdna/aie2_message.c +++ b/drivers/accel/amdxdna/aie2_message.c @@ -27,6 +27,8 @@ #define DECLARE_AIE2_MSG(name, op) \ DECLARE_XDNA_MSG_COMMON(name, op, MAX_AIE2_STATUS_CODE) +#define EXEC_MSG_OPS(xdna) ((xdna)->dev_handle->exec_msg_ops) + static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev, struct xdna_mailbox_msg *msg) { @@ -37,7 +39,7 @@ static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev, if (!ndev->mgmt_chann) return -ENODEV; - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); + drm_WARN_ON(&xdna->ddev, xdna->rpm_on && !mutex_is_locked(&xdna->dev_lock)); ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg); if (ret == -ETIME) { xdna_mailbox_stop_channel(ndev->mgmt_chann); @@ -45,7 +47,7 @@ static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev, ndev->mgmt_chann = NULL; } - if (!ret && *hdl->data != AIE2_STATUS_SUCCESS) { + if (!ret && *hdl->status != AIE2_STATUS_SUCCESS) { XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x", msg->opcode, *hdl->data); ret = -EINVAL; @@ -208,6 +210,14 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct hwctx->fw_ctx_id = resp.context_id; WARN_ONCE(hwctx->fw_ctx_id == -1, "Unexpected context id"); + if (ndev->force_preempt_enabled) { + ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FORCE_PREEMPT, &hwctx->fw_ctx_id); + if (ret) { + XDNA_ERR(xdna, "failed to enable force preempt %d", ret); + return ret; + } + } + cq_pair = &resp.cq_pair[0]; x2i.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.head_addr); x2i.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.tail_addr); @@ -233,6 +243,7 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct ret = -EINVAL; goto out_destroy_context; } + ndev->hwctx_num++; XDNA_DBG(xdna, "%s mailbox channel irq: %d, msix_id: %d", hwctx->name, ret, resp.msix_id); @@ -267,6 +278,7 @@ int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwc hwctx->fw_ctx_id); hwctx->priv->mbox_chann = NULL; hwctx->fw_ctx_id = -1; + ndev->hwctx_num--; return ret; } @@ -332,11 +344,6 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, goto fail; } - if (resp.status != 
AIE2_STATUS_SUCCESS) { - XDNA_ERR(xdna, "Query NPU status failed, status 0x%x", resp.status); - ret = -EINVAL; - goto fail; - } XDNA_DBG(xdna, "Query NPU status completed"); if (size < resp.size) { @@ -358,6 +365,55 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, return ret; } +int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev, + char __user *buf, u32 size, + struct amdxdna_drm_query_telemetry_header *header) +{ + DECLARE_AIE2_MSG(get_telemetry, MSG_OP_GET_TELEMETRY); + struct amdxdna_dev *xdna = ndev->xdna; + dma_addr_t dma_addr; + u8 *addr; + int ret; + + if (header->type >= MAX_TELEMETRY_TYPE) + return -EINVAL; + + addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr, + DMA_FROM_DEVICE, GFP_KERNEL); + if (!addr) + return -ENOMEM; + + req.buf_addr = dma_addr; + req.buf_size = size; + req.type = header->type; + + drm_clflush_virt_range(addr, size); /* device can access */ + ret = aie2_send_mgmt_msg_wait(ndev, &msg); + if (ret) { + XDNA_ERR(xdna, "Query telemetry failed, status %d", ret); + goto free_buf; + } + + if (size < resp.size) { + ret = -EINVAL; + XDNA_ERR(xdna, "Bad buffer size. Available: %u. Needs: %u", size, resp.size); + goto free_buf; + } + + if (copy_to_user(buf, addr, resp.size)) { + ret = -EFAULT; + XDNA_ERR(xdna, "Failed to copy telemetry to user space"); + goto free_buf; + } + + header->major = resp.major; + header->minor = resp.minor; + +free_buf: + dma_free_noncoherent(xdna->ddev.dev, size, addr, dma_addr, DMA_FROM_DEVICE); + return ret; +} + int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size, void *handle, int (*cb)(void*, void __iomem *, size_t)) { @@ -377,15 +433,17 @@ int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT); } -int aie2_config_cu(struct amdxdna_hwctx *hwctx) +int aie2_config_cu(struct amdxdna_hwctx *hwctx, + int (*notify_cb)(void *, void __iomem *, size_t)) { struct mailbox_channel *chann = hwctx->priv->mbox_chann; struct amdxdna_dev *xdna = hwctx->client->xdna; u32 shift = xdna->dev_info->dev_mem_buf_shift; - DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU); + struct config_cu_req req = { 0 }; + struct xdna_mailbox_msg msg; struct drm_gem_object *gobj; struct amdxdna_gem_obj *abo; - int ret, i; + int i; if (!chann) return -ENODEV; @@ -423,80 +481,411 @@ int aie2_config_cu(struct amdxdna_hwctx *hwctx) } req.num_cus = hwctx->cus->num_cus; - ret = xdna_send_msg_wait(xdna, chann, &msg); - if (ret == -ETIME) - aie2_destroy_context(xdna->dev_handle, hwctx); + msg.send_data = (u8 *)&req; + msg.send_size = sizeof(req); + msg.handle = hwctx; + msg.opcode = MSG_OP_CONFIG_CU; + msg.notify_cb = notify_cb; + return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT); +} - if (resp.status == AIE2_STATUS_SUCCESS) { - XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret); - return 0; +static int aie2_init_exec_cu_req(struct amdxdna_gem_obj *cmd_bo, void *req, + size_t *size, u32 *msg_op) +{ + struct execute_buffer_req *cu_req = req; + u32 cmd_len; + void *cmd; + + cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + if (cmd_len > sizeof(cu_req->payload)) + return -EINVAL; + + cu_req->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (cu_req->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + memcpy(cu_req->payload, cmd, cmd_len); + + *size = sizeof(*cu_req); + *msg_op = MSG_OP_EXECUTE_BUFFER_CF; + return 0; +} + +static int aie2_init_exec_dpu_req(struct amdxdna_gem_obj *cmd_bo, void *req, + size_t *size, u32 
*msg_op) +{ + struct exec_dpu_req *dpu_req = req; + struct amdxdna_cmd_start_npu *sn; + u32 cmd_len; + + sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + if (cmd_len - sizeof(*sn) > sizeof(dpu_req->payload)) + return -EINVAL; + + dpu_req->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (dpu_req->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + dpu_req->inst_buf_addr = sn->buffer; + dpu_req->inst_size = sn->buffer_size; + dpu_req->inst_prop_cnt = sn->prop_count; + memcpy(dpu_req->payload, sn->prop_args, cmd_len - sizeof(*sn)); + + *size = sizeof(*dpu_req); + *msg_op = MSG_OP_EXEC_DPU; + return 0; +} + +static void aie2_init_exec_chain_req(void *req, u64 slot_addr, size_t size, u32 cmd_cnt) +{ + struct cmd_chain_req *chain_req = req; + + chain_req->buf_addr = slot_addr; + chain_req->buf_size = size; + chain_req->count = cmd_cnt; +} + +static void aie2_init_npu_chain_req(void *req, u64 slot_addr, size_t size, u32 cmd_cnt) +{ + struct cmd_chain_npu_req *npu_chain_req = req; + + npu_chain_req->flags = 0; + npu_chain_req->reserved = 0; + npu_chain_req->buf_addr = slot_addr; + npu_chain_req->buf_size = size; + npu_chain_req->count = cmd_cnt; +} + +static int +aie2_cmdlist_fill_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size) +{ + struct cmd_chain_slot_execbuf_cf *cf_slot = slot; + u32 cmd_len; + void *cmd; + + cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + if (*size < sizeof(*cf_slot) + cmd_len) + return -EINVAL; + + cf_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (cf_slot->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + cf_slot->arg_cnt = cmd_len / sizeof(u32); + memcpy(cf_slot->args, cmd, cmd_len); + /* Accurate slot size to hint firmware to do necessary copy */ + *size = sizeof(*cf_slot) + cmd_len; + return 0; +} + +static int +aie2_cmdlist_fill_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size) +{ + struct cmd_chain_slot_dpu *dpu_slot = slot; + struct amdxdna_cmd_start_npu *sn; + u32 cmd_len; + u32 arg_sz; + + sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + arg_sz = cmd_len - sizeof(*sn); + if (cmd_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE) + return -EINVAL; + + if (*size < sizeof(*dpu_slot) + arg_sz) + return -EINVAL; + + dpu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (dpu_slot->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + dpu_slot->inst_buf_addr = sn->buffer; + dpu_slot->inst_size = sn->buffer_size; + dpu_slot->inst_prop_cnt = sn->prop_count; + dpu_slot->arg_cnt = arg_sz / sizeof(u32); + memcpy(dpu_slot->args, sn->prop_args, arg_sz); + + /* Accurate slot size to hint firmware to do necessary copy */ + *size = sizeof(*dpu_slot) + arg_sz; + return 0; +} + +static int aie2_cmdlist_unsupp(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size) +{ + return -EOPNOTSUPP; +} + +static u32 aie2_get_chain_msg_op(u32 cmd_op) +{ + switch (cmd_op) { + case ERT_START_CU: + return MSG_OP_CHAIN_EXEC_BUFFER_CF; + case ERT_START_NPU: + return MSG_OP_CHAIN_EXEC_DPU; + default: + break; + } + + return MSG_OP_MAX_OPCODE; +} + +static struct aie2_exec_msg_ops legacy_exec_message_ops = { + .init_cu_req = aie2_init_exec_cu_req, + .init_dpu_req = aie2_init_exec_dpu_req, + .init_chain_req = aie2_init_exec_chain_req, + .fill_cf_slot = aie2_cmdlist_fill_cf, + .fill_dpu_slot = aie2_cmdlist_fill_dpu, + .fill_preempt_slot = aie2_cmdlist_unsupp, + .fill_elf_slot = aie2_cmdlist_unsupp, + .get_chain_msg_op = aie2_get_chain_msg_op, +}; + +static int +aie2_cmdlist_fill_npu_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size) +{ + struct cmd_chain_slot_npu *npu_slot 
= slot; + u32 cmd_len; + void *cmd; + + cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + if (*size < sizeof(*npu_slot) + cmd_len) + return -EINVAL; + + npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (npu_slot->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + memset(npu_slot, 0, sizeof(*npu_slot)); + npu_slot->type = EXEC_NPU_TYPE_NON_ELF; + npu_slot->arg_cnt = cmd_len / sizeof(u32); + memcpy(npu_slot->args, cmd, cmd_len); + + *size = sizeof(*npu_slot) + cmd_len; + return 0; +} + +static int +aie2_cmdlist_fill_npu_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size) +{ + struct cmd_chain_slot_npu *npu_slot = slot; + struct amdxdna_cmd_start_npu *sn; + u32 cmd_len; + u32 arg_sz; + + sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + arg_sz = cmd_len - sizeof(*sn); + if (cmd_len < sizeof(*sn) || arg_sz > MAX_NPU_ARGS_SIZE) + return -EINVAL; + + if (*size < sizeof(*npu_slot) + arg_sz) + return -EINVAL; + + npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (npu_slot->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + memset(npu_slot, 0, sizeof(*npu_slot)); + npu_slot->type = EXEC_NPU_TYPE_PARTIAL_ELF; + npu_slot->inst_buf_addr = sn->buffer; + npu_slot->inst_size = sn->buffer_size; + npu_slot->inst_prop_cnt = sn->prop_count; + npu_slot->arg_cnt = arg_sz / sizeof(u32); + memcpy(npu_slot->args, sn->prop_args, arg_sz); + + *size = sizeof(*npu_slot) + arg_sz; + return 0; +} + +static int +aie2_cmdlist_fill_npu_preempt(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size) +{ + struct cmd_chain_slot_npu *npu_slot = slot; + struct amdxdna_cmd_preempt_data *pd; + u32 cmd_len; + u32 arg_sz; + + pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + arg_sz = cmd_len - sizeof(*pd); + if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE) + return -EINVAL; + + if (*size < sizeof(*npu_slot) + arg_sz) + return -EINVAL; + + npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (npu_slot->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + memset(npu_slot, 0, sizeof(*npu_slot)); + npu_slot->type = EXEC_NPU_TYPE_PREEMPT; + npu_slot->inst_buf_addr = pd->inst_buf; + npu_slot->save_buf_addr = pd->save_buf; + npu_slot->restore_buf_addr = pd->restore_buf; + npu_slot->inst_size = pd->inst_size; + npu_slot->save_size = pd->save_size; + npu_slot->restore_size = pd->restore_size; + npu_slot->inst_prop_cnt = pd->inst_prop_cnt; + npu_slot->arg_cnt = arg_sz / sizeof(u32); + memcpy(npu_slot->args, pd->prop_args, arg_sz); + + *size = sizeof(*npu_slot) + arg_sz; + return 0; +} + +static int +aie2_cmdlist_fill_npu_elf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size) +{ + struct cmd_chain_slot_npu *npu_slot = slot; + struct amdxdna_cmd_preempt_data *pd; + u32 cmd_len; + u32 arg_sz; + + pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + arg_sz = cmd_len - sizeof(*pd); + if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE) + return -EINVAL; + + if (*size < sizeof(*npu_slot) + arg_sz) + return -EINVAL; + + memset(npu_slot, 0, sizeof(*npu_slot)); + npu_slot->type = EXEC_NPU_TYPE_ELF; + npu_slot->inst_buf_addr = pd->inst_buf; + npu_slot->save_buf_addr = pd->save_buf; + npu_slot->restore_buf_addr = pd->restore_buf; + npu_slot->inst_size = pd->inst_size; + npu_slot->save_size = pd->save_size; + npu_slot->restore_size = pd->restore_size; + npu_slot->inst_prop_cnt = pd->inst_prop_cnt; + npu_slot->arg_cnt = 1; + npu_slot->args[0] = AIE2_EXEC_BUFFER_KERNEL_OP_TXN; + + *size = struct_size(npu_slot, args, npu_slot->arg_cnt); + return 0; +} + +static u32 aie2_get_npu_chain_msg_op(u32 cmd_op) +{ + return 
MSG_OP_CHAIN_EXEC_NPU; +} + +static struct aie2_exec_msg_ops npu_exec_message_ops = { + .init_cu_req = aie2_init_exec_cu_req, + .init_dpu_req = aie2_init_exec_dpu_req, + .init_chain_req = aie2_init_npu_chain_req, + .fill_cf_slot = aie2_cmdlist_fill_npu_cf, + .fill_dpu_slot = aie2_cmdlist_fill_npu_dpu, + .fill_preempt_slot = aie2_cmdlist_fill_npu_preempt, + .fill_elf_slot = aie2_cmdlist_fill_npu_elf, + .get_chain_msg_op = aie2_get_npu_chain_msg_op, +}; + +static int aie2_init_exec_req(void *req, struct amdxdna_gem_obj *cmd_abo, + size_t *size, u32 *msg_op) +{ + struct amdxdna_dev *xdna = cmd_abo->client->xdna; + int ret; + u32 op; + + + op = amdxdna_cmd_get_op(cmd_abo); + switch (op) { + case ERT_START_CU: + ret = EXEC_MSG_OPS(xdna)->init_cu_req(cmd_abo, req, size, msg_op); + if (ret) { + XDNA_DBG(xdna, "Init CU req failed ret %d", ret); + return ret; + } + break; + case ERT_START_NPU: + ret = EXEC_MSG_OPS(xdna)->init_dpu_req(cmd_abo, req, size, msg_op); + if (ret) { + XDNA_DBG(xdna, "Init DPU req failed ret %d", ret); + return ret; + } + + break; + default: + XDNA_ERR(xdna, "Unsupported op %d", op); + ret = -EOPNOTSUPP; + break; } - XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d", - msg.opcode, resp.status, ret); return ret; } +static int +aie2_cmdlist_fill_slot(void *slot, struct amdxdna_gem_obj *cmd_abo, + size_t *size, u32 *cmd_op) +{ + struct amdxdna_dev *xdna = cmd_abo->client->xdna; + int ret; + u32 op; + + op = amdxdna_cmd_get_op(cmd_abo); + if (*cmd_op == ERT_INVALID_CMD) + *cmd_op = op; + else if (op != *cmd_op) + return -EINVAL; + + switch (op) { + case ERT_START_CU: + ret = EXEC_MSG_OPS(xdna)->fill_cf_slot(cmd_abo, slot, size); + break; + case ERT_START_NPU: + ret = EXEC_MSG_OPS(xdna)->fill_dpu_slot(cmd_abo, slot, size); + break; + case ERT_START_NPU_PREEMPT: + if (!AIE2_FEATURE_ON(xdna->dev_handle, AIE2_PREEMPT)) + return -EOPNOTSUPP; + ret = EXEC_MSG_OPS(xdna)->fill_preempt_slot(cmd_abo, slot, size); + break; + case ERT_START_NPU_PREEMPT_ELF: + if (!AIE2_FEATURE_ON(xdna->dev_handle, AIE2_PREEMPT)) + return -EOPNOTSUPP; + ret = EXEC_MSG_OPS(xdna)->fill_elf_slot(cmd_abo, slot, size); + break; + default: + XDNA_INFO(xdna, "Unsupported op %d", op); + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +void aie2_msg_init(struct amdxdna_dev_hdl *ndev) +{ + if (AIE2_FEATURE_ON(ndev, AIE2_NPU_COMMAND)) + ndev->exec_msg_ops = &npu_exec_message_ops; + else + ndev->exec_msg_ops = &legacy_exec_message_ops; +} + +static inline struct amdxdna_gem_obj * +aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job) +{ + int idx = get_job_idx(job->seq); + + return job->hwctx->priv->cmd_buf[idx]; +} + int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, int (*notify_cb)(void *, void __iomem *, size_t)) { struct mailbox_channel *chann = hwctx->priv->mbox_chann; struct amdxdna_dev *xdna = hwctx->client->xdna; struct amdxdna_gem_obj *cmd_abo = job->cmd_bo; - union { - struct execute_buffer_req ebuf; - struct exec_dpu_req dpu; - } req; struct xdna_mailbox_msg msg; - u32 payload_len; - void *payload; - int cu_idx; + union exec_req req; int ret; - u32 op; if (!chann) return -ENODEV; - payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len); - if (!payload) { - XDNA_ERR(xdna, "Invalid command, cannot get payload"); - return -EINVAL; - } + ret = aie2_init_exec_req(&req, cmd_abo, &msg.send_size, &msg.opcode); + if (ret) + return ret; - cu_idx = amdxdna_cmd_get_cu_idx(cmd_abo); - if (cu_idx < 0) { - XDNA_DBG(xdna, "Invalid cu idx"); - return -EINVAL; - } - - op = 
amdxdna_cmd_get_op(cmd_abo); - switch (op) { - case ERT_START_CU: - if (unlikely(payload_len > sizeof(req.ebuf.payload))) - XDNA_DBG(xdna, "Invalid ebuf payload len: %d", payload_len); - req.ebuf.cu_idx = cu_idx; - memcpy(req.ebuf.payload, payload, sizeof(req.ebuf.payload)); - msg.send_size = sizeof(req.ebuf); - msg.opcode = MSG_OP_EXECUTE_BUFFER_CF; - break; - case ERT_START_NPU: { - struct amdxdna_cmd_start_npu *sn = payload; - - if (unlikely(payload_len - sizeof(*sn) > sizeof(req.dpu.payload))) - XDNA_DBG(xdna, "Invalid dpu payload len: %d", payload_len); - req.dpu.inst_buf_addr = sn->buffer; - req.dpu.inst_size = sn->buffer_size; - req.dpu.inst_prop_cnt = sn->prop_count; - req.dpu.cu_idx = cu_idx; - memcpy(req.dpu.payload, sn->prop_args, sizeof(req.dpu.payload)); - msg.send_size = sizeof(req.dpu); - msg.opcode = MSG_OP_EXEC_DPU; - break; - } - default: - XDNA_DBG(xdna, "Invalid ERT cmd op code: %d", op); - return -EINVAL; - } msg.handle = job; msg.notify_cb = notify_cb; msg.send_data = (u8 *)&req; @@ -512,135 +901,6 @@ int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, return 0; } -static int -aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset, - struct amdxdna_gem_obj *abo, u32 *size) -{ - struct cmd_chain_slot_execbuf_cf *buf = cmd_buf + offset; - int cu_idx = amdxdna_cmd_get_cu_idx(abo); - u32 payload_len; - void *payload; - - if (cu_idx < 0) - return -EINVAL; - - payload = amdxdna_cmd_get_payload(abo, &payload_len); - if (!payload) - return -EINVAL; - - if (!slot_has_space(*buf, offset, payload_len)) - return -ENOSPC; - - buf->cu_idx = cu_idx; - buf->arg_cnt = payload_len / sizeof(u32); - memcpy(buf->args, payload, payload_len); - /* Accurate buf size to hint firmware to do necessary copy */ - *size = sizeof(*buf) + payload_len; - return 0; -} - -static int -aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset, - struct amdxdna_gem_obj *abo, u32 *size) -{ - struct cmd_chain_slot_dpu *buf = cmd_buf + offset; - int cu_idx = amdxdna_cmd_get_cu_idx(abo); - struct amdxdna_cmd_start_npu *sn; - u32 payload_len; - void *payload; - u32 arg_sz; - - if (cu_idx < 0) - return -EINVAL; - - payload = amdxdna_cmd_get_payload(abo, &payload_len); - if (!payload) - return -EINVAL; - sn = payload; - arg_sz = payload_len - sizeof(*sn); - if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE) - return -EINVAL; - - if (!slot_has_space(*buf, offset, arg_sz)) - return -ENOSPC; - - buf->inst_buf_addr = sn->buffer; - buf->inst_size = sn->buffer_size; - buf->inst_prop_cnt = sn->prop_count; - buf->cu_idx = cu_idx; - buf->arg_cnt = arg_sz / sizeof(u32); - memcpy(buf->args, sn->prop_args, arg_sz); - - /* Accurate buf size to hint firmware to do necessary copy */ - *size = sizeof(*buf) + arg_sz; - return 0; -} - -static int -aie2_cmdlist_fill_one_slot(u32 op, struct amdxdna_gem_obj *cmdbuf_abo, u32 offset, - struct amdxdna_gem_obj *abo, u32 *size) -{ - u32 this_op = amdxdna_cmd_get_op(abo); - void *cmd_buf = cmdbuf_abo->mem.kva; - int ret; - - if (this_op != op) { - ret = -EINVAL; - goto done; - } - - switch (op) { - case ERT_START_CU: - ret = aie2_cmdlist_fill_one_slot_cf(cmd_buf, offset, abo, size); - break; - case ERT_START_NPU: - ret = aie2_cmdlist_fill_one_slot_dpu(cmd_buf, offset, abo, size); - break; - default: - ret = -EOPNOTSUPP; - } - -done: - if (ret) { - XDNA_ERR(abo->client->xdna, "Can't fill slot for cmd op %d ret %d", - op, ret); - } - return ret; -} - -static inline struct amdxdna_gem_obj * -aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job) -{ - int 
idx = get_job_idx(job->seq); - - return job->hwctx->priv->cmd_buf[idx]; -} - -static void -aie2_cmdlist_prepare_request(struct cmd_chain_req *req, - struct amdxdna_gem_obj *cmdbuf_abo, u32 size, u32 cnt) -{ - req->buf_addr = cmdbuf_abo->mem.dev_addr; - req->buf_size = size; - req->count = cnt; - drm_clflush_virt_range(cmdbuf_abo->mem.kva, size); - XDNA_DBG(cmdbuf_abo->client->xdna, "Command buf addr 0x%llx size 0x%x count %d", - req->buf_addr, size, cnt); -} - -static inline u32 -aie2_cmd_op_to_msg_op(u32 op) -{ - switch (op) { - case ERT_START_CU: - return MSG_OP_CHAIN_EXEC_BUFFER_CF; - case ERT_START_NPU: - return MSG_OP_CHAIN_EXEC_DPU; - default: - return MSG_OP_MAX_OPCODE; - } -} - int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, int (*notify_cb)(void *, void __iomem *, size_t)) @@ -649,12 +909,13 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx, struct mailbox_channel *chann = hwctx->priv->mbox_chann; struct amdxdna_client *client = hwctx->client; struct amdxdna_gem_obj *cmd_abo = job->cmd_bo; + struct amdxdna_dev *xdna = client->xdna; struct amdxdna_cmd_chain *payload; struct xdna_mailbox_msg msg; - struct cmd_chain_req req; + union exec_chain_req req; u32 payload_len; u32 offset = 0; - u32 size; + size_t size; int ret; u32 op; u32 i; @@ -665,41 +926,42 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx, payload_len < struct_size(payload, data, payload->command_count)) return -EINVAL; + op = ERT_INVALID_CMD; for (i = 0; i < payload->command_count; i++) { u32 boh = (u32)(payload->data[i]); struct amdxdna_gem_obj *abo; abo = amdxdna_gem_get_obj(client, boh, AMDXDNA_BO_CMD); if (!abo) { - XDNA_ERR(client->xdna, "Failed to find cmd BO %d", boh); + XDNA_ERR(xdna, "Failed to find cmd BO %d", boh); return -ENOENT; } - /* All sub-cmd should have same op, use the first one. 
*/ - if (i == 0) - op = amdxdna_cmd_get_op(abo); - - ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, offset, abo, &size); + size = cmdbuf_abo->mem.size - offset; + ret = aie2_cmdlist_fill_slot(cmdbuf_abo->mem.kva + offset, + abo, &size, &op); amdxdna_gem_put_obj(abo); if (ret) - return -EINVAL; + return ret; offset += size; } - - /* The offset is the accumulated total size of the cmd buffer */ - aie2_cmdlist_prepare_request(&req, cmdbuf_abo, offset, payload->command_count); - - msg.opcode = aie2_cmd_op_to_msg_op(op); + msg.opcode = EXEC_MSG_OPS(xdna)->get_chain_msg_op(op); if (msg.opcode == MSG_OP_MAX_OPCODE) return -EOPNOTSUPP; + + /* The offset is the accumulated total size of the cmd buffer */ + EXEC_MSG_OPS(xdna)->init_chain_req(&req, cmdbuf_abo->mem.dev_addr, + offset, payload->command_count); + drm_clflush_virt_range(cmdbuf_abo->mem.kva, offset); + msg.handle = job; msg.notify_cb = notify_cb; msg.send_data = (u8 *)&req; msg.send_size = sizeof(req); ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT); if (ret) { - XDNA_ERR(hwctx->client->xdna, "Send message failed"); + XDNA_ERR(xdna, "Send message failed"); return ret; } @@ -712,23 +974,27 @@ int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx, { struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job); struct mailbox_channel *chann = hwctx->priv->mbox_chann; + struct amdxdna_dev *xdna = hwctx->client->xdna; struct amdxdna_gem_obj *cmd_abo = job->cmd_bo; struct xdna_mailbox_msg msg; - struct cmd_chain_req req; - u32 size; + union exec_chain_req req; + u32 op = ERT_INVALID_CMD; + size_t size; int ret; - u32 op; - op = amdxdna_cmd_get_op(cmd_abo); - ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, 0, cmd_abo, &size); + size = cmdbuf_abo->mem.size; + ret = aie2_cmdlist_fill_slot(cmdbuf_abo->mem.kva, cmd_abo, &size, &op); if (ret) return ret; - aie2_cmdlist_prepare_request(&req, cmdbuf_abo, size, 1); - - msg.opcode = aie2_cmd_op_to_msg_op(op); + msg.opcode = EXEC_MSG_OPS(xdna)->get_chain_msg_op(op); if (msg.opcode == MSG_OP_MAX_OPCODE) return -EOPNOTSUPP; + + EXEC_MSG_OPS(xdna)->init_chain_req(&req, cmdbuf_abo->mem.dev_addr, + size, 1); + drm_clflush_virt_range(cmdbuf_abo->mem.kva, size); + msg.handle = job; msg.notify_cb = notify_cb; msg.send_data = (u8 *)&req; @@ -753,7 +1019,7 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, int ret = 0; req.src_addr = 0; - req.dst_addr = abo->mem.dev_addr - hwctx->client->dev_heap->mem.dev_addr; + req.dst_addr = amdxdna_dev_bo_offset(abo); req.size = abo->mem.size; /* Device to Host */ @@ -777,3 +1043,32 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, return 0; } + +int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, + int (*notify_cb)(void *, void __iomem *, size_t)) +{ + struct mailbox_channel *chann = hwctx->priv->mbox_chann; + struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]); + struct amdxdna_dev *xdna = hwctx->client->xdna; + struct config_debug_bo_req req; + struct xdna_mailbox_msg msg; + + if (job->drv_cmd->opcode == ATTACH_DEBUG_BO) + req.config = DEBUG_BO_REGISTER; + else + req.config = DEBUG_BO_UNREGISTER; + + req.offset = amdxdna_dev_bo_offset(abo); + req.size = abo->mem.size; + + XDNA_DBG(xdna, "offset 0x%llx size 0x%llx config %d", + req.offset, req.size, req.config); + + msg.handle = job; + msg.notify_cb = notify_cb; + msg.send_data = (u8 *)&req; + msg.send_size = sizeof(req); + msg.opcode = MSG_OP_CONFIG_DEBUG_BO; + + return xdna_mailbox_send_msg(chann, &msg, 
TX_TIMEOUT); +} diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h index 6df9065b13f6..1c957a6298d3 100644 --- a/drivers/accel/amdxdna/aie2_msg_priv.h +++ b/drivers/accel/amdxdna/aie2_msg_priv.h @@ -9,7 +9,8 @@ enum aie2_msg_opcode { MSG_OP_CREATE_CONTEXT = 0x2, MSG_OP_DESTROY_CONTEXT = 0x3, - MSG_OP_SYNC_BO = 0x7, + MSG_OP_GET_TELEMETRY = 0x4, + MSG_OP_SYNC_BO = 0x7, MSG_OP_EXECUTE_BUFFER_CF = 0xC, MSG_OP_QUERY_COL_STATUS = 0xD, MSG_OP_QUERY_AIE_TILE_INFO = 0xE, @@ -18,6 +19,8 @@ enum aie2_msg_opcode { MSG_OP_CONFIG_CU = 0x11, MSG_OP_CHAIN_EXEC_BUFFER_CF = 0x12, MSG_OP_CHAIN_EXEC_DPU = 0x13, + MSG_OP_CONFIG_DEBUG_BO = 0x14, + MSG_OP_CHAIN_EXEC_NPU = 0x18, MSG_OP_MAX_XRT_OPCODE, MSG_OP_SUSPEND = 0x101, MSG_OP_RESUME = 0x102, @@ -135,6 +138,28 @@ struct destroy_ctx_resp { enum aie2_msg_status status; } __packed; +enum telemetry_type { + TELEMETRY_TYPE_DISABLED, + TELEMETRY_TYPE_HEALTH, + TELEMETRY_TYPE_ERROR_INFO, + TELEMETRY_TYPE_PROFILING, + TELEMETRY_TYPE_DEBUG, + MAX_TELEMETRY_TYPE +}; + +struct get_telemetry_req { + enum telemetry_type type; + __u64 buf_addr; + __u32 buf_size; +} __packed; + +struct get_telemetry_resp { + __u32 major; + __u32 minor; + __u32 size; + enum aie2_msg_status status; +} __packed; + struct execute_buffer_req { __u32 cu_idx; __u32 payload[19]; @@ -148,6 +173,18 @@ struct exec_dpu_req { __u32 payload[35]; } __packed; +enum exec_npu_type { + EXEC_NPU_TYPE_NON_ELF = 0x1, + EXEC_NPU_TYPE_PARTIAL_ELF = 0x2, + EXEC_NPU_TYPE_PREEMPT = 0x3, + EXEC_NPU_TYPE_ELF = 0x4, +}; + +union exec_req { + struct execute_buffer_req ebuf; + struct exec_dpu_req dpu_req; +}; + struct execute_buffer_resp { enum aie2_msg_status status; } __packed; @@ -319,9 +356,6 @@ struct async_event_msg_resp { } __packed; #define MAX_CHAIN_CMDBUF_SIZE SZ_4K -#define slot_has_space(slot, offset, payload_size) \ - (MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \ - sizeof(typeof(slot))) struct cmd_chain_slot_execbuf_cf { __u32 cu_idx; @@ -339,12 +373,41 @@ struct cmd_chain_slot_dpu { __u32 args[] __counted_by(arg_cnt); }; +#define MAX_NPU_ARGS_SIZE (26 * sizeof(__u32)) +#define AIE2_EXEC_BUFFER_KERNEL_OP_TXN 3 +struct cmd_chain_slot_npu { + enum exec_npu_type type; + u64 inst_buf_addr; + u64 save_buf_addr; + u64 restore_buf_addr; + u32 inst_size; + u32 save_size; + u32 restore_size; + u32 inst_prop_cnt; + u32 cu_idx; + u32 arg_cnt; + u32 args[] __counted_by(arg_cnt); +} __packed; + struct cmd_chain_req { __u64 buf_addr; __u32 buf_size; __u32 count; } __packed; +struct cmd_chain_npu_req { + u32 flags; + u32 reserved; + u64 buf_addr; + u32 buf_size; + u32 count; +} __packed; + +union exec_chain_req { + struct cmd_chain_npu_req npu_req; + struct cmd_chain_req req; +}; + struct cmd_chain_resp { enum aie2_msg_status status; __u32 fail_cmd_idx; @@ -365,4 +428,21 @@ struct sync_bo_req { struct sync_bo_resp { enum aie2_msg_status status; } __packed; + +#define DEBUG_BO_UNREGISTER 0 +#define DEBUG_BO_REGISTER 1 +struct config_debug_bo_req { + __u64 offset; + __u64 size; + /* + * config operations. 
+ * DEBUG_BO_REGISTER: Register debug buffer + * DEBUG_BO_UNREGISTER: Unregister debug buffer + */ + __u32 config; +} __packed; + +struct config_debug_bo_resp { + enum aie2_msg_status status; +} __packed; #endif /* _AIE2_MSG_PRIV_H_ */ diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c index 87c425e3d2b9..ceef1c502e9e 100644 --- a/drivers/accel/amdxdna/aie2_pci.c +++ b/drivers/accel/amdxdna/aie2_pci.c @@ -25,6 +25,7 @@ #include "amdxdna_gem.h" #include "amdxdna_mailbox.h" #include "amdxdna_pci_drv.h" +#include "amdxdna_pm.h" static int aie2_max_col = XRS_MAX_COL; module_param(aie2_max_col, uint, 0600); @@ -54,6 +55,7 @@ struct mgmt_mbox_chann_info { static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor) { + const struct aie2_fw_feature_tbl *feature; struct amdxdna_dev *xdna = ndev->xdna; /* @@ -77,6 +79,17 @@ static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 f XDNA_ERR(xdna, "Firmware minor version smaller than supported"); return -EINVAL; } + + for (feature = ndev->priv->fw_feature_tbl; feature && feature->min_minor; + feature++) { + if (fw_minor < feature->min_minor) + continue; + if (feature->max_minor > 0 && fw_minor > feature->max_minor) + continue; + + set_bit(feature->feature, &ndev->feature_mask); + } + return 0; } @@ -170,6 +183,10 @@ int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev, if (cfg->category != category) continue; + if (cfg->feature_mask && + bitmap_subset(&cfg->feature_mask, &ndev->feature_mask, AIE2_FEATURE_MAX)) + continue; + value = val ? *val : cfg->value; ret = aie2_set_runtime_cfg(ndev, cfg->type, value); if (ret) { @@ -223,15 +240,6 @@ static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev) return ret; } - if (!ndev->async_events) - return 0; - - ret = aie2_error_async_events_send(ndev); - if (ret) { - XDNA_ERR(ndev->xdna, "Send async events failed"); - return ret; - } - return 0; } @@ -257,6 +265,8 @@ static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev) return ret; } + ndev->total_col = min(aie2_max_col, ndev->metadata.cols); + return 0; } @@ -338,6 +348,7 @@ static void aie2_hw_stop(struct amdxdna_dev *xdna) ndev->mbox = NULL; aie2_psp_stop(ndev->psp_hdl); aie2_smu_fini(ndev); + aie2_error_async_events_free(ndev); pci_disable_device(pdev); ndev->dev_status = AIE2_DEV_INIT; @@ -424,6 +435,18 @@ static int aie2_hw_start(struct amdxdna_dev *xdna) goto destroy_mgmt_chann; } + ret = aie2_mgmt_fw_query(ndev); + if (ret) { + XDNA_ERR(xdna, "failed to query fw, ret %d", ret); + goto destroy_mgmt_chann; + } + + ret = aie2_error_async_events_alloc(ndev); + if (ret) { + XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret); + goto destroy_mgmt_chann; + } + ndev->dev_status = AIE2_DEV_START; return 0; @@ -459,7 +482,6 @@ static int aie2_hw_resume(struct amdxdna_dev *xdna) struct amdxdna_client *client; int ret; - guard(mutex)(&xdna->dev_lock); ret = aie2_hw_start(xdna); if (ret) { XDNA_ERR(xdna, "Start hardware failed, %d", ret); @@ -565,13 +587,6 @@ static int aie2_init(struct amdxdna_dev *xdna) goto release_fw; } - ret = aie2_mgmt_fw_query(ndev); - if (ret) { - XDNA_ERR(xdna, "Query firmware failed, ret %d", ret); - goto stop_hw; - } - ndev->total_col = min(aie2_max_col, ndev->metadata.cols); - xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1; for (i = 0; i < xrs_cfg.clk_list.num_levels; i++) xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk; @@ -587,30 +602,11 @@ static int aie2_init(struct amdxdna_dev *xdna) goto stop_hw; } - ret = 
aie2_error_async_events_alloc(ndev); - if (ret) { - XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret); - goto stop_hw; - } - - ret = aie2_error_async_events_send(ndev); - if (ret) { - XDNA_ERR(xdna, "Send async events failed, ret %d", ret); - goto async_event_free; - } - - /* Issue a command to make sure firmware handled async events */ - ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver); - if (ret) { - XDNA_ERR(xdna, "Re-query firmware version failed"); - goto async_event_free; - } - release_firmware(fw); + aie2_msg_init(ndev); + amdxdna_pm_init(xdna); return 0; -async_event_free: - aie2_error_async_events_free(ndev); stop_hw: aie2_hw_stop(xdna); release_fw: @@ -621,10 +617,8 @@ static int aie2_init(struct amdxdna_dev *xdna) static void aie2_fini(struct amdxdna_dev *xdna) { - struct amdxdna_dev_hdl *ndev = xdna->dev_handle; - + amdxdna_pm_fini(xdna); aie2_hw_stop(xdna); - aie2_error_async_events_free(ndev); } static int aie2_get_aie_status(struct amdxdna_client *client, @@ -845,7 +839,120 @@ static int aie2_get_hwctx_status(struct amdxdna_client *client, } args->buffer_size -= (u32)(array_args.buffer - args->buffer); - return ret; + return 0; +} + +static int aie2_query_resource_info(struct amdxdna_client *client, + struct amdxdna_drm_get_info *args) +{ + struct amdxdna_drm_get_resource_info res_info; + const struct amdxdna_dev_priv *priv; + struct amdxdna_dev_hdl *ndev; + struct amdxdna_dev *xdna; + + xdna = client->xdna; + ndev = xdna->dev_handle; + priv = ndev->priv; + + res_info.npu_clk_max = priv->dpm_clk_tbl[ndev->max_dpm_level].hclk; + res_info.npu_tops_max = ndev->max_tops; + res_info.npu_task_max = priv->hwctx_limit; + res_info.npu_tops_curr = ndev->curr_tops; + res_info.npu_task_curr = ndev->hwctx_num; + + if (copy_to_user(u64_to_user_ptr(args->buffer), &res_info, sizeof(res_info))) + return -EFAULT; + + return 0; +} + +static int aie2_fill_hwctx_map(struct amdxdna_hwctx *hwctx, void *arg) +{ + struct amdxdna_dev *xdna = hwctx->client->xdna; + u32 *map = arg; + + if (hwctx->fw_ctx_id >= xdna->dev_handle->priv->hwctx_limit) { + XDNA_ERR(xdna, "Invalid fw ctx id %d/%d ", hwctx->fw_ctx_id, + xdna->dev_handle->priv->hwctx_limit); + return -EINVAL; + } + + map[hwctx->fw_ctx_id] = hwctx->id; + return 0; +} + +static int aie2_get_telemetry(struct amdxdna_client *client, + struct amdxdna_drm_get_info *args) +{ + struct amdxdna_drm_query_telemetry_header *header __free(kfree) = NULL; + u32 telemetry_data_sz, header_sz, elem_num; + struct amdxdna_dev *xdna = client->xdna; + struct amdxdna_client *tmp_client; + int ret; + + elem_num = xdna->dev_handle->priv->hwctx_limit; + header_sz = struct_size(header, map, elem_num); + if (args->buffer_size <= header_sz) { + XDNA_ERR(xdna, "Invalid buffer size"); + return -EINVAL; + } + + telemetry_data_sz = args->buffer_size - header_sz; + if (telemetry_data_sz > SZ_4M) { + XDNA_ERR(xdna, "Buffer size is too big, %d", telemetry_data_sz); + return -EINVAL; + } + + header = kzalloc(header_sz, GFP_KERNEL); + if (!header) + return -ENOMEM; + + if (copy_from_user(header, u64_to_user_ptr(args->buffer), sizeof(*header))) { + XDNA_ERR(xdna, "Failed to copy telemetry header from user"); + return -EFAULT; + } + + header->map_num_elements = elem_num; + list_for_each_entry(tmp_client, &xdna->client_list, node) { + ret = amdxdna_hwctx_walk(tmp_client, &header->map, + aie2_fill_hwctx_map); + if (ret) + return ret; + } + + ret = aie2_query_telemetry(xdna->dev_handle, + u64_to_user_ptr(args->buffer + header_sz), + telemetry_data_sz, header); + if 
(ret) { + XDNA_ERR(xdna, "Query telemetry failed ret %d", ret); + return ret; + } + + if (copy_to_user(u64_to_user_ptr(args->buffer), header, header_sz)) { + XDNA_ERR(xdna, "Copy header failed"); + return -EFAULT; + } + + return 0; +} + +static int aie2_get_preempt_state(struct amdxdna_client *client, + struct amdxdna_drm_get_info *args) +{ + struct amdxdna_drm_attribute_state state = {}; + struct amdxdna_dev *xdna = client->xdna; + struct amdxdna_dev_hdl *ndev; + + ndev = xdna->dev_handle; + if (args->param == DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE) + state.state = ndev->force_preempt_enabled; + else if (args->param == DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE) + state.state = ndev->frame_boundary_preempt; + + if (copy_to_user(u64_to_user_ptr(args->buffer), &state, sizeof(state))) + return -EFAULT; + + return 0; } static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args) @@ -856,6 +963,10 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i if (!drm_dev_enter(&xdna->ddev, &idx)) return -ENODEV; + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto dev_exit; + switch (args->param) { case DRM_AMDXDNA_QUERY_AIE_STATUS: ret = aie2_get_aie_status(client, args); @@ -878,12 +989,25 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i case DRM_AMDXDNA_GET_POWER_MODE: ret = aie2_get_power_mode(client, args); break; + case DRM_AMDXDNA_QUERY_TELEMETRY: + ret = aie2_get_telemetry(client, args); + break; + case DRM_AMDXDNA_QUERY_RESOURCE_INFO: + ret = aie2_query_resource_info(client, args); + break; + case DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE: + case DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE: + ret = aie2_get_preempt_state(client, args); + break; default: XDNA_ERR(xdna, "Not supported request parameter %u", args->param); ret = -EOPNOTSUPP; } + + amdxdna_pm_suspend_put(xdna); XDNA_DBG(xdna, "Got param %d", args->param); +dev_exit: drm_dev_exit(idx); return ret; } @@ -898,6 +1022,12 @@ static int aie2_query_ctx_status_array(struct amdxdna_client *client, drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); + if (args->element_size > SZ_4K || args->num_element > SZ_1K) { + XDNA_DBG(xdna, "Invalid element size %d or number of element %d", + args->element_size, args->num_element); + return -EINVAL; + } + array_args.element_size = min(args->element_size, sizeof(struct amdxdna_drm_hwctx_entry)); array_args.buffer = args->buffer; @@ -914,7 +1044,7 @@ static int aie2_query_ctx_status_array(struct amdxdna_client *client, args->num_element = (u32)((array_args.buffer - args->buffer) / args->element_size); - return ret; + return 0; } static int aie2_get_array(struct amdxdna_client *client, @@ -926,16 +1056,26 @@ static int aie2_get_array(struct amdxdna_client *client, if (!drm_dev_enter(&xdna->ddev, &idx)) return -ENODEV; + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto dev_exit; + switch (args->param) { case DRM_AMDXDNA_HW_CONTEXT_ALL: ret = aie2_query_ctx_status_array(client, args); break; + case DRM_AMDXDNA_HW_LAST_ASYNC_ERR: + ret = aie2_get_array_async_error(xdna->dev_handle, args); + break; default: XDNA_ERR(xdna, "Not supported request parameter %u", args->param); ret = -EOPNOTSUPP; } + + amdxdna_pm_suspend_put(xdna); XDNA_DBG(xdna, "Got param %d", args->param); +dev_exit: drm_dev_exit(idx); return ret; } @@ -965,6 +1105,38 @@ static int aie2_set_power_mode(struct amdxdna_client *client, return aie2_pm_set_mode(xdna->dev_handle, power_mode); } +static int aie2_set_preempt_state(struct 
amdxdna_client *client, + struct amdxdna_drm_set_state *args) +{ + struct amdxdna_dev_hdl *ndev = client->xdna->dev_handle; + struct amdxdna_drm_attribute_state state; + u32 val; + int ret; + + if (copy_from_user(&state, u64_to_user_ptr(args->buffer), sizeof(state))) + return -EFAULT; + + if (state.state > 1) + return -EINVAL; + + if (XDNA_MBZ_DBG(client->xdna, state.pad, sizeof(state.pad))) + return -EINVAL; + + if (args->param == DRM_AMDXDNA_SET_FORCE_PREEMPT) { + ndev->force_preempt_enabled = state.state; + } else if (args->param == DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT) { + val = state.state; + ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT, + &val); + if (ret) + return ret; + + ndev->frame_boundary_preempt = state.state; + } + + return 0; +} + static int aie2_set_state(struct amdxdna_client *client, struct amdxdna_drm_set_state *args) { @@ -974,16 +1146,26 @@ static int aie2_set_state(struct amdxdna_client *client, if (!drm_dev_enter(&xdna->ddev, &idx)) return -ENODEV; + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto dev_exit; + switch (args->param) { case DRM_AMDXDNA_SET_POWER_MODE: ret = aie2_set_power_mode(client, args); break; + case DRM_AMDXDNA_SET_FORCE_PREEMPT: + case DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT: + ret = aie2_set_preempt_state(client, args); + break; default: XDNA_ERR(xdna, "Not supported request parameter %u", args->param); ret = -EOPNOTSUPP; break; } + amdxdna_pm_suspend_put(xdna); +dev_exit: drm_dev_exit(idx); return ret; } @@ -998,6 +1180,7 @@ const struct amdxdna_dev_ops aie2_ops = { .hwctx_init = aie2_hwctx_init, .hwctx_fini = aie2_hwctx_fini, .hwctx_config = aie2_hwctx_config, + .hwctx_sync_debug_bo = aie2_hwctx_sync_debug_bo, .cmd_submit = aie2_cmd_submit, .hmm_invalidate = aie2_hmm_invalidate, .get_array = aie2_get_array, diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h index 91a8e948f82a..a5f9c42155d1 100644 --- a/drivers/accel/amdxdna/aie2_pci.h +++ b/drivers/accel/amdxdna/aie2_pci.h @@ -110,12 +110,15 @@ struct aie_metadata { enum rt_config_category { AIE2_RT_CFG_INIT, AIE2_RT_CFG_CLK_GATING, + AIE2_RT_CFG_FORCE_PREEMPT, + AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT, }; struct rt_config { u32 type; u32 value; u32 category; + unsigned long feature_mask; }; struct dpm_clk_freq { @@ -156,6 +159,19 @@ enum aie2_dev_status { AIE2_DEV_START, }; +struct aie2_exec_msg_ops { + int (*init_cu_req)(struct amdxdna_gem_obj *cmd_bo, void *req, + size_t *size, u32 *msg_op); + int (*init_dpu_req)(struct amdxdna_gem_obj *cmd_bo, void *req, + size_t *size, u32 *msg_op); + void (*init_chain_req)(void *req, u64 slot_addr, size_t size, u32 cmd_cnt); + int (*fill_cf_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size); + int (*fill_dpu_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size); + int (*fill_preempt_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size); + int (*fill_elf_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size); + u32 (*get_chain_msg_op)(u32 cmd_op); +}; + struct amdxdna_dev_hdl { struct amdxdna_dev *xdna; const struct amdxdna_dev_priv *priv; @@ -173,6 +189,8 @@ struct amdxdna_dev_hdl { u32 total_col; struct aie_version version; struct aie_metadata metadata; + unsigned long feature_mask; + struct aie2_exec_msg_ops *exec_msg_ops; /* power management and clock*/ enum amdxdna_power_mode_type pw_mode; @@ -182,6 +200,10 @@ struct amdxdna_dev_hdl { u32 clk_gating; u32 npuclk_freq; u32 hclk_freq; + u32 max_tops; + u32 curr_tops; + u32 force_preempt_enabled; + u32 
frame_boundary_preempt; /* Mailbox and the management channel */ struct mailbox *mbox; @@ -190,6 +212,8 @@ struct amdxdna_dev_hdl { enum aie2_dev_status dev_status; u32 hwctx_num; + + struct amdxdna_async_error last_async_err; }; #define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \ @@ -204,12 +228,27 @@ struct aie2_hw_ops { int (*set_dpm)(struct amdxdna_dev_hdl *ndev, u32 dpm_level); }; +enum aie2_fw_feature { + AIE2_NPU_COMMAND, + AIE2_PREEMPT, + AIE2_FEATURE_MAX +}; + +struct aie2_fw_feature_tbl { + enum aie2_fw_feature feature; + u32 max_minor; + u32 min_minor; +}; + +#define AIE2_FEATURE_ON(ndev, feature) test_bit(feature, &(ndev)->feature_mask) + struct amdxdna_dev_priv { const char *fw_path; u64 protocol_major; u64 protocol_minor; const struct rt_config *rt_config; const struct dpm_clk_freq *dpm_clk_tbl; + const struct aie2_fw_feature_tbl *fw_feature_tbl; #define COL_ALIGN_NONE 0 #define COL_ALIGN_NATURE 1 @@ -217,6 +256,7 @@ struct amdxdna_dev_priv { u32 mbox_dev_addr; /* If mbox_size is 0, use BAR size. See MBOX_SIZE macro */ u32 mbox_size; + u32 hwctx_limit; u32 sram_dev_addr; struct aie2_bar_off_pair sram_offs[SRAM_MAX_INDEX]; struct aie2_bar_off_pair psp_regs_off[PSP_MAX_REGS]; @@ -234,6 +274,7 @@ extern const struct dpm_clk_freq npu1_dpm_clk_table[]; extern const struct dpm_clk_freq npu4_dpm_clk_table[]; extern const struct rt_config npu1_default_rt_cfg[]; extern const struct rt_config npu4_default_rt_cfg[]; +extern const struct aie2_fw_feature_tbl npu4_fw_feature_table[]; /* aie2_smu.c */ int aie2_smu_init(struct amdxdna_dev_hdl *ndev); @@ -253,10 +294,12 @@ void aie2_psp_stop(struct psp_device *psp); /* aie2_error.c */ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev); void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev); -int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev); int aie2_error_async_msg_thread(void *data); +int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev, + struct amdxdna_drm_get_array *args); /* aie2_message.c */ +void aie2_msg_init(struct amdxdna_dev_hdl *ndev); int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev); int aie2_resume_fw(struct amdxdna_dev_hdl *ndev); int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value); @@ -270,9 +313,13 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx); int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size); int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled); +int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev, + char __user *buf, u32 size, + struct amdxdna_drm_query_telemetry_header *header); int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size, void *handle, int (*cb)(void*, void __iomem *, size_t)); -int aie2_config_cu(struct amdxdna_hwctx *hwctx); +int aie2_config_cu(struct amdxdna_hwctx *hwctx, + int (*notify_cb)(void *, void __iomem *, size_t)); int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, int (*notify_cb)(void *, void __iomem *, size_t)); int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx, @@ -283,11 +330,14 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx, int (*notify_cb)(void *, void __iomem *, size_t)); int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, int (*notify_cb)(void *, void __iomem *, size_t)); +int aie2_config_debug_bo(struct 
amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, + int (*notify_cb)(void *, void __iomem *, size_t)); /* aie2_hwctx.c */ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx); void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx); int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size); +int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl); void aie2_hwctx_suspend(struct amdxdna_client *client); int aie2_hwctx_resume(struct amdxdna_client *client); int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq); diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c index d303701b0ded..bd94ee96c2bc 100644 --- a/drivers/accel/amdxdna/aie2_smu.c +++ b/drivers/accel/amdxdna/aie2_smu.c @@ -11,6 +11,7 @@ #include "aie2_pci.h" #include "amdxdna_pci_drv.h" +#include "amdxdna_pm.h" #define SMU_RESULT_OK 1 @@ -22,6 +23,13 @@ #define AIE2_SMU_SET_SOFT_DPMLEVEL 0x7 #define AIE2_SMU_SET_HARD_DPMLEVEL 0x8 +#define NPU4_DPM_TOPS(ndev, dpm_level) \ +({ \ + typeof(ndev) _ndev = ndev; \ + (4096 * (_ndev)->total_col * \ + (_ndev)->priv->dpm_clk_tbl[dpm_level].hclk / 1000000); \ +}) + static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd, u32 reg_arg, u32 *out) { @@ -59,12 +67,16 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level) u32 freq; int ret; + ret = amdxdna_pm_resume_get(ndev->xdna); + if (ret) + return ret; + ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ, ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq); if (ret) { XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n", ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret); - return ret; + goto suspend_put; } ndev->npuclk_freq = freq; @@ -73,49 +85,78 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level) if (ret) { XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n", ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret); - return ret; + goto suspend_put; } + + amdxdna_pm_suspend_put(ndev->xdna); ndev->hclk_freq = freq; ndev->dpm_level = dpm_level; + ndev->max_tops = 2 * ndev->total_col; + ndev->curr_tops = ndev->max_tops * freq / 1028; XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n", ndev->npuclk_freq, ndev->hclk_freq); return 0; + +suspend_put: + amdxdna_pm_suspend_put(ndev->xdna); + return ret; } int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level) { int ret; + ret = amdxdna_pm_resume_get(ndev->xdna); + if (ret) + return ret; + ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL); if (ret) { XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ", dpm_level, ret); - return ret; + goto suspend_put; } ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL); if (ret) { XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d", dpm_level, ret); - return ret; + goto suspend_put; } + amdxdna_pm_suspend_put(ndev->xdna); ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk; ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk; ndev->dpm_level = dpm_level; + ndev->max_tops = NPU4_DPM_TOPS(ndev, ndev->max_dpm_level); + ndev->curr_tops = NPU4_DPM_TOPS(ndev, dpm_level); XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n", ndev->npuclk_freq, ndev->hclk_freq); return 0; + +suspend_put: + amdxdna_pm_suspend_put(ndev->xdna); + return ret; } int aie2_smu_init(struct amdxdna_dev_hdl *ndev) { int ret; + /* + * Failing to set power off indicates an unrecoverable hardware or + * firmware error. 
+ */ + ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_OFF, 0, NULL); + if (ret) { + XDNA_ERR(ndev->xdna, "Access power failed, ret %d", ret); + return ret; + } + ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_ON, 0, NULL); if (ret) { XDNA_ERR(ndev->xdna, "Power on failed, ret %d", ret); diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c index 4bfe4ef20550..d17aef89a0ad 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.c +++ b/drivers/accel/amdxdna/amdxdna_ctx.c @@ -113,14 +113,14 @@ void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size) return &cmd->data[num_masks]; } -int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo) +u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo) { struct amdxdna_cmd *cmd = abo->mem.kva; u32 num_masks, i; u32 *cu_mask; if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN) - return -1; + return INVALID_CU_IDX; num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header); cu_mask = cmd->data; @@ -129,7 +129,7 @@ int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo) return ffs(cu_mask[i]) - 1; } - return -1; + return INVALID_CU_IDX; } /* @@ -161,19 +161,14 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr if (args->ext || args->ext_flags) return -EINVAL; - if (!drm_dev_enter(dev, &idx)) - return -ENODEV; - hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL); - if (!hwctx) { - ret = -ENOMEM; - goto exit; - } + if (!hwctx) + return -ENOMEM; if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) { XDNA_ERR(xdna, "Access QoS info failed"); - ret = -EFAULT; - goto free_hwctx; + kfree(hwctx); + return -EFAULT; } hwctx->client = client; @@ -181,30 +176,36 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr hwctx->num_tiles = args->num_tiles; hwctx->mem_size = args->mem_size; hwctx->max_opc = args->max_opc; + + guard(mutex)(&xdna->dev_lock); + + if (!drm_dev_enter(dev, &idx)) { + ret = -ENODEV; + goto free_hwctx; + } + + ret = xdna->dev_info->ops->hwctx_init(hwctx); + if (ret) { + XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret); + goto dev_exit; + } + + hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id); + if (!hwctx->name) { + ret = -ENOMEM; + goto fini_hwctx; + } + ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx, XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID), &client->next_hwctxid, GFP_KERNEL); if (ret < 0) { XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret); - goto free_hwctx; - } - - hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id); - if (!hwctx->name) { - ret = -ENOMEM; - goto rm_id; - } - - mutex_lock(&xdna->dev_lock); - ret = xdna->dev_info->ops->hwctx_init(hwctx); - if (ret) { - mutex_unlock(&xdna->dev_lock); - XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret); goto free_name; } + args->handle = hwctx->id; args->syncobj_handle = hwctx->syncobj_hdl; - mutex_unlock(&xdna->dev_lock); atomic64_set(&hwctx->job_submit_cnt, 0); atomic64_set(&hwctx->job_free_cnt, 0); @@ -214,12 +215,12 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr free_name: kfree(hwctx->name); -rm_id: - xa_erase(&client->hwctx_xa, hwctx->id); +fini_hwctx: + xdna->dev_info->ops->hwctx_fini(hwctx); +dev_exit: + drm_dev_exit(idx); free_hwctx: kfree(hwctx); -exit: - drm_dev_exit(idx); return ret; } @@ -327,6 +328,38 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr return ret; } +int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client 
*client, u32 debug_bo_hdl) +{ + struct amdxdna_dev *xdna = client->xdna; + struct amdxdna_hwctx *hwctx; + struct amdxdna_gem_obj *abo; + struct drm_gem_object *gobj; + int ret, idx; + + if (!xdna->dev_info->ops->hwctx_sync_debug_bo) + return -EOPNOTSUPP; + + gobj = drm_gem_object_lookup(client->filp, debug_bo_hdl); + if (!gobj) + return -EINVAL; + + abo = to_xdna_obj(gobj); + guard(mutex)(&xdna->dev_lock); + idx = srcu_read_lock(&client->hwctx_srcu); + hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx); + if (!hwctx) { + ret = -EINVAL; + goto unlock_srcu; + } + + ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl); + +unlock_srcu: + srcu_read_unlock(&client->hwctx_srcu, idx); + drm_gem_object_put(gobj); + return ret; +} + static void amdxdna_arg_bos_put(struct amdxdna_sched_job *job) { @@ -389,9 +422,11 @@ void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job) trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release"); amdxdna_arg_bos_put(job); amdxdna_gem_put_obj(job->cmd_bo); + dma_fence_put(job->fence); } int amdxdna_cmd_submit(struct amdxdna_client *client, + struct amdxdna_drv_cmd *drv_cmd, u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt, u32 hwctx_hdl, u64 *seq) { @@ -405,6 +440,8 @@ int amdxdna_cmd_submit(struct amdxdna_client *client, if (!job) return -ENOMEM; + job->drv_cmd = drv_cmd; + if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) { job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD); if (!job->cmd_bo) { @@ -412,8 +449,6 @@ int amdxdna_cmd_submit(struct amdxdna_client *client, ret = -EINVAL; goto free_job; } - } else { - job->cmd_bo = NULL; } ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt); @@ -431,11 +466,6 @@ int amdxdna_cmd_submit(struct amdxdna_client *client, goto unlock_srcu; } - if (hwctx->status != HWCTX_STAT_READY) { - XDNA_ERR(xdna, "HW Context is not ready"); - ret = -EINVAL; - goto unlock_srcu; - } job->hwctx = hwctx; job->mm = current->mm; @@ -512,7 +542,7 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client, } } - ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls, + ret = amdxdna_cmd_submit(client, NULL, cmd_bo_hdl, arg_bo_hdls, args->arg_count, args->hwctx, &args->seq); if (ret) XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret); diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h index 7cd7a55936f0..b6151244d64f 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.h +++ b/drivers/accel/amdxdna/amdxdna_ctx.h @@ -13,9 +13,12 @@ struct amdxdna_hwctx_priv; enum ert_cmd_opcode { - ERT_START_CU = 0, - ERT_CMD_CHAIN = 19, - ERT_START_NPU = 20, + ERT_START_CU = 0, + ERT_CMD_CHAIN = 19, + ERT_START_NPU = 20, + ERT_START_NPU_PREEMPT = 21, + ERT_START_NPU_PREEMPT_ELF = 22, + ERT_INVALID_CMD = ~0U, }; enum ert_cmd_state { @@ -54,6 +57,21 @@ struct amdxdna_cmd_chain { u64 data[] __counted_by(command_count); }; +/* + * Interpretation of the beginning of data payload for ERT_START_NPU_PREEMPT in + * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args. 
+ */ +struct amdxdna_cmd_preempt_data { + u64 inst_buf; /* instruction buffer address */ + u64 save_buf; /* save buffer address */ + u64 restore_buf; /* restore buffer address */ + u32 inst_size; /* size of instruction buffer in bytes */ + u32 save_size; /* size of save buffer in bytes */ + u32 restore_size; /* size of restore buffer in bytes */ + u32 inst_prop_cnt; /* properties count */ + u32 prop_args[]; /* properties and regular kernel arguments */ +}; + /* Exec buffer command header format */ #define AMDXDNA_CMD_STATE GENMASK(3, 0) #define AMDXDNA_CMD_EXTRA_CU_MASK GENMASK(11, 10) @@ -64,6 +82,8 @@ struct amdxdna_cmd { u32 data[]; }; +#define INVALID_CU_IDX (~0U) + struct amdxdna_hwctx { struct amdxdna_client *client; struct amdxdna_hwctx_priv *priv; @@ -95,6 +115,17 @@ struct amdxdna_hwctx { #define drm_job_to_xdna_job(j) \ container_of(j, struct amdxdna_sched_job, base) +enum amdxdna_job_opcode { + SYNC_DEBUG_BO, + ATTACH_DEBUG_BO, + DETACH_DEBUG_BO, +}; + +struct amdxdna_drv_cmd { + enum amdxdna_job_opcode opcode; + u32 result; +}; + struct amdxdna_sched_job { struct drm_sched_job base; struct kref refcnt; @@ -105,7 +136,9 @@ struct amdxdna_sched_job { /* user can wait on this fence */ struct dma_fence *out_fence; bool job_done; + bool job_timeout; u64 seq; + struct amdxdna_drv_cmd *drv_cmd; struct amdxdna_gem_obj *cmd_bo; size_t bo_cnt; struct drm_gem_object *bos[] __counted_by(bo_cnt); @@ -137,15 +170,17 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo) } void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size); -int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo); +u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo); void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job); void amdxdna_hwctx_remove_all(struct amdxdna_client *client); int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg, int (*walk)(struct amdxdna_hwctx *hwctx, void *arg)); +int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl); int amdxdna_cmd_submit(struct amdxdna_client *client, - u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt, + struct amdxdna_drv_cmd *drv_cmd, u32 cmd_bo_hdls, + u32 *arg_bo_hdls, u32 arg_bo_cnt, u32 hwctx_hdl, u64 *seq); int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl, diff --git a/drivers/accel/amdxdna/amdxdna_error.h b/drivers/accel/amdxdna/amdxdna_error.h new file mode 100644 index 000000000000..c51de86ec12b --- /dev/null +++ b/drivers/accel/amdxdna/amdxdna_error.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2025, Advanced Micro Devices, Inc. 
+ */ + +#ifndef _AMDXDNA_ERROR_H_ +#define _AMDXDNA_ERROR_H_ + +#include +#include + +#define AMDXDNA_ERR_DRV_AIE 4 +#define AMDXDNA_ERR_SEV_CRITICAL 3 +#define AMDXDNA_ERR_CLASS_AIE 2 + +#define AMDXDNA_ERR_NUM_MASK GENMASK_U64(15, 0) +#define AMDXDNA_ERR_DRV_MASK GENMASK_U64(23, 16) +#define AMDXDNA_ERR_SEV_MASK GENMASK_U64(31, 24) +#define AMDXDNA_ERR_MOD_MASK GENMASK_U64(39, 32) +#define AMDXDNA_ERR_CLASS_MASK GENMASK_U64(47, 40) + +enum amdxdna_error_num { + AMDXDNA_ERROR_NUM_AIE_SATURATION = 3, + AMDXDNA_ERROR_NUM_AIE_FP, + AMDXDNA_ERROR_NUM_AIE_STREAM, + AMDXDNA_ERROR_NUM_AIE_ACCESS, + AMDXDNA_ERROR_NUM_AIE_BUS, + AMDXDNA_ERROR_NUM_AIE_INSTRUCTION, + AMDXDNA_ERROR_NUM_AIE_ECC, + AMDXDNA_ERROR_NUM_AIE_LOCK, + AMDXDNA_ERROR_NUM_AIE_DMA, + AMDXDNA_ERROR_NUM_AIE_MEM_PARITY, + AMDXDNA_ERROR_NUM_UNKNOWN = 15, +}; + +enum amdxdna_error_module { + AMDXDNA_ERROR_MODULE_AIE_CORE = 3, + AMDXDNA_ERROR_MODULE_AIE_MEMORY, + AMDXDNA_ERROR_MODULE_AIE_SHIM, + AMDXDNA_ERROR_MODULE_AIE_NOC, + AMDXDNA_ERROR_MODULE_AIE_PL, + AMDXDNA_ERROR_MODULE_UNKNOWN = 8, +}; + +#define AMDXDNA_ERROR_ENCODE(err_num, err_mod) \ + (FIELD_PREP(AMDXDNA_ERR_NUM_MASK, err_num) | \ + FIELD_PREP_CONST(AMDXDNA_ERR_DRV_MASK, AMDXDNA_ERR_DRV_AIE) | \ + FIELD_PREP_CONST(AMDXDNA_ERR_SEV_MASK, AMDXDNA_ERR_SEV_CRITICAL) | \ + FIELD_PREP(AMDXDNA_ERR_MOD_MASK, err_mod) | \ + FIELD_PREP_CONST(AMDXDNA_ERR_CLASS_MASK, AMDXDNA_ERR_CLASS_AIE)) + +#define AMDXDNA_EXTRA_ERR_COL_MASK GENMASK_U64(7, 0) +#define AMDXDNA_EXTRA_ERR_ROW_MASK GENMASK_U64(15, 8) + +#define AMDXDNA_EXTRA_ERR_ENCODE(row, col) \ + (FIELD_PREP(AMDXDNA_EXTRA_ERR_COL_MASK, col) | \ + FIELD_PREP(AMDXDNA_EXTRA_ERR_ROW_MASK, row)) + +#endif /* _AMDXDNA_ERROR_H_ */ diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c index d407a36eb412..dfa916eeb2d9 100644 --- a/drivers/accel/amdxdna/amdxdna_gem.c +++ b/drivers/accel/amdxdna/amdxdna_gem.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -392,35 +393,33 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = { .vunmap = drm_gem_dmabuf_vunmap, }; -static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map) +static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr) { - struct amdxdna_gem_obj *abo = to_xdna_obj(obj); - - iosys_map_clear(map); - - dma_resv_assert_held(obj->resv); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); + int ret; if (is_import_bo(abo)) - dma_buf_vmap(abo->dma_buf, map); + ret = dma_buf_vmap_unlocked(abo->dma_buf, &map); else - drm_gem_shmem_object_vmap(obj, map); + ret = drm_gem_vmap(to_gobj(abo), &map); - if (!map->vaddr) - return -ENOMEM; - - return 0; + *vaddr = map.vaddr; + return ret; } -static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map) +static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo) { - struct amdxdna_gem_obj *abo = to_xdna_obj(obj); + struct iosys_map map; - dma_resv_assert_held(obj->resv); + if (!abo->mem.kva) + return; + + iosys_map_set_vaddr(&map, abo->mem.kva); if (is_import_bo(abo)) - dma_buf_vunmap(abo->dma_buf, map); + dma_buf_vunmap_unlocked(abo->dma_buf, &map); else - drm_gem_shmem_object_vunmap(obj, map); + drm_gem_vunmap(to_gobj(abo), &map); } static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags) @@ -455,7 +454,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj) { struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev); struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); - struct 
iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva); XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr); @@ -468,7 +466,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj) if (abo->type == AMDXDNA_BO_DEV_HEAP) drm_mm_takedown(&abo->mm); - drm_gem_vunmap(gobj, &map); + amdxdna_gem_obj_vunmap(abo); mutex_destroy(&abo->lock); if (is_import_bo(abo)) { @@ -489,8 +487,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = { .pin = drm_gem_shmem_object_pin, .unpin = drm_gem_shmem_object_unpin, .get_sg_table = drm_gem_shmem_object_get_sg_table, - .vmap = amdxdna_gem_obj_vmap, - .vunmap = amdxdna_gem_obj_vunmap, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, .mmap = amdxdna_gem_obj_mmap, .vm_ops = &drm_gem_shmem_vm_ops, .export = amdxdna_gem_prime_export, @@ -663,7 +661,6 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev, struct drm_file *filp) { struct amdxdna_client *client = filp->driver_priv; - struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); struct amdxdna_dev *xdna = to_xdna_dev(dev); struct amdxdna_gem_obj *abo; int ret; @@ -692,12 +689,11 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev, abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base; drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size); - ret = drm_gem_vmap(to_gobj(abo), &map); + ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); if (ret) { XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret); goto release_obj; } - abo->mem.kva = map.vaddr; client->dev_heap = abo; drm_gem_object_get(to_gobj(abo)); @@ -748,7 +744,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev, struct amdxdna_drm_create_bo *args, struct drm_file *filp) { - struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); struct amdxdna_dev *xdna = to_xdna_dev(dev); struct amdxdna_gem_obj *abo; int ret; @@ -770,12 +765,11 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev, abo->type = AMDXDNA_BO_CMD; abo->client = filp->driver_priv; - ret = drm_gem_vmap(to_gobj(abo), &map); + ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); if (ret) { XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret); goto release_obj; } - abo->mem.kva = map.vaddr; return abo; @@ -969,6 +963,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev, XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n", args->handle, args->offset, args->size); + if (args->direction == SYNC_DIRECT_FROM_DEVICE) + ret = amdxdna_hwctx_sync_debug_bo(abo->client, args->handle); + put_obj: drm_gem_object_put(gobj); return ret; diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h index ae29db94a9d3..f79fc7f3c93b 100644 --- a/drivers/accel/amdxdna/amdxdna_gem.h +++ b/drivers/accel/amdxdna/amdxdna_gem.h @@ -7,6 +7,7 @@ #define _AMDXDNA_GEM_H_ #include +#include "amdxdna_pci_drv.h" struct amdxdna_umap { struct vm_area_struct *vma; @@ -62,6 +63,11 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo) drm_gem_object_put(to_gobj(abo)); } +static inline u64 amdxdna_dev_bo_offset(struct amdxdna_gem_obj *abo) +{ + return abo->mem.dev_addr - abo->client->dev_heap->mem.dev_addr; +} + void amdxdna_umap_put(struct amdxdna_umap *mapp); struct drm_gem_object * diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c index da1ac89bb78f..858df97cd3fb 100644 --- a/drivers/accel/amdxdna/amdxdna_mailbox.c +++ b/drivers/accel/amdxdna/amdxdna_mailbox.c @@ -194,7 +194,8 @@ static void mailbox_release_msg(struct mailbox_channel *mb_chann, { MB_DBG(mb_chann, "msg_id 0x%x 
msg opcode 0x%x", mb_msg->pkg.header.id, mb_msg->pkg.header.opcode); - mb_msg->notify_cb(mb_msg->handle, NULL, 0); + if (mb_msg->notify_cb) + mb_msg->notify_cb(mb_msg->handle, NULL, 0); kfree(mb_msg); } @@ -248,7 +249,7 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade { struct mailbox_msg *mb_msg; int msg_id; - int ret; + int ret = 0; msg_id = header->id; if (!mailbox_validate_msgid(msg_id)) { @@ -265,9 +266,11 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x", header->opcode, header->total_size, header->id); - ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size); - if (unlikely(ret)) - MB_ERR(mb_chann, "Message callback ret %d", ret); + if (mb_msg->notify_cb) { + ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size); + if (unlikely(ret)) + MB_ERR(mb_chann, "Message callback ret %d", ret); + } kfree(mb_msg); return ret; @@ -513,6 +516,7 @@ xdna_mailbox_create_channel(struct mailbox *mb, } mb_chann->bad_state = false; + mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0); MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq); return mb_chann; diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h index 710ff8873d61..556c712cad0a 100644 --- a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h +++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h @@ -16,16 +16,18 @@ struct xdna_notify { u32 *data; size_t size; int error; + u32 *status; }; -#define DECLARE_XDNA_MSG_COMMON(name, op, status) \ +#define DECLARE_XDNA_MSG_COMMON(name, op, s) \ struct name##_req req = { 0 }; \ - struct name##_resp resp = { status }; \ + struct name##_resp resp = { .status = s }; \ struct xdna_notify hdl = { \ .error = 0, \ .data = (u32 *)&resp, \ .size = sizeof(resp), \ .comp = COMPLETION_INITIALIZER_ONSTACK(hdl.comp), \ + .status = (u32 *)&resp.status, \ }; \ struct xdna_mailbox_msg msg = { \ .send_data = (u8 *)&req, \ diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c index 569cd703729d..1973ab67721b 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.c +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c @@ -13,13 +13,11 @@ #include #include #include -#include #include "amdxdna_ctx.h" #include "amdxdna_gem.h" #include "amdxdna_pci_drv.h" - -#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */ +#include "amdxdna_pm.h" MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin"); MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin"); @@ -29,9 +27,14 @@ MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin"); /* * 0.0: Initial version * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY + * 0.2: Support getting last hardware error + * 0.3: Support firmware debug buffer + * 0.4: Support getting resource information + * 0.5: Support getting telemetry data + * 0.6: Support preemption */ #define AMDXDNA_DRIVER_MAJOR 0 -#define AMDXDNA_DRIVER_MINOR 1 +#define AMDXDNA_DRIVER_MINOR 6 /* * Bind the driver base on (vendor_id, device_id) pair and later use the @@ -61,17 +64,9 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp) struct amdxdna_client *client; int ret; - ret = pm_runtime_resume_and_get(ddev->dev); - if (ret) { - XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret); - return ret; - } - client = kzalloc(sizeof(*client), GFP_KERNEL); - if (!client) { - ret = -ENOMEM; - goto put_rpm; - } + if (!client) + return -ENOMEM; client->pid = 
pid_nr(rcu_access_pointer(filp->pid)); client->xdna = xdna; @@ -106,9 +101,6 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp) iommu_sva_unbind_device(client->sva); failed: kfree(client); -put_rpm: - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); return ret; } @@ -130,8 +122,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp) XDNA_DBG(xdna, "pid %d closed", client->pid); kfree(client); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); } static int amdxdna_flush(struct file *f, fl_owner_t id) @@ -310,19 +300,12 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto failed_dev_fini; } - pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(dev); - pm_runtime_allow(dev); - ret = drm_dev_register(&xdna->ddev, 0); if (ret) { XDNA_ERR(xdna, "DRM register failed, ret %d", ret); - pm_runtime_forbid(dev); goto failed_sysfs_fini; } - pm_runtime_mark_last_busy(dev); - pm_runtime_put_autosuspend(dev); return 0; failed_sysfs_fini: @@ -339,14 +322,10 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id) static void amdxdna_remove(struct pci_dev *pdev) { struct amdxdna_dev *xdna = pci_get_drvdata(pdev); - struct device *dev = &pdev->dev; struct amdxdna_client *client; destroy_workqueue(xdna->notifier_wq); - pm_runtime_get_noresume(dev); - pm_runtime_forbid(dev); - drm_dev_unplug(&xdna->ddev); amdxdna_sysfs_fini(xdna); @@ -365,29 +344,9 @@ static void amdxdna_remove(struct pci_dev *pdev) mutex_unlock(&xdna->dev_lock); } -static int amdxdna_pmops_suspend(struct device *dev) -{ - struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev)); - - if (!xdna->dev_info->ops->suspend) - return -EOPNOTSUPP; - - return xdna->dev_info->ops->suspend(xdna); -} - -static int amdxdna_pmops_resume(struct device *dev) -{ - struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev)); - - if (!xdna->dev_info->ops->resume) - return -EOPNOTSUPP; - - return xdna->dev_info->ops->resume(xdna); -} - static const struct dev_pm_ops amdxdna_pm_ops = { - SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume) - RUNTIME_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume, NULL) + SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume) + RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL) }; static struct pci_driver amdxdna_pci_driver = { diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h index 72d6696d49da..c99477f5e454 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.h +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h @@ -6,6 +6,7 @@ #ifndef _AMDXDNA_PCI_DRV_H_ #define _AMDXDNA_PCI_DRV_H_ +#include #include #include @@ -54,6 +55,7 @@ struct amdxdna_dev_ops { int (*hwctx_init)(struct amdxdna_hwctx *hwctx); void (*hwctx_fini)(struct amdxdna_hwctx *hwctx); int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size); + int (*hwctx_sync_debug_bo)(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl); void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq); int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq); int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args); @@ -99,6 +101,7 @@ struct amdxdna_dev { struct amdxdna_fw_ver fw_ver; struct rw_semaphore notifier_lock; /* for mmu notifier*/ struct workqueue_struct *notifier_wq; + bool rpm_on; }; /* diff 
--git a/drivers/accel/amdxdna/amdxdna_pm.c b/drivers/accel/amdxdna/amdxdna_pm.c new file mode 100644 index 000000000000..fa38e65d617c --- /dev/null +++ b/drivers/accel/amdxdna/amdxdna_pm.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025, Advanced Micro Devices, Inc. + */ + +#include +#include +#include + +#include "amdxdna_pm.h" + +#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */ + +int amdxdna_pm_suspend(struct device *dev) +{ + struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); + int ret = -EOPNOTSUPP; + bool rpm; + + if (xdna->dev_info->ops->suspend) { + rpm = xdna->rpm_on; + xdna->rpm_on = false; + ret = xdna->dev_info->ops->suspend(xdna); + xdna->rpm_on = rpm; + } + + XDNA_DBG(xdna, "Suspend done ret %d", ret); + return ret; +} + +int amdxdna_pm_resume(struct device *dev) +{ + struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); + int ret = -EOPNOTSUPP; + bool rpm; + + if (xdna->dev_info->ops->resume) { + rpm = xdna->rpm_on; + xdna->rpm_on = false; + ret = xdna->dev_info->ops->resume(xdna); + xdna->rpm_on = rpm; + } + + XDNA_DBG(xdna, "Resume done ret %d", ret); + return ret; +} + +int amdxdna_pm_resume_get(struct amdxdna_dev *xdna) +{ + struct device *dev = xdna->ddev.dev; + int ret; + + if (!xdna->rpm_on) + return 0; + + ret = pm_runtime_resume_and_get(dev); + if (ret) { + XDNA_ERR(xdna, "Resume failed: %d", ret); + pm_runtime_set_suspended(dev); + } + + return ret; +} + +void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna) +{ + struct device *dev = xdna->ddev.dev; + + if (!xdna->rpm_on) + return; + + pm_runtime_put_autosuspend(dev); +} + +void amdxdna_pm_init(struct amdxdna_dev *xdna) +{ + struct device *dev = xdna->ddev.dev; + + pm_runtime_set_active(dev); + pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + pm_runtime_allow(dev); + pm_runtime_put_autosuspend(dev); + xdna->rpm_on = true; +} + +void amdxdna_pm_fini(struct amdxdna_dev *xdna) +{ + struct device *dev = xdna->ddev.dev; + + xdna->rpm_on = false; + pm_runtime_get_noresume(dev); + pm_runtime_forbid(dev); +} diff --git a/drivers/accel/amdxdna/amdxdna_pm.h b/drivers/accel/amdxdna/amdxdna_pm.h new file mode 100644 index 000000000000..77b2d6e45570 --- /dev/null +++ b/drivers/accel/amdxdna/amdxdna_pm.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2025, Advanced Micro Devices, Inc. 
+ */ + +#ifndef _AMDXDNA_PM_H_ +#define _AMDXDNA_PM_H_ + +#include "amdxdna_pci_drv.h" + +int amdxdna_pm_suspend(struct device *dev); +int amdxdna_pm_resume(struct device *dev); +int amdxdna_pm_resume_get(struct amdxdna_dev *xdna); +void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna); +void amdxdna_pm_init(struct amdxdna_dev *xdna); +void amdxdna_pm_fini(struct amdxdna_dev *xdna); + +#endif /* _AMDXDNA_PM_H_ */ diff --git a/drivers/accel/amdxdna/npu1_regs.c b/drivers/accel/amdxdna/npu1_regs.c index e4f6dac7d00f..ec407f3b48fc 100644 --- a/drivers/accel/amdxdna/npu1_regs.c +++ b/drivers/accel/amdxdna/npu1_regs.c @@ -46,6 +46,7 @@ const struct rt_config npu1_default_rt_cfg[] = { { 2, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */ + { 4, 1, AIE2_RT_CFG_INIT }, /* Debug BO */ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */ { 0 }, }; @@ -62,16 +63,23 @@ const struct dpm_clk_freq npu1_dpm_clk_table[] = { { 0 } }; +static const struct aie2_fw_feature_tbl npu1_fw_feature_table[] = { + { .feature = AIE2_NPU_COMMAND, .min_minor = 8 }, + { 0 } +}; + static const struct amdxdna_dev_priv npu1_dev_priv = { .fw_path = "amdnpu/1502_00/npu.sbin", .protocol_major = 0x5, .protocol_minor = 0x7, .rt_config = npu1_default_rt_cfg, .dpm_clk_tbl = npu1_dpm_clk_table, + .fw_feature_tbl = npu1_fw_feature_table, .col_align = COL_ALIGN_NONE, .mbox_dev_addr = NPU1_MBOX_BAR_BASE, .mbox_size = 0, /* Use BAR size */ .sram_dev_addr = NPU1_SRAM_BAR_BASE, + .hwctx_limit = 6, .sram_offs = { DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU1_SRAM, MPNPU_SRAM_X2I_MAILBOX_0), DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU1_SRAM, MPNPU_SRAM_I2X_MAILBOX_15), diff --git a/drivers/accel/amdxdna/npu2_regs.c b/drivers/accel/amdxdna/npu2_regs.c index a081cac75ee0..86f87d0d1354 100644 --- a/drivers/accel/amdxdna/npu2_regs.c +++ b/drivers/accel/amdxdna/npu2_regs.c @@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu2_dev_priv = { .protocol_minor = 0x6, .rt_config = npu4_default_rt_cfg, .dpm_clk_tbl = npu4_dpm_clk_table, + .fw_feature_tbl = npu4_fw_feature_table, .col_align = COL_ALIGN_NATURE, .mbox_dev_addr = NPU2_MBOX_BAR_BASE, .mbox_size = 0, /* Use BAR size */ .sram_dev_addr = NPU2_SRAM_BAR_BASE, + .hwctx_limit = 16, .sram_offs = { DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_0), DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_15), diff --git a/drivers/accel/amdxdna/npu4_regs.c b/drivers/accel/amdxdna/npu4_regs.c index 9f2e33182ec6..986a5f28ba24 100644 --- a/drivers/accel/amdxdna/npu4_regs.c +++ b/drivers/accel/amdxdna/npu4_regs.c @@ -63,10 +63,14 @@ const struct rt_config npu4_default_rt_cfg[] = { { 5, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */ + { 10, 1, AIE2_RT_CFG_INIT }, /* DEBUG BUF */ + { 14, 0, AIE2_RT_CFG_INIT, BIT_U64(AIE2_PREEMPT) }, /* Frame boundary preemption */ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */ { 2, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */ { 3, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */ { 4, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */ + { 13, 0, AIE2_RT_CFG_FORCE_PREEMPT }, + { 14, 0, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT }, { 0 }, }; @@ -82,16 +86,24 @@ const struct dpm_clk_freq npu4_dpm_clk_table[] = { { 0 } }; +const struct aie2_fw_feature_tbl npu4_fw_feature_table[] = { + { .feature = AIE2_NPU_COMMAND, .min_minor = 15 }, + { .feature = AIE2_PREEMPT, .min_minor = 12 }, + { 0 } +}; + static const struct amdxdna_dev_priv npu4_dev_priv = { .fw_path = "amdnpu/17f0_10/npu.sbin", .protocol_major = 0x6, .protocol_minor = 12, .rt_config = 
npu4_default_rt_cfg, .dpm_clk_tbl = npu4_dpm_clk_table, + .fw_feature_tbl = npu4_fw_feature_table, .col_align = COL_ALIGN_NATURE, .mbox_dev_addr = NPU4_MBOX_BAR_BASE, .mbox_size = 0, /* Use BAR size */ .sram_dev_addr = NPU4_SRAM_BAR_BASE, + .hwctx_limit = 16, .sram_offs = { DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_0), DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_15), diff --git a/drivers/accel/amdxdna/npu5_regs.c b/drivers/accel/amdxdna/npu5_regs.c index 5f1cf83461c4..75ad97f0b937 100644 --- a/drivers/accel/amdxdna/npu5_regs.c +++ b/drivers/accel/amdxdna/npu5_regs.c @@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu5_dev_priv = { .protocol_minor = 12, .rt_config = npu4_default_rt_cfg, .dpm_clk_tbl = npu4_dpm_clk_table, + .fw_feature_tbl = npu4_fw_feature_table, .col_align = COL_ALIGN_NATURE, .mbox_dev_addr = NPU5_MBOX_BAR_BASE, .mbox_size = 0, /* Use BAR size */ .sram_dev_addr = NPU5_SRAM_BAR_BASE, + .hwctx_limit = 16, .sram_offs = { DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_0), DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_15), diff --git a/drivers/accel/amdxdna/npu6_regs.c b/drivers/accel/amdxdna/npu6_regs.c index 94a7005685a7..758dc013fe13 100644 --- a/drivers/accel/amdxdna/npu6_regs.c +++ b/drivers/accel/amdxdna/npu6_regs.c @@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu6_dev_priv = { .protocol_minor = 12, .rt_config = npu4_default_rt_cfg, .dpm_clk_tbl = npu4_dpm_clk_table, + .fw_feature_tbl = npu4_fw_feature_table, .col_align = COL_ALIGN_NATURE, .mbox_dev_addr = NPU6_MBOX_BAR_BASE, .mbox_size = 0, /* Use BAR size */ .sram_dev_addr = NPU6_SRAM_BAR_BASE, + .hwctx_limit = 16, .sram_offs = { DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_0), DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_15), diff --git a/drivers/accel/ethosu/Kconfig b/drivers/accel/ethosu/Kconfig new file mode 100644 index 000000000000..d25f9b3eb317 --- /dev/null +++ b/drivers/accel/ethosu/Kconfig @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config DRM_ACCEL_ARM_ETHOSU + tristate "Arm Ethos-U65/U85 NPU" + depends on HAS_IOMEM + depends on DRM_ACCEL + select DRM_GEM_DMA_HELPER + select DRM_SCHED + select GENERIC_ALLOCATOR + help + Enables driver for Arm Ethos-U65/U85 NPUs diff --git a/drivers/accel/ethosu/Makefile b/drivers/accel/ethosu/Makefile new file mode 100644 index 000000000000..17db5a600416 --- /dev/null +++ b/drivers/accel/ethosu/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_DRM_ACCEL_ARM_ETHOSU) := ethosu.o +ethosu-y += ethosu_drv.o ethosu_gem.o ethosu_job.o diff --git a/drivers/accel/ethosu/ethosu_device.h b/drivers/accel/ethosu/ethosu_device.h new file mode 100644 index 000000000000..b189fa783d6a --- /dev/null +++ b/drivers/accel/ethosu/ethosu_device.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: GPL-2.0-only or MIT */ +/* Copyright 2025 Arm, Ltd. 
*/ + +#ifndef __ETHOSU_DEVICE_H__ +#define __ETHOSU_DEVICE_H__ + +#include +#include +#include + +#include +#include + +#include + +struct clk; +struct gen_pool; + +#define NPU_REG_ID 0x0000 +#define NPU_REG_STATUS 0x0004 +#define NPU_REG_CMD 0x0008 +#define NPU_REG_RESET 0x000c +#define NPU_REG_QBASE 0x0010 +#define NPU_REG_QBASE_HI 0x0014 +#define NPU_REG_QREAD 0x0018 +#define NPU_REG_QCONFIG 0x001c +#define NPU_REG_QSIZE 0x0020 +#define NPU_REG_PROT 0x0024 +#define NPU_REG_CONFIG 0x0028 +#define NPU_REG_REGIONCFG 0x003c +#define NPU_REG_AXILIMIT0 0x0040 // U65 +#define NPU_REG_AXILIMIT1 0x0044 // U65 +#define NPU_REG_AXILIMIT2 0x0048 // U65 +#define NPU_REG_AXILIMIT3 0x004c // U65 +#define NPU_REG_MEM_ATTR0 0x0040 // U85 +#define NPU_REG_MEM_ATTR1 0x0044 // U85 +#define NPU_REG_MEM_ATTR2 0x0048 // U85 +#define NPU_REG_MEM_ATTR3 0x004c // U85 +#define NPU_REG_AXI_SRAM 0x0050 // U85 +#define NPU_REG_AXI_EXT 0x0054 // U85 + +#define NPU_REG_BASEP(x) (0x0080 + (x) * 8) +#define NPU_REG_BASEP_HI(x) (0x0084 + (x) * 8) +#define NPU_BASEP_REGION_MAX 8 + +#define ID_ARCH_MAJOR_MASK GENMASK(31, 28) +#define ID_ARCH_MINOR_MASK GENMASK(27, 20) +#define ID_ARCH_PATCH_MASK GENMASK(19, 16) +#define ID_VER_MAJOR_MASK GENMASK(11, 8) +#define ID_VER_MINOR_MASK GENMASK(7, 4) + +#define CONFIG_MACS_PER_CC_MASK GENMASK(3, 0) +#define CONFIG_CMD_STREAM_VER_MASK GENMASK(7, 4) + +#define STATUS_STATE_RUNNING BIT(0) +#define STATUS_IRQ_RAISED BIT(1) +#define STATUS_BUS_STATUS BIT(2) +#define STATUS_RESET_STATUS BIT(3) +#define STATUS_CMD_PARSE_ERR BIT(4) +#define STATUS_CMD_END_REACHED BIT(5) + +#define CMD_CLEAR_IRQ BIT(1) +#define CMD_TRANSITION_TO_RUN BIT(0) + +#define RESET_PENDING_CSL BIT(1) +#define RESET_PENDING_CPL BIT(0) + +#define PROT_ACTIVE_CSL BIT(1) + +enum ethosu_cmds { + NPU_OP_CONV = 0x2, + NPU_OP_DEPTHWISE = 0x3, + NPU_OP_POOL = 0x5, + NPU_OP_ELEMENTWISE = 0x6, + NPU_OP_RESIZE = 0x7, // U85 only + NPU_OP_DMA_START = 0x10, + NPU_SET_IFM_PAD_TOP = 0x100, + NPU_SET_IFM_PAD_LEFT = 0x101, + NPU_SET_IFM_PAD_RIGHT = 0x102, + NPU_SET_IFM_PAD_BOTTOM = 0x103, + NPU_SET_IFM_DEPTH_M1 = 0x104, + NPU_SET_IFM_PRECISION = 0x105, + NPU_SET_IFM_BROADCAST = 0x108, + NPU_SET_IFM_WIDTH0_M1 = 0x10a, + NPU_SET_IFM_HEIGHT0_M1 = 0x10b, + NPU_SET_IFM_HEIGHT1_M1 = 0x10c, + NPU_SET_IFM_REGION = 0x10f, + NPU_SET_OFM_WIDTH_M1 = 0x111, + NPU_SET_OFM_HEIGHT_M1 = 0x112, + NPU_SET_OFM_DEPTH_M1 = 0x113, + NPU_SET_OFM_PRECISION = 0x114, + NPU_SET_OFM_WIDTH0_M1 = 0x11a, + NPU_SET_OFM_HEIGHT0_M1 = 0x11b, + NPU_SET_OFM_HEIGHT1_M1 = 0x11c, + NPU_SET_OFM_REGION = 0x11f, + NPU_SET_KERNEL_WIDTH_M1 = 0x120, + NPU_SET_KERNEL_HEIGHT_M1 = 0x121, + NPU_SET_KERNEL_STRIDE = 0x122, + NPU_SET_WEIGHT_REGION = 0x128, + NPU_SET_SCALE_REGION = 0x129, + NPU_SET_DMA0_SRC_REGION = 0x130, + NPU_SET_DMA0_DST_REGION = 0x131, + NPU_SET_DMA0_SIZE0 = 0x132, + NPU_SET_DMA0_SIZE1 = 0x133, + NPU_SET_IFM2_BROADCAST = 0x180, + NPU_SET_IFM2_PRECISION = 0x185, + NPU_SET_IFM2_WIDTH0_M1 = 0x18a, + NPU_SET_IFM2_HEIGHT0_M1 = 0x18b, + NPU_SET_IFM2_HEIGHT1_M1 = 0x18c, + NPU_SET_IFM2_REGION = 0x18f, + NPU_SET_IFM_BASE0 = 0x4000, + NPU_SET_IFM_BASE1 = 0x4001, + NPU_SET_IFM_BASE2 = 0x4002, + NPU_SET_IFM_BASE3 = 0x4003, + NPU_SET_IFM_STRIDE_X = 0x4004, + NPU_SET_IFM_STRIDE_Y = 0x4005, + NPU_SET_IFM_STRIDE_C = 0x4006, + NPU_SET_OFM_BASE0 = 0x4010, + NPU_SET_OFM_BASE1 = 0x4011, + NPU_SET_OFM_BASE2 = 0x4012, + NPU_SET_OFM_BASE3 = 0x4013, + NPU_SET_OFM_STRIDE_X = 0x4014, + NPU_SET_OFM_STRIDE_Y = 0x4015, + NPU_SET_OFM_STRIDE_C = 0x4016, + NPU_SET_WEIGHT_BASE = 0x4020, + 
NPU_SET_WEIGHT_LENGTH = 0x4021, + NPU_SET_SCALE_BASE = 0x4022, + NPU_SET_SCALE_LENGTH = 0x4023, + NPU_SET_DMA0_SRC = 0x4030, + NPU_SET_DMA0_DST = 0x4031, + NPU_SET_DMA0_LEN = 0x4032, + NPU_SET_DMA0_SRC_STRIDE0 = 0x4033, + NPU_SET_DMA0_SRC_STRIDE1 = 0x4034, + NPU_SET_DMA0_DST_STRIDE0 = 0x4035, + NPU_SET_DMA0_DST_STRIDE1 = 0x4036, + NPU_SET_IFM2_BASE0 = 0x4080, + NPU_SET_IFM2_BASE1 = 0x4081, + NPU_SET_IFM2_BASE2 = 0x4082, + NPU_SET_IFM2_BASE3 = 0x4083, + NPU_SET_IFM2_STRIDE_X = 0x4084, + NPU_SET_IFM2_STRIDE_Y = 0x4085, + NPU_SET_IFM2_STRIDE_C = 0x4086, + NPU_SET_WEIGHT1_BASE = 0x4090, + NPU_SET_WEIGHT1_LENGTH = 0x4091, + NPU_SET_SCALE1_BASE = 0x4092, + NPU_SET_WEIGHT2_BASE = 0x4092, + NPU_SET_SCALE1_LENGTH = 0x4093, + NPU_SET_WEIGHT2_LENGTH = 0x4093, + NPU_SET_WEIGHT3_BASE = 0x4094, + NPU_SET_WEIGHT3_LENGTH = 0x4095, +}; + +#define ETHOSU_SRAM_REGION 2 /* Matching Vela compiler */ + +/** + * struct ethosu_device - Ethosu device + */ +struct ethosu_device { + /** @base: Base drm_device. */ + struct drm_device base; + + /** @iomem: CPU mapping of the registers. */ + void __iomem *regs; + + void __iomem *sram; + struct gen_pool *srampool; + dma_addr_t sramphys; + + struct clk_bulk_data *clks; + int num_clks; + int irq; + + struct drm_ethosu_npu_info npu_info; + + struct ethosu_job *in_flight_job; + /* For in_flight_job and ethosu_job_hw_submit() */ + struct mutex job_lock; + + /* For dma_fence */ + spinlock_t fence_lock; + + struct drm_gpu_scheduler sched; + /* For ethosu_job_do_push() */ + struct mutex sched_lock; + u64 fence_context; + u64 emit_seqno; +}; + +#define to_ethosu_device(drm_dev) \ + ((struct ethosu_device *)container_of(drm_dev, struct ethosu_device, base)) + +static inline bool ethosu_is_u65(const struct ethosu_device *ethosudev) +{ + return FIELD_GET(ID_ARCH_MAJOR_MASK, ethosudev->npu_info.id) == 1; +} + +#endif diff --git a/drivers/accel/ethosu/ethosu_drv.c b/drivers/accel/ethosu/ethosu_drv.c new file mode 100644 index 000000000000..e05a69bf5574 --- /dev/null +++ b/drivers/accel/ethosu/ethosu_drv.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0-only or MIT +// Copyright (C) 2025 Arm, Ltd. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ethosu_drv.h" +#include "ethosu_device.h" +#include "ethosu_gem.h" +#include "ethosu_job.h" + +static int ethosu_ioctl_dev_query(struct drm_device *ddev, void *data, + struct drm_file *file) +{ + struct ethosu_device *ethosudev = to_ethosu_device(ddev); + struct drm_ethosu_dev_query *args = data; + + if (!args->pointer) { + switch (args->type) { + case DRM_ETHOSU_DEV_QUERY_NPU_INFO: + args->size = sizeof(ethosudev->npu_info); + return 0; + default: + return -EINVAL; + } + } + + switch (args->type) { + case DRM_ETHOSU_DEV_QUERY_NPU_INFO: + if (args->size < offsetofend(struct drm_ethosu_npu_info, sram_size)) + return -EINVAL; + return copy_struct_to_user(u64_to_user_ptr(args->pointer), + args->size, + ðosudev->npu_info, + sizeof(ethosudev->npu_info), NULL); + default: + return -EINVAL; + } +} + +#define ETHOSU_BO_FLAGS DRM_ETHOSU_BO_NO_MMAP + +static int ethosu_ioctl_bo_create(struct drm_device *ddev, void *data, + struct drm_file *file) +{ + struct drm_ethosu_bo_create *args = data; + int cookie, ret; + + if (!drm_dev_enter(ddev, &cookie)) + return -ENODEV; + + if (!args->size || (args->flags & ~ETHOSU_BO_FLAGS)) { + ret = -EINVAL; + goto out_dev_exit; + } + + ret = ethosu_gem_create_with_handle(file, ddev, &args->size, + args->flags, &args->handle); + +out_dev_exit: + drm_dev_exit(cookie); + return ret; +} + +static int ethosu_ioctl_bo_wait(struct drm_device *ddev, void *data, + struct drm_file *file) +{ + struct drm_ethosu_bo_wait *args = data; + int cookie, ret; + unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns); + + if (args->pad) + return -EINVAL; + + if (!drm_dev_enter(ddev, &cookie)) + return -ENODEV; + + ret = drm_gem_dma_resv_wait(file, args->handle, true, timeout); + + drm_dev_exit(cookie); + return ret; +} + +static int ethosu_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data, + struct drm_file *file) +{ + struct drm_ethosu_bo_mmap_offset *args = data; + struct drm_gem_object *obj; + + if (args->pad) + return -EINVAL; + + obj = drm_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; + + args->offset = drm_vma_node_offset_addr(&obj->vma_node); + drm_gem_object_put(obj); + return 0; +} + +static int ethosu_ioctl_cmdstream_bo_create(struct drm_device *ddev, void *data, + struct drm_file *file) +{ + struct drm_ethosu_cmdstream_bo_create *args = data; + int cookie, ret; + + if (!drm_dev_enter(ddev, &cookie)) + return -ENODEV; + + if (!args->size || !args->data || args->pad || args->flags) { + ret = -EINVAL; + goto out_dev_exit; + } + + args->flags |= DRM_ETHOSU_BO_NO_MMAP; + + ret = ethosu_gem_cmdstream_create(file, ddev, args->size, args->data, + args->flags, &args->handle); + +out_dev_exit: + drm_dev_exit(cookie); + return ret; +} + +static int ethosu_open(struct drm_device *ddev, struct drm_file *file) +{ + int ret = 0; + + if (!try_module_get(THIS_MODULE)) + return -EINVAL; + + struct ethosu_file_priv __free(kfree) *priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err_put_mod; + } + priv->edev = to_ethosu_device(ddev); + + ret = ethosu_job_open(priv); + if (ret) + goto err_put_mod; + + file->driver_priv = no_free_ptr(priv); + return 0; + +err_put_mod: + module_put(THIS_MODULE); + return ret; +} + +static void ethosu_postclose(struct drm_device *ddev, struct drm_file *file) +{ + ethosu_job_close(file->driver_priv); + kfree(file->driver_priv); 
+ module_put(THIS_MODULE); +} + +static const struct drm_ioctl_desc ethosu_drm_driver_ioctls[] = { +#define ETHOSU_IOCTL(n, func, flags) \ + DRM_IOCTL_DEF_DRV(ETHOSU_##n, ethosu_ioctl_##func, flags) + + ETHOSU_IOCTL(DEV_QUERY, dev_query, 0), + ETHOSU_IOCTL(BO_CREATE, bo_create, 0), + ETHOSU_IOCTL(BO_WAIT, bo_wait, 0), + ETHOSU_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, 0), + ETHOSU_IOCTL(CMDSTREAM_BO_CREATE, cmdstream_bo_create, 0), + ETHOSU_IOCTL(SUBMIT, submit, 0), +}; + +DEFINE_DRM_ACCEL_FOPS(ethosu_drm_driver_fops); + +/* + * Ethosu driver version: + * - 1.0 - initial interface + */ +static const struct drm_driver ethosu_drm_driver = { + .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM, + .open = ethosu_open, + .postclose = ethosu_postclose, + .ioctls = ethosu_drm_driver_ioctls, + .num_ioctls = ARRAY_SIZE(ethosu_drm_driver_ioctls), + .fops = ðosu_drm_driver_fops, + .name = "ethosu", + .desc = "Arm Ethos-U Accel driver", + .major = 1, + .minor = 0, + + .gem_create_object = ethosu_gem_create_object, +}; + +#define U65_DRAM_AXI_LIMIT_CFG 0x1f3f0002 +#define U65_SRAM_AXI_LIMIT_CFG 0x1f3f00b0 +#define U85_AXI_EXT_CFG 0x00021f3f +#define U85_AXI_SRAM_CFG 0x00021f3f +#define U85_MEM_ATTR0_CFG 0x00000000 +#define U85_MEM_ATTR2_CFG 0x000000b7 + +static int ethosu_reset(struct ethosu_device *ethosudev) +{ + int ret; + u32 reg; + + writel_relaxed(RESET_PENDING_CSL, ethosudev->regs + NPU_REG_RESET); + ret = readl_poll_timeout(ethosudev->regs + NPU_REG_STATUS, reg, + !FIELD_GET(STATUS_RESET_STATUS, reg), + USEC_PER_MSEC, USEC_PER_SEC); + if (ret) + return ret; + + if (!FIELD_GET(PROT_ACTIVE_CSL, readl_relaxed(ethosudev->regs + NPU_REG_PROT))) { + dev_warn(ethosudev->base.dev, "Could not reset to non-secure mode (PROT = %x)\n", + readl_relaxed(ethosudev->regs + NPU_REG_PROT)); + } + + /* + * Assign region 2 (SRAM) to AXI M0 (AXILIMIT0), + * everything else to AXI M1 (AXILIMIT2) + */ + writel_relaxed(0x0000aa8a, ethosudev->regs + NPU_REG_REGIONCFG); + if (ethosu_is_u65(ethosudev)) { + writel_relaxed(U65_SRAM_AXI_LIMIT_CFG, ethosudev->regs + NPU_REG_AXILIMIT0); + writel_relaxed(U65_DRAM_AXI_LIMIT_CFG, ethosudev->regs + NPU_REG_AXILIMIT2); + } else { + writel_relaxed(U85_AXI_SRAM_CFG, ethosudev->regs + NPU_REG_AXI_SRAM); + writel_relaxed(U85_AXI_EXT_CFG, ethosudev->regs + NPU_REG_AXI_EXT); + writel_relaxed(U85_MEM_ATTR0_CFG, ethosudev->regs + NPU_REG_MEM_ATTR0); // SRAM + writel_relaxed(U85_MEM_ATTR2_CFG, ethosudev->regs + NPU_REG_MEM_ATTR2); // DRAM + } + + if (ethosudev->sram) + memset_io(ethosudev->sram, 0, ethosudev->npu_info.sram_size); + + return 0; +} + +static int ethosu_device_resume(struct device *dev) +{ + struct ethosu_device *ethosudev = dev_get_drvdata(dev); + int ret; + + ret = clk_bulk_prepare_enable(ethosudev->num_clks, ethosudev->clks); + if (ret) + return ret; + + ret = ethosu_reset(ethosudev); + if (!ret) + return 0; + + clk_bulk_disable_unprepare(ethosudev->num_clks, ethosudev->clks); + return ret; +} + +static int ethosu_device_suspend(struct device *dev) +{ + struct ethosu_device *ethosudev = dev_get_drvdata(dev); + + clk_bulk_disable_unprepare(ethosudev->num_clks, ethosudev->clks); + return 0; +} + +static int ethosu_sram_init(struct ethosu_device *ethosudev) +{ + ethosudev->npu_info.sram_size = 0; + + ethosudev->srampool = of_gen_pool_get(ethosudev->base.dev->of_node, "sram", 0); + if (!ethosudev->srampool) + return 0; + + ethosudev->npu_info.sram_size = gen_pool_size(ethosudev->srampool); + + ethosudev->sram = (void __iomem *)gen_pool_dma_alloc(ethosudev->srampool, + 
ethosudev->npu_info.sram_size, + ðosudev->sramphys); + if (!ethosudev->sram) { + dev_err(ethosudev->base.dev, "failed to allocate from SRAM pool\n"); + return -ENOMEM; + } + + return 0; +} + +static int ethosu_init(struct ethosu_device *ethosudev) +{ + int ret; + u32 id, config; + + ret = ethosu_device_resume(ethosudev->base.dev); + if (ret) + return ret; + + pm_runtime_set_autosuspend_delay(ethosudev->base.dev, 50); + pm_runtime_use_autosuspend(ethosudev->base.dev); + ret = devm_pm_runtime_set_active_enabled(ethosudev->base.dev); + if (ret) + return ret; + pm_runtime_get_noresume(ethosudev->base.dev); + + ethosudev->npu_info.id = id = readl_relaxed(ethosudev->regs + NPU_REG_ID); + ethosudev->npu_info.config = config = readl_relaxed(ethosudev->regs + NPU_REG_CONFIG); + + ethosu_sram_init(ethosudev); + + dev_info(ethosudev->base.dev, + "Ethos-U NPU, arch v%ld.%ld.%ld, rev r%ldp%ld, cmd stream ver%ld, %d MACs, %dKB SRAM\n", + FIELD_GET(ID_ARCH_MAJOR_MASK, id), + FIELD_GET(ID_ARCH_MINOR_MASK, id), + FIELD_GET(ID_ARCH_PATCH_MASK, id), + FIELD_GET(ID_VER_MAJOR_MASK, id), + FIELD_GET(ID_VER_MINOR_MASK, id), + FIELD_GET(CONFIG_CMD_STREAM_VER_MASK, config), + 1 << FIELD_GET(CONFIG_MACS_PER_CC_MASK, config), + ethosudev->npu_info.sram_size / 1024); + + return 0; +} + +static int ethosu_probe(struct platform_device *pdev) +{ + int ret; + struct ethosu_device *ethosudev; + + ethosudev = devm_drm_dev_alloc(&pdev->dev, ðosu_drm_driver, + struct ethosu_device, base); + if (IS_ERR(ethosudev)) + return -ENOMEM; + platform_set_drvdata(pdev, ethosudev); + + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + + ethosudev->regs = devm_platform_ioremap_resource(pdev, 0); + + ethosudev->num_clks = devm_clk_bulk_get_all(&pdev->dev, ðosudev->clks); + if (ethosudev->num_clks < 0) + return ethosudev->num_clks; + + ret = ethosu_job_init(ethosudev); + if (ret) + return ret; + + ret = ethosu_init(ethosudev); + if (ret) + return ret; + + ret = drm_dev_register(ðosudev->base, 0); + if (ret) + pm_runtime_dont_use_autosuspend(ethosudev->base.dev); + + pm_runtime_put_autosuspend(ethosudev->base.dev); + return ret; +} + +static void ethosu_remove(struct platform_device *pdev) +{ + struct ethosu_device *ethosudev = dev_get_drvdata(&pdev->dev); + + drm_dev_unregister(ðosudev->base); + ethosu_job_fini(ethosudev); + if (ethosudev->sram) + gen_pool_free(ethosudev->srampool, (unsigned long)ethosudev->sram, + ethosudev->npu_info.sram_size); +} + +static const struct of_device_id dt_match[] = { + { .compatible = "arm,ethos-u65" }, + { .compatible = "arm,ethos-u85" }, + {} +}; +MODULE_DEVICE_TABLE(of, dt_match); + +static DEFINE_RUNTIME_DEV_PM_OPS(ethosu_pm_ops, + ethosu_device_suspend, + ethosu_device_resume, + NULL); + +static struct platform_driver ethosu_driver = { + .probe = ethosu_probe, + .remove = ethosu_remove, + .driver = { + .name = "ethosu", + .pm = pm_ptr(ðosu_pm_ops), + .of_match_table = dt_match, + }, +}; +module_platform_driver(ethosu_driver); + +MODULE_AUTHOR("Rob Herring "); +MODULE_DESCRIPTION("Arm Ethos-U Accel Driver"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/accel/ethosu/ethosu_drv.h b/drivers/accel/ethosu/ethosu_drv.h new file mode 100644 index 000000000000..9e21dfe94184 --- /dev/null +++ b/drivers/accel/ethosu/ethosu_drv.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright 2025 Arm, Ltd. 
*/ +#ifndef __ETHOSU_DRV_H__ +#define __ETHOSU_DRV_H__ + +#include + +struct ethosu_device; + +struct ethosu_file_priv { + struct ethosu_device *edev; + struct drm_sched_entity sched_entity; +}; + +#endif diff --git a/drivers/accel/ethosu/ethosu_gem.c b/drivers/accel/ethosu/ethosu_gem.c new file mode 100644 index 000000000000..473b5f5d7514 --- /dev/null +++ b/drivers/accel/ethosu/ethosu_gem.c @@ -0,0 +1,704 @@ +// SPDX-License-Identifier: GPL-2.0-only or MIT +/* Copyright 2025 Arm, Ltd. */ + +#include +#include + +#include + +#include "ethosu_device.h" +#include "ethosu_gem.h" + +static void ethosu_gem_free_object(struct drm_gem_object *obj) +{ + struct ethosu_gem_object *bo = to_ethosu_bo(obj); + + kfree(bo->info); + drm_gem_free_mmap_offset(&bo->base.base); + drm_gem_dma_free(&bo->base); +} + +static int ethosu_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct ethosu_gem_object *bo = to_ethosu_bo(obj); + + /* Don't allow mmap on objects that have the NO_MMAP flag set. */ + if (bo->flags & DRM_ETHOSU_BO_NO_MMAP) + return -EINVAL; + + return drm_gem_dma_object_mmap(obj, vma); +} + +static const struct drm_gem_object_funcs ethosu_gem_funcs = { + .free = ethosu_gem_free_object, + .print_info = drm_gem_dma_object_print_info, + .get_sg_table = drm_gem_dma_object_get_sg_table, + .vmap = drm_gem_dma_object_vmap, + .mmap = ethosu_gem_mmap, + .vm_ops = &drm_gem_dma_vm_ops, +}; + +/** + * ethosu_gem_create_object - Implementation of driver->gem_create_object. + * @ddev: DRM device + * @size: Size in bytes of the memory the object will reference + * + * This lets the GEM helpers allocate object structs for us, and keep + * our BO stats correct. + */ +struct drm_gem_object *ethosu_gem_create_object(struct drm_device *ddev, size_t size) +{ + struct ethosu_gem_object *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return ERR_PTR(-ENOMEM); + + obj->base.base.funcs = ðosu_gem_funcs; + return &obj->base.base; +} + +/** + * ethosu_gem_create_with_handle() - Create a GEM object and attach it to a handle. + * @file: DRM file. + * @ddev: DRM device. + * @size: Size of the GEM object to allocate. + * @flags: Combination of drm_ethosu_bo_flags flags. + * @handle: Pointer holding the handle pointing to the new GEM object. + * + * Return: Zero on success + */ +int ethosu_gem_create_with_handle(struct drm_file *file, + struct drm_device *ddev, + u64 *size, u32 flags, u32 *handle) +{ + struct drm_gem_dma_object *mem; + struct ethosu_gem_object *bo; + int ret; + + mem = drm_gem_dma_create(ddev, *size); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + bo = to_ethosu_bo(&mem->base); + bo->flags = flags; + + /* + * Allocate an id of idr table where the obj is registered + * and handle has the id what user can see. + */ + ret = drm_gem_handle_create(file, &mem->base, handle); + if (!ret) + *size = bo->base.base.size; + + /* drop reference from allocate - handle holds it now. 
*/ + drm_gem_object_put(&mem->base); + + return ret; +} + +struct dma { + s8 region; + u64 len; + u64 offset; + s64 stride[2]; +}; + +struct dma_state { + u16 size0; + u16 size1; + s8 mode; + struct dma src; + struct dma dst; +}; + +struct buffer { + u64 base; + u32 length; + s8 region; +}; + +struct feat_matrix { + u64 base[4]; + s64 stride_x; + s64 stride_y; + s64 stride_c; + s8 region; + u8 broadcast; + u16 stride_kernel; + u16 precision; + u16 depth; + u16 width; + u16 width0; + u16 height[3]; + u8 pad_top; + u8 pad_left; + u8 pad_bottom; + u8 pad_right; +}; + +struct cmd_state { + struct dma_state dma; + struct buffer scale[2]; + struct buffer weight[4]; + struct feat_matrix ofm; + struct feat_matrix ifm; + struct feat_matrix ifm2; +}; + +static void cmd_state_init(struct cmd_state *st) +{ + /* Initialize to all 1s to detect missing setup */ + memset(st, 0xff, sizeof(*st)); +} + +static u64 cmd_to_addr(u32 *cmd) +{ + return ((u64)((cmd[0] & 0xff0000) << 16)) | cmd[1]; +} + +static u64 dma_length(struct ethosu_validated_cmdstream_info *info, + struct dma_state *dma_st, struct dma *dma) +{ + s8 mode = dma_st->mode; + u64 len = dma->len; + + if (mode >= 1) { + len += dma->stride[0]; + len *= dma_st->size0; + } + if (mode == 2) { + len += dma->stride[1]; + len *= dma_st->size1; + } + if (dma->region >= 0) + info->region_size[dma->region] = max(info->region_size[dma->region], + len + dma->offset); + + return len; +} + +static u64 feat_matrix_length(struct ethosu_validated_cmdstream_info *info, + struct feat_matrix *fm, + u32 x, u32 y, u32 c) +{ + u32 element_size, storage = fm->precision >> 14; + int tile = 0; + u64 addr; + + if (fm->region < 0) + return U64_MAX; + + switch (storage) { + case 0: + if (x >= fm->width0 + 1) { + x -= fm->width0 + 1; + tile += 1; + } + if (y >= fm->height[tile] + 1) { + y -= fm->height[tile] + 1; + tile += 2; + } + break; + case 1: + if (y >= fm->height[1] + 1) { + y -= fm->height[1] + 1; + tile = 2; + } else if (y >= fm->height[0] + 1) { + y -= fm->height[0] + 1; + tile = 1; + } + break; + } + if (fm->base[tile] == U64_MAX) + return U64_MAX; + + addr = fm->base[tile] + y * fm->stride_y; + + switch ((fm->precision >> 6) & 0x3) { // format + case 0: //nhwc: + addr += x * fm->stride_x + c; + break; + case 1: //nhcwb16: + element_size = BIT((fm->precision >> 1) & 0x3); + + addr += (c / 16) * fm->stride_c + (16 * x + (c & 0xf)) * element_size; + break; + } + + info->region_size[fm->region] = max(info->region_size[fm->region], addr + 1); + + return addr; +} + +static int calc_sizes(struct drm_device *ddev, + struct ethosu_validated_cmdstream_info *info, + u16 op, struct cmd_state *st, + bool ifm, bool ifm2, bool weight, bool scale) +{ + u64 len; + + if (ifm) { + if (st->ifm.stride_kernel == U16_MAX) + return -EINVAL; + u32 stride_y = ((st->ifm.stride_kernel >> 8) & 0x2) + + ((st->ifm.stride_kernel >> 1) & 0x1) + 1; + u32 stride_x = ((st->ifm.stride_kernel >> 5) & 0x2) + + (st->ifm.stride_kernel & 0x1) + 1; + u32 ifm_height = st->ofm.height[2] * stride_y + + st->ifm.height[2] - (st->ifm.pad_top + st->ifm.pad_bottom); + u32 ifm_width = st->ofm.width * stride_x + + st->ifm.width - (st->ifm.pad_left + st->ifm.pad_right); + + len = feat_matrix_length(info, &st->ifm, ifm_width, + ifm_height, st->ifm.depth); + dev_dbg(ddev->dev, "op %d: IFM:%d:0x%llx-0x%llx\n", + op, st->ifm.region, st->ifm.base[0], len); + if (len == U64_MAX) + return -EINVAL; + } + + if (ifm2) { + len = feat_matrix_length(info, &st->ifm2, st->ifm.depth, + 0, st->ofm.depth); + dev_dbg(ddev->dev, "op %d: 
IFM2:%d:0x%llx-0x%llx\n", + op, st->ifm2.region, st->ifm2.base[0], len); + if (len == U64_MAX) + return -EINVAL; + } + + if (weight) { + dev_dbg(ddev->dev, "op %d: W:%d:0x%llx-0x%llx\n", + op, st->weight[0].region, st->weight[0].base, + st->weight[0].base + st->weight[0].length - 1); + if (st->weight[0].region < 0 || st->weight[0].base == U64_MAX || + st->weight[0].length == U32_MAX) + return -EINVAL; + info->region_size[st->weight[0].region] = + max(info->region_size[st->weight[0].region], + st->weight[0].base + st->weight[0].length); + } + + if (scale) { + dev_dbg(ddev->dev, "op %d: S:%d:0x%llx-0x%llx\n", + op, st->scale[0].region, st->scale[0].base, + st->scale[0].base + st->scale[0].length - 1); + if (st->scale[0].region < 0 || st->scale[0].base == U64_MAX || + st->scale[0].length == U32_MAX) + return -EINVAL; + info->region_size[st->scale[0].region] = + max(info->region_size[st->scale[0].region], + st->scale[0].base + st->scale[0].length); + } + + len = feat_matrix_length(info, &st->ofm, st->ofm.width, + st->ofm.height[2], st->ofm.depth); + dev_dbg(ddev->dev, "op %d: OFM:%d:0x%llx-0x%llx\n", + op, st->ofm.region, st->ofm.base[0], len); + if (len == U64_MAX) + return -EINVAL; + info->output_region[st->ofm.region] = true; + + return 0; +} + +static int calc_sizes_elemwise(struct drm_device *ddev, + struct ethosu_validated_cmdstream_info *info, + u16 op, struct cmd_state *st, + bool ifm, bool ifm2) +{ + u32 height, width, depth; + u64 len; + + if (ifm) { + height = st->ifm.broadcast & 0x1 ? 0 : st->ofm.height[2]; + width = st->ifm.broadcast & 0x2 ? 0 : st->ofm.width; + depth = st->ifm.broadcast & 0x4 ? 0 : st->ofm.depth; + + len = feat_matrix_length(info, &st->ifm, width, + height, depth); + dev_dbg(ddev->dev, "op %d: IFM:%d:0x%llx-0x%llx\n", + op, st->ifm.region, st->ifm.base[0], len); + if (len == U64_MAX) + return -EINVAL; + } + + if (ifm2) { + height = st->ifm2.broadcast & 0x1 ? 0 : st->ofm.height[2]; + width = st->ifm2.broadcast & 0x2 ? 0 : st->ofm.width; + depth = st->ifm2.broadcast & 0x4 ? 
0 : st->ofm.depth; + + len = feat_matrix_length(info, &st->ifm2, width, + height, depth); + dev_dbg(ddev->dev, "op %d: IFM2:%d:0x%llx-0x%llx\n", + op, st->ifm2.region, st->ifm2.base[0], len); + if (len == U64_MAX) + return -EINVAL; + } + + len = feat_matrix_length(info, &st->ofm, st->ofm.width, + st->ofm.height[2], st->ofm.depth); + dev_dbg(ddev->dev, "op %d: OFM:%d:0x%llx-0x%llx\n", + op, st->ofm.region, st->ofm.base[0], len); + if (len == U64_MAX) + return -EINVAL; + info->output_region[st->ofm.region] = true; + + return 0; +} + +static int ethosu_gem_cmdstream_copy_and_validate(struct drm_device *ddev, + u32 __user *ucmds, + struct ethosu_gem_object *bo, + u32 size) +{ + struct ethosu_validated_cmdstream_info __free(kfree) *info = kzalloc(sizeof(*info), GFP_KERNEL); + struct ethosu_device *edev = to_ethosu_device(ddev); + u32 *bocmds = bo->base.vaddr; + struct cmd_state st; + int i, ret; + + if (!info) + return -ENOMEM; + info->cmd_size = size; + + cmd_state_init(&st); + + for (i = 0; i < size / 4; i++) { + bool use_ifm, use_ifm2, use_scale; + u64 dstlen, srclen; + u16 cmd, param; + u32 cmds[2]; + u64 addr; + + if (get_user(cmds[0], ucmds++)) + return -EFAULT; + + bocmds[i] = cmds[0]; + + cmd = cmds[0]; + param = cmds[0] >> 16; + + if (cmd & 0x4000) { + if (get_user(cmds[1], ucmds++)) + return -EFAULT; + + i++; + bocmds[i] = cmds[1]; + addr = cmd_to_addr(cmds); + } + + switch (cmd) { + case NPU_OP_DMA_START: + srclen = dma_length(info, &st.dma, &st.dma.src); + dstlen = dma_length(info, &st.dma, &st.dma.dst); + + if (st.dma.dst.region >= 0) + info->output_region[st.dma.dst.region] = true; + dev_dbg(ddev->dev, "cmd: DMA SRC:%d:0x%llx+0x%llx DST:%d:0x%llx+0x%llx\n", + st.dma.src.region, st.dma.src.offset, srclen, + st.dma.dst.region, st.dma.dst.offset, dstlen); + break; + case NPU_OP_CONV: + case NPU_OP_DEPTHWISE: + use_ifm2 = param & 0x1; // weights_ifm2 + use_scale = !(st.ofm.precision & 0x100); + ret = calc_sizes(ddev, info, cmd, &st, true, use_ifm2, + !use_ifm2, use_scale); + if (ret) + return ret; + break; + case NPU_OP_POOL: + use_ifm = param != 0x4; // pooling mode + use_scale = !(st.ofm.precision & 0x100); + ret = calc_sizes(ddev, info, cmd, &st, use_ifm, false, + false, use_scale); + if (ret) + return ret; + break; + case NPU_OP_ELEMENTWISE: + use_ifm2 = !((st.ifm2.broadcast == 8) || (param == 5) || + (param == 6) || (param == 7) || (param == 0x24)); + use_ifm = st.ifm.broadcast != 8; + ret = calc_sizes_elemwise(ddev, info, cmd, &st, use_ifm, use_ifm2); + if (ret) + return ret; + break; + case NPU_OP_RESIZE: // U85 only + WARN_ON(1); // TODO + break; + case NPU_SET_KERNEL_WIDTH_M1: + st.ifm.width = param; + break; + case NPU_SET_KERNEL_HEIGHT_M1: + st.ifm.height[2] = param; + break; + case NPU_SET_KERNEL_STRIDE: + st.ifm.stride_kernel = param; + break; + case NPU_SET_IFM_PAD_TOP: + st.ifm.pad_top = param & 0x7f; + break; + case NPU_SET_IFM_PAD_LEFT: + st.ifm.pad_left = param & 0x7f; + break; + case NPU_SET_IFM_PAD_RIGHT: + st.ifm.pad_right = param & 0xff; + break; + case NPU_SET_IFM_PAD_BOTTOM: + st.ifm.pad_bottom = param & 0xff; + break; + case NPU_SET_IFM_DEPTH_M1: + st.ifm.depth = param; + break; + case NPU_SET_IFM_PRECISION: + st.ifm.precision = param; + break; + case NPU_SET_IFM_BROADCAST: + st.ifm.broadcast = param; + break; + case NPU_SET_IFM_REGION: + st.ifm.region = param & 0x7f; + break; + case NPU_SET_IFM_WIDTH0_M1: + st.ifm.width0 = param; + break; + case NPU_SET_IFM_HEIGHT0_M1: + st.ifm.height[0] = param; + break; + case NPU_SET_IFM_HEIGHT1_M1: + st.ifm.height[1] = 
param; + break; + case NPU_SET_IFM_BASE0: + case NPU_SET_IFM_BASE1: + case NPU_SET_IFM_BASE2: + case NPU_SET_IFM_BASE3: + st.ifm.base[cmd & 0x3] = addr; + break; + case NPU_SET_IFM_STRIDE_X: + st.ifm.stride_x = addr; + break; + case NPU_SET_IFM_STRIDE_Y: + st.ifm.stride_y = addr; + break; + case NPU_SET_IFM_STRIDE_C: + st.ifm.stride_c = addr; + break; + + case NPU_SET_OFM_WIDTH_M1: + st.ofm.width = param; + break; + case NPU_SET_OFM_HEIGHT_M1: + st.ofm.height[2] = param; + break; + case NPU_SET_OFM_DEPTH_M1: + st.ofm.depth = param; + break; + case NPU_SET_OFM_PRECISION: + st.ofm.precision = param; + break; + case NPU_SET_OFM_REGION: + st.ofm.region = param & 0x7; + break; + case NPU_SET_OFM_WIDTH0_M1: + st.ofm.width0 = param; + break; + case NPU_SET_OFM_HEIGHT0_M1: + st.ofm.height[0] = param; + break; + case NPU_SET_OFM_HEIGHT1_M1: + st.ofm.height[1] = param; + break; + case NPU_SET_OFM_BASE0: + case NPU_SET_OFM_BASE1: + case NPU_SET_OFM_BASE2: + case NPU_SET_OFM_BASE3: + st.ofm.base[cmd & 0x3] = addr; + break; + case NPU_SET_OFM_STRIDE_X: + st.ofm.stride_x = addr; + break; + case NPU_SET_OFM_STRIDE_Y: + st.ofm.stride_y = addr; + break; + case NPU_SET_OFM_STRIDE_C: + st.ofm.stride_c = addr; + break; + + case NPU_SET_IFM2_BROADCAST: + st.ifm2.broadcast = param; + break; + case NPU_SET_IFM2_PRECISION: + st.ifm2.precision = param; + break; + case NPU_SET_IFM2_REGION: + st.ifm2.region = param & 0x7; + break; + case NPU_SET_IFM2_WIDTH0_M1: + st.ifm2.width0 = param; + break; + case NPU_SET_IFM2_HEIGHT0_M1: + st.ifm2.height[0] = param; + break; + case NPU_SET_IFM2_HEIGHT1_M1: + st.ifm2.height[1] = param; + break; + case NPU_SET_IFM2_BASE0: + case NPU_SET_IFM2_BASE1: + case NPU_SET_IFM2_BASE2: + case NPU_SET_IFM2_BASE3: + st.ifm2.base[cmd & 0x3] = addr; + break; + case NPU_SET_IFM2_STRIDE_X: + st.ifm2.stride_x = addr; + break; + case NPU_SET_IFM2_STRIDE_Y: + st.ifm2.stride_y = addr; + break; + case NPU_SET_IFM2_STRIDE_C: + st.ifm2.stride_c = addr; + break; + + case NPU_SET_WEIGHT_REGION: + st.weight[0].region = param & 0x7; + break; + case NPU_SET_SCALE_REGION: + st.scale[0].region = param & 0x7; + break; + case NPU_SET_WEIGHT_BASE: + st.weight[0].base = addr; + break; + case NPU_SET_WEIGHT_LENGTH: + st.weight[0].length = cmds[1]; + break; + case NPU_SET_SCALE_BASE: + st.scale[0].base = addr; + break; + case NPU_SET_SCALE_LENGTH: + st.scale[0].length = cmds[1]; + break; + case NPU_SET_WEIGHT1_BASE: + st.weight[1].base = addr; + break; + case NPU_SET_WEIGHT1_LENGTH: + st.weight[1].length = cmds[1]; + break; + case NPU_SET_SCALE1_BASE: // NPU_SET_WEIGHT2_BASE (U85) + if (ethosu_is_u65(edev)) + st.scale[1].base = addr; + else + st.weight[2].base = addr; + break; + case NPU_SET_SCALE1_LENGTH: // NPU_SET_WEIGHT2_LENGTH (U85) + if (ethosu_is_u65(edev)) + st.scale[1].length = cmds[1]; + else + st.weight[1].length = cmds[1]; + break; + case NPU_SET_WEIGHT3_BASE: + st.weight[3].base = addr; + break; + case NPU_SET_WEIGHT3_LENGTH: + st.weight[3].length = cmds[1]; + break; + + case NPU_SET_DMA0_SRC_REGION: + if (param & 0x100) + st.dma.src.region = -1; + else + st.dma.src.region = param & 0x7; + st.dma.mode = (param >> 9) & 0x3; + break; + case NPU_SET_DMA0_DST_REGION: + if (param & 0x100) + st.dma.dst.region = -1; + else + st.dma.dst.region = param & 0x7; + break; + case NPU_SET_DMA0_SIZE0: + st.dma.size0 = param; + break; + case NPU_SET_DMA0_SIZE1: + st.dma.size1 = param; + break; + case NPU_SET_DMA0_SRC_STRIDE0: + st.dma.src.stride[0] = ((s64)addr << 24) >> 24; + break; + case NPU_SET_DMA0_SRC_STRIDE1: + 
st.dma.src.stride[1] = ((s64)addr << 24) >> 24; + break; + case NPU_SET_DMA0_DST_STRIDE0: + st.dma.dst.stride[0] = ((s64)addr << 24) >> 24; + break; + case NPU_SET_DMA0_DST_STRIDE1: + st.dma.dst.stride[1] = ((s64)addr << 24) >> 24; + break; + case NPU_SET_DMA0_SRC: + st.dma.src.offset = addr; + break; + case NPU_SET_DMA0_DST: + st.dma.dst.offset = addr; + break; + case NPU_SET_DMA0_LEN: + st.dma.src.len = st.dma.dst.len = addr; + break; + default: + break; + } + } + + for (i = 0; i < NPU_BASEP_REGION_MAX; i++) { + if (!info->region_size[i]) + continue; + dev_dbg(ddev->dev, "region %d max size: 0x%llx\n", + i, info->region_size[i]); + } + + bo->info = no_free_ptr(info); + return 0; +} + +/** + * ethosu_gem_cmdstream_create() - Create a GEM object and attach it to a handle. + * @file: DRM file. + * @ddev: DRM device. + * @data: User pointer to the command stream contents to copy and validate. + * @size: Size of the GEM object to allocate. + * @flags: Combination of drm_ethosu_bo_flags flags. + * @handle: Pointer holding the handle pointing to the new GEM object. + * + * Return: 0 on success or a negative error code on failure. + */ +int ethosu_gem_cmdstream_create(struct drm_file *file, + struct drm_device *ddev, + u32 size, u64 data, u32 flags, u32 *handle) +{ + int ret; + struct drm_gem_dma_object *mem; + struct ethosu_gem_object *bo; + + mem = drm_gem_dma_create(ddev, size); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + bo = to_ethosu_bo(&mem->base); + bo->flags = flags; + + ret = ethosu_gem_cmdstream_copy_and_validate(ddev, + (void __user *)(uintptr_t)data, + bo, size); + if (ret) + goto fail; + + /* + * Allocate an ID in the handle IDR where the object is registered; + * the returned handle is what userspace uses to refer to the object. + */ + ret = drm_gem_handle_create(file, &mem->base, handle); + +fail: + /* drop reference from allocate - handle holds it now. */ + drm_gem_object_put(&mem->base); + + return ret; +} diff --git a/drivers/accel/ethosu/ethosu_gem.h b/drivers/accel/ethosu/ethosu_gem.h new file mode 100644 index 000000000000..3922895a60fb --- /dev/null +++ b/drivers/accel/ethosu/ethosu_gem.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ +/* Copyright 2025 Arm, Ltd. */ + +#ifndef __ETHOSU_GEM_H__ +#define __ETHOSU_GEM_H__ + +#include "ethosu_device.h" +#include + +struct ethosu_validated_cmdstream_info { + u32 cmd_size; + u64 region_size[NPU_BASEP_REGION_MAX]; + bool output_region[NPU_BASEP_REGION_MAX]; +}; + +/** + * struct ethosu_gem_object - Driver specific GEM object. + */ +struct ethosu_gem_object { + /** @base: Inherit from drm_gem_dma_object. */ + struct drm_gem_dma_object base; + + struct ethosu_validated_cmdstream_info *info; + + /** @flags: Combination of drm_ethosu_bo_flags flags. 
*/ + u32 flags; +}; + +static inline +struct ethosu_gem_object *to_ethosu_bo(struct drm_gem_object *obj) +{ + return container_of(to_drm_gem_dma_obj(obj), struct ethosu_gem_object, base); +} + +struct drm_gem_object *ethosu_gem_create_object(struct drm_device *ddev, + size_t size); + +int ethosu_gem_create_with_handle(struct drm_file *file, + struct drm_device *ddev, + u64 *size, u32 flags, uint32_t *handle); + +int ethosu_gem_cmdstream_create(struct drm_file *file, + struct drm_device *ddev, + u32 size, u64 data, u32 flags, u32 *handle); + +#endif /* __ETHOSU_GEM_H__ */ diff --git a/drivers/accel/ethosu/ethosu_job.c b/drivers/accel/ethosu/ethosu_job.c new file mode 100644 index 000000000000..26e7a2f64d71 --- /dev/null +++ b/drivers/accel/ethosu/ethosu_job.c @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright 2024-2025 Tomeu Vizoso */ +/* Copyright 2025 Arm, Ltd. */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "ethosu_device.h" +#include "ethosu_drv.h" +#include "ethosu_gem.h" +#include "ethosu_job.h" + +#define JOB_TIMEOUT_MS 500 + +static struct ethosu_job *to_ethosu_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct ethosu_job, base); +} + +static const char *ethosu_fence_get_driver_name(struct dma_fence *fence) +{ + return "ethosu"; +} + +static const char *ethosu_fence_get_timeline_name(struct dma_fence *fence) +{ + return "ethosu-npu"; +} + +static const struct dma_fence_ops ethosu_fence_ops = { + .get_driver_name = ethosu_fence_get_driver_name, + .get_timeline_name = ethosu_fence_get_timeline_name, +}; + +static void ethosu_job_hw_submit(struct ethosu_device *dev, struct ethosu_job *job) +{ + struct drm_gem_dma_object *cmd_bo = to_drm_gem_dma_obj(job->cmd_bo); + struct ethosu_validated_cmdstream_info *cmd_info = to_ethosu_bo(job->cmd_bo)->info; + + for (int i = 0; i < job->region_cnt; i++) { + struct drm_gem_dma_object *bo; + int region = job->region_bo_num[i]; + + bo = to_drm_gem_dma_obj(job->region_bo[i]); + writel_relaxed(lower_32_bits(bo->dma_addr), dev->regs + NPU_REG_BASEP(region)); + writel_relaxed(upper_32_bits(bo->dma_addr), dev->regs + NPU_REG_BASEP_HI(region)); + dev_dbg(dev->base.dev, "Region %d base addr = %pad\n", region, &bo->dma_addr); + } + + if (job->sram_size) { + writel_relaxed(lower_32_bits(dev->sramphys), + dev->regs + NPU_REG_BASEP(ETHOSU_SRAM_REGION)); + writel_relaxed(upper_32_bits(dev->sramphys), + dev->regs + NPU_REG_BASEP_HI(ETHOSU_SRAM_REGION)); + dev_dbg(dev->base.dev, "Region %d base addr = %pad (SRAM)\n", + ETHOSU_SRAM_REGION, &dev->sramphys); + } + + writel_relaxed(lower_32_bits(cmd_bo->dma_addr), dev->regs + NPU_REG_QBASE); + writel_relaxed(upper_32_bits(cmd_bo->dma_addr), dev->regs + NPU_REG_QBASE_HI); + writel_relaxed(cmd_info->cmd_size, dev->regs + NPU_REG_QSIZE); + + writel(CMD_TRANSITION_TO_RUN, dev->regs + NPU_REG_CMD); + + dev_dbg(dev->base.dev, + "Submitted cmd at %pad to core\n", &cmd_bo->dma_addr); +} + +static int ethosu_acquire_object_fences(struct ethosu_job *job) +{ + int i, ret; + struct drm_gem_object **bos = job->region_bo; + struct ethosu_validated_cmdstream_info *info = to_ethosu_bo(job->cmd_bo)->info; + + for (i = 0; i < job->region_cnt; i++) { + bool is_write; + + if (!bos[i]) + break; + + ret = dma_resv_reserve_fences(bos[i]->resv, 1); + if (ret) + return ret; + + is_write = info->output_region[job->region_bo_num[i]]; + ret = drm_sched_job_add_implicit_dependencies(&job->base, bos[i], + 
is_write); + if (ret) + return ret; + } + + return 0; +} + +static void ethosu_attach_object_fences(struct ethosu_job *job) +{ + int i; + struct dma_fence *fence = job->inference_done_fence; + struct drm_gem_object **bos = job->region_bo; + struct ethosu_validated_cmdstream_info *info = to_ethosu_bo(job->cmd_bo)->info; + + for (i = 0; i < job->region_cnt; i++) + if (info->output_region[job->region_bo_num[i]]) + dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE); +} + +static int ethosu_job_push(struct ethosu_job *job) +{ + struct ww_acquire_ctx acquire_ctx; + int ret; + + ret = drm_gem_lock_reservations(job->region_bo, job->region_cnt, &acquire_ctx); + if (ret) + return ret; + + ret = ethosu_acquire_object_fences(job); + if (ret) + goto out; + + ret = pm_runtime_resume_and_get(job->dev->base.dev); + if (!ret) { + guard(mutex)(&job->dev->sched_lock); + + drm_sched_job_arm(&job->base); + job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished); + kref_get(&job->refcount); /* put by scheduler job completion */ + drm_sched_entity_push_job(&job->base); + ethosu_attach_object_fences(job); + } + +out: + drm_gem_unlock_reservations(job->region_bo, job->region_cnt, &acquire_ctx); + return ret; +} + +static void ethosu_job_cleanup(struct kref *ref) +{ + struct ethosu_job *job = container_of(ref, struct ethosu_job, + refcount); + unsigned int i; + + pm_runtime_put_autosuspend(job->dev->base.dev); + + dma_fence_put(job->done_fence); + dma_fence_put(job->inference_done_fence); + + for (i = 0; i < job->region_cnt; i++) + drm_gem_object_put(job->region_bo[i]); + + drm_gem_object_put(job->cmd_bo); + + kfree(job); +} + +static void ethosu_job_put(struct ethosu_job *job) +{ + kref_put(&job->refcount, ethosu_job_cleanup); +} + +static void ethosu_job_free(struct drm_sched_job *sched_job) +{ + struct ethosu_job *job = to_ethosu_job(sched_job); + + drm_sched_job_cleanup(sched_job); + ethosu_job_put(job); +} + +static struct dma_fence *ethosu_job_run(struct drm_sched_job *sched_job) +{ + struct ethosu_job *job = to_ethosu_job(sched_job); + struct ethosu_device *dev = job->dev; + struct dma_fence *fence = job->done_fence; + + if (unlikely(job->base.s_fence->finished.error)) + return NULL; + + dma_fence_init(fence, ðosu_fence_ops, &dev->fence_lock, + dev->fence_context, ++dev->emit_seqno); + dma_fence_get(fence); + + scoped_guard(mutex, &dev->job_lock) { + dev->in_flight_job = job; + ethosu_job_hw_submit(dev, job); + } + + return fence; +} + +static void ethosu_job_handle_irq(struct ethosu_device *dev) +{ + u32 status = readl_relaxed(dev->regs + NPU_REG_STATUS); + + if (status & (STATUS_BUS_STATUS | STATUS_CMD_PARSE_ERR)) { + dev_err(dev->base.dev, "Error IRQ - %x\n", status); + drm_sched_fault(&dev->sched); + return; + } + + scoped_guard(mutex, &dev->job_lock) { + if (dev->in_flight_job) { + dma_fence_signal(dev->in_flight_job->done_fence); + dev->in_flight_job = NULL; + } + } +} + +static irqreturn_t ethosu_job_irq_handler_thread(int irq, void *data) +{ + struct ethosu_device *dev = data; + + ethosu_job_handle_irq(dev); + + return IRQ_HANDLED; +} + +static irqreturn_t ethosu_job_irq_handler(int irq, void *data) +{ + struct ethosu_device *dev = data; + u32 status = readl_relaxed(dev->regs + NPU_REG_STATUS); + + if (!(status & STATUS_IRQ_RAISED)) + return IRQ_NONE; + + writel_relaxed(CMD_CLEAR_IRQ, dev->regs + NPU_REG_CMD); + return IRQ_WAKE_THREAD; +} + +static enum drm_gpu_sched_stat ethosu_job_timedout(struct drm_sched_job *bad) +{ + struct ethosu_job *job = to_ethosu_job(bad); + 
struct ethosu_device *dev = job->dev; + bool running; + u32 *bocmds = to_drm_gem_dma_obj(job->cmd_bo)->vaddr; + u32 cmdaddr; + + cmdaddr = readl_relaxed(dev->regs + NPU_REG_QREAD); + running = FIELD_GET(STATUS_STATE_RUNNING, readl_relaxed(dev->regs + NPU_REG_STATUS)); + + if (running) { + int ret; + u32 reg; + + ret = readl_relaxed_poll_timeout(dev->regs + NPU_REG_QREAD, + reg, + reg != cmdaddr, + USEC_PER_MSEC, 100 * USEC_PER_MSEC); + + /* If still running and progress is being made, just return */ + if (!ret) + return DRM_GPU_SCHED_STAT_NO_HANG; + } + + dev_err(dev->base.dev, "NPU sched timed out: NPU %s, cmdstream offset 0x%x: 0x%x\n", + running ? "running" : "stopped", + cmdaddr, bocmds[cmdaddr / 4]); + + drm_sched_stop(&dev->sched, bad); + + scoped_guard(mutex, &dev->job_lock) + dev->in_flight_job = NULL; + + /* Proceed with reset now. */ + pm_runtime_force_suspend(dev->base.dev); + pm_runtime_force_resume(dev->base.dev); + + /* Restart the scheduler */ + drm_sched_start(&dev->sched, 0); + + return DRM_GPU_SCHED_STAT_RESET; +} + +static const struct drm_sched_backend_ops ethosu_sched_ops = { + .run_job = ethosu_job_run, + .timedout_job = ethosu_job_timedout, + .free_job = ethosu_job_free +}; + +int ethosu_job_init(struct ethosu_device *edev) +{ + struct device *dev = edev->base.dev; + struct drm_sched_init_args args = { + .ops = ðosu_sched_ops, + .num_rqs = DRM_SCHED_PRIORITY_COUNT, + .credit_limit = 1, + .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), + .name = dev_name(dev), + .dev = dev, + }; + int ret; + + spin_lock_init(&edev->fence_lock); + ret = devm_mutex_init(dev, &edev->job_lock); + if (ret) + return ret; + ret = devm_mutex_init(dev, &edev->sched_lock); + if (ret) + return ret; + + edev->irq = platform_get_irq(to_platform_device(dev), 0); + if (edev->irq < 0) + return edev->irq; + + ret = devm_request_threaded_irq(dev, edev->irq, + ethosu_job_irq_handler, + ethosu_job_irq_handler_thread, + IRQF_SHARED, KBUILD_MODNAME, + edev); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + edev->fence_context = dma_fence_context_alloc(1); + + ret = drm_sched_init(&edev->sched, &args); + if (ret) { + dev_err(dev, "Failed to create scheduler: %d\n", ret); + goto err_sched; + } + + return 0; + +err_sched: + drm_sched_fini(&edev->sched); + return ret; +} + +void ethosu_job_fini(struct ethosu_device *dev) +{ + drm_sched_fini(&dev->sched); +} + +int ethosu_job_open(struct ethosu_file_priv *ethosu_priv) +{ + struct ethosu_device *dev = ethosu_priv->edev; + struct drm_gpu_scheduler *sched = &dev->sched; + int ret; + + ret = drm_sched_entity_init(ðosu_priv->sched_entity, + DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, NULL); + return WARN_ON(ret); +} + +void ethosu_job_close(struct ethosu_file_priv *ethosu_priv) +{ + struct drm_sched_entity *entity = ðosu_priv->sched_entity; + + drm_sched_entity_destroy(entity); +} + +static int ethosu_ioctl_submit_job(struct drm_device *dev, struct drm_file *file, + struct drm_ethosu_job *job) +{ + struct ethosu_device *edev = to_ethosu_device(dev); + struct ethosu_file_priv *file_priv = file->driver_priv; + struct ethosu_job *ejob = NULL; + struct ethosu_validated_cmdstream_info *cmd_info; + int ret = 0; + + /* BO region 2 is reserved if SRAM is used */ + if (job->region_bo_handles[ETHOSU_SRAM_REGION] && job->sram_size) + return -EINVAL; + + if (edev->npu_info.sram_size < job->sram_size) + return -EINVAL; + + ejob = kzalloc(sizeof(*ejob), GFP_KERNEL); + if (!ejob) + return -ENOMEM; + + kref_init(&ejob->refcount); + + ejob->dev = edev; + 
ejob->sram_size = job->sram_size; + + ejob->done_fence = kzalloc(sizeof(*ejob->done_fence), GFP_KERNEL); + if (!ejob->done_fence) { + ret = -ENOMEM; + goto out_put_job; + } + + ret = drm_sched_job_init(&ejob->base, + &file_priv->sched_entity, + 1, NULL, file->client_id); + if (ret) + goto out_put_job; + + ejob->cmd_bo = drm_gem_object_lookup(file, job->cmd_bo); + if (!ejob->cmd_bo) { + ret = -ENOENT; + goto out_cleanup_job; + } + cmd_info = to_ethosu_bo(ejob->cmd_bo)->info; + if (!cmd_info) { + ret = -EINVAL; + goto out_cleanup_job; + } + + for (int i = 0; i < NPU_BASEP_REGION_MAX; i++) { + struct drm_gem_object *gem; + + /* Can only omit a BO handle if the region is not used or used for SRAM */ + if (!job->region_bo_handles[i] && + (!cmd_info->region_size[i] || (i == ETHOSU_SRAM_REGION && job->sram_size))) + continue; + + if (job->region_bo_handles[i] && !cmd_info->region_size[i]) { + dev_err(dev->dev, + "Cmdstream BO handle %d set for unused region %d\n", + job->region_bo_handles[i], i); + ret = -EINVAL; + goto out_cleanup_job; + } + + gem = drm_gem_object_lookup(file, job->region_bo_handles[i]); + if (!gem) { + dev_err(dev->dev, + "Invalid BO handle %d for region %d\n", + job->region_bo_handles[i], i); + ret = -ENOENT; + goto out_cleanup_job; + } + + ejob->region_bo[ejob->region_cnt] = gem; + ejob->region_bo_num[ejob->region_cnt] = i; + ejob->region_cnt++; + + if (to_ethosu_bo(gem)->info) { + dev_err(dev->dev, + "Cmdstream BO handle %d used for region %d\n", + job->region_bo_handles[i], i); + ret = -EINVAL; + goto out_cleanup_job; + } + + /* Verify the command stream doesn't have accesses outside the BO */ + if (cmd_info->region_size[i] > gem->size) { + dev_err(dev->dev, + "cmd stream region %d size greater than BO size (%llu > %zu)\n", + i, cmd_info->region_size[i], gem->size); + ret = -EOVERFLOW; + goto out_cleanup_job; + } + } + ret = ethosu_job_push(ejob); + +out_cleanup_job: + if (ret) + drm_sched_job_cleanup(&ejob->base); +out_put_job: + ethosu_job_put(ejob); + + return ret; +} + +int ethosu_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_ethosu_submit *args = data; + int ret = 0; + unsigned int i = 0; + + if (args->pad) { + drm_dbg(dev, "Reserved field in drm_ethosu_submit struct should be 0.\n"); + return -EINVAL; + } + + struct drm_ethosu_job __free(kvfree) *jobs = + kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL); + if (!jobs) + return -ENOMEM; + + if (copy_from_user(jobs, + (void __user *)(uintptr_t)args->jobs, + args->job_count * sizeof(*jobs))) { + drm_dbg(dev, "Failed to copy incoming job array\n"); + return -EFAULT; + } + + for (i = 0; i < args->job_count; i++) { + ret = ethosu_ioctl_submit_job(dev, file, &jobs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/accel/ethosu/ethosu_job.h b/drivers/accel/ethosu/ethosu_job.h new file mode 100644 index 000000000000..ff1cf448d094 --- /dev/null +++ b/drivers/accel/ethosu/ethosu_job.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright 2024-2025 Tomeu Vizoso */ +/* Copyright 2025 Arm, Ltd. 
*/ + +#ifndef __ETHOSU_JOB_H__ +#define __ETHOSU_JOB_H__ + +#include +#include + +struct ethosu_device; +struct ethosu_file_priv; + +struct ethosu_job { + struct drm_sched_job base; + struct ethosu_device *dev; + + struct drm_gem_object *cmd_bo; + struct drm_gem_object *region_bo[NPU_BASEP_REGION_MAX]; + u8 region_bo_num[NPU_BASEP_REGION_MAX]; + u8 region_cnt; + u32 sram_size; + + /* Fence to be signaled by drm-sched once its done with the job */ + struct dma_fence *inference_done_fence; + + /* Fence to be signaled by IRQ handler when the job is complete. */ + struct dma_fence *done_fence; + + struct kref refcount; +}; + +int ethosu_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file); + +int ethosu_job_init(struct ethosu_device *dev); +void ethosu_job_fini(struct ethosu_device *dev); +int ethosu_job_open(struct ethosu_file_priv *ethosu_priv); +void ethosu_job_close(struct ethosu_file_priv *ethosu_priv); + +#endif diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile index 1029e0bab061..dbf76b8a5b4c 100644 --- a/drivers/accel/ivpu/Makefile +++ b/drivers/accel/ivpu/Makefile @@ -6,6 +6,7 @@ intel_vpu-y := \ ivpu_fw.o \ ivpu_fw_log.o \ ivpu_gem.o \ + ivpu_gem_userptr.o \ ivpu_hw.o \ ivpu_hw_btrs.o \ ivpu_hw_ip.o \ diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index cd24ccd20ba6..3bd85ee6c26b 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -398,35 +398,25 @@ static int dct_active_set(void *data, u64 active_percent) DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n"); +static void print_priority_band(struct seq_file *s, struct ivpu_hw_info *hw, + int band, const char *name) +{ + seq_printf(s, "%-9s: grace_period %9u process_grace_period %9u process_quantum %9u\n", + name, + hw->hws.grace_period[band], + hw->hws.process_grace_period[band], + hw->hws.process_quantum[band]); +} + static int priority_bands_show(struct seq_file *s, void *v) { struct ivpu_device *vdev = s->private; struct ivpu_hw_info *hw = vdev->hw; - for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE; - band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) { - switch (band) { - case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE: - seq_puts(s, "Idle: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL: - seq_puts(s, "Normal: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS: - seq_puts(s, "Focus: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME: - seq_puts(s, "Realtime: "); - break; - } - - seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n", - hw->hws.grace_period[band], hw->hws.process_grace_period[band], - hw->hws.process_quantum[band]); - } + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE, "Idle"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL, "Normal"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS, "Focus"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime"); return 0; } diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 3289751b4757..3d6fccdefdd6 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -57,7 +57,7 @@ MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency"); int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO; module_param_named(sched_mode, ivpu_sched_mode, int, 0444); -MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - 
Use OS scheduler, 1 - Use HW scheduler"); +MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler (supported on 27XX - 50XX), 1 - Use HW scheduler"); bool ivpu_disable_mmu_cont_pages; module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444); @@ -134,6 +134,8 @@ bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability) return true; case DRM_IVPU_CAP_DMA_MEMORY_RANGE: return true; + case DRM_IVPU_CAP_BO_CREATE_FROM_USERPTR: + return true; case DRM_IVPU_CAP_MANAGE_CMDQ: return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW; default: @@ -200,6 +202,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f case DRM_IVPU_PARAM_CAPABILITIES: args->value = ivpu_is_capable(vdev, args->index); break; + case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE: + args->value = ivpu_fw_preempt_buf_size(vdev); + break; default: ret = -EINVAL; break; @@ -310,6 +315,7 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0), DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0), DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE_FROM_USERPTR, ivpu_bo_create_from_userptr_ioctl, 0), }; static int ivpu_wait_for_ready(struct ivpu_device *vdev) @@ -377,8 +383,7 @@ int ivpu_boot(struct ivpu_device *vdev) drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter)); drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa)); - /* Update boot params located at first 4KB of FW memory */ - ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem)); + ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp)); ret = ivpu_hw_boot_fw(vdev); if (ret) { @@ -450,6 +455,9 @@ int ivpu_shutdown(struct ivpu_device *vdev) static const struct file_operations ivpu_fops = { .owner = THIS_MODULE, DRM_ACCEL_FOPS, +#ifdef CONFIG_PROC_FS + .show_fdinfo = drm_show_fdinfo, +#endif }; static const struct drm_driver driver = { @@ -464,6 +472,9 @@ static const struct drm_driver driver = { .ioctls = ivpu_drm_ioctls, .num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls), .fops = &ivpu_fops, +#ifdef CONFIG_PROC_FS + .show_fdinfo = drm_show_memory_stats, +#endif .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -705,6 +716,7 @@ static struct pci_device_id ivpu_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) }, { } }; MODULE_DEVICE_TABLE(pci, ivpu_pci_ids); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 62ab1c654e63..5b34b6f50e69 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -27,6 +27,7 @@ #define PCI_DEVICE_ID_LNL 0x643e #define PCI_DEVICE_ID_PTL_P 0xb03e #define PCI_DEVICE_ID_WCL 0xfd3e +#define PCI_DEVICE_ID_NVL 0xd71d #define IVPU_HW_IP_37XX 37 #define IVPU_HW_IP_40XX 40 @@ -78,6 +79,7 @@ #define IVPU_DBG_KREF BIT(11) #define IVPU_DBG_RPM BIT(12) #define IVPU_DBG_MMU_MAP BIT(13) +#define IVPU_DBG_IOCTL BIT(14) #define ivpu_err(vdev, fmt, ...) 
\ drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__) @@ -245,6 +247,8 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev) case PCI_DEVICE_ID_PTL_P: case PCI_DEVICE_ID_WCL: return IVPU_HW_IP_50XX; + case PCI_DEVICE_ID_NVL: + return IVPU_HW_IP_60XX; default: dump_stack(); ivpu_err(vdev, "Unknown NPU IP generation\n"); @@ -261,6 +265,7 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev) case PCI_DEVICE_ID_LNL: case PCI_DEVICE_ID_PTL_P: case PCI_DEVICE_ID_WCL: + case PCI_DEVICE_ID_NVL: return IVPU_HW_BTRS_LNL; default: dump_stack(); diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 9db741695401..48386d2cddbb 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -17,15 +17,10 @@ #include "ivpu_ipc.h" #include "ivpu_pm.h" -#define FW_GLOBAL_MEM_START (2ull * SZ_1G) -#define FW_GLOBAL_MEM_END (3ull * SZ_1G) -#define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */ -#define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */ -#define FW_RUNTIME_MAX_SIZE SZ_512M #define FW_SHAVE_NN_MAX_SIZE SZ_2M -#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START) -#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE) #define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE) +#define FW_PREEMPT_BUF_MIN_SIZE SZ_4K +#define FW_PREEMPT_BUF_MAX_SIZE SZ_32M #define WATCHDOG_MSS_REDIRECT 32 #define WATCHDOG_NCE_REDIRECT 33 @@ -61,12 +56,14 @@ static struct { { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" }, { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" }, { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" }, + { IVPU_HW_IP_60XX, "intel/vpu/vpu_60xx_v1.bin" }, }; /* Production fw_names from the table above */ MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin"); MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin"); MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin"); +MODULE_FIRMWARE("intel/vpu/vpu_60xx_v1.bin"); static int ivpu_fw_request(struct ivpu_device *vdev) { @@ -131,9 +128,14 @@ ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_hea return false; } -static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size) +bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range) { - if (addr < range_start || addr + size > range_start + range_size) + u64 addr_end; + + if (!range || check_add_overflow(addr, size, &addr_end)) + return false; + + if (addr < range->start || addr_end > range->end) return false; return true; @@ -142,6 +144,12 @@ static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range static u32 ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr) { + if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX && + ivpu_sched_mode == VPU_SCHEDULING_MODE_OS) { + ivpu_warn(vdev, "OS sched mode is not supported, using HW mode\n"); + return VPU_SCHEDULING_MODE_HW; + } + if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO) return ivpu_sched_mode; @@ -151,11 +159,56 @@ ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_he return VPU_SCHEDULING_MODE_HW; } +static void +ivpu_preemption_config_parse(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr) +{ + struct ivpu_fw_info *fw = vdev->fw; + u32 primary_preempt_buf_size, secondary_preempt_buf_size; + + if (fw_hdr->preemption_buffer_1_max_size) + primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size; + else + primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; + + if 
(fw_hdr->preemption_buffer_2_max_size) + secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size; + else + secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; + + ivpu_dbg(vdev, FW_BOOT, "Preemption buffer size, primary: %u, secondary: %u\n", + primary_preempt_buf_size, secondary_preempt_buf_size); + + if (primary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE || + secondary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE) { + ivpu_warn(vdev, "Preemption buffers size too small\n"); + return; + } + + if (primary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE || + secondary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE) { + ivpu_warn(vdev, "Preemption buffers size too big\n"); + return; + } + + if (fw->sched_mode != VPU_SCHEDULING_MODE_HW) + return; + + if (ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE) + return; + + vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE); + vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE); +} + static int ivpu_fw_parse(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data; - u64 runtime_addr, image_load_addr, runtime_size, image_size; + struct ivpu_addr_range fw_image_range; + u64 boot_params_addr, boot_params_size; + u64 fw_version_addr, fw_version_size; + u64 runtime_addr, runtime_size; + u64 image_load_addr, image_size; if (fw->file->size <= FW_FILE_IMAGE_OFFSET) { ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size); @@ -167,18 +220,37 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) return -EINVAL; } - runtime_addr = fw_hdr->boot_params_load_address; - runtime_size = fw_hdr->runtime_size; - image_load_addr = fw_hdr->image_load_address; - image_size = fw_hdr->image_size; + boot_params_addr = fw_hdr->boot_params_load_address; + boot_params_size = SZ_4K; - if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) { - ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr); + if (!ivpu_is_within_range(boot_params_addr, boot_params_size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid boot params address: 0x%llx\n", boot_params_addr); return -EINVAL; } - if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) { - ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size); + fw_version_addr = fw_hdr->firmware_version_load_address; + fw_version_size = ALIGN(fw_hdr->firmware_version_size, SZ_4K); + + if (fw_version_size != SZ_4K) { + ivpu_err(vdev, "Invalid firmware version size: %u\n", + fw_hdr->firmware_version_size); + return -EINVAL; + } + + if (!ivpu_is_within_range(fw_version_addr, fw_version_size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid firmware version address: 0x%llx\n", fw_version_addr); + return -EINVAL; + } + + runtime_addr = fw_hdr->image_load_address; + runtime_size = fw_hdr->runtime_size - boot_params_size - fw_version_size; + + image_load_addr = fw_hdr->image_load_address; + image_size = fw_hdr->image_size; + + if (!ivpu_is_within_range(runtime_addr, runtime_size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx and size %llu\n", + runtime_addr, runtime_size); return -EINVAL; } @@ -187,23 +259,25 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) return -EINVAL; } - if (image_load_addr < runtime_addr || - image_load_addr + image_size > runtime_addr + runtime_size) { - ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n", + if 
(!ivpu_is_within_range(image_load_addr, image_size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid firmware load address: 0x%llx and size %llu\n", image_load_addr, image_size); return -EINVAL; } + if (ivpu_hw_range_init(vdev, &fw_image_range, image_load_addr, image_size)) + return -EINVAL; + + if (!ivpu_is_within_range(fw_hdr->entry_point, SZ_4K, &fw_image_range)) { + ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point); + return -EINVAL; + } + if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) { ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size); return -EINVAL; } - if (fw_hdr->entry_point < image_load_addr || - fw_hdr->entry_point >= image_load_addr + image_size) { - ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point); - return -EINVAL; - } ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n", fw_hdr->header_version, fw_hdr->image_format); @@ -217,6 +291,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3)) return -EINVAL; + fw->boot_params_addr = boot_params_addr; + fw->boot_params_size = boot_params_size; + fw->fw_version_addr = fw_version_addr; + fw->fw_version_size = fw_version_size; fw->runtime_addr = runtime_addr; fw->runtime_size = runtime_size; fw->image_load_offset = image_load_addr - runtime_addr; @@ -235,22 +313,13 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr); ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS"); - if (fw_hdr->preemption_buffer_1_max_size) - fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size; - else - fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; + ivpu_preemption_config_parse(vdev, fw_hdr); + ivpu_dbg(vdev, FW_BOOT, "Mid-inference preemption %s supported\n", + ivpu_fw_preempt_buf_size(vdev) ? 
"is" : "is not"); - if (fw_hdr->preemption_buffer_2_max_size) - fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size; - else - fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; - ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n", - fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size); - - if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address, - fw_hdr->ro_section_size, - fw_hdr->image_load_address, - fw_hdr->image_size)) { + if (fw_hdr->ro_section_start_address && + !ivpu_is_within_range(fw_hdr->ro_section_start_address, fw_hdr->ro_section_size, + &fw_image_range)) { ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n", fw_hdr->ro_section_start_address, fw_hdr->ro_section_size); return -EINVAL; @@ -259,12 +328,18 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) fw->read_only_addr = fw_hdr->ro_section_start_address; fw->read_only_size = fw_hdr->ro_section_size; - ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n", - fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size); - ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n", - fw->runtime_addr, image_load_addr, fw->entry_point); + ivpu_dbg(vdev, FW_BOOT, "Boot params: address 0x%llx, size %llu\n", + fw->boot_params_addr, fw->boot_params_size); + ivpu_dbg(vdev, FW_BOOT, "FW version: address 0x%llx, size %llu\n", + fw->fw_version_addr, fw->fw_version_size); + ivpu_dbg(vdev, FW_BOOT, "Runtime: address 0x%llx, size %u\n", + fw->runtime_addr, fw->runtime_size); + ivpu_dbg(vdev, FW_BOOT, "Image load offset: 0x%llx, size %u\n", + fw->image_load_offset, fw->image_size); ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n", fw->read_only_addr, fw->read_only_size); + ivpu_dbg(vdev, FW_BOOT, "FW entry point: 0x%llx\n", fw->entry_point); + ivpu_dbg(vdev, FW_BOOT, "SHAVE NN size: %u\n", fw->shave_nn_size); return 0; } @@ -291,39 +366,33 @@ ivpu_fw_init_wa(struct ivpu_device *vdev) IVPU_PRINT_WA(disable_d0i3_msg); } -static int ivpu_fw_update_global_range(struct ivpu_device *vdev) -{ - struct ivpu_fw_info *fw = vdev->fw; - u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT); - u64 size = FW_SHARED_MEM_SIZE; - - if (start + size > FW_GLOBAL_MEM_END) { - ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size); - return -EINVAL; - } - - ivpu_hw_range_init(&vdev->hw->ranges.global, start, size); - return 0; -} - static int ivpu_fw_mem_init(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; - struct ivpu_addr_range fw_range; int log_verb_size; int ret; - ret = ivpu_fw_update_global_range(vdev); - if (ret) - return ret; + fw->mem_bp = ivpu_bo_create_runtime(vdev, fw->boot_params_addr, fw->boot_params_size, + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); + if (!fw->mem_bp) { + ivpu_err(vdev, "Failed to create firmware boot params memory buffer\n"); + return -ENOMEM; + } - fw_range.start = fw->runtime_addr; - fw_range.end = fw->runtime_addr + fw->runtime_size; - fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size, - DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); + fw->mem_fw_ver = ivpu_bo_create_runtime(vdev, fw->fw_version_addr, fw->fw_version_size, + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); + if (!fw->mem_fw_ver) { + ivpu_err(vdev, "Failed to create firmware version memory buffer\n"); + ret = -ENOMEM; + goto err_free_bp; + } + + fw->mem = ivpu_bo_create_runtime(vdev, 
fw->runtime_addr, fw->runtime_size, + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); if (!fw->mem) { ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err_free_fw_ver; } ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr, @@ -372,6 +441,10 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev) ivpu_bo_free(fw->mem_log_crit); err_free_fw_mem: ivpu_bo_free(fw->mem); +err_free_fw_ver: + ivpu_bo_free(fw->mem_fw_ver); +err_free_bp: + ivpu_bo_free(fw->mem_bp); return ret; } @@ -387,10 +460,14 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev) ivpu_bo_free(fw->mem_log_verb); ivpu_bo_free(fw->mem_log_crit); ivpu_bo_free(fw->mem); + ivpu_bo_free(fw->mem_fw_ver); + ivpu_bo_free(fw->mem_bp); fw->mem_log_verb = NULL; fw->mem_log_crit = NULL; fw->mem = NULL; + fw->mem_fw_ver = NULL; + fw->mem_bp = NULL; } int ivpu_fw_init(struct ivpu_device *vdev) @@ -483,11 +560,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n", boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg); - ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n", - boot_params->global_memory_allocator_base); - ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n", - boot_params->global_memory_allocator_size); - ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n", boot_params->shave_nn_fw_base); @@ -495,10 +567,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->watchdog_irq_mss); ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n", boot_params->watchdog_irq_nce); - ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n", - boot_params->host_to_vpu_irq); - ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n", - boot_params->job_done_irq); ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n", boot_params->host_version_id); @@ -546,6 +614,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->system_time_us); ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n", boot_params->power_profile); + ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_uses_ecc_mca_signal = 0x%x\n", + boot_params->vpu_uses_ecc_mca_signal); } void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params) @@ -572,6 +642,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params return; } + memset(boot_params, 0, sizeof(*boot_params)); vdev->pm->is_warmboot = false; boot_params->magic = VPU_BOOT_PARAMS_MAGIC; @@ -647,6 +718,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->d0i3_entry_vpu_ts = 0; if (IVPU_WA(disable_d0i2)) boot_params->power_profile |= BIT(1); + boot_params->vpu_uses_ecc_mca_signal = + ivpu_hw_uses_ecc_mca_signal(vdev) ? 
VPU_BOOT_MCA_ECC_BOTH : 0; boot_params->system_time_us = ktime_to_us(ktime_get_real()); wmb(); /* Flush WC buffers after writing bootparams */ diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h index 7081913fb0dd..00945892b55e 100644 --- a/drivers/accel/ivpu/ivpu_fw.h +++ b/drivers/accel/ivpu/ivpu_fw.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_FW_H__ @@ -19,10 +19,16 @@ struct ivpu_fw_info { const struct firmware *file; const char *name; char version[FW_VERSION_STR_SIZE]; + struct ivpu_bo *mem_bp; + struct ivpu_bo *mem_fw_ver; struct ivpu_bo *mem; struct ivpu_bo *mem_shave_nn; struct ivpu_bo *mem_log_crit; struct ivpu_bo *mem_log_verb; + u64 boot_params_addr; + u64 boot_params_size; + u64 fw_version_addr; + u64 fw_version_size; u64 runtime_addr; u32 runtime_size; u64 image_load_offset; @@ -42,6 +48,7 @@ struct ivpu_fw_info { u64 last_heartbeat; }; +bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range); int ivpu_fw_init(struct ivpu_device *vdev); void ivpu_fw_fini(struct ivpu_device *vdev); void ivpu_fw_load(struct ivpu_device *vdev); @@ -52,4 +59,9 @@ static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev) return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point; } +static inline u32 ivpu_fw_preempt_buf_size(struct ivpu_device *vdev) +{ + return vdev->fw->primary_preempt_buf_size + vdev->fw->secondary_preempt_buf_size; +} + #endif /* __IVPU_FW_H__ */ diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c index 59cfcf3eaded..ece68f570b7e 100644 --- a/drivers/accel/ivpu/ivpu_gem.c +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -15,6 +15,7 @@ #include #include "ivpu_drv.h" +#include "ivpu_fw.h" #include "ivpu_gem.h" #include "ivpu_hw.h" #include "ivpu_mmu.h" @@ -27,8 +28,8 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs; static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action) { ivpu_dbg(vdev, BO, - "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n", - action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id, + "%6s: bo %8p size %9zu ctx %d vpu_addr %9llx pages %d sgt %d mmu_mapped %d wc %d imported %d\n", + action, bo, ivpu_bo_size(bo), bo->ctx_id, bo->vpu_addr, (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc, (bool)drm_gem_is_imported(&bo->base.base)); } @@ -43,22 +44,47 @@ static inline void ivpu_bo_unlock(struct ivpu_bo *bo) dma_resv_unlock(bo->base.base.resv); } +static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct ivpu_bo *bo) +{ + struct sg_table *sgt; + + drm_WARN_ON(&vdev->drm, !bo->base.base.import_attach); + + ivpu_bo_lock(bo); + + sgt = bo->base.sgt; + if (!sgt) { + sgt = dma_buf_map_attachment(bo->base.base.import_attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) + ivpu_err(vdev, "Failed to map BO in IOMMU: %ld\n", PTR_ERR(sgt)); + else + bo->base.sgt = sgt; + } + + ivpu_bo_unlock(bo); + + return sgt; +} + /* - * ivpu_bo_pin() - pin the backing physical pages and map them to VPU. + * ivpu_bo_bind() - pin the backing physical pages and map them to VPU. * * This function pins physical memory pages, then maps the physical pages * to IOMMU address space and finally updates the VPU MMU page tables * to allow the VPU to translate VPU address to IOMMU address. 
*/ -int __must_check ivpu_bo_pin(struct ivpu_bo *bo) +int __must_check ivpu_bo_bind(struct ivpu_bo *bo) { struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); struct sg_table *sgt; int ret = 0; - ivpu_dbg_bo(vdev, bo, "pin"); + ivpu_dbg_bo(vdev, bo, "bind"); - sgt = drm_gem_shmem_get_pages_sgt(&bo->base); + if (bo->base.base.import_attach) + sgt = ivpu_bo_map_attachment(vdev, bo); + else + sgt = drm_gem_shmem_get_pages_sgt(&bo->base); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret); @@ -70,7 +96,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo) if (!bo->mmu_mapped) { drm_WARN_ON(&vdev->drm, !bo->ctx); ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt, - ivpu_bo_is_snooped(bo)); + ivpu_bo_is_snooped(bo), ivpu_bo_is_read_only(bo)); if (ret) { ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret); goto unlock; @@ -99,9 +125,9 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node); if (!ret) { bo->ctx = ctx; + bo->ctx_id = ctx->id; bo->vpu_addr = bo->mm_node.start; - } else { - ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret); + ivpu_dbg_bo(vdev, bo, "vaddr"); } ivpu_bo_unlock(bo); @@ -115,7 +141,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) { struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); - lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount)); + dma_resv_assert_held(bo->base.base.resv); if (bo->mmu_mapped) { drm_WARN_ON(&vdev->drm, !bo->ctx); @@ -130,13 +156,15 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) bo->ctx = NULL; } - if (drm_gem_is_imported(&bo->base.base)) - return; - if (bo->base.sgt) { - dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0); - sg_free_table(bo->base.sgt); - kfree(bo->base.sgt); + if (bo->base.base.import_attach) { + dma_buf_unmap_attachment(bo->base.base.import_attach, + bo->base.sgt, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0); + sg_free_table(bo->base.sgt); + kfree(bo->base.sgt); + } bo->base.sgt = NULL; } } @@ -182,10 +210,11 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { + struct ivpu_device *vdev = to_ivpu_device(dev); struct device *attach_dev = dev->dev; struct dma_buf_attachment *attach; - struct sg_table *sgt; struct drm_gem_object *obj; + struct ivpu_bo *bo; int ret; attach = dma_buf_attach(dma_buf, attach_dev); @@ -194,25 +223,25 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, get_dma_buf(dma_buf); - sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sgt)) { - ret = PTR_ERR(sgt); - goto fail_detach; - } - - obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); + obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL); if (IS_ERR(obj)) { ret = PTR_ERR(obj); - goto fail_unmap; + goto fail_detach; } obj->import_attach = attach; obj->resv = dma_buf->resv; + bo = to_ivpu_bo(obj); + + mutex_lock(&vdev->bo_list_lock); + list_add_tail(&bo->bo_list_node, &vdev->bo_list); + mutex_unlock(&vdev->bo_list_lock); + + ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo)); + return obj; -fail_unmap: - dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL); fail_detach: dma_buf_detach(dma_buf, attach); dma_buf_put(dma_buf); @@ -220,7 +249,7 @@ 
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, return ERR_PTR(ret); } -static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id) +static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags) { struct drm_gem_shmem_object *shmem; struct ivpu_bo *bo; @@ -238,7 +267,6 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla return ERR_CAST(shmem); bo = to_ivpu_bo(&shmem->base); - bo->ctx_id = ctx_id; bo->base.map_wc = flags & DRM_IVPU_BO_WC; bo->flags = flags; @@ -246,7 +274,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla list_add_tail(&bo->bo_list_node, &vdev->bo_list); mutex_unlock(&vdev->bo_list_lock); - ivpu_dbg_bo(vdev, bo, "alloc"); + ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, size); return bo; } @@ -259,8 +287,8 @@ static int ivpu_gem_bo_open(struct drm_gem_object *obj, struct drm_file *file) struct ivpu_addr_range *range; if (bo->ctx) { - ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n", - file_priv->ctx.id, bo->ctx->id); + ivpu_dbg(vdev, IOCTL, "Can't add BO %pe to ctx %u: already in ctx %u\n", + bo, file_priv->ctx.id, bo->ctx->id); return -EALREADY; } @@ -281,23 +309,41 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj) ivpu_dbg_bo(vdev, bo, "free"); + drm_WARN_ON(&vdev->drm, list_empty(&bo->bo_list_node)); + mutex_lock(&vdev->bo_list_lock); list_del(&bo->bo_list_node); - mutex_unlock(&vdev->bo_list_lock); drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) && !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ)); drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0); drm_WARN_ON(&vdev->drm, bo->base.vaddr); + ivpu_bo_lock(bo); ivpu_bo_unbind_locked(bo); + ivpu_bo_unlock(bo); + + mutex_unlock(&vdev->bo_list_lock); + drm_WARN_ON(&vdev->drm, bo->mmu_mapped); drm_WARN_ON(&vdev->drm, bo->ctx); drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1); + drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node); drm_gem_shmem_free(&bo->base); } +static enum drm_gem_object_status ivpu_gem_status(struct drm_gem_object *obj) +{ + struct ivpu_bo *bo = to_ivpu_bo(obj); + enum drm_gem_object_status status = 0; + + if (ivpu_bo_is_resident(bo)) + status |= DRM_GEM_OBJECT_RESIDENT; + + return status; +} + static const struct drm_gem_object_funcs ivpu_gem_funcs = { .free = ivpu_gem_bo_free, .open = ivpu_gem_bo_open, @@ -308,6 +354,7 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = drm_gem_shmem_object_mmap, + .status = ivpu_gem_status, .vm_ops = &drm_gem_shmem_vm_ops, }; @@ -320,25 +367,33 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi struct ivpu_bo *bo; int ret; - if (args->flags & ~DRM_IVPU_BO_FLAGS) + if (args->flags & ~DRM_IVPU_BO_FLAGS) { + ivpu_dbg(vdev, IOCTL, "Invalid BO flags 0x%x\n", args->flags); return -EINVAL; + } - if (size == 0) + if (size == 0) { + ivpu_dbg(vdev, IOCTL, "Invalid BO size %llu\n", args->size); return -EINVAL; + } - bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id); + bo = ivpu_bo_alloc(vdev, size, args->flags); if (IS_ERR(bo)) { - ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)", + ivpu_dbg(vdev, IOCTL, "Failed to allocate BO: %pe ctx %u size %llu flags 0x%x\n", bo, file_priv->ctx.id, args->size, args->flags); return PTR_ERR(bo); } + drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 0); + ret = 
drm_gem_handle_create(file, &bo->base.base, &args->handle); - if (ret) - ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)", + if (ret) { + ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n", bo, file_priv->ctx.id, args->size, args->flags); - else + } else { args->vpu_addr = bo->vpu_addr; + drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 1); + } drm_gem_object_put(&bo->base.base); @@ -360,18 +415,21 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end)); drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size)); - bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID); + bo = ivpu_bo_alloc(vdev, size, flags); if (IS_ERR(bo)) { - ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)", + ivpu_err(vdev, "Failed to allocate BO: %pe vpu_addr 0x%llx size %llu flags 0x%x\n", bo, range->start, size, flags); return NULL; } ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range); - if (ret) + if (ret) { + ivpu_err(vdev, "Failed to allocate NPU address for BO: %pe ctx %u size %llu: %d\n", + bo, ctx->id, size, ret); goto err_put; + } - ret = ivpu_bo_pin(bo); + ret = ivpu_bo_bind(bo); if (ret) goto err_put; @@ -391,6 +449,21 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, return NULL; } +struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags) +{ + struct ivpu_addr_range range; + + if (!ivpu_is_within_range(addr, size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid runtime BO address 0x%llx size %llu\n", addr, size); + return NULL; + } + + if (ivpu_hw_range_init(vdev, &range, addr, size)) + return NULL; + + return ivpu_bo_create(vdev, &vdev->gctx, &range, size, flags); +} + struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags) { return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags); diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h index aa8ff14f7aae..0c3350f22b55 100644 --- a/drivers/accel/ivpu/ivpu_gem.h +++ b/drivers/accel/ivpu/ivpu_gem.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2023 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_GEM_H__ #define __IVPU_GEM_H__ @@ -24,19 +24,22 @@ struct ivpu_bo { bool mmu_mapped; }; -int ivpu_bo_pin(struct ivpu_bo *bo); +int ivpu_bo_bind(struct ivpu_bo *bo); void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx); struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size); struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, struct ivpu_addr_range *range, u64 size, u32 flags); +struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags); struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags); void ivpu_bo_free(struct ivpu_bo *bo); int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); void ivpu_bo_list(struct drm_device *dev, 
struct drm_printer *p); void ivpu_bo_list_print(struct drm_device *dev); @@ -74,6 +77,16 @@ static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo) return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED; } +static inline bool ivpu_bo_is_read_only(struct ivpu_bo *bo) +{ + return bo->flags & DRM_IVPU_BO_READ_ONLY; +} + +static inline bool ivpu_bo_is_resident(struct ivpu_bo *bo) +{ + return !!bo->base.pages; +} + static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr) { if (vpu_addr < bo->vpu_addr) @@ -96,4 +109,9 @@ static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr) return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo)); } +static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo) +{ + return bo->flags & DRM_IVPU_BO_MAPPABLE; +} + #endif /* __IVPU_GEM_H__ */ diff --git a/drivers/accel/ivpu/ivpu_gem_userptr.c b/drivers/accel/ivpu/ivpu_gem_userptr.c new file mode 100644 index 000000000000..25ba606164c0 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_gem_userptr.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2025 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ivpu_drv.h" +#include "ivpu_gem.h" + +static struct sg_table * +ivpu_gem_userptr_dmabuf_map(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct sg_table *sgt = attachment->dmabuf->priv; + int ret; + + ret = dma_map_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC); + if (ret) + return ERR_PTR(ret); + + return sgt; +} + +static void ivpu_gem_userptr_dmabuf_unmap(struct dma_buf_attachment *attachment, + struct sg_table *sgt, + enum dma_data_direction direction) +{ + dma_unmap_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC); +} + +static void ivpu_gem_userptr_dmabuf_release(struct dma_buf *dma_buf) +{ + struct sg_table *sgt = dma_buf->priv; + struct sg_page_iter page_iter; + struct page *page; + + for_each_sgtable_page(sgt, &page_iter, 0) { + page = sg_page_iter_page(&page_iter); + unpin_user_page(page); + } + + sg_free_table(sgt); + kfree(sgt); +} + +static const struct dma_buf_ops ivpu_gem_userptr_dmabuf_ops = { + .map_dma_buf = ivpu_gem_userptr_dmabuf_map, + .unmap_dma_buf = ivpu_gem_userptr_dmabuf_unmap, + .release = ivpu_gem_userptr_dmabuf_release, +}; + +static struct dma_buf * +ivpu_create_userptr_dmabuf(struct ivpu_device *vdev, void __user *user_ptr, + size_t size, uint32_t flags) +{ + struct dma_buf_export_info exp_info = {}; + struct dma_buf *dma_buf; + struct sg_table *sgt; + struct page **pages; + unsigned long nr_pages = size >> PAGE_SHIFT; + unsigned int gup_flags = FOLL_LONGTERM; + int ret, i, pinned; + + /* Add FOLL_WRITE only if the BO is not read-only */ + if (!(flags & DRM_IVPU_BO_READ_ONLY)) + gup_flags |= FOLL_WRITE; + + pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL); + if (!pages) + return ERR_PTR(-ENOMEM); + + pinned = pin_user_pages_fast((unsigned long)user_ptr, nr_pages, gup_flags, pages); + if (pinned < 0) { + ret = pinned; + ivpu_dbg(vdev, IOCTL, "Failed to pin user pages: %d\n", ret); + goto free_pages_array; + } + + if (pinned != nr_pages) { + ivpu_dbg(vdev, IOCTL, "Pinned %d pages, expected %lu\n", pinned, nr_pages); + ret = -EFAULT; + goto unpin_pages; + } + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + ret = -ENOMEM; + goto unpin_pages; + } + + ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, size, GFP_KERNEL); + if (ret) { + 
ivpu_dbg(vdev, IOCTL, "Failed to create sg table: %d\n", ret); + goto free_sgt; + } + + exp_info.exp_name = "ivpu_userptr_dmabuf"; + exp_info.owner = THIS_MODULE; + exp_info.ops = &ivpu_gem_userptr_dmabuf_ops; + exp_info.size = size; + exp_info.flags = O_RDWR | O_CLOEXEC; + exp_info.priv = sgt; + + dma_buf = dma_buf_export(&exp_info); + if (IS_ERR(dma_buf)) { + ret = PTR_ERR(dma_buf); + ivpu_dbg(vdev, IOCTL, "Failed to export userptr dma-buf: %d\n", ret); + goto free_sg_table; + } + + kvfree(pages); + return dma_buf; + +free_sg_table: + sg_free_table(sgt); +free_sgt: + kfree(sgt); +unpin_pages: + for (i = 0; i < pinned; i++) + unpin_user_page(pages[i]); +free_pages_array: + kvfree(pages); + return ERR_PTR(ret); +} + +static struct ivpu_bo * +ivpu_bo_create_from_userptr(struct ivpu_device *vdev, void __user *user_ptr, + size_t size, uint32_t flags) +{ + struct dma_buf *dma_buf; + struct drm_gem_object *obj; + struct ivpu_bo *bo; + + dma_buf = ivpu_create_userptr_dmabuf(vdev, user_ptr, size, flags); + if (IS_ERR(dma_buf)) + return ERR_CAST(dma_buf); + + obj = ivpu_gem_prime_import(&vdev->drm, dma_buf); + if (IS_ERR(obj)) { + dma_buf_put(dma_buf); + return ERR_CAST(obj); + } + + dma_buf_put(dma_buf); + + bo = to_ivpu_bo(obj); + bo->flags = flags; + + return bo; +} + +int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_ivpu_bo_create_from_userptr *args = data; + struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = to_ivpu_device(dev); + void __user *user_ptr = u64_to_user_ptr(args->user_ptr); + struct ivpu_bo *bo; + int ret; + + if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY)) { + ivpu_dbg(vdev, IOCTL, "Invalid BO flags: 0x%x\n", args->flags); + return -EINVAL; + } + + if (!args->user_ptr || !args->size) { + ivpu_dbg(vdev, IOCTL, "Userptr or size are zero: ptr %llx size %llu\n", + args->user_ptr, args->size); + return -EINVAL; + } + + if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size)) { + ivpu_dbg(vdev, IOCTL, "Userptr or size not page aligned: ptr %llx size %llu\n", + args->user_ptr, args->size); + return -EINVAL; + } + + if (!access_ok(user_ptr, args->size)) { + ivpu_dbg(vdev, IOCTL, "Userptr is not accessible: ptr %llx size %llu\n", + args->user_ptr, args->size); + return -EFAULT; + } + + bo = ivpu_bo_create_from_userptr(vdev, user_ptr, args->size, args->flags); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); + if (ret) { + ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n", + bo, file_priv->ctx.id, args->size, args->flags); + } else { + ivpu_dbg(vdev, BO, "Created userptr BO: handle=%u vpu_addr=0x%llx size=%llu flags=0x%x\n", + args->handle, bo->vpu_addr, args->size, bo->flags); + args->vpu_addr = bo->vpu_addr; + } + + drm_gem_object_put(&bo->base.base); + + return ret; +} diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c index 08dcc31b56f4..d69cd0d93569 100644 --- a/drivers/accel/ivpu/ivpu_hw.c +++ b/drivers/accel/ivpu/ivpu_hw.c @@ -8,6 +8,8 @@ #include "ivpu_hw_btrs.h" #include "ivpu_hw_ip.h" +#include +#include #include #include #include @@ -20,6 +22,10 @@ module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444); MODULE_PARM_DESC(fail_hw, ",,,"); #endif +#define FW_SHARED_MEM_ALIGNMENT SZ_512K /* VPU MTRR limitation */ + +#define ECC_MCA_SIGNAL_ENABLE_MASK 0xff + static char *platform_to_str(u32 platform) { switch 
(platform) { @@ -147,19 +153,39 @@ static void priority_bands_init(struct ivpu_device *vdev) vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000; } +int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, u64 size) +{ + u64 end; + + if (!range || check_add_overflow(start, size, &end)) { + ivpu_err(vdev, "Invalid range: start 0x%llx size %llu\n", start, size); + return -EINVAL; + } + + range->start = start; + range->end = end; + + return 0; +} + static void memory_ranges_init(struct ivpu_device *vdev) { if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) { - ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M); - ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M); - ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G); - ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x84800000, SZ_64M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0xa0000000, 511 * SZ_1M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x180000000, SZ_2G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.dma, 0x200000000, SZ_128G); } else { - ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M); - ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G); - ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x80000000, SZ_64M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x80000000, SZ_2G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0x100000000, SZ_256G); vdev->hw->ranges.dma = vdev->hw->ranges.user; } + + drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vdev->hw->ranges.global.start, + FW_SHARED_MEM_ALIGNMENT)); } static int wp_enable(struct ivpu_device *vdev) @@ -373,3 +399,22 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr) pm_runtime_mark_last_busy(vdev->drm.dev); return IRQ_HANDLED; } + +bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev) +{ + unsigned long long msr_integrity_caps; + int ret; + + if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX) + return false; + + ret = rdmsrq_safe(MSR_INTEGRITY_CAPS, &msr_integrity_caps); + if (ret) { + ivpu_warn(vdev, "Error reading MSR_INTEGRITY_CAPS: %d", ret); + return false; + } + + ivpu_dbg(vdev, MISC, "MSR_INTEGRITY_CAPS: 0x%llx\n", msr_integrity_caps); + + return msr_integrity_caps & ECC_MCA_SIGNAL_ENABLE_MASK; +} diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h index d79668fe1609..b6d0f0d0dccc 100644 --- a/drivers/accel/ivpu/ivpu_hw.h +++ b/drivers/accel/ivpu/ivpu_hw.h @@ -21,6 +21,7 @@ struct ivpu_hw_info { bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq); } irq; struct { + struct ivpu_addr_range runtime; struct ivpu_addr_range global; struct ivpu_addr_range user; struct ivpu_addr_range shave; @@ -51,6 +52,8 @@ struct ivpu_hw_info { }; int ivpu_hw_init(struct ivpu_device *vdev); +int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, + u64 size); int ivpu_hw_power_up(struct ivpu_device *vdev); int ivpu_hw_power_down(struct ivpu_device *vdev); int ivpu_hw_reset(struct ivpu_device *vdev); @@ -60,6 +63,7 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev); void ivpu_hw_irq_enable(struct ivpu_device *vdev); void ivpu_hw_irq_disable(struct ivpu_device *vdev); irqreturn_t 
ivpu_hw_irq_handler(int irq, void *ptr); +bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev); static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq) { @@ -71,12 +75,6 @@ static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq) return vdev->hw->irq.ip_irq_handler(vdev, irq); } -static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size) -{ - range->start = start; - range->end = start + size; -} - static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range) { return range->end - range->start; diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c index afdb3b2aa72a..06e65c592618 100644 --- a/drivers/accel/ivpu/ivpu_hw_btrs.c +++ b/drivers/accel/ivpu/ivpu_hw_btrs.c @@ -321,6 +321,14 @@ static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable) return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US); } +static int wait_for_cdyn_deassert(struct ivpu_device *vdev) +{ + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) + return 0; + + return REGB_POLL_FLD(VPU_HW_BTRS_LNL_CDYN, CDYN, 0, PLL_TIMEOUT_US); +} + int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable) { struct wp_request wp; @@ -354,6 +362,14 @@ int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable) return ret; } + if (!enable) { + ret = wait_for_cdyn_deassert(vdev); + if (ret) { + ivpu_err(vdev, "Timed out waiting for CDYN deassert\n"); + return ret; + } + } + return 0; } @@ -673,7 +689,7 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq) if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) { ivpu_dbg(vdev, IRQ, "Survivability IRQ\n"); - queue_work(system_wq, &vdev->irq_dct_work); + queue_work(system_percpu_wq, &vdev->irq_dct_work); } if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) { @@ -752,7 +768,7 @@ int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable) } } -void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent) +void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent) { u32 val = 0; u32 cmd = enable ? 
DCT_ENABLE : DCT_DISABLE; diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h index 032c384ac3d4..c4c10e22f30f 100644 --- a/drivers/accel/ivpu/ivpu_hw_btrs.h +++ b/drivers/accel/ivpu/ivpu_hw_btrs.h @@ -36,7 +36,7 @@ u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev); bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq); bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq); int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable); -void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent); +void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent); u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev); u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev); u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev); diff --git a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h index fff2ef2cada6..a81a9ba540fa 100644 --- a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h +++ b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h @@ -74,6 +74,9 @@ #define VPU_HW_BTRS_LNL_PLL_FREQ 0x00000148u #define VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK GENMASK(15, 0) +#define VPU_HW_BTRS_LNL_CDYN 0x0000014cu +#define VPU_HW_BTRS_LNL_CDYN_CDYN_MASK GENMASK(15, 0) + #define VPU_HW_BTRS_LNL_TILE_FUSE 0x00000150u #define VPU_HW_BTRS_LNL_TILE_FUSE_VALID_MASK BIT_MASK(0) #define VPU_HW_BTRS_LNL_TILE_FUSE_CONFIG_MASK GENMASK(6, 1) diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c index 2bf9882ab52e..06aa1e7dc50b 100644 --- a/drivers/accel/ivpu/ivpu_hw_ip.c +++ b/drivers/accel/ivpu/ivpu_hw_ip.c @@ -691,6 +691,13 @@ static void pwr_island_delay_set(struct ivpu_device *vdev) status = high ? 46 : 3; break; + case PCI_DEVICE_ID_NVL: + post = high ? 198 : 17; + post1 = 0; + post2 = high ? 
198 : 17; + status = 0; + break; + default: dump_stack(); ivpu_err(vdev, "Unknown device ID\n"); @@ -889,6 +896,9 @@ static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable) static int soc_cpu_enable(struct ivpu_device *vdev) { + if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX) + return 0; + return soc_cpu_drive_40xx(vdev, true); } diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 5f00809d448a..1f13bf95b2b3 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -459,7 +459,7 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev) } } - queue_work(system_wq, &vdev->irq_ipc_work); + queue_work(system_percpu_wq, &vdev->irq_ipc_work); } void ivpu_ipc_irq_work_fn(struct work_struct *work) diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 060f1fc031d3..4f8564e2878a 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -34,22 +34,20 @@ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq) static int ivpu_preemption_buffers_create(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq) { - u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE); - u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE); - - if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW || - ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE) + if (ivpu_fw_preempt_buf_size(vdev) == 0) return 0; cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user, - primary_size, DRM_IVPU_BO_WC); + vdev->fw->primary_preempt_buf_size, + DRM_IVPU_BO_WC); if (!cmdq->primary_preempt_buf) { ivpu_err(vdev, "Failed to create primary preemption buffer\n"); return -ENOMEM; } cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma, - secondary_size, DRM_IVPU_BO_WC); + vdev->fw->secondary_preempt_buf_size, + DRM_IVPU_BO_WC); if (!cmdq->secondary_preempt_buf) { ivpu_err(vdev, "Failed to create secondary preemption buffer\n"); goto err_free_primary; @@ -66,20 +64,39 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev, static void ivpu_preemption_buffers_free(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq) { - if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW) - return; - if (cmdq->primary_preempt_buf) ivpu_bo_free(cmdq->primary_preempt_buf); if (cmdq->secondary_preempt_buf) ivpu_bo_free(cmdq->secondary_preempt_buf); } +static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, + struct ivpu_cmdq *cmdq, struct ivpu_job *job) +{ + int ret; + + /* Use preemption buffer provided by the user space */ + if (job->primary_preempt_buf) + return 0; + + if (!cmdq->primary_preempt_buf) { + /* Allocate per command queue preemption buffers */ + ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq); + if (ret) + return ret; + } + + /* Use preemption buffers allocated by the kernel */ + job->primary_preempt_buf = cmdq->primary_preempt_buf; + job->secondary_preempt_buf = cmdq->secondary_preempt_buf; + + return 0; +} + static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv) { struct ivpu_device *vdev = file_priv->vdev; struct ivpu_cmdq *cmdq; - int ret; cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL); if (!cmdq) @@ -89,10 +106,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv) if (!cmdq->mem) goto err_free_cmdq; - ret = ivpu_preemption_buffers_create(vdev, 
file_priv, cmdq); - if (ret) - ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n"); - return cmdq; err_free_cmdq: @@ -219,11 +232,13 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq * ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem)); - if (!ret) + if (!ret) { ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n", cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority); - else + } else { xa_erase(&vdev->db_xa, cmdq->db_id); + cmdq->db_id = 0; + } return ret; } @@ -333,7 +348,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32 cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id); if (!cmdq) { - ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id); + ivpu_dbg(vdev, IOCTL, "Failed to find command queue with ID: %u\n", cmdq_id); return NULL; } @@ -427,17 +442,14 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION)) entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK; - if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { - if (cmdq->primary_preempt_buf) { - entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr; - entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf); - } + if (job->primary_preempt_buf) { + entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr; + entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf); + } - if (cmdq->secondary_preempt_buf) { - entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr; - entry->secondary_preempt_buf_size = - ivpu_bo_size(cmdq->secondary_preempt_buf); - } + if (job->secondary_preempt_buf) { + entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr; + entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf); } wmb(); /* Ensure that tail is updated after filling entry */ @@ -522,7 +534,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count) job->bo_count = bo_count; job->done_fence = ivpu_fence_create(vdev); if (!job->done_fence) { - ivpu_warn_ratelimited(vdev, "Failed to create a fence\n"); + ivpu_err(vdev, "Failed to create a fence\n"); goto err_free_job; } @@ -552,6 +564,44 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device * return job; } +bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status) +{ + lockdep_assert_held(&vdev->submitted_jobs_lock); + + switch (job_status) { + case VPU_JSM_STATUS_PROCESSING_ERR: + case VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN ... 
VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX: + { + struct ivpu_job *job = xa_load(&vdev->submitted_jobs_xa, job_id); + + if (!job) + return false; + + /* Trigger an engine reset */ + guard(mutex)(&job->file_priv->lock); + + job->job_status = job_status; + + if (job->file_priv->has_mmu_faults) + return false; + + /* + * Mark context as faulty and defer destruction of the job to jobs abort thread + * handler to synchronize between both faults and jobs returning context violation + * status and ensure both are handled in the same way + */ + job->file_priv->has_mmu_faults = true; + queue_work(system_percpu_wq, &vdev->context_abort_work); + return true; + } + default: + /* Complete job with error status, engine reset not required */ + break; + } + + return false; +} + static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status) { struct ivpu_job *job; @@ -562,35 +612,22 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 if (!job) return -ENOENT; - if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) { - guard(mutex)(&job->file_priv->lock); + ivpu_job_remove_from_submitted_jobs(vdev, job_id); + if (job->job_status == VPU_JSM_STATUS_SUCCESS) { if (job->file_priv->has_mmu_faults) - return 0; - - /* - * Mark context as faulty and defer destruction of the job to jobs abort thread - * handler to synchronize between both faults and jobs returning context violation - * status and ensure both are handled in the same way - */ - job->file_priv->has_mmu_faults = true; - queue_work(system_wq, &vdev->context_abort_work); - return 0; + job->job_status = DRM_IVPU_JOB_STATUS_ABORTED; + else + job->job_status = job_status; } - job = ivpu_job_remove_from_submitted_jobs(vdev, job_id); - if (!job) - return -ENOENT; - - if (job->file_priv->has_mmu_faults) - job_status = DRM_IVPU_JOB_STATUS_ABORTED; - - job->bos[CMD_BUF_IDX]->job_status = job_status; + job->bos[CMD_BUF_IDX]->job_status = job->job_status; dma_fence_signal(job->done_fence); trace_job("done", job); ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n", - job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, job_status); + job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, + job->job_status); ivpu_job_destroy(job); ivpu_stop_job_timeout_detection(vdev); @@ -650,7 +687,6 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) else cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id); if (!cmdq) { - ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d\n", file_priv->ctx.id); ret = -EINVAL; goto err_unlock; } @@ -661,6 +697,13 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) goto err_unlock; } + ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job); + if (ret) { + ivpu_err(vdev, "Failed to initialize preemption buffers for job %d: %d\n", + job->job_id, ret); + goto err_unlock; + } + job->cmdq_id = cmdq->id; is_first_job = xa_empty(&vdev->submitted_jobs_xa); @@ -714,7 +757,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) static int ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles, - u32 buf_count, u32 commands_offset) + u32 buf_count, u32 commands_offset, u32 preempt_buffer_index) { struct ivpu_file_priv *file_priv = job->file_priv; struct ivpu_device *vdev = file_priv->vdev; @@ -727,40 +770,58 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 for (i = 0; i < 
buf_count; i++) { struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]); - if (!obj) + if (!obj) { + ivpu_dbg(vdev, IOCTL, "Failed to lookup GEM object with handle %u\n", + buf_handles[i]); return -ENOENT; + } job->bos[i] = to_ivpu_bo(obj); - ret = ivpu_bo_pin(job->bos[i]); + ret = ivpu_bo_bind(job->bos[i]); if (ret) return ret; } bo = job->bos[CMD_BUF_IDX]; if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) { - ivpu_warn(vdev, "Buffer is already in use\n"); + ivpu_dbg(vdev, IOCTL, "Buffer is already in use by another job\n"); return -EBUSY; } if (commands_offset >= ivpu_bo_size(bo)) { - ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset); + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u for buffer size %zu\n", + commands_offset, ivpu_bo_size(bo)); return -EINVAL; } job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset; + if (preempt_buffer_index) { + struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index]; + + if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) { + ivpu_dbg(vdev, IOCTL, "Preemption buffer is too small\n"); + return -EINVAL; + } + if (ivpu_bo_is_mappable(preempt_bo)) { + ivpu_dbg(vdev, IOCTL, "Preemption buffer cannot be mappable\n"); + return -EINVAL; + } + job->primary_preempt_buf = preempt_bo; + } + ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx); if (ret) { - ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret); + ivpu_warn_ratelimited(vdev, "Failed to lock reservations: %d\n", ret); return ret; } for (i = 0; i < buf_count; i++) { ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1); if (ret) { - ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); + ivpu_warn_ratelimited(vdev, "Failed to reserve fences: %d\n", ret); goto unlock_reservations; } } @@ -780,7 +841,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id, u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset, - u8 priority) + u32 preempt_buffer_index, u8 priority) { struct ivpu_device *vdev = file_priv->vdev; struct ivpu_job *job; @@ -807,16 +868,14 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, job = ivpu_job_create(file_priv, engine, buffer_count); if (!job) { - ivpu_err(vdev, "Failed to create job\n"); ret = -ENOMEM; goto err_exit_dev; } - ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset); - if (ret) { - ivpu_err(vdev, "Failed to prepare job: %d\n", ret); + ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset, + preempt_buffer_index); + if (ret) goto err_destroy_job; - } down_read(&vdev->pm->reset_lock); ret = ivpu_job_submit(job, priority, cmdq_id); @@ -842,58 +901,91 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; struct drm_ivpu_submit *args = data; u8 priority; - if (args->engine != DRM_IVPU_ENGINE_COMPUTE) + if (args->engine != DRM_IVPU_ENGINE_COMPUTE) { + ivpu_dbg(vdev, IOCTL, "Invalid engine %d\n", args->engine); return -EINVAL; + } - if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) + if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) { + ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", 
args->priority); return -EINVAL; + } - if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) + if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) { + ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count); return -EINVAL; + } - if (!IS_ALIGNED(args->commands_offset, 8)) + if (!IS_ALIGNED(args->commands_offset, 8)) { + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset); return -EINVAL; + } - if (!file_priv->ctx.id) + if (!file_priv->ctx.id) { + ivpu_dbg(vdev, IOCTL, "Context not initialized\n"); return -EINVAL; + } - if (file_priv->has_mmu_faults) + if (file_priv->has_mmu_faults) { + ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id); return -EBADFD; + } priority = ivpu_job_to_jsm_priority(args->priority); return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine, - (void __user *)args->buffers_ptr, args->commands_offset, priority); + (void __user *)args->buffers_ptr, args->commands_offset, 0, priority); } int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; struct drm_ivpu_cmdq_submit *args = data; - if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) + if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); return -ENODEV; + } - if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) + if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) { + ivpu_dbg(vdev, IOCTL, "Invalid command queue ID %u\n", args->cmdq_id); return -EINVAL; + } - if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) + if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) { + ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count); return -EINVAL; + } - if (!IS_ALIGNED(args->commands_offset, 8)) + if (args->preempt_buffer_index >= args->buffer_count) { + ivpu_dbg(vdev, IOCTL, "Invalid preemption buffer index %u\n", + args->preempt_buffer_index); return -EINVAL; + } - if (!file_priv->ctx.id) + if (!IS_ALIGNED(args->commands_offset, 8)) { + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset); return -EINVAL; + } - if (file_priv->has_mmu_faults) + if (!file_priv->ctx.id) { + ivpu_dbg(vdev, IOCTL, "Context not initialized\n"); + return -EINVAL; + } + + if (file_priv->has_mmu_faults) { + ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id); return -EBADFD; + } return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE, - (void __user *)args->buffers_ptr, args->commands_offset, 0); + (void __user *)args->buffers_ptr, args->commands_offset, + args->preempt_buffer_index, 0); } int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -904,11 +996,15 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file * struct ivpu_cmdq *cmdq; int ret; - if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) + if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); return -ENODEV; + } - if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) + if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) { + ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority); return -EINVAL; + } ret = ivpu_rpm_get(vdev); if (ret 
< 0) @@ -936,8 +1032,10 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file u32 cmdq_id = 0; int ret; - if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) + if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); return -ENODEV; + } ret = ivpu_rpm_get(vdev); if (ret < 0) @@ -984,7 +1082,9 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr, payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload; mutex_lock(&vdev->submitted_jobs_lock); - ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status); + if (!ivpu_job_handle_engine_error(vdev, payload->job_id, payload->job_status)) + /* No engine error, complete the job normally */ + ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status); mutex_unlock(&vdev->submitted_jobs_lock); } @@ -1012,7 +1112,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work) if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) if (ivpu_jsm_reset_engine(vdev, 0)) - return; + goto runtime_put; mutex_lock(&vdev->context_list_lock); xa_for_each(&vdev->context_xa, ctx_id, file_priv) { @@ -1036,7 +1136,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work) goto runtime_put; if (ivpu_jsm_hws_resume_engine(vdev, 0)) - return; + goto runtime_put; /* * In hardware scheduling mode NPU already has stopped processing jobs * and won't send us any further notifications, thus we have to free job related resources @@ -1049,6 +1149,5 @@ void ivpu_context_abort_work_fn(struct work_struct *work) mutex_unlock(&vdev->submitted_jobs_lock); runtime_put: - pm_runtime_mark_last_busy(vdev->drm.dev); pm_runtime_put_autosuspend(vdev->drm.dev); } diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h index 2e301c2eea7b..3ab61e6a5616 100644 --- a/drivers/accel/ivpu/ivpu_job.h +++ b/drivers/accel/ivpu/ivpu_job.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_JOB_H__ @@ -15,12 +15,17 @@ struct ivpu_device; struct ivpu_file_priv; /** - * struct ivpu_cmdq - Object representing device queue used to send jobs. - * @jobq: Pointer to job queue memory shared with the device - * @mem: Memory allocated for the job queue, shared with device - * @entry_count Number of job entries in the queue - * @db_id: Doorbell assigned to this job queue - * @db_registered: True if doorbell is registered in device + * struct ivpu_cmdq - Represents a command queue for submitting jobs to the VPU. + * Tracks queue memory, preemption buffers, and metadata for job management. + * @jobq: Pointer to job queue memory shared with the device + * @primary_preempt_buf: Primary preemption buffer for this queue (optional) + * @secondary_preempt_buf: Secondary preemption buffer for this queue (optional) + * @mem: Memory allocated for the job queue, shared with device + * @entry_count: Number of job entries in the queue + * @id: Unique command queue ID + * @db_id: Doorbell ID assigned to this job queue + * @priority: Priority level of the command queue + * @is_legacy: True if this is a legacy command queue */ struct ivpu_cmdq { struct vpu_job_queue *jobq; @@ -35,16 +40,22 @@ struct ivpu_cmdq { }; /** - * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer. - * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW. 
- * This is a unit of execution, and be tracked by the job_id for - * any status reporting from VPU FW through IPC JOB RET/DONE message. - * @file_priv: The client that submitted this job - * @job_id: Job ID for KMD tracking and job status reporting from VPU FW - * @status: Status of the Job from IPC JOB RET/DONE message - * @batch_buffer: CPU vaddr points to the batch buffer memory allocated for the job - * @submit_status_offset: Offset within batch buffer where job completion handler - will update the job status + * struct ivpu_job - Representing a batch or DMA buffer submitted to the VPU. + * Each job is a unit of execution, tracked by job_id for status reporting from VPU FW. + * The structure holds all resources and metadata needed for job submission, execution, + * and completion handling. + * @vdev: Pointer to the VPU device + * @file_priv: The client context that submitted this job + * @done_fence: Fence signaled when job completes + * @cmd_buf_vpu_addr: VPU address of the command buffer for this job + * @cmdq_id: Command queue ID used for submission + * @job_id: Unique job ID for tracking and status reporting + * @engine_idx: Engine index for job execution + * @job_status: Status reported by firmware for this job + * @primary_preempt_buf: Primary preemption buffer for job + * @secondary_preempt_buf: Secondary preemption buffer for job (optional) + * @bo_count: Number of buffer objects associated with this job + * @bos: Array of buffer objects used by the job (batch buffer is at index 0) */ struct ivpu_job { struct ivpu_device *vdev; @@ -54,6 +65,9 @@ struct ivpu_job { u32 cmdq_id; u32 job_id; u32 engine_idx; + u32 job_status; + struct ivpu_bo *primary_preempt_buf; + struct ivpu_bo *secondary_preempt_buf; size_t bo_count; struct ivpu_bo *bos[] __counted_by(bo_count); }; @@ -71,6 +85,7 @@ void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id) void ivpu_job_done_consumer_init(struct ivpu_device *vdev); void ivpu_job_done_consumer_fini(struct ivpu_device *vdev); +bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status); void ivpu_context_abort_work_fn(struct work_struct *work); void ivpu_jobs_abort_all(struct ivpu_device *vdev); diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index 5ea010568faa..e1baf6b64935 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -970,7 +970,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev) } } - queue_work(system_wq, &vdev->context_abort_work); + queue_work(system_percpu_wq, &vdev->context_abort_work); } void ivpu_mmu_evtq_dump(struct ivpu_device *vdev) diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c index f0267efa55aa..87ad593ef47d 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.c +++ b/drivers/accel/ivpu/ivpu_mmu_context.c @@ -430,7 +430,7 @@ static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_a int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, - u64 vpu_addr, struct sg_table *sgt, bool llc_coherent) + u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only) { size_t start_vpu_addr = vpu_addr; struct scatterlist *sg; @@ -450,6 +450,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, prot = IVPU_MMU_ENTRY_MAPPED; if (llc_coherent) prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT; + if (read_only) + prot |= IVPU_MMU_ENTRY_FLAG_RO; mutex_lock(&ctx->lock); @@ -527,7 +529,8 @@ 
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); if (ret) - ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret); + ivpu_warn_ratelimited(vdev, "Failed to invalidate TLB for ctx %u: %d\n", + ctx->id, ret); } int @@ -568,7 +571,7 @@ void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ct mutex_init(&ctx->lock); if (!context_id) { - start = vdev->hw->ranges.global.start; + start = vdev->hw->ranges.runtime.start; end = vdev->hw->ranges.shave.end; } else { start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start); diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h index f255310968cf..663a11a9db11 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.h +++ b/drivers/accel/ivpu/ivpu_mmu_context.h @@ -42,7 +42,7 @@ int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node); int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, - u64 vpu_addr, struct sg_table *sgt, bool llc_coherent); + u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only); void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, struct sg_table *sgt); int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c index 2a043baf10ca..1d9c1cb17924 100644 --- a/drivers/accel/ivpu/ivpu_ms.c +++ b/drivers/accel/ivpu/ivpu_ms.c @@ -8,6 +8,7 @@ #include "ivpu_drv.h" #include "ivpu_gem.h" +#include "ivpu_hw.h" #include "ivpu_jsm_msg.h" #include "ivpu_ms.h" #include "ivpu_pm.h" @@ -37,8 +38,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct drm_ivpu_metric_streamer_start *args = data; struct ivpu_device *vdev = file_priv->vdev; struct ivpu_ms_instance *ms; - u64 single_buff_size; u32 sample_size; + u64 buf_size; int ret; if (!args->metric_group_mask || !args->read_period_samples || @@ -52,7 +53,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil mutex_lock(&file_priv->ms_lock); if (get_instance_by_mask(file_priv, args->metric_group_mask)) { - ivpu_err(vdev, "Instance already exists (mask %#llx)\n", args->metric_group_mask); + ivpu_dbg(vdev, IOCTL, "Instance already exists (mask %#llx)\n", + args->metric_group_mask); ret = -EALREADY; goto unlock; } @@ -69,12 +71,18 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil if (ret) goto err_free_ms; - single_buff_size = sample_size * - ((u64)args->read_period_samples * MS_READ_PERIOD_MULTIPLIER); - ms->bo = ivpu_bo_create_global(vdev, PAGE_ALIGN(single_buff_size * MS_NUM_BUFFERS), - DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE); + buf_size = PAGE_ALIGN((u64)args->read_period_samples * sample_size * + MS_READ_PERIOD_MULTIPLIER * MS_NUM_BUFFERS); + if (buf_size > ivpu_hw_range_size(&vdev->hw->ranges.global)) { + ivpu_dbg(vdev, IOCTL, "Requested MS buffer size %llu exceeds range size %llu\n", + buf_size, ivpu_hw_range_size(&vdev->hw->ranges.global)); + ret = -EINVAL; + goto err_free_ms; + } + + ms->bo = ivpu_bo_create_global(vdev, buf_size, DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE); if (!ms->bo) { - ivpu_err(vdev, "Failed to allocate MS buffer (size %llu)\n", single_buff_size); + ivpu_dbg(vdev, IOCTL, "Failed to allocate 
MS buffer (size %llu)\n", buf_size); ret = -ENOMEM; goto err_free_ms; } @@ -175,7 +183,8 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file * ms = get_instance_by_mask(file_priv, args->metric_group_mask); if (!ms) { - ivpu_err(vdev, "Instance doesn't exist for mask: %#llx\n", args->metric_group_mask); + ivpu_dbg(vdev, IOCTL, "Instance doesn't exist for mask: %#llx\n", + args->metric_group_mask); ret = -EINVAL; goto unlock; } diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index 475ddc94f1cf..480c075d87f6 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -54,7 +54,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev) static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; - struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem); + struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem_bp); if (!bp->save_restore_ret_address) { ivpu_pm_prepare_cold_boot(vdev); @@ -186,7 +186,7 @@ void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason) if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) { ivpu_hw_diagnose_failure(vdev); ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */ - queue_work(system_unbound_wq, &vdev->pm->recovery_work); + queue_work(system_dfl_wq, &vdev->pm->recovery_work); } } @@ -226,7 +226,8 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev) unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr; /* No-op if already queued */ - queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms)); + queue_delayed_work(system_percpu_wq, &vdev->pm->job_timeout_work, + msecs_to_jiffies(timeout_ms)); } void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev) @@ -359,7 +360,6 @@ int ivpu_rpm_get(struct ivpu_device *vdev) void ivpu_rpm_put(struct ivpu_device *vdev) { - pm_runtime_mark_last_busy(vdev->drm.dev); pm_runtime_put_autosuspend(vdev->drm.dev); } @@ -428,7 +428,6 @@ void ivpu_pm_enable(struct ivpu_device *vdev) struct device *dev = vdev->drm.dev; pm_runtime_allow(dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } @@ -502,6 +501,11 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work) else ret = ivpu_pm_dct_disable(vdev); - if (!ret) - ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent); + if (!ret) { + /* Convert percent to U1.7 format */ + u8 val = DIV_ROUND_CLOSEST(vdev->pm->dct_active_percent * 128, 100); + + ivpu_hw_btrs_dct_set_status(vdev, enable, val); + } + } diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c index 268ab7744a8b..d250a10caca9 100644 --- a/drivers/accel/ivpu/ivpu_sysfs.c +++ b/drivers/accel/ivpu/ivpu_sysfs.c @@ -63,7 +63,8 @@ npu_memory_utilization_show(struct device *dev, struct device_attribute *attr, c mutex_lock(&vdev->bo_list_lock); list_for_each_entry(bo, &vdev->bo_list, bo_list_node) - total_npu_memory += bo->base.base.size; + if (ivpu_bo_is_resident(bo)) + total_npu_memory += ivpu_bo_size(bo); mutex_unlock(&vdev->bo_list_lock); return sysfs_emit(buf, "%lld\n", total_npu_memory); diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h index 4b6b2b3d2583..bca6a44dc041 100644 --- a/drivers/accel/ivpu/vpu_jsm_api.h +++ b/drivers/accel/ivpu/vpu_jsm_api.h @@ -1,15 +1,16 @@ /* SPDX-License-Identifier: MIT */ /* - * Copyright (c) 2020-2024, Intel Corporation. 
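As an aside on the ivpu_pm_irq_dct_work_fn() change above: the handler now converts the configured active-time percentage into the U1.7 fixed-point value that ivpu_hw_btrs_dct_set_status() expects in its new u8 parameter (7 fractional bits, so 1.0 == 128). A minimal standalone sketch of that conversion, assuming only the usual DIV_ROUND_CLOSEST() rounding behaviour for positive operands:

#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_CLOSEST() for positive operands. */
#define DIV_ROUND_CLOSEST(x, divisor) (((x) + ((divisor) / 2)) / (divisor))

/* Convert an active-time percentage (0..100) to U1.7 fixed point:
 * seven fractional bits, so 100% maps to 128 (1.0). */
static unsigned int percent_to_u1_7(unsigned int percent)
{
	return DIV_ROUND_CLOSEST(percent * 128, 100);
}

int main(void)
{
	/* 25% -> 32, 50% -> 64, 100% -> 128 */
	printf("%u %u %u\n", percent_to_u1_7(25), percent_to_u1_7(50), percent_to_u1_7(100));
	return 0;
}

For any percentage up to 100 the result stays within 0..128, which is why the active_percent parameter could be narrowed to u8.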
+ * Copyright (c) 2020-2025, Intel Corporation. + */ + +/** + * @addtogroup Jsm + * @{ */ /** * @file * @brief JSM shared definitions - * - * @ingroup Jsm - * @brief JSM shared definitions - * @{ */ #ifndef VPU_JSM_API_H #define VPU_JSM_API_H @@ -22,7 +23,7 @@ /* * Minor version changes when API backward compatibility is preserved. */ -#define VPU_JSM_API_VER_MINOR 29 +#define VPU_JSM_API_VER_MINOR 33 /* * API header changed (field names, documentation, formatting) but API itself has not been changed @@ -71,9 +72,15 @@ #define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU #define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU #define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU -/* Job status returned when the job was preempted mid-inference */ +/* @deprecated (use VPU_JSM_STATUS_PREEMPTED_MID_COMMAND instead) */ #define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU +/* Job status returned when the job was preempted mid-command */ +#define VPU_JSM_STATUS_PREEMPTED_MID_COMMAND 0xDU +/* Range of status codes that require engine reset */ +#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN 0xEU #define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU +#define VPU_JSM_STATUS_MVNCI_PREEMPTION_TIMED_OUT 0xFU +#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX 0x1FU /* * Host <-> VPU IPC channels. @@ -134,11 +141,21 @@ enum { * 2. Native fence queues are only supported on VPU 40xx onwards. */ VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U), - /* * Enable turbo mode for testing NPU performance; not recommended for regular usage. */ - VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U) + VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U), + /* + * Queue error detection mode flag + * For 'interactive' queues (this bit not set), the FW will identify queues that have not + * completed a job inside the TDR timeout as in error as part of engine reset sequence. + * For 'non-interactive' queues (this bit set), the FW will identify queues that have not + * progressed the heartbeat inside the non-interactive no-progress timeout as in error as + * part of engine reset sequence. Additionally, there is an upper limit applied to these + * queues: even if they progress the heartbeat, if they run longer than non-interactive + * timeout, then the FW will also identify them as in error. + */ + VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE = (1 << 3U) }; /* @@ -209,7 +226,7 @@ enum { */ #define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2 -/* +/** * Job scheduling priority bands for both hardware scheduling and OS scheduling. */ enum vpu_job_scheduling_priority_band { @@ -220,16 +237,16 @@ enum vpu_job_scheduling_priority_band { VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4, }; -/* +/** * Job format. * Jobs defines the actual workloads to be executed by a given engine. */ struct vpu_job_queue_entry { - /**< Address of VPU commands batch buffer */ + /** Address of VPU commands batch buffer */ u64 batch_buf_addr; - /**< Job ID */ + /** Job ID */ u32 job_id; - /**< Flags bit field, see VPU_JOB_FLAGS_* above */ + /** Flags bit field, see VPU_JOB_FLAGS_* above */ u32 flags; /** * Doorbell ring timestamp taken by KMD from SoC's global system clock, in @@ -237,20 +254,20 @@ struct vpu_job_queue_entry { * to match other profiling timestamps. 
*/ u64 doorbell_timestamp; - /**< Extra id for job tracking, used only in the firmware perf traces */ + /** Extra id for job tracking, used only in the firmware perf traces */ u64 host_tracking_id; - /**< Address of the primary preemption buffer to use for this job */ + /** Address of the primary preemption buffer to use for this job */ u64 primary_preempt_buf_addr; - /**< Size of the primary preemption buffer to use for this job */ + /** Size of the primary preemption buffer to use for this job */ u32 primary_preempt_buf_size; - /**< Size of secondary preemption buffer to use for this job */ + /** Size of secondary preemption buffer to use for this job */ u32 secondary_preempt_buf_size; - /**< Address of secondary preemption buffer to use for this job */ + /** Address of secondary preemption buffer to use for this job */ u64 secondary_preempt_buf_addr; u64 reserved_0; }; -/* +/** * Inline command format. * Inline commands are the commands executed at scheduler level (typically, * synchronization directives). Inline command and job objects must be of @@ -258,34 +275,36 @@ struct vpu_job_queue_entry { */ struct vpu_inline_cmd { u64 reserved_0; - /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */ + /** Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */ u32 type; - /* Flags bit field, see VPU_JOB_FLAGS_* above. */ + /** Flags bit field, see VPU_JOB_FLAGS_* above. */ u32 flags; - /* Inline command payload. Depends on inline command type. */ - union { - /* Fence (wait and signal) commands' payload. */ - struct { - /* Fence object handle. */ + /** Inline command payload. Depends on inline command type. */ + union payload { + /** Fence (wait and signal) commands' payload. */ + struct fence { + /** Fence object handle. */ u64 fence_handle; - /* User VA of the current fence value. */ + /** User VA of the current fence value. */ u64 current_value_va; - /* User VA of the monitored fence value (read-only). */ + /** User VA of the monitored fence value (read-only). */ u64 monitored_value_va; - /* Value to wait for or write in fence location. */ + /** Value to wait for or write in fence location. */ u64 value; - /* User VA of the log buffer in which to add log entry on completion. */ + /** User VA of the log buffer in which to add log entry on completion. */ u64 log_buffer_va; - /* NPU private data. */ + /** NPU private data. */ u64 npu_private_data; } fence; - /* Other commands do not have a payload. */ - /* Payload definition for future inline commands can be inserted here. */ + /** + * Other commands do not have a payload: + * Payload definition for future inline commands can be inserted here. + */ u64 reserved_1[6]; } payload; }; -/* +/** * Job queue slots can be populated either with job objects or inline command objects. */ union vpu_jobq_slot { @@ -293,7 +312,7 @@ union vpu_jobq_slot { struct vpu_inline_cmd inline_cmd; }; -/* +/** * Job queue control registers. */ struct vpu_job_queue_header { @@ -301,18 +320,18 @@ struct vpu_job_queue_header { u32 head; u32 tail; u32 flags; - /* Set to 1 to indicate priority_band field is valid */ + /** Set to 1 to indicate priority_band field is valid */ u32 priority_band_valid; - /* + /** * Priority for the work of this job queue, valid only if the HWS is NOT used - * and the `priority_band_valid` is set to 1. It is applied only during - * the VPU_JSM_MSG_REGISTER_DB message processing. - * The device firmware might use the `priority_band` to optimize the power + * and the @ref priority_band_valid is set to 1. 
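For illustration, the priority_band / priority_band_valid pair documented in this hunk is read by firmware only when hardware scheduling is not in use, while it processes VPU_JSM_MSG_REGISTER_DB. A hedged sketch of how a host-side helper might fill the fields before the doorbell is registered; the helper name is hypothetical and not part of the driver, and it assumes the vpu_job_queue_header layout and priority band enum declared in this header:

/* Hypothetical helper (not part of the ivpu driver): advertise an OS-scheduling
 * priority band for a job queue. The firmware samples the field once, while
 * handling VPU_JSM_MSG_REGISTER_DB, and ignores it under hardware scheduling. */
static void jobq_header_set_priority_band(struct vpu_job_queue_header *header,
					  enum vpu_job_scheduling_priority_band band)
{
	header->priority_band = band;	/* e.g. VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME */
	header->priority_band_valid = 1;
}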
It is applied only during + * the @ref VPU_JSM_MSG_REGISTER_DB message processing. + * The device firmware might use the priority_band to optimize the power * management logic, but it will not affect the order of jobs. * Available priority bands: @see enum vpu_job_scheduling_priority_band */ u32 priority_band; - /* Inside realtime band assigns a further priority, limited to 0..31 range */ + /** Inside realtime band assigns a further priority, limited to 0..31 range */ u32 realtime_priority_level; u32 reserved_0[9]; }; @@ -337,16 +356,16 @@ enum vpu_trace_entity_type { VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2, }; -/* +/** * HWS specific log buffer header details. * Total size is 32 bytes. */ struct vpu_hws_log_buffer_header { - /* Written by VPU after adding a log entry. Initialised by host to 0. */ + /** Written by VPU after adding a log entry. Initialised by host to 0. */ u32 first_free_entry_index; - /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */ + /** Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */ u32 wraparound_count; - /* + /** * This is the number of buffers that can be stored in the log buffer provided by the host. * It is written by host before passing buffer to VPU. VPU should consider it read-only. */ @@ -354,14 +373,14 @@ struct vpu_hws_log_buffer_header { u64 reserved[2]; }; -/* +/** * HWS specific log buffer entry details. * Total size is 32 bytes. */ struct vpu_hws_log_buffer_entry { - /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */ + /** VPU timestamp must be an invariant timer tick (not impacted by DVFS) */ u64 vpu_timestamp; - /* + /** * Operation type: * 0 - context state change * 1 - queue new work @@ -371,7 +390,7 @@ struct vpu_hws_log_buffer_entry { */ u32 operation_type; u32 reserved; - /* Operation data depends on operation type */ + /** Operation data depends on operation type */ u64 operation_data[2]; }; @@ -381,51 +400,54 @@ enum vpu_hws_native_fence_log_type { VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2 }; -/* HWS native fence log buffer header. */ +/** HWS native fence log buffer header. */ struct vpu_hws_native_fence_log_header { union { struct { - /* Index of the first free entry in buffer. */ + /** Index of the first free entry in buffer. */ u32 first_free_entry_idx; - /* Incremented each time NPU wraps around the buffer to write next entry. */ + /** + * Incremented whenever the NPU wraps around the buffer and writes + * to the first entry again. + */ u32 wraparound_count; }; - /* Field allowing atomic update of both fields above. */ + /** Field allowing atomic update of both fields above. */ u64 atomic_wraparound_and_entry_idx; }; - /* Log buffer type, see enum vpu_hws_native_fence_log_type. */ + /** Log buffer type, see enum vpu_hws_native_fence_log_type. */ u64 type; - /* Allocated number of entries in the log buffer. */ + /** Allocated number of entries in the log buffer. */ u64 entry_nb; u64 reserved[2]; }; -/* Native fence log operation types. */ +/** Native fence log operation types. */ enum vpu_hws_native_fence_log_op { VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0, VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1 }; -/* HWS native fence log entry. */ +/** HWS native fence log entry. */ struct vpu_hws_native_fence_log_entry { - /* Newly signaled/unblocked fence value. */ + /** Newly signaled/unblocked fence value. */ u64 fence_value; - /* Native fence object handle to which this operation belongs. 
*/ + /** Native fence object handle to which this operation belongs. */ u64 fence_handle; - /* Operation type, see enum vpu_hws_native_fence_log_op. */ + /** Operation type, see enum vpu_hws_native_fence_log_op. */ u64 op_type; u64 reserved_0; - /* + /** * VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence * wait was started (in NPU SysTime). */ u64 fence_wait_start_ts; u64 reserved_1; - /* Timestamp at which fence operation was completed (in NPU SysTime). */ + /** Timestamp at which fence operation was completed (in NPU SysTime). */ u64 fence_end_ts; }; -/* Native fence log buffer. */ +/** Native fence log buffer. */ struct vpu_hws_native_fence_log_buffer { struct vpu_hws_native_fence_log_header header; struct vpu_hws_native_fence_log_entry entry[]; @@ -435,10 +457,17 @@ struct vpu_hws_native_fence_log_buffer { * Host <-> VPU IPC messages types. */ enum vpu_ipc_msg_type { + /** Unsupported command */ VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF, - /* IPC Host -> Device, Async commands */ + /** IPC Host -> Device, base id for async commands */ VPU_JSM_MSG_ASYNC_CMD = 0x1100, + /** + * Reset engine. The NPU cancels all the jobs currently executing on the target + * engine making the engine become idle and then does a HW reset, before returning + * to the host. + * @see struct vpu_ipc_msg_payload_engine_reset + */ VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD, /** * Preempt engine. The NPU stops (preempts) all the jobs currently @@ -448,10 +477,24 @@ enum vpu_ipc_msg_type { * the target engine, but it stops processing them (until the queue doorbell * is rung again); the host is responsible to reset the job queue, either * after preemption or when resubmitting jobs to the queue. + * @see vpu_ipc_msg_payload_engine_preempt */ VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101, + /** + * OS scheduling doorbell register command + * @see vpu_ipc_msg_payload_register_db + */ VPU_JSM_MSG_REGISTER_DB = 0x1102, + /** + * OS scheduling doorbell unregister command + * @see vpu_ipc_msg_payload_unregister_db + */ VPU_JSM_MSG_UNREGISTER_DB = 0x1103, + /** + * Query engine heartbeat. Heartbeat is expected to increase monotonically + * and increase while work is being progressed by NPU. + * @see vpu_ipc_msg_payload_query_engine_hb + */ VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104, VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105, VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106, @@ -477,6 +520,7 @@ enum vpu_ipc_msg_type { * aborted and removed from internal scheduling queues. All doorbells assigned * to the host_ssid are unregistered and any internal FW resources belonging to * the host_ssid are released. 
+ * @see vpu_ipc_msg_payload_ssid_release */ VPU_JSM_MSG_SSID_RELEASE = 0x110e, /** @@ -504,43 +548,78 @@ enum vpu_ipc_msg_type { * @see vpu_jsm_metric_streamer_start */ VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112, - /** Control command: Priority band setup */ + /** + * Control command: Priority band setup + * @see vpu_ipc_msg_payload_hws_priority_band_setup + */ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113, - /** Control command: Create command queue */ + /** + * Control command: Create command queue + * @see vpu_ipc_msg_payload_hws_create_cmdq + */ VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114, - /** Control command: Destroy command queue */ + /** + * Control command: Destroy command queue + * @see vpu_ipc_msg_payload_hws_destroy_cmdq + */ VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115, - /** Control command: Set context scheduling properties */ + /** + * Control command: Set context scheduling properties + * @see vpu_ipc_msg_payload_hws_set_context_sched_properties + */ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116, - /* + /** * Register a doorbell to notify VPU of new work. The doorbell may later be * deallocated or reassigned to another context. + * @see vpu_jsm_hws_register_db */ VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117, - /** Control command: Log buffer setting */ + /** + * Control command: Log buffer setting + * @see vpu_ipc_msg_payload_hws_set_scheduling_log + */ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118, - /* Control command: Suspend command queue. */ + /** + * Control command: Suspend command queue. + * @see vpu_ipc_msg_payload_hws_suspend_cmdq + */ VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119, - /* Control command: Resume command queue */ + /** + * Control command: Resume command queue + * @see vpu_ipc_msg_payload_hws_resume_cmdq + */ VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a, - /* Control command: Resume engine after reset */ + /** + * Control command: Resume engine after reset + * @see vpu_ipc_msg_payload_hws_resume_engine + */ VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b, - /* Control command: Enable survivability/DCT mode */ + /** + * Control command: Enable survivability/DCT mode + * @see vpu_ipc_msg_payload_pwr_dct_control + */ VPU_JSM_MSG_DCT_ENABLE = 0x111c, - /* Control command: Disable survivability/DCT mode */ + /** + * Control command: Disable survivability/DCT mode + * This command has no payload + */ VPU_JSM_MSG_DCT_DISABLE = 0x111d, /** * Dump VPU state. To be used for debug purposes only. - * NOTE: Please introduce new ASYNC commands before this one. * + * This command has no payload. + * NOTE: Please introduce new ASYNC commands before this one. */ VPU_JSM_MSG_STATE_DUMP = 0x11FF, - /* IPC Host -> Device, General commands */ + /** IPC Host -> Device, base id for general commands */ VPU_JSM_MSG_GENERAL_CMD = 0x1200, + /** Unsupported command */ VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD, /** * Control dyndbg behavior by executing a dyndbg command; equivalent to - * Linux command: `echo '' > /dynamic_debug/control`. 
+ * Linux command: + * @verbatim echo '' > /dynamic_debug/control @endverbatim + * @see vpu_ipc_msg_payload_dyndbg_control */ VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201, /** @@ -548,17 +627,35 @@ enum vpu_ipc_msg_type { */ VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202, - /* IPC Device -> Host, Job completion */ + /** + * IPC Device -> Host, Job completion + * @see struct vpu_ipc_msg_payload_job_done + */ VPU_JSM_MSG_JOB_DONE = 0x2100, - /* IPC Device -> Host, Fence signalled */ + /** + * IPC Device -> Host, Fence signalled + * @see vpu_ipc_msg_payload_native_fence_signalled + */ VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101, /* IPC Device -> Host, Async command completion */ VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200, + /** + * IPC Device -> Host, engine reset complete + * @see vpu_ipc_msg_payload_engine_reset_done + */ VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE, + /** + * Preempt complete message + * @see vpu_ipc_msg_payload_engine_preempt_done + */ VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201, VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202, VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203, + /** + * Response to query engine heartbeat. + * @see vpu_ipc_msg_payload_query_engine_hb_done + */ VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204, VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205, VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206, @@ -575,7 +672,10 @@ enum vpu_ipc_msg_type { VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c, /** Response to VPU_JSM_MSG_TRACE_GET_NAME. */ VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d, - /** Response to VPU_JSM_MSG_SSID_RELEASE. */ + /** + * Response to VPU_JSM_MSG_SSID_RELEASE. + * @see vpu_ipc_msg_payload_ssid_release + */ VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e, /** * Response to VPU_JSM_MSG_METRIC_STREAMER_START. @@ -605,37 +705,71 @@ enum vpu_ipc_msg_type { /** * Asynchronous event sent from the VPU to the host either when the current * metric buffer is full or when the VPU has collected a multiple of - * @notify_sample_count samples as indicated through the start command - * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected - * metric data. + * @ref vpu_jsm_metric_streamer_start::notify_sample_count samples as indicated + * through the start command (VPU_JSM_MSG_METRIC_STREAMER_START). Returns + * information about collected metric data. 
* @see vpu_jsm_metric_streamer_done */ VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213, - /** Response to control command: Priority band setup */ + /** + * Response to control command: Priority band setup + * @see vpu_ipc_msg_payload_hws_priority_band_setup + */ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214, - /** Response to control command: Create command queue */ + /** + * Response to control command: Create command queue + * @see vpu_ipc_msg_payload_hws_create_cmdq_rsp + */ VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215, - /** Response to control command: Destroy command queue */ + /** + * Response to control command: Destroy command queue + * @see vpu_ipc_msg_payload_hws_destroy_cmdq + */ VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216, - /** Response to control command: Set context scheduling properties */ + /** + * Response to control command: Set context scheduling properties + * @see vpu_ipc_msg_payload_hws_set_context_sched_properties + */ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217, - /** Response to control command: Log buffer setting */ + /** + * Response to control command: Log buffer setting + * @see vpu_ipc_msg_payload_hws_set_scheduling_log + */ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218, - /* IPC Device -> Host, HWS notify index entry of log buffer written */ + /** + * IPC Device -> Host, HWS notify index entry of log buffer written + * @see vpu_ipc_msg_payload_hws_scheduling_log_notification + */ VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219, - /* IPC Device -> Host, HWS completion of a context suspend request */ + /** + * IPC Device -> Host, HWS completion of a context suspend request + * @see vpu_ipc_msg_payload_hws_suspend_cmdq + */ VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a, - /* Response to control command: Resume command queue */ + /** + * Response to control command: Resume command queue + * @see vpu_ipc_msg_payload_hws_resume_cmdq + */ VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b, - /* Response to control command: Resume engine command response */ + /** + * Response to control command: Resume engine command response + * @see vpu_ipc_msg_payload_hws_resume_engine + */ VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c, - /* Response to control command: Enable survivability/DCT mode */ + /** + * Response to control command: Enable survivability/DCT mode + * This command has no payload + */ VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d, - /* Response to control command: Disable survivability/DCT mode */ + /** + * Response to control command: Disable survivability/DCT mode + * This command has no payload + */ VPU_JSM_MSG_DCT_DISABLE_DONE = 0x221e, /** * Response to state dump control command. - * NOTE: Please introduce new ASYNC responses before this one. * + * This command has no payload. + * NOTE: Please introduce new ASYNC responses before this one. */ VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF, @@ -653,57 +787,66 @@ enum vpu_ipc_msg_type { enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED }; -/* - * Host <-> LRT IPC message payload definitions +/** + * Engine reset request payload + * @see VPU_JSM_MSG_ENGINE_RESET */ struct vpu_ipc_msg_payload_engine_reset { - /* Engine to be reset. */ + /** Engine to be reset. */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; +/** + * Engine preemption request struct + * @see VPU_JSM_MSG_ENGINE_PREEMPT + */ struct vpu_ipc_msg_payload_engine_preempt { - /* Engine to be preempted. */ + /** Engine to be preempted. */ u32 engine_idx; - /* ID of the preemption request. 
*/ + /** ID of the preemption request. */ u32 preempt_id; }; -/* - * @brief Register doorbell command structure. +/** + * Register doorbell command structure. * This structure supports doorbell registration for only OS scheduling. * @see VPU_JSM_MSG_REGISTER_DB */ struct vpu_ipc_msg_payload_register_db { - /* Index of the doorbell to register. */ + /** Index of the doorbell to register. */ u32 db_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; - /* Virtual address in Global GTT pointing to the start of job queue. */ + /** Virtual address in Global GTT pointing to the start of job queue. */ u64 jobq_base; - /* Size of the job queue in bytes. */ + /** Size of the job queue in bytes. */ u32 jobq_size; - /* Host sub-stream ID for the context assigned to the doorbell. */ + /** Host sub-stream ID for the context assigned to the doorbell. */ u32 host_ssid; }; /** - * @brief Unregister doorbell command structure. + * Unregister doorbell command structure. * Request structure to unregister a doorbell for both HW and OS scheduling. * @see VPU_JSM_MSG_UNREGISTER_DB */ struct vpu_ipc_msg_payload_unregister_db { - /* Index of the doorbell to unregister. */ + /** Index of the doorbell to unregister. */ u32 db_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; +/** + * Heartbeat request structure + * @see VPU_JSM_MSG_QUERY_ENGINE_HB + */ struct vpu_ipc_msg_payload_query_engine_hb { - /* Engine to return heartbeat value. */ + /** Engine to return heartbeat value. */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; @@ -723,10 +866,14 @@ struct vpu_ipc_msg_payload_power_level { u32 reserved_0; }; +/** + * Structure for requesting ssid release + * @see VPU_JSM_MSG_SSID_RELEASE + */ struct vpu_ipc_msg_payload_ssid_release { - /* Host sub-stream ID for the context to be released. */ + /** Host sub-stream ID for the context to be released. */ u32 host_ssid; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; @@ -752,7 +899,7 @@ struct vpu_jsm_metric_streamer_start { u64 sampling_rate; /** * If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message - * after every @notify_sample_count samples is collected or dropped by the VPU. + * after every @ref notify_sample_count samples is collected or dropped by the VPU. * If set to UINT_MAX the VPU will only generate a notification when the metric * buffer is full. If set to 0 the VPU will never generate a notification. */ @@ -762,9 +909,9 @@ struct vpu_jsm_metric_streamer_start { * Address and size of the buffer where the VPU will write metric data. The * VPU writes all counters from enabled metric groups one after another. If * there is no space left to write data at the next sample period the VPU - * will switch to the next buffer (@see next_buffer_addr) and will optionally - * send a notification to the host driver if @notify_sample_count is non-zero. - * If @next_buffer_addr is NULL the VPU will stop collecting metric data. + * will switch to the next buffer (@ref next_buffer_addr) and will optionally + * send a notification to the host driver if @ref notify_sample_count is non-zero. + * If @ref next_buffer_addr is NULL the VPU will stop collecting metric data. */ u64 buffer_addr; u64 buffer_size; @@ -827,63 +974,80 @@ struct vpu_jsm_metric_streamer_update { u64 next_buffer_size; }; +/** + * Device -> host job completion message. + * @see VPU_JSM_MSG_JOB_DONE + */ struct vpu_ipc_msg_payload_job_done { - /* Engine to which the job was submitted. */ + /** Engine to which the job was submitted. 
*/ u32 engine_idx; - /* Index of the doorbell to which the job was submitted */ + /** Index of the doorbell to which the job was submitted */ u32 db_idx; - /* ID of the completed job */ + /** ID of the completed job */ u32 job_id; - /* Status of the completed job */ + /** Status of the completed job */ u32 job_status; - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* +/** * Notification message upon native fence signalling. * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED */ struct vpu_ipc_msg_payload_native_fence_signalled { - /* Engine ID. */ + /** Engine ID. */ u32 engine_idx; - /* Host SSID. */ + /** Host SSID. */ u32 host_ssid; - /* CMDQ ID */ + /** CMDQ ID */ u64 cmdq_id; - /* Fence object handle. */ + /** Fence object handle. */ u64 fence_handle; }; +/** + * vpu_ipc_msg_payload_engine_reset_done will contain an array of this structure + * which contains which queues caused reset if FW was able to detect any error. + * @see vpu_ipc_msg_payload_engine_reset_done + */ struct vpu_jsm_engine_reset_context { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; - /* See VPU_ENGINE_RESET_CONTEXT_* defines */ + /** See VPU_ENGINE_RESET_CONTEXT_* defines */ u64 flags; }; +/** + * Engine reset response. + * @see VPU_JSM_MSG_ENGINE_RESET_DONE + */ struct vpu_ipc_msg_payload_engine_reset_done { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Number of impacted contexts */ + /** Number of impacted contexts */ u32 num_impacted_contexts; - /* Array of impacted command queue ids and their flags */ + /** Array of impacted command queue ids and their flags */ struct vpu_jsm_engine_reset_context impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS]; }; +/** + * Preemption response struct + * @see VPU_JSM_MSG_ENGINE_PREEMPT_DONE + */ struct vpu_ipc_msg_payload_engine_preempt_done { - /* Engine preempted. */ + /** Engine preempted. */ u32 engine_idx; - /* ID of the preemption request. */ + /** ID of the preemption request. */ u32 preempt_id; }; @@ -912,12 +1076,16 @@ struct vpu_ipc_msg_payload_unregister_db_done { u32 reserved_0; }; +/** + * Structure for heartbeat response + * @see VPU_JSM_MSG_QUERY_ENGINE_HB_DONE + */ struct vpu_ipc_msg_payload_query_engine_hb_done { - /* Engine returning heartbeat value. */ + /** Engine returning heartbeat value. */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; - /* Heartbeat value. */ + /** Heartbeat value. */ u64 heartbeat; }; @@ -937,7 +1105,10 @@ struct vpu_ipc_msg_payload_get_power_level_count_done { u8 power_limit[16]; }; -/* HWS priority band setup request / response */ +/** + * HWS priority band setup request / response + * @see VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP + */ struct vpu_ipc_msg_payload_hws_priority_band_setup { /* * Grace period in 100ns units when preempting another priority band for @@ -964,15 +1135,23 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup { * TDR timeout value in milliseconds. Default value of 0 meaning no timeout. */ u32 tdr_timeout; + /* Non-interactive queue timeout for no progress of heartbeat in milliseconds. + * Default value of 0 meaning no timeout. + */ + u32 non_interactive_no_progress_timeout; + /* + * Non-interactive queue upper limit timeout value in milliseconds. Default + * value of 0 meaning no timeout. 
+ */ + u32 non_interactive_timeout; }; -/* +/** * @brief HWS create command queue request. * Host will create a command queue via this command. * Note: Cmdq group is a handle of an object which * may contain one or more command queues. * @see VPU_JSM_MSG_CREATE_CMD_QUEUE - * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP */ struct vpu_ipc_msg_payload_hws_create_cmdq { /* Process id */ @@ -993,66 +1172,73 @@ struct vpu_ipc_msg_payload_hws_create_cmdq { u32 reserved_0; }; -/* - * @brief HWS create command queue response. - * @see VPU_JSM_MSG_CREATE_CMD_QUEUE +/** + * HWS create command queue response. * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP */ struct vpu_ipc_msg_payload_hws_create_cmdq_rsp { - /* Process id */ + /** Process id */ u64 process_id; - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Engine for which queue is being created */ + /** Engine for which queue is being created */ u32 engine_idx; - /* Command queue group */ + /** Command queue group */ u64 cmdq_group; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* HWS destroy command queue request / response */ +/** + * HWS destroy command queue request / response + * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE + * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP + */ struct vpu_ipc_msg_payload_hws_destroy_cmdq { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* HWS set context scheduling properties request / response */ +/** + * HWS set context scheduling properties request / response + * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES + * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP + */ struct vpu_ipc_msg_payload_hws_set_context_sched_properties { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; - /* + /** * Priority band to assign to work of this context. * Available priority bands: @see enum vpu_job_scheduling_priority_band */ u32 priority_band; - /* Inside realtime band assigns a further priority */ + /** Inside realtime band assigns a further priority */ u32 realtime_priority_level; - /* Priority relative to other contexts in the same process */ + /** Priority relative to other contexts in the same process */ s32 in_process_priority; - /* Zero padding / Reserved */ + /** Zero padding / Reserved */ u32 reserved_1; - /* + /** * Context quantum relative to other contexts of same priority in the same process * Minimum value supported by NPU is 1ms (10000 in 100ns units). */ u64 context_quantum; - /* Grace period when preempting context of the same priority within the same process */ + /** Grace period when preempting context of the same priority within the same process */ u64 grace_period_same_priority; - /* Grace period when preempting context of a lower priority within the same process */ + /** Grace period when preempting context of a lower priority within the same process */ u64 grace_period_lower_priority; }; -/* - * @brief Register doorbell command structure. +/** + * Register doorbell command structure. * This structure supports doorbell registration for both HW and OS scheduling. * Note: Queue base and size are added here so that the same structure can be used for * OS scheduling and HW scheduling. 
For OS scheduling, cmdq_id will be ignored @@ -1061,27 +1247,27 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties { * @see VPU_JSM_MSG_HWS_REGISTER_DB */ struct vpu_jsm_hws_register_db { - /* Index of the doorbell to register. */ + /** Index of the doorbell to register. */ u32 db_id; - /* Host sub-stream ID for the context assigned to the doorbell. */ + /** Host sub-stream ID for the context assigned to the doorbell. */ u32 host_ssid; - /* ID of the command queue associated with the doorbell. */ + /** ID of the command queue associated with the doorbell. */ u64 cmdq_id; - /* Virtual address pointing to the start of command queue. */ + /** Virtual address pointing to the start of command queue. */ u64 cmdq_base; - /* Size of the command queue in bytes. */ + /** Size of the command queue in bytes. */ u64 cmdq_size; }; -/* - * @brief Structure to set another buffer to be used for scheduling-related logging. +/** + * Structure to set another buffer to be used for scheduling-related logging. * The size of the logging buffer and the number of entries is defined as part of the * buffer itself as described next. * The log buffer received from the host is made up of; - * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'. + * - header: 32 bytes in size, as shown in @ref vpu_hws_log_buffer_header. * The header contains the number of log entries in the buffer. * - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in - * 'struct vpu_hws_log_buffer_entry'. + * @ref vpu_hws_log_buffer_entry. * The entry contains the VPU timestamp, operation type and data. * The host should provide the notify index value of log buffer to VPU. This is a * value defined within the log buffer and when written to will generate the @@ -1095,30 +1281,30 @@ struct vpu_jsm_hws_register_db { * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION */ struct vpu_ipc_msg_payload_hws_set_scheduling_log { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* + /** * VPU log buffer virtual address. * Set to 0 to disable logging for this engine. */ u64 vpu_log_buffer_va; - /* + /** * Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION * is generated when an event log is written to this index. */ u64 notify_index; - /* + /** * Field is now deprecated, will be removed when KMD is updated to support removal */ u32 enable_extra_events; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; }; -/* - * @brief The scheduling log notification is generated by VPU when it writes +/** + * The scheduling log notification is generated by VPU when it writes * an event into the log buffer at the notify_index. VPU notifies host with * VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous * message from VPU to host. @@ -1126,14 +1312,14 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log { * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG */ struct vpu_ipc_msg_payload_hws_scheduling_log_notification { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; }; -/* - * @brief HWS suspend command queue request and done structure. +/** + * HWS suspend command queue request and done structure. 
* Host will request the suspend of contexts and VPU will; * - Suspend all work on this context * - Preempt any running work @@ -1152,21 +1338,21 @@ struct vpu_ipc_msg_payload_hws_scheduling_log_notification { * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE */ struct vpu_ipc_msg_payload_hws_suspend_cmdq { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; - /* + /** * Suspend fence value - reported by the VPU suspend context * completed once suspend is complete. */ u64 suspend_fence_value; }; -/* - * @brief HWS Resume command queue request / response structure. +/** + * HWS Resume command queue request / response structure. * Host will request the resume of a context; * - VPU will resume all work on this context * - Scheduler will allow this context to be scheduled @@ -1174,25 +1360,25 @@ struct vpu_ipc_msg_payload_hws_suspend_cmdq { * @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP */ struct vpu_ipc_msg_payload_hws_resume_cmdq { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* - * @brief HWS Resume engine request / response structure. - * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume. +/** + * HWS Resume engine request / response structure. + * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume. * Host shall send this command to resume scheduling of any valid queue. - * @see VPU_JSM_MSG_HWS_RESUME_ENGINE + * @see VPU_JSM_MSG_HWS_ENGINE_RESUME * @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE */ struct vpu_ipc_msg_payload_hws_resume_engine { - /* Engine to be resumed */ + /** Engine to be resumed */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; @@ -1326,7 +1512,7 @@ struct vpu_jsm_metric_streamer_done { /** * Metric group description placed in the metric buffer after successful completion * of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more - * @vpu_jsm_metric_counter_descriptor records. + * @ref vpu_jsm_metric_counter_descriptor records. * @see VPU_JSM_MSG_METRIC_STREAMER_INFO */ struct vpu_jsm_metric_group_descriptor { @@ -1413,29 +1599,24 @@ struct vpu_jsm_metric_counter_descriptor { }; /** - * Payload for VPU_JSM_MSG_DYNDBG_CONTROL requests. + * Payload for @ref VPU_JSM_MSG_DYNDBG_CONTROL requests. * - * VPU_JSM_MSG_DYNDBG_CONTROL are used to control the VPU FW Dynamic Debug - * feature, which allows developers to selectively enable / disable MVLOG_DEBUG - * messages. This is equivalent to the Dynamic Debug functionality provided by - * Linux - * (https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html) - * The host can control Dynamic Debug behavior by sending dyndbg commands, which - * have the same syntax as Linux - * dyndbg commands. + * VPU_JSM_MSG_DYNDBG_CONTROL requests are used to control the VPU FW dynamic debug + * feature, which allows developers to selectively enable/disable code to obtain + * additional FW information. This is equivalent to the dynamic debug functionality + * provided by Linux. The host can control dynamic debug behavior by sending dyndbg + * commands, using the same syntax as for Linux dynamic debug commands. * - * NOTE: in order for MVLOG_DEBUG messages to be actually printed, the host - * still has to set the logging level to MVLOG_DEBUG, using the - * VPU_JSM_MSG_TRACE_SET_CONFIG command. 
+ * @see https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html. * - * The host can see the current dynamic debug configuration by executing a - * special 'show' command. The dyndbg configuration will be printed to the - * configured logging destination using MVLOG_INFO logging level. + * NOTE: + * As the dynamic debug feature uses MVLOG messages to provide information, the host + * must first set the logging level to MVLOG_DEBUG, using the @ref VPU_JSM_MSG_TRACE_SET_CONFIG + * command. */ struct vpu_ipc_msg_payload_dyndbg_control { /** - * Dyndbg command (same format as Linux dyndbg); must be a NULL-terminated - * string. + * Dyndbg command to be executed. */ char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN]; }; @@ -1456,7 +1637,7 @@ struct vpu_ipc_msg_payload_pwr_d0i3_enter { }; /** - * Payload for VPU_JSM_MSG_DCT_ENABLE message. + * Payload for @ref VPU_JSM_MSG_DCT_ENABLE message. * * Default values for DCT active/inactive times are 5.3ms and 30ms respectively, * corresponding to a 85% duty cycle. This payload allows the host to tune these @@ -1513,28 +1694,28 @@ union vpu_ipc_msg_payload { struct vpu_ipc_msg_payload_pwr_dct_control pwr_dct_control; }; -/* - * Host <-> LRT IPC message base structure. +/** + * Host <-> NPU IPC message base structure. * * NOTE: All instances of this object must be aligned on a 64B boundary * to allow proper handling of VPU cache operations. */ struct vpu_jsm_msg { - /* Reserved */ + /** Reserved */ u64 reserved_0; - /* Message type, see vpu_ipc_msg_type enum. */ + /** Message type, see @ref vpu_ipc_msg_type. */ u32 type; - /* Buffer status, see vpu_ipc_msg_status enum. */ + /** Buffer status, see @ref vpu_ipc_msg_status. */ u32 status; - /* + /** * Request ID, provided by the host in a request message and passed * back by VPU in the response message. */ u32 request_id; - /* Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */ + /** Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */ u32 result; u64 reserved_1; - /* Message payload depending on message type, see vpu_ipc_msg_payload union. */ + /** Message payload depending on message type, see vpu_ipc_msg_payload union. */ union vpu_ipc_msg_payload payload; }; diff --git a/drivers/accel/qaic/Kconfig b/drivers/accel/qaic/Kconfig index 5e405a19c157..116e42d152ca 100644 --- a/drivers/accel/qaic/Kconfig +++ b/drivers/accel/qaic/Kconfig @@ -9,6 +9,7 @@ config DRM_ACCEL_QAIC depends on PCI && HAS_IOMEM depends on MHI_BUS select CRC32 + select WANT_DEV_COREDUMP help Enables driver for Qualcomm's Cloud AI accelerator PCIe cards that are designed to accelerate Deep Learning inference workloads. 
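Note on the newly selected WANT_DEV_COREDUMP: it makes the devcoredump framework available, which is the usual way for an accelerator driver to publish a collected crashdump (such as the SSR dump added later in this series) to userspace. As a minimal, hypothetical sketch — the helper name and call site below are illustrative and not taken from this series — a finished vmalloc'd dump could be handed over like this:

#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/gfp.h>

/*
 * Illustrative only: hand a completed, vmalloc'd crashdump buffer to the
 * devcoredump framework. dev_coredumpv() takes ownership of the buffer and
 * releases it with vfree(), so the caller must not free or reuse it afterwards.
 */
static void example_publish_crashdump(struct device *dev, void *dump, size_t dump_sz)
{
	dev_coredumpv(dev, dump, dump_sz, GFP_KERNEL);
}

The resulting dump then appears under /sys/class/devcoredump/ and is discarded automatically after a timeout or once userspace reads and releases it.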
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile index 1106b876f737..71f727b74da3 100644 --- a/drivers/accel/qaic/Makefile +++ b/drivers/accel/qaic/Makefile @@ -11,6 +11,8 @@ qaic-y := \ qaic_data.o \ qaic_drv.o \ qaic_ras.o \ + qaic_ssr.o \ + qaic_sysfs.o \ qaic_timesync.o \ sahara.o diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h index 820d133236dd..fa7a8155658c 100644 --- a/drivers/accel/qaic/qaic.h +++ b/drivers/accel/qaic/qaic.h @@ -21,6 +21,7 @@ #define QAIC_DBC_BASE SZ_128K #define QAIC_DBC_SIZE SZ_4K +#define QAIC_SSR_DBC_SENTINEL U32_MAX /* No ongoing SSR sentinel */ #define QAIC_NO_PARTITION -1 @@ -47,6 +48,22 @@ enum __packed dev_states { QAIC_ONLINE, }; +enum dbc_states { + /* DBC is free and can be activated */ + DBC_STATE_IDLE, + /* DBC is activated and a workload is running on device */ + DBC_STATE_ASSIGNED, + /* Sub-system associated with this workload has crashed and it will shutdown soon */ + DBC_STATE_BEFORE_SHUTDOWN, + /* Sub-system associated with this workload has crashed and it has shutdown */ + DBC_STATE_AFTER_SHUTDOWN, + /* Sub-system associated with this workload is shutdown and it will be powered up soon */ + DBC_STATE_BEFORE_POWER_UP, + /* Sub-system associated with this workload is now powered up */ + DBC_STATE_AFTER_POWER_UP, + DBC_STATE_MAX, +}; + extern bool datapath_polling; struct qaic_user { @@ -114,6 +131,8 @@ struct dma_bridge_chan { unsigned int irq; /* Polling work item to simulate interrupts */ struct work_struct poll_work; + /* Represents various states of this DBC from enum dbc_states */ + unsigned int state; }; struct qaic_device { @@ -161,6 +180,8 @@ struct qaic_device { struct mhi_device *qts_ch; /* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */ struct workqueue_struct *qts_wq; + /* MHI "QAIC_TIMESYNC_PERIODIC" channel device */ + struct mhi_device *mqts_ch; /* Head of list of page allocated by MHI bootlog device */ struct list_head bootlog; /* MHI bootlog channel device */ @@ -177,6 +198,14 @@ struct qaic_device { unsigned int ue_count; /* Un-correctable non-fatal error count */ unsigned int ue_nf_count; + /* MHI SSR channel device */ + struct mhi_device *ssr_ch; + /* Work queue for tasks related to MHI SSR device */ + struct workqueue_struct *ssr_wq; + /* Buffer to collect SSR crashdump via SSR MHI channel */ + void *ssr_mhi_buf; + /* DBC which is under SSR. 
Sentinel U32_MAX would mean that no SSR in progress */ + u32 ssr_dbc; }; struct qaic_drm_device { @@ -195,6 +224,8 @@ struct qaic_drm_device { struct list_head users; /* Synchronizes access to users list */ struct mutex users_mutex; + /* Pointer to array of DBC sysfs attributes */ + void *sysfs_attrs; }; struct qaic_bo { @@ -317,6 +348,13 @@ int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void irq_polling_work(struct work_struct *work); +void qaic_irq_polling_work(struct work_struct *work); +void qaic_dbc_enter_ssr(struct qaic_device *qdev, u32 dbc_id); +void qaic_dbc_exit_ssr(struct qaic_device *qdev); + +/* qaic_sysfs.c */ +int qaic_sysfs_init(struct qaic_drm_device *qddev); +void qaic_sysfs_remove(struct qaic_drm_device *qddev); +void set_dbc_state(struct qaic_device *qdev, u32 dbc_id, unsigned int state); #endif /* _QAIC_H_ */ diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c index b86a8e48e731..428d8f65bff3 100644 --- a/drivers/accel/qaic/qaic_control.c +++ b/drivers/accel/qaic/qaic_control.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -30,7 +31,7 @@ #define MANAGE_MAGIC_NUMBER ((__force __le32)0x43494151) /* "QAIC" in little endian */ #define QAIC_DBC_Q_GAP SZ_256 #define QAIC_DBC_Q_BUF_ALIGN SZ_4K -#define QAIC_MANAGE_EXT_MSG_LENGTH SZ_64K /* Max DMA message length */ +#define QAIC_MANAGE_WIRE_MSG_LENGTH SZ_64K /* Max DMA message length */ #define QAIC_WRAPPER_MAX_SIZE SZ_4K #define QAIC_MHI_RETRY_WAIT_MS 100 #define QAIC_MHI_RETRY_MAX 20 @@ -309,6 +310,7 @@ static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resou enable_dbc(qdev, dbc_id, usr); qdev->dbc[dbc_id].in_use = true; resources->buf = NULL; + set_dbc_state(qdev, dbc_id, DBC_STATE_ASSIGNED); } } @@ -367,7 +369,7 @@ static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrap if (in_trans->hdr.len % 8 != 0) return -EINVAL; - if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH) + if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH) return -ENOSPC; trans_wrapper = add_wrapper(wrappers, @@ -495,7 +497,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr nents = sgt->nents; nents_dma = nents; - *size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans); + *size = QAIC_MANAGE_WIRE_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans); for_each_sgtable_dma_sg(sgt, sg, i) { *size -= sizeof(*asp); /* Save 1K for possible follow-up transactions. */ @@ -576,7 +578,7 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list /* There should be enough space to hold at least one ASP entry. 
*/ if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) > - QAIC_MANAGE_EXT_MSG_LENGTH) + QAIC_MANAGE_WIRE_MSG_LENGTH) return -ENOMEM; xfer = kmalloc(sizeof(*xfer), GFP_KERNEL); @@ -645,7 +647,7 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper msg = &wrapper->msg; msg_hdr_len = le32_to_cpu(msg->hdr.len); - if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH) + if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_WIRE_MSG_LENGTH) return -ENOSPC; if (!in_trans->queue_size) @@ -655,8 +657,9 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper return -EINVAL; nelem = in_trans->queue_size; - size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem; - if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) + if (check_mul_overflow((u32)(get_dbc_req_elem_size() + get_dbc_rsp_elem_size()), + nelem, + &size)) return -EINVAL; if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size) @@ -729,7 +732,7 @@ static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_l msg = &wrapper->msg; msg_hdr_len = le32_to_cpu(msg->hdr.len); - if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH) + if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH) return -ENOSPC; trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper)); @@ -810,7 +813,7 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg, } if (ret) - break; + goto out; } if (user_len != user_msg->len) @@ -921,6 +924,7 @@ static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len } release_dbc(qdev, dbc_id); + set_dbc_state(qdev, dbc_id, DBC_STATE_IDLE); *msg_len += sizeof(*in_trans); return 0; @@ -1052,7 +1056,7 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u init_completion(&elem.xfer_done); if (likely(!qdev->cntl_lost_buf)) { /* - * The max size of request to device is QAIC_MANAGE_EXT_MSG_LENGTH. + * The max size of request to device is QAIC_MANAGE_WIRE_MSG_LENGTH. * The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH. */ out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL); @@ -1079,7 +1083,6 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u list_for_each_entry(w, &wrappers->list, list) { kref_get(&w->ref_count); - retry_count = 0; ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len, list_is_last(&w->list, &wrappers->list) ? 
MHI_EOT : MHI_CHAIN); if (ret) { diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index c4f117edb266..60cb4d65d48e 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -165,7 +166,7 @@ static void free_slice(struct kref *kref) drm_gem_object_put(&slice->bo->base); sg_free_table(slice->sgt); kfree(slice->sgt); - kfree(slice->reqs); + kvfree(slice->reqs); kfree(slice); } @@ -404,7 +405,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo, goto free_sgt; } - slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL); + slice->reqs = kvcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL); if (!slice->reqs) { ret = -ENOMEM; goto free_slice; @@ -430,7 +431,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo, return 0; free_req: - kfree(slice->reqs); + kvfree(slice->reqs); free_slice: kfree(slice); free_sgt: @@ -643,8 +644,36 @@ static void qaic_free_object(struct drm_gem_object *obj) kfree(bo); } +static struct sg_table *qaic_get_sg_table(struct drm_gem_object *obj) +{ + struct qaic_bo *bo = to_qaic_bo(obj); + struct scatterlist *sg, *sg_in; + struct sg_table *sgt, *sgt_in; + int i; + + sgt_in = bo->sgt; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + if (sg_alloc_table(sgt, sgt_in->orig_nents, GFP_KERNEL)) { + kfree(sgt); + return ERR_PTR(-ENOMEM); + } + + sg = sgt->sgl; + for_each_sgtable_sg(sgt_in, sg_in, i) { + memcpy(sg, sg_in, sizeof(*sg)); + sg = sg_next(sg); + } + + return sgt; +} + static const struct drm_gem_object_funcs qaic_gem_funcs = { .free = qaic_free_object, + .get_sg_table = qaic_get_sg_table, .print_info = qaic_gem_print_info, .mmap = qaic_gem_object_mmap, .vm_ops = &drm_vm_ops, @@ -953,8 +982,9 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi if (args->hdr.count == 0) return -EINVAL; - arg_size = args->hdr.count * sizeof(*slice_ent); - if (arg_size / args->hdr.count != sizeof(*slice_ent)) + if (check_mul_overflow((unsigned long)args->hdr.count, + (unsigned long)sizeof(*slice_ent), + &arg_size)) return -EINVAL; if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE)) @@ -984,18 +1014,12 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi user_data = u64_to_user_ptr(args->data); - slice_ent = kzalloc(arg_size, GFP_KERNEL); - if (!slice_ent) { - ret = -EINVAL; + slice_ent = memdup_user(user_data, arg_size); + if (IS_ERR(slice_ent)) { + ret = PTR_ERR(slice_ent); goto unlock_dev_srcu; } - ret = copy_from_user(slice_ent, user_data, arg_size); - if (ret) { - ret = -EFAULT; - goto free_slice_ent; - } - obj = drm_gem_object_lookup(file_priv, args->hdr.handle); if (!obj) { ret = -ENOENT; @@ -1023,6 +1047,11 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi goto unlock_ch_srcu; } + if (dbc->id == qdev->ssr_dbc) { + ret = -EPIPE; + goto unlock_ch_srcu; + } + ret = qaic_prepare_bo(qdev, bo, &args->hdr); if (ret) goto unlock_ch_srcu; @@ -1300,8 +1329,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr int usr_rcu_id, qdev_rcu_id; struct qaic_device *qdev; struct qaic_user *usr; - u8 __user *user_data; - unsigned long n; u64 received_ts; u32 queue_level; u64 submit_ts; @@ -1314,20 +1341,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr received_ts = 
ktime_get_ns(); size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec); - n = (unsigned long)size * args->hdr.count; - if (args->hdr.count == 0 || n / args->hdr.count != size) + if (args->hdr.count == 0) return -EINVAL; - user_data = u64_to_user_ptr(args->data); - - exec = kcalloc(args->hdr.count, size, GFP_KERNEL); - if (!exec) - return -ENOMEM; - - if (copy_from_user(exec, user_data, n)) { - ret = -EFAULT; - goto free_exec; - } + exec = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, size); + if (IS_ERR(exec)) + return PTR_ERR(exec); usr = file_priv->driver_priv; usr_rcu_id = srcu_read_lock(&usr->qddev_lock); @@ -1356,6 +1375,11 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr goto release_ch_rcu; } + if (dbc->id == qdev->ssr_dbc) { + ret = -EPIPE; + goto release_ch_rcu; + } + ret = mutex_lock_interruptible(&dbc->req_lock); if (ret) goto release_ch_rcu; @@ -1396,7 +1420,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); unlock_usr_srcu: srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); -free_exec: kfree(exec); return ret; } @@ -1491,7 +1514,7 @@ irqreturn_t dbc_irq_handler(int irq, void *data) return IRQ_WAKE_THREAD; } -void irq_polling_work(struct work_struct *work) +void qaic_irq_polling_work(struct work_struct *work) { struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work); unsigned long flags; @@ -1709,6 +1732,11 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file goto unlock_ch_srcu; } + if (dbc->id == qdev->ssr_dbc) { + ret = -EPIPE; + goto unlock_ch_srcu; + } + obj = drm_gem_object_lookup(file_priv, args->handle); if (!obj) { ret = -ENOENT; @@ -1729,6 +1757,9 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file if (!dbc->usr) ret = -EPERM; + if (dbc->id == qdev->ssr_dbc) + ret = -EPIPE; + put_obj: drm_gem_object_put(obj); unlock_ch_srcu: @@ -1749,7 +1780,8 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file struct qaic_device *qdev; struct qaic_user *usr; struct qaic_bo *bo; - int ret, i; + int ret = 0; + int i; usr = file_priv->driver_priv; usr_rcu_id = srcu_read_lock(&usr->qddev_lock); @@ -1770,18 +1802,12 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file goto unlock_dev_srcu; } - ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL); - if (!ent) { - ret = -EINVAL; + ent = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, sizeof(*ent)); + if (IS_ERR(ent)) { + ret = PTR_ERR(ent); goto unlock_dev_srcu; } - ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent)); - if (ret) { - ret = -EFAULT; - goto free_ent; - } - for (i = 0; i < args->hdr.count; i++) { obj = drm_gem_object_lookup(file_priv, ent[i].handle); if (!obj) { @@ -1789,6 +1815,16 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file goto free_ent; } bo = to_qaic_bo(obj); + if (!bo->sliced) { + drm_gem_object_put(obj); + ret = -EINVAL; + goto free_ent; + } + if (bo->dbc->id != args->hdr.dbc_id) { + drm_gem_object_put(obj); + ret = -EINVAL; + goto free_ent; + } /* * perf stats ioctl is called before wait ioctl is complete then * the latency information is invalid. 
@@ -1927,6 +1963,17 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db spin_unlock_irqrestore(&dbc->xfer_lock, flags); } +static void sync_empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc) +{ + empty_xfer_list(qdev, dbc); + synchronize_srcu(&dbc->ch_lock); + /* + * Threads holding channel lock, may add more elements in the xfer_list. + * Flush out these elements from xfer_list. + */ + empty_xfer_list(qdev, dbc); +} + int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr) { if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle) @@ -1941,7 +1988,7 @@ int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr) * enable_dbc - Enable the DBC. DBCs are disabled by removing the context of * user. Add user context back to DBC to enable it. This function trusts the * DBC ID passed and expects the DBC to be disabled. - * @qdev: Qranium device handle + * @qdev: qaic device handle * @dbc_id: ID of the DBC * @usr: User context */ @@ -1955,13 +2002,7 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id) struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id]; dbc->usr = NULL; - empty_xfer_list(qdev, dbc); - synchronize_srcu(&dbc->ch_lock); - /* - * Threads holding channel lock, may add more elements in the xfer_list. - * Flush out these elements from xfer_list. - */ - empty_xfer_list(qdev, dbc); + sync_empty_xfer_list(qdev, dbc); } void release_dbc(struct qaic_device *qdev, u32 dbc_id) @@ -2002,3 +2043,30 @@ void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail) *head = readl(dbc->dbc_base + REQHP_OFF); *tail = readl(dbc->dbc_base + REQTP_OFF); } + +/* + * qaic_dbc_enter_ssr - Prepare to enter sub-system reset (SSR) for the given DBC ID. + * @qdev: qaic device handle + * @dbc_id: ID of the DBC which will enter SSR + * + * The device will automatically deactivate the workload as not + * all errors can be silently recovered. The user will be + * notified and will need to decide the required recovery + * action to take. + */ +void qaic_dbc_enter_ssr(struct qaic_device *qdev, u32 dbc_id) +{ + qdev->ssr_dbc = dbc_id; + release_dbc(qdev, dbc_id); +} + +/* + * qaic_dbc_exit_ssr - Prepare to exit from sub-system reset (SSR) for the DBC currently under SSR. + * @qdev: qaic device handle + * + * The DBC returns to an operational state and begins accepting work after exiting SSR.
+ */ +void qaic_dbc_exit_ssr(struct qaic_device *qdev) +{ + qdev->ssr_dbc = QAIC_SSR_DBC_SENTINEL; +} diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index e162f4b8a262..4c70bd949d53 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -30,6 +30,7 @@ #include "qaic.h" #include "qaic_debugfs.h" #include "qaic_ras.h" +#include "qaic_ssr.h" #include "qaic_timesync.h" #include "sahara.h" @@ -270,6 +271,13 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id) return ret; } + ret = qaic_sysfs_init(qddev); + if (ret) { + drm_dev_unregister(drm); + pci_dbg(qdev->pdev, "qaic_sysfs_init failed %d\n", ret); + return ret; + } + qaic_debugfs_init(qddev); return ret; @@ -281,6 +289,7 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id) struct drm_device *drm = to_drm(qddev); struct qaic_user *usr; + qaic_sysfs_remove(qddev); drm_dev_unregister(drm); qddev->partition_id = 0; /* @@ -382,6 +391,7 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev) qaic_notify_reset(qdev); /* start tearing things down */ + qaic_clean_up_ssr(qdev); for (i = 0; i < qdev->num_dbc; ++i) release_dbc(qdev, i); } @@ -431,11 +441,18 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts"); if (IS_ERR(qdev->qts_wq)) return NULL; + qdev->ssr_wq = qaicm_wq_init(drm, "qaic_ssr"); + if (IS_ERR(qdev->ssr_wq)) + return NULL; ret = qaicm_srcu_init(drm, &qdev->dev_lock); if (ret) return NULL; + ret = qaic_ssr_init(qdev, drm); + if (ret) + pci_info(pdev, "QAIC SSR crashdump collection not supported.\n"); + qdev->qddev = qddev; qdev->pdev = pdev; qddev->qdev = qdev; @@ -545,7 +562,7 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev) qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1); if (!qdev->single_msi) disable_irq_nosync(qdev->dbc[i].irq); - INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work); + INIT_WORK(&qdev->dbc[i].poll_work, qaic_irq_polling_work); } } @@ -660,6 +677,92 @@ static const struct pci_error_handlers qaic_pci_err_handler = { .reset_done = qaic_pci_reset_done, }; +static bool qaic_is_under_reset(struct qaic_device *qdev) +{ + int rcu_id; + bool ret; + + rcu_id = srcu_read_lock(&qdev->dev_lock); + ret = qdev->dev_state != QAIC_ONLINE; + srcu_read_unlock(&qdev->dev_lock, rcu_id); + return ret; +} + +static bool qaic_data_path_busy(struct qaic_device *qdev) +{ + bool ret = false; + int dev_rcu_id; + int i; + + dev_rcu_id = srcu_read_lock(&qdev->dev_lock); + if (qdev->dev_state != QAIC_ONLINE) { + srcu_read_unlock(&qdev->dev_lock, dev_rcu_id); + return false; + } + for (i = 0; i < qdev->num_dbc; i++) { + struct dma_bridge_chan *dbc = &qdev->dbc[i]; + unsigned long flags; + int ch_rcu_id; + + ch_rcu_id = srcu_read_lock(&dbc->ch_lock); + if (!dbc->usr || !dbc->in_use) { + srcu_read_unlock(&dbc->ch_lock, ch_rcu_id); + continue; + } + spin_lock_irqsave(&dbc->xfer_lock, flags); + ret = !list_empty(&dbc->xfer_list); + spin_unlock_irqrestore(&dbc->xfer_lock, flags); + srcu_read_unlock(&dbc->ch_lock, ch_rcu_id); + if (ret) + break; + } + srcu_read_unlock(&qdev->dev_lock, dev_rcu_id); + return ret; +} + +static int qaic_pm_suspend(struct device *dev) +{ + struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev)); + + dev_dbg(dev, "Suspending..\n"); + if (qaic_data_path_busy(qdev)) { + dev_dbg(dev, "Device's datapath is busy. 
Aborting suspend..\n"); + return -EBUSY; + } + if (qaic_is_under_reset(qdev)) { + dev_dbg(dev, "Device is under reset. Aborting suspend..\n"); + return -EBUSY; + } + qaic_mqts_ch_stop_timer(qdev->mqts_ch); + qaic_pci_reset_prepare(qdev->pdev); + pci_save_state(qdev->pdev); + pci_disable_device(qdev->pdev); + pci_set_power_state(qdev->pdev, PCI_D3hot); + return 0; +} + +static int qaic_pm_resume(struct device *dev) +{ + struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev)); + int ret; + + dev_dbg(dev, "Resuming..\n"); + pci_set_power_state(qdev->pdev, PCI_D0); + pci_restore_state(qdev->pdev); + ret = pci_enable_device(qdev->pdev); + if (ret) { + dev_err(dev, "pci_enable_device failed on resume %d\n", ret); + return ret; + } + pci_set_master(qdev->pdev); + qaic_pci_reset_done(qdev->pdev); + return 0; +} + +static const struct dev_pm_ops qaic_pm_ops = { + SYSTEM_SLEEP_PM_OPS(qaic_pm_suspend, qaic_pm_resume) +}; + static struct pci_driver qaic_pci_driver = { .name = QAIC_NAME, .id_table = qaic_ids, @@ -667,6 +770,9 @@ static struct pci_driver qaic_pci_driver = { .remove = qaic_pci_remove, .shutdown = qaic_pci_shutdown, .err_handler = &qaic_pci_err_handler, + .driver = { + .pm = pm_sleep_ptr(&qaic_pm_ops), + }, }; static int __init qaic_init(void) @@ -702,9 +808,16 @@ static int __init qaic_init(void) ret = qaic_ras_register(); if (ret) pr_debug("qaic: qaic_ras_register failed %d\n", ret); + ret = qaic_ssr_register(); + if (ret) { + pr_debug("qaic: qaic_ssr_register failed %d\n", ret); + goto free_bootlog; + } return 0; +free_bootlog: + qaic_bootlog_unregister(); free_mhi: mhi_driver_unregister(&qaic_mhi_driver); free_pci: @@ -730,6 +843,7 @@ static void __exit qaic_exit(void) * reinitializing the link_up state after the cleanup is done. */ link_up = true; + qaic_ssr_unregister(); qaic_ras_unregister(); qaic_bootlog_unregister(); qaic_timesync_deinit(); diff --git a/drivers/accel/qaic/qaic_ras.c b/drivers/accel/qaic/qaic_ras.c index 914ffc4a9970..f1d52a710136 100644 --- a/drivers/accel/qaic/qaic_ras.c +++ b/drivers/accel/qaic/qaic_ras.c @@ -514,21 +514,21 @@ static ssize_t ce_count_show(struct device *dev, struct device_attribute *attr, { struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev)); - return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ce_count); + return sysfs_emit(buf, "%d\n", qdev->ce_count); } static ssize_t ue_count_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev)); - return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_count); + return sysfs_emit(buf, "%d\n", qdev->ue_count); } static ssize_t ue_nonfatal_count_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev)); - return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_nf_count); + return sysfs_emit(buf, "%d\n", qdev->ue_nf_count); } static DEVICE_ATTR_RO(ce_count); diff --git a/drivers/accel/qaic/qaic_ssr.c b/drivers/accel/qaic/qaic_ssr.c new file mode 100644 index 000000000000..9b662d690371 --- /dev/null +++ b/drivers/accel/qaic/qaic_ssr.c @@ -0,0 +1,815 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qaic.h" +#include "qaic_ssr.h" + +#define SSR_RESP_MSG_SZ 32 +#define SSR_MHI_BUF_SIZE SZ_64K +#define SSR_MEM_READ_DATA_SIZE ((u64)SSR_MHI_BUF_SIZE - sizeof(struct ssr_crashdump)) +#define SSR_MEM_READ_CHUNK_SIZE ((u64)SSR_MEM_READ_DATA_SIZE - sizeof(struct ssr_memory_read_rsp)) + +#define DEBUG_TRANSFER_INFO BIT(0) +#define DEBUG_TRANSFER_INFO_RSP BIT(1) +#define MEMORY_READ BIT(2) +#define MEMORY_READ_RSP BIT(3) +#define DEBUG_TRANSFER_DONE BIT(4) +#define DEBUG_TRANSFER_DONE_RSP BIT(5) +#define SSR_EVENT BIT(8) +#define SSR_EVENT_RSP BIT(9) + +#define SSR_EVENT_NACK BIT(0) +#define BEFORE_SHUTDOWN BIT(1) +#define AFTER_SHUTDOWN BIT(2) +#define BEFORE_POWER_UP BIT(3) +#define AFTER_POWER_UP BIT(4) + +struct debug_info_table { + /* Save preferences. Default is mandatory */ + u64 save_perf; + /* Base address of the debug region */ + u64 mem_base; + /* Size of debug region in bytes */ + u64 len; + /* Description */ + char desc[20]; + /* Filename of debug region */ + char filename[20]; +}; + +struct _ssr_hdr { + __le32 cmd; + __le32 len; + __le32 dbc_id; +}; + +struct ssr_hdr { + u32 cmd; + u32 len; + u32 dbc_id; +}; + +struct ssr_debug_transfer_info { + struct ssr_hdr hdr; + u32 resv; + u64 tbl_addr; + u64 tbl_len; +} __packed; + +struct ssr_debug_transfer_info_rsp { + struct _ssr_hdr hdr; + __le32 ret; +} __packed; + +struct ssr_memory_read { + struct _ssr_hdr hdr; + __le32 resv; + __le64 addr; + __le64 len; +} __packed; + +struct ssr_memory_read_rsp { + struct _ssr_hdr hdr; + __le32 resv; + u8 data[]; +} __packed; + +struct ssr_debug_transfer_done { + struct _ssr_hdr hdr; + __le32 resv; +} __packed; + +struct ssr_debug_transfer_done_rsp { + struct _ssr_hdr hdr; + __le32 ret; +} __packed; + +struct ssr_event { + struct ssr_hdr hdr; + u32 event; +} __packed; + +struct ssr_event_rsp { + struct _ssr_hdr hdr; + __le32 event; +} __packed; + +struct ssr_resp { + /* Work struct to schedule work coming on QAIC_SSR channel */ + struct work_struct work; + /* Root struct of device, used to access device resources */ + struct qaic_device *qdev; + /* Buffer used by MHI for transfer requests */ + u8 data[] __aligned(8); +}; + +/* SSR crashdump bookkeeping structure */ +struct ssr_dump_info { + /* DBC associated with this SSR crashdump */ + struct dma_bridge_chan *dbc; + /* + * It will be used when we complete the crashdump download and switch + * to waiting on SSR events + */ + struct ssr_resp *resp; + /* MEMORY READ request MHI buffer. */ + struct ssr_memory_read *read_buf_req; + /* TRUE: ->read_buf_req is queued for MHI transaction. FALSE: Otherwise */ + bool read_buf_req_queued; + /* Address of table in host */ + void *tbl_addr; + /* Total size of table */ + u64 tbl_len; + /* Offset of table(->tbl_addr) where the new chunk will be dumped */ + u64 tbl_off; + /* Address of table in device/target */ + u64 tbl_addr_dev; + /* Ptr to the entire dump */ + void *dump_addr; + /* Entire crashdump size */ + u64 dump_sz; + /* Offset of crashdump(->dump_addr) where the new chunk will be dumped */ + u64 dump_off; + /* Points to the table entry we are currently downloading */ + struct debug_info_table *tbl_ent; + /* Offset in the current table entry(->tbl_ent) for the next chunk */ + u64 tbl_ent_off; +}; + +struct ssr_crashdump { + /* + * Points to a bookkeeping struct maintained by MHI SSR device while + * downloading a SSR crashdump. It is NULL when crashdump downloading + * is not in progress.
+ */ + struct ssr_dump_info *dump_info; + /* Work struct to schedule work coming on QAIC_SSR channel */ + struct work_struct work; + /* Root struct of device, used to access device resources */ + struct qaic_device *qdev; + /* Buffer used by MHI for transfer requests */ + u8 data[]; +}; + +#define QAIC_SSR_DUMP_V1_MAGIC 0x1234567890abcdef +#define QAIC_SSR_DUMP_V1_VER 1 +struct dump_file_meta { + u64 magic; + u64 version; + u64 size; /* Total size of the entire dump */ + u64 tbl_len; /* Length of the table in byte */ +}; + +/* + * Layout of crashdump + * +------------------------------------------+ + * | Crashdump Meta structure | + * | type: struct dump_file_meta | + * +------------------------------------------+ + * | Crashdump Table | + * | type: array of struct debug_info_table | + * | | + * | | + * | | + * +------------------------------------------+ + * | Crashdump | + * | | + * | | + * | | + * | | + * | | + * +------------------------------------------+ + */ + +static void free_ssr_dump_info(struct ssr_crashdump *ssr_crash) +{ + struct ssr_dump_info *dump_info = ssr_crash->dump_info; + + ssr_crash->dump_info = NULL; + if (!dump_info) + return; + if (!dump_info->read_buf_req_queued) + kfree(dump_info->read_buf_req); + vfree(dump_info->tbl_addr); + vfree(dump_info->dump_addr); + kfree(dump_info); +} + +void qaic_clean_up_ssr(struct qaic_device *qdev) +{ + struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf; + + if (!ssr_crash) + return; + + qaic_dbc_exit_ssr(qdev); + free_ssr_dump_info(ssr_crash); +} + +static int alloc_dump(struct ssr_dump_info *dump_info) +{ + struct debug_info_table *tbl_ent = dump_info->tbl_addr; + struct dump_file_meta *dump_meta; + u64 tbl_sz_lp = 0; + u64 dump_size = 0; + + while (tbl_sz_lp < dump_info->tbl_len) { + le64_to_cpus(&tbl_ent->save_perf); + le64_to_cpus(&tbl_ent->mem_base); + le64_to_cpus(&tbl_ent->len); + + if (tbl_ent->len == 0) + return -EINVAL; + + dump_size += tbl_ent->len; + tbl_ent++; + tbl_sz_lp += sizeof(*tbl_ent); + } + + dump_info->dump_sz = dump_size + dump_info->tbl_len + sizeof(*dump_meta); + dump_info->dump_addr = vzalloc(dump_info->dump_sz); + if (!dump_info->dump_addr) + return -ENOMEM; + + /* Copy crashdump meta and table */ + dump_meta = dump_info->dump_addr; + dump_meta->magic = QAIC_SSR_DUMP_V1_MAGIC; + dump_meta->version = QAIC_SSR_DUMP_V1_VER; + dump_meta->size = dump_info->dump_sz; + dump_meta->tbl_len = dump_info->tbl_len; + memcpy(dump_info->dump_addr + sizeof(*dump_meta), dump_info->tbl_addr, dump_info->tbl_len); + /* Offset by crashdump meta and table (copied above) */ + dump_info->dump_off = dump_info->tbl_len + sizeof(*dump_meta); + + return 0; +} + +static int send_xfer_done(struct qaic_device *qdev, void *resp, u32 dbc_id) +{ + struct ssr_debug_transfer_done *xfer_done; + int ret; + + xfer_done = kmalloc(sizeof(*xfer_done), GFP_KERNEL); + if (!xfer_done) { + ret = -ENOMEM; + goto out; + } + + ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, resp, SSR_RESP_MSG_SZ, MHI_EOT); + if (ret) + goto free_xfer_done; + + xfer_done->hdr.cmd = cpu_to_le32(DEBUG_TRANSFER_DONE); + xfer_done->hdr.len = cpu_to_le32(sizeof(*xfer_done)); + xfer_done->hdr.dbc_id = cpu_to_le32(dbc_id); + + ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, xfer_done, sizeof(*xfer_done), MHI_EOT); + if (ret) + goto free_xfer_done; + + return 0; + +free_xfer_done: + kfree(xfer_done); +out: + return ret; +} + +static int mem_read_req(struct qaic_device *qdev, u64 dest_addr, u64 dest_len) +{ + struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf; + struct 
ssr_memory_read *read_buf_req; + struct ssr_dump_info *dump_info; + int ret; + + dump_info = ssr_crash->dump_info; + ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, ssr_crash->data, SSR_MEM_READ_DATA_SIZE, + MHI_EOT); + if (ret) + goto out; + + read_buf_req = dump_info->read_buf_req; + read_buf_req->hdr.cmd = cpu_to_le32(MEMORY_READ); + read_buf_req->hdr.len = cpu_to_le32(sizeof(*read_buf_req)); + read_buf_req->hdr.dbc_id = cpu_to_le32(qdev->ssr_dbc); + read_buf_req->addr = cpu_to_le64(dest_addr); + read_buf_req->len = cpu_to_le64(dest_len); + + ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, read_buf_req, sizeof(*read_buf_req), + MHI_EOT); + if (!ret) + dump_info->read_buf_req_queued = true; + +out: + return ret; +} + +static int ssr_copy_table(struct ssr_dump_info *dump_info, void *data, u64 len) +{ + if (len > dump_info->tbl_len - dump_info->tbl_off) + return -EINVAL; + + memcpy(dump_info->tbl_addr + dump_info->tbl_off, data, len); + dump_info->tbl_off += len; + + /* Entire table has been downloaded, alloc dump memory */ + if (dump_info->tbl_off == dump_info->tbl_len) { + dump_info->tbl_ent = dump_info->tbl_addr; + return alloc_dump(dump_info); + } + + return 0; +} + +static int ssr_copy_dump(struct ssr_dump_info *dump_info, void *data, u64 len) +{ + struct debug_info_table *tbl_ent; + + tbl_ent = dump_info->tbl_ent; + + if (len > tbl_ent->len - dump_info->tbl_ent_off) + return -EINVAL; + + memcpy(dump_info->dump_addr + dump_info->dump_off, data, len); + dump_info->dump_off += len; + dump_info->tbl_ent_off += len; + + /* + * Current segment (a entry in table) of the crashdump is complete, + * move to next one + */ + if (tbl_ent->len == dump_info->tbl_ent_off) { + dump_info->tbl_ent++; + dump_info->tbl_ent_off = 0; + } + + return 0; +} + +static void ssr_dump_worker(struct work_struct *work) +{ + struct ssr_crashdump *ssr_crash = container_of(work, struct ssr_crashdump, work); + struct qaic_device *qdev = ssr_crash->qdev; + struct ssr_memory_read_rsp *mem_rd_resp; + struct debug_info_table *tbl_ent; + struct ssr_dump_info *dump_info; + u64 dest_addr, dest_len; + struct _ssr_hdr *_hdr; + struct ssr_hdr hdr; + u64 data_len; + int ret; + + mem_rd_resp = (struct ssr_memory_read_rsp *)ssr_crash->data; + _hdr = &mem_rd_resp->hdr; + hdr.cmd = le32_to_cpu(_hdr->cmd); + hdr.len = le32_to_cpu(_hdr->len); + hdr.dbc_id = le32_to_cpu(_hdr->dbc_id); + + if (hdr.dbc_id != qdev->ssr_dbc) + goto reset_device; + + dump_info = ssr_crash->dump_info; + if (!dump_info) + goto reset_device; + + if (hdr.cmd != MEMORY_READ_RSP) + goto free_dump_info; + + if (hdr.len > SSR_MEM_READ_DATA_SIZE) + goto free_dump_info; + + data_len = hdr.len - sizeof(*mem_rd_resp); + + if (dump_info->tbl_off < dump_info->tbl_len) /* Chunk belongs to table */ + ret = ssr_copy_table(dump_info, mem_rd_resp->data, data_len); + else /* Chunk belongs to crashdump */ + ret = ssr_copy_dump(dump_info, mem_rd_resp->data, data_len); + + if (ret) + goto free_dump_info; + + if (dump_info->tbl_off < dump_info->tbl_len) { + /* Continue downloading table */ + dest_addr = dump_info->tbl_addr_dev + dump_info->tbl_off; + dest_len = min(SSR_MEM_READ_CHUNK_SIZE, dump_info->tbl_len - dump_info->tbl_off); + ret = mem_read_req(qdev, dest_addr, dest_len); + } else if (dump_info->dump_off < dump_info->dump_sz) { + /* Continue downloading crashdump */ + tbl_ent = dump_info->tbl_ent; + dest_addr = tbl_ent->mem_base + dump_info->tbl_ent_off; + dest_len = min(SSR_MEM_READ_CHUNK_SIZE, tbl_ent->len - dump_info->tbl_ent_off); + ret = mem_read_req(qdev, 
dest_addr, dest_len); + } else { + /* Crashdump download complete */ + ret = send_xfer_done(qdev, dump_info->resp->data, hdr.dbc_id); + } + + /* Most likely a MHI xfer has failed */ + if (ret) + goto free_dump_info; + + return; + +free_dump_info: + /* Free the allocated memory */ + free_ssr_dump_info(ssr_crash); +reset_device: + /* + * After subsystem crashes in device crashdump collection begins but + * something went wrong while collecting crashdump, now instead of + * handling this error we just reset the device as the best effort has + * been made + */ + mhi_soc_reset(qdev->mhi_cntrl); +} + +static struct ssr_dump_info *alloc_dump_info(struct qaic_device *qdev, + struct ssr_debug_transfer_info *debug_info) +{ + struct ssr_dump_info *dump_info; + int ret; + + le64_to_cpus(&debug_info->tbl_len); + le64_to_cpus(&debug_info->tbl_addr); + + if (debug_info->tbl_len == 0 || + debug_info->tbl_len % sizeof(struct debug_info_table) != 0) { + ret = -EINVAL; + goto out; + } + + /* Allocate SSR crashdump book keeping structure */ + dump_info = kzalloc(sizeof(*dump_info), GFP_KERNEL); + if (!dump_info) { + ret = -ENOMEM; + goto out; + } + + /* Buffer used to send MEMORY READ request to device via MHI */ + dump_info->read_buf_req = kzalloc(sizeof(*dump_info->read_buf_req), GFP_KERNEL); + if (!dump_info->read_buf_req) { + ret = -ENOMEM; + goto free_dump_info; + } + + /* Crashdump meta table buffer */ + dump_info->tbl_addr = vzalloc(debug_info->tbl_len); + if (!dump_info->tbl_addr) { + ret = -ENOMEM; + goto free_read_buf_req; + } + + dump_info->tbl_addr_dev = debug_info->tbl_addr; + dump_info->tbl_len = debug_info->tbl_len; + + return dump_info; + +free_read_buf_req: + kfree(dump_info->read_buf_req); +free_dump_info: + kfree(dump_info); +out: + return ERR_PTR(ret); +} + +static int dbg_xfer_info_rsp(struct qaic_device *qdev, struct dma_bridge_chan *dbc, + struct ssr_debug_transfer_info *debug_info) +{ + struct ssr_debug_transfer_info_rsp *debug_rsp; + struct ssr_crashdump *ssr_crash = NULL; + int ret = 0, ret2; + + debug_rsp = kmalloc(sizeof(*debug_rsp), GFP_KERNEL); + if (!debug_rsp) + return -ENOMEM; + + if (!qdev->ssr_mhi_buf) { + ret = -ENOMEM; + goto send_rsp; + } + + if (dbc->state != DBC_STATE_BEFORE_POWER_UP) { + ret = -EINVAL; + goto send_rsp; + } + + ssr_crash = qdev->ssr_mhi_buf; + ssr_crash->dump_info = alloc_dump_info(qdev, debug_info); + if (IS_ERR(ssr_crash->dump_info)) { + ret = PTR_ERR(ssr_crash->dump_info); + ssr_crash->dump_info = NULL; + } + +send_rsp: + debug_rsp->hdr.cmd = cpu_to_le32(DEBUG_TRANSFER_INFO_RSP); + debug_rsp->hdr.len = cpu_to_le32(sizeof(*debug_rsp)); + debug_rsp->hdr.dbc_id = cpu_to_le32(dbc->id); + /* + * 0 = Return an ACK confirming the host is ready to download crashdump + * 1 = Return an NACK confirming the host is not ready to download crashdump + */ + debug_rsp->ret = cpu_to_le32(ret ? 
1 : 0); + + ret2 = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, debug_rsp, sizeof(*debug_rsp), MHI_EOT); + if (ret2) { + free_ssr_dump_info(ssr_crash); + kfree(debug_rsp); + return ret2; + } + + return ret; +} + +static void dbg_xfer_done_rsp(struct qaic_device *qdev, struct dma_bridge_chan *dbc, + struct ssr_debug_transfer_done_rsp *xfer_rsp) +{ + struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf; + u32 status = le32_to_cpu(xfer_rsp->ret); + struct device *dev = &qdev->pdev->dev; + struct ssr_dump_info *dump_info; + + dump_info = ssr_crash->dump_info; + if (!dump_info) + return; + + if (status) { + free_ssr_dump_info(ssr_crash); + return; + } + + dev_coredumpv(dev, dump_info->dump_addr, dump_info->dump_sz, GFP_KERNEL); + /* dev_coredumpv will free dump_info->dump_addr */ + dump_info->dump_addr = NULL; + free_ssr_dump_info(ssr_crash); +} + +static void ssr_worker(struct work_struct *work) +{ + struct ssr_resp *resp = container_of(work, struct ssr_resp, work); + struct ssr_hdr *hdr = (struct ssr_hdr *)resp->data; + struct ssr_dump_info *dump_info = NULL; + struct qaic_device *qdev = resp->qdev; + struct ssr_crashdump *ssr_crash; + struct ssr_event_rsp *event_rsp; + struct dma_bridge_chan *dbc; + struct ssr_event *event; + u32 ssr_event_ack; + int ret; + + le32_to_cpus(&hdr->cmd); + le32_to_cpus(&hdr->len); + le32_to_cpus(&hdr->dbc_id); + + if (hdr->len > SSR_RESP_MSG_SZ) + goto out; + + if (hdr->dbc_id >= qdev->num_dbc) + goto out; + + dbc = &qdev->dbc[hdr->dbc_id]; + + switch (hdr->cmd) { + case DEBUG_TRANSFER_INFO: + ret = dbg_xfer_info_rsp(qdev, dbc, (struct ssr_debug_transfer_info *)resp->data); + if (ret) + break; + + ssr_crash = qdev->ssr_mhi_buf; + dump_info = ssr_crash->dump_info; + dump_info->dbc = dbc; + dump_info->resp = resp; + + /* Start by downloading debug table */ + ret = mem_read_req(qdev, dump_info->tbl_addr_dev, + min(dump_info->tbl_len, SSR_MEM_READ_CHUNK_SIZE)); + if (ret) { + free_ssr_dump_info(ssr_crash); + break; + } + + /* + * Till now everything went fine, which means that we will be + * collecting crashdump chunk by chunk. Do not queue a response + * buffer for SSR cmds till the crashdump is complete. + */ + return; + case SSR_EVENT: + event = (struct ssr_event *)hdr; + le32_to_cpus(&event->event); + ssr_event_ack = event->event; + ssr_crash = qdev->ssr_mhi_buf; + + switch (event->event) { + case BEFORE_SHUTDOWN: + set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_BEFORE_SHUTDOWN); + qaic_dbc_enter_ssr(qdev, hdr->dbc_id); + break; + case AFTER_SHUTDOWN: + set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_AFTER_SHUTDOWN); + break; + case BEFORE_POWER_UP: + set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_BEFORE_POWER_UP); + break; + case AFTER_POWER_UP: + /* + * If dump info is a non NULL value it means that we + * have received this SSR event while downloading a + * crashdump for this DBC is still in progress. 
NACK + * the SSR event + */ + if (ssr_crash && ssr_crash->dump_info) { + free_ssr_dump_info(ssr_crash); + ssr_event_ack = SSR_EVENT_NACK; + break; + } + + set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_AFTER_POWER_UP); + break; + default: + break; + } + + event_rsp = kmalloc(sizeof(*event_rsp), GFP_KERNEL); + if (!event_rsp) + break; + + event_rsp->hdr.cmd = cpu_to_le32(SSR_EVENT_RSP); + event_rsp->hdr.len = cpu_to_le32(sizeof(*event_rsp)); + event_rsp->hdr.dbc_id = cpu_to_le32(hdr->dbc_id); + event_rsp->event = cpu_to_le32(ssr_event_ack); + + ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, event_rsp, sizeof(*event_rsp), + MHI_EOT); + if (ret) + kfree(event_rsp); + + if (event->event == AFTER_POWER_UP && ssr_event_ack != SSR_EVENT_NACK) { + qaic_dbc_exit_ssr(qdev); + set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_IDLE); + } + + break; + case DEBUG_TRANSFER_DONE_RSP: + dbg_xfer_done_rsp(qdev, dbc, (struct ssr_debug_transfer_done_rsp *)hdr); + break; + default: + break; + } + +out: + ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, resp->data, SSR_RESP_MSG_SZ, MHI_EOT); + if (ret) + kfree(resp); +} + +static int qaic_ssr_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) +{ + struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev)); + struct ssr_resp *resp; + int ret; + + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) + return ret; + + resp = kzalloc(sizeof(*resp) + SSR_RESP_MSG_SZ, GFP_KERNEL); + if (!resp) { + mhi_unprepare_from_transfer(mhi_dev); + return -ENOMEM; + } + + resp->qdev = qdev; + INIT_WORK(&resp->work, ssr_worker); + + ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, resp->data, SSR_RESP_MSG_SZ, MHI_EOT); + if (ret) { + kfree(resp); + mhi_unprepare_from_transfer(mhi_dev); + return ret; + } + + dev_set_drvdata(&mhi_dev->dev, qdev); + qdev->ssr_ch = mhi_dev; + + return 0; +} + +static void qaic_ssr_mhi_remove(struct mhi_device *mhi_dev) +{ + struct qaic_device *qdev; + + qdev = dev_get_drvdata(&mhi_dev->dev); + mhi_unprepare_from_transfer(qdev->ssr_ch); + qdev->ssr_ch = NULL; +} + +static void qaic_ssr_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) +{ + struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev); + struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf; + struct _ssr_hdr *hdr = mhi_result->buf_addr; + struct ssr_dump_info *dump_info; + + if (mhi_result->transaction_status) { + kfree(mhi_result->buf_addr); + return; + } + + /* + * MEMORY READ is used to download crashdump. And crashdump is + * downloaded chunk by chunk in a series of MEMORY READ SSR commands. + * Hence to avoid too many kmalloc() and kfree() of the same MEMORY READ + * request buffer, we allocate only one such buffer and free it only + * once. 
+ */ + if (le32_to_cpu(hdr->cmd) == MEMORY_READ) { + dump_info = ssr_crash->dump_info; + if (dump_info) { + dump_info->read_buf_req_queued = false; + return; + } + } + + kfree(mhi_result->buf_addr); +} + +static void qaic_ssr_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) +{ + struct ssr_resp *resp = container_of(mhi_result->buf_addr, struct ssr_resp, data); + struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev); + struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf; + bool memory_read_rsp = false; + + if (ssr_crash && ssr_crash->data == mhi_result->buf_addr) + memory_read_rsp = true; + + if (mhi_result->transaction_status) { + /* Do not free SSR crashdump buffer as it allocated via managed APIs */ + if (!memory_read_rsp) + kfree(resp); + return; + } + + if (memory_read_rsp) + queue_work(qdev->ssr_wq, &ssr_crash->work); + else + queue_work(qdev->ssr_wq, &resp->work); +} + +static const struct mhi_device_id qaic_ssr_mhi_match_table[] = { + { .chan = "QAIC_SSR", }, + {}, +}; + +static struct mhi_driver qaic_ssr_mhi_driver = { + .id_table = qaic_ssr_mhi_match_table, + .remove = qaic_ssr_mhi_remove, + .probe = qaic_ssr_mhi_probe, + .ul_xfer_cb = qaic_ssr_mhi_ul_xfer_cb, + .dl_xfer_cb = qaic_ssr_mhi_dl_xfer_cb, + .driver = { + .name = "qaic_ssr", + }, +}; + +int qaic_ssr_init(struct qaic_device *qdev, struct drm_device *drm) +{ + struct ssr_crashdump *ssr_crash; + + qdev->ssr_dbc = QAIC_SSR_DBC_SENTINEL; + + /* + * Device requests only one SSR at a time. So allocating only one + * buffer to download crashdump is good enough. + */ + ssr_crash = drmm_kzalloc(drm, SSR_MHI_BUF_SIZE, GFP_KERNEL); + if (!ssr_crash) + return -ENOMEM; + + ssr_crash->qdev = qdev; + INIT_WORK(&ssr_crash->work, ssr_dump_worker); + qdev->ssr_mhi_buf = ssr_crash; + + return 0; +} + +int qaic_ssr_register(void) +{ + return mhi_driver_register(&qaic_ssr_mhi_driver); +} + +void qaic_ssr_unregister(void) +{ + mhi_driver_unregister(&qaic_ssr_mhi_driver); +} diff --git a/drivers/accel/qaic/qaic_ssr.h b/drivers/accel/qaic/qaic_ssr.h new file mode 100644 index 000000000000..97ccff305750 --- /dev/null +++ b/drivers/accel/qaic/qaic_ssr.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2021, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __QAIC_SSR_H__ +#define __QAIC_SSR_H__ + +struct drm_device; +struct qaic_device; + +int qaic_ssr_register(void); +void qaic_ssr_unregister(void); +void qaic_clean_up_ssr(struct qaic_device *qdev); +int qaic_ssr_init(struct qaic_device *qdev, struct drm_device *drm); +#endif /* __QAIC_SSR_H__ */ diff --git a/drivers/accel/qaic/qaic_sysfs.c b/drivers/accel/qaic/qaic_sysfs.c new file mode 100644 index 000000000000..e0afb0ffb589 --- /dev/null +++ b/drivers/accel/qaic/qaic_sysfs.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* Copyright (c) 2020-2025, The Linux Foundation. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include + +#include "qaic.h" + +#define NAME_LEN 14 + +struct dbc_attribute { + struct device_attribute dev_attr; + u32 dbc_id; + char name[NAME_LEN]; +}; + +static ssize_t dbc_state_show(struct device *dev, struct device_attribute *a, char *buf) +{ + struct dbc_attribute *dbc_attr = container_of(a, struct dbc_attribute, dev_attr); + struct drm_minor *minor = dev_get_drvdata(dev); + struct qaic_device *qdev; + + qdev = to_qaic_device(minor->dev); + return sysfs_emit(buf, "%d\n", qdev->dbc[dbc_attr->dbc_id].state); +} + +void set_dbc_state(struct qaic_device *qdev, u32 dbc_id, unsigned int state) +{ + struct device *kdev = to_accel_kdev(qdev->qddev); + char *envp[3] = {}; + char state_str[16]; + char id_str[12]; + + envp[0] = id_str; + envp[1] = state_str; + + if (state >= DBC_STATE_MAX) + return; + if (dbc_id >= qdev->num_dbc) + return; + if (state == qdev->dbc[dbc_id].state) + return; + + scnprintf(id_str, ARRAY_SIZE(id_str), "DBC_ID=%d", dbc_id); + scnprintf(state_str, ARRAY_SIZE(state_str), "DBC_STATE=%d", state); + + qdev->dbc[dbc_id].state = state; + kobject_uevent_env(&kdev->kobj, KOBJ_CHANGE, envp); +} + +int qaic_sysfs_init(struct qaic_drm_device *qddev) +{ + struct device *kdev = to_accel_kdev(qddev); + struct drm_device *drm = to_drm(qddev); + u32 num_dbc = qddev->qdev->num_dbc; + struct dbc_attribute *dbc_attrs; + int i, ret; + + dbc_attrs = drmm_kcalloc(drm, num_dbc, sizeof(*dbc_attrs), GFP_KERNEL); + if (!dbc_attrs) + return -ENOMEM; + + for (i = 0; i < num_dbc; ++i) { + struct dbc_attribute *dbc_attr = &dbc_attrs[i]; + + sysfs_attr_init(&dbc_attr->dev_attr.attr); + dbc_attr->dbc_id = i; + scnprintf(dbc_attr->name, NAME_LEN, "dbc%d_state", i); + dbc_attr->dev_attr.attr.name = dbc_attr->name; + dbc_attr->dev_attr.attr.mode = 0444; + dbc_attr->dev_attr.show = dbc_state_show; + ret = sysfs_create_file(&kdev->kobj, &dbc_attr->dev_attr.attr); + if (ret) { + int j; + + for (j = 0; j < i; ++j) { + dbc_attr = &dbc_attrs[j]; + sysfs_remove_file(&kdev->kobj, &dbc_attr->dev_attr.attr); + } + drmm_kfree(drm, dbc_attrs); + return ret; + } + } + + qddev->sysfs_attrs = dbc_attrs; + return 0; +} + +void qaic_sysfs_remove(struct qaic_drm_device *qddev) +{ + struct dbc_attribute *dbc_attrs = qddev->sysfs_attrs; + struct device *kdev = to_accel_kdev(qddev); + u32 num_dbc = qddev->qdev->num_dbc; + int i; + + if (!dbc_attrs) + return; + + qddev->sysfs_attrs = NULL; + for (i = 0; i < num_dbc; ++i) + sysfs_remove_file(&kdev->kobj, &dbc_attrs[i].dev_attr.attr); + drmm_kfree(to_drm(qddev), dbc_attrs); +} diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c index 3fac540f8e03..8af2475f4f36 100644 --- a/drivers/accel/qaic/qaic_timesync.c +++ b/drivers/accel/qaic/qaic_timesync.c @@ -171,6 +171,13 @@ static void qaic_timesync_timer(struct timer_list *t) dev_err(mqtsdev->dev, "%s mod_timer error:%d\n", __func__, ret); } +void qaic_mqts_ch_stop_timer(struct mhi_device *mhi_dev) +{ + struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev); + + timer_delete_sync(&mqtsdev->timer); +} + static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) { struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev)); @@ -206,6 +213,7 @@ static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_devi timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms); add_timer(timer); dev_set_drvdata(&mhi_dev->dev, mqtsdev); + qdev->mqts_ch = mhi_dev; 
return 0; @@ -221,6 +229,7 @@ static void qaic_timesync_remove(struct mhi_device *mhi_dev) { struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev); + mqtsdev->qdev->mqts_ch = NULL; timer_delete_sync(&mqtsdev->timer); mhi_unprepare_from_transfer(mqtsdev->mhi_dev); kfree(mqtsdev->sync_msg); diff --git a/drivers/accel/qaic/qaic_timesync.h b/drivers/accel/qaic/qaic_timesync.h index 851b7acd43bb..77b9c2b55057 100644 --- a/drivers/accel/qaic/qaic_timesync.h +++ b/drivers/accel/qaic/qaic_timesync.h @@ -6,6 +6,9 @@ #ifndef __QAIC_TIMESYNC_H__ #define __QAIC_TIMESYNC_H__ +#include + int qaic_timesync_init(void); void qaic_timesync_deinit(void); +void qaic_mqts_ch_stop_timer(struct mhi_device *mhi_dev); #endif /* __QAIC_TIMESYNC_H__ */ diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c index 3ebcc1f7ff58..fd3c3b2d1fd3 100644 --- a/drivers/accel/qaic/sahara.c +++ b/drivers/accel/qaic/sahara.c @@ -159,6 +159,7 @@ struct sahara_context { struct sahara_packet *rx; struct work_struct fw_work; struct work_struct dump_work; + struct work_struct read_data_work; struct mhi_device *mhi_dev; const char * const *image_table; u32 table_size; @@ -174,7 +175,10 @@ struct sahara_context { u64 dump_image_offset; void *mem_dump_freespace; u64 dump_images_left; + u32 read_data_offset; + u32 read_data_length; bool is_mem_dump_mode; + bool non_streaming; }; static const char * const aic100_image_table[] = { @@ -194,6 +198,7 @@ static const char * const aic200_image_table[] = { [23] = "qcom/aic200/aop.mbn", [32] = "qcom/aic200/tz.mbn", [33] = "qcom/aic200/hypvm.mbn", + [38] = "qcom/aic200/xbl_config.elf", [39] = "qcom/aic200/aic200_abl.elf", [40] = "qcom/aic200/apdp.mbn", [41] = "qcom/aic200/devcfg.mbn", @@ -202,6 +207,7 @@ static const char * const aic200_image_table[] = { [49] = "qcom/aic200/shrm.elf", [50] = "qcom/aic200/cpucp.elf", [51] = "qcom/aic200/aop_devcfg.mbn", + [54] = "qcom/aic200/qupv3fw.elf", [57] = "qcom/aic200/cpucp_dtbs.elf", [62] = "qcom/aic200/uefi_dtbs.elf", [63] = "qcom/aic200/xbl_ac_config.mbn", @@ -213,9 +219,15 @@ static const char * const aic200_image_table[] = { [69] = "qcom/aic200/dcd.mbn", [73] = "qcom/aic200/gearvm.mbn", [74] = "qcom/aic200/sti.bin", - [75] = "qcom/aic200/pvs.bin", + [76] = "qcom/aic200/tz_qti_config.mbn", + [78] = "qcom/aic200/pvs.bin", }; +static bool is_streaming(struct sahara_context *context) +{ + return !context->non_streaming; +} + static int sahara_find_image(struct sahara_context *context, u32 image_id) { int ret; @@ -265,6 +277,8 @@ static void sahara_send_reset(struct sahara_context *context) int ret; context->is_mem_dump_mode = false; + context->read_data_offset = 0; + context->read_data_length = 0; context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD); context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH); @@ -319,9 +333,39 @@ static void sahara_hello(struct sahara_context *context) dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret); } +static int read_data_helper(struct sahara_context *context, int buf_index) +{ + enum mhi_flags mhi_flag; + u32 pkt_data_len; + int ret; + + pkt_data_len = min(context->read_data_length, SAHARA_PACKET_MAX_SIZE); + + memcpy(context->tx[buf_index], + &context->firmware->data[context->read_data_offset], + pkt_data_len); + + context->read_data_offset += pkt_data_len; + context->read_data_length -= pkt_data_len; + + if (is_streaming(context) || !context->read_data_length) + mhi_flag = MHI_EOT; + else + mhi_flag = MHI_CHAIN; + + ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, + 
context->tx[buf_index], pkt_data_len, mhi_flag); + if (ret) { + dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n", ret); + return ret; + } + + return 0; +} + static void sahara_read_data(struct sahara_context *context) { - u32 image_id, data_offset, data_len, pkt_data_len; + u32 image_id, data_offset, data_len; int ret; int i; @@ -357,7 +401,7 @@ static void sahara_read_data(struct sahara_context *context) * and is not needed here on error. */ - if (data_len > SAHARA_TRANSFER_MAX_SIZE) { + if (context->non_streaming && data_len > SAHARA_TRANSFER_MAX_SIZE) { dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n", data_len, SAHARA_TRANSFER_MAX_SIZE); sahara_send_reset(context); @@ -378,22 +422,18 @@ static void sahara_read_data(struct sahara_context *context) return; } - for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) { - pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE); + context->read_data_offset = data_offset; + context->read_data_length = data_len; - memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len); + if (is_streaming(context)) { + schedule_work(&context->read_data_work); + return; + } - data_offset += pkt_data_len; - data_len -= pkt_data_len; - - ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, - context->tx[i], pkt_data_len, - !data_len ? MHI_EOT : MHI_CHAIN); - if (ret) { - dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n", - ret); - return; - } + for (i = 0; i < SAHARA_NUM_TX_BUF && context->read_data_length; ++i) { + ret = read_data_helper(context, i); + if (ret) + break; } } @@ -538,6 +578,7 @@ static void sahara_parse_dump_table(struct sahara_context *context) struct sahara_memory_dump_meta_v1 *dump_meta; u64 table_nents; u64 dump_length; + u64 mul_bytes; int ret; u64 i; @@ -551,8 +592,9 @@ static void sahara_parse_dump_table(struct sahara_context *context) dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0; dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0; - dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length)); - if (dump_length == SIZE_MAX) { + if (check_add_overflow(dump_length, + le64_to_cpu(dev_table[i].length), + &dump_length)) { /* Discard the dump */ sahara_send_reset(context); return; @@ -568,14 +610,17 @@ static void sahara_parse_dump_table(struct sahara_context *context) dev_table[i].filename); } - dump_length = size_add(dump_length, sizeof(*dump_meta)); - if (dump_length == SIZE_MAX) { + if (check_add_overflow(dump_length, (u64)sizeof(*dump_meta), &dump_length)) { /* Discard the dump */ sahara_send_reset(context); return; } - dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents)); - if (dump_length == SIZE_MAX) { + if (check_mul_overflow((u64)sizeof(*image_out_table), table_nents, &mul_bytes)) { + /* Discard the dump */ + sahara_send_reset(context); + return; + } + if (check_add_overflow(dump_length, mul_bytes, &dump_length)) { /* Discard the dump */ sahara_send_reset(context); return; @@ -615,7 +660,7 @@ static void sahara_parse_dump_table(struct sahara_context *context) /* Request the first chunk of the first image */ context->dump_image = &image_out_table[0]; - dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE); + dump_length = min_t(u64, context->dump_image->length, SAHARA_READ_MAX_SIZE); /* Avoid requesting EOI sized data so that we can identify errors */ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH) dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2; @@ 
-663,7 +708,7 @@ static void sahara_parse_dump_image(struct sahara_context *context) /* Get next image chunk */ dump_length = context->dump_image->length - context->dump_image_offset; - dump_length = min(dump_length, SAHARA_READ_MAX_SIZE); + dump_length = min_t(u64, dump_length, SAHARA_READ_MAX_SIZE); /* Avoid requesting EOI sized data so that we can identify errors */ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH) dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2; @@ -742,6 +787,13 @@ static void sahara_dump_processing(struct work_struct *work) sahara_send_reset(context); } +static void sahara_read_data_processing(struct work_struct *work) +{ + struct sahara_context *context = container_of(work, struct sahara_context, read_data_work); + + read_data_helper(context, 0); +} + static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) { struct sahara_context *context; @@ -756,35 +808,57 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_ if (!context->rx) return -ENOMEM; - /* - * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it - * will request for READ_DATA. This is larger than - * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to - * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a - * READ_DATA, it requires a transfer of the exact size requested. We - * can use MHI_CHAIN to link multiple buffers into a single transfer - * but the remote side will not consume the buffers until it sees an - * EOT, thus we need to allocate enough buffers to put in the tx fifo - * to cover an entire READ_DATA request of the max size. - */ - for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) { - context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL); - if (!context->tx[i]) - return -ENOMEM; - } - - context->mhi_dev = mhi_dev; - INIT_WORK(&context->fw_work, sahara_processing); - INIT_WORK(&context->dump_work, sahara_dump_processing); - if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) { context->image_table = aic200_image_table; context->table_size = ARRAY_SIZE(aic200_image_table); } else { context->image_table = aic100_image_table; context->table_size = ARRAY_SIZE(aic100_image_table); + context->non_streaming = true; } + /* + * There are two firmware implementations for READ_DATA handling. + * The older "SBL" implementation defines a Sahara transfer size, and + * expects that the response is a single transport transfer. If the + * FW wants to transfer a file that is larger than the transfer size, + * the FW will issue multiple READ_DATA commands. For this + * implementation, we need to allocate enough buffers to contain the + * entire Sahara transfer size. + * + * The newer "XBL" implementation does not define a maximum transfer + * size and instead expects the data to be streamed over using the + * transport level MTU. The FW will issue a single READ_DATA command + * of whatever size, and consume multiple transport level transfers + * until the expected amount of data is consumed. For this + * implementation we only need a single buffer of the transport MTU + * but we'll need to be able to use it multiple times for a single + * READ_DATA request. + * + * AIC100 is the SBL implementation and defines SAHARA_TRANSFER_MAX_SIZE + * and we need 9x SAHARA_PACKET_MAX_SIZE to cover that. 
We can use + * MHI_CHAIN to link multiple buffers into a single transfer but the + * remote side will not consume the buffers until it sees an EOT, thus + * we need to allocate enough buffers to put in the tx fifo to cover an + * entire READ_DATA request of the max size. + * + * AIC200 is the XBL implementation, and so a single buffer will work. + */ + for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) { + context->tx[i] = devm_kzalloc(&mhi_dev->dev, + SAHARA_PACKET_MAX_SIZE, + GFP_KERNEL); + if (!context->tx[i]) + return -ENOMEM; + if (is_streaming(context)) + break; + } + + context->mhi_dev = mhi_dev; + INIT_WORK(&context->fw_work, sahara_processing); + INIT_WORK(&context->dump_work, sahara_dump_processing); + INIT_WORK(&context->read_data_work, sahara_read_data_processing); + context->active_image_id = SAHARA_IMAGE_ID_NONE; dev_set_drvdata(&mhi_dev->dev, context); @@ -814,6 +888,10 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev) static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) { + struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev); + + if (!mhi_result->transaction_status && context->read_data_length && is_streaming(context)) + schedule_work(&context->read_data_work); } static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) diff --git a/drivers/accel/rocket/rocket_gem.c b/drivers/accel/rocket/rocket_gem.c index 0551e11cc184..624c4ecf5a34 100644 --- a/drivers/accel/rocket/rocket_gem.c +++ b/drivers/accel/rocket/rocket_gem.c @@ -2,6 +2,7 @@ /* Copyright 2024-2025 Tomeu Vizoso */ #include +#include #include #include #include diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 39e6f93dc310..b4f5c8635276 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -121,29 +121,27 @@ static const struct dma_fence_ops dma_fence_stub_ops = { .get_timeline_name = dma_fence_stub_get_name, }; +static int __init dma_fence_init_stub(void) +{ + dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops, + &dma_fence_stub_lock, 0, 0); + + set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &dma_fence_stub.flags); + + dma_fence_signal(&dma_fence_stub); + return 0; +} +subsys_initcall(dma_fence_init_stub); + /** * dma_fence_get_stub - return a signaled fence * - * Return a stub fence which is already signaled. The fence's - * timestamp corresponds to the first time after boot this - * function is called. + * Return a stub fence which is already signaled. The fence's timestamp + * corresponds to the initialisation time of the linux kernel. 
*/ struct dma_fence *dma_fence_get_stub(void) { - spin_lock(&dma_fence_stub_lock); - if (!dma_fence_stub.ops) { - dma_fence_init(&dma_fence_stub, - &dma_fence_stub_ops, - &dma_fence_stub_lock, - 0, 0); - - set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &dma_fence_stub.flags); - - dma_fence_signal_locked(&dma_fence_stub); - } - spin_unlock(&dma_fence_stub_lock); - return dma_fence_get(&dma_fence_stub); } EXPORT_SYMBOL(dma_fence_get_stub); @@ -999,19 +997,21 @@ EXPORT_SYMBOL(dma_fence_set_deadline); */ void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq) { - const char __rcu *timeline; - const char __rcu *driver; + const char __rcu *timeline = ""; + const char __rcu *driver = ""; + const char *signaled = ""; rcu_read_lock(); - timeline = dma_fence_timeline_name(fence); - driver = dma_fence_driver_name(fence); + if (!dma_fence_is_signaled(fence)) { + timeline = dma_fence_timeline_name(fence); + driver = dma_fence_driver_name(fence); + signaled = "un"; + } - seq_printf(seq, "%s %s seq %llu %ssignalled\n", - rcu_dereference(driver), - rcu_dereference(timeline), - fence->seqno, - dma_fence_is_signaled(fence) ? "" : "un"); + seq_printf(seq, "%llu:%llu %s %s %ssignalled\n", + fence->context, fence->seqno, timeline, driver, + signaled); rcu_read_unlock(); } diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig index bb369b38b001..a5eef06c4226 100644 --- a/drivers/dma-buf/heaps/Kconfig +++ b/drivers/dma-buf/heaps/Kconfig @@ -12,13 +12,3 @@ config DMABUF_HEAPS_CMA Choose this option to enable dma-buf CMA heap. This heap is backed by the Contiguous Memory Allocator (CMA). If your system has these regions, you should say Y here. - -config DMABUF_HEAPS_CMA_LEGACY - bool "Legacy DMA-BUF CMA Heap" - default y - depends on DMABUF_HEAPS_CMA - help - Add a duplicate CMA-backed dma-buf heap with legacy naming derived - from the CMA area's devicetree node, or "reserved" if the area is not - defined in the devicetree. This uses the same underlying allocator as - CONFIG_DMABUF_HEAPS_CMA. 
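
The dma-fence hunk above moves stub-fence initialisation into a subsys_initcall, so dma_fence_get_stub() now only takes a reference on a fence that was already signalled at boot. A minimal usage sketch, not part of the patch (the helper name is made up for illustration; only dma_fence_get_stub(), dma_fence_is_signaled() and the header are existing kernel API):

#include <linux/dma-fence.h>

/*
 * Illustrative only (not from this patch): hand out an already
 * signalled fence for work that needs no waiting. The stub fence is
 * initialised and signalled by the subsys_initcall added above, so
 * this path no longer needs the old lazy-init locking.
 */
static struct dma_fence *example_no_op_fence(void)
{
	struct dma_fence *fence = dma_fence_get_stub();

	/* Signalled at boot, so any wait on it completes immediately. */
	WARN_ON(!dma_fence_is_signaled(fence));

	return fence;	/* caller drops the reference with dma_fence_put() */
}
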
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index 0df007111975..42f88193eab9 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -21,12 +22,27 @@ #include #include #include +#include +#include #include #include #include #define DEFAULT_CMA_NAME "default_cma_region" +static struct cma *dma_areas[MAX_CMA_AREAS] __initdata; +static unsigned int dma_areas_num __initdata; + +int __init dma_heap_cma_register_heap(struct cma *cma) +{ + if (dma_areas_num >= ARRAY_SIZE(dma_areas)) + return -EINVAL; + + dma_areas[dma_areas_num++] = cma; + + return 0; +} + struct cma_heap { struct dma_heap *heap; struct cma *cma; @@ -395,33 +411,30 @@ static int __init __add_cma_heap(struct cma *cma, const char *name) return 0; } -static int __init add_default_cma_heap(void) +static int __init add_cma_heaps(void) { struct cma *default_cma = dev_get_cma_area(NULL); - const char *legacy_cma_name; + unsigned int i; int ret; - if (!default_cma) - return 0; + if (default_cma) { + ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME); + if (ret) + return ret; + } - ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME); - if (ret) - return ret; + for (i = 0; i < dma_areas_num; i++) { + struct cma *cma = dma_areas[i]; - if (IS_ENABLED(CONFIG_DMABUF_HEAPS_CMA_LEGACY)) { - legacy_cma_name = cma_get_name(default_cma); - if (!strcmp(legacy_cma_name, DEFAULT_CMA_NAME)) { - pr_warn("legacy name and default name are the same, skipping legacy heap\n"); - return 0; + ret = __add_cma_heap(cma, cma_get_name(cma)); + if (ret) { + pr_warn("Failed to add CMA heap %s", cma_get_name(cma)); + continue; } - ret = __add_cma_heap(default_cma, legacy_cma_name); - if (ret) - pr_warn("failed to add legacy heap: %pe\n", - ERR_PTR(ret)); } return 0; } -module_init(add_default_cma_heap); +module_init(add_cma_heaps); MODULE_DESCRIPTION("DMA-BUF CMA Heap"); diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index bbe7881f1360..4c782fe33fd4 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -186,20 +186,35 @@ static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) struct system_heap_buffer *buffer = dmabuf->priv; struct sg_table *table = &buffer->sg_table; unsigned long addr = vma->vm_start; - struct sg_page_iter piter; - int ret; + unsigned long pgoff = vma->vm_pgoff; + struct scatterlist *sg; + int i, ret; - for_each_sgtable_page(table, &piter, vma->vm_pgoff) { - struct page *page = sg_page_iter_page(&piter); + for_each_sgtable_sg(table, sg, i) { + unsigned long n = sg->length >> PAGE_SHIFT; - ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, - vma->vm_page_prot); + if (pgoff < n) + break; + pgoff -= n; + } + + for (; sg && addr < vma->vm_end; sg = sg_next(sg)) { + unsigned long n = (sg->length >> PAGE_SHIFT) - pgoff; + struct page *page = sg_page(sg) + pgoff; + unsigned long size = n << PAGE_SHIFT; + + if (addr + size > vma->vm_end) + size = vma->vm_end - addr; + + ret = remap_pfn_range(vma, addr, page_to_pfn(page), + size, vma->vm_page_prot); if (ret) return ret; - addr += PAGE_SIZE; - if (addr >= vma->vm_end) - return 0; + + addr += size; + pgoff = 0; } + return 0; } diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 3c20f1d31cf5..6f09d13be6b6 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -8,6 +8,7 @@ #include #include #include +#include #include 
#include @@ -349,6 +350,9 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj, struct sync_file *sync_file; struct sw_sync_create_fence_data data; + /* SW sync fence are inherently unsafe and can deadlock the kernel */ + add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); + if (fd < 0) return fd; diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 1701e5cafd9f..0e1c668b46d2 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -41,6 +41,7 @@ drm-y := \ drm_bridge.o \ drm_cache.o \ drm_color_mgmt.o \ + drm_colorop.o \ drm_connector.o \ drm_crtc.o \ drm_displayid.o \ @@ -76,7 +77,8 @@ drm-y := \ drm-$(CONFIG_DRM_CLIENT) += \ drm_client.o \ drm_client_event.o \ - drm_client_modeset.o + drm_client_modeset.o \ + drm_client_sysrq.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o @@ -150,7 +152,8 @@ drm_kms_helper-y := \ drm_plane_helper.o \ drm_probe_helper.o \ drm_self_refresh_helper.o \ - drm_simple_kms_helper.o + drm_simple_kms_helper.o \ + drm_vblank_helper.o drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c index 54cde090c3f4..4554cf75565e 100644 --- a/drivers/gpu/drm/adp/adp_drv.c +++ b/drivers/gpu/drm/adp/adp_drv.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 1acfed2f92ef..7f515be5185d 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -43,14 +43,16 @@ config DRM_AMDGPU_SI bool "Enable amdgpu support for SI parts" depends on DRM_AMDGPU help - Choose this option if you want to enable experimental support + Choose this option if you want to enable support for SI (Southern Islands) asics. - SI is already supported in radeon. Experimental support for SI - in amdgpu will be disabled by default and is still provided by - radeon. Use module options to override this: + SI (Southern Islands) are first generation GCN GPUs, + supported by both drivers: radeon (old) and amdgpu (new). + By default, SI dedicated GPUs are supported by amdgpu. - radeon.si_support=0 amdgpu.si_support=1 + Use module options to override this: + To use radeon for SI, + radeon.si_support=1 amdgpu.si_support=0 config DRM_AMDGPU_CIK bool "Enable amdgpu support for CIK parts" @@ -59,11 +61,17 @@ config DRM_AMDGPU_CIK Choose this option if you want to enable support for CIK (Sea Islands) asics. - CIK is already supported in radeon. Support for CIK in amdgpu - will be disabled by default and is still provided by radeon. - Use module options to override this: + CIK (Sea Islands) are second generation GCN GPUs, + supported by both drivers: radeon (old) and amdgpu (new). 
+ By default, + CIK dedicated GPUs are supported by amdgpu + CIK APUs are supported by radeon + Use module options to override this: + To use amdgpu for CIK, radeon.cik_support=0 amdgpu.cik_support=1 + To use radeon for CIK, + radeon.cik_support=1 amdgpu.cik_support=0 config DRM_AMDGPU_USERPTR bool "Always enable userptr write support" diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 64e7acff8f18..c88760fb52ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -37,7 +37,8 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_DISPLAY_PATH)/modules/inc \ -I$(FULL_AMD_DISPLAY_PATH)/dc \ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \ - -I$(FULL_AMD_PATH)/amdkfd + -I$(FULL_AMD_PATH)/amdkfd \ + -I$(FULL_AMD_PATH)/ras/ras_mgr # Locally disable W=1 warnings enabled in drm subsystem Makefile subdir-ccflags-y += -Wno-override-init @@ -77,7 +78,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \ dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \ - uvd_v3_1.o + uvd_v3_1.o vce_v1_0.o amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ @@ -324,4 +325,9 @@ amdgpu-y += \ isp_v4_1_1.o endif +AMD_GPU_RAS_PATH := ../ras +AMD_GPU_RAS_FULL_PATH := $(FULL_AMD_PATH)/ras +include $(AMD_GPU_RAS_FULL_PATH)/Makefile +amdgpu-y += $(AMD_GPU_RAS_FILES) + obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c index 9569dc16dd3d..daa7b23bc775 100644 --- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c @@ -88,6 +88,10 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev) uint32_t ip_block; int r, i; + /* Skip suspend of SDMA IP versions >= 4.4.2. They are multi-aid */ + if (adev->aid_mask) + ip_block_mask &= ~BIT(AMD_IP_BLOCK_TYPE_SDMA); + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6f5b4a0e0a34..9f9774f58ce1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -372,13 +372,15 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, u64 *flags); int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, enum amd_ip_block_type block_type); +bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev, enum amd_ip_block_type block_type); int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block); int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block); -#define AMDGPU_MAX_IP_NUM 16 +#define AMDGPU_MAX_IP_NUM AMD_IP_BLOCK_TYPE_NUM struct amdgpu_ip_block_status { bool valid; @@ -839,8 +841,6 @@ struct amd_powerplay { const struct amd_pm_funcs *pp_funcs; }; -struct ip_discovery_top; - /* polaris10 kickers */ #define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \ ((rid == 0xE3) || \ @@ -972,8 +972,7 @@ struct amdgpu_device { struct notifier_block acpi_nb; struct notifier_block pm_nb; struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; - struct debugfs_blob_wrapper debugfs_vbios_blob; - struct debugfs_blob_wrapper debugfs_discovery_blob; + struct debugfs_blob_wrapper debugfs_vbios_blob; struct mutex srbm_mutex; /* GRBM index mutex. 
Protects concurrent access to GRBM index */ struct mutex grbm_idx_mutex; @@ -1063,6 +1062,9 @@ struct amdgpu_device { u32 log2_max_MBps; } mm_stats; + /* discovery*/ + struct amdgpu_discovery_info discovery; + /* display */ bool enable_virtual_display; struct amdgpu_vkms_output *amdgpu_vkms_output; @@ -1174,6 +1176,12 @@ struct amdgpu_device { * queue fence. */ struct xarray userq_xa; + /** + * @userq_doorbell_xa: Global user queue map (doorbell index → queue) + * Key: doorbell_index (unique global identifier for the queue) + * Value: struct amdgpu_usermode_queue + */ + struct xarray userq_doorbell_xa; /* df */ struct amdgpu_df df; @@ -1265,8 +1273,6 @@ struct amdgpu_device { struct list_head ras_list; - struct ip_discovery_top *ip_top; - struct amdgpu_reset_domain *reset_domain; struct mutex benchmark_mutex; @@ -1309,9 +1315,8 @@ struct amdgpu_device { */ bool apu_prefer_gtt; - struct list_head userq_mgr_list; - struct mutex userq_mutex; bool userq_halt_for_enforce_isolation; + struct work_struct userq_reset_work; struct amdgpu_uid *uid_info; /* KFD @@ -1535,11 +1540,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev)) -#define amdgpu_asic_flush_hdp(adev, r) \ - ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r))) -#define amdgpu_asic_invalidate_hdp(adev, r) \ - ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \ - ((adev)->hdp.funcs->invalidate_hdp ? 
(adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0)) #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev)) #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1))) @@ -1638,7 +1638,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_release_kms(struct drm_device *dev); -int amdgpu_device_ip_suspend(struct amdgpu_device *adev); int amdgpu_device_prepare(struct drm_device *dev); void amdgpu_device_complete(struct drm_device *dev); int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 4926996f94da..381ef205b0df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -302,17 +302,19 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block) adev->acp.acp_res[2].end = adev->acp.acp_res[2].start; adev->acp.acp_cell[0].name = "acp_audio_dma"; + adev->acp.acp_cell[0].id = 0; adev->acp.acp_cell[0].num_resources = 3; adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; adev->acp.acp_cell[0].platform_data = &adev->asic_type; adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); adev->acp.acp_cell[1].name = "designware-i2s"; + adev->acp.acp_cell[1].id = 1; adev->acp.acp_cell[1].num_resources = 1; adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1]; adev->acp.acp_cell[1].platform_data = &i2s_pdata[0]; adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data); - r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2); + r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, 2, NULL, 0, NULL); if (r) goto failure; r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd, @@ -410,30 +412,34 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block) adev->acp.acp_res[4].end = adev->acp.acp_res[4].start; adev->acp.acp_cell[0].name = "acp_audio_dma"; + adev->acp.acp_cell[0].id = 0; adev->acp.acp_cell[0].num_resources = 5; adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; adev->acp.acp_cell[0].platform_data = &adev->asic_type; adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); adev->acp.acp_cell[1].name = "designware-i2s"; + adev->acp.acp_cell[1].id = 1; adev->acp.acp_cell[1].num_resources = 1; adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1]; adev->acp.acp_cell[1].platform_data = &i2s_pdata[0]; adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data); adev->acp.acp_cell[2].name = "designware-i2s"; + adev->acp.acp_cell[2].id = 2; adev->acp.acp_cell[2].num_resources = 1; adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2]; adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); adev->acp.acp_cell[3].name = "designware-i2s"; + adev->acp.acp_cell[3].id = 3; adev->acp.acp_cell[3].num_resources = 1; adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3]; adev->acp.acp_cell[3].platform_data = &i2s_pdata[2]; adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data); - r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS); + r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, ACP_DEVS, NULL, 0, NULL); if (r) goto failure; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 
index 6c62e27b9800..d31460a9e958 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -507,7 +507,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, pm_runtime_get_sync(adev_to_drm(adev)->dev); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(adev_to_drm(adev)); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 9e120c934cc1..8bdfcde2029b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -71,7 +71,7 @@ struct kgd_mem { struct mutex lock; struct amdgpu_bo *bo; struct dma_buf *dmabuf; - struct hmm_range *range; + struct amdgpu_hmm_range *range; struct list_head attachments; /* protected by amdkfd_process_info.lock */ struct list_head validate_list; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 923f0fa7350c..b1c24c8fa686 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1057,7 +1057,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, struct amdkfd_process_info *process_info = mem->process_info; struct amdgpu_bo *bo = mem->bo; struct ttm_operation_ctx ctx = { true, false }; - struct hmm_range *range; + struct amdgpu_hmm_range *range; int ret = 0; mutex_lock(&process_info->lock); @@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, return 0; } - ret = amdgpu_ttm_tt_get_user_pages(bo, &range); + range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!range)) { + ret = -ENOMEM; + goto unregister_out; + } + + ret = amdgpu_ttm_tt_get_user_pages(bo, range); if (ret) { + amdgpu_hmm_range_free(range); if (ret == -EAGAIN) pr_debug("Failed to get user pages, try again\n"); else @@ -1113,7 +1120,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, amdgpu_bo_unreserve(bo); release_out: - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); + amdgpu_hmm_range_free(range); unregister_out: if (ret) amdgpu_hmm_unregister(bo); @@ -1920,7 +1927,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { amdgpu_hmm_unregister(mem->bo); mutex_lock(&process_info->notifier_lock); - amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range); + amdgpu_hmm_range_free(mem->range); mutex_unlock(&process_info->notifier_lock); } @@ -1958,9 +1965,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( */ if (size) { if (!is_imported && - (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || - (adev->apu_prefer_gtt && - mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) *size = bo_size; else *size = 0; @@ -2546,7 +2551,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, bo = mem->bo; - amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range); + amdgpu_hmm_range_free(mem->range); mem->range = NULL; /* BO reservations and getting user pages (hmm_range_fault) @@ -2570,9 +2575,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, } } + mem->range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!mem->range)) + return -ENOMEM; /* Get updated user pages */ - ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range); + ret = 
amdgpu_ttm_tt_get_user_pages(bo, mem->range); if (ret) { + amdgpu_hmm_range_free(mem->range); + mem->range = NULL; pr_debug("Failed %d to get user pages\n", ret); /* Return -EFAULT bad address error as success. It will @@ -2745,8 +2755,8 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i continue; /* Only check mem with hmm range associated */ - valid = amdgpu_ttm_tt_get_user_pages_done( - mem->bo->tbo.ttm, mem->range); + valid = amdgpu_hmm_range_valid(mem->range); + amdgpu_hmm_range_free(mem->range); mem->range = NULL; if (!valid) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index c7d32fb216e4..636385c80f64 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -181,19 +181,22 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) u8 frev, crev; int usage_bytes = 0; - if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) { - if (frev == 2 && crev == 1) { - fw_usage_v2_1 = - (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset); - amdgpu_atomfirmware_allocate_fb_v2_1(adev, - fw_usage_v2_1, - &usage_bytes); - } else if (frev >= 2 && crev >= 2) { - fw_usage_v2_2 = - (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset); - amdgpu_atomfirmware_allocate_fb_v2_2(adev, - fw_usage_v2_2, - &usage_bytes); + /* Skip atomfirmware allocation for SRIOV VFs when dynamic crit regn is enabled */ + if (!(amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled)) { + if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) { + if (frev == 2 && crev == 1) { + fw_usage_v2_1 = + (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset); + amdgpu_atomfirmware_allocate_fb_v2_1(adev, + fw_usage_v2_1, + &usage_bytes); + } else if (frev >= 2 && crev >= 2) { + fw_usage_v2_2 = + (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset); + amdgpu_atomfirmware_allocate_fb_v2_2(adev, + fw_usage_v2_2, + &usage_bytes); + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 00e96419fcda..35d04e69aec0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -96,13 +96,14 @@ void amdgpu_bios_release(struct amdgpu_device *adev) * part of the system bios. On boot, the system bios puts a * copy of the igp rom at the start of vram if a discrete card is * present. - * For SR-IOV, the vbios image is also put in VRAM in the VF. + * For SR-IOV, if dynamic critical region is not enabled, + * the vbios image is also put at the start of VRAM in the VF. */ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev) { - uint8_t __iomem *bios; + uint8_t __iomem *bios = NULL; resource_size_t vram_base; - resource_size_t size = 256 * 1024; /* ??? */ + u32 size = 256U * 1024U; /* ??? */ if (!(adev->flags & AMD_IS_APU)) if (amdgpu_device_need_post(adev)) @@ -114,18 +115,33 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev) adev->bios = NULL; vram_base = pci_resource_start(adev->pdev, 0); - bios = ioremap_wc(vram_base, size); - if (!bios) - return false; adev->bios = kmalloc(size, GFP_KERNEL); - if (!adev->bios) { - iounmap(bios); + if (!adev->bios) return false; + + /* For SRIOV with dynamic critical region is enabled, + * the vbios image is put at a dynamic offset of VRAM in the VF. 
+ * If dynamic critical region is disabled, follow the existing logic as on baremetal. + */ + if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) { + if (amdgpu_virt_get_dynamic_data_info(adev, + AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, adev->bios, &size)) { + amdgpu_bios_release(adev); + return false; + } + } else { + bios = ioremap_wc(vram_base, size); + if (!bios) { + amdgpu_bios_release(adev); + return false; + } + + memcpy_fromio(adev->bios, bios, size); + iounmap(bios); } + adev->bios_size = size; - memcpy_fromio(adev->bios, bios, size); - iounmap(bios); if (!check_atom_bios(adev, size)) { amdgpu_bios_release(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index a716c9886c74..2b5e7c46a39d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -38,7 +38,7 @@ struct amdgpu_bo_list_entry { struct amdgpu_bo *bo; struct amdgpu_bo_va *bo_va; uint32_t priority; - struct hmm_range *range; + struct amdgpu_hmm_range *range; bool user_invalidated; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 47e9bfba0642..9f96d568acf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -734,10 +734,8 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -919,10 +917,8 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1146,10 +1142,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); exit: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1486,10 +1480,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector->connector_type == DRM_MODE_CONNECTOR_eDP) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 2f6a96af7fb1..ecdfe6cb36cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include @@ -41,6 +40,7 @@ #include "amdgpu_gmc.h" #include "amdgpu_gem.h" #include "amdgpu_ras.h" +#include "amdgpu_hmm.h" static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, struct amdgpu_device *adev, @@ -891,12 +891,17 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, bool userpage_invalidated = false; struct amdgpu_bo *bo = e->bo; - r = amdgpu_ttm_tt_get_user_pages(bo, &e->range); 
+ e->range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!e->range)) + return -ENOMEM; + + r = amdgpu_ttm_tt_get_user_pages(bo, e->range); if (r) goto out_free_user_pages; for (i = 0; i < bo->tbo.ttm->num_pages; i++) { - if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) { + if (bo->tbo.ttm->pages[i] != + hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) { userpage_invalidated = true; break; } @@ -990,9 +995,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, out_free_user_pages: amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - struct amdgpu_bo *bo = e->bo; - - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range); + amdgpu_hmm_range_free(e->range); e->range = NULL; } mutex_unlock(&p->bo_list->bo_list_mutex); @@ -1323,8 +1326,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, */ r = 0; amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm, - e->range); + r |= !amdgpu_hmm_range_valid(e->range); + amdgpu_hmm_range_free(e->range); e->range = NULL; } if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index a70651050acf..62d43b8cbe58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -129,7 +129,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; @@ -179,7 +178,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, if (pm_pg_lock) mutex_unlock(&adev->pm.mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); @@ -255,7 +253,6 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off if (rd->id.use_grbm) { if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) || (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); mutex_unlock(&rd->lock); @@ -310,7 +307,6 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off mutex_unlock(&rd->lock); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); @@ -446,7 +442,6 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (!x) { @@ -557,7 +552,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -617,7 +611,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 
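The hunks above replace the open-coded hmm_range handling in amdgpu_cs_parser_bos() with the new amdgpu_hmm_range wrapper. Condensed from those hunks only, the userptr lifecycle now follows an allocate/populate/validate/free pattern roughly like the sketch below. This is an illustration, not part of the patch: locking, the invalidation retry loop and the bo-list iteration are elided, the helper name userptr_pages_sketch() is made up, and -EAGAIN is an assumed retry value.

	/* Illustrative sketch of the new userptr flow; hypothetical helper name. */
	static int userptr_pages_sketch(struct amdgpu_bo *bo)
	{
		struct amdgpu_hmm_range *range;
		int r;

		range = amdgpu_hmm_range_alloc(NULL);
		if (unlikely(!range))
			return -ENOMEM;

		/* populate the range with the BO's user pages */
		r = amdgpu_ttm_tt_get_user_pages(bo, range);
		if (r)
			goto out_free;

		/* pages stay reachable via range->hmm_range.hmm_pfns[] while valid */
		if (!amdgpu_hmm_range_valid(range))
			r = -EAGAIN;	/* assumed retry signal, not taken from the patch */

	out_free:
		amdgpu_hmm_range_free(range);
		return r;
	}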
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -676,7 +669,6 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -736,7 +728,6 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -795,7 +786,6 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -855,7 +845,6 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -1003,7 +992,6 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) { @@ -1094,7 +1082,6 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (!x) { @@ -1192,7 +1179,6 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); while (size) { @@ -1266,7 +1252,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1315,7 +1300,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1365,7 +1349,6 @@ static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1414,7 +1397,6 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1460,7 +1442,6 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1501,7 +1482,6 @@ static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *bu r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1701,7 +1681,6 @@ static int 
amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) up_write(&adev->reset_domain->sem); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -1721,7 +1700,6 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val) *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -1742,7 +1720,6 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val) *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -1762,7 +1739,6 @@ static int amdgpu_debugfs_benchmark(void *data, u64 val) r = amdgpu_benchmark(adev, val); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; @@ -1902,7 +1878,7 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) continue; } job = to_amdgpu_job(s_job); - if (preempted && (&job->hw_fence.base) == fence) + if (preempted && (&job->hw_fence->base) == fence) /* mark the job as preempted */ job->preemption_status |= AMDGPU_IB_PREEMPTED; } @@ -2014,7 +1990,6 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val) ret = -EINVAL; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return ret; @@ -2123,10 +2098,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) debugfs_create_blob("amdgpu_vbios", 0444, root, &adev->debugfs_vbios_blob); - adev->debugfs_discovery_blob.data = adev->mman.discovery_bin; - adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size; - debugfs_create_blob("amdgpu_discovery", 0444, root, - &adev->debugfs_discovery_blob); + if (adev->discovery.debugfs_blob.size) + debugfs_create_blob("amdgpu_discovery", 0444, root, + &adev->discovery.debugfs_blob); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 96b6738e6252..a1817b4b5173 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -71,6 +71,7 @@ #include "amdgpu_xgmi.h" #include "amdgpu_ras.h" +#include "amdgpu_ras_mgr.h" #include "amdgpu_pmu.h" #include "amdgpu_fru_eeprom.h" #include "amdgpu_reset.h" @@ -179,6 +180,10 @@ struct amdgpu_init_level amdgpu_init_minimal_xgmi = { BIT(AMD_IP_BLOCK_TYPE_PSP) }; +static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev); +static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev); +static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev); + static void amdgpu_device_load_switch_state(struct amdgpu_device *adev); static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev, @@ -2387,7 +2392,7 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, } /** - * amdgpu_device_ip_is_valid - is the hardware IP enabled + * amdgpu_device_ip_is_hw - is the hardware IP enabled * * @adev: amdgpu_device pointer * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) @@ -2395,6 +2400,27 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, * Check if the hardware IP is enable or not. * Returns true if it the IP is enable, false if not. 
*/ +bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) +{ + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (adev->ip_blocks[i].version->type == block_type) + return adev->ip_blocks[i].status.hw; + } + return false; +} + +/** + * amdgpu_device_ip_is_valid - is the hardware IP valid + * + * @adev: amdgpu_device pointer + * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) + * + * Check if the hardware IP is valid or not. + * Returns true if it the IP is valid, false if not. + */ bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev, enum amd_ip_block_type block_type) { @@ -2473,6 +2499,7 @@ static const char *ip_block_names[] = { [AMD_IP_BLOCK_TYPE_VPE] = "vpe", [AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm", [AMD_IP_BLOCK_TYPE_ISP] = "isp", + [AMD_IP_BLOCK_TYPE_RAS] = "ras", }; static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type) @@ -2633,12 +2660,12 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) chip_name = "arcturus"; break; case CHIP_NAVI12: - if (adev->mman.discovery_bin) + if (adev->discovery.bin) return 0; chip_name = "navi12"; break; case CHIP_CYAN_SKILLFISH: - if (adev->mman.discovery_bin) + if (adev->discovery.bin) return 0; chip_name = "cyan_skillfish"; break; @@ -2763,6 +2790,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) r = amdgpu_virt_request_full_gpu(adev, true); if (r) return r; + + r = amdgpu_virt_init_critical_region(adev); + if (r) + return r; } switch (adev->asic_type) { @@ -3652,6 +3683,20 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) "failed to release exclusive mode on fini\n"); } + /* + * Driver reload on the APU can fail due to firmware validation because + * the PSP is always running, as it is shared across the whole SoC. + * This same issue does not occur on dGPU because it has a mechanism + * that checks whether the PSP is running. A solution for those issues + * in the APU is to trigger a GPU reset, but this should be done during + * the unload phase to avoid adding boot latency and screen flicker. 
+ */ + if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) { + r = amdgpu_asic_reset(adev); + if (r) + dev_err(adev->dev, "asic reset on %s failed\n", __func__); + } + return 0; } @@ -3762,7 +3807,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) */ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) { - int i, r; + int i, r, rec; amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); @@ -3783,13 +3828,25 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) continue; - /* XXX handle errors */ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); if (r) - return r; + goto unwind; } return 0; +unwind: + rec = amdgpu_device_ip_resume_phase3(adev); + if (rec) + dev_err(adev->dev, + "amdgpu_device_ip_resume_phase3 failed during unwind: %d\n", + rec); + + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW); + + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); + + return r; } /** @@ -3805,7 +3862,7 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) */ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) { - int i, r; + int i, r, rec; if (adev->in_s0ix) amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry); @@ -3866,9 +3923,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) continue; - /* XXX handle errors */ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); - adev->ip_blocks[i].status.hw = false; + if (r) + goto unwind; /* handle putting the SMC in the appropriate state */ if (!amdgpu_sriov_vf(adev)) { @@ -3878,13 +3935,40 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) dev_err(adev->dev, "SMC failed to set mp1 state %d, %d\n", adev->mp1_state, r); - return r; + goto unwind; } } } } return 0; +unwind: + /* suspend phase 2 = resume phase 1 + resume phase 2 */ + rec = amdgpu_device_ip_resume_phase1(adev); + if (rec) { + dev_err(adev->dev, + "amdgpu_device_ip_resume_phase1 failed during unwind: %d\n", + rec); + return r; + } + + rec = amdgpu_device_fw_loading(adev); + if (rec) { + dev_err(adev->dev, + "amdgpu_device_fw_loading failed during unwind: %d\n", + rec); + return r; + } + + rec = amdgpu_device_ip_resume_phase2(adev); + if (rec) { + dev_err(adev->dev, + "amdgpu_device_ip_resume_phase2 failed during unwind: %d\n", + rec); + return r; + } + + return r; } /** @@ -3898,7 +3982,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) * in each IP into a state suitable for suspend. * Returns 0 on success, negative error code on failure. */ -int amdgpu_device_ip_suspend(struct amdgpu_device *adev) +static int amdgpu_device_ip_suspend(struct amdgpu_device *adev) { int r; @@ -4182,25 +4266,13 @@ bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev, case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: - /* - * We have systems in the wild with these ASICs that require - * LVDS and VGA support which is not supported with DC. - * - * Fallback to the non-DC driver here by default so as not to - * cause regressions. 
- */ -#if defined(CONFIG_DRM_AMD_DC_SI) - return amdgpu_dc > 0; -#else - return false; -#endif - case CHIP_BONAIRE: + return amdgpu_dc != 0 && IS_ENABLED(CONFIG_DRM_AMD_DC_SI); case CHIP_KAVERI: case CHIP_KABINI: case CHIP_MULLINS: /* * We have systems in the wild with these ASICs that require - * VGA support which is not supported with DC. + * TRAVIS and NUTMEG support which is not supported with DC. * * Fallback to the non-DC driver here by default so as not to * cause regressions. @@ -4288,58 +4360,53 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) long timeout; int ret = 0; - /* - * By default timeout for jobs is 10 sec - */ - adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000); - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; + /* By default timeout for all queues is 2 sec */ + adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout = + adev->video_timeout = msecs_to_jiffies(2000); - if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { - while ((timeout_setting = strsep(&input, ",")) && - strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { - ret = kstrtol(timeout_setting, 0, &timeout); - if (ret) - return ret; + if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) + return 0; - if (timeout == 0) { - index++; - continue; - } else if (timeout < 0) { - timeout = MAX_SCHEDULE_TIMEOUT; - dev_warn(adev->dev, "lockup timeout disabled"); - add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); - } else { - timeout = msecs_to_jiffies(timeout); - } + while ((timeout_setting = strsep(&input, ",")) && + strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { + ret = kstrtol(timeout_setting, 0, &timeout); + if (ret) + return ret; - switch (index++) { - case 0: - adev->gfx_timeout = timeout; - break; - case 1: - adev->compute_timeout = timeout; - break; - case 2: - adev->sdma_timeout = timeout; - break; - case 3: - adev->video_timeout = timeout; - break; - default: - break; - } + if (timeout == 0) { + index++; + continue; + } else if (timeout < 0) { + timeout = MAX_SCHEDULE_TIMEOUT; + dev_warn(adev->dev, "lockup timeout disabled"); + add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); + } else { + timeout = msecs_to_jiffies(timeout); } - /* - * There is only one value specified and - * it should apply to all non-compute jobs. - */ - if (index == 1) { - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) - adev->compute_timeout = adev->gfx_timeout; + + switch (index++) { + case 0: + adev->gfx_timeout = timeout; + break; + case 1: + adev->compute_timeout = timeout; + break; + case 2: + adev->sdma_timeout = timeout; + break; + case 3: + adev->video_timeout = timeout; + break; + default: + break; } } + /* When only one value specified apply it to all queues. 
*/ + if (index == 1) + adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout = + adev->video_timeout = timeout; + return ret; } @@ -4394,6 +4461,55 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev) dev_info(adev->dev, "MCBP is enabled\n"); } +static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_atombios_sysfs_init(adev); + if (r) + drm_err(&adev->ddev, + "registering atombios sysfs failed (%d).\n", r); + + r = amdgpu_pm_sysfs_init(adev); + if (r) + dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r); + + r = amdgpu_ucode_sysfs_init(adev); + if (r) { + adev->ucode_sysfs_en = false; + dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r); + } else + adev->ucode_sysfs_en = true; + + r = amdgpu_device_attr_sysfs_init(adev); + if (r) + dev_err(adev->dev, "Could not create amdgpu device attr\n"); + + r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); + if (r) + dev_err(adev->dev, + "Could not create amdgpu board attributes\n"); + + amdgpu_fru_sysfs_init(adev); + amdgpu_reg_state_sysfs_init(adev); + amdgpu_xcp_sysfs_init(adev); + + return r; +} + +static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev) +{ + if (adev->pm.sysfs_initialized) + amdgpu_pm_sysfs_fini(adev); + if (adev->ucode_sysfs_en) + amdgpu_ucode_sysfs_fini(adev); + amdgpu_device_attr_sysfs_fini(adev); + amdgpu_fru_sysfs_fini(adev); + + amdgpu_reg_state_sysfs_fini(adev); + amdgpu_xcp_sysfs_fini(adev); +} + /** * amdgpu_device_init - initialize the driver * @@ -4493,7 +4609,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->gfx.userq_sch_mutex); mutex_init(&adev->gfx.workload_profile_mutex); mutex_init(&adev->vcn.workload_profile_mutex); - mutex_init(&adev->userq_mutex); amdgpu_device_init_apu_flags(adev); @@ -4521,7 +4636,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&adev->pm.od_kobj_list); - INIT_LIST_HEAD(&adev->userq_mgr_list); + xa_init(&adev->userq_doorbell_xa); INIT_DELAYED_WORK(&adev->delayed_init_work, amdgpu_device_delayed_init_work_handler); @@ -4544,6 +4659,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, } INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); + INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work); adev->gfx.gfx_off_req_count = 1; adev->gfx.gfx_off_residency = 0; @@ -4817,39 +4933,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, flush_delayed_work(&adev->delayed_init_work); } + if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) + amdgpu_xgmi_reset_on_init(adev); /* * Place those sysfs registering after `late_init`. As some of those * operations performed in `late_init` might affect the sysfs * interfaces creating. 
*/ - r = amdgpu_atombios_sysfs_init(adev); - if (r) - drm_err(&adev->ddev, - "registering atombios sysfs failed (%d).\n", r); - - r = amdgpu_pm_sysfs_init(adev); - if (r) - dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r); - - r = amdgpu_ucode_sysfs_init(adev); - if (r) { - adev->ucode_sysfs_en = false; - dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r); - } else - adev->ucode_sysfs_en = true; - - r = amdgpu_device_attr_sysfs_init(adev); - if (r) - dev_err(adev->dev, "Could not create amdgpu device attr\n"); - - r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); - if (r) - dev_err(adev->dev, - "Could not create amdgpu board attributes\n"); - - amdgpu_fru_sysfs_init(adev); - amdgpu_reg_state_sysfs_init(adev); - amdgpu_xcp_sysfs_init(adev); + r = amdgpu_device_sys_interface_init(adev); if (IS_ENABLED(CONFIG_PERF_EVENTS)) r = amdgpu_pmu_init(adev); @@ -4877,9 +4968,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (px) vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); - if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) - amdgpu_xgmi_reset_on_init(adev); - amdgpu_device_check_iommu_direct_map(adev); adev->pm_nb.notifier_call = amdgpu_device_pm_notifier; @@ -4971,15 +5059,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) } amdgpu_fence_driver_hw_fini(adev); - if (adev->pm.sysfs_initialized) - amdgpu_pm_sysfs_fini(adev); - if (adev->ucode_sysfs_en) - amdgpu_ucode_sysfs_fini(adev); - amdgpu_device_attr_sysfs_fini(adev); - amdgpu_fru_sysfs_fini(adev); - - amdgpu_reg_state_sysfs_fini(adev); - amdgpu_xcp_sysfs_fini(adev); + amdgpu_device_sys_interface_fini(adev); /* disable ras feature must before hw fini */ amdgpu_ras_pre_fini(adev); @@ -5054,7 +5134,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) if (IS_ENABLED(CONFIG_PERF_EVENTS)) amdgpu_pmu_fini(adev); - if (adev->mman.discovery_bin) + if (adev->discovery.bin) amdgpu_discovery_fini(adev); amdgpu_reset_put_reset_domain(adev->reset_domain); @@ -5202,7 +5282,7 @@ void amdgpu_device_complete(struct drm_device *dev) int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) { struct amdgpu_device *adev = drm_to_adev(dev); - int r = 0; + int r, rec; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -5218,35 +5298,92 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) return r; } - if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3)) - dev_warn(adev->dev, "smart shift update failed\n"); + r = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3); + if (r) + goto unwind_sriov; if (notify_clients) - drm_client_dev_suspend(adev_to_drm(adev), false); + drm_client_dev_suspend(adev_to_drm(adev)); cancel_delayed_work_sync(&adev->delayed_init_work); amdgpu_ras_suspend(adev); - amdgpu_device_ip_suspend_phase1(adev); + r = amdgpu_device_ip_suspend_phase1(adev); + if (r) + goto unwind_smartshift; amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); - amdgpu_userq_suspend(adev); + r = amdgpu_userq_suspend(adev); + if (r) + goto unwind_ip_phase1; r = amdgpu_device_evict_resources(adev); if (r) - return r; + goto unwind_userq; amdgpu_ttm_set_buffer_funcs_status(adev, false); amdgpu_fence_driver_hw_fini(adev); - amdgpu_device_ip_suspend_phase2(adev); + r = amdgpu_device_ip_suspend_phase2(adev); + if (r) + goto unwind_evict; if (amdgpu_sriov_vf(adev)) amdgpu_virt_release_full_gpu(adev, false); return 0; + +unwind_evict: + if (adev->mman.buffer_funcs_ring->sched.ready) + 
amdgpu_ttm_set_buffer_funcs_status(adev, true); + amdgpu_fence_driver_hw_init(adev); + +unwind_userq: + rec = amdgpu_userq_resume(adev); + if (rec) { + dev_warn(adev->dev, "failed to re-initialize user queues: %d\n", rec); + return r; + } + rec = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); + if (rec) { + dev_warn(adev->dev, "failed to re-initialize kfd: %d\n", rec); + return r; + } + +unwind_ip_phase1: + /* suspend phase 1 = resume phase 3 */ + rec = amdgpu_device_ip_resume_phase3(adev); + if (rec) { + dev_warn(adev->dev, "failed to re-initialize IPs phase1: %d\n", rec); + return r; + } + +unwind_smartshift: + rec = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0); + if (rec) { + dev_warn(adev->dev, "failed to re-update smart shift: %d\n", rec); + return r; + } + + if (notify_clients) + drm_client_dev_resume(adev_to_drm(adev)); + + amdgpu_ras_resume(adev); + +unwind_sriov: + if (amdgpu_sriov_vf(adev)) { + rec = amdgpu_virt_request_full_gpu(adev, true); + if (rec) { + dev_warn(adev->dev, "failed to reinitialize sriov: %d\n", rec); + return r; + } + } + + adev->in_suspend = adev->in_s0ix = adev->in_s3 = false; + + return r; } static inline int amdgpu_virt_resume(struct amdgpu_device *adev) @@ -5352,7 +5489,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients) flush_delayed_work(&adev->delayed_init_work); if (notify_clients) - drm_client_dev_resume(adev_to_drm(adev), false); + drm_client_dev_resume(adev_to_drm(adev)); amdgpu_ras_resume(adev); @@ -5808,11 +5945,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, if (!amdgpu_ring_sched_ready(ring)) continue; - /* Clear job fence from fence drv to avoid force_completion - * leave NULL and vm flush fence in fence drv - */ - amdgpu_fence_driver_clear_job_fences(ring); - /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); } @@ -5957,7 +6089,11 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) if (r) goto out; - drm_client_dev_resume(adev_to_drm(tmp_adev), false); + r = amdgpu_userq_post_reset(tmp_adev, vram_lost); + if (r) + goto out; + + drm_client_dev_resume(adev_to_drm(tmp_adev)); /* * The GPU enters bad state once faulty pages @@ -6179,6 +6315,7 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) cancel_work(&adev->reset_work); #endif + cancel_work(&adev->userq_reset_work); if (adev->kfd.dev) cancel_work(&adev->kfd.reset_work); @@ -6292,13 +6429,15 @@ static void amdgpu_device_halt_activities(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); - drm_client_dev_suspend(adev_to_drm(tmp_adev), false); + drm_client_dev_suspend(adev_to_drm(tmp_adev)); /* disable ras on ALL IPs */ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) && amdgpu_device_ip_need_full_reset(tmp_adev)) amdgpu_ras_suspend(tmp_adev); + amdgpu_userq_pre_reset(tmp_adev); + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; @@ -6528,6 +6667,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, goto end_reset; } + /* Cannot be called after locking reset domain */ + amdgpu_ras_pre_reset(adev, &device_list); + /* We need to lock reset domain only once both for XGMI and single device */ amdgpu_device_recovery_get_reset_lock(adev, &device_list); @@ -6541,7 +6683,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * * job->base holds a reference to parent fence */ - if (job 
&& dma_fence_is_signaled(&job->hw_fence.base)) { + if (job && dma_fence_is_signaled(&job->hw_fence->base)) { job_signaled = true; dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); goto skip_hw_reset; @@ -6558,6 +6700,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart); reset_unlock: amdgpu_device_recovery_put_reset_lock(adev, &device_list); + amdgpu_ras_post_reset(adev, &device_list); end_reset: if (hive) { mutex_unlock(&hive->hive_lock); @@ -7285,10 +7428,17 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev, if (adev->gmc.xgmi.connected_to_cpu) return; - if (ring && ring->funcs->emit_hdp_flush) + if (ring && ring->funcs->emit_hdp_flush) { amdgpu_ring_emit_hdp_flush(ring); - else - amdgpu_asic_flush_hdp(adev, ring); + return; + } + + if (!ring && amdgpu_sriov_runtime(adev)) { + if (!amdgpu_kiq_hdp_flush(adev)) + return; + } + + amdgpu_hdp_flush(adev, ring); } void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, @@ -7301,7 +7451,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, if (adev->gmc.xgmi.connected_to_cpu) return; - amdgpu_asic_invalidate_hdp(adev, ring); + amdgpu_hdp_invalidate(adev, ring); } int amdgpu_in_reset(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index dd7b2b796427..fa2a22dfa048 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -107,6 +107,7 @@ #include "vcn_v5_0_1.h" #include "jpeg_v5_0_0.h" #include "jpeg_v5_0_1.h" +#include "amdgpu_ras_mgr.h" #include "amdgpu_vpe.h" #if defined(CONFIG_DRM_AMD_ISP) @@ -254,9 +255,9 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET; /* This region is read-only and reserved from system use */ - discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC); + discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC); if (discv_regn) { - memcpy(binary, discv_regn, adev->mman.discovery_tmr_size); + memcpy(binary, discv_regn, adev->discovery.size); memunmap(discv_regn); return 0; } @@ -298,10 +299,31 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, else vram_size <<= 20; + /* + * If in VRAM, discovery TMR is marked for reservation. If it is in system mem, + * then it is not required to be reserved. + */ if (sz_valid) { - uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; - amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, - adev->mman.discovery_tmr_size, false); + if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) { + /* For SRIOV VFs with dynamic critical region enabled, + * we will get the IPD binary via below call. + * If dynamic critical is disabled, fall through to normal seq. 
+ */ + if (amdgpu_virt_get_dynamic_data_info(adev, + AMD_SRIOV_MSG_IPD_TABLE_ID, binary, + &adev->discovery.size)) { + dev_err(adev->dev, + "failed to read discovery info from dynamic critical region."); + ret = -EINVAL; + goto exit; + } + } else { + uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; + + amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, + adev->discovery.size, false); + adev->discovery.reserve_tmr = true; + } } else { ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary); } @@ -310,7 +332,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, dev_err(adev->dev, "failed to read discovery info from memory, vram size read: %llx", vram_size); - +exit: return ret; } @@ -389,6 +411,7 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, struct binary_header *bhdr) { + uint8_t *discovery_bin = adev->discovery.bin; struct table_info *info; uint16_t checksum; uint16_t offset; @@ -398,14 +421,14 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, checksum = le16_to_cpu(info->checksum); struct nps_info_header *nhdr = - (struct nps_info_header *)(adev->mman.discovery_bin + offset); + (struct nps_info_header *)(discovery_bin + offset); if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) { dev_dbg(adev->dev, "invalid ip discovery nps info table id\n"); return -EINVAL; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, le32_to_cpu(nhdr->size_bytes), checksum)) { dev_dbg(adev->dev, "invalid nps info data table checksum\n"); @@ -417,8 +440,11 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev) { - if (amdgpu_discovery == 2) + if (amdgpu_discovery == 2) { + /* Assume there is valid discovery TMR in VRAM even if binary is sideloaded */ + adev->discovery.reserve_tmr = true; return "amdgpu/ip_discovery.bin"; + } switch (adev->asic_type) { case CHIP_VEGA10: @@ -447,49 +473,53 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) { struct table_info *info; struct binary_header *bhdr; + uint8_t *discovery_bin; const char *fw_name; uint16_t offset; uint16_t size; uint16_t checksum; int r; - adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE; - adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL); - if (!adev->mman.discovery_bin) + adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL); + if (!adev->discovery.bin) return -ENOMEM; + adev->discovery.size = DISCOVERY_TMR_SIZE; + adev->discovery.debugfs_blob.data = adev->discovery.bin; + adev->discovery.debugfs_blob.size = adev->discovery.size; + discovery_bin = adev->discovery.bin; /* Read from file if it is the preferred option */ fw_name = amdgpu_discovery_get_fw_name(adev); if (fw_name != NULL) { drm_dbg(&adev->ddev, "use ip discovery information from file"); - r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name); + r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin, + fw_name); if (r) goto out; } else { drm_dbg(&adev->ddev, "use ip discovery information from memory"); - r = amdgpu_discovery_read_binary_from_mem( - adev, adev->mman.discovery_bin); + r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin); if (r) goto out; } /* check the ip discovery binary signature */ - if 
(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { + if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) { dev_err(adev->dev, "get invalid ip discovery binary signature\n"); r = -EINVAL; goto out; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = offsetof(struct binary_header, binary_checksum) + sizeof(bhdr->binary_checksum); size = le16_to_cpu(bhdr->binary_size) - offset; checksum = le16_to_cpu(bhdr->binary_checksum); - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - size, checksum)) { + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size, + checksum)) { dev_err(adev->dev, "invalid ip discovery binary checksum\n"); r = -EINVAL; goto out; @@ -501,15 +531,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct ip_discovery_header *ihdr = - (struct ip_discovery_header *)(adev->mman.discovery_bin + offset); + (struct ip_discovery_header *)(discovery_bin + offset); if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { dev_err(adev->dev, "invalid ip discovery data table signature\n"); r = -EINVAL; goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le16_to_cpu(ihdr->size), checksum)) { + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, + le16_to_cpu(ihdr->size), + checksum)) { dev_err(adev->dev, "invalid ip discovery data table checksum\n"); r = -EINVAL; goto out; @@ -522,7 +553,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct gpu_info_header *ghdr = - (struct gpu_info_header *)(adev->mman.discovery_bin + offset); + (struct gpu_info_header *)(discovery_bin + offset); if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) { dev_err(adev->dev, "invalid ip discovery gc table id\n"); @@ -530,8 +561,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le32_to_cpu(ghdr->size), checksum)) { + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, + le32_to_cpu(ghdr->size), + checksum)) { dev_err(adev->dev, "invalid gc data table checksum\n"); r = -EINVAL; goto out; @@ -544,7 +576,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct harvest_info_header *hhdr = - (struct harvest_info_header *)(adev->mman.discovery_bin + offset); + (struct harvest_info_header *)(discovery_bin + offset); if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) { dev_err(adev->dev, "invalid ip discovery harvest table signature\n"); @@ -552,8 +584,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - sizeof(struct harvest_table), checksum)) { + if (!amdgpu_discovery_verify_checksum( + discovery_bin + offset, + sizeof(struct harvest_table), checksum)) { dev_err(adev->dev, "invalid harvest data table checksum\n"); r = -EINVAL; goto out; @@ -566,7 +599,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct vcn_info_header *vhdr = - (struct vcn_info_header *)(adev->mman.discovery_bin + offset); + (struct vcn_info_header *)(discovery_bin + offset); if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) { dev_err(adev->dev, "invalid ip discovery vcn table id\n"); @@ -574,8 +607,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if 
(!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le32_to_cpu(vhdr->size_bytes), checksum)) { + if (!amdgpu_discovery_verify_checksum( + discovery_bin + offset, + le32_to_cpu(vhdr->size_bytes), checksum)) { dev_err(adev->dev, "invalid vcn data table checksum\n"); r = -EINVAL; goto out; @@ -588,7 +622,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (0 && offset) { struct mall_info_header *mhdr = - (struct mall_info_header *)(adev->mman.discovery_bin + offset); + (struct mall_info_header *)(discovery_bin + offset); if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) { dev_err(adev->dev, "invalid ip discovery mall table id\n"); @@ -596,8 +630,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le32_to_cpu(mhdr->size_bytes), checksum)) { + if (!amdgpu_discovery_verify_checksum( + discovery_bin + offset, + le32_to_cpu(mhdr->size_bytes), checksum)) { dev_err(adev->dev, "invalid mall data table checksum\n"); r = -EINVAL; goto out; @@ -607,8 +642,8 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) return 0; out: - kfree(adev->mman.discovery_bin); - adev->mman.discovery_bin = NULL; + kfree(adev->discovery.bin); + adev->discovery.bin = NULL; if ((amdgpu_discovery != 2) && (RREG32(mmIP_DISCOVERY_VERSION) == 4)) amdgpu_ras_query_boot_status(adev, 4); @@ -620,8 +655,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev); void amdgpu_discovery_fini(struct amdgpu_device *adev) { amdgpu_discovery_sysfs_fini(adev); - kfree(adev->mman.discovery_bin); - adev->mman.discovery_bin = NULL; + kfree(adev->discovery.bin); + adev->discovery.bin = NULL; } static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev, @@ -646,6 +681,7 @@ static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev, static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, uint32_t *vcn_harvest_count) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; @@ -655,21 +691,21 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, uint8_t inst; int i, j; - bhdr = (struct binary_header *)adev->mman.discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); + bhdr = (struct binary_header *)discovery_bin; + ihdr = (struct ip_discovery_header + *)(discovery_bin + + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); /* scan harvest bit of all IP data structures */ for (i = 0; i < num_dies; i++) { die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); - dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); + dhdr = (struct die_header *)(discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); for (j = 0; j < num_ips; j++) { - ip = (struct ip *)(adev->mman.discovery_bin + - ip_offset); + ip = (struct ip *)(discovery_bin + ip_offset); inst = ip->number_instance; hw_id = le16_to_cpu(ip->hw_id); if (amdgpu_discovery_validate_ip(adev, inst, hw_id)) @@ -711,13 +747,14 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, uint32_t *vcn_harvest_count, uint32_t *umc_harvest_count) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; struct harvest_table *harvest_info; u16 offset; int i; uint32_t 
umc_harvest_config = 0; - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset); if (!offset) { @@ -725,7 +762,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, return; } - harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset); + harvest_info = (struct harvest_table *)(discovery_bin + offset); for (i = 0; i < 32; i++) { if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) @@ -1021,8 +1058,8 @@ static void ip_disc_release(struct kobject *kobj) kobj); struct amdgpu_device *adev = ip_top->adev; - adev->ip_top = NULL; kfree(ip_top); + adev->discovery.ip_top = NULL; } static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev, @@ -1062,6 +1099,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, const size_t _ip_offset, const int num_ips, bool reg_base_64) { + uint8_t *discovery_bin = adev->discovery.bin; int ii, jj, kk, res; uint16_t hw_id; uint8_t inst; @@ -1079,7 +1117,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, struct ip_v4 *ip; struct ip_hw_instance *ip_hw_instance; - ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); + ip = (struct ip_v4 *)(discovery_bin + ip_offset); inst = ip->instance_number; hw_id = le16_to_cpu(ip->hw_id); if (amdgpu_discovery_validate_ip(adev, inst, hw_id) || @@ -1166,17 +1204,20 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) { + struct ip_discovery_top *ip_top = adev->discovery.ip_top; + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; - struct kset *die_kset = &adev->ip_top->die_kset; + struct kset *die_kset = &ip_top->die_kset; u16 num_dies, die_offset, num_ips; size_t ip_offset; int ii, res; - bhdr = (struct binary_header *)adev->mman.discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); + bhdr = (struct binary_header *)discovery_bin; + ihdr = (struct ip_discovery_header + *)(discovery_bin + + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); DRM_DEBUG("number of dies: %d\n", num_dies); @@ -1185,7 +1226,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) struct ip_die_entry *ip_die_entry; die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset); - dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); + dhdr = (struct die_header *)(discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); @@ -1219,30 +1260,32 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; + struct ip_discovery_top *ip_top; struct kset *die_kset; int res, ii; - if (!adev->mman.discovery_bin) + if (!discovery_bin) return -EINVAL; - adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL); - if (!adev->ip_top) + ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL); + if (!ip_top) return -ENOMEM; - adev->ip_top->adev = adev; - - res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype, + ip_top->adev = adev; + adev->discovery.ip_top = ip_top; + res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype, &adev->dev->kobj, "ip_discovery"); 
if (res) { DRM_ERROR("Couldn't init and add ip_discovery/"); goto Err; } - die_kset = &adev->ip_top->die_kset; + die_kset = &ip_top->die_kset; kobject_set_name(&die_kset->kobj, "%s", "die"); - die_kset->kobj.parent = &adev->ip_top->kobj; + die_kset->kobj.parent = &ip_top->kobj; die_kset->kobj.ktype = &die_kobj_ktype; - res = kset_register(&adev->ip_top->die_kset); + res = kset_register(&ip_top->die_kset); if (res) { DRM_ERROR("Couldn't register die_kset"); goto Err; @@ -1256,7 +1299,7 @@ static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev) return res; Err: - kobject_put(&adev->ip_top->kobj); + kobject_put(&ip_top->kobj); return res; } @@ -1301,10 +1344,11 @@ static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry) static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) { + struct ip_discovery_top *ip_top = adev->discovery.ip_top; struct list_head *el, *tmp; struct kset *die_kset; - die_kset = &adev->ip_top->die_kset; + die_kset = &ip_top->die_kset; spin_lock(&die_kset->list_lock); list_for_each_prev_safe(el, tmp, &die_kset->list) { list_del_init(el); @@ -1313,8 +1357,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) spin_lock(&die_kset->list_lock); } spin_unlock(&die_kset->list_lock); - kobject_put(&adev->ip_top->die_kset.kobj); - kobject_put(&adev->ip_top->kobj); + kobject_put(&ip_top->die_kset.kobj); + kobject_put(&ip_top->kobj); } /* ================================================== */ @@ -1325,6 +1369,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; + uint8_t *discovery_bin; struct ip_v4 *ip; uint16_t die_offset; uint16_t ip_offset; @@ -1340,22 +1385,23 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) r = amdgpu_discovery_init(adev); if (r) return r; - + discovery_bin = adev->discovery.bin; wafl_ver = 0; adev->gfx.xcc_mask = 0; adev->sdma.sdma_mask = 0; adev->vcn.inst_mask = 0; adev->jpeg.inst_mask = 0; - bhdr = (struct binary_header *)adev->mman.discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); + bhdr = (struct binary_header *)discovery_bin; + ihdr = (struct ip_discovery_header + *)(discovery_bin + + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); DRM_DEBUG("number of dies: %d\n", num_dies); for (i = 0; i < num_dies; i++) { die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); - dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); + dhdr = (struct die_header *)(discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); @@ -1369,7 +1415,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) le16_to_cpu(dhdr->die_id), num_ips); for (j = 0; j < num_ips; j++) { - ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); + ip = (struct ip_v4 *)(discovery_bin + ip_offset); inst = ip->instance_number; hw_id = le16_to_cpu(ip->hw_id); @@ -1519,16 +1565,16 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; struct ip_discovery_header *ihdr; struct binary_header *bhdr; int vcn_harvest_count = 0; int umc_harvest_count = 0; uint16_t offset, ihdr_ver; - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header 
*)discovery_bin; offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset); - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - offset); + ihdr = (struct ip_discovery_header *)(discovery_bin + offset); ihdr_ver = le16_to_cpu(ihdr->version); /* * Harvest table does not fit Navi1x and legacy GPUs, @@ -1575,22 +1621,23 @@ union gc_info { static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; union gc_info *gc_info; u16 offset; - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[GC].offset); if (!offset) return 0; - gc_info = (union gc_info *)(adev->mman.discovery_bin + offset); + gc_info = (union gc_info *)(discovery_bin + offset); switch (le16_to_cpu(gc_info->v1.header.version_major)) { case 1: @@ -1683,24 +1730,25 @@ union mall_info { static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; union mall_info *mall_info; u32 u, mall_size_per_umc, m_s_present, half_use; u64 mall_size; u16 offset; - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset); if (!offset) return 0; - mall_info = (union mall_info *)(adev->mman.discovery_bin + offset); + mall_info = (union mall_info *)(discovery_bin + offset); switch (le16_to_cpu(mall_info->v1.header.version_major)) { case 1: @@ -1739,12 +1787,13 @@ union vcn_info { static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; union vcn_info *vcn_info; u16 offset; int v; - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } @@ -1759,13 +1808,13 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset); if (!offset) return 0; - vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset); + vcn_info = (union vcn_info *)(discovery_bin + offset); switch (le16_to_cpu(vcn_info->v1.header.version_major)) { case 1: @@ -1825,6 +1874,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, struct amdgpu_gmc_memrange **ranges, int *range_cnt, bool refresh) { + uint8_t *discovery_bin = adev->discovery.bin; struct amdgpu_gmc_memrange *mem_ranges; struct binary_header *bhdr; union nps_info *nps_info; @@ -1841,13 +1891,13 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, return r; nps_info = &nps_data; } else { - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { dev_err(adev->dev, "fetch mem range failed, ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); if (!offset) @@ -1857,8 +1907,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) 
return -ENOENT; - nps_info = - (union nps_info *)(adev->mman.discovery_bin + offset); + nps_info = (union nps_info *)(discovery_bin + offset); } switch (le16_to_cpu(nps_info->v1.header.version_major)) { @@ -2361,6 +2410,21 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) amdgpu_ip_version(adev, SDMA0_HWIP, 0)); return -EINVAL; } + + return 0; +} + +static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev) +{ + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 12): + case IP_VERSION(13, 0, 14): + amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block); + break; + default: + break; + } return 0; } @@ -3141,6 +3205,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) if (r) return r; + r = amdgpu_discovery_set_ras_ip_blocks(adev); + if (r) + return r; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && !amdgpu_sriov_vf(adev)) || (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index b44d56465c5b..4ce04486cc31 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -24,9 +24,21 @@ #ifndef __AMDGPU_DISCOVERY__ #define __AMDGPU_DISCOVERY__ +#include + #define DISCOVERY_TMR_SIZE (10 << 10) #define DISCOVERY_TMR_OFFSET (64 << 10) +struct ip_discovery_top; + +struct amdgpu_discovery_info { + struct debugfs_blob_wrapper debugfs_blob; + struct ip_discovery_top *ip_top; + uint32_t size; + uint8_t *bin; + bool reserve_tmr; +}; + void amdgpu_discovery_fini(struct amdgpu_device *adev); int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 51bab32fd8c6..b5d34797d606 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -332,8 +332,6 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, if (crtc->enabled) active = true; - pm_runtime_mark_last_busy(dev->dev); - adev = drm_to_adev(dev); /* if we have active crtcs and we don't have a power ref, * take the current one @@ -1365,6 +1363,64 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = { { AMDGPU_FMT_DITHER_ENABLE, "on" }, }; +/** + * DOC: property for adaptive backlight modulation + * + * The 'adaptive backlight modulation' property is used for the compositor to + * directly control the adaptive backlight modulation power savings feature + * that is part of DCN hardware. + * + * The property will be attached specifically to eDP panels that support it. + * + * The property is by default set to 'sysfs' to allow the sysfs file 'panel_power_savings' + * to be able to control it. + * If set to 'off' the compositor will ensure it stays off. + * The other values 'min', 'bias min', 'bias max', and 'max' will control the + * intensity of the power savings. + * + * Modifying this value can have implications on color accuracy, so tread + * carefully. 
+ */ +static int amdgpu_display_setup_abm_prop(struct amdgpu_device *adev) +{ + const struct drm_prop_enum_list props[] = { + { ABM_SYSFS_CONTROL, "sysfs" }, + { ABM_LEVEL_OFF, "off" }, + { ABM_LEVEL_MIN, "min" }, + { ABM_LEVEL_BIAS_MIN, "bias min" }, + { ABM_LEVEL_BIAS_MAX, "bias max" }, + { ABM_LEVEL_MAX, "max" }, + }; + struct drm_property *prop; + int i; + + if (!adev->dc_enabled) + return 0; + + prop = drm_property_create(adev_to_drm(adev), DRM_MODE_PROP_ENUM, + "adaptive backlight modulation", + 6); + if (!prop) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(props); i++) { + int ret; + + ret = drm_property_add_enum(prop, props[i].type, + props[i].name); + + if (ret) { + drm_property_destroy(adev_to_drm(adev), prop); + + return ret; + } + } + + adev->mode_info.abm_level_property = prop; + + return 0; +} + int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) { int sz; @@ -1411,7 +1467,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) "dither", amdgpu_dither_enum_list, sz); - return 0; + return amdgpu_display_setup_abm_prop(adev); } void amdgpu_display_update_priority(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index 930c171473b4..49a29bf47a37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -55,4 +55,11 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev); int amdgpu_display_get_scanout_buffer(struct drm_plane *plane, struct drm_scanout_buffer *sb); +#define ABM_SYSFS_CONTROL -1 +#define ABM_LEVEL_OFF 0 +#define ABM_LEVEL_MIN 1 +#define ABM_LEVEL_BIAS_MIN 2 +#define ABM_LEVEL_BIAS_MAX 3 +#define ABM_LEVEL_MAX 4 + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index ed3bef1edfe4..e22cfa7c6d32 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -81,6 +81,19 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, struct drm_gem_object *obj = dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + int r; + + /* + * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+. + * Such buffers cannot be safely accessed over P2P due to device-local + * compression metadata. Fallback to system-memory path instead. + * Device supports GFX12 (GC 12.x or newer) + * BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag + * + */ + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) && + bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC) + attach->peer2peer = false; /* * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+. @@ -98,8 +111,14 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) attach->peer2peer = false; + r = dma_resv_lock(bo->tbo.base.resv, NULL); + if (r) + return r; + amdgpu_vm_bo_update_shared(bo); + dma_resv_unlock(bo->tbo.base.resv); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 7333e19291cf..2dfbddcef9ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -312,7 +312,7 @@ module_param_named(moverate, amdgpu_moverate, int, 0600); * DOC: audio (int) * Set HDMI/DPAudio. Only affects non-DC display handling. The default is -1 (Enabled), set 0 to disabled it. 
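The "adaptive backlight modulation" connector property added above is intended to be driven by a compositor through the standard DRM property interfaces. The following is a minimal userspace sketch, not part of this patch: it assumes libdrm, the helper name set_abm_mode() is made up, and it simply looks the property up by name on a connector and selects one of the enum values created in amdgpu_display_setup_abm_prop().

	/* Illustrative userspace sketch (assumes libdrm; not part of this patch). */
	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	static int set_abm_mode(int fd, uint32_t connector_id, const char *mode)
	{
		drmModeObjectPropertiesPtr props;
		int ret = -1;

		props = drmModeObjectGetProperties(fd, connector_id,
						   DRM_MODE_OBJECT_CONNECTOR);
		if (!props)
			return -1;

		for (uint32_t i = 0; i < props->count_props; i++) {
			drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);

			if (!prop)
				continue;

			if (!strcmp(prop->name, "adaptive backlight modulation")) {
				/* map the requested mode name to its enum value */
				for (int j = 0; j < prop->count_enums; j++) {
					if (!strcmp(prop->enums[j].name, mode)) {
						ret = drmModeObjectSetProperty(fd,
								connector_id,
								DRM_MODE_OBJECT_CONNECTOR,
								prop->prop_id,
								prop->enums[j].value);
						break;
					}
				}
			}
			drmModeFreeProperty(prop);
		}
		drmModeFreeObjectProperties(props);
		return ret;
	}

A compositor would pass "off" to force ABM off, "sysfs" to hand control back to panel_power_savings, or one of "min", "bias min", "bias max", "max" to pick a power-savings intensity, per the enum list defined in the hunk above.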
*/ -MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)"); +MODULE_PARM_DESC(audio, "HDMI/DP Audio enable for non DC displays (-1 = auto, 0 = disable, 1 = enable)"); module_param_named(audio, amdgpu_audio, int, 0444); /** @@ -354,22 +354,16 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint * DOC: lockup_timeout (string) * Set GPU scheduler timeout value in ms. * - * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or - * multiple values specified. 0 and negative values are invalidated. They will be adjusted - * to the default timeout. + * The format can be [single value] for setting all timeouts at once or + * [GFX,Compute,SDMA,Video] to set individual timeouts. + * Negative values mean infinity. * - * - With one value specified, the setting will apply to all non-compute jobs. - * - With multiple values specified, the first one will be for GFX. - * The second one is for Compute. The third and fourth ones are - * for SDMA and Video. - * - * By default(with no lockup_timeout settings), the timeout for all jobs is 10000. + * By default(with no lockup_timeout settings), the timeout for all queues is 2000. */ MODULE_PARM_DESC(lockup_timeout, - "GPU lockup timeout in ms (default: 10000 for all jobs. " - "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " - "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); -module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); + "GPU lockup timeout in ms (default: 2000. 0: keep default value. negative: infinity timeout), format: [single value for all] or [GFX,Compute,SDMA,Video]."); +module_param_string(lockup_timeout, amdgpu_lockup_timeout, + sizeof(amdgpu_lockup_timeout), 0444); /** * DOC: dpm (int) @@ -624,39 +618,39 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644); /** * DOC: si_support (int) - * Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled, - * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available, - * otherwise using amdgpu driver. + * 1 = enabled, 0 = disabled, -1 = default + * + * SI (Southern Islands) are first generation GCN GPUs, supported by both + * drivers: radeon (old) and amdgpu (new). This parameter controls whether + * amdgpu should support SI. + * By default, SI dedicated GPUs are supported by amdgpu. + * Only relevant when CONFIG_DRM_AMDGPU_SI is enabled to build SI support in amdgpu. + * See also radeon.si_support which should be disabled when amdgpu.si_support is + * enabled, and vice versa. */ +int amdgpu_si_support = -1; #ifdef CONFIG_DRM_AMDGPU_SI - -#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE) -int amdgpu_si_support; -MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))"); -#else -int amdgpu_si_support = 1; -MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)"); -#endif - +MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled, -1 = default)"); module_param_named(si_support, amdgpu_si_support, int, 0444); #endif /** * DOC: cik_support (int) - * Set CIK support driver. This parameter works after set config CONFIG_DRM_AMDGPU_CIK. 
For CIK asic, when radeon driver is enabled, - * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available, - * otherwise using amdgpu driver. + * 1 = enabled, 0 = disabled, -1 = default + * + * CIK (Sea Islands) are second generation GCN GPUs, supported by both + * drivers: radeon (old) and amdgpu (new). This parameter controls whether + * amdgpu should support CIK. + * By default: + * - CIK dedicated GPUs are supported by amdgpu. + * - CIK APUs are supported by radeon (except when radeon is not built). + * Only relevant when CONFIG_DRM_AMDGPU_CIK is enabled to build CIK support in amdgpu. + * See also radeon.cik_support which should be disabled when amdgpu.cik_support is + * enabled, and vice versa. */ +int amdgpu_cik_support = -1; #ifdef CONFIG_DRM_AMDGPU_CIK - -#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE) -int amdgpu_cik_support; -MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))"); -#else -int amdgpu_cik_support = 1; -MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)"); -#endif - +MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled, -1 = default)"); module_param_named(cik_support, amdgpu_cik_support, int, 0444); #endif @@ -2234,7 +2228,6 @@ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev) adev->pdev->bus->number, i); if (p) { pm_runtime_get_sync(&p->dev); - pm_runtime_mark_last_busy(&p->dev); pm_runtime_put_autosuspend(&p->dev); pci_dev_put(p); } @@ -2313,6 +2306,72 @@ static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long fl return flags; } +static bool amdgpu_support_enabled(struct device *dev, + const enum amd_asic_type family) +{ + const char *gen; + const char *param; + int module_param = -1; + bool radeon_support_built = IS_ENABLED(CONFIG_DRM_RADEON); + bool amdgpu_support_built = false; + bool support_by_default = false; + + switch (family) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: + gen = "SI"; + param = "si_support"; + module_param = amdgpu_si_support; + amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_SI); + support_by_default = true; + break; + + case CHIP_BONAIRE: + case CHIP_HAWAII: + support_by_default = true; + fallthrough; + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + gen = "CIK"; + param = "cik_support"; + module_param = amdgpu_cik_support; + amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_CIK); + break; + + default: + /* All other chips are supported by amdgpu only */ + return true; + } + + if (!amdgpu_support_built) { + dev_info(dev, "amdgpu built without %s support\n", gen); + return false; + } + + if ((module_param == -1 && (support_by_default || !radeon_support_built)) || + module_param == 1) { + if (radeon_support_built) + dev_info(dev, "%s support provided by amdgpu.\n" + "Use radeon.%s=1 amdgpu.%s=0 to override.\n", + gen, param, param); + + return true; + } + + if (radeon_support_built) + dev_info(dev, "%s support provided by radeon.\n" + "Use radeon.%s=0 amdgpu.%s=1 to override.\n", + gen, param, param); + else if (module_param == 0) + dev_info(dev, "%s support disabled by module param\n", gen); + + return false; +} + static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2360,48 +2419,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENOTSUPP; } - switch (flags & AMD_ASIC_MASK) { - case CHIP_TAHITI: - case 
CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: -#ifdef CONFIG_DRM_AMDGPU_SI - if (!amdgpu_si_support) { - dev_info(&pdev->dev, - "SI support provided by radeon.\n"); - dev_info(&pdev->dev, - "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n" - ); - return -ENODEV; - } - break; -#else - dev_info(&pdev->dev, "amdgpu is built without SI support.\n"); + if (!amdgpu_support_enabled(&pdev->dev, flags & AMD_ASIC_MASK)) return -ENODEV; -#endif - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: -#ifdef CONFIG_DRM_AMDGPU_CIK - if (!amdgpu_cik_support) { - dev_info(&pdev->dev, - "CIK support provided by radeon.\n"); - dev_info(&pdev->dev, - "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n" - ); - return -ENODEV; - } - break; -#else - dev_info(&pdev->dev, "amdgpu is built without CIK support.\n"); - return -ENODEV; -#endif - default: - break; - } adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); if (IS_ERR(adev)) @@ -2480,7 +2499,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, pm_runtime_allow(ddev->dev); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); pci_wake_from_d3(pdev, TRUE); @@ -2564,7 +2582,8 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) */ if (!amdgpu_passthrough(adev)) adev->mp1_state = PP_MP1_STATE_UNLOAD; - amdgpu_device_ip_suspend(adev); + amdgpu_device_prepare(dev); + amdgpu_device_suspend(dev, true); adev->mp1_state = PP_MP1_STATE_NONE; } @@ -2782,22 +2801,8 @@ static int amdgpu_runtime_idle_check_userq(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(drm_dev); - struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; - int ret = 0; - mutex_lock(&adev->userq_mutex); - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - ret = -EBUSY; - goto done; - } - } -done: - mutex_unlock(&adev->userq_mutex); - - return ret; + return xa_empty(&adev->userq_doorbell_xa) ? 
0 : -EBUSY; } static int amdgpu_pmops_runtime_suspend(struct device *dev) @@ -2944,7 +2949,6 @@ static int amdgpu_pmops_runtime_idle(struct device *dev) ret = amdgpu_runtime_idle_check_userq(dev); done: - pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); return ret; } @@ -2980,7 +2984,6 @@ long amdgpu_drm_ioctl(struct file *filp, ret = drm_ioctl(filp, cmd, arg); - pm_runtime_mark_last_busy(dev->dev); out: pm_runtime_put_autosuspend(dev->dev); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 18a7829122d2..c7843e336310 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -45,16 +45,11 @@ * Cast helper */ static const struct dma_fence_ops amdgpu_fence_ops; -static const struct dma_fence_ops amdgpu_job_fence_ops; static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f) { struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); - if (__f->base.ops == &amdgpu_fence_ops || - __f->base.ops == &amdgpu_job_fence_ops) - return __f; - - return NULL; + return __f; } /** @@ -98,51 +93,32 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) * amdgpu_fence_emit - emit a fence on the requested ring * * @ring: ring the fence is associated with - * @f: resulting fence object * @af: amdgpu fence input * @flags: flags to pass into the subordinate .emit_fence() call * * Emits a fence command on the requested ring (all asics). * Returns 0 on success, -ENOMEM on failure. */ -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, - struct amdgpu_fence *af, unsigned int flags) +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af, + unsigned int flags) { struct amdgpu_device *adev = ring->adev; struct dma_fence *fence; - struct amdgpu_fence *am_fence; struct dma_fence __rcu **ptr; uint32_t seq; int r; - if (!af) { - /* create a separate hw fence */ - am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL); - if (!am_fence) - return -ENOMEM; - } else { - am_fence = af; - } - fence = &am_fence->base; - am_fence->ring = ring; + fence = &af->base; + af->ring = ring; seq = ++ring->fence_drv.sync_seq; - am_fence->seq = seq; - if (af) { - dma_fence_init(fence, &amdgpu_job_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - /* Against remove in amdgpu_job_{free, free_cb} */ - dma_fence_get(fence); - } else { - dma_fence_init(fence, &amdgpu_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - } + dma_fence_init(fence, &amdgpu_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, seq); amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); - amdgpu_fence_save_wptr(fence); + amdgpu_fence_save_wptr(af); pm_runtime_get_noresume(adev_to_drm(adev)->dev); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { @@ -167,8 +143,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, */ rcu_assign_pointer(*ptr, dma_fence_get(fence)); - *f = fence; - return 0; } @@ -276,7 +250,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) drv->signalled_wptr = am_fence->wptr; dma_fence_signal(fence); dma_fence_put(fence); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } while (last_seq != seq); @@ -669,36 +642,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) } } -/** - * 
amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring - * - * @ring: fence of the ring to be cleared - * - */ -void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring) -{ - int i; - struct dma_fence *old, **ptr; - - for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) { - ptr = &ring->fence_drv.fences[i]; - old = rcu_dereference_protected(*ptr, 1); - if (old && old->ops == &amdgpu_job_fence_ops) { - struct amdgpu_job *job; - - /* For non-scheduler bad job, i.e. failed ib test, we need to signal - * it right here or we won't be able to track them in fence_drv - * and they will remain unsignaled during sa_bo free. - */ - job = container_of(old, struct amdgpu_job, hw_fence.base); - if (!job->base.s_fence && !dma_fence_is_signaled(old)) - dma_fence_signal(old); - RCU_INIT_POINTER(*ptr, NULL); - dma_fence_put(old); - } - } -} - /** * amdgpu_fence_driver_set_error - set error code on fences * @ring: the ring which contains the fences @@ -755,7 +698,7 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring) /** * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence * - * @fence: fence of the ring to signal + * @af: fence of the ring to signal * */ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af) @@ -792,15 +735,13 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af) } while (last_seq != seq); spin_unlock_irqrestore(&ring->fence_drv.lock, flags); /* signal the guilty fence */ - amdgpu_fence_write(ring, af->seq); + amdgpu_fence_write(ring, (u32)af->base.seqno); amdgpu_fence_process(ring); } -void amdgpu_fence_save_wptr(struct dma_fence *fence) +void amdgpu_fence_save_wptr(struct amdgpu_fence *af) { - struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base); - - am_fence->wptr = am_fence->ring->wptr; + af->wptr = af->ring->wptr; } static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring, @@ -866,13 +807,6 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) return (const char *)to_amdgpu_fence(f)->ring->name; } -static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f) -{ - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); - - return (const char *)to_amdgpu_ring(job->base.sched)->name; -} - /** * amdgpu_fence_enable_signaling - enable signalling on fence * @f: fence @@ -889,23 +823,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f) return true; } -/** - * amdgpu_job_fence_enable_signaling - enable signalling on job fence - * @f: fence - * - * This is the simliar function with amdgpu_fence_enable_signaling above, it - * only handles the job embedded fence. - */ -static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f) -{ - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); - - if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer)) - amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched)); - - return true; -} - /** * amdgpu_fence_free - free up the fence memory * @@ -921,21 +838,6 @@ static void amdgpu_fence_free(struct rcu_head *rcu) kfree(to_amdgpu_fence(f)); } -/** - * amdgpu_job_fence_free - free up the job with embedded fence - * - * @rcu: RCU callback head - * - * Free up the job with embedded fence after the RCU grace period. 
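Taking a step back from the deletions in this hunk: with the separate job-fence ops gone, every hardware fence uses amdgpu_fence_ops and the caller now owns the struct amdgpu_fence allocation. A condensed sketch of the reworked amdgpu_fence_emit() calling convention, loosely mirroring the amdgpu_ib_schedule() changes later in this patch (hypothetical helper, error handling trimmed):

static int example_emit_and_wait(struct amdgpu_ring *ring)
{
	struct amdgpu_fence *af;
	struct dma_fence *f;
	int r;

	af = kzalloc(sizeof(*af), GFP_KERNEL);	/* caller owns the fence object now */
	if (!af)
		return -ENOMEM;

	r = amdgpu_ring_alloc(ring, 32);
	if (r)
		goto err_free;

	/* initializes af->base with amdgpu_fence_ops and emits the fence packet */
	r = amdgpu_fence_emit(ring, af, 0);
	if (r) {
		amdgpu_ring_undo(ring);
		goto err_free;
	}
	amdgpu_ring_commit(ring);

	f = &af->base;
	r = dma_fence_wait(f, false);
	dma_fence_put(f);	/* the final reference drop frees af via .release */
	return r;

err_free:
	kfree(af);
	return r;
}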
- */ -static void amdgpu_job_fence_free(struct rcu_head *rcu) -{ - struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); - - /* free job if fence has a parent job */ - kfree(container_of(f, struct amdgpu_job, hw_fence.base)); -} - /** * amdgpu_fence_release - callback that fence can be freed * @@ -949,19 +851,6 @@ static void amdgpu_fence_release(struct dma_fence *f) call_rcu(&f->rcu, amdgpu_fence_free); } -/** - * amdgpu_job_fence_release - callback that job embedded fence can be freed - * - * @f: fence - * - * This is the simliar function with amdgpu_fence_release above, it - * only handles the job embedded fence. - */ -static void amdgpu_job_fence_release(struct dma_fence *f) -{ - call_rcu(&f->rcu, amdgpu_job_fence_free); -} - static const struct dma_fence_ops amdgpu_fence_ops = { .get_driver_name = amdgpu_fence_get_driver_name, .get_timeline_name = amdgpu_fence_get_timeline_name, @@ -969,13 +858,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = { .release = amdgpu_fence_release, }; -static const struct dma_fence_ops amdgpu_job_fence_ops = { - .get_driver_name = amdgpu_fence_get_driver_name, - .get_timeline_name = amdgpu_job_fence_get_timeline_name, - .enable_signaling = amdgpu_job_fence_enable_signaling, - .release = amdgpu_job_fence_release, -}; - /* * Fence debugfs */ @@ -1045,7 +927,6 @@ static int gpu_recover_get(void *data, u64 *val) *val = atomic_read(&adev->reset_domain->reset_res); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index b2033f8352f5..d2237ce9da70 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -302,7 +302,6 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, int pages) { unsigned t; - unsigned p; int i, j; u64 page_base; /* Starting from VEGA10, system bit must be 0 to mean invalid. */ @@ -316,8 +315,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, return; t = offset / AMDGPU_GPU_PAGE_SIZE; - p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE; - for (i = 0; i < pages; i++, p++) { + for (i = 0; i < pages; i++) { page_base = adev->dummy_page_addr; if (!adev->gart.ptr) continue; @@ -369,6 +367,42 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, drm_dev_exit(idx); } +/** + * amdgpu_gart_map_vram_range - map VRAM pages into the GART page table + * + * @adev: amdgpu_device pointer + * @pa: physical address of the first page to be mapped + * @start_page: first page to map in the GART aperture + * @num_pages: number of pages to be mapped + * @flags: page table entry flags + * @dst: CPU address of the GART table + * + * Binds a BO that is allocated in VRAM to the GART page table + * (all ASICs). + * + * Useful when a kernel BO is located in VRAM but + * needs to be accessed from the GART address space. + */ +void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa, + uint64_t start_page, uint64_t num_pages, + uint64_t flags, void *dst) +{ + u32 i, idx; + + /* The SYSTEM flag indicates the pages aren't in VRAM. 
*/ + WARN_ON_ONCE(flags & AMDGPU_PTE_SYSTEM); + + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return; + + for (i = 0; i < num_pages; ++i) { + amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr, + start_page + i, pa + AMDGPU_GPU_PAGE_SIZE * i, flags); + } + + drm_dev_exit(idx); +} + /** * amdgpu_gart_bind - bind pages into the gart page table * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index 7cc980bf4725..d3118275ddae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -64,5 +64,8 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, void *dst); void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, int pages, dma_addr_t *dma_addr, uint64_t flags); +void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa, + uint64_t start_page, uint64_t num_pages, + uint64_t flags, void *dst); void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index b7ebae289bea..3e38c5db2987 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -198,7 +198,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj) struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj); amdgpu_hmm_unregister(aobj); - ttm_bo_put(&aobj->tbo); + ttm_bo_fini(&aobj->tbo); } int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, @@ -531,7 +531,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_amdgpu_gem_userptr *args = data; struct amdgpu_fpriv *fpriv = filp->driver_priv; struct drm_gem_object *gobj; - struct hmm_range *range; + struct amdgpu_hmm_range *range; struct amdgpu_bo *bo; uint32_t handle; int r; @@ -572,10 +572,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, goto release_object; if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { - r = amdgpu_ttm_tt_get_user_pages(bo, &range); - if (r) + range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!range)) + return -ENOMEM; + r = amdgpu_ttm_tt_get_user_pages(bo, range); + if (r) { + amdgpu_hmm_range_free(range); goto release_object; - + } r = amdgpu_bo_reserve(bo, true); if (r) goto user_pages_done; @@ -597,8 +601,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, user_pages_done: if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); - + amdgpu_hmm_range_free(range); release_object: drm_gem_object_put(gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index ebe2b4c68b0f..8b118c53f351 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -33,6 +33,7 @@ #include "amdgpu_reset.h" #include "amdgpu_xcp.h" #include "amdgpu_xgmi.h" +#include "amdgpu_mes.h" #include "nvd.h" /* delay 0.1 second to enable gfx off feature */ @@ -1194,6 +1195,75 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3 dev_err(adev->dev, "failed to write reg:%x\n", reg); } +int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *ring = &kiq->ring; + + if (amdgpu_device_skip_hw_access(adev)) + return 0; + + if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready) + return amdgpu_mes_hdp_flush(adev); + + if 
(!ring->funcs->emit_hdp_flush) { + return -EOPNOTSUPP; + } + + spin_lock_irqsave(&kiq->ring_lock, flags); + r = amdgpu_ring_alloc(ring, 32); + if (r) + goto failed_unlock; + + amdgpu_ring_emit_hdp_flush(ring); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) + goto failed_undo; + + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which cause + * gpu_recover() hang there. + * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt())) + goto failed_kiq_hdp_flush; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + if (amdgpu_in_reset(adev)) + goto failed_kiq_hdp_flush; + + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) { + dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n"); + return -ETIMEDOUT; + } + + return 0; + +failed_undo: + amdgpu_ring_undo(ring); +failed_unlock: + spin_unlock_irqrestore(&kiq->ring_lock, flags); +failed_kiq_hdp_flush: + dev_err(adev->dev, "failed to flush HDP via KIQ\n"); + return r < 0 ? r : -EIO; +} + int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev) { if (amdgpu_num_kcq == -1) { @@ -1600,7 +1670,6 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev, ret = amdgpu_gfx_run_cleaner_shader(adev, value); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); if (ret) @@ -2485,3 +2554,4 @@ void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev) &amdgpu_debugfs_compute_sched_mask_fops); #endif } + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index fb5f7a0ee029..efd61a1ccc66 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -615,6 +615,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id); void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id); +int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev); int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev); void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 55097ca10738..727342689d4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -86,6 +86,11 @@ enum amdgpu_memory_partition { #define AMDGPU_MAX_MEM_RANGES 8 +#define AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY 0x80 +#define AMDGPU_GMC9_FAULT_SOURCE_DATA_READ 0x40 +#define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE 0x20 +#define AMDGPU_GMC9_FAULT_SOURCE_DATA_EXE 0x10 + /* * GMC page fault information */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 0760e70402ec..895c1e4c6747 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -284,6 +284,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) ttm_resource_manager_init(man, 
&adev->mman.bdev, gtt_size); start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; + start += amdgpu_vce_required_gart_pages(adev); size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; drm_mm_init(&mgr->mm, start, size); spin_lock_init(&mgr->lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c index 6e02fb9ac2f6..5a60d69a3e1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c @@ -66,3 +66,19 @@ void amdgpu_hdp_generic_flush(struct amdgpu_device *adev, 0); } } + +void amdgpu_hdp_invalidate(struct amdgpu_device *adev, struct amdgpu_ring *ring) +{ + if (adev->asic_funcs && adev->asic_funcs->invalidate_hdp) + adev->asic_funcs->invalidate_hdp(adev, ring); + else if (adev->hdp.funcs && adev->hdp.funcs->invalidate_hdp) + adev->hdp.funcs->invalidate_hdp(adev, ring); +} + +void amdgpu_hdp_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring) +{ + if (adev->asic_funcs && adev->asic_funcs->flush_hdp) + adev->asic_funcs->flush_hdp(adev, ring); + else if (adev->hdp.funcs && adev->hdp.funcs->flush_hdp) + adev->hdp.funcs->flush_hdp(adev, ring); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h index 4cfd932b7e91..d9f488fa76b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h @@ -46,4 +46,8 @@ struct amdgpu_hdp { int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev); void amdgpu_hdp_generic_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring); +void amdgpu_hdp_invalidate(struct amdgpu_device *adev, + struct amdgpu_ring *ring); +void amdgpu_hdp_flush(struct amdgpu_device *adev, + struct amdgpu_ring *ring); #endif /* __AMDGPU_HDP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c index 2c6a6b858112..90d26d820bac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c @@ -168,17 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, void *owner, - struct hmm_range **phmm_range) + struct amdgpu_hmm_range *range) { - struct hmm_range *hmm_range; unsigned long end; unsigned long timeout; unsigned long *pfns; int r = 0; - - hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); - if (unlikely(!hmm_range)) - return -ENOMEM; + struct hmm_range *hmm_range = &range->hmm_range; pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); if (unlikely(!pfns)) { @@ -221,28 +217,77 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, hmm_range->start = start; hmm_range->hmm_pfns = pfns; - *phmm_range = hmm_range; - return 0; out_free_pfns: kvfree(pfns); + hmm_range->hmm_pfns = NULL; out_free_range: - kfree(hmm_range); - if (r == -EBUSY) r = -EAGAIN; return r; } -bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range) +/** + * amdgpu_hmm_range_valid - check if an HMM range is still valid + * @range: pointer to the &struct amdgpu_hmm_range to validate + * + * Determines whether the given HMM range @range is still valid by + * checking for invalidations via the MMU notifier sequence. This is + * typically used to verify that the range has not been invalidated + * by concurrent address space updates before it is accessed. 
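Combined with the amdgpu_gem_userptr_ioctl() rework earlier in this patch, the new helpers give a simple alloc/populate/validate/free lifecycle for user pages. A hypothetical sketch (the helper name and the retry policy are illustrative, not taken from this patch):

static int example_get_user_pages(struct amdgpu_bo *bo)
{
	struct amdgpu_hmm_range *range;
	int r;

	range = amdgpu_hmm_range_alloc(bo);	/* takes a reference on @bo */
	if (!range)
		return -ENOMEM;

	r = amdgpu_ttm_tt_get_user_pages(bo, range);
	if (r)
		goto out_free;

	/* an MMU notifier may have invalidated the range while we faulted it */
	if (!amdgpu_hmm_range_valid(range))
		r = -EAGAIN;	/* caller retries */

out_free:
	amdgpu_hmm_range_free(range);	/* frees the pfn array and drops the bo ref */
	return r;
}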
+ * + * Return: + * * true if @range is valid and can be used safely + * * false if @range is NULL or has been invalidated + */ +bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range) { - bool r; + if (!range) + return false; - r = mmu_interval_read_retry(hmm_range->notifier, - hmm_range->notifier_seq); - kvfree(hmm_range->hmm_pfns); - kfree(hmm_range); - - return r; + return !mmu_interval_read_retry(range->hmm_range.notifier, + range->hmm_range.notifier_seq); +} + +/** + * amdgpu_hmm_range_alloc - allocate and initialize an AMDGPU HMM range + * @bo: optional buffer object to associate with this HMM range + * + * Allocates memory for amdgpu_hmm_range and associates it with the @bo passed. + * The reference count of the @bo is incremented. + * + * Return: + * Pointer to a newly allocated struct amdgpu_hmm_range on success, + * or NULL if memory allocation fails. + */ +struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo) +{ + struct amdgpu_hmm_range *range; + + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (!range) + return NULL; + + range->bo = amdgpu_bo_ref(bo); + return range; +} + +/** + * amdgpu_hmm_range_free - release an AMDGPU HMM range + * @range: pointer to the range object to free + * + * Releases all resources held by @range, including the associated + * hmm_pfns and the dropping reference of associated bo if any. + * + * Return: void + */ +void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) +{ + if (!range) + return; + + kvfree(range->hmm_range.hmm_pfns); + amdgpu_bo_unref(&range->bo); + kfree(range); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h index 953e1d06de20..140bc9cd57b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h @@ -31,13 +31,20 @@ #include #include +struct amdgpu_hmm_range { + struct hmm_range hmm_range; + struct amdgpu_bo *bo; +}; + int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, void *owner, - struct hmm_range **phmm_range); -bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); + struct amdgpu_hmm_range *range); #if defined(CONFIG_HMM_MIRROR) +bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range); +struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo); +void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range); int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr); void amdgpu_hmm_unregister(struct amdgpu_bo *bo); #else @@ -47,7 +54,20 @@ static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr) "add CONFIG_ZONE_DEVICE=y in config file to fix this\n"); return -ENODEV; } + static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {} + +static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range) +{ + return false; +} + +static inline struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo) +{ + return NULL; +} + +static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {} #endif #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 7d9bcb72e8dd..586a58facca1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -149,17 +149,19 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, if (job) { vm = job->vm; fence_ctx = job->base.s_fence ? 
- job->base.s_fence->scheduled.context : 0; + job->base.s_fence->finished.context : 0; shadow_va = job->shadow_va; csa_va = job->csa_va; gds_va = job->gds_va; init_shadow = job->init_shadow; - af = &job->hw_fence; + af = job->hw_fence; /* Save the context of the job for reset handling. * The driver needs this so it can skip the ring * contents for guilty contexts. */ - af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0; + af->context = fence_ctx; + /* the vm fence is also part of the job's context */ + job->hw_vm_fence->context = fence_ctx; } else { vm = NULL; fence_ctx = 0; @@ -167,23 +169,28 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, csa_va = 0; gds_va = 0; init_shadow = false; - af = NULL; + af = kzalloc(sizeof(*af), GFP_ATOMIC); + if (!af) + return -ENOMEM; } if (!ring->sched.ready) { dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name); - return -EINVAL; + r = -EINVAL; + goto free_fence; } if (vm && !job->vmid) { dev_err(adev->dev, "VM IB without ID\n"); - return -EINVAL; + r = -EINVAL; + goto free_fence; } if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) && (!ring->funcs->secure_submission_supported)) { dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name); - return -EINVAL; + r = -EINVAL; + goto free_fence; } alloc_size = ring->funcs->emit_frame_size + num_ibs * @@ -192,7 +199,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, r = amdgpu_ring_alloc(ring, alloc_size); if (r) { dev_err(adev->dev, "scheduling IB failed (%d).\n", r); - return r; + goto free_fence; } need_ctx_switch = ring->current_ctx != fence_ctx; @@ -289,7 +296,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr); } - r = amdgpu_fence_emit(ring, f, af, fence_flags); + r = amdgpu_fence_emit(ring, af, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vmid) @@ -297,6 +304,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, amdgpu_ring_undo(ring); return r; } + *f = &af->base; + /* get a ref for the job */ + if (job) + dma_fence_get(*f); if (ring->funcs->insert_end) ring->funcs->insert_end(ring); @@ -317,12 +328,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, * fence so we know what rings contents to backup * after we reset the queue. 
*/ - amdgpu_fence_save_wptr(*f); + amdgpu_fence_save_wptr(af); amdgpu_ring_ib_end(ring); amdgpu_ring_commit(ring); return 0; + +free_fence: + if (!job) + kfree(af); + return r; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 3ef5bc95642c..9cab36322c16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -201,58 +201,34 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->vm_hub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; - struct dma_fence **fences; - unsigned i; + /* If anybody is waiting for a VMID let everybody wait for fairness */ if (!dma_fence_is_signaled(ring->vmid_wait)) { *fence = dma_fence_get(ring->vmid_wait); return 0; } - fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT); - if (!fences) - return -ENOMEM; - /* Check if we have an idle VMID */ - i = 0; - list_for_each_entry((*idle), &id_mgr->ids_lru, list) { + list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) { /* Don't use per engine and per process VMID at the same time */ struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ? NULL : ring; - fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r); - if (!fences[i]) - break; - ++i; + *fence = amdgpu_sync_peek_fence(&(*idle)->active, r); + if (!(*fence)) + return 0; } - /* If we can't find a idle VMID to use, wait till one becomes available */ - if (&(*idle)->list == &id_mgr->ids_lru) { - u64 fence_context = adev->vm_manager.fence_context + ring->idx; - unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; - struct dma_fence_array *array; - unsigned j; - - *idle = NULL; - for (j = 0; j < i; ++j) - dma_fence_get(fences[j]); - - array = dma_fence_array_create(i, fences, fence_context, - seqno, true); - if (!array) { - for (j = 0; j < i; ++j) - dma_fence_put(fences[j]); - kfree(fences); - return -ENOMEM; - } - - *fence = dma_fence_get(&array->base); - dma_fence_put(ring->vmid_wait); - ring->vmid_wait = &array->base; - return 0; - } - kfree(fences); + /* + * If we can't find a idle VMID to use, wait on a fence from the least + * recently used in the hope that it will be available soon. + */ + *idle = NULL; + dma_fence_put(ring->vmid_wait); + ring->vmid_wait = dma_fence_get(*fence); + /* This is the reference we return */ + dma_fence_get(*fence); return 0; } @@ -313,7 +289,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, * user of the VMID. 
*/ r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished, - GFP_NOWAIT); + GFP_ATOMIC); if (r) return r; @@ -373,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, */ r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished, - GFP_NOWAIT); + GFP_ATOMIC); if (r) return r; @@ -426,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* Remember this submission as user of the VMID */ r = amdgpu_sync_fence(&id->active, &job->base.s_fence->finished, - GFP_NOWAIT); + GFP_ATOMIC); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index d020a890a0ea..0a0dcbf0798d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -130,14 +130,12 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) } /* attempt a per ring reset */ - if (unlikely(adev->debug_disable_gpu_ring_reset)) { - dev_err(adev->dev, "Ring reset disabled by debug mask\n"); - } else if (amdgpu_gpu_recovery && - amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) && - ring->funcs->reset) { + if (amdgpu_gpu_recovery && + amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) && + ring->funcs->reset) { dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name); - r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence); + r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence); if (!r) { atomic_inc(&ring->adev->gpu_reset_counter); dev_err(adev->dev, "Ring %s reset succeeded\n", @@ -186,6 +184,9 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int num_ibs, struct amdgpu_job **job, u64 drm_client_id) { + struct amdgpu_fence *af; + int r; + if (num_ibs == 0) return -EINVAL; @@ -193,6 +194,20 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (!*job) return -ENOMEM; + af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); + if (!af) { + r = -ENOMEM; + goto err_job; + } + (*job)->hw_fence = af; + + af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); + if (!af) { + r = -ENOMEM; + goto err_fence; + } + (*job)->hw_vm_fence = af; + (*job)->vm = vm; amdgpu_sync_create(&(*job)->explicit_sync); @@ -204,6 +219,14 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, return drm_sched_job_init(&(*job)->base, entity, 1, owner, drm_client_id); + +err_fence: + kfree((*job)->hw_fence); +err_job: + kfree(*job); + *job = NULL; + + return r; } int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, @@ -223,7 +246,10 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, if (r) { if (entity) drm_sched_job_cleanup(&(*job)->base); + kfree((*job)->hw_vm_fence); + kfree((*job)->hw_fence); kfree(*job); + *job = NULL; } return r; @@ -251,11 +277,11 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) struct dma_fence *f; unsigned i; - /* Check if any fences where initialized */ + /* Check if any fences were initialized */ if (job->base.s_fence && job->base.s_fence->finished.ops) f = &job->base.s_fence->finished; - else if (job->hw_fence.base.ops) - f = &job->hw_fence.base; + else if (job->hw_fence && job->hw_fence->base.ops) + f = &job->hw_fence->base; else f = NULL; @@ -271,11 +297,16 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job) amdgpu_sync_free(&job->explicit_sync); - /* only put the hw fence if has embedded fence */ - if (!job->hw_fence.base.ops) - kfree(job); + if (job->hw_fence->base.ops) + 
dma_fence_put(&job->hw_fence->base); else - dma_fence_put(&job->hw_fence.base); + kfree(job->hw_fence); + if (job->hw_vm_fence->base.ops) + dma_fence_put(&job->hw_vm_fence->base); + else + kfree(job->hw_vm_fence); + + kfree(job); } void amdgpu_job_set_gang_leader(struct amdgpu_job *job, @@ -304,10 +335,16 @@ void amdgpu_job_free(struct amdgpu_job *job) if (job->gang_submit != &job->base.s_fence->scheduled) dma_fence_put(job->gang_submit); - if (!job->hw_fence.base.ops) - kfree(job); + if (job->hw_fence->base.ops) + dma_fence_put(&job->hw_fence->base); else - dma_fence_put(&job->hw_fence.base); + kfree(job->hw_fence); + if (job->hw_vm_fence->base.ops) + dma_fence_put(&job->hw_vm_fence->base); + else + kfree(job->hw_vm_fence); + + kfree(job); } struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 4a6487eb6cb5..7abf069d17d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -64,7 +64,8 @@ struct amdgpu_job { struct drm_sched_job base; struct amdgpu_vm *vm; struct amdgpu_sync explicit_sync; - struct amdgpu_fence hw_fence; + struct amdgpu_fence *hw_fence; + struct amdgpu_fence *hw_vm_fence; struct dma_fence *gang_submit; uint32_t preamble_status; uint32_t preemption_status; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index b3e6b3fcdf2c..6ee77f431d56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1471,7 +1471,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) kfree(fpriv); out_suspend: - pm_runtime_mark_last_busy(dev->dev); pm_put: pm_runtime_put_autosuspend(dev->dev); @@ -1539,7 +1538,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, kfree(fpriv); file_priv->driver_priv = NULL; - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 4883adcfbb4b..9c182ce501af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -105,8 +105,8 @@ int amdgpu_mes_init(struct amdgpu_device *adev) spin_lock_init(&adev->mes.ring_lock[i]); adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK; - adev->mes.vmid_mask_mmhub = 0xffffff00; - adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00; + adev->mes.vmid_mask_mmhub = 0xFF00; + adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 
0xFFFE : 0xFF00; num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me; if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES) @@ -528,6 +528,18 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, return r; } +int amdgpu_mes_hdp_flush(struct amdgpu_device *adev) +{ + uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask; + + hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev); + hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev); + ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0; + + return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset, + ref_and_mask, ref_and_mask); +} + int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr, uint32_t spi_gdbg_per_vmid_cntl, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 97c137c90f97..e989225b354b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -239,6 +239,7 @@ struct mes_add_queue_input { struct mes_remove_queue_input { uint32_t doorbell_offset; uint64_t gang_context_addr; + bool remove_queue_after_reset; }; struct mes_map_legacy_queue_input { @@ -428,6 +429,7 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev, int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask); +int amdgpu_mes_hdp_flush(struct amdgpu_device *adev); int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr, uint32_t spi_gdbg_per_vmid_cntl, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 20460cfd09bc..dc8d2f52c7d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -326,6 +326,8 @@ struct amdgpu_mode_info { struct drm_property *audio_property; /* FMT dithering */ struct drm_property *dither_property; + /* Adaptive Backlight Modulation (power feature) */ + struct drm_property *abm_level_property; /* hardcoded DFP edid from BIOS */ const struct drm_edid *bios_hardcoded_edid; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 656b8a931dae..52c2d1731aab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -96,6 +96,7 @@ struct amdgpu_bo_va { * if non-zero, cannot unmap from GPU because user queues may still access it */ unsigned int queue_refcount; + atomic_t userq_va_mapped; }; struct amdgpu_bo { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index aa7987d0806c..0b10497d487c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1539,6 +1539,7 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp, uint64_t src_node_id = psp->adev->gmc.xgmi.node_id; uint64_t dst_node_id = node_info.node_id; uint8_t dst_num_hops = node_info.num_hops; + uint8_t dst_is_sharing_enabled = node_info.is_sharing_enabled; uint8_t dst_num_links = node_info.num_links; hive = amdgpu_get_xgmi_hive(psp->adev); @@ -1558,13 +1559,20 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp, continue; mirror_top_info->nodes[j].num_hops = dst_num_hops; - /* - * prevent 0 num_links value re-reflection since reflection + mirror_top_info->nodes[j].is_sharing_enabled = dst_is_sharing_enabled; + /* prevent 0 num_links value re-reflection since 
reflection * criteria is based on num_hops (direct or indirect). - * */ - if (dst_num_links) + if (dst_num_links) { mirror_top_info->nodes[j].num_links = dst_num_links; + /* swap src and dst due to frame of reference */ + for (int k = 0; k < dst_num_links; k++) { + mirror_top_info->nodes[j].port_num[k].src_xgmi_port_num = + node_info.port_num[k].dst_xgmi_port_num; + mirror_top_info->nodes[j].port_num[k].dst_xgmi_port_num = + node_info.port_num[k].src_xgmi_port_num; + } + } break; } @@ -1639,9 +1647,10 @@ int psp_xgmi_get_topology_info(struct psp_context *psp, amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == - IP_VERSION(13, 0, 14); - bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 : - psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG; + IP_VERSION(13, 0, 14) || + amdgpu_sriov_vf(psp->adev); + bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG || + amdgpu_sriov_xgmi_ta_ext_peer_link_en(psp->adev); /* popluate the shared output buffer rather than the cmd input buffer * with node_ids as the input for GET_PEER_LINKS command execution. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c index 123bcf5c2bb1..bacf888735db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c @@ -101,7 +101,6 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf, } amdgpu_gfx_off_ctrl(adev, true); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index e0ee21150860..2a6cf7963dde 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -41,6 +41,7 @@ #include "atom.h" #include "amdgpu_reset.h" #include "amdgpu_psp.h" +#include "amdgpu_ras_mgr.h" #ifdef CONFIG_X86_MCE_AMD #include @@ -149,6 +150,8 @@ static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev); #ifdef CONFIG_X86_MCE_AMD static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev); +static void +amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev); struct mce_notifier_adev_list { struct amdgpu_device *devs[MAX_GPU_INSTANCE]; int num_gpu; @@ -611,6 +614,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, return size; } +static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev); + /** * DOC: AMDGPU RAS debugfs EEPROM table reset interface * @@ -635,6 +640,11 @@ static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, (struct amdgpu_device *)file_inode(f)->i_private; int ret; + if (amdgpu_uniras_enabled(adev)) { + ret = amdgpu_uniras_clear_badpages_info(adev); + return ret ? 
ret : size; + } + ret = amdgpu_ras_eeprom_reset_table( &(amdgpu_ras_get_context(adev)->eeprom_control)); @@ -1542,9 +1552,51 @@ static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev, return ret; } +static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev) +{ + struct ras_cmd_dev_handle req = {0}; + int ret; + + ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO, + &req, sizeof(req), NULL, 0); + if (ret) { + dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret); + return ret; + } + + return 0; +} + +static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev, + struct ras_query_if *info) +{ + struct ras_cmd_block_ecc_info_req req = {0}; + struct ras_cmd_block_ecc_info_rsp rsp = {0}; + int ret; + + if (!info) + return -EINVAL; + + req.block_id = info->head.block; + req.subblock_id = info->head.sub_block_index; + + ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS, + &req, sizeof(req), &rsp, sizeof(rsp)); + if (!ret) { + info->ce_count = rsp.ce_count; + info->ue_count = rsp.ue_count; + info->de_count = rsp.de_count; + } + + return ret; +} + int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) { - return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_uniras_query_block_ecc(adev, info); + else + return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID); } int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, @@ -1596,6 +1648,27 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, return 0; } +static int amdgpu_uniras_error_inject(struct amdgpu_device *adev, + struct ras_inject_if *info) +{ + struct ras_cmd_inject_error_req inject_req; + struct ras_cmd_inject_error_rsp rsp; + + if (!info) + return -EINVAL; + + memset(&inject_req, 0, sizeof(inject_req)); + inject_req.block_id = info->head.block; + inject_req.subblock_id = info->head.sub_block_index; + inject_req.address = info->address; + inject_req.error_type = info->head.type; + inject_req.instance_mask = info->instance_mask; + inject_req.method = info->value; + + return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR, + &inject_req, sizeof(inject_req), &rsp, sizeof(rsp)); +} + /* wrapper of psp_ras_trigger_error */ int amdgpu_ras_error_inject(struct amdgpu_device *adev, struct ras_inject_if *info) @@ -1613,6 +1686,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, info->head.block, info->head.sub_block_index); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_uniras_error_inject(adev, info); + /* inject on guest isn't allowed, return success directly */ if (amdgpu_sriov_vf(adev)) return 0; @@ -1757,7 +1833,9 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev, /* sysfs begin */ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, - struct ras_badpage **bps, unsigned int *count); + struct ras_badpage *bps, uint32_t count, uint32_t start); +static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev, + struct ras_badpage *bps, uint32_t count, uint32_t start); static char *amdgpu_ras_badpage_flags_str(unsigned int flags) { @@ -1815,19 +1893,50 @@ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, unsigned int end = div64_ul(ppos + count - 1, element_size); ssize_t s = 0; struct ras_badpage *bps = NULL; - unsigned int bps_count = 0; + int bps_count = 0, i, status; + uint64_t address; memset(buf, 0, count); - if 
(amdgpu_ras_badpages_read(adev, &bps, &bps_count)) + bps_count = end - start; + bps = kmalloc_array(bps_count, sizeof(*bps), GFP_KERNEL); + if (!bps) return 0; - for (; start < end && start < bps_count; start++) + memset(bps, 0, sizeof(*bps) * bps_count); + + if (amdgpu_uniras_enabled(adev)) + bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start); + else + bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start); + + if (bps_count <= 0) { + kfree(bps); + return 0; + } + + for (i = 0; i < bps_count; i++) { + address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT; + if (amdgpu_ras_check_critical_address(adev, address)) + continue; + + bps[i].size = AMDGPU_GPU_PAGE_SIZE; + + status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, + address); + if (status == -EBUSY) + bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; + else if (status == -ENOENT) + bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; + else + bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED; + s += scnprintf(&buf[s], element_size + 1, "0x%08x : 0x%08x : %1s\n", - bps[start].bp, - bps[start].size, - amdgpu_ras_badpage_flags_str(bps[start].flags)); + bps[i].bp, + bps[i].size, + amdgpu_ras_badpage_flags_str(bps[i].flags)); + } kfree(bps); @@ -1843,12 +1952,42 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); } +static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major, + u32 *minor, u32 *rev) +{ + int i; + + if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev)) + return false; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) { + *major = adev->ip_blocks[i].version->major; + *minor = adev->ip_blocks[i].version->minor; + *rev = adev->ip_blocks[i].version->rev; + return true; + } + } + + return false; +} + static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct amdgpu_ras *con = container_of(attr, struct amdgpu_ras, version_attr); - return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version); + u32 major, minor, rev; + ssize_t size = 0; + + size += sysfs_emit_at(buf, size, "table version: 0x%x\n", + con->eeprom_control.tbl_hdr.version); + + if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev)) + size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n", + major, minor, rev); + + return size; } static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev, @@ -2241,6 +2380,11 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev) amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY)) return; + if (amdgpu_uniras_enabled(adev)) { + amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL); + return; + } + if (adev->nbio.ras && adev->nbio.ras->handle_ras_controller_intr_no_bifring) adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev); @@ -2411,6 +2555,16 @@ int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, struct ras_manager *obj; struct ras_ih_data *data; + if (amdgpu_uniras_enabled(adev)) { + struct ras_ih_info ih_info; + + memset(&ih_info, 0, sizeof(ih_info)); + ih_info.block = info->head.block; + memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry)); + + return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info); + } + obj = amdgpu_ras_find_obj(adev, &info->head); if (!obj) return -EINVAL; @@ -2605,62 +2759,83 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device 
*adev) } } -/* recovery begin */ - -/* return 0 on success. - * caller need free bps. - */ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, - struct ras_badpage **bps, unsigned int *count) + struct ras_badpage *bps, uint32_t count, uint32_t start) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; - int i = 0; - int ret = 0, status; + int r = 0; + uint32_t i; if (!con || !con->eh_data || !bps || !count) return -EINVAL; mutex_lock(&con->recovery_lock); data = con->eh_data; - if (!data || data->count == 0) { - *bps = NULL; - ret = -EINVAL; - goto out; + if (start < data->count) { + for (i = start; i < data->count; i++) { + if (!data->bps[i].ts) + continue; + + bps[r].bp = data->bps[i].retired_page; + r++; + if (r >= count) + break; + } } - - *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL); - if (!*bps) { - ret = -ENOMEM; - goto out; - } - - for (; i < data->count; i++) { - if (!data->bps[i].ts) - continue; - - (*bps)[i] = (struct ras_badpage){ - .bp = data->bps[i].retired_page, - .size = AMDGPU_GPU_PAGE_SIZE, - .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, - }; - - if (amdgpu_ras_check_critical_address(adev, - data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) - continue; - - status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, - data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT); - if (status == -EBUSY) - (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; - else if (status == -ENOENT) - (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; - } - - *count = con->bad_page_num; -out: mutex_unlock(&con->recovery_lock); - return ret; + + return r; +} + +static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev, + struct ras_badpage *bps, uint32_t count, uint32_t start) +{ + struct ras_cmd_bad_pages_info_req cmd_input; + struct ras_cmd_bad_pages_info_rsp *output; + uint32_t group, start_group, end_group; + uint32_t pos, pos_in_group; + int r = 0, i; + + if (!bps || !count) + return -EINVAL; + + output = kmalloc(sizeof(*output), GFP_KERNEL); + if (!output) + return -ENOMEM; + + memset(&cmd_input, 0, sizeof(cmd_input)); + + start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) / + RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + + pos = start; + for (group = start_group; group < end_group; group++) { + memset(output, 0, sizeof(*output)); + cmd_input.group_index = group; + if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES, + &cmd_input, sizeof(cmd_input), output, sizeof(*output))) + goto out; + + if (pos >= output->bp_total_cnt) + goto out; + + pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + for (i = pos_in_group; i < output->bp_in_group; i++, pos++) { + if (!output->records[i].ts) + continue; + + bps[r].bp = output->records[i].retired_page; + r++; + if (r >= count) + goto out; + } + } + +out: + kfree(output); + return r; } static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev, @@ -2748,8 +2923,12 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) type = amdgpu_ras_get_fatal_error_event(adev); list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) { - amdgpu_ras_query_err_status(remote_adev); - amdgpu_ras_log_on_err_counter(remote_adev, type); + if (amdgpu_uniras_enabled(remote_adev)) { + amdgpu_ras_mgr_update_ras_ecc(remote_adev); + } else { + amdgpu_ras_query_err_status(remote_adev); + amdgpu_ras_log_on_err_counter(remote_adev, type); + } } } @@ -2837,8 +3016,13 @@ static int 
amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev, addr_in.ma.err_addr = bps->address; addr_in.ma.socket_id = socket; addr_in.ma.ch_inst = bps->mem_channel; - /* tell RAS TA the node instance is not used */ - addr_in.ma.node_inst = TA_RAS_INV_NODE; + if (!amdgpu_ras_smu_eeprom_supported(adev)) { + /* tell RAS TA the node instance is not used */ + addr_in.ma.node_inst = TA_RAS_INV_NODE; + } else { + addr_in.ma.umc_inst = bps->mcumc_id; + addr_in.ma.node_inst = bps->cu; + } if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) ret = adev->umc.ras->convert_ras_err_addr(adev, err_data, @@ -2981,8 +3165,16 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev, int i = 0; enum amdgpu_memory_partition save_nps; - save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK; - bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT); + if (!amdgpu_ras_smu_eeprom_supported(adev)) { + save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK; + bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT); + } else { + /* if pmfw manages eeprom, save_nps is not stored on eeprom, + * we should always convert mca address into physical address, + * make save_nps different from nps + */ + save_nps = nps + 1; + } if (save_nps == nps) { if (amdgpu_umc_pages_in_a_row(adev, err_data, @@ -3048,7 +3240,8 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, if (from_rom) { /* there is no pa recs in V3, so skip pa recs processing */ - if (control->tbl_hdr.version < RAS_TABLE_VER_V3) { + if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) && + !amdgpu_ras_smu_eeprom_supported(adev)) { for (i = 0; i < pages; i++) { if (control->ras_num_recs - i >= adev->umc.retire_unit) { if ((bps[i].address == bps[i + 1].address) && @@ -3118,7 +3311,13 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, mutex_lock(&con->recovery_lock); control = &con->eeprom_control; data = con->eh_data; - unit_num = data->count / adev->umc.retire_unit - control->ras_num_recs; + if (amdgpu_ras_smu_eeprom_supported(adev)) + unit_num = control->ras_num_recs - + control->ras_num_recs_old; + else + unit_num = data->count / adev->umc.retire_unit - + control->ras_num_recs; + save_count = con->bad_page_num - control->ras_num_bad_pages; mutex_unlock(&con->recovery_lock); @@ -3126,7 +3325,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, *new_cnt = unit_num; /* only new entries are saved */ - if (unit_num > 0) { + if (unit_num && save_count) { /*old asics only save pa to eeprom like before*/ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) { if (amdgpu_ras_eeprom_append(control, @@ -3179,7 +3378,8 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) /*In V3, there is no pa recs, and some cases(when address==0) may be parsed as pa recs, so add verion check to avoid it. 
*/ - if (control->tbl_hdr.version < RAS_TABLE_VER_V3) { + if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) && + !amdgpu_ras_smu_eeprom_supported(adev)) { for (i = 0; i < control->ras_num_recs; i++) { if ((control->ras_num_recs - i) >= adev->umc.retire_unit) { if ((bps[i].address == bps[i + 1].address) && @@ -3590,7 +3790,12 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) if (!con || amdgpu_sriov_vf(adev)) return 0; + if (amdgpu_uniras_enabled(adev)) + return 0; + control = &con->eeprom_control; + con->ras_smu_drv = amdgpu_dpm_get_ras_smu_driver(adev); + ret = amdgpu_ras_eeprom_init(control); control->is_eeprom_valid = !ret; @@ -3751,7 +3956,9 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) mutex_unlock(&con->recovery_lock); amdgpu_ras_critical_region_init(adev); - +#ifdef CONFIG_X86_MCE_AMD + amdgpu_unregister_bad_pages_mca_notifier(adev); +#endif return 0; } /* recovery end */ @@ -3975,7 +4182,6 @@ static void amdgpu_ras_counte_dw(struct work_struct *work) atomic_set(&con->ras_ue_count, ue_count); } - pm_runtime_mark_last_busy(dev->dev); Out: pm_runtime_put_autosuspend(dev->dev); } @@ -4584,6 +4790,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_ struct ras_event_state *event_state; int ret = 0; + if (amdgpu_uniras_enabled(adev)) + return 0; + if (type >= RAS_EVENT_TYPE_COUNT) { ret = -EINVAL; goto out; @@ -4634,20 +4843,18 @@ u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type return id; } -void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) +int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) { if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); enum ras_event_type type = RAS_EVENT_TYPE_FATAL; - u64 event_id; + u64 event_id = RAS_EVENT_INVALID_ID; - if (amdgpu_ras_mark_ras_event(adev, type)) { - dev_err(adev->dev, - "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n"); - return; - } + if (amdgpu_uniras_enabled(adev)) + return 0; - event_id = amdgpu_ras_acquire_event_id(adev, type); + if (!amdgpu_ras_mark_ras_event(adev, type)) + event_id = amdgpu_ras_acquire_event_id(adev, type); RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error" "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); @@ -4656,6 +4863,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; amdgpu_ras_reset_gpu(adev); } + + return -EBUSY; } bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev) @@ -4783,6 +4992,28 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) notifier_registered = true; } } +static void amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev) +{ + int i, j; + + if (!notifier_registered && !mce_adev_list.num_gpu) + return; + for (i = 0, j = 0; i < mce_adev_list.num_gpu; i++) { + if (mce_adev_list.devs[i] == adev) + mce_adev_list.devs[i] = NULL; + if (!mce_adev_list.devs[i]) + ++j; + } + + if (j == mce_adev_list.num_gpu) { + mce_adev_list.num_gpu = 0; + /* Unregister x86 notifier with MCE subsystem. 
*/ + if (notifier_registered) { + mce_unregister_decode_chain(&amdgpu_bad_page_nb); + notifier_registered = false; + } + } +} #endif struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev) @@ -5408,6 +5639,9 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_ras_mgr_is_rma(adev); + if (!con) return false; @@ -5490,3 +5724,25 @@ bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr return ret; } + +void amdgpu_ras_pre_reset(struct amdgpu_device *adev, + struct list_head *device_list) +{ + struct amdgpu_device *tmp_adev = NULL; + + list_for_each_entry(tmp_adev, device_list, reset_list) { + if (amdgpu_uniras_enabled(tmp_adev)) + amdgpu_ras_mgr_pre_reset(tmp_adev); + } +} + +void amdgpu_ras_post_reset(struct amdgpu_device *adev, + struct list_head *device_list) +{ + struct amdgpu_device *tmp_adev = NULL; + + list_for_each_entry(tmp_adev, device_list, reset_list) { + if (amdgpu_uniras_enabled(tmp_adev)) + amdgpu_ras_mgr_post_reset(tmp_adev); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 6cf0dfd38be8..ff44190d7d98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -503,7 +503,34 @@ struct ras_critical_region { uint64_t size; }; +struct ras_eeprom_table_version { + uint32_t minor : 16; + uint32_t major : 16; +}; + +struct ras_eeprom_smu_funcs { + int (*get_ras_table_version)(struct amdgpu_device *adev, + uint32_t *table_version); + int (*get_badpage_count)(struct amdgpu_device *adev, uint32_t *count, uint32_t timeout); + int (*get_badpage_mca_addr)(struct amdgpu_device *adev, uint16_t index, uint64_t *mca_addr); + int (*set_timestamp)(struct amdgpu_device *adev, uint64_t timestamp); + int (*get_timestamp)(struct amdgpu_device *adev, + uint16_t index, uint64_t *timestamp); + int (*get_badpage_ipid)(struct amdgpu_device *adev, uint16_t index, uint64_t *ipid); + int (*erase_ras_table)(struct amdgpu_device *adev, uint32_t *result); +}; + +enum ras_smu_feature_flags { + RAS_SMU_FEATURE_BIT__RAS_EEPROM = BIT_ULL(0), +}; + +struct ras_smu_drv { + const struct ras_eeprom_smu_funcs *smu_eeprom_funcs; + void (*ras_smu_feature_flags)(struct amdgpu_device *adev, uint64_t *flags); +}; + struct amdgpu_ras { + void *ras_mgr; /* ras infrastructure */ /* for ras itself. 
*/ uint32_t features; @@ -590,6 +617,10 @@ struct amdgpu_ras { /* Protect poison injection */ struct mutex poison_lock; + + /* Disable/Enable uniras switch */ + bool uniras_enabled; + const struct ras_smu_drv *ras_smu_drv; }; struct ras_fs_data { @@ -909,7 +940,7 @@ static inline void amdgpu_ras_intr_cleared(void) atomic_set(&amdgpu_ras_in_intr, 0); } -void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); +int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready); @@ -1008,4 +1039,9 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id, const char *fmt, ...); bool amdgpu_ras_is_rma(struct amdgpu_device *adev); + +void amdgpu_ras_pre_reset(struct amdgpu_device *adev, + struct list_head *device_list); +void amdgpu_ras_post_reset(struct amdgpu_device *adev, + struct list_head *device_list); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 3eb3fb55ccb0..64dd7a81bff5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -32,6 +32,7 @@ #include #include "amdgpu_reset.h" +#include "amdgpu_ras_mgr.h" /* These are memory addresses as would be seen by one or more EEPROM * chips strung on the I2C bus, usually by manipulating pins 1-3 of a @@ -123,6 +124,8 @@ RAS_TABLE_V2_1_INFO_SIZE) \ / RAS_TABLE_RECORD_SIZE) +#define RAS_SMU_MESSAGE_TIMEOUT_MS 1000 /* 1s */ + /* Given a zero-based index of an EEPROM RAS record, yields the EEPROM * offset off of RAS_TABLE_START. That is, this is something you can * add to control->i2c_address, and then tell I2C layer to read @@ -443,40 +446,57 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + u32 erase_res = 0; u8 csum; int res; mutex_lock(&control->ras_tbl_mutex); - hdr->header = RAS_TABLE_HDR_VAL; - amdgpu_ras_set_eeprom_table_version(control); + if (!amdgpu_ras_smu_eeprom_supported(adev)) { + hdr->header = RAS_TABLE_HDR_VAL; + amdgpu_ras_set_eeprom_table_version(control); - if (hdr->version >= RAS_TABLE_VER_V2_1) { - hdr->first_rec_offset = RAS_RECORD_START_V2_1; - hdr->tbl_size = RAS_TABLE_HEADER_SIZE + - RAS_TABLE_V2_1_INFO_SIZE; - rai->rma_status = GPU_HEALTH_USABLE; - /** - * GPU health represented as a percentage. - * 0 means worst health, 100 means fully health. - */ - rai->health_percent = 100; - /* ecc_page_threshold = 0 means disable bad page retirement */ - rai->ecc_page_threshold = con->bad_page_cnt_threshold; + if (hdr->version >= RAS_TABLE_VER_V2_1) { + hdr->first_rec_offset = RAS_RECORD_START_V2_1; + hdr->tbl_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE; + rai->rma_status = GPU_HEALTH_USABLE; + + control->ras_record_offset = RAS_RECORD_START_V2_1; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1; + /** + * GPU health represented as a percentage. + * 0 means worst health, 100 means fully health. 
+ */ + rai->health_percent = 100; + /* ecc_page_threshold = 0 means disable bad page retirement */ + rai->ecc_page_threshold = con->bad_page_cnt_threshold; + } else { + hdr->first_rec_offset = RAS_RECORD_START; + hdr->tbl_size = RAS_TABLE_HEADER_SIZE; + + control->ras_record_offset = RAS_RECORD_START; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT; + } + + csum = __calc_hdr_byte_sum(control); + if (hdr->version >= RAS_TABLE_VER_V2_1) + csum += __calc_ras_info_byte_sum(control); + csum = -csum; + hdr->checksum = csum; + res = __write_table_header(control); + if (!res && hdr->version > RAS_TABLE_VER_V1) + res = __write_table_ras_info(control); } else { - hdr->first_rec_offset = RAS_RECORD_START; - hdr->tbl_size = RAS_TABLE_HEADER_SIZE; + res = amdgpu_ras_smu_erase_ras_table(adev, &erase_res); + if (res || erase_res) { + dev_warn(adev->dev, "RAS EEPROM reset failed, res:%d result:%d", + res, erase_res); + if (!res) + res = -EIO; + } } - csum = __calc_hdr_byte_sum(control); - if (hdr->version >= RAS_TABLE_VER_V2_1) - csum += __calc_ras_info_byte_sum(control); - csum = -csum; - hdr->checksum = csum; - res = __write_table_header(control); - if (!res && hdr->version > RAS_TABLE_VER_V1) - res = __write_table_ras_info(control); - control->ras_num_recs = 0; control->ras_num_bad_pages = 0; control->ras_num_mca_recs = 0; @@ -556,6 +576,9 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_ras_mgr_check_eeprom_safety_watermark(adev); + if (!__is_ras_eeprom_supported(adev) || !amdgpu_bad_page_threshold) return false; @@ -766,7 +789,8 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) "Saved bad pages %d reaches threshold value %d\n", control->ras_num_bad_pages, ras->bad_page_cnt_threshold); - if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev)) + if (adev->cper.enabled && !amdgpu_uniras_enabled(adev) && + amdgpu_cper_generate_bp_threshold_record(adev)) dev_warn(adev->dev, "fail to generate bad page threshold cper records\n"); if ((amdgpu_bad_page_threshold != -1) && @@ -849,6 +873,71 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) return res; } +int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + int ret, retry = 20; + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return 0; + + control->ras_num_recs_old = control->ras_num_recs; + + do { + /* 1000ms timeout is long enough, smu_get_badpage_count won't + * return -EBUSY before timeout. + */ + ret = amdgpu_ras_smu_get_badpage_count(adev, + &(control->ras_num_recs), RAS_SMU_MESSAGE_TIMEOUT_MS); + if (!ret && + (control->ras_num_recs_old == control->ras_num_recs)) { + /* record number update in PMFW needs some time, + * smu_get_badpage_count may return immediately without + * count update, sleep for a while and retry again. 
+ */ + msleep(50); + retry--; + } else { + break; + } + } while (retry); + + /* no update of record number is not a real failure, + * don't print warning here + */ + if (!ret && (control->ras_num_recs_old == control->ras_num_recs)) + ret = -EINVAL; + + return ret; +} + +static int amdgpu_ras_smu_eeprom_append(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev) || !con) + return 0; + + control->ras_num_bad_pages = con->bad_page_num; + + if (amdgpu_bad_page_threshold != 0 && + control->ras_num_bad_pages > con->bad_page_cnt_threshold) { + dev_warn(adev->dev, + "Saved bad pages %d reaches threshold value %d\n", + control->ras_num_bad_pages, con->bad_page_cnt_threshold); + + if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev)) + dev_warn(adev->dev, "fail to generate bad page threshold cper records\n"); + + if ((amdgpu_bad_page_threshold != -1) && + (amdgpu_bad_page_threshold != -2)) + con->is_rma = true; + } + + return 0; +} + /** * amdgpu_ras_eeprom_append -- append records to the EEPROM RAS table * @control: pointer to control structure @@ -873,6 +962,9 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, if (!__is_ras_eeprom_supported(adev)) return 0; + if (amdgpu_ras_smu_eeprom_supported(adev)) + return amdgpu_ras_smu_eeprom_append(control); + if (num == 0) { dev_err(adev->dev, "will not append 0 records\n"); return -EINVAL; @@ -948,6 +1040,50 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, return res; } +int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *record, u32 rec_idx, + const u32 num) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + uint64_t ts, end_idx; + int i, ret; + u64 mca, ipid; + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return 0; + + if (!adev->umc.ras || !adev->umc.ras->mca_ipid_parse) + return -EOPNOTSUPP; + + end_idx = rec_idx + num; + for (i = rec_idx; i < end_idx; i++) { + ret = amdgpu_ras_smu_get_badpage_mca_addr(adev, i, &mca); + if (ret) + return ret; + + ret = amdgpu_ras_smu_get_badpage_ipid(adev, i, &ipid); + if (ret) + return ret; + + ret = amdgpu_ras_smu_get_timestamp(adev, i, &ts); + if (ret) + return ret; + + record[i - rec_idx].address = mca; + /* retired_page (pa) is unused now */ + record[i - rec_idx].retired_page = 0x1ULL; + record[i - rec_idx].ts = ts; + record[i - rec_idx].err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + + adev->umc.ras->mca_ipid_parse(adev, ipid, + (uint32_t *)&(record[i - rec_idx].cu), + (uint32_t *)&(record[i - rec_idx].mem_channel), + (uint32_t *)&(record[i - rec_idx].mcumc_id), NULL); + } + + return 0; +} + /** * amdgpu_ras_eeprom_read -- read EEPROM * @control: pointer to control structure @@ -969,6 +1105,9 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, u8 *buf, *pp; u32 g0, g1; + if (amdgpu_ras_smu_eeprom_supported(adev)) + return amdgpu_ras_eeprom_read_idx(control, record, 0, num); + if (!__is_ras_eeprom_supported(adev)) return 0; @@ -1140,6 +1279,10 @@ static ssize_t amdgpu_ras_debugfs_table_read(struct file *f, char __user *buf, int res = -EFAULT; size_t data_len; + /* pmfw manages eeprom data by itself */ + if (amdgpu_ras_smu_eeprom_supported(adev)) + return 0; + mutex_lock(&control->ras_tbl_mutex); /* We want *pos - data_len > 0, which means there's @@ -1370,6 +1513,42 @@ static int 
__read_table_ras_info(struct amdgpu_ras_eeprom_control *control) return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res; } +static int amdgpu_ras_smu_eeprom_init(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + uint64_t local_time; + int res; + + ras->is_rma = false; + + if (!__is_ras_eeprom_supported(adev)) + return 0; + mutex_init(&control->ras_tbl_mutex); + + res = amdgpu_ras_smu_get_table_version(adev, &(hdr->version)); + if (res) + return res; + + res = amdgpu_ras_smu_get_badpage_count(adev, + &(control->ras_num_recs), 100); + if (res) + return res; + + local_time = (uint64_t)ktime_get_real_seconds(); + res = amdgpu_ras_smu_set_timestamp(adev, local_time); + if (res) + return res; + + control->ras_max_record_count = 4000; + + control->ras_num_mca_recs = 0; + control->ras_num_pa_recs = 0; + + return 0; +} + int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) { struct amdgpu_device *adev = to_amdgpu_device(control); @@ -1378,6 +1557,9 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); int res; + if (amdgpu_ras_smu_eeprom_supported(adev)) + return amdgpu_ras_smu_eeprom_init(control); + ras->is_rma = false; if (!__is_ras_eeprom_supported(adev)) @@ -1444,6 +1626,47 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) return 0; } +static int amdgpu_ras_smu_eeprom_check(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + if (!__is_ras_eeprom_supported(adev)) + return 0; + + control->ras_num_bad_pages = ras->bad_page_num; + + if ((ras->bad_page_cnt_threshold < control->ras_num_bad_pages) && + amdgpu_bad_page_threshold != 0) { + dev_warn(adev->dev, + "RAS records:%d exceed threshold:%d\n", + control->ras_num_bad_pages, ras->bad_page_cnt_threshold); + if ((amdgpu_bad_page_threshold == -1) || + (amdgpu_bad_page_threshold == -2)) { + dev_warn(adev->dev, + "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n"); + } else { + ras->is_rma = true; + dev_warn(adev->dev, + "User defined threshold is set, runtime service will be halt when threshold is reached\n"); + } + + return 0; + } + + dev_dbg(adev->dev, + "Found existing EEPROM table with %d records", + control->ras_num_bad_pages); + + /* Warn if we are at 90% of the threshold or above + */ + if (10 * control->ras_num_bad_pages >= 9 * ras->bad_page_cnt_threshold) + dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d", + control->ras_num_bad_pages, + ras->bad_page_cnt_threshold); + return 0; +} + int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) { struct amdgpu_device *adev = to_amdgpu_device(control); @@ -1451,6 +1674,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); int res = 0; + if (amdgpu_ras_smu_eeprom_supported(adev)) + return amdgpu_ras_smu_eeprom_check(control); + if (!__is_ras_eeprom_supported(adev)) return 0; @@ -1541,7 +1767,8 @@ void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev) struct amdgpu_ras_eeprom_control *control; int res; - if (!__is_ras_eeprom_supported(adev) || !ras) + if (!__is_ras_eeprom_supported(adev) || !ras || + amdgpu_ras_smu_eeprom_supported(adev)) return; control 
= &ras->eeprom_control; if (!control->is_eeprom_valid) @@ -1561,4 +1788,143 @@ void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev) control->is_eeprom_valid = false; } return; -} \ No newline at end of file +} + +static const struct ras_smu_drv *amdgpu_ras_get_smu_ras_drv(struct amdgpu_device *adev) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + if (!ras) + return NULL; + + return ras->ras_smu_drv; +} + +static uint64_t amdgpu_ras_smu_get_feature_flags(struct amdgpu_device *adev) +{ + const struct ras_smu_drv *ras_smu_drv = amdgpu_ras_get_smu_ras_drv(adev); + uint64_t flags = 0ULL; + + if (!ras_smu_drv) + goto out; + + if (ras_smu_drv->ras_smu_feature_flags) + ras_smu_drv->ras_smu_feature_flags(adev, &flags); + +out: + return flags; +} + +bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + uint64_t flags = 0ULL; + + if (!__is_ras_eeprom_supported(adev) || !smu_ras_drv) + return false; + + if (!smu_ras_drv->smu_eeprom_funcs) + return false; + + flags = amdgpu_ras_smu_get_feature_flags(adev); + + return !!(flags & RAS_SMU_FEATURE_BIT__RAS_EEPROM); +} + +int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev, + uint32_t *table_version) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->get_ras_table_version) + return smu_ras_drv->smu_eeprom_funcs->get_ras_table_version(adev, + table_version); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev, + uint32_t *count, uint32_t timeout) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->get_badpage_count) + return smu_ras_drv->smu_eeprom_funcs->get_badpage_count(adev, + count, timeout); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev, + uint16_t index, uint64_t *mca_addr) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr) + return smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr(adev, + index, mca_addr); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev, + uint64_t timestamp) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->set_timestamp) + return smu_ras_drv->smu_eeprom_funcs->set_timestamp(adev, + timestamp); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev, + uint16_t index, uint64_t *timestamp) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->get_timestamp) + return smu_ras_drv->smu_eeprom_funcs->get_timestamp(adev, + index, timestamp); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev, + uint16_t index, uint64_t *ipid) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if 
(smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid) + return smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid(adev, + index, ipid); + return -EOPNOTSUPP; +} + +int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev, + uint32_t *result) +{ + const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev); + + if (!amdgpu_ras_smu_eeprom_supported(adev)) + return -EOPNOTSUPP; + + if (smu_ras_drv->smu_eeprom_funcs->erase_ras_table) + return smu_ras_drv->smu_eeprom_funcs->erase_ras_table(adev, + result); + return -EOPNOTSUPP; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index ebfca4cb5688..2e5d63957e71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -82,6 +82,7 @@ struct amdgpu_ras_eeprom_control { /* Number of records in the table. */ u32 ras_num_recs; + u32 ras_num_recs_old; /* the bad page number is ras_num_recs or * ras_num_recs * umc.retire_unit @@ -163,6 +164,35 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control); void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev); +bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev); + +int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev, + uint32_t *table_version); + +int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev, + uint32_t *count, uint32_t timeout); + +int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev, + uint16_t index, uint64_t *mca_addr); + +int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev, + uint64_t timestamp); + +int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev, + uint16_t index, uint64_t *timestamp); + +int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev, + uint16_t index, uint64_t *ipid); + +int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev, + uint32_t *result); + +int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *record, u32 rec_idx, + const u32 num); + +int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control); + extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops; extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 5ec5c3ff22bb..c596b6df2e2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -33,6 +33,7 @@ #include #include "amdgpu.h" +#include "amdgpu_ras_mgr.h" #include "atom.h" /* @@ -159,8 +160,16 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) */ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - while (ib->length_dw & ring->funcs->align_mask) - ib->ptr[ib->length_dw++] = ring->funcs->nop; + u32 align_mask = ring->funcs->align_mask; + u32 count = ib->length_dw & align_mask; + + if (count) { + count = align_mask + 1 - count; + + memset32(&ib->ptr[ib->length_dw], ring->funcs->nop, count); + + ib->length_dw += count; + } } /** @@ -460,9 +469,6 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, ktime_t deadline; bool ret; - if (unlikely(ring->adev->debug_disable_soft_recovery)) - return false; - deadline = ktime_add_us(ktime_get(), 10000); if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence) @@ -490,6 +496,66 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned 
int vmid, */ #if defined(CONFIG_DEBUG_FS) +static ssize_t amdgpu_ras_cper_debugfs_read(struct file *f, char __user *buf, + size_t size, loff_t *offset) +{ + const uint8_t ring_header_size = 12; + struct amdgpu_ring *ring = file_inode(f)->i_private; + struct ras_cmd_cper_snapshot_req *snapshot_req __free(kfree) = + kzalloc(sizeof(struct ras_cmd_cper_snapshot_req), GFP_KERNEL); + struct ras_cmd_cper_snapshot_rsp *snapshot_rsp __free(kfree) = + kzalloc(sizeof(struct ras_cmd_cper_snapshot_rsp), GFP_KERNEL); + struct ras_cmd_cper_record_req *record_req __free(kfree) = + kzalloc(sizeof(struct ras_cmd_cper_record_req), GFP_KERNEL); + struct ras_cmd_cper_record_rsp *record_rsp __free(kfree) = + kzalloc(sizeof(struct ras_cmd_cper_record_rsp), GFP_KERNEL); + uint8_t *ring_header __free(kfree) = + kzalloc(ring_header_size, GFP_KERNEL); + uint32_t total_cper_num; + uint64_t start_cper_id; + int r; + + if (!snapshot_req || !snapshot_rsp || !record_req || !record_rsp || + !ring_header) + return -ENOMEM; + + if (!(*offset)) { + /* Need at least 12 bytes for the header on the first read */ + if (size < ring_header_size) + return -EINVAL; + + if (copy_to_user(buf, ring_header, ring_header_size)) + return -EFAULT; + buf += ring_header_size; + size -= ring_header_size; + } + + r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, + RAS_CMD__GET_CPER_SNAPSHOT, + snapshot_req, sizeof(struct ras_cmd_cper_snapshot_req), + snapshot_rsp, sizeof(struct ras_cmd_cper_snapshot_rsp)); + if (r || !snapshot_rsp->total_cper_num) + return r; + + start_cper_id = snapshot_rsp->start_cper_id; + total_cper_num = snapshot_rsp->total_cper_num; + + record_req->buf_ptr = (uint64_t)(uintptr_t)buf; + record_req->buf_size = size; + record_req->cper_start_id = start_cper_id + *offset; + record_req->cper_num = total_cper_num; + r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, RAS_CMD__GET_CPER_RECORD, + record_req, sizeof(struct ras_cmd_cper_record_req), + record_rsp, sizeof(struct ras_cmd_cper_record_rsp)); + if (r) + return r; + + r = *offset ? 
record_rsp->real_data_size : record_rsp->real_data_size + ring_header_size; + (*offset) += record_rsp->real_cper_num; + + return r; +} + /* Layout of file is 12 bytes consisting of * - rptr * - wptr @@ -506,6 +572,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf, loff_t i; int r; + if (ring->funcs->type == AMDGPU_RING_TYPE_CPER && amdgpu_uniras_enabled(ring->adev)) + return amdgpu_ras_cper_debugfs_read(f, buf, size, pos); + if (*pos & 3 || size & 3) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 4b46e3c26ff3..7a27c6c4bb44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -83,6 +83,7 @@ enum amdgpu_ring_type { AMDGPU_RING_TYPE_MES, AMDGPU_RING_TYPE_UMSCH_MM, AMDGPU_RING_TYPE_CPER, + AMDGPU_RING_TYPE_MAX, }; enum amdgpu_ib_pool_type { @@ -147,16 +148,14 @@ struct amdgpu_fence { u64 wptr; /* fence context for resets */ u64 context; - uint32_t seq; }; extern const struct drm_sched_backend_ops amdgpu_sched_ops; -void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring); void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error); void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af); -void amdgpu_fence_save_wptr(struct dma_fence *fence); +void amdgpu_fence_save_wptr(struct amdgpu_fence *af); int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, @@ -166,8 +165,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev); int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev); -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, - struct amdgpu_fence *af, unsigned int flags); +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af, + unsigned int flags); int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, uint32_t timeout); bool amdgpu_fence_process(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c index 41ebe690eeff..3739be1b71e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c @@ -159,7 +159,6 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u dev_err(adev->dev, "Invalid input: %s\n", str); } - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 9d568c16beb1..2b931e855abd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -188,7 +188,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, struct amdgpu_job *job; void *cpu_addr; uint64_t flags; - unsigned int i; int r; BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < @@ -255,16 +254,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT]; amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr); } else { - dma_addr_t dma_address; + u64 pa = mm_cur->start + adev->vm_manager.vram_base_offset; - dma_address = mm_cur->start; - dma_address += 
adev->vm_manager.vram_base_offset; - - for (i = 0; i < num_pages; ++i) { - amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address, - flags, cpu_addr); - dma_address += PAGE_SIZE; - } + amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr); } dma_fence_put(amdgpu_job_submit(job)); @@ -286,12 +278,13 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, * move and different for a BO to BO copy. * */ -int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, - const struct amdgpu_copy_mem *src, - const struct amdgpu_copy_mem *dst, - uint64_t size, bool tmz, - struct dma_resv *resv, - struct dma_fence **f) +__attribute__((nonnull)) +static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, + const struct amdgpu_copy_mem *src, + const struct amdgpu_copy_mem *dst, + uint64_t size, bool tmz, + struct dma_resv *resv, + struct dma_fence **f) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_res_cursor src_mm, dst_mm; @@ -365,9 +358,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, } error: mutex_unlock(&adev->mman.gtt_window_lock); - if (f) - *f = dma_fence_get(fence); - dma_fence_put(fence); + *f = fence; return r; } @@ -706,10 +697,11 @@ struct amdgpu_ttm_tt { * memory and start HMM tracking CPU page table update * * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only - * once afterwards to stop HMM tracking + * once afterwards to stop HMM tracking. Its the caller responsibility to ensure + * that range is a valid memory and it is freed too. */ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range **range) + struct amdgpu_hmm_range *range) { struct ttm_tt *ttm = bo->tbo.ttm; struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); @@ -719,9 +711,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, bool readonly; int r = 0; - /* Make sure get_user_pages_done() can cleanup gracefully */ - *range = NULL; - mm = bo->notifier.mm; if (unlikely(!mm)) { DRM_DEBUG_DRIVER("BO is not registered?\n"); @@ -756,38 +745,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, return r; } -/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations - */ -void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, - struct hmm_range *range) -{ - struct amdgpu_ttm_tt *gtt = (void *)ttm; - - if (gtt && gtt->userptr && range) - amdgpu_hmm_range_get_pages_done(range); -} - -/* - * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change - * Check if the pages backing this ttm range have been invalidated - * - * Returns: true if pages are still valid - */ -bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, - struct hmm_range *range) -{ - struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); - - if (!gtt || !gtt->userptr || !range) - return false; - - DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n", - gtt->userptr, ttm->num_pages); - - WARN_ONCE(!range->hmm_pfns, "No user pages to check\n"); - - return !amdgpu_hmm_range_get_pages_done(range); -} #endif /* @@ -797,12 +754,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, * that backs user memory and will ultimately be mapped into the device * address space. */ -void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range) +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range) { unsigned long i; for (i = 0; i < ttm->num_pages; ++i) - ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL; + ttm->pages[i] = range ? 
hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL; } /* @@ -1529,6 +1486,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, if (r) goto out; + mutex_lock(&adev->mman.gtt_window_lock); amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm); src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start; @@ -1543,6 +1501,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, WARN_ON(job->ibs[0].length_dw > num_dw); fence = amdgpu_job_submit(job); + mutex_unlock(&adev->mman.gtt_window_lock); if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout)) r = -ETIMEDOUT; @@ -1804,18 +1763,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; } - if (!adev->gmc.is_app_apu) { - ret = amdgpu_bo_create_kernel_at( - adev, adev->gmc.real_vram_size - reserve_size, - reserve_size, &adev->mman.fw_reserved_memory, NULL); - if (ret) { - dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret); - amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, - NULL, NULL); - return ret; - } - } else { - DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n"); + ret = amdgpu_bo_create_kernel_at( + adev, adev->gmc.real_vram_size - reserve_size, reserve_size, + &adev->mman.fw_reserved_memory, NULL); + if (ret) { + dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, + NULL); + return ret; } return 0; @@ -1837,7 +1792,7 @@ static int amdgpu_ttm_pools_init(struct amdgpu_device *adev) for (i = 0; i < adev->gmc.num_mem_partitions; i++) { ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev, adev->gmc.mem_partitions[i].numa.node, - false, false); + TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M))); } return 0; } @@ -1930,8 +1885,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, adev_to_drm(adev)->anon_inode->i_mapping, adev_to_drm(adev)->vma_offset_manager, - adev->need_swiotlb, - dma_addressing_limited(adev->dev)); + (adev->need_swiotlb ? + TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) | + (dma_addressing_limited(adev->dev) ? + TTM_ALLOCATION_POOL_USE_DMA32 : 0) | + TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M))); if (r) { dev_err(adev->dev, "failed initializing buffer object driver(%d).\n", r); @@ -1980,19 +1938,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; /* - *The reserved vram for driver must be pinned to the specified - *place on the VRAM, so reserve it early. + * The reserved VRAM for the driver must be pinned to a specific + * location in VRAM, so reserve it early. */ r = amdgpu_ttm_drv_reserve_vram_init(adev); if (r) return r; /* - * only NAVI10 and onwards ASIC support for IP discovery. - * If IP discovery enabled, a block of memory should be - * reserved for IP discovey. + * only NAVI10 and later ASICs support IP discovery. + * If IP discovery is enabled, a block of memory should be + * reserved for it. */ - if (adev->mman.discovery_bin) { + if (adev->discovery.reserve_tmr) { r = amdgpu_ttm_reserve_tmr(adev); if (r) return r; @@ -2229,8 +2187,10 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) } else { drm_sched_entity_destroy(&adev->mman.high_pr); drm_sched_entity_destroy(&adev->mman.low_pr); - dma_fence_put(man->move); - man->move = NULL; + /* Drop all the old fences since re-creating the scheduler entities + * will allocate new contexts. 
+ */ + ttm_resource_manager_cleanup(man); } /* this just adjusts TTM size idea, which sets lpfn to the correct value */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 0be2728aa872..577ee04ce0bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -28,6 +28,7 @@ #include #include #include "amdgpu_vram_mgr.h" +#include "amdgpu_hmm.h" #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) @@ -82,9 +83,6 @@ struct amdgpu_mman { uint64_t stolen_reserved_offset; uint64_t stolen_reserved_size; - /* discovery */ - uint8_t *discovery_bin; - uint32_t discovery_tmr_size; /* fw reserved memory */ struct amdgpu_bo *fw_reserved_memory; struct amdgpu_bo *fw_reserved_memory_extend; @@ -170,12 +168,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, struct dma_resv *resv, struct dma_fence **fence, bool direct_submit, bool vm_needs_flush, uint32_t copy_flags); -int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, - const struct amdgpu_copy_mem *src, - const struct amdgpu_copy_mem *dst, - uint64_t size, bool tmz, - struct dma_resv *resv, - struct dma_fence **f); int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo, struct dma_resv *resv, struct dma_fence **fence); @@ -192,29 +184,16 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range **range); -void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, - struct hmm_range *range); -bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, - struct hmm_range *range); + struct amdgpu_hmm_range *range); #else static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range **range) + struct amdgpu_hmm_range *range) { return -EPERM; } -static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, - struct hmm_range *range) -{ -} -static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, - struct hmm_range *range) -{ - return false; -} #endif -void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range); +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range); int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo, uint64_t *user_addr); int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 2e039fb778ea..3f0b0e9af4f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -24,6 +24,7 @@ #include #include "amdgpu.h" #include "umc_v6_7.h" +#include "amdgpu_ras_mgr.h" #define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms #define MAX_UMC_HASH_STRING_SIZE 256 @@ -96,67 +97,96 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_ras_eeprom_control *control = &con->eeprom_control; unsigned int error_query_mode; int ret = 0; unsigned long err_count; amdgpu_ras_get_error_query_mode(adev, &error_query_mode); + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if (!err_data->err_addr) + dev_warn(adev->dev, 
+ "Failed to alloc memory for umc error address record!\n"); + else + err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query; + mutex_lock(&con->page_retirement_lock); - ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc)); - if (ret == -EOPNOTSUPP && - error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { - if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && - adev->umc.ras->ras_block.hw_ops->query_ras_error_count) - adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status); + if (!amdgpu_ras_smu_eeprom_supported(adev)) { + ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc)); + if (ret == -EOPNOTSUPP && + error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { + if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && + adev->umc.ras->ras_block.hw_ops->query_ras_error_count) + adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, + ras_error_status); - if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && - adev->umc.ras->ras_block.hw_ops->query_ras_error_address && - adev->umc.max_ras_err_cnt_per_query) { - err_data->err_addr = - kcalloc(adev->umc.max_ras_err_cnt_per_query, - sizeof(struct eeprom_table_record), GFP_KERNEL); + if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && + adev->umc.ras->ras_block.hw_ops->query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); - /* still call query_ras_error_address to clear error status - * even NOMEM error is encountered - */ - if(!err_data->err_addr) - dev_warn(adev->dev, "Failed to alloc memory for " - "umc error address record!\n"); - else - err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query; + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if (!err_data->err_addr) + dev_warn(adev->dev, + "Failed to alloc memory for umc error address record!\n"); + else + err_data->err_addr_len = + adev->umc.max_ras_err_cnt_per_query; - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status); + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, + ras_error_status); + } + } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY || + (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) { + if (adev->umc.ras && + adev->umc.ras->ecc_info_query_ras_error_count) + adev->umc.ras->ecc_info_query_ras_error_count(adev, + ras_error_status); + + if (adev->umc.ras && + adev->umc.ras->ecc_info_query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if (!err_data->err_addr) + dev_warn(adev->dev, + "Failed to alloc memory for umc error address record!\n"); + else + err_data->err_addr_len = + adev->umc.max_ras_err_cnt_per_query; + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.ras->ecc_info_query_ras_error_address(adev, + ras_error_status); + } } - } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY || - (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) { - if (adev->umc.ras && - 
adev->umc.ras->ecc_info_query_ras_error_count) - adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status); - - if (adev->umc.ras && - adev->umc.ras->ecc_info_query_ras_error_address && - adev->umc.max_ras_err_cnt_per_query) { - err_data->err_addr = - kcalloc(adev->umc.max_ras_err_cnt_per_query, - sizeof(struct eeprom_table_record), GFP_KERNEL); - - /* still call query_ras_error_address to clear error status - * even NOMEM error is encountered - */ - if(!err_data->err_addr) - dev_warn(adev->dev, "Failed to alloc memory for " - "umc error address record!\n"); - else - err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query; - - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status); + } else { + if (!amdgpu_ras_eeprom_update_record_num(control)) { + err_data->err_addr_cnt = err_data->de_count = + control->ras_num_recs - control->ras_num_recs_old; + amdgpu_ras_eeprom_read_idx(control, err_data->err_addr, + control->ras_num_recs_old, err_data->de_count); } } @@ -166,7 +196,7 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, if ((amdgpu_bad_page_threshold != 0) && err_data->err_addr_cnt) { amdgpu_ras_add_bad_pages(adev, err_data->err_addr, - err_data->err_addr_cnt, false); + err_data->err_addr_cnt, amdgpu_ras_smu_eeprom_supported(adev)); amdgpu_ras_save_bad_pages(adev, &err_count); amdgpu_dpm_send_hbm_bad_pages_num(adev, @@ -244,6 +274,15 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev, } amdgpu_ras_error_data_fini(&err_data); + } else if (amdgpu_uniras_enabled(adev)) { + struct ras_ih_info ih_info = {0}; + + ih_info.block = block; + ih_info.pasid = pasid; + ih_info.reset = reset; + ih_info.pasid_fn = pasid_fn; + ih_info.data = data; + amdgpu_ras_mgr_handle_consumer_interrupt(adev, &ih_info); } else { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); int ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index ec203f9e5ffa..28dff750c47e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -113,6 +113,8 @@ struct amdgpu_umc_ras { uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev, uint64_t mca_addr, uint64_t retired_page); void (*get_retire_flip_bits)(struct amdgpu_device *adev); + void (*mca_ipid_parse)(struct amdgpu_device *adev, uint64_t ipid, + uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid); }; struct amdgpu_umc_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c index 1add21160d21..9a969175900e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -25,10 +25,13 @@ #include #include #include +#include #include "amdgpu.h" +#include "amdgpu_reset.h" #include "amdgpu_vm.h" #include "amdgpu_userq.h" +#include "amdgpu_hmm.h" #include "amdgpu_userq_fence.h" u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev) @@ -44,10 +47,130 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev) return userq_ip_mask; } -int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, - u64 expected_size) +static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev, + enum amdgpu_ring_type ring_type, int reset_type) +{ + + if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX) + return false; + + switch (ring_type) { + case AMDGPU_RING_TYPE_GFX: + if (adev->gfx.gfx_supported_reset & 
reset_type) + return true; + break; + case AMDGPU_RING_TYPE_COMPUTE: + if (adev->gfx.compute_supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_SDMA: + if (adev->sdma.supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_VCN_DEC: + case AMDGPU_RING_TYPE_VCN_ENC: + if (adev->vcn.supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_VCN_JPEG: + if (adev->jpeg.supported_reset & reset_type) + return true; + break; + default: + break; + } + return false; +} + +static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev) +{ + if (amdgpu_device_should_recover_gpu(adev)) { + amdgpu_reset_domain_schedule(adev->reset_domain, + &adev->userq_reset_work); + /* Wait for the reset job to complete */ + flush_work(&adev->userq_reset_work); + } +} + +static int +amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr) +{ + struct amdgpu_device *adev = uq_mgr->adev; + const int queue_types[] = { + AMDGPU_RING_TYPE_COMPUTE, + AMDGPU_RING_TYPE_GFX, + AMDGPU_RING_TYPE_SDMA + }; + const int num_queue_types = ARRAY_SIZE(queue_types); + bool gpu_reset = false; + int r = 0; + int i; + + /* Warning if current process mutex is not held */ + WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex)); + + if (unlikely(adev->debug_disable_gpu_ring_reset)) { + dev_err(adev->dev, "userq reset disabled by debug mask\n"); + return 0; + } + + /* + * If GPU recovery feature is disabled system-wide, + * skip all reset detection logic + */ + if (!amdgpu_gpu_recovery) + return 0; + + /* + * Iterate through all queue types to detect and reset problematic queues + * Process each queue type in the defined order + */ + for (i = 0; i < num_queue_types; i++) { + int ring_type = queue_types[i]; + const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type]; + + if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE)) + continue; + + if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 && + funcs && funcs->detect_and_reset) { + r = funcs->detect_and_reset(adev, ring_type); + if (r) { + gpu_reset = true; + break; + } + } + } + + if (gpu_reset) + amdgpu_userq_gpu_reset(adev); + + return r; +} + +static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue, + struct amdgpu_bo_va_mapping *va_map, u64 addr) +{ + struct amdgpu_userq_va_cursor *va_cursor; + struct userq_va_list; + + va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL); + if (!va_cursor) + return -ENOMEM; + + INIT_LIST_HEAD(&va_cursor->list); + va_cursor->gpu_addr = addr; + atomic_set(&va_map->bo_va->userq_va_mapped, 1); + list_add(&va_cursor->list, &queue->userq_va_list); + + return 0; +} + +int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue, + u64 addr, u64 expected_size) { struct amdgpu_bo_va_mapping *va_map; + struct amdgpu_vm *vm = queue->vm; u64 user_addr; u64 size; int r = 0; @@ -67,6 +190,7 @@ int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, /* Only validate the userq whether resident in the VM mapping range */ if (user_addr >= va_map->start && va_map->last - user_addr + 1 >= size) { + amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr); amdgpu_bo_unreserve(vm->root.bo); return 0; } @@ -77,6 +201,76 @@ int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, return r; } +static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr) +{ + struct amdgpu_bo_va_mapping *mapping; + bool r; + + if (amdgpu_bo_reserve(vm->root.bo, false)) + return false; + + mapping = 
amdgpu_vm_bo_lookup_mapping(vm, addr); + if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped)) + r = true; + else + r = false; + amdgpu_bo_unreserve(vm->root.bo); + + return r; +} + +static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_userq_va_cursor *va_cursor, *tmp; + int r = 0; + + list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) { + r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr); + dev_dbg(queue->userq_mgr->adev->dev, + "validate the userq mapping:%p va:%llx r:%d\n", + queue, va_cursor->gpu_addr, r); + } + + if (r != 0) + return true; + + return false; +} + +static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping, + struct amdgpu_userq_va_cursor *va_cursor) +{ + atomic_set(&mapping->bo_va->userq_va_mapped, 0); + list_del(&va_cursor->list); + kfree(va_cursor); +} + +static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_userq_va_cursor *va_cursor, *tmp; + struct amdgpu_bo_va_mapping *mapping; + int r; + + r = amdgpu_bo_reserve(queue->vm->root.bo, false); + if (r) + return r; + + list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) { + mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr); + if (!mapping) { + r = -EINVAL; + goto err; + } + dev_dbg(adev->dev, "delete the userq:%p va:%llx\n", + queue, va_cursor->gpu_addr); + amdgpu_userq_buffer_va_list_del(mapping, va_cursor); + } +err: + amdgpu_bo_unreserve(queue->vm->root.bo); + return r; +} + static int amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue) @@ -84,17 +278,22 @@ amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_device *adev = uq_mgr->adev; const struct amdgpu_userq_funcs *userq_funcs = adev->userq_funcs[queue->queue_type]; + bool found_hung_queue = false; int r = 0; if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { r = userq_funcs->preempt(uq_mgr, queue); if (r) { queue->state = AMDGPU_USERQ_STATE_HUNG; + found_hung_queue = true; } else { queue->state = AMDGPU_USERQ_STATE_PREEMPTED; } } + if (found_hung_queue) + amdgpu_userq_detect_and_reset_queues(uq_mgr); + return r; } @@ -126,16 +325,23 @@ amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_device *adev = uq_mgr->adev; const struct amdgpu_userq_funcs *userq_funcs = adev->userq_funcs[queue->queue_type]; + bool found_hung_queue = false; int r = 0; if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) || (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) { r = userq_funcs->unmap(uq_mgr, queue); - if (r) + if (r) { queue->state = AMDGPU_USERQ_STATE_HUNG; - else + found_hung_queue = true; + } else { queue->state = AMDGPU_USERQ_STATE_UNMAPPED; + } } + + if (found_hung_queue) + amdgpu_userq_detect_and_reset_queues(uq_mgr); + return r; } @@ -152,26 +358,33 @@ amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr, r = userq_funcs->map(uq_mgr, queue); if (r) { queue->state = AMDGPU_USERQ_STATE_HUNG; + amdgpu_userq_detect_and_reset_queues(uq_mgr); } else { queue->state = AMDGPU_USERQ_STATE_MAPPED; } } + return r; } -static void +static int amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue) { struct dma_fence *f = queue->last_fence; - int ret; + int ret = 0; if (f && !dma_fence_is_signaled(f)) { - ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100)); - if (ret <= 0) + ret = 
dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT); + if (ret <= 0) { drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n", f->context, f->seqno); + queue->state = AMDGPU_USERQ_STATE_HUNG; + return -ETIME; + } } + + return ret; } static void @@ -182,16 +395,27 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_device *adev = uq_mgr->adev; const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type]; + /* Wait for mode-1 reset to complete */ + down_read(&adev->reset_domain->sem); + + /* Drop the userq reference. */ + amdgpu_userq_buffer_vas_list_cleanup(adev, queue); uq_funcs->mqd_destroy(uq_mgr, queue); amdgpu_userq_fence_driver_free(queue); - idr_remove(&uq_mgr->userq_idr, queue_id); + /* Use interrupt-safe locking since IRQ handlers may access these XArrays */ + xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id); + xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index); + queue->userq_mgr = NULL; + list_del(&queue->userq_va_list); kfree(queue); + + up_read(&adev->reset_domain->sem); } static struct amdgpu_usermode_queue * amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid) { - return idr_find(&uq_mgr->userq_idr, qid); + return xa_load(&uq_mgr->userq_mgr_xa, qid); } void @@ -319,17 +543,6 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr, case AMDGPU_HW_IP_DMA: db_size = sizeof(u64); break; - - case AMDGPU_HW_IP_VCN_ENC: - db_size = sizeof(u32); - db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1; - break; - - case AMDGPU_HW_IP_VPE: - db_size = sizeof(u32); - db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1; - break; - default: drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n", db_info->queue_type); @@ -378,10 +591,11 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id) amdgpu_bo_unreserve(queue->db_obj.obj); } amdgpu_bo_unref(&queue->db_obj.obj); - + atomic_dec(&uq_mgr->userq_count[queue->queue_type]); #if defined(CONFIG_DEBUG_FS) debugfs_remove_recursive(queue->debugfs_queue); #endif + amdgpu_userq_detect_and_reset_queues(uq_mgr); r = amdgpu_userq_unmap_helper(uq_mgr, queue); /*TODO: It requires a reset for userq hw unmap error*/ if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) { @@ -391,7 +605,6 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id) amdgpu_userq_cleanup(uq_mgr, queue, queue_id); mutex_unlock(&uq_mgr->userq_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -463,8 +676,9 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) struct amdgpu_db_info db_info; char *queue_name; bool skip_map_queue; + u32 qid; uint64_t index; - int qid, r = 0; + int r = 0; int priority = (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >> AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT; @@ -487,7 +701,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) * * This will also make sure we have a valid eviction fence ready to be used. 
*/ - mutex_lock(&adev->userq_mutex); amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr); uq_funcs = adev->userq_funcs[args->in.ip_type]; @@ -505,14 +718,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) goto unlock; } - /* Validate the userq virtual address.*/ - if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) || - amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) || - amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) { - r = -EINVAL; - kfree(queue); - goto unlock; - } + INIT_LIST_HEAD(&queue->userq_va_list); queue->doorbell_handle = args->in.doorbell_handle; queue->queue_type = args->in.ip_type; queue->vm = &fpriv->vm; @@ -523,6 +729,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) db_info.db_obj = &queue->db_obj; db_info.doorbell_offset = args->in.doorbell_offset; + /* Validate the userq virtual address.*/ + if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) || + amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) || + amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) { + r = -EINVAL; + kfree(queue); + goto unlock; + } + /* Convert relative doorbell offset into absolute doorbell index */ index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp); if (index == (uint64_t)-EINVAL) { @@ -548,16 +763,27 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) goto unlock; } + /* Wait for mode-1 reset to complete */ + down_read(&adev->reset_domain->sem); + r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL)); + if (r) { + kfree(queue); + up_read(&adev->reset_domain->sem); + goto unlock; + } - qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL); - if (qid < 0) { + r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL); + if (r) { drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n"); amdgpu_userq_fence_driver_free(queue); uq_funcs->mqd_destroy(uq_mgr, queue); kfree(queue); r = -ENOMEM; + up_read(&adev->reset_domain->sem); goto unlock; } + up_read(&adev->reset_domain->sem); + queue->userq_mgr = uq_mgr; /* don't map the queue if scheduling is halted */ if (adev->userq_halt_for_enforce_isolation && @@ -570,7 +796,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) r = amdgpu_userq_map_helper(uq_mgr, queue); if (r) { drm_file_err(uq_mgr->file, "Failed to map Queue\n"); - idr_remove(&uq_mgr->userq_idr, qid); + xa_erase(&uq_mgr->userq_mgr_xa, qid); amdgpu_userq_fence_driver_free(queue); uq_funcs->mqd_destroy(uq_mgr, queue); kfree(queue); @@ -592,10 +818,10 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) kfree(queue_name); args->out.queue_id = qid; + atomic_inc(&uq_mgr->userq_count[queue->queue_type]); unlock: mutex_unlock(&uq_mgr->userq_mutex); - mutex_unlock(&adev->userq_mutex); return r; } @@ -693,11 +919,19 @@ static int amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_usermode_queue *queue; - int queue_id; + unsigned long queue_id; int ret = 0, r; /* Resume all the queues for this process */ - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) { + + if (!amdgpu_userq_buffer_vas_mapped(queue)) { + drm_file_err(uq_mgr->file, + "trying restore queue without va mapping\n"); + 
queue->state = AMDGPU_USERQ_STATE_INVALID_VA; + continue; + } + r = amdgpu_userq_restore_helper(uq_mgr, queue); if (r) ret = r; @@ -760,12 +994,21 @@ static int amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); + bool invalidated = false, new_addition = false; + struct ttm_operation_ctx ctx = { true, false }; struct amdgpu_device *adev = uq_mgr->adev; + struct amdgpu_hmm_range *range; struct amdgpu_vm *vm = &fpriv->vm; + unsigned long key, tmp_key; struct amdgpu_bo_va *bo_va; + struct amdgpu_bo *bo; struct drm_exec exec; + struct xarray xa; int ret; + xa_init(&xa); + +retry_lock: drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); drm_exec_until_all_locked(&exec) { ret = amdgpu_vm_lock_pd(vm, &exec, 1); @@ -792,10 +1035,74 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) goto unlock_all; } + if (invalidated) { + xa_for_each(&xa, tmp_key, range) { + bo = range->bo; + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + goto unlock_all; + + amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range); + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + goto unlock_all; + } + invalidated = false; + } + ret = amdgpu_vm_handle_moved(adev, vm, NULL); if (ret) goto unlock_all; + key = 0; + /* Validate User Ptr BOs */ + list_for_each_entry(bo_va, &vm->done, base.vm_status) { + bo = bo_va->base.bo; + if (!bo) + continue; + + if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm)) + continue; + + range = xa_load(&xa, key); + if (range && range->bo != bo) { + xa_erase(&xa, key); + amdgpu_hmm_range_free(range); + range = NULL; + } + + if (!range) { + range = amdgpu_hmm_range_alloc(bo); + if (!range) { + ret = -ENOMEM; + goto unlock_all; + } + + xa_store(&xa, key, range, GFP_KERNEL); + new_addition = true; + } + key++; + } + + if (new_addition) { + drm_exec_fini(&exec); + xa_for_each(&xa, tmp_key, range) { + if (!range) + continue; + bo = range->bo; + ret = amdgpu_ttm_tt_get_user_pages(bo, range); + if (ret) + goto unlock_all; + } + + invalidated = true; + new_addition = false; + goto retry_lock; + } + ret = amdgpu_vm_update_pdes(adev, vm, false); if (ret) goto unlock_all; @@ -815,6 +1122,13 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) unlock_all: drm_exec_fini(&exec); + xa_for_each(&xa, tmp_key, range) { + if (!range) + continue; + bo = range->bo; + amdgpu_hmm_range_free(range); + } + xa_destroy(&xa); return ret; } @@ -848,11 +1162,12 @@ static int amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_usermode_queue *queue; - int queue_id; + unsigned long queue_id; int ret = 0, r; + amdgpu_userq_detect_and_reset_queues(uq_mgr); /* Try to unmap all the queues in this process ctx */ - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) { r = amdgpu_userq_preempt_helper(uq_mgr, queue); if (r) ret = r; @@ -863,13 +1178,31 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr) return ret; } +void amdgpu_userq_reset_work(struct work_struct *work) +{ + struct amdgpu_device *adev = container_of(work, struct amdgpu_device, + userq_reset_work); + struct amdgpu_reset_context reset_context; + + memset(&reset_context, 0, sizeof(reset_context)); + + reset_context.method = AMD_RESET_METHOD_NONE; + reset_context.reset_req_dev = adev; + reset_context.src = AMDGPU_RESET_SRC_USERQ; + set_bit(AMDGPU_NEED_FULL_RESET, 
&reset_context.flags); + /*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/ + + amdgpu_device_gpu_recover(adev, NULL, &reset_context); +} + static int amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_usermode_queue *queue; - int queue_id, ret; + unsigned long queue_id; + int ret; - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) { struct dma_fence *f = queue->last_fence; if (!f || dma_fence_is_signaled(f)) @@ -889,22 +1222,19 @@ void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_eviction_fence *ev_fence) { - int ret; struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr; + struct amdgpu_device *adev = uq_mgr->adev; + int ret; /* Wait for any pending userqueue fence work to finish */ ret = amdgpu_userq_wait_for_signal(uq_mgr); - if (ret) { - drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n"); - return; - } + if (ret) + dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n"); ret = amdgpu_userq_evict_all(uq_mgr); - if (ret) { - drm_file_err(uq_mgr->file, "Failed to evict userqueue\n"); - return; - } + if (ret) + dev_err(adev->dev, "Failed to evict userqueue\n"); /* Signal current eviction fence */ amdgpu_eviction_fence_signal(evf_mgr, ev_fence); @@ -922,44 +1252,31 @@ int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *f struct amdgpu_device *adev) { mutex_init(&userq_mgr->userq_mutex); - idr_init_base(&userq_mgr->userq_idr, 1); + xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC); userq_mgr->adev = adev; userq_mgr->file = file_priv; - mutex_lock(&adev->userq_mutex); - list_add(&userq_mgr->list, &adev->userq_mgr_list); - mutex_unlock(&adev->userq_mutex); - INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker); return 0; } void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr) { - struct amdgpu_device *adev = userq_mgr->adev; struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - uint32_t queue_id; + unsigned long queue_id; cancel_delayed_work_sync(&userq_mgr->resume_work); - mutex_lock(&adev->userq_mutex); mutex_lock(&userq_mgr->userq_mutex); - idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) { + amdgpu_userq_detect_and_reset_queues(userq_mgr); + xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) { amdgpu_userq_wait_for_last_fence(userq_mgr, queue); amdgpu_userq_unmap_helper(userq_mgr, queue); amdgpu_userq_cleanup(userq_mgr, queue, queue_id); } - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - if (uqm == userq_mgr) { - list_del(&uqm->list); - break; - } - } - idr_destroy(&userq_mgr->userq_idr); + xa_destroy(&userq_mgr->userq_mgr_xa); mutex_unlock(&userq_mgr->userq_mutex); - mutex_unlock(&adev->userq_mutex); mutex_destroy(&userq_mgr->userq_mutex); } @@ -967,57 +1284,51 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev) { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; - int ret = 0, r; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; + int r; if (!ip_mask) return 0; - mutex_lock(&adev->userq_mutex); - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; cancel_delayed_work_sync(&uqm->resume_work); - mutex_lock(&uqm->userq_mutex); - 
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (adev->in_s0ix) - r = amdgpu_userq_preempt_helper(uqm, queue); - else - r = amdgpu_userq_unmap_helper(uqm, queue); - if (r) - ret = r; - } - mutex_unlock(&uqm->userq_mutex); + guard(mutex)(&uqm->userq_mutex); + amdgpu_userq_detect_and_reset_queues(uqm); + if (adev->in_s0ix) + r = amdgpu_userq_preempt_helper(uqm, queue); + else + r = amdgpu_userq_unmap_helper(uqm, queue); + if (r) + return r; } - mutex_unlock(&adev->userq_mutex); - return ret; + return 0; } int amdgpu_userq_resume(struct amdgpu_device *adev) { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; - int ret = 0, r; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; + int r; if (!ip_mask) return 0; - mutex_lock(&adev->userq_mutex); - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - mutex_lock(&uqm->userq_mutex); - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (adev->in_s0ix) - r = amdgpu_userq_restore_helper(uqm, queue); - else - r = amdgpu_userq_map_helper(uqm, queue); - if (r) - ret = r; - } - mutex_unlock(&uqm->userq_mutex); + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; + guard(mutex)(&uqm->userq_mutex); + if (adev->in_s0ix) + r = amdgpu_userq_restore_helper(uqm, queue); + else + r = amdgpu_userq_map_helper(uqm, queue); + if (r) + return r; } - mutex_unlock(&adev->userq_mutex); - return ret; + + return 0; } int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, @@ -1025,33 +1336,32 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; int ret = 0, r; /* only need to stop gfx/compute */ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) return 0; - mutex_lock(&adev->userq_mutex); if (adev->userq_halt_for_enforce_isolation) dev_warn(adev->dev, "userq scheduling already stopped!\n"); adev->userq_halt_for_enforce_isolation = true; - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; cancel_delayed_work_sync(&uqm->resume_work); mutex_lock(&uqm->userq_mutex); - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (((queue->queue_type == AMDGPU_HW_IP_GFX) || - (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && - (queue->xcp_id == idx)) { - r = amdgpu_userq_preempt_helper(uqm, queue); - if (r) - ret = r; - } + if (((queue->queue_type == AMDGPU_HW_IP_GFX) || + (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && + (queue->xcp_id == idx)) { + amdgpu_userq_detect_and_reset_queues(uqm); + r = amdgpu_userq_preempt_helper(uqm, queue); + if (r) + ret = r; } mutex_unlock(&uqm->userq_mutex); } - mutex_unlock(&adev->userq_mutex); + return ret; } @@ -1060,21 +1370,20 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; int ret = 0, r; /* only need to stop gfx/compute */ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) return 0; - mutex_lock(&adev->userq_mutex); if 
(!adev->userq_halt_for_enforce_isolation) dev_warn(adev->dev, "userq scheduling already started!\n"); adev->userq_halt_for_enforce_isolation = false; - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; mutex_lock(&uqm->userq_mutex); - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { if (((queue->queue_type == AMDGPU_HW_IP_GFX) || (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && (queue->xcp_id == idx)) { @@ -1082,9 +1391,92 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, if (r) ret = r; } - } mutex_unlock(&uqm->userq_mutex); } - mutex_unlock(&adev->userq_mutex); + return ret; } + +int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t saddr) +{ + u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); + struct amdgpu_bo_va *bo_va = mapping->bo_va; + struct dma_resv *resv = bo_va->base.bo->tbo.base.resv; + int ret = 0; + + if (!ip_mask) + return 0; + + dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr); + /** + * The userq VA mapping reservation should include the eviction fence, + * if the eviction fence can't signal successfully during unmapping, + * then driver will warn to flag this improper unmap of the userq VA. + * Note: The eviction fence may be attached to different BOs, and this + * unmap is only for one kind of userq VAs, so at this point suppose + * the eviction fence is always unsignaled. + */ + if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) { + ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true, + MAX_SCHEDULE_TIMEOUT); + if (ret <= 0) + return -EBUSY; + } + + return 0; +} + +void amdgpu_userq_pre_reset(struct amdgpu_device *adev) +{ + const struct amdgpu_userq_funcs *userq_funcs; + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; + + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; + cancel_delayed_work_sync(&uqm->resume_work); + if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { + amdgpu_userq_wait_for_last_fence(uqm, queue); + userq_funcs = adev->userq_funcs[queue->queue_type]; + userq_funcs->unmap(uqm, queue); + /* just mark all queues as hung at this point. + * if unmap succeeds, we could map again + * in amdgpu_userq_post_reset() if vram is not lost + */ + queue->state = AMDGPU_USERQ_STATE_HUNG; + amdgpu_userq_fence_driver_force_completion(queue); + } + } +} + +int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost) +{ + /* if any queue state is AMDGPU_USERQ_STATE_UNMAPPED + * at this point, we should be able to map it again + * and continue if vram is not lost. 
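+	 * (note: amdgpu_userq_pre_reset() above leaves such queues in
+	 * AMDGPU_USERQ_STATE_HUNG after unmapping them, which is the state
+	 * the loop below actually checks before re-mapping.)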
+ */ + struct amdgpu_userq_mgr *uqm; + struct amdgpu_usermode_queue *queue; + const struct amdgpu_userq_funcs *userq_funcs; + unsigned long queue_id; + int r = 0; + + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; + if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) { + userq_funcs = adev->userq_funcs[queue->queue_type]; + /* Re-map queue */ + r = userq_funcs->map(uqm, queue); + if (r) { + dev_err(adev->dev, "Failed to remap queue %ld\n", queue_id); + continue; + } + queue->state = AMDGPU_USERQ_STATE_MAPPED; + } + } + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h index c027dd916672..c37444427a14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h @@ -37,6 +37,7 @@ enum amdgpu_userq_state { AMDGPU_USERQ_STATE_MAPPED, AMDGPU_USERQ_STATE_PREEMPTED, AMDGPU_USERQ_STATE_HUNG, + AMDGPU_USERQ_STATE_INVALID_VA, }; struct amdgpu_mqd_prop; @@ -47,6 +48,11 @@ struct amdgpu_userq_obj { struct amdgpu_bo *obj; }; +struct amdgpu_userq_va_cursor { + u64 gpu_addr; + struct list_head list; +}; + struct amdgpu_usermode_queue { int queue_type; enum amdgpu_userq_state state; @@ -66,6 +72,8 @@ struct amdgpu_usermode_queue { u32 xcp_id; int priority; struct dentry *debugfs_queue; + + struct list_head userq_va_list; }; struct amdgpu_userq_funcs { @@ -88,12 +96,17 @@ struct amdgpu_userq_funcs { /* Usermode queues for gfx */ struct amdgpu_userq_mgr { - struct idr userq_idr; + /** + * @userq_mgr_xa: Per-process user queue map (queue ID → queue) + * Key: queue_id (unique ID within the process's userq manager) + * Value: struct amdgpu_usermode_queue + */ + struct xarray userq_mgr_xa; struct mutex userq_mutex; struct amdgpu_device *adev; struct delayed_work resume_work; - struct list_head list; struct drm_file *file; + atomic_t userq_count[AMDGPU_RING_TYPE_MAX]; }; struct amdgpu_db_info { @@ -136,7 +149,13 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx); int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx); +void amdgpu_userq_reset_work(struct work_struct *work); +void amdgpu_userq_pre_reset(struct amdgpu_device *adev); +int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost); -int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, - u64 expected_size); +int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue, + u64 addr, u64 expected_size); +int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t saddr); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c index 4d0096d0baa9..eba9fb359047 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c @@ -387,6 +387,7 @@ static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue, amdgpu_bo_unreserve(queue->vm->root.bo); r = amdgpu_bo_reserve(bo, true); if (r) { + amdgpu_bo_unref(&bo); DRM_ERROR("Failed to reserve userqueue wptr bo"); return r; } @@ -538,7 +539,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data, } /* Retrieve the user queue */ - queue = idr_find(&userq_mgr->userq_idr, args->queue_id); + queue = xa_load(&userq_mgr->userq_mgr_xa, args->queue_id); if (!queue) { r = -ENOENT; goto put_gobj_write; @@ -900,7 +901,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, 
void *data, */ num_fences = dma_fence_dedup_array(fences, num_fences); - waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id); + waitq = xa_load(&userq_mgr->userq_mgr_xa, wait_info->waitq_id); if (!waitq) { r = -EINVAL; goto free_fences; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index ce318f5de047..a7d8f1ce6ac2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -41,6 +41,9 @@ #define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000) /* Firmware Names */ +#ifdef CONFIG_DRM_AMDGPU_SI +#define FIRMWARE_VCE_V1_0 "amdgpu/vce_1_0_0.bin" +#endif #ifdef CONFIG_DRM_AMDGPU_CIK #define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin" #define FIRMWARE_KABINI "amdgpu/kabini_vce.bin" @@ -61,6 +64,9 @@ #define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin" #define FIRMWARE_VEGA20 "amdgpu/vega20_vce.bin" +#ifdef CONFIG_DRM_AMDGPU_SI +MODULE_FIRMWARE(FIRMWARE_VCE_V1_0); +#endif #ifdef CONFIG_DRM_AMDGPU_CIK MODULE_FIRMWARE(FIRMWARE_BONAIRE); MODULE_FIRMWARE(FIRMWARE_KABINI); @@ -88,82 +94,93 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, bool direct, struct dma_fence **fence); /** - * amdgpu_vce_sw_init - allocate memory, load vce firmware + * amdgpu_vce_firmware_name() - determine the firmware file name for VCE * * @adev: amdgpu_device pointer - * @size: size for the new BO * - * First step to get VCE online, allocate memory and load the firmware + * Each chip that has VCE IP may need a different firmware. + * This function returns the name of the VCE firmware file + * appropriate for the current chip. */ -int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) +static const char *amdgpu_vce_firmware_name(struct amdgpu_device *adev) { - const char *fw_name; - const struct common_firmware_header *hdr; - unsigned int ucode_version, version_major, version_minor, binary_id; - int i, r; - switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_PITCAIRN: + case CHIP_TAHITI: + case CHIP_VERDE: + return FIRMWARE_VCE_V1_0; +#endif #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: - fw_name = FIRMWARE_BONAIRE; - break; + return FIRMWARE_BONAIRE; case CHIP_KAVERI: - fw_name = FIRMWARE_KAVERI; - break; + return FIRMWARE_KAVERI; case CHIP_KABINI: - fw_name = FIRMWARE_KABINI; - break; + return FIRMWARE_KABINI; case CHIP_HAWAII: - fw_name = FIRMWARE_HAWAII; - break; + return FIRMWARE_HAWAII; case CHIP_MULLINS: - fw_name = FIRMWARE_MULLINS; - break; + return FIRMWARE_MULLINS; #endif case CHIP_TONGA: - fw_name = FIRMWARE_TONGA; - break; + return FIRMWARE_TONGA; case CHIP_CARRIZO: - fw_name = FIRMWARE_CARRIZO; - break; + return FIRMWARE_CARRIZO; case CHIP_FIJI: - fw_name = FIRMWARE_FIJI; - break; + return FIRMWARE_FIJI; case CHIP_STONEY: - fw_name = FIRMWARE_STONEY; - break; + return FIRMWARE_STONEY; case CHIP_POLARIS10: - fw_name = FIRMWARE_POLARIS10; - break; + return FIRMWARE_POLARIS10; case CHIP_POLARIS11: - fw_name = FIRMWARE_POLARIS11; - break; + return FIRMWARE_POLARIS11; case CHIP_POLARIS12: - fw_name = FIRMWARE_POLARIS12; - break; + return FIRMWARE_POLARIS12; case CHIP_VEGAM: - fw_name = FIRMWARE_VEGAM; - break; + return FIRMWARE_VEGAM; case CHIP_VEGA10: - fw_name = FIRMWARE_VEGA10; - break; + return FIRMWARE_VEGA10; case CHIP_VEGA12: - fw_name = FIRMWARE_VEGA12; - break; + return FIRMWARE_VEGA12; case CHIP_VEGA20: - fw_name = FIRMWARE_VEGA20; - break; + return FIRMWARE_VEGA20; default: - return -EINVAL; + return NULL; } +} + +/** + * amdgpu_vce_early_init() - try to 
load VCE firmware + * + * @adev: amdgpu_device pointer + * + * Tries to load the VCE firmware. + * + * When not found, returns -ENOENT so that the driver can + * still load and initialize the rest of the IP blocks. + * The GPU can function just fine without VCE, it will just + * not support video encoding. + */ +int amdgpu_vce_early_init(struct amdgpu_device *adev) +{ + const char *fw_name = amdgpu_vce_firmware_name(adev); + const struct common_firmware_header *hdr; + unsigned int ucode_version, version_major, version_minor, binary_id; + int r; + + if (!fw_name) + return -ENOENT; r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name); if (r) { - dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n", - fw_name); + dev_err(adev->dev, + "amdgpu_vce: Firmware \"%s\" not found or failed to validate (%d)\n", + fw_name, r); + amdgpu_ucode_release(&adev->vce.fw); - return r; + return -ENOENT; } hdr = (const struct common_firmware_header *)adev->vce.fw->data; @@ -172,11 +189,35 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) version_major = (ucode_version >> 20) & 0xfff; version_minor = (ucode_version >> 8) & 0xfff; binary_id = ucode_version & 0xff; - DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n", + dev_info(adev->dev, "Found VCE firmware Version: %d.%d Binary ID: %d\n", version_major, version_minor, binary_id); adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) | (binary_id << 8)); + return 0; +} + +/** + * amdgpu_vce_sw_init() - allocate memory for VCE BO + * + * @adev: amdgpu_device pointer + * @size: size for the new BO + * + * First step to get VCE online: allocate memory for VCE BO. + * The VCE firmware binary is copied into the VCE BO later, + * in amdgpu_vce_resume. The VCE executes its code from the + * VCE BO and also uses the space in this BO for its stack and data. + * + * Ideally this BO should be placed in VRAM for optimal performance, + * although technically it also runs from system RAM (albeit slowly).
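+ * (the amdgpu_bo_create_kernel() call below passes
+ * AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, so GTT placement
+ * remains available as a fallback when VRAM placement is not possible.)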
+ */ +int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) +{ + int i, r; + + if (!adev->vce.fw) + return -ENOENT; + r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, @@ -285,40 +326,23 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev) */ int amdgpu_vce_resume(struct amdgpu_device *adev) { - void *cpu_addr; const struct common_firmware_header *hdr; unsigned int offset; - int r, idx; + int idx; if (adev->vce.vcpu_bo == NULL) return -EINVAL; - r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false); - if (r) { - dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r); - return r; - } - - r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->vce.vcpu_bo); - dev_err(adev->dev, "(%d) VCE map failed\n", r); - return r; - } - hdr = (const struct common_firmware_header *)adev->vce.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); if (drm_dev_enter(adev_to_drm(adev), &idx)) { - memcpy_toio(cpu_addr, adev->vce.fw->data + offset, + memset_io(adev->vce.cpu_addr, 0, amdgpu_bo_size(adev->vce.vcpu_bo)); + memcpy_toio(adev->vce.cpu_addr, adev->vce.fw->data + offset, adev->vce.fw->size - offset); drm_dev_exit(idx); } - amdgpu_bo_kunmap(adev->vce.vcpu_bo); - - amdgpu_bo_unreserve(adev->vce.vcpu_bo); - return 0; } @@ -426,6 +450,24 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) } } +/** + * amdgpu_vce_required_gart_pages() - gets number of GART pages required by VCE + * + * @adev: amdgpu_device pointer + * + * Returns how many GART pages we need before GTT for the VCE IP block. + * For VCE1, see vce_v1_0_ensure_vcpu_bo_32bit_addr for details. + * For VCE2+, this is not needed so return zero. + */ +u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev) +{ + /* VCE IP block not added yet, so can't use amdgpu_ip_version */ + if (adev->family == AMDGPU_FAMILY_SI) + return 512; + + return 0; +} + /** * amdgpu_vce_get_create_msg - generate a VCE create msg * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 6e53f872d084..1c3464ce5037 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -51,14 +51,17 @@ struct amdgpu_vce { struct drm_sched_entity entity; uint32_t srbm_soft_reset; unsigned num_rings; + uint32_t keyselect; }; +int amdgpu_vce_early_init(struct amdgpu_device *adev); int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size); int amdgpu_vce_sw_fini(struct amdgpu_device *adev); int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); int amdgpu_vce_suspend(struct amdgpu_device *adev); int amdgpu_vce_resume(struct amdgpu_device *adev); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); +u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job, struct amdgpu_ib *ib); int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index dc8a17bcc3c8..82624b44e661 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -100,7 +100,8 @@ #define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg) \ ({ \ - uint32_t internal_reg_offset, addr; \ + /* To avoid a -Wunused-but-set-variable warning. 
*/ \ + uint32_t internal_reg_offset __maybe_unused, addr; \ bool video_range, video1_range, aon_range, aon1_range; \ \ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \ @@ -161,7 +162,8 @@ #define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \ ({ \ - uint32_t internal_reg_offset, addr; \ + /* To avoid a -Wunused-but-set-variable warning. */ \ + uint32_t internal_reg_offset __maybe_unused, addr; \ bool video_range, video1_range, aon_range, aon1_range; \ \ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index f96beb96c75c..47a6ce4fdc74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -44,6 +44,18 @@ vf2pf_info->ucode_info[ucode].version = ver; \ } while (0) +#define mmRCC_CONFIG_MEMSIZE 0xde3 + +const char *amdgpu_virt_dynamic_crit_table_name[] = { + "IP DISCOVERY", + "VBIOS IMG", + "RAS TELEMETRY", + "DATA EXCHANGE", + "BAD PAGE INFO", + "INIT HEADER", + "LAST", +}; + bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) { /* By now all MMIO pages except mailbox are blocked */ @@ -150,9 +162,10 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev) virt->ops->req_init_data(adev); if (adev->virt.req_init_data_ver > 0) - DRM_INFO("host supports REQ_INIT_DATA handshake\n"); + dev_info(adev->dev, "host supports REQ_INIT_DATA handshake of critical_region_version %d\n", + adev->virt.req_init_data_ver); else - DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n"); + dev_warn(adev->dev, "host doesn't support REQ_INIT_DATA handshake\n"); } /** @@ -205,12 +218,12 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev) &adev->virt.mm_table.gpu_addr, (void *)&adev->virt.mm_table.cpu_addr); if (r) { - DRM_ERROR("failed to alloc mm table and error = %d.\n", r); + dev_err(adev->dev, "failed to alloc mm table and error = %d.\n", r); return r; } memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE); - DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n", + dev_info(adev->dev, "MM table gpu addr = 0x%llx, cpu addr = %p.\n", adev->virt.mm_table.gpu_addr, adev->virt.mm_table.cpu_addr); return 0; @@ -390,7 +403,9 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev) if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, AMDGPU_GPU_PAGE_SIZE, &bo, NULL)) - DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp); + dev_dbg(adev->dev, + "RAS WARN: reserve vram for retired page %llx fail\n", + bp); data->bps_bo[i] = bo; } data->last_reserved = i + 1; @@ -658,10 +673,34 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work) schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); } +static int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev, uint32_t *pfvf_data) +{ + uint32_t dataexchange_offset = + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset; + uint32_t dataexchange_size = + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10; + uint64_t pos = 0; + + dev_info(adev->dev, + "Got data exchange info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n", + dataexchange_offset, dataexchange_size); + + if (!IS_ALIGNED(dataexchange_offset, 4) || !IS_ALIGNED(dataexchange_size, 4)) { + dev_err(adev->dev, "Data exchange data not aligned to 4 bytes\n"); + return -EINVAL; + } + + pos = (uint64_t)dataexchange_offset; + 
amdgpu_device_vram_access(adev, pos, pfvf_data, + dataexchange_size, false); + + return 0; +} + void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) { if (adev->virt.vf2pf_update_interval_ms != 0) { - DRM_INFO("clean up the vf2pf work item\n"); + dev_info(adev->dev, "clean up the vf2pf work item\n"); cancel_delayed_work_sync(&adev->virt.vf2pf_work); adev->virt.vf2pf_update_interval_ms = 0; } @@ -669,13 +708,15 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) { + uint32_t *pfvf_data = NULL; + adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; adev->virt.vf2pf_update_retry_cnt = 0; if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) { - DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!"); + dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!"); } else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) { /* go through this logic in ip_init and reset to init workqueue*/ amdgpu_virt_exchange_data(adev); @@ -684,11 +725,34 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); } else if (adev->bios != NULL) { /* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/ - adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *) - (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) { + pfvf_data = + kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10, + GFP_KERNEL); + if (!pfvf_data) { + dev_err(adev->dev, "Failed to allocate memory for pfvf_data\n"); + return; + } - amdgpu_virt_read_pf2vf_data(adev); + if (amdgpu_virt_read_exchange_data_from_mem(adev, pfvf_data)) + goto free_pfvf_data; + + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *)pfvf_data; + + amdgpu_virt_read_pf2vf_data(adev); + +free_pfvf_data: + kfree(pfvf_data); + pfvf_data = NULL; + adev->virt.fw_reserve.p_pf2vf = NULL; + } else { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10)); + + amdgpu_virt_read_pf2vf_data(adev); + } } } @@ -701,23 +765,38 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) { if (adev->mman.fw_vram_usage_va) { - adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *) - (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); - adev->virt.fw_reserve.p_vf2pf = - (struct amd_sriov_msg_vf2pf_info_header *) - (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); - adev->virt.fw_reserve.ras_telemetry = - (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); + if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->mman.fw_vram_usage_va + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset); + adev->virt.fw_reserve.p_vf2pf = + (struct amd_sriov_msg_vf2pf_info_header *) + (adev->mman.fw_vram_usage_va + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset + + (AMD_SRIOV_MSG_SIZE_KB << 10)); + 
adev->virt.fw_reserve.ras_telemetry = + (adev->mman.fw_vram_usage_va + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset); + } else { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10)); + adev->virt.fw_reserve.p_vf2pf = + (struct amd_sriov_msg_vf2pf_info_header *) + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10)); + adev->virt.fw_reserve.ras_telemetry = + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10)); + } } else if (adev->mman.drv_vram_usage_va) { adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) - (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10)); adev->virt.fw_reserve.p_vf2pf = (struct amd_sriov_msg_vf2pf_info_header *) - (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10)); adev->virt.fw_reserve.ras_telemetry = - (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10)); } amdgpu_virt_read_pf2vf_data(adev); @@ -816,7 +895,7 @@ static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg) break; default: /* other chip doesn't support SRIOV */ is_sriov = false; - DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type); + dev_err(adev->dev, "Unknown asic type: %d!\n", adev->asic_type); break; } } @@ -838,10 +917,220 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev) RATELIMIT_MSG_ON_RELEASE); mutex_init(&adev->virt.ras.ras_telemetry_mutex); + mutex_init(&adev->virt.access_req_mutex); adev->virt.ras.cper_rptr = 0; } +static uint8_t amdgpu_virt_crit_region_calc_checksum(uint8_t *buf_start, uint8_t *buf_end) +{ + uint32_t sum = 0; + + if (buf_start >= buf_end) + return 0; + + for (; buf_start < buf_end; buf_start++) + sum += buf_start[0]; + + return 0xffffffff - sum; +} + +int amdgpu_virt_init_critical_region(struct amdgpu_device *adev) +{ + struct amd_sriov_msg_init_data_header *init_data_hdr = NULL; + u64 init_hdr_offset = adev->virt.init_data_header.offset; + u64 init_hdr_size = (u64)adev->virt.init_data_header.size_kb << 10; /* KB → bytes */ + u64 vram_size; + u64 end; + int r = 0; + uint8_t checksum = 0; + + /* Skip below init if critical region version != v2 */ + if (adev->virt.req_init_data_ver != GPU_CRIT_REGION_V2) + return 0; + + if (init_hdr_offset < 0) { + dev_err(adev->dev, "Invalid init header offset\n"); + return -EINVAL; + } + + vram_size = RREG32(mmRCC_CONFIG_MEMSIZE); + if (!vram_size || vram_size == U32_MAX) + return -EINVAL; + vram_size <<= 20; + + if (check_add_overflow(init_hdr_offset, init_hdr_size, &end) || end > vram_size) { + dev_err(adev->dev, "init_data_header exceeds VRAM size, exiting\n"); + return -EINVAL; + } + + /* Allocate for init_data_hdr */ + init_data_hdr = kzalloc(sizeof(struct amd_sriov_msg_init_data_header), GFP_KERNEL); + if (!init_data_hdr) + return -ENOMEM; + + amdgpu_device_vram_access(adev, (uint64_t)init_hdr_offset, (uint32_t *)init_data_hdr, + sizeof(struct amd_sriov_msg_init_data_header), false); + + /* Table validation */ + if (strncmp(init_data_hdr->signature, + AMDGPU_SRIOV_CRIT_DATA_SIGNATURE, + AMDGPU_SRIOV_CRIT_DATA_SIG_LEN) != 0) { + dev_err(adev->dev, "Invalid init data signature: %.4s\n", + 
init_data_hdr->signature); + r = -EINVAL; + goto out; + } + + checksum = amdgpu_virt_crit_region_calc_checksum( + (uint8_t *)&init_data_hdr->initdata_offset, + (uint8_t *)init_data_hdr + + sizeof(struct amd_sriov_msg_init_data_header)); + if (checksum != init_data_hdr->checksum) { + dev_err(adev->dev, "Found unmatching checksum from calculation 0x%x and init_data 0x%x\n", + checksum, init_data_hdr->checksum); + r = -EINVAL; + goto out; + } + + memset(&adev->virt.crit_regn, 0, sizeof(adev->virt.crit_regn)); + memset(adev->virt.crit_regn_tbl, 0, sizeof(adev->virt.crit_regn_tbl)); + + adev->virt.crit_regn.offset = init_data_hdr->initdata_offset; + adev->virt.crit_regn.size_kb = init_data_hdr->initdata_size_in_kb; + + /* Validation and initialization for each table entry */ + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_IPD_TABLE_ID)) { + if (!init_data_hdr->ip_discovery_size_in_kb || + init_data_hdr->ip_discovery_size_in_kb > DISCOVERY_TMR_SIZE) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_IPD_TABLE_ID], + init_data_hdr->ip_discovery_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset = + init_data_hdr->ip_discovery_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb = + init_data_hdr->ip_discovery_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID)) { + if (!init_data_hdr->vbios_img_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID], + init_data_hdr->vbios_img_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].offset = + init_data_hdr->vbios_img_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb = + init_data_hdr->vbios_img_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID)) { + if (!init_data_hdr->ras_tele_info_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID], + init_data_hdr->ras_tele_info_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset = + init_data_hdr->ras_tele_info_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].size_kb = + init_data_hdr->ras_tele_info_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID)) { + if (!init_data_hdr->dataexchange_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID], + init_data_hdr->dataexchange_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset = + init_data_hdr->dataexchange_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb = + init_data_hdr->dataexchange_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID)) { + if (!init_data_hdr->bad_page_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID], + init_data_hdr->bad_page_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].offset = + init_data_hdr->bad_page_info_offset; + 
adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].size_kb = + init_data_hdr->bad_page_size_in_kb; + } + + /* Validation for critical region info */ + if (adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb > DISCOVERY_TMR_SIZE) { + dev_err(adev->dev, "Invalid IP discovery size: 0x%x\n", + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb); + r = -EINVAL; + goto out; + } + + /* reserved memory starts from crit region base offset with the size of 5MB */ + adev->mman.fw_vram_usage_start_offset = adev->virt.crit_regn.offset; + adev->mman.fw_vram_usage_size = adev->virt.crit_regn.size_kb << 10; + dev_info(adev->dev, + "critical region v%d requested to reserve memory start at %08llx with %llu KB.\n", + init_data_hdr->version, + adev->mman.fw_vram_usage_start_offset, + adev->mman.fw_vram_usage_size >> 10); + + adev->virt.is_dynamic_crit_regn_enabled = true; + +out: + kfree(init_data_hdr); + init_data_hdr = NULL; + + return r; +} + +int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev, + int data_id, uint8_t *binary, u32 *size) +{ + uint32_t data_offset = 0; + uint32_t data_size = 0; + enum amd_sriov_msg_table_id_enum data_table_id = data_id; + + if (data_table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID) + return -EINVAL; + + data_offset = adev->virt.crit_regn_tbl[data_table_id].offset; + data_size = adev->virt.crit_regn_tbl[data_table_id].size_kb << 10; + + /* Validate on input params */ + if (!binary || !size || *size < (uint64_t)data_size) + return -EINVAL; + + /* Proceed to copy the dynamic content */ + amdgpu_device_vram_access(adev, + (uint64_t)data_offset, (uint32_t *)binary, data_size, false); + *size = (uint64_t)data_size; + + dev_dbg(adev->dev, + "Got %s info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n", + amdgpu_virt_dynamic_crit_table_name[data_id], data_offset, data_size); + + return 0; +} + void amdgpu_virt_init(struct amdgpu_device *adev) { bool is_sriov = false; @@ -1289,7 +1578,7 @@ amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block bloc case AMDGPU_RAS_BLOCK__MPIO: return RAS_TELEMETRY_GPU_BLOCK_MPIO; default: - DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n", + dev_warn(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n", block); return RAS_TELEMETRY_GPU_BLOCK_COUNT; } @@ -1304,7 +1593,7 @@ static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev, checksum = host_telemetry->header.checksum; used_size = host_telemetry->header.used_size; - if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10)) return 0; tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL); @@ -1383,7 +1672,7 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev, checksum = host_telemetry->header.checksum; used_size = host_telemetry->header.used_size; - if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10)) return -EINVAL; cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL); @@ -1515,7 +1804,7 @@ static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev, checksum = host_telemetry->header.checksum; used_size = host_telemetry->header.used_size; - if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10)) return 0; tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index d1172c8e58c4..01d5bca2dee1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -54,6 +54,12 @@ #define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2 +/* Signature used to validate the SR-IOV dynamic critical region init data header ("INDA") */ +#define AMDGPU_SRIOV_CRIT_DATA_SIGNATURE "INDA" +#define AMDGPU_SRIOV_CRIT_DATA_SIG_LEN 4 + +#define IS_SRIOV_CRIT_REGN_ENTRY_VALID(hdr, id) ((hdr)->valid_tables & (1 << (id))) + enum amdgpu_sriov_vf_mode { SRIOV_VF_MODE_BARE_METAL = 0, SRIOV_VF_MODE_ONE_VF, @@ -144,6 +150,7 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_RAS_CAPS = (1 << 9), AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10), AMDGIM_FEATURE_RAS_CPER = (1 << 11), + AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK = (1 << 12), }; enum AMDGIM_REG_ACCESS_FLAG { @@ -262,6 +269,11 @@ struct amdgpu_virt_ras { DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST); +struct amdgpu_virt_region { + uint32_t offset; + uint32_t size_kb; +}; + /* GPU virtualization */ struct amdgpu_virt { uint32_t caps; @@ -289,6 +301,12 @@ struct amdgpu_virt { bool ras_init_done; uint32_t reg_access; + /* dynamic(v2) critical regions */ + struct amdgpu_virt_region init_data_header; + struct amdgpu_virt_region crit_regn; + struct amdgpu_virt_region crit_regn_tbl[AMD_SRIOV_MSG_MAX_TABLE_ID]; + bool is_dynamic_crit_regn_enabled; + /* vf2pf message */ struct delayed_work vf2pf_work; uint32_t vf2pf_update_interval_ms; @@ -307,6 +325,8 @@ struct amdgpu_virt { /* Spinlock to protect access to the RLCG register interface */ spinlock_t rlcg_reg_lock; + struct mutex access_req_mutex; + union amd_sriov_ras_caps ras_en_caps; union amd_sriov_ras_caps ras_telemetry_en_caps; struct amdgpu_virt_ras ras; @@ -378,6 +398,9 @@ struct amdgpu_video_codec_info; #define amdgpu_sriov_ras_cper_en(adev) \ ((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER) +#define amdgpu_sriov_xgmi_ta_ext_peer_link_en(adev) \ +((adev)->virt.gim_feature & AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK) + static inline bool is_virtual_machine(void) { #if defined(CONFIG_X86) @@ -424,6 +447,10 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev); void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); void amdgpu_virt_init(struct amdgpu_device *adev); +int amdgpu_virt_init_critical_region(struct amdgpu_device *adev); +int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev, + int data_id, uint8_t *binary, u32 *size); + bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev); int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev); void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 676e24fb8864..a67285118c37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -484,15 +484,19 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec, spin_lock(&vm->status_lock); while (!list_is_head(prev->next, &vm->done)) { bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status); - spin_unlock(&vm->status_lock); bo = bo_va->base.bo; if (bo) { + amdgpu_bo_ref(bo); + spin_unlock(&vm->status_lock); + ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1); + amdgpu_bo_unref(&bo); if (unlikely(ret)) return ret; + + spin_lock(&vm->status_lock); } - spin_lock(&vm->status_lock); prev = prev->next; } spin_unlock(&vm->status_lock); @@ -779,7 +783,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, 
struct amdgpu_job *job, bool cleaner_shader_needed = false; bool pasid_mapping_needed = false; struct dma_fence *fence = NULL; - struct amdgpu_fence *af; unsigned int patch; int r; @@ -842,12 +845,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, } if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) { - r = amdgpu_fence_emit(ring, &fence, NULL, 0); + r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0); if (r) return r; - /* this is part of the job's context */ - af = container_of(fence, struct amdgpu_fence, base); - af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0; + fence = &job->hw_vm_fence->base; + /* get a ref for the job */ + dma_fence_get(fence); } if (vm_flush_needed) { @@ -1952,6 +1955,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; struct amdgpu_vm *vm = bo_va->base.vm; bool valid = true; + int r; saddr /= AMDGPU_GPU_PAGE_SIZE; @@ -1972,6 +1976,17 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, return -ENOENT; } + /* It's unlikely to happen that the mapping userq hasn't been idled + * during user requests GEM unmap IOCTL except for forcing the unmap + * from user space. + */ + if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) { + r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr); + if (unlikely(r == -EBUSY)) + dev_warn_once(adev->dev, + "Attempt to unmap an active userq buffer\n"); + } + list_del(&mapping->list); amdgpu_vm_it_remove(mapping, &vm->va); mapping->bo_va = NULL; @@ -2828,8 +2843,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) */ void amdgpu_vm_manager_init(struct amdgpu_device *adev) { - unsigned i; - /* Concurrent flushes are only possible starting with Vega10 and * are broken on Navi10 and Navi14. 
*/ @@ -2838,11 +2851,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) adev->asic_type == CHIP_NAVI14); amdgpu_vmid_mgr_init(adev); - adev->vm_manager.fence_context = - dma_fence_context_alloc(AMDGPU_MAX_RINGS); - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) - adev->vm_manager.seqno[i] = 0; - spin_lock_init(&adev->vm_manager.prt_lock); atomic_set(&adev->vm_manager.num_prt_users, 0); @@ -2908,8 +2916,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: /* We only have requirement to reserve vmid from gfxhub */ - amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0)); - break; + return amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0)); case AMDGPU_VM_OP_UNRESERVE_VMID: amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0)); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index cf0ec94e8a07..15d757c016cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -453,10 +453,6 @@ struct amdgpu_vm_manager { unsigned int first_kfd_vmid; bool concurrent_flush; - /* Handling of VM fences */ - u64 fence_context; - unsigned seqno[AMDGPU_MAX_RINGS]; - uint64_t max_pfn; uint32_t num_level; uint32_t block_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 1ede308a7c67..aad530c46a9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -298,6 +298,9 @@ int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num) { int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 }; + if (adev->gmc.xgmi.num_physical_nodes <= 1) + return -EINVAL; + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { case IP_VERSION(6, 4, 0): case IP_VERSION(6, 4, 1): @@ -333,6 +336,10 @@ static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link } i = global_link_num / n; + + if (!(adev->aid_mask & BIT(i))) + return U32_MAX; + addr += adev->asic_funcs->encode_ext_smn_addressing(i); return RREG32_PCIE_EXT(addr); @@ -342,6 +349,9 @@ int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num) { u32 xgmi_state_reg_val; + if (adev->gmc.xgmi.num_physical_nodes <= 1) + return -EINVAL; + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { case IP_VERSION(6, 4, 0): case IP_VERSION(6, 4, 1): @@ -958,28 +968,6 @@ static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_inf return 0; } -static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev, - struct amdgpu_device *peer_adev) -{ - struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info; - struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info; - - for (int i = 0; i < peer_info->num_nodes; i++) { - if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) { - for (int j = 0; j < top_info->num_nodes; j++) { - if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) { - peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops; - peer_info->nodes[i].is_sharing_enabled = - top_info->nodes[j].is_sharing_enabled; - peer_info->nodes[i].num_links = - top_info->nodes[j].num_links; - return; - } - } - } - } -} - int amdgpu_xgmi_add_device(struct amdgpu_device *adev) { struct psp_xgmi_topology_info *top_info; @@ -1065,11 +1053,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) /* To do: continue with some node failed or disable the whole hive*/ goto exit_unlock; } - - /* fill 
the topology info for peers instead of getting from PSP */ - list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - amdgpu_xgmi_fill_topology_info(adev, tmp_adev); - } } else { /* get latest topology info for each device from psp */ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 3a79ed7d8031..3cdb1e0eca37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -23,26 +23,84 @@ #ifndef AMDGV_SRIOV_MSG__H_ #define AMDGV_SRIOV_MSG__H_ -/* unit in kilobytes */ -#define AMD_SRIOV_MSG_VBIOS_OFFSET 0 -#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64 -#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB -#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4 -#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048 -#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2 -#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64 +#define AMD_SRIOV_MSG_SIZE_KB 1 + /* - * layout + * layout v1 * 0 64KB 65KB 66KB 68KB 132KB * | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ... * | 64KB | 1KB | 1KB | 2KB | 64KB | ... */ -#define AMD_SRIOV_MSG_SIZE_KB 1 -#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB -#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) -#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) -#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB) +/* + * layout v2 (offsets are dynamically allocated and the offsets below are examples) + * 0 1KB 64KB 65KB 66KB 68KB 132KB + * | INITD_H | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ... + * | 1KB | 64KB | 1KB | 1KB | 2KB | 64KB | ... 
+ * + * Note: PF2VF + VF2PF + Bad Page = DataExchange region (allocated contiguously) + */ + +/* v1 layout sizes */ +#define AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 64 +#define AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 1 +#define AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 1 +#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1 2 +#define AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 64 +#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 \ + (AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 + AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 + \ + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1) + +/* v1 offsets */ +#define AMD_SRIOV_MSG_VBIOS_OFFSET_V1 0 +#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1 AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 +#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048 +#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1 +#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 \ + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB) +#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 \ + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB) +#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 \ + (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1) +#define AMD_SRIOV_MSG_INIT_DATA_TOT_SIZE_KB_V1 \ + (AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 + AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 + \ + AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1) + +enum amd_sriov_crit_region_version { + GPU_CRIT_REGION_V1 = 1, + GPU_CRIT_REGION_V2 = 2, +}; + +/* v2 layout offset enum (in order of allocation) */ +enum amd_sriov_msg_table_id_enum { + AMD_SRIOV_MSG_IPD_TABLE_ID = 0, + AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, + AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID, + AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID, + AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID, + AMD_SRIOV_MSG_INITD_H_TABLE_ID, + AMD_SRIOV_MSG_MAX_TABLE_ID, +}; + +struct amd_sriov_msg_init_data_header { + char signature[4]; /* "INDA" */ + uint32_t version; + uint32_t checksum; + uint32_t initdata_offset; /* 0 */ + uint32_t initdata_size_in_kb; /* 5MB */ + uint32_t valid_tables; + uint32_t vbios_img_offset; + uint32_t vbios_img_size_in_kb; + uint32_t dataexchange_offset; + uint32_t dataexchange_size_in_kb; + uint32_t ras_tele_info_offset; + uint32_t ras_tele_info_size_in_kb; + uint32_t ip_discovery_offset; + uint32_t ip_discovery_size_in_kb; + uint32_t bad_page_info_offset; + uint32_t bad_page_size_in_kb; + uint32_t reserved[8]; +}; /* * PF2VF history log: @@ -102,7 +160,8 @@ union amd_sriov_msg_feature_flags { uint32_t ras_caps : 1; uint32_t ras_telemetry : 1; uint32_t ras_cper : 1; - uint32_t reserved : 20; + uint32_t xgmi_ta_ext_peer_link : 1; + uint32_t reserved : 19; } flags; uint32_t all; }; @@ -140,8 +199,9 @@ union amd_sriov_ras_caps { uint64_t block_jpeg : 1; uint64_t block_ih : 1; uint64_t block_mpio : 1; + uint64_t block_mmsch : 1; uint64_t poison_propogation_mode : 1; - uint64_t reserved : 44; + uint64_t reserved : 43; } bits; uint64_t all; }; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 41f4705bdbbd..876a3256dba4 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -156,6 +156,9 @@ static int cik_ih_irq_init(struct amdgpu_device *adev) /* enable irqs */ cik_ih_enable_interrupts(adev); + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + return 0; } @@ -192,6 +195,9 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); + if (ih == &adev->irq.ih_soft) + goto out; + if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) { wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK; /* When a ring buffer overflow happen start parsing interrupt 
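For reference, a minimal sketch of validating the v2 ("INDA") critical-region header introduced above before any table entry is consumed. The reduced struct mirrors only the leading fields of amd_sriov_msg_init_data_header and the helper name is illustrative, not part of this patch; in the driver the real parsing is handled through amdgpu_virt_init_critical_region() and the IS_SRIOV_CRIT_REGN_ENTRY_VALID() test declared in amdgpu_virt.h earlier in this series.

#include <stdint.h>
#include <string.h>

#define SRIOV_CRIT_DATA_SIGNATURE "INDA"  /* AMDGPU_SRIOV_CRIT_DATA_SIGNATURE */
#define CRIT_REGION_V2            2       /* GPU_CRIT_REGION_V2 */

/* Leading fields of amd_sriov_msg_init_data_header, trimmed for illustration. */
struct init_data_header_head {
        char     signature[4];        /* "INDA" */
        uint32_t version;             /* amd_sriov_crit_region_version */
        uint32_t checksum;
        uint32_t initdata_offset;
        uint32_t initdata_size_in_kb;
        uint32_t valid_tables;        /* one bit per amd_sriov_msg_table_id_enum */
};

/* Returns nonzero when the header is v2 and the given table id is populated. */
static int crit_region_table_present(const struct init_data_header_head *hdr,
                                     unsigned int table_id)
{
        if (memcmp(hdr->signature, SRIOV_CRIT_DATA_SIGNATURE, 4) != 0)
                return 0;             /* not a dynamic (v2) region header */
        if (hdr->version != CRIT_REGION_V2)
                return 0;             /* fall back to the fixed v1 layout */
        /* same test as IS_SRIOV_CRIT_REGN_ENTRY_VALID() */
        return !!(hdr->valid_tables & (1u << table_id));
}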
@@ -211,6 +217,8 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev, tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(mmIH_RB_CNTL, tmp); } + +out: return (wptr & ih->ptr_mask); } @@ -306,6 +314,10 @@ static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); + if (r) + return r; + r = amdgpu_irq_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 2f891fb846d5..bc7a2e06ab5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -157,6 +157,9 @@ static int cz_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ cz_ih_enable_interrupts(adev); + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + return 0; } @@ -194,6 +197,9 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); + if (ih == &adev->irq.ih_soft) + goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -297,6 +303,10 @@ static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); + if (r) + return r; + r = amdgpu_irq_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 8841d7213de4..d75b9940f248 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4956,7 +4956,8 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); adev->gfx.compute_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); - if (!amdgpu_sriov_vf(adev)) { + if (!amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } @@ -9951,6 +9952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush, }; static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index f2be16e700c4..8a2ee2de390f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1821,13 +1821,15 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(11, 0, 3): if ((adev->gfx.me_fw_version >= 2280) && (adev->gfx.mec_fw_version >= 2410) && - !amdgpu_sriov_vf(adev)) { + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } break; default: - if (!amdgpu_sriov_vf(adev)) { + if (!amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } @@ -2438,7 +2440,7 @@ static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev) if (version_minor == 3) gfx_v11_0_load_rlcp_rlcv_microcode(adev); } - + return 0; } @@ -3886,7 +3888,7 @@ static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev) } memcpy(fw, fw_data, fw_size); - + 
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); @@ -7318,6 +7320,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { .emit_wreg = gfx_v11_0_ring_emit_wreg, .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, }; static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 93fde0f9af87..d01d2712cf57 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -1548,7 +1548,8 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(12, 0, 1): if ((adev->gfx.me_fw_version >= 2660) && (adev->gfx.mec_fw_version >= 2920) && - !amdgpu_sriov_vf(adev)) { + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } @@ -5595,6 +5596,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = { .emit_wreg = gfx_v12_0_ring_emit_wreg, .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, }; static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 8a81713d97aa..1c87375e1dd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6944,6 +6944,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { .pad_ib = amdgpu_ring_generic_pad_ib, .emit_rreg = gfx_v8_0_ring_emit_rreg, .emit_wreg = gfx_v8_0_ring_emit_wreg, + .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, }; static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index dd19a97436db..0148d7ff34d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2409,7 +2409,7 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block) amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); adev->gfx.compute_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); - if (!amdgpu_sriov_vf(adev)) + if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset) adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); @@ -7586,6 +7586,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, }; static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c90cbe053ef3..cbb74ffc4792 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1149,14 +1149,16 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(9, 4, 3): case IP_VERSION(9, 4, 4): if ((adev->gfx.mec_fw_version >= 155) && - !amdgpu_sriov_vf(adev)) { + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { 
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; } break; case IP_VERSION(9, 5, 0): if ((adev->gfx.mec_fw_version >= 21) && - !amdgpu_sriov_vf(adev)) { + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; } @@ -2152,7 +2154,8 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) return 0; } -static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore) +static void gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, + bool restore) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; @@ -2186,8 +2189,6 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, b atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); amdgpu_ring_clear_ring(ring); } - - return 0; } static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id) @@ -2220,7 +2221,7 @@ static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) { struct amdgpu_ring *ring; - int i, r; + int i; gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id); @@ -2228,9 +2229,7 @@ static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); - if (r) - return r; + gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); } return amdgpu_gfx_enable_kcq(adev, xcc_id); @@ -3607,11 +3606,8 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, return r; } - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); - if (r) { - dev_err(adev->dev, "fail to init kcq\n"); - return r; - } + gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); + spin_lock_irqsave(&kiq->ring_lock, flags); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); if (r) { @@ -4800,6 +4796,7 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { .emit_wreg = gfx_v9_4_3_ring_emit_wreg, .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush, }; static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index d7499be8c4bf..ce6e04242c52 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -103,8 +103,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ? 
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index]; - bool retry_fault = !!(entry->src_data[1] & 0x80); - bool write_fault = !!(entry->src_data[1] & 0x20); + bool retry_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY); + bool write_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE); struct amdgpu_task_info *task_info; uint32_t status = 0; u64 addr; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 7bc389d9f5c4..ba59ee8e398a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -103,12 +103,41 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index]; + bool retry_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY); + bool write_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE); uint32_t status = 0; u64 addr; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; + if (retry_fault) { + /* Returning 1 here also prevents sending the IV to the KFD */ + + /* Process it only if it's the first fault for this address */ + if (entry->ih != &adev->irq.ih_soft && + amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, + entry->timestamp)) + return 1; + + /* Delegate it to a different ring if the hardware hasn't + * already done it. + */ + if (entry->ih == &adev->irq.ih) { + amdgpu_irq_delegate(adev, entry, 8); + return 1; + } + + /* Try to handle the recoverable page faults by filling page + * tables + */ + if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, + entry->timestamp, write_fault)) + return 1; + } + if (!amdgpu_sriov_vf(adev)) { /* * Issue a dummy read to wait for the status register to diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index f4a19357ccbc..7a9d6894e321 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -91,6 +91,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { struct amdgpu_vmhub *hub; + bool retry_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY); + bool write_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE); uint32_t status = 0; u64 addr; @@ -102,6 +106,31 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, else hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + if (retry_fault) { + /* Returning 1 here also prevents sending the IV to the KFD */ + + /* Process it only if it's the first fault for this address */ + if (entry->ih != &adev->irq.ih_soft && + amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, + entry->timestamp)) + return 1; + + /* Delegate it to a different ring if the hardware hasn't + * already done it. 
+ */ + if (entry->ih == &adev->irq.ih) { + amdgpu_irq_delegate(adev, entry, 8); + return 1; + } + + /* Try to handle the recoverable page faults by filling page + * tables + */ + if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, + entry->timestamp, write_fault)) + return 1; + } + if (!amdgpu_sriov_vf(adev)) { /* * Issue a dummy read to wait for the status register to @@ -312,9 +341,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, return; } - mutex_lock(&adev->mman.gtt_window_lock); gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0); - mutex_unlock(&adev->mman.gtt_window_lock); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index f6ad7911f1e6..a8ec95f42926 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -213,7 +213,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, mc, base); - amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); + amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW); } static void gmc_v6_0_mc_program(struct amdgpu_device *adev) @@ -610,23 +610,21 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) } static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, - u32 status, u32 addr, u32 mc_client) + u32 status, u32 addr) { u32 mc_id; u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, PROTECTIONS); - char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, - (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, MEMORY_CLIENT_ID); - dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", + dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from %d\n", protections, vmid, addr, REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, MEMORY_CLIENT_RW) ? 
- "write" : "read", block, mc_client, mc_id); + "write" : "read", mc_id); } static const u32 mc_cg_registers[] = { @@ -1072,6 +1070,12 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev, { u32 addr, status; + /* Delegate to the soft IRQ handler ring */ + if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) { + amdgpu_irq_delegate(adev, entry, 4); + return 1; + } + addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); @@ -1079,6 +1083,10 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev, if (!addr && !status) return 0; + amdgpu_vm_update_fault_cache(adev, entry->pasid, + ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT, + status, AMDGPU_GFXHUB(0)); + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST) gmc_v6_0_set_fault_enable_default(adev, false); @@ -1089,7 +1097,7 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev, addr); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); - gmc_v6_0_vm_decode_fault(adev, status, addr, 0); + gmc_v6_0_vm_decode_fault(adev, status, addr); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0e5e54d0a9a5..fbd0bf147f50 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1261,6 +1261,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, { u32 addr, status, mc_client, vmid; + /* Delegate to the soft IRQ handler ring */ + if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) { + amdgpu_irq_delegate(adev, entry, 4); + return 1; + } + addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index e1509480dfc2..6551b60f2584 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1439,6 +1439,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, return 0; } + /* Delegate to the soft IRQ handler ring */ + if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) { + amdgpu_irq_delegate(adev, entry, 4); + return 1; + } + addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 0d1dd587db5f..8ad7519f7b58 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -544,8 +544,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - bool retry_fault = !!(entry->src_data[1] & 0x80); - bool write_fault = !!(entry->src_data[1] & 0x20); + bool retry_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY); + bool write_fault = !!(entry->src_data[1] & + AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE); uint32_t status = 0, cid = 0, rw = 0, fed = 0; struct amdgpu_task_info *task_info; struct amdgpu_vmhub *hub; @@ -1843,6 +1845,10 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) && + 
adev->rev_id == 0x3) + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; + if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) { vram_info = RREG32(regBIF_BIOS_SCRATCH_4); adev->gmc.vram_vendor = vram_info & 0xF; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 1317ede131b6..01cadf898c00 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -157,6 +157,9 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ iceland_ih_enable_interrupts(adev); + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + return 0; } @@ -194,6 +197,9 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); + if (ih == &adev->irq.ih_soft) + goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -296,6 +302,10 @@ static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); + if (r) + return r; + r = amdgpu_irq_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index 1cd9eaeef38f..64cae89357b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -205,10 +205,11 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev, int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev); struct mes_detect_and_reset_queue_input input; struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; unsigned int hung_db_num = 0; - int queue_id, r, i; + unsigned long queue_id; u32 db_array[8]; + bool found_hung_queue = false; + int r, i; if (db_array_size > 8) { dev_err(adev->dev, "DB array size (%d vs 8) too small\n", @@ -227,22 +228,26 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev, if (r) { dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r); } else if (hung_db_num) { - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (queue->queue_type == queue_type) { - for (i = 0; i < hung_db_num; i++) { - if (queue->doorbell_index == db_array[i]) { - queue->state = AMDGPU_USERQ_STATE_HUNG; - atomic_inc(&adev->gpu_reset_counter); - amdgpu_userq_fence_driver_force_completion(queue); - drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL); - } + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + if (queue->queue_type == queue_type) { + for (i = 0; i < hung_db_num; i++) { + if (queue->doorbell_index == db_array[i]) { + queue->state = AMDGPU_USERQ_STATE_HUNG; + found_hung_queue = true; + atomic_inc(&adev->gpu_reset_counter); + amdgpu_userq_fence_driver_force_completion(queue); + drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL); } } } } } + if (found_hung_queue) { + /* Resume scheduling after hang recovery */ + r = amdgpu_mes_resume(adev); + } + return r; } @@ -254,7 +259,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type]; struct drm_amdgpu_userq_in *mqd_user = args_in; struct amdgpu_mqd_prop *userq_props; - struct amdgpu_gfx_shadow_info shadow_info; int r; /* Structure to initialize MQD for userqueue using generic MQD init function */ @@ -280,8 +284,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, 
userq_props->doorbell_index = queue->doorbell_index; userq_props->fence_address = queue->fence_drv->gpu_addr; - if (adev->gfx.funcs->get_gfx_shadow_info) - adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true); if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) { struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd; @@ -298,8 +300,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, goto free_mqd; } - if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va, - max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE))) + r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va, + 2048); + if (r) goto free_mqd; userq_props->eop_gpu_addr = compute_mqd->eop_va; @@ -311,6 +314,14 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, kfree(compute_mqd); } else if (queue->queue_type == AMDGPU_HW_IP_GFX) { struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11; + struct amdgpu_gfx_shadow_info shadow_info; + + if (adev->gfx.funcs->get_gfx_shadow_info) { + adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true); + } else { + r = -EINVAL; + goto free_mqd; + } if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) { DRM_ERROR("Invalid GFX MQD\n"); @@ -330,8 +341,13 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, userq_props->tmz_queue = mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE; - if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va, - shadow_info.shadow_size)) + r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va, + shadow_info.shadow_size); + if (r) + goto free_mqd; + r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va, + shadow_info.csa_size); + if (r) goto free_mqd; kfree(mqd_gfx_v11); @@ -350,9 +366,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, r = -ENOMEM; goto free_mqd; } - - if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va, - shadow_info.csa_size)) + r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va, + 32); + if (r) goto free_mqd; userq_props->csa_addr = mqd_sdma_v11->csa_va; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index da575bb1377f..3a52754b5cad 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -369,6 +369,7 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes, struct mes_remove_queue_input *input) { union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt; + uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK; memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt)); @@ -379,6 +380,9 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes, mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset; mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr; + if (mes_rev >= 0x60) + mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset; + return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt), offsetof(union MESAPI__REMOVE_QUEUE, api_status)); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index 7f3512d9de07..744e95d3984a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -361,6 +361,7 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes, struct mes_remove_queue_input *input) { union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt; + uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK; 
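The remove_queue_after_reset plumbing in the MES 11 and MES 12 remove_hw_queue paths is gated on the scheduler firmware revision, which both versions extract from the low bits of mes->sched_version with AMDGPU_MES_VERSION_MASK (0x60 for MES 11 and 0x5a for MES 12 in this patch); presumably older firmware would not recognize the new packet field, so it is left zeroed. A hypothetical helper, shown only to make the guard explicit and not part of the patch:

static bool mes_fw_rev_at_least(const struct amdgpu_mes *mes, uint32_t min_rev)
{
        /* the low bits of sched_version carry the MES scheduler firmware revision */
        return (mes->sched_version & AMDGPU_MES_VERSION_MASK) >= min_rev;
}

/* e.g. mes_fw_rev_at_least(mes, 0x60) on MES 11.0, mes_fw_rev_at_least(mes, 0x5a) on MES 12.0 */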
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt)); @@ -371,6 +372,9 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes, mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset; mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr; + if (mes_rev >= 0x5a) + mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset; + return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE, &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt), diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index e5282a5d05d9..e7cd07383d56 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -173,13 +173,17 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev, enum idh_request req, u32 data1, u32 data2, u32 data3) { - int r, retry = 1; + struct amdgpu_virt *virt = &adev->virt; + int r = 0, retry = 1; enum idh_event event = -1; + mutex_lock(&virt->access_req_mutex); send_request: - if (amdgpu_ras_is_rma(adev)) - return -ENODEV; + if (amdgpu_ras_is_rma(adev)) { + r = -ENODEV; + goto out; + } xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3); @@ -217,17 +221,25 @@ static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev, if (req != IDH_REQ_GPU_INIT_DATA) { dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r); - return r; + goto out; } else /* host doesn't support REQ_GPU_INIT_DATA handshake */ adev->virt.req_init_data_ver = 0; } else { if (req == IDH_REQ_GPU_INIT_DATA) { - adev->virt.req_init_data_ver = - RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1); - - /* assume V1 in case host doesn't set version number */ - if (adev->virt.req_init_data_ver < 1) - adev->virt.req_init_data_ver = 1; + switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) { + case GPU_CRIT_REGION_V2: + adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2; + adev->virt.init_data_header.offset = + RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2); + adev->virt.init_data_header.size_kb = + RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3); + break; + default: + adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1; + adev->virt.init_data_header.offset = -1; + adev->virt.init_data_header.size_kb = 0; + break; + } } } @@ -238,7 +250,10 @@ static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev, } } - return 0; +out: + mutex_unlock(&virt->access_req_mutex); + + return r; } static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, @@ -285,7 +300,8 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev, static int xgpu_nv_request_init_data(struct amdgpu_device *adev) { - return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); + return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA, + 0, GPU_CRIT_REGION_V2, 0); } static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index 1c22bc11c1f8..bdfd2917e3ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -41,19 +41,21 @@ static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev) static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev) { - u32 tmp; + u32 rev_id; - tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0)); - /* If it is VF or subrevision holds a non-zero value, that should be used */ - if 
(tmp || amdgpu_sriov_vf(adev)) - return tmp; + /* + * fetch the sub-revision field from the IP-discovery table + * (returns zero if the table entry is not populated). + */ + if (amdgpu_sriov_vf(adev)) { + rev_id = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0)); + } else { + rev_id = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); + rev_id = REG_GET_FIELD(rev_id, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, + STRAP_ATI_REV_ID_DEV0_F0); + } - /* If discovery subrev is not updated, use register version */ - tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); - tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, - STRAP_ATI_REV_ID_DEV0_F0); - - return tmp; + return rev_id; } static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 36b1ca73c2ed..a1443990d5c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -2361,11 +2361,15 @@ static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev) switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): case IP_VERSION(9, 4, 4): - if ((adev->gfx.mec_fw_version >= 0xb0) && amdgpu_dpm_reset_sdma_is_supported(adev)) + if ((adev->gfx.mec_fw_version >= 0xb0) && + amdgpu_dpm_reset_sdma_is_supported(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; case IP_VERSION(9, 5, 0): - if ((adev->gfx.mec_fw_version >= 0xf) && amdgpu_dpm_reset_sdma_is_supported(adev)) + if ((adev->gfx.mec_fw_version >= 0xf) && + amdgpu_dpm_reset_sdma_is_supported(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 7dc67a22a7a0..8ddc4df06a1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1429,7 +1429,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(5, 0, 2): case IP_VERSION(5, 0, 5): if ((adev->sdma.instance[0].fw_version >= 35) && - !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 3bd44c24f692..51101b0aa2fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -342,7 +342,7 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; if (ring->me > 1) { - amdgpu_asic_flush_hdp(adev, ring); + amdgpu_hdp_flush(adev, ring); } else { ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; @@ -1348,12 +1348,14 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(5, 2, 3): case IP_VERSION(5, 2, 4): if ((adev->sdma.instance[0].fw_version >= 76) && - !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; case IP_VERSION(5, 2, 5): if ((adev->sdma.instance[0].fw_version >= 34) && - !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: diff --git 
a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index db6e41967f12..217040044987 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -1356,7 +1356,8 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): if ((adev->sdma.instance[0].fw_version >= 21) && - !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: @@ -1389,7 +1390,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; break; case IP_VERSION(6, 0, 3): - if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq) + if (adev->sdma.instance[0].fw_version >= 29 && !adev->sdma.disable_uq) adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; break; case IP_VERSION(6, 1, 0): diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index 326ecc8d37d2..2b81344dcd66 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -1337,7 +1337,8 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) adev->sdma.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); - if (!amdgpu_sriov_vf(adev)) + if (!amdgpu_sriov_vf(adev) && + !adev->debug_disable_gpu_ring_reset) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_sdma_sysfs_reset_mask_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index e0f139de7991..f7288372ee61 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -45,6 +45,7 @@ #include "dce_v6_0.h" #include "si.h" #include "uvd_v3_1.h" +#include "vce_v1_0.h" #include "uvd/uvd_4_0_d.h" @@ -921,8 +922,6 @@ static const u32 hainan_mgcg_cgcg_init[] = 0x3630, 0xfffffff0, 0x00000100, }; -/* XXX: update when we support VCE */ -#if 0 /* tahiti, pitcairn, verde */ static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] = { @@ -940,13 +939,7 @@ static const struct amdgpu_video_codecs tahiti_video_codecs_encode = .codec_count = ARRAY_SIZE(tahiti_video_codecs_encode_array), .codec_array = tahiti_video_codecs_encode_array, }; -#else -static const struct amdgpu_video_codecs tahiti_video_codecs_encode = -{ - .codec_count = 0, - .codec_array = NULL, -}; -#endif + /* oland and hainan don't support encode */ static const struct amdgpu_video_codecs hainan_video_codecs_encode = { @@ -1925,6 +1918,14 @@ static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) ~VCEPLL_BYPASS_EN_MASK); if (!evclk || !ecclk) { + /* + * On some chips, the PLL takes way too long to get out of + * sleep mode, causing a timeout waiting on CTLACK/CTLACK2. + * Leave the PLL running in bypass mode. 
+ */ + if (adev->pdev->device == 0x6780) + return 0; + /* Keep the Bypass mode, put PLL to sleep */ WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK, ~VCEPLL_SLEEP_MASK); @@ -2717,7 +2718,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev) else amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); - /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */ + amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); break; case CHIP_OLAND: amdgpu_device_ip_block_add(adev, &si_common_ip_block); @@ -2735,7 +2736,6 @@ int si_set_ip_blocks(struct amdgpu_device *adev) else amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); - /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */ break; case CHIP_HAINAN: amdgpu_device_ip_block_add(adev, &si_common_ip_block); diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 1df00f8a2406..66f650f87243 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -96,6 +96,9 @@ static int si_ih_irq_init(struct amdgpu_device *adev) pci_set_master(adev->pdev); si_ih_enable_interrupts(adev); + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + return 0; } @@ -112,6 +115,9 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); + if (ih == &adev->irq.ih_soft) + goto out; + if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) { wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK; dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", @@ -127,6 +133,8 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev, tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(IH_RB_CNTL, tmp); } + +out: return (wptr & ih->ptr_mask); } @@ -175,6 +183,10 @@ static int si_ih_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); + if (r) + return r; + return amdgpu_irq_init(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h index cbd4f8951cfa..561462a8332e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sid.h +++ b/drivers/gpu/drm/amd/amdgpu/sid.h @@ -582,45 +582,6 @@ #define DMA_PACKET_NOP 0xf /* VCE */ -#define VCE_STATUS 0x20004 -#define VCE_VCPU_CNTL 0x20014 -#define VCE_CLK_EN (1 << 0) -#define VCE_VCPU_CACHE_OFFSET0 0x20024 -#define VCE_VCPU_CACHE_SIZE0 0x20028 -#define VCE_VCPU_CACHE_OFFSET1 0x2002c -#define VCE_VCPU_CACHE_SIZE1 0x20030 -#define VCE_VCPU_CACHE_OFFSET2 0x20034 -#define VCE_VCPU_CACHE_SIZE2 0x20038 -#define VCE_SOFT_RESET 0x20120 -#define VCE_ECPU_SOFT_RESET (1 << 0) -#define VCE_FME_SOFT_RESET (1 << 2) -#define VCE_RB_BASE_LO2 0x2016c -#define VCE_RB_BASE_HI2 0x20170 -#define VCE_RB_SIZE2 0x20174 -#define VCE_RB_RPTR2 0x20178 -#define VCE_RB_WPTR2 0x2017c -#define VCE_RB_BASE_LO 0x20180 -#define VCE_RB_BASE_HI 0x20184 -#define VCE_RB_SIZE 0x20188 -#define VCE_RB_RPTR 0x2018c -#define VCE_RB_WPTR 0x20190 -#define VCE_CLOCK_GATING_A 0x202f8 -#define VCE_CLOCK_GATING_B 0x202fc -#define VCE_UENC_CLOCK_GATING 0x205bc -#define VCE_UENC_REG_CLOCK_GATING 0x205c0 -#define VCE_FW_REG_STATUS 0x20e10 -# define VCE_FW_REG_STATUS_BUSY (1 << 0) -# define VCE_FW_REG_STATUS_PASS (1 << 3) -# define VCE_FW_REG_STATUS_DONE (1 << 11) -#define VCE_LMI_FW_START_KEYSEL 0x20e18 -#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20 -#define VCE_LMI_CTRL2 0x20e74 -#define VCE_LMI_CTRL 0x20e98 -#define VCE_LMI_VM_CTRL 0x20ea0 -#define VCE_LMI_SWAP_CNTL 0x20eb4 
-#define VCE_LMI_SWAP_CNTL1 0x20eb8 -#define VCE_LMI_CACHE_CTRL 0x20ef4 - #define VCE_CMD_NO_OP 0x00000000 #define VCE_CMD_END 0x00000001 #define VCE_CMD_IB 0x00000002 @@ -629,7 +590,6 @@ #define VCE_CMD_IB_AUTO 0x00000005 #define VCE_CMD_SEMAPHORE 0x00000006 - //#dce stupp /* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */ #define CRTC0_REGISTER_OFFSET (0x1b7c - 0x1b7c) //(0x6df0 - 0x6df0)/4 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 9785fada4fa7..42f5d9c0e3af 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -853,10 +853,6 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev) { u32 sol_reg; - /* CP hangs in IGT reloading test on RN, reset to WA */ - if (adev->asic_type == CHIP_RENOIR) - return true; - if (amdgpu_gmc_need_reset_on_init(adev)) return true; if (amdgpu_psp_tos_reload_needed(adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 7d17ae56f901..ee8038df17e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -159,6 +159,9 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ tonga_ih_enable_interrupts(adev); + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + return 0; } @@ -196,6 +199,9 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); + if (ih == &adev->irq.ih_soft) + goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -306,6 +312,10 @@ static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); + if (r) + return r; + adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 8dc32787d625..0f5b1719fda5 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -711,6 +711,19 @@ static uint32_t umc_v12_0_get_die_id(struct amdgpu_device *adev, return die; } +static void umc_v12_0_mca_ipid_parse(struct amdgpu_device *adev, uint64_t ipid, + uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid) +{ + if (did) + *did = MCA_IPID_2_DIE_ID(ipid); + if (ch) + *ch = MCA_IPID_2_UMC_CH(ipid); + if (umc_inst) + *umc_inst = MCA_IPID_2_UMC_INST(ipid); + if (sid) + *sid = MCA_IPID_2_SOCKET_ID(ipid); +} + struct amdgpu_umc_ras umc_v12_0_ras = { .ras_block = { .hw_ops = &umc_v12_0_ras_hw_ops, @@ -724,5 +737,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = { .convert_ras_err_addr = umc_v12_0_convert_error_address, .get_die_id_from_pa = umc_v12_0_get_die_id, .get_retire_flip_bits = umc_v12_0_get_retire_flip_bits, + .mca_ipid_parse = umc_v12_0_mca_ipid_parse, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c new file mode 100644 index 000000000000..9ae424618556 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c @@ -0,0 +1,839 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * Copyright 2025 Valve Corporation + * Copyright 2025 Alexandre Demers + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * Authors: Christian König + * Timur Kristóf + * Alexandre Demers + */ + +#include + +#include "amdgpu.h" +#include "amdgpu_vce.h" +#include "amdgpu_gart.h" +#include "sid.h" +#include "vce_v1_0.h" +#include "vce/vce_1_0_d.h" +#include "vce/vce_1_0_sh_mask.h" +#include "oss/oss_1_0_d.h" +#include "oss/oss_1_0_sh_mask.h" + +#define VCE_V1_0_FW_SIZE (256 * 1024) +#define VCE_V1_0_STACK_SIZE (64 * 1024) +#define VCE_V1_0_DATA_SIZE (7808 * (AMDGPU_MAX_VCE_HANDLES + 1)) +#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 + +#define VCE_V1_0_GART_PAGE_START \ + (AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS) +#define VCE_V1_0_GART_ADDR_START \ + (VCE_V1_0_GART_PAGE_START * AMDGPU_GPU_PAGE_SIZE) + +static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev); +static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev); + +struct vce_v1_0_fw_signature { + int32_t offset; + uint32_t length; + int32_t number; + struct { + uint32_t chip_id; + uint32_t keyselect; + uint32_t nonce[4]; + uint32_t sigval[4]; + } val[8]; +}; + +/** + * vce_v1_0_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t vce_v1_0_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->me == 0) + return RREG32(mmVCE_RB_RPTR); + else + return RREG32(mmVCE_RB_RPTR2); +} + +/** + * vce_v1_0_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t vce_v1_0_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->me == 0) + return RREG32(mmVCE_RB_WPTR); + else + return RREG32(mmVCE_RB_WPTR2); +} + +/** + * vce_v1_0_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void vce_v1_0_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->me == 0) + WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); + else + WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); +} + +static int vce_v1_0_lmi_clean(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + if (RREG32(mmVCE_LMI_STATUS) & 0x337f) + return 0; + + mdelay(10); + } + } + + return -ETIMEDOUT; +} + 
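vce_v1_0_lmi_clean() above and vce_v1_0_firmware_loaded() below both rely on the same bounded-polling idiom: spin on a status register in small mdelay() steps and give up with -ETIMEDOUT once the retry budget is exhausted (the firmware-loaded variant additionally pulses the ECPU soft reset between attempts). A generic sketch of that idiom; the helper name and parameters are illustrative and do not appear in this file:

static int vce_v1_0_poll_reg(struct amdgpu_device *adev, uint32_t reg,
                             uint32_t mask, unsigned int tries,
                             unsigned int step_ms)
{
        /* wait until any of the requested bits is set, or time out */
        while (tries--) {
                if (RREG32(reg) & mask)
                        return 0;
                mdelay(step_ms);
        }

        return -ETIMEDOUT;
}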
+static int vce_v1_0_firmware_loaded(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + if (RREG32(mmVCE_STATUS) & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) + return 0; + mdelay(10); + } + + dev_err(adev->dev, "VCE not responding, trying to reset the ECPU\n"); + + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmVCE_SOFT_RESET, 0, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + } + + return -ETIMEDOUT; +} + +static void vce_v1_0_init_cg(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32(mmVCE_CLOCK_GATING_A); + tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK; + WREG32(mmVCE_CLOCK_GATING_A, tmp); + + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp |= 0x1e; + tmp &= ~0xe100e1; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp &= ~0xff9ff000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp &= ~0x3ff; + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); +} + +/** + * vce_v1_0_load_fw_signature - load firmware signature into VCPU BO + * + * @adev: amdgpu_device pointer + * + * The VCE1 firmware validation mechanism needs a firmware signature. + * This function finds the signature appropriate for the current + * ASIC and writes that into the VCPU BO. + */ +static int vce_v1_0_load_fw_signature(struct amdgpu_device *adev) +{ + const struct common_firmware_header *hdr; + struct vce_v1_0_fw_signature *sign; + unsigned int ucode_offset; + uint32_t chip_id; + u32 *cpu_addr; + int i; + + hdr = (const struct common_firmware_header *)adev->vce.fw->data; + ucode_offset = le32_to_cpu(hdr->ucode_array_offset_bytes); + cpu_addr = adev->vce.cpu_addr; + + sign = (void *)adev->vce.fw->data + ucode_offset; + + switch (adev->asic_type) { + case CHIP_TAHITI: + chip_id = 0x01000014; + break; + case CHIP_VERDE: + chip_id = 0x01000015; + break; + case CHIP_PITCAIRN: + chip_id = 0x01000016; + break; + default: + dev_err(adev->dev, "asic_type %#010x was not found!", adev->asic_type); + return -EINVAL; + } + + for (i = 0; i < le32_to_cpu(sign->number); ++i) { + if (le32_to_cpu(sign->val[i].chip_id) == chip_id) + break; + } + + if (i == le32_to_cpu(sign->number)) { + dev_err(adev->dev, "chip_id 0x%x for %s was not found in VCE firmware", + chip_id, amdgpu_asic_name[adev->asic_type]); + return -EINVAL; + } + + cpu_addr += (256 - 64) / 4; + memcpy_toio(&cpu_addr[0], &sign->val[i].nonce[0], 16); + cpu_addr[4] = cpu_to_le32(le32_to_cpu(sign->length) + 64); + + memset_io(&cpu_addr[5], 0, 44); + memcpy_toio(&cpu_addr[16], &sign[1], hdr->ucode_size_bytes - sizeof(*sign)); + + cpu_addr += (le32_to_cpu(sign->length) + 64) / 4; + memcpy_toio(&cpu_addr[0], &sign->val[i].sigval[0], 16); + + adev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect); + + return 0; +} + +static int vce_v1_0_wait_for_fw_validation(struct amdgpu_device *adev) +{ + int i; + + dev_dbg(adev->dev, "VCE keyselect: %d", adev->vce.keyselect); + WREG32(mmVCE_LMI_FW_START_KEYSEL, adev->vce.keyselect); + + for (i = 0; i < 10; ++i) { + mdelay(10); + if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK) + break; + } + + if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK)) { + dev_err(adev->dev, "VCE FW validation timeout\n"); + return -ETIMEDOUT; + } + + if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__PASS_MASK)) { + dev_err(adev->dev, "VCE FW validation failed\n"); + return -EINVAL; + } + + for (i = 0; i < 10; 
++i) { + mdelay(10); + if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK)) + break; + } + + if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK) { + dev_err(adev->dev, "VCE FW busy timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int vce_v1_0_mc_resume(struct amdgpu_device *adev) +{ + uint32_t offset; + uint32_t size; + + /* + * When the keyselect is already set, don't perturb VCE FW. + * Validation seems to always fail the second time. + */ + if (RREG32(mmVCE_LMI_FW_START_KEYSEL)) { + dev_dbg(adev->dev, "keyselect already set: 0x%x (on CPU: 0x%x)\n", + RREG32(mmVCE_LMI_FW_START_KEYSEL), adev->vce.keyselect); + + WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); + return 0; + } + + WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); + WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); + WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); + WREG32(mmVCE_CLOCK_GATING_B, 0); + + WREG32_P(mmVCE_LMI_FW_PERIODIC_CTRL, 0x4, ~0x4); + + WREG32(mmVCE_LMI_CTRL, 0x00398000); + + WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); + WREG32(mmVCE_LMI_SWAP_CNTL, 0); + WREG32(mmVCE_LMI_SWAP_CNTL1, 0); + WREG32(mmVCE_LMI_VM_CTRL, 0); + + WREG32(mmVCE_VCPU_SCRATCH7, AMDGPU_MAX_VCE_HANDLES); + + offset = adev->vce.gpu_addr + AMDGPU_VCE_FIRMWARE_OFFSET; + size = VCE_V1_0_FW_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE0, size); + + offset += size; + size = VCE_V1_0_STACK_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE1, size); + + offset += size; + size = VCE_V1_0_DATA_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE2, size); + + WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); + + return vce_v1_0_wait_for_fw_validation(adev); +} + +/** + * vce_v1_0_is_idle() - Check idle status of VCE1 IP block + * + * @ip_block: amdgpu_ip_block pointer + * + * Check whether VCE is busy according to VCE_STATUS. + * Also check whether the SRBM thinks VCE is busy, although + * SRBM_STATUS.VCE_BUSY seems to be bogus because it + * appears to mirror the VCE_STATUS.VCPU_REPORT_FW_LOADED bit. 
+ */ +static bool vce_v1_0_is_idle(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + bool busy = + (RREG32(mmVCE_STATUS) & (VCE_STATUS__JOB_BUSY_MASK | VCE_STATUS__UENC_BUSY_MASK)) || + (RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); + + return !busy; +} + +static int vce_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + unsigned int i; + + for (i = 0; i < adev->usec_timeout; i++) { + udelay(1); + if (vce_v1_0_is_idle(ip_block)) + return 0; + } + return -ETIMEDOUT; +} + +/** + * vce_v1_0_start - start VCE block + * + * @adev: amdgpu_device pointer + * + * Setup and start the VCE block + */ +static int vce_v1_0_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + WREG32_P(mmVCE_STATUS, 1, ~1); + + r = vce_v1_0_mc_resume(adev); + if (r) + return r; + + ring = &adev->vce.ring[0]; + WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32(mmVCE_RB_BASE_LO, lower_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); + + ring = &adev->vce.ring[1]; + WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32(mmVCE_RB_BASE_LO2, lower_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); + + WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, + ~VCE_VCPU_CNTL__CLK_EN_MASK); + + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK | + VCE_SOFT_RESET__FME_SOFT_RESET_MASK, + ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK | + VCE_SOFT_RESET__FME_SOFT_RESET_MASK)); + + mdelay(100); + + WREG32_P(mmVCE_SOFT_RESET, 0, + ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK | + VCE_SOFT_RESET__FME_SOFT_RESET_MASK)); + + r = vce_v1_0_firmware_loaded(adev); + + /* Clear VCE_STATUS, otherwise SRBM thinks VCE1 is busy. 
*/ + WREG32(mmVCE_STATUS, 0); + + if (r) { + dev_err(adev->dev, "VCE not responding, giving up\n"); + return r; + } + + return 0; +} + +static int vce_v1_0_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ip_block *ip_block; + int status; + int i; + + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE); + if (!ip_block) + return -EINVAL; + + if (vce_v1_0_lmi_clean(adev)) + dev_warn(adev->dev, "VCE not idle\n"); + + if (vce_v1_0_wait_for_idle(ip_block)) + dev_warn(adev->dev, "VCE busy: VCE_STATUS=0x%x, SRBM_STATUS2=0x%x\n", + RREG32(mmVCE_STATUS), RREG32(mmSRBM_STATUS2)); + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8)); + + for (i = 0; i < 100; ++i) { + status = RREG32(mmVCE_LMI_STATUS); + if (status & 0x240) + break; + mdelay(1); + } + + WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); + + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK | + VCE_SOFT_RESET__FME_SOFT_RESET_MASK, + ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK | + VCE_SOFT_RESET__FME_SOFT_RESET_MASK)); + + WREG32(mmVCE_STATUS, 0); + + return 0; +} + +static void vce_v1_0_enable_mgcg(struct amdgpu_device *adev, bool enable) +{ + u32 tmp; + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) { + tmp = RREG32(mmVCE_CLOCK_GATING_A); + tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK; + WREG32(mmVCE_CLOCK_GATING_A, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp &= ~0x1ff000; + tmp |= 0xff800000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp &= ~0x3ff; + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + } else { + tmp = RREG32(mmVCE_CLOCK_GATING_A); + tmp &= ~VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK; + WREG32(mmVCE_CLOCK_GATING_A, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp |= 0x1ff000; + tmp &= ~0xff800000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp |= 0x3ff; + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + } +} + +static int vce_v1_0_early_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r; + + r = amdgpu_vce_early_init(adev); + if (r) + return r; + + adev->vce.num_rings = 2; + + vce_v1_0_set_ring_funcs(adev); + vce_v1_0_set_irq_funcs(adev); + + return 0; +} + +/** + * vce_v1_0_ensure_vcpu_bo_32bit_addr() - ensure the VCPU BO has a 32-bit address + * + * @adev: amdgpu_device pointer + * + * Due to various hardware limitations, the VCE1 requires + * the VCPU BO to be in the low 32 bit address range. + * Ensure that the VCPU BO has a 32-bit GPU address, + * or return an error code when that isn't possible. + * + * To accomodate that, we put GART to the LOW address range + * and reserve some GART pages where we map the VCPU BO, + * so that it gets a 32-bit address. + */ +static int vce_v1_0_ensure_vcpu_bo_32bit_addr(struct amdgpu_device *adev) +{ + u64 gpu_addr = amdgpu_bo_gpu_offset(adev->vce.vcpu_bo); + u64 bo_size = amdgpu_bo_size(adev->vce.vcpu_bo); + u64 max_vcpu_bo_addr = 0xffffffff - bo_size; + u64 num_pages = ALIGN(bo_size, AMDGPU_GPU_PAGE_SIZE) / AMDGPU_GPU_PAGE_SIZE; + u64 pa = amdgpu_gmc_vram_pa(adev, adev->vce.vcpu_bo); + u64 flags = AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_VALID; + + /* + * Check if the VCPU BO already has a 32-bit address. + * Eg. if MC is configured to put VRAM in the low address range. + */ + if (gpu_addr <= max_vcpu_bo_addr) + return 0; + + /* Check if we can map the VCPU BO in GART to a 32-bit address. 
*/ + if (adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START > max_vcpu_bo_addr) + return -EINVAL; + + amdgpu_gart_map_vram_range(adev, pa, VCE_V1_0_GART_PAGE_START, + num_pages, flags, adev->gart.ptr); + adev->vce.gpu_addr = adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START; + if (adev->vce.gpu_addr > max_vcpu_bo_addr) + return -EINVAL; + + return 0; +} + +static int vce_v1_0_sw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ring *ring; + int r, i; + + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq); + if (r) + return r; + + r = amdgpu_vce_sw_init(adev, VCE_V1_0_FW_SIZE + + VCE_V1_0_STACK_SIZE + VCE_V1_0_DATA_SIZE); + if (r) + return r; + + r = amdgpu_vce_resume(adev); + if (r) + return r; + r = vce_v1_0_load_fw_signature(adev); + if (r) + return r; + r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev); + if (r) + return r; + + for (i = 0; i < adev->vce.num_rings; i++) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); + + ring = &adev->vce.ring[i]; + sprintf(ring->name, "vce%d", i); + r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, + hw_prio, NULL); + if (r) + return r; + } + + return r; +} + +static int vce_v1_0_sw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r; + + r = amdgpu_vce_suspend(adev); + if (r) + return r; + + return amdgpu_vce_sw_fini(adev); +} + +/** + * vce_v1_0_hw_init - start and test VCE block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +static int vce_v1_0_hw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_vce(adev, true); + else + amdgpu_asic_set_vce_clocks(adev, 10000, 10000); + + for (i = 0; i < adev->vce.num_rings; i++) { + r = amdgpu_ring_test_helper(&adev->vce.ring[i]); + if (r) + return r; + } + + dev_info(adev->dev, "VCE initialized successfully.\n"); + + return 0; +} + +static int vce_v1_0_hw_fini(struct amdgpu_ip_block *ip_block) +{ + int r; + + r = vce_v1_0_stop(ip_block->adev); + if (r) + return r; + + cancel_delayed_work_sync(&ip_block->adev->vce.idle_work); + return 0; +} + +static int vce_v1_0_suspend(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r; + + /* + * Proper cleanups before halting the HW engine: + * - cancel the delayed idle work + * - enable powergating + * - enable clockgating + * - disable dpm + * + * TODO: to align with the VCN implementation, move the + * jobs for clockgating/powergating/dpm setting to + * ->set_powergating_state(). 
+ */ + cancel_delayed_work_sync(&adev->vce.idle_work); + + if (adev->pm.dpm_enabled) { + amdgpu_dpm_enable_vce(adev, false); + } else { + amdgpu_asic_set_vce_clocks(adev, 0, 0); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); + } + + r = vce_v1_0_hw_fini(ip_block); + if (r) { + dev_err(adev->dev, "vce_v1_0_hw_fini() failed with error %i", r); + return r; + } + + return amdgpu_vce_suspend(adev); +} + +static int vce_v1_0_resume(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r; + + r = amdgpu_vce_resume(adev); + if (r) + return r; + r = vce_v1_0_load_fw_signature(adev); + if (r) + return r; + r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev); + if (r) + return r; + + return vce_v1_0_hw_init(ip_block); +} + +static int vce_v1_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + uint32_t val = 0; + + if (state == AMDGPU_IRQ_STATE_ENABLE) + val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK; + + WREG32_P(mmVCE_SYS_INT_EN, val, + ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + return 0; +} + +static int vce_v1_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + dev_dbg(adev->dev, "IH: VCE\n"); + switch (entry->src_data[0]) { + case 0: + case 1: + amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]); + break; + default: + dev_err(adev->dev, "Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static int vce_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = ip_block->adev; + + vce_v1_0_init_cg(adev); + vce_v1_0_enable_mgcg(adev, state == AMD_CG_STATE_GATE); + + return 0; +} + +static int vce_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = ip_block->adev; + + /* + * This doesn't actually powergate the VCE block. + * That's done in the dpm code via the SMC. This + * just re-inits the block as necessary. The actual + * gating still happens in the dpm code. 
We should + * revisit this when there is a cleaner line between + * the smc and the hw blocks + */ + if (state == AMD_PG_STATE_GATE) + return vce_v1_0_stop(adev); + else + return vce_v1_0_start(adev); +} + +static const struct amd_ip_funcs vce_v1_0_ip_funcs = { + .name = "vce_v1_0", + .early_init = vce_v1_0_early_init, + .sw_init = vce_v1_0_sw_init, + .sw_fini = vce_v1_0_sw_fini, + .hw_init = vce_v1_0_hw_init, + .hw_fini = vce_v1_0_hw_fini, + .suspend = vce_v1_0_suspend, + .resume = vce_v1_0_resume, + .is_idle = vce_v1_0_is_idle, + .wait_for_idle = vce_v1_0_wait_for_idle, + .set_clockgating_state = vce_v1_0_set_clockgating_state, + .set_powergating_state = vce_v1_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs vce_v1_0_ring_funcs = { + .type = AMDGPU_RING_TYPE_VCE, + .align_mask = 0xf, + .nop = VCE_CMD_NO_OP, + .support_64bit_ptrs = false, + .no_user_fence = true, + .get_rptr = vce_v1_0_ring_get_rptr, + .get_wptr = vce_v1_0_ring_get_wptr, + .set_wptr = vce_v1_0_ring_set_wptr, + .parse_cs = amdgpu_vce_ring_parse_cs, + .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ + .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ + .emit_ib = amdgpu_vce_ring_emit_ib, + .emit_fence = amdgpu_vce_ring_emit_fence, + .test_ring = amdgpu_vce_ring_test_ring, + .test_ib = amdgpu_vce_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vce_ring_begin_use, + .end_use = amdgpu_vce_ring_end_use, +}; + +static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->vce.num_rings; i++) { + adev->vce.ring[i].funcs = &vce_v1_0_ring_funcs; + adev->vce.ring[i].me = i; + } +}; + +static const struct amdgpu_irq_src_funcs vce_v1_0_irq_funcs = { + .set = vce_v1_0_set_interrupt_state, + .process = vce_v1_0_process_interrupt, +}; + +static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->vce.irq.num_types = 1; + adev->vce.irq.funcs = &vce_v1_0_irq_funcs; +}; + +const struct amdgpu_ip_block_version vce_v1_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &vce_v1_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h new file mode 100644 index 000000000000..206e7bec897f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * Copyright 2025 Valve Corporation + * Copyright 2025 Alexandre Demers + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __VCE_V1_0_H__ +#define __VCE_V1_0_H__ + +extern const struct amdgpu_ip_block_version vce_v1_0_ip_block; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index bee3e904a6bc..8ea8a6193492 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -407,6 +407,11 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable, static int vce_v2_0_early_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + int r; + + r = amdgpu_vce_early_init(adev); + if (r) + return r; adev->vce.num_rings = 2; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 708123899c41..719e9643c43d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -399,6 +399,7 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + int r; adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev); @@ -407,6 +408,10 @@ static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block) (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) return -ENOENT; + r = amdgpu_vce_early_init(adev); + if (r) + return r; + adev->vce.num_rings = 3; vce_v3_0_set_ring_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 335bda64ff5b..2d64002bed61 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -410,6 +410,11 @@ static int vce_v4_0_stop(struct amdgpu_device *adev) static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + int r; + + r = amdgpu_vce_early_init(adev); + if (r) + return r; if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */ adev->vce.num_rings = 1; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 0f0719528bcc..22925df6a791 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2826,7 +2826,7 @@ static int runtime_enable(struct kfd_process *p, uint64_t r_debug, static int runtime_disable(struct kfd_process *p) { - int i = 0, ret; + int i = 0, ret = 0; bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED; p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED; @@ -2863,6 +2863,7 @@ static int runtime_disable(struct kfd_process *p) /* disable ttmp setup */ for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; + int last_err = 0; if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { pdd->spi_dbg_override = @@ -2872,14 +2873,17 @@ static int runtime_disable(struct kfd_process *p) pdd->dev->vm_info.last_vmid_kfd); if (!pdd->dev->kfd->shared_resources.enable_mes) - debug_refresh_runlist(pdd->dev->dqm); + last_err = debug_refresh_runlist(pdd->dev->dqm); else - kfd_dbg_set_mes_debug_mode(pdd, + last_err = kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); + + if (last_err) + ret = last_err; } } - return 0; + return ret; } static int 
kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 6e7bc983fc0b..d7a2e7178ea9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1897,6 +1897,8 @@ static int start_cpsch(struct device_queue_manager *dqm) static int stop_cpsch(struct device_queue_manager *dqm) { + int ret = 0; + dqm_lock(dqm); if (!dqm->sched_running) { dqm_unlock(dqm); @@ -1904,9 +1906,10 @@ static int stop_cpsch(struct device_queue_manager *dqm) } if (!dqm->dev->kfd->shared_resources.enable_mes) - unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); + ret = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, + 0, USE_DEFAULT_GRACE_PERIOD, false); else - remove_all_kfd_queues_mes(dqm); + ret = remove_all_kfd_queues_mes(dqm); dqm->sched_running = false; @@ -1920,7 +1923,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) dqm->detect_hang_info = NULL; dqm_unlock(dqm); - return 0; + return ret; } static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, @@ -2091,7 +2094,8 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm, while (*fence_addr != fence_value) { /* Fatal err detected, this response won't come */ - if (amdgpu_amdkfd_is_fed(dqm->dev->adev)) + if (amdgpu_amdkfd_is_fed(dqm->dev->adev) || + amdgpu_in_reset(dqm->dev->adev)) return -EIO; if (time_after(jiffies, end_jiffies)) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 82905f3e54dd..5a190dd6be4e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -748,16 +748,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id, uint64_t *slots = page_slots(p->signal_page); uint32_t id; - /* - * If id is valid but slot is not signaled, GPU may signal the same event twice - * before driver have chance to process the first interrupt, then signal slot is - * auto-reset after set_event wakeup the user space, just drop the second event as - * the application only need wakeup once. 
- */ - if ((valid_id_bits > 31 || (1U << valid_id_bits) >= KFD_SIGNAL_EVENT_LIMIT) && - partial_id < KFD_SIGNAL_EVENT_LIMIT && slots[partial_id] == UNSIGNALED_EVENT_SLOT) - goto out_unlock; - if (valid_id_bits) pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n", partial_id, valid_id_bits); @@ -786,7 +776,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id, } } -out_unlock: rcu_read_unlock(); kfd_unref_process(p); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 4ceb251312a6..d76fb61869c7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -28,6 +28,7 @@ #include "kfd_device_queue_manager.h" #include "kfd_smi_events.h" #include "amdgpu_ras.h" +#include "amdgpu_ras_mgr.h" /* * GFX9 SQ Interrupts @@ -228,7 +229,11 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, kfd_signal_poison_consumed_event(dev, pasid); - event_id = amdgpu_ras_acquire_event_id(dev->adev, type); + if (amdgpu_uniras_enabled(dev->adev)) + event_id = amdgpu_ras_mgr_gen_ras_event_seqno(dev->adev, + RAS_SEQNO_TYPE_POISON_CONSUMPTION); + else + event_id = amdgpu_ras_acquire_event_id(dev->adev, type); RAS_EVENT_LOG(dev->adev, event_id, "poison is consumed by client %d, kick off gpu reset flow\n", client_id); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 59a5a3fea65d..46c84fc60af1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -21,7 +21,6 @@ * OTHER DEALINGS IN THE SOFTWARE. */ #include -#include #include #include #include diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index 2eebf67f9c2c..2b7fd442d29c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -31,7 +31,6 @@ #include #include #include -#include #include "kfd_priv.h" #include "kfd_svm.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index ddfe30c13e9d..a085faac9fe1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1083,7 +1083,6 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) * for auto suspend */ if (pdd->runtime_inuse) { - pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev); pdd->runtime_inuse = false; } @@ -1162,9 +1161,6 @@ static void kfd_process_wq_release(struct work_struct *work) release_work); struct dma_fence *ef; - kfd_process_dequeue_from_all_devices(p); - pqm_uninit(&p->pqm); - /* * If GPU in reset, user queues may still running, wait for reset complete. */ @@ -1226,6 +1222,14 @@ static void kfd_process_notifier_release_internal(struct kfd_process *p) cancel_delayed_work_sync(&p->eviction_work); cancel_delayed_work_sync(&p->restore_work); + /* + * Dequeue and destroy user queues, it is not safe for GPU to access + * system memory after mmu release notifier callback returns because + * exit_mmap free process memory afterwards. 
+ */ + kfd_process_dequeue_from_all_devices(p); + pqm_uninit(&p->pqm); + for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 74a1d3e1d52b..97c2270f278f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1698,7 +1698,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, start = map_start << PAGE_SHIFT; end = (map_last + 1) << PAGE_SHIFT; for (addr = start; !r && addr < end; ) { - struct hmm_range *hmm_range = NULL; + struct amdgpu_hmm_range *range = NULL; unsigned long map_start_vma; unsigned long map_last_vma; struct vm_area_struct *vma; @@ -1737,9 +1737,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } WRITE_ONCE(p->svms.faulting_task, current); - r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, - readonly, owner, - &hmm_range); + range = amdgpu_hmm_range_alloc(NULL); + if (likely(range)) + r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, + readonly, owner, range); + else + r = -ENOMEM; WRITE_ONCE(p->svms.faulting_task, NULL); if (r) pr_debug("failed %d to get svm range pages\n", r); @@ -1750,7 +1753,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, if (!r) { offset = (addr >> PAGE_SHIFT) - prange->start; r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, - hmm_range->hmm_pfns); + range->hmm_range.hmm_pfns); if (r) pr_debug("failed %d to dma map range\n", r); } @@ -1758,14 +1761,17 @@ static int svm_range_validate_and_map(struct mm_struct *mm, svm_range_lock(prange); /* Free backing memory of hmm_range if it was initialized - * Overrride return value to TRY AGAIN only if prior returns + * Override return value to TRY AGAIN only if prior returns * were successful */ - if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) { + if (range && !amdgpu_hmm_range_valid(range) && !r) { pr_debug("hmm update the range, need validate again\n"); r = -EAGAIN; } + /* Free the hmm range */ + amdgpu_hmm_range_free(range); + if (!r && !list_empty(&prange->child_list)) { pr_debug("range split by unmap in parallel, validate again\n"); r = -EAGAIN; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 01c7a4877904..a63dfc95b602 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -31,7 +31,6 @@ #include #include #include -#include #include "amdgpu.h" #include "kfd_priv.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 5c98746eb72d..811636af14ea 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -530,7 +530,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", dev->gpu->kfd->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", - dev->gpu->xcp ? + dev->gpu->xcp && + (dev->gpu->xcp->xcp_mgr->mode != + AMDGPU_SPX_PARTITION_MODE) ? 
dev->gpu->xcp->unique_id : dev->gpu->adev->unique_id); sysfs_show_32bit_prop(buffer, offs, "num_xcc", diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 7329b8cc2576..8e949fe77312 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -39,7 +39,8 @@ AMDGPUDM = \ amdgpu_dm_psr.o \ amdgpu_dm_replay.o \ amdgpu_dm_quirks.o \ - amdgpu_dm_wb.o + amdgpu_dm_wb.o \ + amdgpu_dm_colorop.o ifdef CONFIG_DRM_AMD_DC_FP AMDGPUDM += dc_fpu.o diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7fe40bbba265..740711ac1037 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3392,6 +3392,67 @@ static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev, } } +/** + * amdgpu_dm_dump_links_and_sinks - Debug dump of all DC links and their sinks + * @adev: amdgpu device pointer + * + * Iterates through all DC links and dumps information about local and remote + * (MST) sinks. Should be called after connector detection is complete to see + * the final state of all links. + */ +static void amdgpu_dm_dump_links_and_sinks(struct amdgpu_device *adev) +{ + struct dc *dc = adev->dm.dc; + struct drm_device *dev = adev_to_drm(adev); + int li; + + if (!dc) + return; + + for (li = 0; li < dc->link_count; li++) { + struct dc_link *l = dc->links[li]; + const char *name = NULL; + int rs; + + if (!l) + continue; + if (l->local_sink && l->local_sink->edid_caps.display_name[0]) + name = l->local_sink->edid_caps.display_name; + else + name = "n/a"; + + drm_dbg_kms(dev, + "LINK_DUMP[%d]: local_sink=%p type=%d sink_signal=%d sink_count=%u edid_name=%s mst_capable=%d mst_alloc_streams=%d\n", + li, + l->local_sink, + l->type, + l->local_sink ? l->local_sink->sink_signal : SIGNAL_TYPE_NONE, + l->sink_count, + name, + l->dpcd_caps.is_mst_capable, + l->mst_stream_alloc_table.stream_count); + + /* Dump remote (MST) sinks if any */ + for (rs = 0; rs < l->sink_count; rs++) { + struct dc_sink *rsink = l->remote_sinks[rs]; + const char *rname = NULL; + + if (!rsink) + continue; + if (rsink->edid_caps.display_name[0]) + rname = rsink->edid_caps.display_name; + else + rname = "n/a"; + drm_dbg_kms(dev, + " REMOTE_SINK[%d:%d]: sink=%p signal=%d edid_name=%s\n", + li, rs, + rsink, + rsink->sink_signal, + rname); + } + } +} + static int dm_resume(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -3584,6 +3645,12 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) } drm_connector_list_iter_end(&iter); + /* Debug dump: list all DC links and their associated sinks after detection + * is complete for all connectors. This provides a comprehensive view of the + * final state without repeating the dump for each connector. + */ + amdgpu_dm_dump_links_and_sinks(adev); + amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); @@ -3794,7 +3861,9 @@ void amdgpu_dm_update_connector_after_detect( drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", aconnector->connector_id, aconnector->dc_sink, sink); - guard(mutex)(&dev->mode_config.mutex); + /* When polling, DRM has already locked the mutex for us. */ + if (!drm_kms_helper_is_poll_worker()) + mutex_lock(&dev->mode_config.mutex); /* * 1. 
Update status of the drm connector @@ -3857,6 +3926,10 @@ void amdgpu_dm_update_connector_after_detect( } update_subconnector_property(aconnector); + + /* When polling, the mutex will be unlocked for us by DRM. */ + if (!drm_kms_helper_is_poll_worker()) + mutex_unlock(&dev->mode_config.mutex); } static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2) @@ -5283,6 +5356,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm, static void setup_backlight_device(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector) { + struct amdgpu_dm_backlight_caps *caps; struct dc_link *link = aconnector->dc_link; int bl_idx = dm->num_of_edps; @@ -5302,6 +5376,13 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm, dm->num_of_edps++; update_connector_ext_caps(aconnector); + caps = &dm->backlight_caps[aconnector->bl_idx]; + + /* Only offer ABM property when non-OLED and user didn't turn off by module parameter */ + if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0) + drm_object_attach_property(&aconnector->base.base, + dm->adev->mode_info.abm_level_property, + ABM_SYSFS_CONTROL); } static void amdgpu_set_panel_orientation(struct drm_connector *connector); @@ -5557,6 +5638,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) amdgpu_set_panel_orientation(&aconnector->base); } + /* Debug dump: list all DC links and their associated sinks after detection + * is complete for all connectors. This provides a comprehensive view of the + * final state without repeating the dump for each connector. + */ + amdgpu_dm_dump_links_and_sinks(adev); + /* Software is initialized. Now we can register interrupt handlers. */ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) @@ -5943,6 +6030,10 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, *color_space = COLOR_SPACE_SRGB; + /* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */ + if (plane_state->state && plane_state->state->plane_color_pipeline) + return 0; + /* DRM color properties only affect non-RGB formats. */ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return 0; @@ -7295,29 +7386,117 @@ create_stream_for_sink(struct drm_connector *connector, return stream; } +/** + * amdgpu_dm_connector_poll - Poll a connector to see if it's connected to a display + * @aconnector: DM connector to poll (owns @base drm_connector and @dc_link) + * @force: if true, force polling even when DAC load detection was used + * + * Used for connectors that don't support HPD (hotplug detection) to + * periodically check whether the connector is connected to a display. + * + * When connection was determined via DAC load detection, we avoid + * re-running it on normal polls to prevent visible glitches, unless + * @force is set. + * + * Return: The probed connector status (connected/disconnected/unknown). 
+ */ +static enum drm_connector_status +amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force) +{ + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc_link *link = aconnector->dc_link; + enum dc_connection_type conn_type = dc_connection_none; + enum drm_connector_status status = connector_status_disconnected; + + /* When we determined the connection using DAC load detection, + * do NOT poll the connector do detect disconnect because + * that would run DAC load detection again which can cause + * visible visual glitches. + * + * Only allow to poll such a connector again when forcing. + */ + if (!force && link->local_sink && link->type == dc_connection_dac_load) + return connector->status; + + mutex_lock(&aconnector->hpd_lock); + + if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) && + conn_type != dc_connection_none) { + mutex_lock(&adev->dm.dc_lock); + + /* Only call full link detection when a sink isn't created yet, + * ie. just when the display is plugged in, otherwise we risk flickering. + */ + if (link->local_sink || + dc_link_detect(link, DETECT_REASON_HPD)) + status = connector_status_connected; + + mutex_unlock(&adev->dm.dc_lock); + } + + if (connector->status != status) { + if (status == connector_status_disconnected) { + if (link->local_sink) + dc_sink_release(link->local_sink); + + link->local_sink = NULL; + link->dpcd_sink_count = 0; + link->type = dc_connection_none; + } + + amdgpu_dm_update_connector_after_detect(aconnector); + } + + mutex_unlock(&aconnector->hpd_lock); + return status; +} + +/** + * amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display + * + * A connector is considered connected when it has a sink that is not NULL. + * For connectors that support HPD (hotplug detection), the connection is + * handled in the HPD interrupt. + * For connectors that may not support HPD, such as analog connectors, + * DRM will call this function repeatedly to poll them. + * + * Notes: + * 1. This interface is NOT called in context of HPD irq. + * 2. This interface *is called* in context of user-mode ioctl. Which + * makes it a bad place for *any* MST-related activity. + * + * @connector: The DRM connector we are checking. We convert it to + * amdgpu_dm_connector so we can read the DC link and state. + * @force: If true, do a full detect again. This is used even when + * a lighter check would normally be used to avoid flicker. + * + * Return: The connector status (connected, disconnected, or unknown). + * + */ static enum drm_connector_status amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) { - bool connected; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - /* - * Notes: - * 1. This interface is NOT called in context of HPD irq. - * 2. This interface *is called* in context of user-mode ioctl. Which - * makes it a bad place for *any* MST-related activity. - */ - - if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && - !aconnector->fake_enable) - connected = (aconnector->dc_sink != NULL); - else - connected = (aconnector->base.force == DRM_FORCE_ON || - aconnector->base.force == DRM_FORCE_ON_DIGITAL); - update_subconnector_property(aconnector); - return (connected ? 
connector_status_connected : + if (aconnector->base.force == DRM_FORCE_ON || + aconnector->base.force == DRM_FORCE_ON_DIGITAL) + return connector_status_connected; + else if (aconnector->base.force == DRM_FORCE_OFF) + return connector_status_disconnected; + + /* Poll analog connectors and only when either + * disconnected or connected to an analog display. + */ + if (drm_kms_helper_is_poll_worker() && + dc_connector_supports_analog(aconnector->dc_link->link_id.id) && + (!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog)) + return amdgpu_dm_connector_poll(aconnector, force); + + return (aconnector->dc_sink ? connector_status_connected : connector_status_disconnected); } @@ -7368,6 +7547,20 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { dm_new_state->underscan_enable = val; ret = 0; + } else if (property == adev->mode_info.abm_level_property) { + switch (val) { + case ABM_SYSFS_CONTROL: + dm_new_state->abm_sysfs_forbidden = false; + break; + case ABM_LEVEL_OFF: + dm_new_state->abm_sysfs_forbidden = true; + dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; + break; + default: + dm_new_state->abm_sysfs_forbidden = true; + dm_new_state->abm_level = val; + } + ret = 0; } return ret; @@ -7410,6 +7603,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { *val = dm_state->underscan_enable; ret = 0; + } else if (property == adev->mode_info.abm_level_property) { + if (!dm_state->abm_sysfs_forbidden) + *val = ABM_SYSFS_CONTROL; + else + *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ? + dm_state->abm_level : 0; + ret = 0; } return ret; @@ -7462,10 +7662,16 @@ static ssize_t panel_power_savings_store(struct device *device, return -EINVAL; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - to_dm_connector_state(connector->state)->abm_level = val ?: - ABM_LEVEL_IMMEDIATE_DISABLE; + if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden) + ret = -EBUSY; + else + to_dm_connector_state(connector->state)->abm_level = val ?: + ABM_LEVEL_IMMEDIATE_DISABLE; drm_modeset_unlock(&dev->mode_config.connection_mutex); + if (ret) + return ret; + drm_kms_helper_hotplug_event(dev); return count; @@ -8312,7 +8518,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, return 0; } -static int to_drm_connector_type(enum signal_type st) +static int to_drm_connector_type(enum signal_type st, uint32_t connector_id) { switch (st) { case SIGNAL_TYPE_HDMI_TYPE_A: @@ -8328,6 +8534,10 @@ static int to_drm_connector_type(enum signal_type st) return DRM_MODE_CONNECTOR_DisplayPort; case SIGNAL_TYPE_DVI_DUAL_LINK: case SIGNAL_TYPE_DVI_SINGLE_LINK: + if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII || + connector_id == CONNECTOR_ID_DUAL_LINK_DVII) + return DRM_MODE_CONNECTOR_DVII; + return DRM_MODE_CONNECTOR_DVID; case SIGNAL_TYPE_VIRTUAL: return DRM_MODE_CONNECTOR_VIRTUAL; @@ -8379,7 +8589,7 @@ static void amdgpu_dm_get_native_mode(struct drm_connector *connector) static struct drm_display_mode * amdgpu_dm_create_common_mode(struct drm_encoder *encoder, - char *name, + const char *name, int hdisplay, int vdisplay) { struct drm_device *dev = encoder->dev; @@ -8401,6 +8611,24 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder, } +static const struct amdgpu_dm_mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; +} common_modes[] = { + { "640x480", 640, 480}, + 
{ "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} +}; + static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector) { @@ -8411,23 +8639,6 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, to_amdgpu_dm_connector(connector); int i; int n; - struct mode_size { - char name[DRM_DISPLAY_MODE_LEN]; - int w; - int h; - } common_modes[] = { - { "640x480", 640, 480}, - { "800x600", 800, 600}, - { "1024x768", 1024, 768}, - { "1280x720", 1280, 720}, - { "1280x800", 1280, 800}, - {"1280x1024", 1280, 1024}, - { "1440x900", 1440, 900}, - {"1680x1050", 1680, 1050}, - {"1600x1200", 1600, 1200}, - {"1920x1080", 1920, 1080}, - {"1920x1200", 1920, 1200} - }; if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) && (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)) @@ -8628,6 +8839,16 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect if (!(amdgpu_freesync_vid_mode && drm_edid)) return; + if (!amdgpu_dm_connector->dc_sink || !amdgpu_dm_connector->dc_link) + return; + + if (!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version)) + return; + + if (dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id) && + amdgpu_dm_connector->dc_sink->edid_caps.analog) + return; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) amdgpu_dm_connector->num_modes += add_fs_modes(amdgpu_dm_connector); @@ -8637,11 +8858,11 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + struct dc_link *dc_link = amdgpu_dm_connector->dc_link; struct drm_encoder *encoder; const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; - struct dc_link_settings *verified_link_cap = - &amdgpu_dm_connector->dc_link->verified_link_cap; - const struct dc *dc = amdgpu_dm_connector->dc_link->dc; + struct dc_link_settings *verified_link_cap = &dc_link->verified_link_cap; + const struct dc *dc = dc_link->dc; encoder = amdgpu_dm_connector_to_encoder(connector); @@ -8651,6 +8872,17 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) amdgpu_dm_connector->num_modes += drm_add_modes_noedid(connector, 1920, 1080); + + if (amdgpu_dm_connector->dc_sink && + amdgpu_dm_connector->dc_sink->edid_caps.analog && + dc_connector_supports_analog(dc_link->link_id.id)) { + /* Analog monitor connected by DAC load detection. + * Add common modes. It will be up to the user to select one that works. 
+ */ + for (int i = 0; i < ARRAY_SIZE(common_modes); i++) + amdgpu_dm_connector->num_modes += drm_add_modes_noedid( + connector, common_modes[i].w, common_modes[i].h); + } } else { amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); if (encoder) @@ -8723,6 +8955,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, case DRM_MODE_CONNECTOR_DVID: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; break; + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_VGA: + aconnector->base.polled = + DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + break; default: break; } @@ -8924,7 +9161,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, goto out_free; } - connector_type = to_drm_connector_type(link->connector_signal); + connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id); res = drm_connector_init_with_ddc( dm->ddev, @@ -10680,7 +10917,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * Here we create an empty update on each plane. * To fix this, DC should permit updating only stream properties. */ - dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); + dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_KERNEL); if (!dummy_updates) { drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n"); continue; @@ -12607,7 +12844,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, int j = state->num_private_objs-1; dm_atomic_destroy_state(obj, - state->private_objs[i].state); + state->private_objs[i].state_to_destroy); /* If i is not at the end of the array then the * last element needs to be moved to where i was @@ -12618,7 +12855,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, state->private_objs[j]; state->private_objs[j].ptr = NULL; - state->private_objs[j].state = NULL; + state->private_objs[j].state_to_destroy = NULL; state->private_objs[j].old_state = NULL; state->private_objs[j].new_state = NULL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 8ca738957598..ef97cede9926 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -999,6 +999,7 @@ struct dm_connector_state { bool underscan_enable; bool freesync_capable; bool update_hdcp; + bool abm_sysfs_forbidden; uint8_t abm_level; int vcpi_slots; uint64_t pbn; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index a4ac6d442278..1dcc79b35225 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -26,12 +26,39 @@ #include "amdgpu.h" #include "amdgpu_mode.h" #include "amdgpu_dm.h" +#include "amdgpu_dm_colorop.h" #include "dc.h" #include "modules/color/color_gamma.h" /** * DOC: overview * + * We have three types of color management in the AMD display driver. + * 1. the legacy &drm_crtc DEGAMMA, CTM, and GAMMA properties + * 2. AMD driver private color management on &drm_plane and &drm_crtc + * 3. AMD plane color pipeline + * + * The CRTC properties are the original color management. When they were + * implemented per-plane color management was not a thing yet. Because + * of that we could get away with plumbing the DEGAMMA and CTM + * properties to pre-blending HW functions. 
This is incompatible with + * per-plane color management, such as via the AMD private properties or + * the new drm_plane color pipeline. The only compatible CRTC property + * with per-plane color management is the GAMMA property as it is + * applied post-blending. + * + * The AMD driver private color management properties are only exposed + * when the kernel is built explicitly with -DAMD_PRIVATE_COLOR. They + * are temporary building blocks on the path to full-fledged &drm_plane + * and &drm_crtc color pipelines and lay the driver's groundwork for the + * color pipelines. + * + * The AMD plane color pipeline describes AMD's &drm_colorops via the + * &drm_plane's COLOR_PIPELINE property. + * + * drm_crtc Properties + * ------------------- + * * The DC interface to HW gives us the following color management blocks * per pipe (surface): * @@ -42,36 +69,93 @@ * - Surface regamma LUT (normalized) * - Output CSC (normalized) * - * But these aren't a direct mapping to DRM color properties. The current DRM - * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware - * is essentially giving: + * But these aren't a direct mapping to DRM color properties. The + * current DRM interface exposes CRTC degamma, CRTC CTM and CRTC regamma + * while our hardware is essentially giving: * * Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM * - * The input gamma LUT block isn't really applicable here since it operates - * on the actual input data itself rather than the HW fp representation. The - * input and output CSC blocks are technically available to use as part of - * the DC interface but are typically used internally by DC for conversions - * between color spaces. These could be blended together with user - * adjustments in the future but for now these should remain untouched. + * The input gamma LUT block isn't really applicable here since it + * operates on the actual input data itself rather than the HW fp + * representation. The input and output CSC blocks are technically + * available to use as part of the DC interface but are typically used + * internally by DC for conversions between color spaces. These could be + * blended together with user adjustments in the future but for now + * these should remain untouched. * - * The pipe blending also happens after these blocks so we don't actually - * support any CRTC props with correct blending with multiple planes - but we - * can still support CRTC color management properties in DM in most single - * plane cases correctly with clever management of the DC interface in DM. + * The pipe blending also happens after these blocks so we don't + * actually support any CRTC props with correct blending with multiple + * planes - but we can still support CRTC color management properties in + * DM in most single plane cases correctly with clever management of the + * DC interface in DM. * - * As per DRM documentation, blocks should be in hardware bypass when their - * respective property is set to NULL. A linear DGM/RGM LUT should also - * considered as putting the respective block into bypass mode. + * As per DRM documentation, blocks should be in hardware bypass when + * their respective property is set to NULL. A linear DGM/RGM LUT should + * also considered as putting the respective block into bypass mode. 
* - * This means that the following - * configuration is assumed to be the default: + * This means that the following configuration is assumed to be the + * default: + * + * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... CRTC + * DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass + * + * AMD Private Color Management on drm_plane + * ----------------------------------------- + * + * The AMD private color management properties on a &drm_plane are: + * + * - AMD_PLANE_DEGAMMA_LUT + * - AMD_PLANE_DEGAMMA_LUT_SIZE + * - AMD_PLANE_DEGAMMA_TF + * - AMD_PLANE_HDR_MULT + * - AMD_PLANE_CTM + * - AMD_PLANE_SHAPER_LUT + * - AMD_PLANE_SHAPER_LUT_SIZE + * - AMD_PLANE_SHAPER_TF + * - AMD_PLANE_LUT3D + * - AMD_PLANE_LUT3D_SIZE + * - AMD_PLANE_BLEND_LUT + * - AMD_PLANE_BLEND_LUT_SIZE + * - AMD_PLANE_BLEND_TF + * + * The AMD private color management property on a &drm_crtc is: + * + * - AMD_CRTC_REGAMMA_TF + * + * Use of these properties is discouraged. + * + * AMD plane color pipeline + * ------------------------ + * + * The AMD &drm_plane color pipeline is advertised for DCN generations + * 3.0 and newer. It exposes these elements in this order: + * + * 1. 1D curve colorop + * 2. Multiplier + * 3. 3x4 CTM + * 4. 1D curve colorop + * 5. 1D LUT + * 6. 3D LUT + * 7. 1D curve colorop + * 8. 1D LUT + * + * The multiplier (#2) is a simple multiplier that is applied to all + * channels. + * + * The 3x4 CTM (#3) is a simple 3x4 matrix. + * + * #1, and #7 are non-linear to linear curves. #4 is a linear to + * non-linear curve. They support sRGB, PQ, and BT.709/BT.2020 EOTFs or + * their inverse. + * + * The 1D LUTs (#5 and #8) are plain 4096 entry LUTs. + * + * The 3DLUT (#6) is a tetrahedrally interpolated 17 cube LUT. * - * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... - * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass */ #define MAX_DRM_LUT_VALUE 0xFFFF +#define MAX_DRM_LUT32_VALUE 0xFFFFFFFF #define SDR_WHITE_LEVEL_INIT_VALUE 80 /** @@ -341,6 +425,21 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size) return blob ? (struct drm_color_lut *)blob->data : NULL; } +/** + * __extract_blob_lut32 - Extracts the DRM lut and lut size from a blob. + * @blob: DRM color mgmt property blob + * @size: lut size + * + * Returns: + * DRM LUT or NULL + */ +static const struct drm_color_lut32 * +__extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size) +{ + *size = blob ? drm_color_lut32_size(blob) : 0; + return blob ? (struct drm_color_lut32 *)blob->data : NULL; +} + /** * __is_lut_linear - check if the given lut is a linear mapping of values * @lut: given lut to check values @@ -414,6 +513,24 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut, } } +/** + * __drm_lut32_to_dc_gamma - convert the drm_color_lut to dc_gamma. + * @lut: DRM lookup table for color conversion + * @gamma: DC gamma to set entries + * + * The conversion depends on the size of the lut - whether or not it's legacy. 
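+ *
+ * For example, with MAX_DRM_LUT32_VALUE (0xFFFFFFFF) as the divisor, a
+ * full-scale channel value of 0xFFFFFFFF maps to 1.0 and 0x80000000 to
+ * roughly 0.5 in the resulting dc_fixpt gamma entries.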
+ */ +static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma) +{ + int i; + + for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) { + gamma->entries.red[i] = dc_fixpt_from_fraction(lut[i].red, MAX_DRM_LUT32_VALUE); + gamma->entries.green[i] = dc_fixpt_from_fraction(lut[i].green, MAX_DRM_LUT32_VALUE); + gamma->entries.blue[i] = dc_fixpt_from_fraction(lut[i].blue, MAX_DRM_LUT32_VALUE); + } +} + /** * __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix * @ctm: DRM color transformation matrix @@ -566,6 +683,63 @@ static int __set_output_tf(struct dc_transfer_func *func, return res ? 0 : -ENOMEM; } +/** + * __set_output_tf_32 - calculates the output transfer function based on expected input space. + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut + * @has_rom: if ROM can be used for hardcoded curve + * + * Returns: + * 0 in case of success. -ENOMEM if fails. + */ +static int __set_output_tf_32(struct dc_transfer_func *func, + const struct drm_color_lut32 *lut, uint32_t lut_size, + bool has_rom) +{ + struct dc_gamma *gamma = NULL; + struct calculate_buffer cal_buffer = {0}; + bool res; + + cal_buffer.buffer_index = -1; + + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->num_entries = lut_size; + __drm_lut32_to_dc_gamma(lut, gamma); + } + + if (func->tf == TRANSFER_FUNCTION_LINEAR) { + /* + * Color module doesn't like calculating regamma params + * on top of a linear input. But degamma params can be used + * instead to simulate this. + */ + if (gamma) + gamma->type = GAMMA_CUSTOM; + res = mod_color_calculate_degamma_params(NULL, func, + gamma, gamma != NULL); + } else { + /* + * Assume sRGB. The actual mapping will depend on whether the + * input was legacy or not. + */ + if (gamma) + gamma->type = GAMMA_CS_TFM_1D; + res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL, + has_rom, NULL, &cal_buffer); + } + + if (gamma) + dc_gamma_release(&gamma); + + return res ? 0 : -ENOMEM; +} + + static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf, const struct drm_color_lut *regamma_lut, uint32_t regamma_size, bool has_rom, @@ -638,6 +812,42 @@ static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *f return res ? 0 : -ENOMEM; } +/** + * __set_input_tf_32 - calculates the input transfer function based on expected + * input space. + * @caps: dc color capabilities + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut. + * + * Returns: + * 0 in case of success. -ENOMEM if fails. + */ +static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func, + const struct drm_color_lut32 *lut, uint32_t lut_size) +{ + struct dc_gamma *gamma = NULL; + bool res; + + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->type = GAMMA_CUSTOM; + gamma->num_entries = lut_size; + + __drm_lut32_to_dc_gamma(lut, gamma); + } + + res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL); + + if (gamma) + dc_gamma_release(&gamma); + + return res ? 
0 : -ENOMEM; +} + static enum dc_transfer_func_predefined amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf) { @@ -667,6 +877,27 @@ amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf) } } +static enum dc_transfer_func_predefined +amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf) +{ + switch (tf) { + case DRM_COLOROP_1D_CURVE_SRGB_EOTF: + case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF: + return TRANSFER_FUNCTION_SRGB; + case DRM_COLOROP_1D_CURVE_PQ_125_EOTF: + case DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF: + return TRANSFER_FUNCTION_PQ; + case DRM_COLOROP_1D_CURVE_BT2020_INV_OETF: + case DRM_COLOROP_1D_CURVE_BT2020_OETF: + return TRANSFER_FUNCTION_BT709; + case DRM_COLOROP_1D_CURVE_GAMMA22: + case DRM_COLOROP_1D_CURVE_GAMMA22_INV: + return TRANSFER_FUNCTION_GAMMA22; + default: + return TRANSFER_FUNCTION_LINEAR; + } +} + static void __to_dc_lut3d_color(struct dc_rgb *rgb, const struct drm_color_lut lut, int bit_precision) @@ -720,6 +951,59 @@ static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut, __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth); } +static void __to_dc_lut3d_32_color(struct dc_rgb *rgb, + const struct drm_color_lut32 lut, + int bit_precision) +{ + rgb->red = drm_color_lut32_extract(lut.red, bit_precision); + rgb->green = drm_color_lut32_extract(lut.green, bit_precision); + rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision); +} + +static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut, + uint32_t lut3d_size, + struct tetrahedral_params *params, + bool use_tetrahedral_9, + int bit_depth) +{ + struct dc_rgb *lut0; + struct dc_rgb *lut1; + struct dc_rgb *lut2; + struct dc_rgb *lut3; + int lut_i, i; + + + if (use_tetrahedral_9) { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + } else { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + } + + for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) { + /* + * We should consider the 3D LUT RGB values are distributed + * along four arrays lut0-3 where the first sizes 1229 and the + * other 1228. The bit depth supported for 3dlut channel is + * 12-bit, but DC also supports 10-bit. + * + * TODO: improve color pipeline API to enable the userspace set + * bit depth and 3D LUT size/stride, as specified by VA-API. 
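+ *
+ * For the 17x17x17 case this means 17^3 = 4913 entries in total: the
+ * loop writes one entry to each of lut0..lut3 per iteration while
+ * i < lut3d_size - 4, and the tail write after the loop gives lut0 its
+ * extra point, so lut0 ends up with 1229 entries and lut1..lut3 with
+ * 1228 each (1229 + 3 * 1228 = 4913).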
+ */ + __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth); + __to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth); + __to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth); + __to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth); + } + /* lut0 has 1229 points (lut_size/4 + 1) */ + __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth); +} + /* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream * @drm_lut3d: user 3D LUT * @drm_lut3d_size: size of 3D LUT @@ -1177,6 +1461,360 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state, return 0; } +static int +__set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state, + struct drm_colorop_state *colorop_state) +{ + struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func; + struct drm_colorop *colorop = colorop_state->colorop; + struct drm_device *drm = colorop->dev; + + if (colorop->type != DRM_COLOROP_1D_CURVE) + return -EINVAL; + + if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) + return -EINVAL; + + if (colorop_state->bypass) { + tf->type = TF_TYPE_BYPASS; + tf->tf = TRANSFER_FUNCTION_LINEAR; + return 0; + } + + drm_dbg(drm, "Degamma colorop with ID: %d\n", colorop->base.id); + + tf->type = TF_TYPE_PREDEFINED; + tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + + return 0; +} + +static int +__set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + int i = 0; + + old_colorop = colorop; + + /* 1st op: 1d curve - degamma */ + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (!colorop_state) + return -EINVAL; + + return __set_colorop_in_tf_1d_curve(dc_plane_state, colorop_state); +} + +static int +__set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + const struct drm_device *dev = colorop->dev; + const struct drm_property_blob *blob; + struct drm_color_ctm_3x4 *ctm = NULL; + int i = 0; + + /* 3x4 matrix */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) { + drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id); + blob = colorop_state->data; + if (blob->length == sizeof(struct drm_color_ctm_3x4)) { + ctm = (struct drm_color_ctm_3x4 *) blob->data; + __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix); + dc_plane_state->gamut_remap_matrix.enable_remap = true; + dc_plane_state->input_csc_color_matrix.enable_adjustment = false; + } else { + drm_warn(dev, "blob->length (%zu) isn't equal to drm_color_ctm_3x4 (%zu)\n", + blob->length, sizeof(struct drm_color_ctm_3x4)); + return -EINVAL; + } + } + + return 0; +} + +static int 
+__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + const struct drm_device *dev = colorop->dev; + int i = 0; + + /* Multiplier */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_MULTIPLIER) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) { + drm_dbg(dev, "Multiplier colorop with ID: %d\n", colorop->base.id); + dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(colorop_state->multiplier); + } + + return 0; +} + +static int +__set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR; + struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func; + const struct drm_color_lut32 *shaper_lut; + struct drm_device *dev = colorop->dev; + bool enabled = false; + u32 shaper_size; + int i = 0, ret = 0; + + /* 1D Curve - SHAPER TF */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_shaper_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) { + drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + ret = __set_output_tf(tf, 0, 0, false); + if (ret) + return ret; + enabled = true; + } + + /* 1D LUT - SHAPER LUT */ + colorop = old_colorop->next; + if (!colorop) { + drm_dbg(dev, "no Shaper LUT colorop found\n"); + return -EINVAL; + } + + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) { + drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size); + shaper_size = shaper_lut != NULL ? 
shaper_size : 0; + + /* Custom LUT size must be the same as supported size */ + if (shaper_size == colorop->size) { + ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false); + if (ret) + return ret; + enabled = true; + } + } + + if (!enabled) + tf->type = TF_TYPE_BYPASS; + + return 0; +} + +/* __set_colorop_3dlut - set DRM 3D LUT to DC stream + * @drm_lut3d: user 3D LUT + * @drm_lut3d_size: size of 3D LUT + * @lut3d: DC 3D LUT + * + * Map user 3D LUT data to DC 3D LUT and all necessary bits to program it + * on DCN accordingly. + * + * Returns: + * 0 on success. -EINVAL if drm_lut3d_size is zero. + */ +static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d, + uint32_t drm_lut3d_size, + struct dc_3dlut *lut) +{ + if (!drm_lut3d_size) { + lut->state.bits.initialized = 0; + return -EINVAL; + } + + /* Only supports 17x17x17 3D LUT (12-bit) now */ + lut->lut_3d.use_12bits = true; + lut->lut_3d.use_tetrahedral_9 = false; + + lut->state.bits.initialized = 1; + __drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d, + lut->lut_3d.use_tetrahedral_9, 12); + + return 0; +} + +static int +__set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func; + struct drm_atomic_state *state = plane_state->state; + const struct amdgpu_device *adev = drm_to_adev(colorop->dev); + const struct drm_device *dev = colorop->dev; + const struct drm_color_lut32 *lut3d; + uint32_t lut3d_size; + int i = 0, ret = 0; + + /* 3D LUT */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) { + if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) { + drm_dbg(dev, "3D LUT is not supported by hardware\n"); + return -EINVAL; + } + + drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id); + lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size); + lut3d_size = lut3d != NULL ? lut3d_size : 0; + ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func); + if (ret) { + drm_dbg(dev, "3D LUT colorop with ID: %d has LUT size = %d\n", + colorop->base.id, lut3d_size); + return ret; + } + + /* 3D LUT requires shaper. 
If shaper colorop is bypassed, enable shaper curve + * with TRANSFER_FUNCTION_LINEAR + */ + if (tf->type == TF_TYPE_BYPASS) { + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = TRANSFER_FUNCTION_LINEAR; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + ret = __set_output_tf_32(tf, NULL, 0, false); + } + } + + return ret; +} + +static int +__set_dm_plane_colorop_blend(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR; + struct dc_transfer_func *tf = &dc_plane_state->blend_tf; + const struct drm_color_lut32 *blend_lut = NULL; + struct drm_device *dev = colorop->dev; + uint32_t blend_size = 0; + int i = 0; + + /* 1D Curve - BLND TF */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE && + (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + __set_input_tf_32(NULL, tf, blend_lut, blend_size); + } + + /* 1D Curve - BLND LUT */ + colorop = old_colorop->next; + if (!colorop) { + drm_dbg(dev, "no Blend LUT colorop found\n"); + return -EINVAL; + } + + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT && + (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size); + blend_size = blend_lut != NULL ? 
blend_size : 0; + + /* Custom LUT size must be the same as supported size */ + if (blend_size == colorop->size) + __set_input_tf_32(NULL, tf, blend_lut, blend_size); + } + + return 0; +} + static int amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, struct dc_plane_state *dc_plane_state) @@ -1227,6 +1865,93 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, return 0; } +static int +amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state) +{ + struct drm_colorop *colorop = plane_state->color_pipeline; + struct drm_device *dev = plane_state->plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + int ret; + + /* 1D Curve - DEGAM TF */ + if (!colorop) + return -EINVAL; + + ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* Multiplier */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no multiplier colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* 3x4 matrix */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no 3x4 matrix colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + if (adev->dm.dc->caps.color.dpp.hw_3d_lut) { + /* 1D Curve & LUT - SHAPER TF & LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no Shaper TF colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* Shaper LUT colorop is already handled, just skip here */ + colorop = colorop->next; + if (!colorop) + return -EINVAL; + + /* 3D LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no 3D LUT colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + } + + /* 1D Curve & LUT - BLND TF & LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no Blend TF colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* BLND LUT colorop is already handled, just skip here */ + colorop = colorop->next; + if (!colorop) + return -EINVAL; + + return 0; +} + /** * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane. * @crtc: amdgpu_dm crtc state @@ -1323,5 +2048,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, dc_plane_state->input_csc_color_matrix.enable_adjustment = false; } + if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state)) + return 0; + return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c new file mode 100644 index 000000000000..d585618b8064 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include +#include +#include +#include + +#include "amdgpu.h" +#include "amdgpu_dm_colorop.h" +#include "dc.h" + +const u64 amdgpu_dm_supported_degam_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); + +const u64 amdgpu_dm_supported_shaper_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22); + +const u64 amdgpu_dm_supported_blnd_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); + +#define MAX_COLOR_PIPELINE_OPS 10 + +#define LUT3D_SIZE 17 + +int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list) +{ + struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS]; + struct drm_device *dev = plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + int ret; + int i = 0; + + memset(ops, 0, sizeof(ops)); + + /* 1D curve - DEGAM TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + amdgpu_dm_supported_degam_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + list->type = ops[i]->base.id; + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id); + + i++; + + /* Multiplier */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 3x4 matrix */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + if (adev->dm.dc->caps.color.dpp.hw_3d_lut) { + /* 1D curve - SHAPER TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + 
amdgpu_dm_supported_shaper_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 1D LUT - SHAPER LUT */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 3D LUT */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE, + DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + } + + /* 1D curve - BLND TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + amdgpu_dm_supported_blnd_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i - 1], ops[i]); + + i++; + + /* 1D LUT - BLND LUT */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + return 0; + +cleanup: + if (ret == -ENOMEM) + drm_err(plane->dev, "KMS: Failed to allocate colorop\n"); + + drm_colorop_pipeline_destroy(dev); + + return ret; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h new file mode 100644 index 000000000000..2e1617ffc8ee --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_COLOROP_H__ +#define __AMDGPU_DM_COLOROP_H__ + +extern const u64 amdgpu_dm_supported_degam_tfs; +extern const u64 amdgpu_dm_supported_shaper_tfs; +extern const u64 amdgpu_dm_supported_blnd_tfs; + +int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list); + +#endif /* __AMDGPU_DM_COLOROP_H__*/ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 38f9ea313dcb..697e232acebf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -736,7 +736,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, { struct amdgpu_crtc *acrtc = NULL; struct drm_plane *cursor_plane; - bool is_dcn; + bool has_degamma; int res = -ENOMEM; cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); @@ -775,20 +775,18 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, dm->adev->mode_info.crtcs[crtc_index] = acrtc; - /* Don't enable DRM CRTC degamma property for DCE since it doesn't - * support programmable degamma anywhere. + /* Don't enable DRM CRTC degamma property for + * 1. Degamma is replaced by color pipeline. + * 2. DCE since it doesn't support programmable degamma anywhere. + * 3. DCN401 since pre-blending degamma LUT doesn't apply to cursor. */ - is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; - /* Dont't enable DRM CRTC degamma property for DCN401 since the - * pre-blending degamma LUT doesn't apply to cursor, and therefore - * can't work similar to a post-blending degamma LUT as in other hw - * versions. - * TODO: revisit it once KMS plane color API is merged. - */ - drm_crtc_enable_color_mgmt(&acrtc->base, - (is_dcn && - dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ? - MAX_COLOR_LUT_ENTRIES : 0, + if (plane->color_pipeline_property) + has_degamma = false; + else + has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch && + dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01; + + drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 00dac862b665..a9839485f2a2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -759,6 +759,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us int max_param_num = 11; enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; bool disable_hpd = false; + bool supports_hpd = link->irq_source_hpd != DC_IRQ_SOURCE_INVALID; bool valid_test_pattern = false; uint8_t param_nums = 0; /* init with default 80bit custom pattern */ @@ -850,7 +851,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us * because it might have been disabled after a test pattern was set. * AUX depends on HPD * sequence dependent, do not move! */ - if (!disable_hpd) + if (supports_hpd && !disable_hpd) dc_link_enable_hpd(link); prefer_link_settings.lane_count = link->verified_link_cap.lane_count; @@ -888,7 +889,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us * Need disable interrupt to avoid SW driver disable DP output. This is * done after the test pattern is set. 
*/ - if (valid_test_pattern && disable_hpd) + if (valid_test_pattern && supports_hpd && disable_hpd) dc_link_disable_hpd(link); kfree(wr_buf); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 19038f336155..85ce558cefc5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -201,6 +201,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, struct mod_hdcp_link_adjustment link_adjust; struct mod_hdcp_display_adjustment display_adjust; unsigned int conn_index = aconnector->base.index; + const struct dc *dc = aconnector->dc_link->dc; guard(mutex)(&hdcp_w->mutex); drm_connector_get(&aconnector->base); @@ -231,6 +232,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, link_adjust.hdcp1.disable = 1; link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; } + link_adjust.hdcp2.use_fw_locality_check = + (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable); + link_adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback; schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); @@ -534,6 +538,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct dc_sink *sink = NULL; bool link_is_hdcp14 = false; + const struct dc *dc = aconnector->dc_link->dc; if (config->dpms_off) { hdcp_remove_display(hdcp_work, link_index, aconnector); @@ -575,6 +580,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->adjust.auth_delay = 2; link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS; link->adjust.hdcp1.disable = 0; + link->adjust.hdcp2.use_fw_locality_check = (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable); + link->adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback; hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index, @@ -786,15 +793,8 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, ddc_funcs->read_i2c = lp_read_i2c; ddc_funcs->write_dpcd = lp_write_dpcd; ddc_funcs->read_dpcd = lp_read_dpcd; - - config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback; - if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) { - ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c; - ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux; - } else { - ddc_funcs->atomic_write_poll_read_i2c = NULL; - ddc_funcs->atomic_write_poll_read_aux = NULL; - } + ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c; + ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux; memset(hdcp_work[i].aconnector, 0, sizeof(struct amdgpu_dm_connector *) * diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index d0f770dd0a95..ac98c746c3de 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -131,6 +131,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->serial_number = edid_buf->serial; edid_caps->manufacture_week = edid_buf->mfg_week; edid_caps->manufacture_year = edid_buf->mfg_year; + edid_caps->analog = !(edid_buf->input & 
DRM_EDID_INPUT_DIGITAL); drm_edid_get_monitor_name(edid_buf, edid_caps->display_name, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index a1c722112c22..0a2a3f233a0e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -476,6 +476,7 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) void amdgpu_dm_irq_suspend(struct amdgpu_device *adev) { + struct drm_device *dev = adev_to_drm(adev); int src; struct list_head *hnd_list_h; struct list_head *hnd_list_l; @@ -512,6 +513,9 @@ void amdgpu_dm_irq_suspend(struct amdgpu_device *adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_disable(dev); } void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) @@ -537,6 +541,7 @@ void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) { + struct drm_device *dev = adev_to_drm(adev); int src; struct list_head *hnd_list_h, *hnd_list_l; unsigned long irq_table_flags; @@ -557,6 +562,9 @@ void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); } /* @@ -893,6 +901,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) struct drm_connector_list_iter iter; int irq_type; int i; + bool use_polling = false; /* First, clear all hpd and hpdrx interrupts */ for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) { @@ -906,6 +915,8 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) struct amdgpu_dm_connector *amdgpu_dm_connector; const struct dc_link *dc_link; + use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) continue; @@ -947,6 +958,9 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) } } drm_connector_list_iter_end(&iter); + + if (use_polling) + drm_kms_helper_poll_init(dev); } /** @@ -997,4 +1011,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) } } drm_connector_list_iter_end(&iter); + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_fini(dev); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index e027798ece03..2e3ee78999d9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -37,6 +37,7 @@ #include "amdgpu_display.h" #include "amdgpu_dm_trace.h" #include "amdgpu_dm_plane.h" +#include "amdgpu_dm_colorop.h" #include "gc/gc_11_0_0_offset.h" #include "gc/gc_11_0_0_sh_mask.h" @@ -1782,6 +1783,39 @@ dm_atomic_plane_get_property(struct drm_plane *plane, return 0; } +#else + +#define MAX_COLOR_PIPELINES 5 + +static int +dm_plane_init_colorops(struct drm_plane *plane) +{ + struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES]; + struct drm_device *dev = plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc *dc = adev->dm.dc; + int len = 0; + int ret; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + return 0; + + /* initialize pipeline */ + if (dc->ctx->dce_version >= DCN_VERSION_3_0) { + ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]); + if (ret) { + drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n", + plane->base.id, ret); + return ret; + } + len++; + + /* Create COLOR_PIPELINE property and attach 
*/ + drm_plane_create_color_pipeline_property(plane, pipelines, len); + } + + return 0; +} #endif static const struct drm_plane_funcs dm_plane_funcs = { @@ -1890,7 +1924,12 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, #ifdef AMD_PRIVATE_COLOR dm_atomic_plane_attach_color_mgmt_properties(dm, plane); +#else + res = dm_plane_init_colorops(plane); + if (res) + return res; #endif + /* Create (reset) the plane state */ if (plane->funcs->reset) plane->funcs->reset(plane); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c index 80704d709e44..da94e3544b65 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -162,7 +162,7 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait) if (link) { link->dc->link_srv->edp_setup_replay(link, stream); - link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total); + link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total, 0); DRM_DEBUG_DRIVER("Enabling replay...\n"); link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL); return true; diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index dc943abd6dba..7277ed21552f 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -36,7 +36,7 @@ DC_LIBS += dcn30 DC_LIBS += dcn301 DC_LIBS += dcn31 DC_LIBS += dml -DC_LIBS += dml2 +DC_LIBS += dml2_0 DC_LIBS += soc_and_ip_translator endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 154fd2c18e88..d1471f34e419 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -67,7 +67,9 @@ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp, ATOM_OBJECT *object); static struct device_id device_type_from_device_id(uint16_t device_id); static uint32_t signal_to_ss_id(enum as_signal_type signal); -static uint32_t get_support_mask_for_device_id(struct device_id device_id); +static uint32_t get_support_mask_for_device_id( + enum dal_device_type device_type, + uint32_t enum_id); static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record( struct bios_parser *bp, ATOM_OBJECT *object); @@ -441,6 +443,7 @@ static enum bp_result get_firmware_info_v1_4( le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10; info->pll_info.max_output_pxl_clk_pll_frequency = le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10; + info->max_pixel_clock = le16_to_cpu(firmware_info->usMaxPixelClock) * 10; if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support) /* Since there is no information on the SS, report conservative @@ -497,6 +500,7 @@ static enum bp_result get_firmware_info_v2_1( info->external_clock_source_frequency_for_dp = le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10; info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level; + info->max_pixel_clock = le16_to_cpu(firmwareInfo->usMaxPixelClock) * 10; /* There should be only one entry in the SS info table for Memory Clock */ @@ -736,18 +740,94 @@ static enum bp_result bios_parser_transmitter_control( return bp->cmd_tbl.transmitter_control(bp, cntl); } +static enum bp_result bios_parser_select_crtc_source( + struct dc_bios *dcb, + struct bp_crtc_source_select *bp_params) +{ + struct bios_parser *bp = 
BP_FROM_DCB(dcb); + + if (!bp->cmd_tbl.select_crtc_source) + return BP_RESULT_FAILURE; + + return bp->cmd_tbl.select_crtc_source(bp, bp_params); +} + static enum bp_result bios_parser_encoder_control( struct dc_bios *dcb, struct bp_encoder_control *cntl) { struct bios_parser *bp = BP_FROM_DCB(dcb); + if (cntl->engine_id == ENGINE_ID_DACA) { + if (!bp->cmd_tbl.dac1_encoder_control) + return BP_RESULT_FAILURE; + + return bp->cmd_tbl.dac1_encoder_control( + bp, cntl->action == ENCODER_CONTROL_ENABLE, + cntl->pixel_clock, ATOM_DAC1_PS2); + } else if (cntl->engine_id == ENGINE_ID_DACB) { + if (!bp->cmd_tbl.dac2_encoder_control) + return BP_RESULT_FAILURE; + + return bp->cmd_tbl.dac2_encoder_control( + bp, cntl->action == ENCODER_CONTROL_ENABLE, + cntl->pixel_clock, ATOM_DAC1_PS2); + } + if (!bp->cmd_tbl.dig_encoder_control) return BP_RESULT_FAILURE; return bp->cmd_tbl.dig_encoder_control(bp, cntl); } +static enum bp_result bios_parser_dac_load_detection( + struct dc_bios *dcb, + enum engine_id engine_id, + enum dal_device_type device_type, + uint32_t enum_id) +{ + struct bios_parser *bp = BP_FROM_DCB(dcb); + struct dc_context *ctx = dcb->ctx; + struct bp_load_detection_parameters bp_params = {0}; + enum bp_result bp_result; + uint32_t bios_0_scratch; + uint32_t device_id_mask = 0; + + bp_params.engine_id = engine_id; + bp_params.device_id = get_support_mask_for_device_id(device_type, enum_id); + + if (engine_id != ENGINE_ID_DACA && + engine_id != ENGINE_ID_DACB) + return BP_RESULT_UNSUPPORTED; + + if (!bp->cmd_tbl.dac_load_detection) + return BP_RESULT_UNSUPPORTED; + + if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT) + device_id_mask = ATOM_S0_CRT1_MASK; + else if (bp_params.device_id == ATOM_DEVICE_CRT2_SUPPORT) + device_id_mask = ATOM_S0_CRT2_MASK; + else + return BP_RESULT_UNSUPPORTED; + + /* BIOS will write the detected devices to BIOS_SCRATCH_0, clear corresponding bit */ + bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0); + bios_0_scratch &= ~device_id_mask; + dm_write_reg(ctx, bp->base.regs->BIOS_SCRATCH_0, bios_0_scratch); + + bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params); + + if (bp_result != BP_RESULT_OK) + return bp_result; + + bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0); + + if (bios_0_scratch & device_id_mask) + return BP_RESULT_OK; + + return BP_RESULT_FAILURE; +} + static enum bp_result bios_parser_adjust_pixel_clock( struct dc_bios *dcb, struct bp_adjust_pixel_clock_parameters *bp_params) @@ -858,7 +938,7 @@ static bool bios_parser_is_device_id_supported( { struct bios_parser *bp = BP_FROM_DCB(dcb); - uint32_t mask = get_support_mask_for_device_id(id); + uint32_t mask = get_support_mask_for_device_id(id.device_type, id.enum_id); return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0; } @@ -2149,11 +2229,10 @@ static uint32_t signal_to_ss_id(enum as_signal_type signal) return clk_id_ss; } -static uint32_t get_support_mask_for_device_id(struct device_id device_id) +static uint32_t get_support_mask_for_device_id( + enum dal_device_type device_type, + uint32_t enum_id) { - enum dal_device_type device_type = device_id.device_type; - uint32_t enum_id = device_id.enum_id; - switch (device_type) { case DEVICE_TYPE_LCD: switch (enum_id) { @@ -2829,8 +2908,12 @@ static const struct dc_vbios_funcs vbios_funcs = { .is_device_id_supported = bios_parser_is_device_id_supported, /* COMMANDS */ + .select_crtc_source = bios_parser_select_crtc_source, + .encoder_control = bios_parser_encoder_control, + .dac_load_detection = 
bios_parser_dac_load_detection, + .transmitter_control = bios_parser_transmitter_control, .enable_crtc = bios_parser_enable_crtc, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 04eb647acc4e..550a9f1d03f8 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1480,10 +1480,10 @@ static enum bp_result get_embedded_panel_info_v2_1( /* not provided by VBIOS */ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0; - info->lcd_timing.misc_info.H_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo - & ATOM_HSYNC_POLARITY); - info->lcd_timing.misc_info.V_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo - & ATOM_VSYNC_POLARITY); + info->lcd_timing.misc_info.H_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo & + ATOM_HSYNC_POLARITY); + info->lcd_timing.misc_info.V_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo & + ATOM_VSYNC_POLARITY); /* not provided by VBIOS */ info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 58e88778da7f..22457f417e65 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -52,7 +52,9 @@ static void init_transmitter_control(struct bios_parser *bp); static void init_set_pixel_clock(struct bios_parser *bp); static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp); static void init_adjust_display_pll(struct bios_parser *bp); +static void init_select_crtc_source(struct bios_parser *bp); static void init_dac_encoder_control(struct bios_parser *bp); +static void init_dac_load_detection(struct bios_parser *bp); static void init_dac_output_control(struct bios_parser *bp); static void init_set_crtc_timing(struct bios_parser *bp); static void init_enable_crtc(struct bios_parser *bp); @@ -69,7 +71,9 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp) init_set_pixel_clock(bp); init_enable_spread_spectrum_on_ppll(bp); init_adjust_display_pll(bp); + init_select_crtc_source(bp); init_dac_encoder_control(bp); + init_dac_load_detection(bp); init_dac_output_control(bp); init_set_crtc_timing(bp); init_enable_crtc(bp); @@ -1609,6 +1613,198 @@ static enum bp_result adjust_display_pll_v3( return result; } +/******************************************************************************* + ******************************************************************************** + ** + ** SELECT CRTC SOURCE + ** + ******************************************************************************** + *******************************************************************************/ + +static enum bp_result select_crtc_source_v1( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); +static enum bp_result select_crtc_source_v2( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); +static enum bp_result select_crtc_source_v3( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); + +static void init_select_crtc_source(struct bios_parser *bp) +{ + switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) { + case 1: + bp->cmd_tbl.select_crtc_source = select_crtc_source_v1; + break; + case 2: + bp->cmd_tbl.select_crtc_source = select_crtc_source_v2; + break; + case 3: + bp->cmd_tbl.select_crtc_source = select_crtc_source_v3; + break; + default: + bp->cmd_tbl.select_crtc_source = NULL; + break; + } +} + +static enum 
bp_result select_crtc_source_v1( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + SELECT_CRTC_SOURCE_PS_ALLOCATION params; + + if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC)) + return BP_RESULT_BADINPUT; + + switch (bp_params->engine_id) { + case ENGINE_ID_DACA: + params.ucDevice = ATOM_DEVICE_CRT1_INDEX; + break; + case ENGINE_ID_DACB: + params.ucDevice = ATOM_DEVICE_CRT2_INDEX; + break; + default: + return BP_RESULT_BADINPUT; + } + + if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params)) + result = BP_RESULT_OK; + + return result; +} + +static bool select_crtc_source_v2_encoder_id( + enum engine_id engine_id, uint8_t *out_encoder_id) +{ + uint8_t encoder_id = 0; + + switch (engine_id) { + case ENGINE_ID_DIGA: + encoder_id = ASIC_INT_DIG1_ENCODER_ID; + break; + case ENGINE_ID_DIGB: + encoder_id = ASIC_INT_DIG2_ENCODER_ID; + break; + case ENGINE_ID_DIGC: + encoder_id = ASIC_INT_DIG3_ENCODER_ID; + break; + case ENGINE_ID_DIGD: + encoder_id = ASIC_INT_DIG4_ENCODER_ID; + break; + case ENGINE_ID_DIGE: + encoder_id = ASIC_INT_DIG5_ENCODER_ID; + break; + case ENGINE_ID_DIGF: + encoder_id = ASIC_INT_DIG6_ENCODER_ID; + break; + case ENGINE_ID_DIGG: + encoder_id = ASIC_INT_DIG7_ENCODER_ID; + break; + case ENGINE_ID_DACA: + encoder_id = ASIC_INT_DAC1_ENCODER_ID; + break; + case ENGINE_ID_DACB: + encoder_id = ASIC_INT_DAC2_ENCODER_ID; + break; + default: + return false; + } + + *out_encoder_id = encoder_id; + return true; +} + +static bool select_crtc_source_v2_encoder_mode( + enum signal_type signal_type, uint8_t *out_encoder_mode) +{ + uint8_t encoder_mode = 0; + + switch (signal_type) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + encoder_mode = ATOM_ENCODER_MODE_DVI; + break; + case SIGNAL_TYPE_HDMI_TYPE_A: + encoder_mode = ATOM_ENCODER_MODE_HDMI; + break; + case SIGNAL_TYPE_LVDS: + encoder_mode = ATOM_ENCODER_MODE_LVDS; + break; + case SIGNAL_TYPE_RGB: + encoder_mode = ATOM_ENCODER_MODE_CRT; + break; + case SIGNAL_TYPE_DISPLAY_PORT: + encoder_mode = ATOM_ENCODER_MODE_DP; + break; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + encoder_mode = ATOM_ENCODER_MODE_DP_MST; + break; + case SIGNAL_TYPE_EDP: + encoder_mode = ATOM_ENCODER_MODE_DP; + break; + default: + return false; + } + + *out_encoder_mode = encoder_mode; + return true; +} + +static enum bp_result select_crtc_source_v2( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + SELECT_CRTC_SOURCE_PARAMETERS_V3 params; + + if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC)) + return BP_RESULT_BADINPUT; + + if (!select_crtc_source_v2_encoder_id( + bp_params->engine_id, + ¶ms.ucEncoderID)) + return BP_RESULT_BADINPUT; + if (!select_crtc_source_v2_encoder_mode( + bp_params->sink_signal, + ¶ms.ucEncodeMode)) + return BP_RESULT_BADINPUT; + + if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params)) + result = BP_RESULT_OK; + + return result; +} + +static enum bp_result select_crtc_source_v3( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + SELECT_CRTC_SOURCE_PARAMETERS_V3 params; + + if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC)) + return BP_RESULT_BADINPUT; + + if (!select_crtc_source_v2_encoder_id( + bp_params->engine_id, + ¶ms.ucEncoderID)) + return BP_RESULT_BADINPUT; + if (!select_crtc_source_v2_encoder_mode( + bp_params->sink_signal, 
+ ¶ms.ucEncodeMode)) + return BP_RESULT_BADINPUT; + + params.ucDstBpc = bp_params->bit_depth; + + if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params)) + result = BP_RESULT_OK; + + return result; +} + /******************************************************************************* ******************************************************************************** ** @@ -1708,6 +1904,96 @@ static enum bp_result dac2_encoder_control_v1( return result; } +/******************************************************************************* + ******************************************************************************** + ** + ** DAC LOAD DETECTION + ** + ******************************************************************************** + *******************************************************************************/ + +static enum bp_result dac_load_detection_v1( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params); + +static enum bp_result dac_load_detection_v3( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params); + +static void init_dac_load_detection(struct bios_parser *bp) +{ + switch (BIOS_CMD_TABLE_PARA_REVISION(DAC_LoadDetection)) { + case 1: + case 2: + bp->cmd_tbl.dac_load_detection = dac_load_detection_v1; + break; + case 3: + default: + bp->cmd_tbl.dac_load_detection = dac_load_detection_v3; + break; + } +} + +static void dac_load_detect_prepare_params( + struct _DAC_LOAD_DETECTION_PS_ALLOCATION *params, + enum engine_id engine_id, + uint16_t device_id, + uint8_t misc) +{ + uint8_t dac_type = ENGINE_ID_DACA; + + if (engine_id == ENGINE_ID_DACB) + dac_type = ATOM_DAC_B; + + params->sDacload.usDeviceID = cpu_to_le16(device_id); + params->sDacload.ucDacType = dac_type; + params->sDacload.ucMisc = misc; +} + +static enum bp_result dac_load_detection_v1( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + DAC_LOAD_DETECTION_PS_ALLOCATION params; + + dac_load_detect_prepare_params( + ¶ms, + bp_params->engine_id, + bp_params->device_id, + 0); + + if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params)) + result = BP_RESULT_OK; + + return result; +} + +static enum bp_result dac_load_detection_v3( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + DAC_LOAD_DETECTION_PS_ALLOCATION params; + + uint8_t misc = 0; + + if (bp_params->device_id == ATOM_DEVICE_CV_SUPPORT || + bp_params->device_id == ATOM_DEVICE_TV1_SUPPORT) + misc = DAC_LOAD_MISC_YPrPb; + + dac_load_detect_prepare_params( + ¶ms, + bp_params->engine_id, + bp_params->device_id, + misc); + + if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params)) + result = BP_RESULT_OK; + + return result; +} + /******************************************************************************* ******************************************************************************** ** diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h index ad533775e724..e89b1ba0048b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h @@ -52,6 +52,9 @@ struct cmd_tbl { enum bp_result (*adjust_display_pll)( struct bios_parser *bp, struct bp_adjust_pixel_clock_parameters *bp_params); + enum bp_result (*select_crtc_source)( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); enum bp_result (*dac1_encoder_control)( struct bios_parser *bp, bool enable, @@ 
-68,6 +71,9 @@ struct cmd_tbl { enum bp_result (*dac2_output_control)( struct bios_parser *bp, bool enable); + enum bp_result (*dac_load_detection)( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params); enum bp_result (*set_crtc_timing)( struct bios_parser *bp, struct bp_hw_crtc_timing_parameters *bp_params); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 9e63fa72101c..db687a13174d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -509,16 +509,16 @@ void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_b regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4) + if (regs_and_bypass->dppclk_bypass > 4) regs_and_bypass->dppclk_bypass = 0; regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4) + if (regs_and_bypass->dcfclk_bypass > 4) regs_and_bypass->dcfclk_bypass = 0; regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4) + if (regs_and_bypass->dispclk_bypass > 4) regs_and_bypass->dispclk_bypass = 0; regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4) + if (regs_and_bypass->dprefclk_bypass > 4) regs_and_bypass->dprefclk_bypass = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index b315ed91e010..3a881451e9da 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -40,7 +40,7 @@ #include "dm_helpers.h" #include "dc_dmub_srv.h" - +#include "reg_helper.h" #include "logger_types.h" #undef DC_LOGGER #define DC_LOGGER \ @@ -48,9 +48,43 @@ #include "link_service.h" +#define MAX_INSTANCE 7 +#define MAX_SEGMENT 8 + +struct IP_BASE_INSTANCE { + unsigned int segment[MAX_SEGMENT]; +}; + +struct IP_BASE { + struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; +}; + +static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0, 0, 0 } }, + { { 0x00016E00, 0x02401C00, 0, 0, 0, 0, 0, 0 } }, + { { 0x00017000, 0x02402000, 0, 0, 0, 0, 0, 0 } }, + { { 0x00017200, 0x02402400, 0, 0, 0, 0, 0, 0 } }, + { { 0x0001B000, 0x0242D800, 0, 0, 0, 0, 0, 0 } }, + { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } } } }; + +#define regCLK1_CLK0_CURRENT_CNT 0x0314 +#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK1_CURRENT_CNT 0x0315 +#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK2_CURRENT_CNT 0x0316 +#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK3_CURRENT_CNT 0x0317 +#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK4_CURRENT_CNT 0x0318 +#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK5_CURRENT_CNT 0x0319 +#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0 + #define TO_CLK_MGR_DCN315(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn315, base) +#define REG(reg_name) \ + (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) + 
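+
+/*
+ * For example, REG(CLK1_CLK0_CURRENT_CNT) expands to
+ * CLK_BASE.instance[0].segment[regCLK1_CLK0_CURRENT_CNT_BASE_IDX] +
+ * regCLK1_CLK0_CURRENT_CNT, i.e. 0x00016C00 + 0x0314 = 0x00016F14,
+ * the offset used by the REG_READ(CLK1_CLK0_CURRENT_CNT) call below
+ * to read back the current dispclk.
+ */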
#define UNSUPPORTED_DCFCLK 10000000 #define MIN_DPP_DISP_CLK 100000 @@ -245,9 +279,38 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } +static void dcn315_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + // read dtbclk + internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT); + + // read dcfclk + internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT); + + // read dppclk + internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT); + + // read dprefclk + internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT); + + // read dispclk + internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT); +} + static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) { + struct dcn35_clk_internal internal = {0}; + + dcn315_dump_clk_registers_internal(&internal, clk_mgr_base); + + regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10; + regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10; + regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10; + regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10; + regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; return; } @@ -594,13 +657,32 @@ static struct clk_mgr_funcs dcn315_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn315_update_clocks, - .init_clocks = dcn31_init_clocks, + .init_clocks = dcn315_init_clocks, .enable_pme_wa = dcn315_enable_pme_wa, .are_clock_states_equal = dcn31_are_clock_states_equal, .notify_wm_ranges = dcn315_notify_wm_ranges }; extern struct clk_mgr_funcs dcn3_fpga_funcs; +void dcn315_init_clocks(struct clk_mgr *clk_mgr) +{ + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + struct clk_mgr_dcn315 *clk_mgr_dcn315 = TO_CLK_MGR_DCN315(clk_mgr_int); + struct clk_log_info log_info = {0}; + + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk + clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; + clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; + clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; + + dcn315_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn315->base.base, &log_info); + clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000; +} + void dcn315_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn315 *clk_mgr, @@ -661,6 +743,7 @@ void dcn315_clk_mgr_construct( /* Saved clocks configured at boot for debug purposes */ dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); + clk_mgr->base.base.clks.dispclk_khz = clk_mgr->base.base.boot_snapshot.dispclk * 1000; clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h index ac36ddf5dd1a..642ae3d4a790 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h @@ -44,6 +44,7 @@ void dcn315_clk_mgr_construct(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg); +void dcn315_init_clocks(struct clk_mgr *clk_mgr); void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); #endif //__DCN315_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 1eb04772f5da..dfd0c9505af0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -635,16 +635,16 @@ static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4) + if (regs_and_bypass->dppclk_bypass > 4) regs_and_bypass->dppclk_bypass = 0; regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4) + if (regs_and_bypass->dcfclk_bypass > 4) regs_and_bypass->dcfclk_bypass = 0; regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4) + if (regs_and_bypass->dispclk_bypass > 4) regs_and_bypass->dispclk_bypass = 0; regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4) + if (regs_and_bypass->dprefclk_bypass > 4) regs_and_bypass->dprefclk_bypass = 0; if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { @@ -1295,6 +1295,35 @@ static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr, dcn35_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz); } +static unsigned int dcn35_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + unsigned int num_clk_levels; + + switch (clk_type) { + case CLK_TYPE_DISPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return num_clk_levels ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dispclk; + case CLK_TYPE_DPPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels; + return num_clk_levels ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dppclk; + case CLK_TYPE_DSCCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return num_clk_levels ? 
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 : + clk_mgr->base.boot_snapshot.dispclk / 3; + default: + break; + } + + return 0; +} + static struct clk_mgr_funcs dcn35_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, @@ -1306,6 +1335,7 @@ static struct clk_mgr_funcs dcn35_funcs = { .set_low_power_state = dcn35_set_low_power_state, .exit_low_power_state = dcn35_exit_low_power_state, .is_ips_supported = dcn35_is_ips_supported, + .get_max_clock_khz = dcn35_get_max_clock_khz, }; struct clk_mgr_funcs dcn35_fpga_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 5f2d5638c819..8be9cbd43e18 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -83,7 +83,7 @@ #include "hw_sequencer_private.h" #if defined(CONFIG_DRM_AMD_DC_FP) -#include "dml2/dml2_internal_types.h" +#include "dml2_0/dml2_internal_types.h" #include "soc_and_ip_translator.h" #endif @@ -148,10 +148,16 @@ static const char DC_BUILD_ID[] = "production-build"; /* Private functions */ -static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new) +static inline void elevate_update_type( + struct surface_update_descriptor *descriptor, + enum surface_update_type new_type, + enum dc_lock_descriptor new_locks +) { - if (new > *original) - *original = new; + if (new_type > descriptor->update_type) + descriptor->update_type = new_type; + + descriptor->lock_descriptor |= new_locks; } static void destroy_links(struct dc *dc) @@ -297,6 +303,7 @@ static bool create_links( link->link_id.id = CONNECTOR_ID_VIRTUAL; link->link_id.enum_id = ENUM_ID_1; link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED; link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); if (!link->link_enc) { @@ -493,9 +500,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, 1, *adjust); stream->adjust.timing_adjust_pending = false; + + if (dc->hwss.notify_cursor_offload_drr_update) + dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream); + return true; } } + return false; } @@ -1143,8 +1155,8 @@ static bool dc_construct(struct dc *dc, /* set i2c speed if not done by the respective dcnxxx__resource.c */ if (dc->caps.i2c_speed_in_khz_hdcp == 0) dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; - if (dc->caps.max_optimizable_video_width == 0) - dc->caps.max_optimizable_video_width = 5120; + if (dc->check_config.max_optimizable_video_width == 0) + dc->check_config.max_optimizable_video_width = 5120; dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); if (!dc->clk_mgr) goto fail; @@ -2135,6 +2147,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c if (!dcb->funcs->is_accelerated_mode(dcb)) { disable_vbios_mode_if_required(dc, context); dc->hwss.enable_accelerated_mode(dc, context); + } else if (get_seamless_boot_stream_count(dc->current_state) > 0) { + /* If the previous Stream still retains the apply seamless boot flag, + * it means the OS has not actually performed a flip yet. 
+ * At this point, if we receive dc_commit_streams again, we should + * once more check whether the actual HW timing matches what the OS + * has provided + */ + disable_vbios_mode_if_required(dc, context); } if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) { @@ -2158,8 +2178,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c */ if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, true); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, true); if (dc->hwss.update_dsc_pg) dc->hwss.update_dsc_pg(dc, context, false); @@ -2188,8 +2208,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); } + for (i = 0; i < dc->current_state->stream_count; i++) + dc_dmub_srv_control_cursor_offload(dc, dc->current_state, dc->current_state->streams[i], false); + result = dc->hwss.apply_ctx_to_hw(dc, context); + for (i = 0; i < context->stream_count; i++) + dc_dmub_srv_control_cursor_offload(dc, context, context->streams[i], true); + if (result != DC_OK) { /* Application of dc_state to hardware stopped. */ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; @@ -2229,8 +2255,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc->hwss.commit_subvp_config(dc, context); if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); for (i = 0; i < context->stream_count; i++) { const struct dc_link *link = context->streams[i]->link; @@ -2645,47 +2671,50 @@ static bool is_surface_in_context( return false; } -static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u) +static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u) { union surface_update_flags *update_flags = &u->surface->update_flags; - enum surface_update_type update_type = UPDATE_TYPE_FAST; + struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; if (!u->plane_info) - return UPDATE_TYPE_FAST; + return update_type; + + // `plane_info` present means at least `STREAM` lock is required + elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); if (u->plane_info->color_space != u->surface->color_space) { update_flags->bits.color_space_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { update_flags->bits.horizontal_mirror_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } if (u->plane_info->rotation != u->surface->rotation) { update_flags->bits.rotation_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (u->plane_info->format != u->surface->format) { update_flags->bits.pixel_format_change = 1; - elevate_update_type(&update_type, 
UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (u->plane_info->stereo_format != u->surface->stereo_format) { update_flags->bits.stereo_format_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { update_flags->bits.per_pixel_alpha_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { update_flags->bits.global_alpha_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } if (u->plane_info->dcc.enable != u->surface->dcc.enable @@ -2697,7 +2726,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc, * recalculate stutter period. */ update_flags->bits.dcc_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (resource_pixel_format_to_bpp(u->plane_info->format) != @@ -2706,30 +2735,41 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc, * and DML calculation */ update_flags->bits.bpp_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { update_flags->bits.plane_size_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } + const struct dc_tiling_info *tiling = &u->plane_info->tiling_info; - if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, - sizeof(struct dc_tiling_info)) != 0) { + if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) { update_flags->bits.swizzle_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); - /* todo: below are HW dependent, we should add a hook to - * DCE/N resource and validated there. 
- */ - if (!dc->debug.skip_full_updated_if_possible) { - /* swizzled mode requires RQ to be setup properly, - * thus need to run DML to calculate RQ settings - */ - update_flags->bits.bandwidth_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + switch (tiling->gfxversion) { + case DcGfxVersion9: + case DcGfxVersion10: + case DcGfxVersion11: + if (tiling->gfx9.swizzle != DC_SW_LINEAR) { + update_flags->bits.bandwidth_change = 1; + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); + } + break; + case DcGfxAddr3: + if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) { + update_flags->bits.bandwidth_change = 1; + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); + } + break; + case DcGfxVersion7: + case DcGfxVersion8: + case DcGfxVersionUnknown: + default: + break; } } @@ -2737,14 +2777,18 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc, return update_type; } -static enum surface_update_type get_scaling_info_update_type( - const struct dc *dc, +static struct surface_update_descriptor get_scaling_info_update_type( + const struct dc_check_config *check_config, const struct dc_surface_update *u) { union surface_update_flags *update_flags = &u->surface->update_flags; + struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; if (!u->scaling_info) - return UPDATE_TYPE_FAST; + return update_type; + + // `scaling_info` present means at least `STREAM` lock is required + elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); if (u->scaling_info->src_rect.width != u->surface->src_rect.width || u->scaling_info->src_rect.height != u->surface->src_rect.height @@ -2755,6 +2799,7 @@ static enum surface_update_type get_scaling_info_update_type( || u->scaling_info->scaling_quality.integer_scaling != u->surface->scaling_quality.integer_scaling) { update_flags->bits.scaling_change = 1; + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); if (u->scaling_info->src_rect.width > u->surface->src_rect.width || u->scaling_info->src_rect.height > u->surface->src_rect.height) @@ -2768,7 +2813,7 @@ static enum surface_update_type get_scaling_info_update_type( /* Making dst rect smaller requires a bandwidth change */ update_flags->bits.bandwidth_change = 1; - if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && + if (u->scaling_info->src_rect.width > check_config->max_optimizable_video_width && (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) /* Changing clip size of a large surface may result in MPC slice count change */ @@ -2780,123 +2825,109 @@ static enum surface_update_type get_scaling_info_update_type( || u->scaling_info->clip_rect.x != u->surface->clip_rect.x || u->scaling_info->clip_rect.y != u->surface->clip_rect.y || u->scaling_info->dst_rect.x != u->surface->dst_rect.x - || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) + || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) { + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); update_flags->bits.position_change = 1; + } - /* process every update flag before returning */ - if (update_flags->bits.clock_change - || update_flags->bits.bandwidth_change - || update_flags->bits.scaling_change) - return UPDATE_TYPE_FULL; - - if (update_flags->bits.position_change) - return UPDATE_TYPE_MED; - - return UPDATE_TYPE_FAST; + return 
update_type; } -static enum surface_update_type det_surface_update(const struct dc *dc, - const struct dc_surface_update *u) +static struct surface_update_descriptor det_surface_update( + const struct dc_check_config *check_config, + struct dc_surface_update *u) { - const struct dc_state *context = dc->current_state; - enum surface_update_type type; - enum surface_update_type overall_type = UPDATE_TYPE_FAST; + struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; union surface_update_flags *update_flags = &u->surface->update_flags; - if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { + if (u->surface->force_full_update) { update_flags->raw = 0xFFFFFFFF; - return UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); + return overall_type; } update_flags->raw = 0; // Reset all flags - type = get_plane_info_update_type(dc, u); - elevate_update_type(&overall_type, type); + struct surface_update_descriptor inner_type = get_plane_info_update_type(u); - type = get_scaling_info_update_type(dc, u); - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); + + inner_type = get_scaling_info_update_type(check_config, u); + elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); if (u->flip_addr) { update_flags->bits.addr_update = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { update_flags->bits.tmz_changed = 1; - elevate_update_type(&overall_type, UPDATE_TYPE_FULL); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } } - if (u->in_transfer_func) + if (u->in_transfer_func) { update_flags->bits.in_transfer_func_change = 1; - - if (u->input_csc_color_matrix) - update_flags->bits.input_csc_change = 1; - - if (u->coeff_reduction_factor) - update_flags->bits.coeff_reduction_change = 1; - - if (u->gamut_remap_matrix) - update_flags->bits.gamut_remap_change = 1; - - if (u->blend_tf) - update_flags->bits.gamma_change = 1; - - if (u->gamma) { - enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; - - if (u->plane_info) - format = u->plane_info->format; - else - format = u->surface->format; - - if (dce_use_lut(format)) - update_flags->bits.gamma_change = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } - if (u->lut3d_func || u->func_shaper) + if (u->input_csc_color_matrix) { + update_flags->bits.input_csc_change = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } + + if (u->coeff_reduction_factor) { + update_flags->bits.coeff_reduction_change = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } + + if (u->gamut_remap_matrix) { + update_flags->bits.gamut_remap_change = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } + + if (u->blend_tf || (u->gamma && dce_use_lut(u->plane_info ? 
u->plane_info->format : u->surface->format))) { + update_flags->bits.gamma_change = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } + + if (u->lut3d_func || u->func_shaper) { update_flags->bits.lut_3d = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } if (u->hdr_mult.value) if (u->hdr_mult.value != u->surface->hdr_mult.value) { + // TODO: Should be fast? update_flags->bits.hdr_mult = 1; - elevate_update_type(&overall_type, UPDATE_TYPE_MED); + elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); } if (u->sdr_white_level_nits) if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) { + // TODO: Should be fast? update_flags->bits.sdr_white_level_nits = 1; - elevate_update_type(&overall_type, UPDATE_TYPE_FULL); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (u->cm2_params) { - if ((u->cm2_params->component_settings.shaper_3dlut_setting - != u->surface->mcm_shaper_3dlut_setting) - || (u->cm2_params->component_settings.lut1d_enable - != u->surface->mcm_lut1d_enable)) + if (u->cm2_params->component_settings.shaper_3dlut_setting != u->surface->mcm_shaper_3dlut_setting + || u->cm2_params->component_settings.lut1d_enable != u->surface->mcm_lut1d_enable + || u->cm2_params->cm2_luts.lut3d_data.lut3d_src != u->surface->mcm_luts.lut3d_data.lut3d_src) { update_flags->bits.mcm_transfer_function_enable_change = 1; - if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src - != u->surface->mcm_luts.lut3d_data.lut3d_src) - update_flags->bits.mcm_transfer_function_enable_change = 1; - } - if (update_flags->bits.in_transfer_func_change) { - type = UPDATE_TYPE_MED; - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); + } } if (update_flags->bits.lut_3d && u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { - type = UPDATE_TYPE_FULL; - elevate_update_type(&overall_type, type); - } - if (update_flags->bits.mcm_transfer_function_enable_change) { - type = UPDATE_TYPE_FULL; - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } - if (dc->debug.enable_legacy_fast_update && + if (check_config->enable_legacy_fast_update && (update_flags->bits.gamma_change || update_flags->bits.gamut_remap_change || update_flags->bits.input_csc_change || update_flags->bits.coeff_reduction_change)) { - type = UPDATE_TYPE_FULL; - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } return overall_type; } @@ -2924,32 +2955,26 @@ static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_upda } } -static enum surface_update_type check_update_surfaces_for_stream( - struct dc *dc, +static struct surface_update_descriptor check_update_surfaces_for_stream( + const struct dc_check_config *check_config, struct dc_surface_update *updates, int surface_count, - struct dc_stream_update *stream_update, - const struct dc_stream_status *stream_status) + struct dc_stream_update *stream_update) { - int i; - enum surface_update_type overall_type = UPDATE_TYPE_FAST; - - if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc)) - overall_type = UPDATE_TYPE_FULL; - - if (stream_status == NULL || stream_status->plane_count != surface_count) - overall_type = UPDATE_TYPE_FULL; + struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, 
LOCK_DESCRIPTOR_NONE }; if (stream_update && stream_update->pending_test_pattern) { - overall_type = UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } if (stream_update && stream_update->hw_cursor_req) { - overall_type = UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); } /* some stream updates require passive update */ if (stream_update) { + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + union stream_update_flags *su_flags = &stream_update->stream->update_flags; if ((stream_update->src.height != 0 && stream_update->src.width != 0) || @@ -2957,14 +2982,16 @@ static enum surface_update_type check_update_surfaces_for_stream( stream_update->integer_scaling_update) su_flags->bits.scaling = 1; - if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) + if (check_config->enable_legacy_fast_update && stream_update->out_transfer_func) su_flags->bits.out_tf = 1; if (stream_update->abm_level) su_flags->bits.abm_level = 1; - if (stream_update->dpms_off) + if (stream_update->dpms_off) { su_flags->bits.dpms_off = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL | LOCK_DESCRIPTOR_LINK); + } if (stream_update->gamut_remap) su_flags->bits.gamut_remap = 1; @@ -2992,24 +3019,27 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->output_color_space) su_flags->bits.out_csc = 1; - if (su_flags->raw != 0) - overall_type = UPDATE_TYPE_FULL; + // TODO: Make each elevation explicit, as to not override fast stream in crct_timing_adjust + if (su_flags->raw) + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); - if (stream_update->output_csc_transform) + // Non-global cases + if (stream_update->output_csc_transform) { su_flags->bits.out_csc = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } - /* Output transfer function changes do not require bandwidth recalculation, - * so don't trigger a full update - */ - if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) + if (!check_config->enable_legacy_fast_update && stream_update->out_transfer_func) { su_flags->bits.out_tf = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + } } - for (i = 0 ; i < surface_count; i++) { - enum surface_update_type type = - det_surface_update(dc, &updates[i]); + for (int i = 0 ; i < surface_count; i++) { + struct surface_update_descriptor inner_type = + det_surface_update(check_config, &updates[i]); - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); } return overall_type; @@ -3020,44 +3050,18 @@ static enum surface_update_type check_update_surfaces_for_stream( * * See :c:type:`enum surface_update_type ` for explanation of update types */ -enum surface_update_type dc_check_update_surfaces_for_stream( - struct dc *dc, +struct surface_update_descriptor dc_check_update_surfaces_for_stream( + const struct dc_check_config *check_config, struct dc_surface_update *updates, int surface_count, - struct dc_stream_update *stream_update, - const struct dc_stream_status *stream_status) + struct dc_stream_update *stream_update) { - int i; - enum surface_update_type type; - if (stream_update) stream_update->stream->update_flags.raw = 0; - for (i = 0; i < surface_count; i++) + for (size_t i = 0; i < surface_count; i++) 
updates[i].surface->update_flags.raw = 0; - type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); - if (type == UPDATE_TYPE_FULL) { - if (stream_update) { - uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; - stream_update->stream->update_flags.raw = 0xFFFFFFFF; - stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; - } - for (i = 0; i < surface_count; i++) - updates[i].surface->update_flags.raw = 0xFFFFFFFF; - } - - if (type == UPDATE_TYPE_FAST) { - // If there's an available clock comparator, we use that. - if (dc->clk_mgr->funcs->are_clock_states_equal) { - if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) - dc->optimized_required = true; - // Else we fallback to mem compare. - } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { - dc->optimized_required = true; - } - } - - return type; + return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update); } static struct dc_stream_status *stream_get_status( @@ -3388,7 +3392,11 @@ static void restore_planes_and_stream_state( for (i = 0; i < status->plane_count; i++) { dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]); } + + // refcount is persistent + struct kref temp_refcount = stream->refcount; *stream = scratch->stream_state; + stream->refcount = temp_refcount; } /** @@ -3426,6 +3434,13 @@ static void update_seamless_boot_flags(struct dc *dc, } } +static bool full_update_required_weak( + const struct dc *dc, + const struct dc_surface_update *srf_updates, + int surface_count, + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream); + /** * update_planes_and_stream_state() - The function takes planes and stream * updates as inputs and determines the appropriate update type. If update type @@ -3472,7 +3487,10 @@ static bool update_planes_and_stream_state(struct dc *dc, context = dc->current_state; update_type = dc_check_update_surfaces_for_stream( - dc, srf_updates, surface_count, stream_update, stream_status); + &dc->check_config, srf_updates, surface_count, stream_update).update_type; + if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream)) + update_type = UPDATE_TYPE_FULL; + /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. 
@@ -3504,6 +3522,16 @@ static bool update_planes_and_stream_state(struct dc *dc, } } + if (update_type == UPDATE_TYPE_FULL) { + if (stream_update) { + uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; + stream_update->stream->update_flags.raw = 0xFFFFFFFF; + stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; + } + for (i = 0; i < surface_count; i++) + srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF; + } + if (update_type >= update_surface_trace_level) update_surface_trace(dc, srf_updates, surface_count); @@ -4149,7 +4177,7 @@ static void commit_planes_for_stream(struct dc *dc, if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) if (top_pipe_to_program && top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { - if (should_use_dmub_lock(stream->link)) { + if (should_use_dmub_inbox1_lock(dc, stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -4176,16 +4204,16 @@ static void commit_planes_for_stream(struct dc *dc, if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, true); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, true); dc->hwss.interdependent_update_lock(dc, context, true); } else { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, true); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, true); /* Lock the top pipe while updating plane addrs, since freesync requires * plane addr update event triggers to be synchronized. 
@@ -4228,9 +4256,8 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); - + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); return; } @@ -4419,7 +4446,7 @@ static void commit_planes_for_stream(struct dc *dc, top_pipe_to_program->stream_res.tg, CRTC_STATE_VACTIVE); - if (should_use_dmub_lock(stream->link)) { + if (should_use_dmub_inbox1_lock(dc, stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -4467,13 +4494,13 @@ static void commit_planes_for_stream(struct dc *dc, if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); } else { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); } // Fire manual trigger only when bottom plane is flipped @@ -4489,6 +4516,8 @@ static void commit_planes_for_stream(struct dc *dc, pipe_ctx->plane_state->skip_manual_trigger) continue; + if (dc->hwss.program_cursor_offload_now) + dc->hwss.program_cursor_offload_now(dc, pipe_ctx); if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); } @@ -4994,7 +5023,7 @@ void populate_fast_updates(struct dc_fast_update *fast_update, } } -static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count) +static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count) { int i; @@ -5035,18 +5064,44 @@ bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_ return false; } -static bool full_update_required(struct dc *dc, - struct dc_surface_update *srf_updates, +static bool full_update_required_weak( + const struct dc *dc, + const struct dc_surface_update *srf_updates, int surface_count, - struct dc_stream_update *stream_update, - struct dc_stream_state *stream) + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream) { - - int i; - struct dc_stream_status *stream_status; const struct dc_state *context = dc->current_state; + if (srf_updates) + for (int i = 0; i < surface_count; i++) + if (!is_surface_in_context(context, srf_updates[i].surface)) + return true; - for (i = 0; i < surface_count; i++) { + if (stream) { + const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream); + if (stream_status == NULL || stream_status->plane_count != surface_count) + return true; + } + if (dc->idle_optimizations_allowed) + return true; + + if (dc_can_clear_cursor_limit(dc)) + return true; + + return false; +} + +static bool full_update_required( + const struct dc *dc, + const struct dc_surface_update *srf_updates, + int surface_count, + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream) 
+{ + if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream)) + return true; + + for (int i = 0; i < surface_count; i++) { if (srf_updates && (srf_updates[i].plane_info || srf_updates[i].scaling_info || @@ -5062,8 +5117,7 @@ static bool full_update_required(struct dc *dc, srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || (srf_updates[i].cm2_params && (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting || - srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) || - !is_surface_in_context(context, srf_updates[i].surface))) + srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)))) return true; } @@ -5099,26 +5153,16 @@ static bool full_update_required(struct dc *dc, stream_update->hw_cursor_req)) return true; - if (stream) { - stream_status = dc_stream_get_status(stream); - if (stream_status == NULL || stream_status->plane_count != surface_count) - return true; - } - if (dc->idle_optimizations_allowed) - return true; - - if (dc_can_clear_cursor_limit(dc)) - return true; - return false; } -static bool fast_update_only(struct dc *dc, - struct dc_fast_update *fast_update, - struct dc_surface_update *srf_updates, +static bool fast_update_only( + const struct dc *dc, + const struct dc_fast_update *fast_update, + const struct dc_surface_update *srf_updates, int surface_count, - struct dc_stream_update *stream_update, - struct dc_stream_state *stream) + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream) { return fast_updates_exist(fast_update, surface_count) && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); @@ -5181,7 +5225,7 @@ static bool update_planes_and_stream_v2(struct dc *dc, commit_minimal_transition_state_in_dc_update(dc, context, stream, srf_updates, surface_count); - if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { + if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, surface_count, @@ -5224,7 +5268,7 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc, stream_update); if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && - !dc->debug.enable_legacy_fast_update) + !dc->check_config.enable_legacy_fast_update) commit_planes_for_stream_fast(dc, srf_updates, surface_count, @@ -5350,7 +5394,8 @@ bool dc_update_planes_and_stream(struct dc *dc, * specially handle compatibility problems with transitions among those * features as they are now transparent to the new sequence. 
*/ - if (dc->ctx->dce_version >= DCN_VERSION_4_01) + if (dc->ctx->dce_version >= DCN_VERSION_4_01 || dc->ctx->dce_version == DCN_VERSION_3_2 || + dc->ctx->dce_version == DCN_VERSION_3_21) ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, stream, stream_update); else @@ -5935,6 +5980,101 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc, return true; } +bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits, + uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline) +{ + bool status = false; + struct dc *dc = link->ctx->dc; + union dmub_rb_cmd cmd; + uint8_t otg_inst = 0; + unsigned int panel_inst = 0; + struct pipe_ctx *pipe_ctx = NULL; + struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; + int i = 0; + + // get panel_inst + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return status; + + // get otg_inst + for (i = 0; i < MAX_PIPES; i++) { + if (res_ctx && + res_ctx->pipe_ctx[i].stream && + res_ctx->pipe_ctx[i].stream->link && + res_ctx->pipe_ctx[i].stream->link == link && + res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { + pipe_ctx = &res_ctx->pipe_ctx[i]; + //TODO: refactor for multi edp support + break; + } + } + + if (pipe_ctx) + otg_inst = pipe_ctx->stream_res.tg->inst; + + // before enable smart power OLED, we need to call set pipe for DMUB to set ABM config + if (enable) { + if (dc->hwss.set_pipe && pipe_ctx) + dc->hwss.set_pipe(pipe_ctx); + } + + // fill in cmd + memset(&cmd, 0, sizeof(cmd)); + + cmd.smart_power_oled_enable.header.type = DMUB_CMD__SMART_POWER_OLED; + cmd.smart_power_oled_enable.header.sub_type = DMUB_CMD__SMART_POWER_OLED_ENABLE; + cmd.smart_power_oled_enable.header.payload_bytes = + sizeof(struct dmub_rb_cmd_smart_power_oled_enable_data) - sizeof(struct dmub_cmd_header); + cmd.smart_power_oled_enable.header.ret_status = 1; + cmd.smart_power_oled_enable.data.enable = enable; + cmd.smart_power_oled_enable.data.panel_inst = panel_inst; + cmd.smart_power_oled_enable.data.peak_nits = peak_nits; + cmd.smart_power_oled_enable.data.otg_inst = otg_inst; + cmd.smart_power_oled_enable.data.digfe_inst = link->link_enc->preferred_engine; + cmd.smart_power_oled_enable.data.digbe_inst = link->link_enc->transmitter; + + cmd.smart_power_oled_enable.data.debugcontrol = debug_control; + cmd.smart_power_oled_enable.data.triggerline = triggerline; + cmd.smart_power_oled_enable.data.fixed_max_cll = fixed_CLL; + + // send cmd + status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + return status; +} + +bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL) +{ + struct dc *dc = link->ctx->dc; + union dmub_rb_cmd cmd; + bool status = false; + unsigned int panel_inst = 0; + + // get panel_inst + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return status; + + // fill in cmd + memset(&cmd, 0, sizeof(cmd)); + + cmd.smart_power_oled_getmaxcll.header.type = DMUB_CMD__SMART_POWER_OLED; + cmd.smart_power_oled_getmaxcll.header.sub_type = DMUB_CMD__SMART_POWER_OLED_GETMAXCLL; + cmd.smart_power_oled_getmaxcll.header.payload_bytes = sizeof(cmd.smart_power_oled_getmaxcll.data); + cmd.smart_power_oled_getmaxcll.header.ret_status = 1; + + cmd.smart_power_oled_getmaxcll.data.input.panel_inst = panel_inst; + + // send cmd and wait for reply + status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); + + if (status) + *pCurrent_MaxCLL = 
cmd.smart_power_oled_getmaxcll.data.output.current_max_cll; + else + *pCurrent_MaxCLL = 0; + + return status; +} + uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, uint8_t dpia_port_index) { @@ -6349,7 +6489,7 @@ bool dc_is_cursor_limit_pending(struct dc *dc) return false; } -bool dc_can_clear_cursor_limit(struct dc *dc) +bool dc_can_clear_cursor_limit(const struct dc *dc) { uint32_t i; @@ -6378,3 +6518,576 @@ void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, if (dc->hwss.get_underflow_debug_data) dc->hwss.get_underflow_debug_data(dc, tg, out_data); } + +void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst, + struct power_features *out_data) +{ + out_data->uclk_p_state = dc->current_state->clk_mgr->clks.p_state_change_support; + out_data->fams = dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching; +} + +bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state) +{ + struct dc_state *context; + struct resource_context *res_ctx; + int i; + + if (!dc || !dc->current_state || !state) { + if (state) + state->state_valid = false; + return false; + } + + /* Initialize the state structure */ + memset(state, 0, sizeof(struct dc_register_software_state)); + + context = dc->current_state; + res_ctx = &context->res_ctx; + + /* Count active pipes and streams */ + state->active_pipe_count = 0; + state->active_stream_count = context->stream_count; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (res_ctx->pipe_ctx[i].stream) + state->active_pipe_count++; + } + + /* Capture HUBP programming state for each pipe */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + state->hubp[i].valid_stream = false; + if (!pipe_ctx->stream) + continue; + + state->hubp[i].valid_stream = true; + + /* HUBP register programming variables */ + if (pipe_ctx->stream_res.tg) + state->hubp[i].vtg_sel = pipe_ctx->stream_res.tg->inst; + + state->hubp[i].hubp_clock_enable = (pipe_ctx->plane_res.hubp != NULL) ? 1 : 0; + + state->hubp[i].valid_plane_state = false; + if (pipe_ctx->plane_state) { + state->hubp[i].valid_plane_state = true; + state->hubp[i].surface_pixel_format = pipe_ctx->plane_state->format; + state->hubp[i].rotation_angle = pipe_ctx->plane_state->rotation; + state->hubp[i].h_mirror_en = pipe_ctx->plane_state->horizontal_mirror ? 1 : 0; + + /* Surface size */ + if (pipe_ctx->plane_state->plane_size.surface_size.width > 0) { + state->hubp[i].surface_size_width = pipe_ctx->plane_state->plane_size.surface_size.width; + state->hubp[i].surface_size_height = pipe_ctx->plane_state->plane_size.surface_size.height; + } + + /* Viewport dimensions from scaler data */ + if (pipe_ctx->plane_state->src_rect.width > 0) { + state->hubp[i].pri_viewport_width = pipe_ctx->plane_state->src_rect.width; + state->hubp[i].pri_viewport_height = pipe_ctx->plane_state->src_rect.height; + state->hubp[i].pri_viewport_x_start = pipe_ctx->plane_state->src_rect.x; + state->hubp[i].pri_viewport_y_start = pipe_ctx->plane_state->src_rect.y; + } + + /* DCC settings */ + state->hubp[i].surface_dcc_en = (pipe_ctx->plane_state->dcc.enable) ? 
1 : 0; + state->hubp[i].surface_dcc_ind_64b_blk = pipe_ctx->plane_state->dcc.independent_64b_blks; + state->hubp[i].surface_dcc_ind_128b_blk = pipe_ctx->plane_state->dcc.dcc_ind_blk; + + /* Surface pitch */ + state->hubp[i].surface_pitch = pipe_ctx->plane_state->plane_size.surface_pitch; + state->hubp[i].meta_pitch = pipe_ctx->plane_state->dcc.meta_pitch; + state->hubp[i].chroma_pitch = pipe_ctx->plane_state->plane_size.chroma_pitch; + state->hubp[i].meta_pitch_c = pipe_ctx->plane_state->dcc.meta_pitch_c; + + /* Surface addresses - primary */ + state->hubp[i].primary_surface_address_low = pipe_ctx->plane_state->address.grph.addr.low_part; + state->hubp[i].primary_surface_address_high = pipe_ctx->plane_state->address.grph.addr.high_part; + state->hubp[i].primary_meta_surface_address_low = pipe_ctx->plane_state->address.grph.meta_addr.low_part; + state->hubp[i].primary_meta_surface_address_high = pipe_ctx->plane_state->address.grph.meta_addr.high_part; + + /* TMZ settings */ + state->hubp[i].primary_surface_tmz = pipe_ctx->plane_state->address.tmz_surface; + state->hubp[i].primary_meta_surface_tmz = pipe_ctx->plane_state->address.tmz_surface; + + /* Tiling configuration */ + state->hubp[i].min_dc_gfx_version9 = false; + if (pipe_ctx->plane_state->tiling_info.gfxversion >= DcGfxVersion9) { + state->hubp[i].min_dc_gfx_version9 = true; + state->hubp[i].sw_mode = pipe_ctx->plane_state->tiling_info.gfx9.swizzle; + state->hubp[i].num_pipes = pipe_ctx->plane_state->tiling_info.gfx9.num_pipes; + state->hubp[i].num_banks = pipe_ctx->plane_state->tiling_info.gfx9.num_banks; + state->hubp[i].pipe_interleave = pipe_ctx->plane_state->tiling_info.gfx9.pipe_interleave; + state->hubp[i].num_shader_engines = pipe_ctx->plane_state->tiling_info.gfx9.num_shader_engines; + state->hubp[i].num_rb_per_se = pipe_ctx->plane_state->tiling_info.gfx9.num_rb_per_se; + state->hubp[i].num_pkrs = pipe_ctx->plane_state->tiling_info.gfx9.num_pkrs; + } + } + + /* DML Request Size Configuration */ + if (pipe_ctx->rq_regs.rq_regs_l.chunk_size > 0) { + state->hubp[i].rq_chunk_size = pipe_ctx->rq_regs.rq_regs_l.chunk_size; + state->hubp[i].rq_min_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_chunk_size; + state->hubp[i].rq_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size; + state->hubp[i].rq_min_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size; + state->hubp[i].rq_dpte_group_size = pipe_ctx->rq_regs.rq_regs_l.dpte_group_size; + state->hubp[i].rq_mpte_group_size = pipe_ctx->rq_regs.rq_regs_l.mpte_group_size; + state->hubp[i].rq_swath_height_l = pipe_ctx->rq_regs.rq_regs_l.swath_height; + state->hubp[i].rq_pte_row_height_l = pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear; + } + + /* Chroma request size configuration */ + if (pipe_ctx->rq_regs.rq_regs_c.chunk_size > 0) { + state->hubp[i].rq_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.chunk_size; + state->hubp[i].rq_min_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_chunk_size; + state->hubp[i].rq_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.meta_chunk_size; + state->hubp[i].rq_min_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_meta_chunk_size; + state->hubp[i].rq_dpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.dpte_group_size; + state->hubp[i].rq_mpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.mpte_group_size; + state->hubp[i].rq_swath_height_c = pipe_ctx->rq_regs.rq_regs_c.swath_height; + state->hubp[i].rq_pte_row_height_c = pipe_ctx->rq_regs.rq_regs_c.pte_row_height_linear; + } + + /* DML expansion modes */ + state->hubp[i].drq_expansion_mode 
= pipe_ctx->rq_regs.drq_expansion_mode; + state->hubp[i].prq_expansion_mode = pipe_ctx->rq_regs.prq_expansion_mode; + state->hubp[i].mrq_expansion_mode = pipe_ctx->rq_regs.mrq_expansion_mode; + state->hubp[i].crq_expansion_mode = pipe_ctx->rq_regs.crq_expansion_mode; + + /* DML DLG parameters - nominal */ + state->hubp[i].dst_y_per_vm_vblank = pipe_ctx->dlg_regs.dst_y_per_vm_vblank; + state->hubp[i].dst_y_per_row_vblank = pipe_ctx->dlg_regs.dst_y_per_row_vblank; + state->hubp[i].dst_y_per_vm_flip = pipe_ctx->dlg_regs.dst_y_per_vm_flip; + state->hubp[i].dst_y_per_row_flip = pipe_ctx->dlg_regs.dst_y_per_row_flip; + + /* DML prefetch settings */ + state->hubp[i].dst_y_prefetch = pipe_ctx->dlg_regs.dst_y_prefetch; + state->hubp[i].vratio_prefetch = pipe_ctx->dlg_regs.vratio_prefetch; + state->hubp[i].vratio_prefetch_c = pipe_ctx->dlg_regs.vratio_prefetch_c; + + /* TTU parameters */ + state->hubp[i].qos_level_low_wm = pipe_ctx->ttu_regs.qos_level_low_wm; + state->hubp[i].qos_level_high_wm = pipe_ctx->ttu_regs.qos_level_high_wm; + state->hubp[i].qos_level_flip = pipe_ctx->ttu_regs.qos_level_flip; + state->hubp[i].min_ttu_vblank = pipe_ctx->ttu_regs.min_ttu_vblank; + } + + /* Capture HUBBUB programming state */ + if (dc->res_pool->hubbub) { + /* Individual DET buffer sizes - software state variables that program DET registers */ + for (i = 0; i < 4 && i < dc->res_pool->pipe_count; i++) { + uint32_t det_size = res_ctx->pipe_ctx[i].det_buffer_size_kb; + switch (i) { + case 0: + state->hubbub.det0_size = det_size; + break; + case 1: + state->hubbub.det1_size = det_size; + break; + case 2: + state->hubbub.det2_size = det_size; + break; + case 3: + state->hubbub.det3_size = det_size; + break; + } + } + + /* Compression buffer configuration - software state that programs COMPBUF_SIZE register */ + // TODO: Handle logic for legacy DCN pre-DCN401 + state->hubbub.compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size; + } + + /* Capture DPP programming state for each pipe */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + if (!pipe_ctx->stream) + continue; + + state->dpp[i].dpp_clock_enable = (pipe_ctx->plane_res.dpp != NULL) ? 
1 : 0; + + if (pipe_ctx->plane_state && pipe_ctx->plane_res.scl_data.recout.width > 0) { + /* Access dscl_prog_data directly - this contains the actual software state used for register programming */ + struct dscl_prog_data *dscl_data = &pipe_ctx->plane_res.scl_data.dscl_prog_data; + + /* Recout (Rectangle of Interest) configuration - software state that programs RECOUT registers */ + state->dpp[i].recout_start_x = dscl_data->recout.x; + state->dpp[i].recout_start_y = dscl_data->recout.y; + state->dpp[i].recout_width = dscl_data->recout.width; + state->dpp[i].recout_height = dscl_data->recout.height; + + /* MPC (Multiple Pipe/Plane Combiner) size - software state that programs MPC_SIZE registers */ + state->dpp[i].mpc_width = dscl_data->mpc_size.width; + state->dpp[i].mpc_height = dscl_data->mpc_size.height; + + /* DSCL mode - software state that programs SCL_MODE registers */ + state->dpp[i].dscl_mode = dscl_data->dscl_mode; + + /* Scaler ratios - software state that programs scale ratio registers (use actual programmed ratios) */ + state->dpp[i].horz_ratio_int = dscl_data->ratios.h_scale_ratio >> 19; // Extract integer part from programmed ratio + state->dpp[i].vert_ratio_int = dscl_data->ratios.v_scale_ratio >> 19; // Extract integer part from programmed ratio + + /* Basic scaler taps - software state that programs tap control registers (use actual programmed taps) */ + state->dpp[i].h_taps = dscl_data->taps.h_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back + state->dpp[i].v_taps = dscl_data->taps.v_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back + } + } + + /* Capture essential clock state for underflow analysis */ + if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz > 0) { + /* Core display clocks affecting bandwidth and timing */ + state->dccg.dispclk_khz = dc->clk_mgr->clks.dispclk_khz; + + /* Per-pipe clock configuration - only capture what's essential */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + if (pipe_ctx->stream) { + /* Essential clocks that directly affect underflow risk */ + state->dccg.dppclk_khz[i] = dc->clk_mgr->clks.dppclk_khz; + state->dccg.pixclk_khz[i] = pipe_ctx->stream->timing.pix_clk_100hz / 10; + state->dccg.dppclk_enable[i] = 1; + + /* DP stream clock only for DP signals */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + state->dccg.dpstreamclk_enable[i] = 1; + } else { + state->dccg.dpstreamclk_enable[i] = 0; + } + } else { + /* Inactive pipe - no clocks */ + state->dccg.dppclk_khz[i] = 0; + state->dccg.pixclk_khz[i] = 0; + state->dccg.dppclk_enable[i] = 0; + if (i < 4) { + state->dccg.dpstreamclk_enable[i] = 0; + } + } + } + + /* DSC clock state - only when actually using DSC */ + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe_ctx = (i < dc->res_pool->pipe_count) ? 
&res_ctx->pipe_ctx[i] : NULL; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) { + state->dccg.dscclk_khz[i] = 400000; /* Typical DSC clock frequency */ + } else { + state->dccg.dscclk_khz[i] = 0; + } + } + + /* SYMCLK32 LE Control - only the essential HPO state for underflow analysis */ + for (i = 0; i < 2; i++) { + state->dccg.symclk32_le_enable[i] = 0; /* Default: disabled */ + } + + } + + /* Capture essential DSC configuration for underflow analysis */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + if (pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) { + /* DSC is enabled - capture essential configuration */ + state->dsc[i].dsc_clock_enable = 1; + + /* DSC configuration affecting bandwidth and timing */ + struct dc_dsc_config *dsc_cfg = &pipe_ctx->stream->timing.dsc_cfg; + state->dsc[i].dsc_num_slices_h = dsc_cfg->num_slices_h; + state->dsc[i].dsc_num_slices_v = dsc_cfg->num_slices_v; + state->dsc[i].dsc_bits_per_pixel = dsc_cfg->bits_per_pixel; + + /* OPP pipe source for DSC forwarding */ + if (pipe_ctx->stream_res.opp) { + state->dsc[i].dscrm_dsc_forward_enable = 1; + state->dsc[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst; + } else { + state->dsc[i].dscrm_dsc_forward_enable = 0; + state->dsc[i].dscrm_dsc_opp_pipe_source = 0; + } + } else { + /* DSC not enabled - clear all fields */ + memset(&state->dsc[i], 0, sizeof(state->dsc[i])); + } + } + + /* Capture MPC programming state - comprehensive register field coverage */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + if (pipe_ctx->plane_state && pipe_ctx->stream) { + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + + /* MPCC blending tree and mode control - capture actual blend configuration */ + state->mpc.mpcc_mode[i] = (plane_state->blend_tf.type != TF_TYPE_BYPASS) ? 1 : 0; + state->mpc.mpcc_alpha_blend_mode[i] = plane_state->per_pixel_alpha ? 1 : 0; + state->mpc.mpcc_alpha_multiplied_mode[i] = plane_state->pre_multiplied_alpha ? 1 : 0; + state->mpc.mpcc_blnd_active_overlap_only[i] = 0; /* Default - no overlap restriction */ + state->mpc.mpcc_global_alpha[i] = plane_state->global_alpha_value; + state->mpc.mpcc_global_gain[i] = plane_state->global_alpha ? 
255 : 0; + state->mpc.mpcc_bg_bpc[i] = 8; /* Standard 8-bit background */ + state->mpc.mpcc_bot_gain_mode[i] = 0; /* Standard gain mode */ + + /* MPCC blending tree connections - capture tree topology */ + if (pipe_ctx->bottom_pipe) { + state->mpc.mpcc_bot_sel[i] = pipe_ctx->bottom_pipe->pipe_idx; + } else { + state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */ + } + state->mpc.mpcc_top_sel[i] = pipe_ctx->pipe_idx; /* This pipe's DPP ID */ + + /* MPCC output gamma control - capture gamma programming */ + if (plane_state->gamma_correction.type != GAMMA_CS_TFM_1D && plane_state->gamma_correction.num_entries > 0) { + state->mpc.mpcc_ogam_mode[i] = 1; /* Gamma enabled */ + state->mpc.mpcc_ogam_select[i] = 0; /* Bank A selection */ + state->mpc.mpcc_ogam_pwl_disable[i] = 0; /* PWL enabled */ + } else { + state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass mode */ + state->mpc.mpcc_ogam_select[i] = 0; + state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */ + } + + /* MPCC pipe assignment and operational status */ + if (pipe_ctx->stream_res.opp) { + state->mpc.mpcc_opp_id[i] = pipe_ctx->stream_res.opp->inst; + } else { + state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */ + } + + /* MPCC status indicators - active pipe state */ + state->mpc.mpcc_idle[i] = 0; /* Active pipe - not idle */ + state->mpc.mpcc_busy[i] = 1; /* Active pipe - busy processing */ + + } else { + /* Pipe not active - set disabled/idle state for all fields */ + state->mpc.mpcc_mode[i] = 0; + state->mpc.mpcc_alpha_blend_mode[i] = 0; + state->mpc.mpcc_alpha_multiplied_mode[i] = 0; + state->mpc.mpcc_blnd_active_overlap_only[i] = 0; + state->mpc.mpcc_global_alpha[i] = 0; + state->mpc.mpcc_global_gain[i] = 0; + state->mpc.mpcc_bg_bpc[i] = 0; + state->mpc.mpcc_bot_gain_mode[i] = 0; + state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */ + state->mpc.mpcc_top_sel[i] = 0xF; /* No top connection */ + state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass */ + state->mpc.mpcc_ogam_select[i] = 0; + state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */ + state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */ + state->mpc.mpcc_idle[i] = 1; /* Idle */ + state->mpc.mpcc_busy[i] = 0; /* Not busy */ + } + } + + /* Capture OPP programming state for each pipe - comprehensive register field coverage */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + if (!pipe_ctx->stream) + continue; + + if (pipe_ctx->stream_res.opp) { + struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + + /* OPP Pipe Control */ + state->opp[i].opp_pipe_clock_enable = 1; /* Active pipe has clock enabled */ + + /* Display Pattern Generator (DPG) Control - 19 fields */ + if (pipe_ctx->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { + state->opp[i].dpg_enable = 1; + } else { + /* Video mode - DPG disabled */ + state->opp[i].dpg_enable = 0; + } + + /* Format Control (FMT) - 18 fields */ + state->opp[i].fmt_pixel_encoding = timing->pixel_encoding; + + /* Chroma subsampling mode based on pixel encoding */ + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) { + state->opp[i].fmt_subsampling_mode = 1; /* 4:2:0 subsampling */ + } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + state->opp[i].fmt_subsampling_mode = 2; /* 4:2:2 subsampling */ + } else { + state->opp[i].fmt_subsampling_mode = 0; /* No subsampling (4:4:4) */ + } + + state->opp[i].fmt_cbcr_bit_reduction_bypass = (timing->pixel_encoding == PIXEL_ENCODING_RGB) ? 
1 : 0; + state->opp[i].fmt_stereosync_override = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0; + + /* Dithering control based on bit depth */ + if (timing->display_color_depth < COLOR_DEPTH_121212) { + state->opp[i].fmt_spatial_dither_frame_counter_max = 15; /* Typical frame counter max */ + state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; /* No bit swapping */ + state->opp[i].fmt_spatial_dither_enable = 1; + state->opp[i].fmt_spatial_dither_mode = 0; /* Spatial dithering mode */ + state->opp[i].fmt_spatial_dither_depth = timing->display_color_depth; + state->opp[i].fmt_temporal_dither_enable = 0; /* Spatial dithering preferred */ + } else { + state->opp[i].fmt_spatial_dither_frame_counter_max = 0; + state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; + state->opp[i].fmt_spatial_dither_enable = 0; + state->opp[i].fmt_spatial_dither_mode = 0; + state->opp[i].fmt_spatial_dither_depth = 0; + state->opp[i].fmt_temporal_dither_enable = 0; + } + + /* Truncation control for bit depth reduction */ + if (timing->display_color_depth < COLOR_DEPTH_121212) { + state->opp[i].fmt_truncate_enable = 1; + state->opp[i].fmt_truncate_depth = timing->display_color_depth; + state->opp[i].fmt_truncate_mode = 0; /* Round mode */ + } else { + state->opp[i].fmt_truncate_enable = 0; + state->opp[i].fmt_truncate_depth = 0; + state->opp[i].fmt_truncate_mode = 0; + } + + /* Data clamping control */ + state->opp[i].fmt_clamp_data_enable = 1; /* Clamping typically enabled */ + state->opp[i].fmt_clamp_color_format = timing->pixel_encoding; + + /* Dynamic expansion for limited range content */ + if (timing->pixel_encoding != PIXEL_ENCODING_RGB) { + state->opp[i].fmt_dynamic_exp_enable = 1; /* YCbCr typically needs expansion */ + state->opp[i].fmt_dynamic_exp_mode = 0; /* Standard expansion */ + } else { + state->opp[i].fmt_dynamic_exp_enable = 0; /* RGB typically full range */ + state->opp[i].fmt_dynamic_exp_mode = 0; + } + + /* Legacy field for compatibility */ + state->opp[i].fmt_bit_depth_control = timing->display_color_depth; + + /* Output Buffer (OPPBUF) Control - 6 fields */ + state->opp[i].oppbuf_active_width = timing->h_addressable; + state->opp[i].oppbuf_pixel_repetition = 0; /* No pixel repetition by default */ + + /* Multi-Stream Output (MSO) / ODM segmentation */ + if (pipe_ctx->next_odm_pipe) { + state->opp[i].oppbuf_display_segmentation = 1; /* Segmented display */ + state->opp[i].oppbuf_overlap_pixel_num = 0; /* ODM overlap pixels */ + } else { + state->opp[i].oppbuf_display_segmentation = 0; /* Single segment */ + state->opp[i].oppbuf_overlap_pixel_num = 0; + } + + /* 3D/Stereo control */ + if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) { + state->opp[i].oppbuf_3d_vact_space1_size = 30; /* Typical stereo blanking */ + state->opp[i].oppbuf_3d_vact_space2_size = 30; + } else { + state->opp[i].oppbuf_3d_vact_space1_size = 0; + state->opp[i].oppbuf_3d_vact_space2_size = 0; + } + + /* DSC Forward Config - 3 fields */ + if (timing->dsc_cfg.num_slices_h > 0) { + state->opp[i].dscrm_dsc_forward_enable = 1; + state->opp[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst; + state->opp[i].dscrm_dsc_forward_enable_status = 1; /* Status follows enable */ + } else { + state->opp[i].dscrm_dsc_forward_enable = 0; + state->opp[i].dscrm_dsc_opp_pipe_source = 0; + state->opp[i].dscrm_dsc_forward_enable_status = 0; + } + } else { + /* No OPP resource - set all fields to disabled state */ + memset(&state->opp[i], 0, sizeof(state->opp[i])); + } + } + + /* Capture OPTC 
programming state for each pipe - comprehensive register field coverage */ + for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + if (!pipe_ctx->stream) + continue; + + if (pipe_ctx->stream_res.tg) { + struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + + state->optc[i].otg_master_inst = pipe_ctx->stream_res.tg->inst; + + /* OTG_CONTROL register - 5 fields */ + state->optc[i].otg_master_enable = 1; /* Active stream */ + state->optc[i].otg_disable_point_cntl = 0; /* Normal operation */ + state->optc[i].otg_start_point_cntl = 0; /* Normal start */ + state->optc[i].otg_field_number_cntl = (timing->flags.INTERLACE) ? 1 : 0; + state->optc[i].otg_out_mux = 0; /* Direct output */ + + /* OTG Horizontal Timing - 7 fields */ + state->optc[i].otg_h_total = timing->h_total; + state->optc[i].otg_h_blank_start = timing->h_addressable; + state->optc[i].otg_h_blank_end = timing->h_total - timing->h_front_porch; + state->optc[i].otg_h_sync_start = timing->h_addressable + timing->h_front_porch; + state->optc[i].otg_h_sync_end = timing->h_addressable + timing->h_front_porch + timing->h_sync_width; + state->optc[i].otg_h_sync_polarity = timing->flags.HSYNC_POSITIVE_POLARITY ? 0 : 1; + state->optc[i].otg_h_timing_div_mode = (pipe_ctx->next_odm_pipe) ? 1 : 0; /* ODM divide mode */ + + /* OTG Vertical Timing - 7 fields */ + state->optc[i].otg_v_total = timing->v_total; + state->optc[i].otg_v_blank_start = timing->v_addressable; + state->optc[i].otg_v_blank_end = timing->v_total - timing->v_front_porch; + state->optc[i].otg_v_sync_start = timing->v_addressable + timing->v_front_porch; + state->optc[i].otg_v_sync_end = timing->v_addressable + timing->v_front_porch + timing->v_sync_width; + state->optc[i].otg_v_sync_polarity = timing->flags.VSYNC_POSITIVE_POLARITY ? 0 : 1; + state->optc[i].otg_v_sync_mode = 0; /* Normal sync mode */ + + /* Initialize remaining core fields with appropriate defaults */ + // TODO: Update logic for accurate vtotal min/max + state->optc[i].otg_v_total_max = timing->v_total + 100; /* Typical DRR range */ + state->optc[i].otg_v_total_min = timing->v_total - 50; + state->optc[i].otg_v_total_mid = timing->v_total; + + /* ODM configuration */ + // TODO: Update logic to have complete ODM mappings (e.g. 3:1 and 4:1) stored in single pipe + if (pipe_ctx->next_odm_pipe) { + state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0; + state->optc[i].optc_seg1_src_sel = pipe_ctx->next_odm_pipe->stream_res.opp ? pipe_ctx->next_odm_pipe->stream_res.opp->inst : 0; + state->optc[i].optc_num_of_input_segment = 1; /* 2 segments - 1 */ + } else { + state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0; + state->optc[i].optc_seg1_src_sel = 0; + state->optc[i].optc_num_of_input_segment = 0; /* Single segment */ + } + + /* DSC configuration */ + if (timing->dsc_cfg.num_slices_h > 0) { + state->optc[i].optc_dsc_mode = 1; /* DSC enabled */ + state->optc[i].optc_dsc_bytes_per_pixel = timing->dsc_cfg.bits_per_pixel / 16; /* Convert to bytes */ + state->optc[i].optc_dsc_slice_width = timing->h_addressable / timing->dsc_cfg.num_slices_h; + } else { + state->optc[i].optc_dsc_mode = 0; + state->optc[i].optc_dsc_bytes_per_pixel = 0; + state->optc[i].optc_dsc_slice_width = 0; + } + + /* Essential control fields */ + state->optc[i].otg_stereo_enable = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 
1 : 0; + state->optc[i].otg_interlace_enable = timing->flags.INTERLACE ? 1 : 0; + state->optc[i].otg_clock_enable = 1; /* OTG clock enabled */ + state->optc[i].vtg0_enable = 1; /* VTG enabled for timing generation */ + + /* Initialize other key fields to defaults */ + state->optc[i].optc_input_pix_clk_en = 1; + state->optc[i].optc_segment_width = (pipe_ctx->next_odm_pipe) ? (timing->h_addressable / 2) : timing->h_addressable; + state->optc[i].otg_vready_offset = 1; + state->optc[i].otg_vstartup_start = timing->v_addressable + 10; + state->optc[i].otg_vupdate_offset = 0; + state->optc[i].otg_vupdate_width = 5; + } else { + /* No timing generator resource - initialize all fields to 0 */ + memset(&state->optc[i], 0, sizeof(state->optc[i])); + } + } + + state->state_valid = true; + return true; +} + +void dc_log_preos_dmcub_info(const struct dc *dc) +{ + dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index d82b1cb467f4..e2763b60482a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -32,6 +32,13 @@ #include "resource.h" #include "dc_dmub_srv.h" #include "dc_state_priv.h" +#include "opp.h" +#include "dsc.h" +#include "dchubbub.h" +#include "dccg.h" +#include "abm.h" +#include "dcn10/dcn10_hubbub.h" +#include "dce/dmub_hw_lock_mgr.h" #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) #define MAX_NUM_MCACHE 8 @@ -258,7 +265,7 @@ void color_space_to_black_color( black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED]; break; - /** + /* * Remove default and add case for all color space * so when we forget to add new color space * compiler will give a warning @@ -755,11 +762,13 @@ void hwss_build_fast_sequence(struct dc *dc, block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } - if (dc->hwss.fams2_global_control_lock_fast) { - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = true; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); - block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST; + if (dc->hwss.dmub_hw_control_lock_fast) { + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = true; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = + dc_state_is_fams2_in_use(dc, context) || + dmub_hw_lock_mgr_does_link_require_lock(dc, stream->link); + block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST; (*num_steps)++; } if (dc->hwss.pipe_control_lock) { @@ -784,7 +793,7 @@ void hwss_build_fast_sequence(struct dc *dc, while (current_mpc_pipe) { if (current_mpc_pipe->plane_state) { if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state->update_flags.raw) { - block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_flip_control_gsl_params.hubp = current_mpc_pipe->plane_res.hubp; block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate; block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; (*num_steps)++; @@ -894,11 +903,11 @@ void hwss_build_fast_sequence(struct dc *dc, 
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } - if (dc->hwss.fams2_global_control_lock_fast) { - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = false; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); - block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST; + if (dc->hwss.dmub_hw_control_lock_fast) { + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = false; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); + block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST; (*num_steps)++; } @@ -911,6 +920,13 @@ void hwss_build_fast_sequence(struct dc *dc, current_mpc_pipe->stream && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.bits.addr_update && !current_mpc_pipe->plane_state->skip_manual_trigger) { + if (dc->hwss.program_cursor_offload_now) { + block_sequence[*num_steps].params.program_cursor_update_now_params.dc = dc; + block_sequence[*num_steps].params.program_cursor_update_now_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = PROGRAM_CURSOR_UPDATE_NOW; + (*num_steps)++; + } + block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER; (*num_steps)++; @@ -942,8 +958,9 @@ void hwss_execute_sequence(struct dc *dc, params->pipe_control_lock_params.lock); break; case HUBP_SET_FLIP_CONTROL_GSL: - dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx, - params->set_flip_control_gsl_params.flip_immediate); + params->set_flip_control_gsl_params.hubp->funcs->hubp_set_flip_control_surface_gsl( + params->set_flip_control_gsl_params.hubp, + params->set_flip_control_gsl_params.flip_immediate); break; case HUBP_PROGRAM_TRIPLEBUFFER: dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc, @@ -1001,8 +1018,301 @@ void hwss_execute_sequence(struct dc *dc, params->wait_for_dcc_meta_propagation_params.dc, params->wait_for_dcc_meta_propagation_params.top_pipe_to_program); break; - case DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST: - dc->hwss.fams2_global_control_lock_fast(params); + case DMUB_HW_CONTROL_LOCK_FAST: + dc->hwss.dmub_hw_control_lock_fast(params); + break; + case HUBP_PROGRAM_SURFACE_CONFIG: + hwss_program_surface_config(params); + break; + case HUBP_PROGRAM_MCACHE_ID: + hwss_program_mcache_id_and_split_coordinate(params); + break; + case PROGRAM_CURSOR_UPDATE_NOW: + dc->hwss.program_cursor_offload_now( + params->program_cursor_update_now_params.dc, + params->program_cursor_update_now_params.pipe_ctx); + break; + case HUBP_WAIT_PIPE_READ_START: + params->hubp_wait_pipe_read_start_params.hubp->funcs->hubp_wait_pipe_read_start( + params->hubp_wait_pipe_read_start_params.hubp); + break; + case HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM: + dc->hwss.apply_update_flags_for_phantom(params->apply_update_flags_for_phantom_params.pipe_ctx); + break; + case HWS_UPDATE_PHANTOM_VP_POSITION: + dc->hwss.update_phantom_vp_position(params->update_phantom_vp_position_params.dc, + params->update_phantom_vp_position_params.context, + params->update_phantom_vp_position_params.pipe_ctx); + break; + case OPTC_SET_ODM_COMBINE: + 
hwss_set_odm_combine(params); + break; + case OPTC_SET_ODM_BYPASS: + hwss_set_odm_bypass(params); + break; + case OPP_PIPE_CLOCK_CONTROL: + hwss_opp_pipe_clock_control(params); + break; + case OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL: + hwss_opp_program_left_edge_extra_pixel(params); + break; + case DCCG_SET_DTO_DSCCLK: + hwss_dccg_set_dto_dscclk(params); + break; + case DSC_SET_CONFIG: + hwss_dsc_set_config(params); + break; + case DSC_ENABLE: + hwss_dsc_enable(params); + break; + case TG_SET_DSC_CONFIG: + hwss_tg_set_dsc_config(params); + break; + case DSC_DISCONNECT: + hwss_dsc_disconnect(params); + break; + case DSC_READ_STATE: + hwss_dsc_read_state(params); + break; + case DSC_CALCULATE_AND_SET_CONFIG: + hwss_dsc_calculate_and_set_config(params); + break; + case DSC_ENABLE_WITH_OPP: + hwss_dsc_enable_with_opp(params); + break; + case TG_PROGRAM_GLOBAL_SYNC: + hwss_tg_program_global_sync(params); + break; + case TG_WAIT_FOR_STATE: + hwss_tg_wait_for_state(params); + break; + case TG_SET_VTG_PARAMS: + hwss_tg_set_vtg_params(params); + break; + case TG_SETUP_VERTICAL_INTERRUPT2: + hwss_tg_setup_vertical_interrupt2(params); + break; + case DPP_SET_HDR_MULTIPLIER: + hwss_dpp_set_hdr_multiplier(params); + break; + case HUBP_PROGRAM_DET_SIZE: + hwss_program_det_size(params); + break; + case HUBP_PROGRAM_DET_SEGMENTS: + hwss_program_det_segments(params); + break; + case OPP_SET_DYN_EXPANSION: + hwss_opp_set_dyn_expansion(params); + break; + case OPP_PROGRAM_FMT: + hwss_opp_program_fmt(params); + break; + case OPP_PROGRAM_BIT_DEPTH_REDUCTION: + hwss_opp_program_bit_depth_reduction(params); + break; + case OPP_SET_DISP_PATTERN_GENERATOR: + hwss_opp_set_disp_pattern_generator(params); + break; + case ABM_SET_PIPE: + hwss_set_abm_pipe(params); + break; + case ABM_SET_LEVEL: + hwss_set_abm_level(params); + break; + case ABM_SET_IMMEDIATE_DISABLE: + hwss_set_abm_immediate_disable(params); + break; + case MPC_REMOVE_MPCC: + hwss_mpc_remove_mpcc(params); + break; + case OPP_SET_MPCC_DISCONNECT_PENDING: + hwss_opp_set_mpcc_disconnect_pending(params); + break; + case DC_SET_OPTIMIZED_REQUIRED: + hwss_dc_set_optimized_required(params); + break; + case HUBP_DISCONNECT: + hwss_hubp_disconnect(params); + break; + case HUBBUB_FORCE_PSTATE_CHANGE_CONTROL: + hwss_hubbub_force_pstate_change_control(params); + break; + case TG_ENABLE_CRTC: + hwss_tg_enable_crtc(params); + break; + case TG_SET_GSL: + hwss_tg_set_gsl(params); + break; + case TG_SET_GSL_SOURCE_SELECT: + hwss_tg_set_gsl_source_select(params); + break; + case HUBP_WAIT_FLIP_PENDING: + hwss_hubp_wait_flip_pending(params); + break; + case TG_WAIT_DOUBLE_BUFFER_PENDING: + hwss_tg_wait_double_buffer_pending(params); + break; + case UPDATE_FORCE_PSTATE: + hwss_update_force_pstate(params); + break; + case HUBBUB_APPLY_DEDCN21_147_WA: + hwss_hubbub_apply_dedcn21_147_wa(params); + break; + case HUBBUB_ALLOW_SELF_REFRESH_CONTROL: + hwss_hubbub_allow_self_refresh_control(params); + break; + case TG_GET_FRAME_COUNT: + hwss_tg_get_frame_count(params); + break; + case MPC_SET_DWB_MUX: + hwss_mpc_set_dwb_mux(params); + break; + case MPC_DISABLE_DWB_MUX: + hwss_mpc_disable_dwb_mux(params); + break; + case MCIF_WB_CONFIG_BUF: + hwss_mcif_wb_config_buf(params); + break; + case MCIF_WB_CONFIG_ARB: + hwss_mcif_wb_config_arb(params); + break; + case MCIF_WB_ENABLE: + hwss_mcif_wb_enable(params); + break; + case MCIF_WB_DISABLE: + hwss_mcif_wb_disable(params); + break; + case DWBC_ENABLE: + hwss_dwbc_enable(params); + break; + case DWBC_DISABLE: + hwss_dwbc_disable(params); + 
break; + case DWBC_UPDATE: + hwss_dwbc_update(params); + break; + case HUBP_UPDATE_MALL_SEL: + hwss_hubp_update_mall_sel(params); + break; + case HUBP_PREPARE_SUBVP_BUFFERING: + hwss_hubp_prepare_subvp_buffering(params); + break; + case HUBP_SET_BLANK_EN: + hwss_hubp_set_blank_en(params); + break; + case HUBP_DISABLE_CONTROL: + hwss_hubp_disable_control(params); + break; + case HUBBUB_SOFT_RESET: + hwss_hubbub_soft_reset(params); + break; + case HUBP_CLK_CNTL: + hwss_hubp_clk_cntl(params); + break; + case HUBP_INIT: + hwss_hubp_init(params); + break; + case HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS: + hwss_hubp_set_vm_system_aperture_settings(params); + break; + case HUBP_SET_FLIP_INT: + hwss_hubp_set_flip_int(params); + break; + case DPP_DPPCLK_CONTROL: + hwss_dpp_dppclk_control(params); + break; + case DISABLE_PHANTOM_CRTC: + hwss_disable_phantom_crtc(params); + break; + case DSC_PG_STATUS: + hwss_dsc_pg_status(params); + break; + case DSC_WAIT_DISCONNECT_PENDING_CLEAR: + hwss_dsc_wait_disconnect_pending_clear(params); + break; + case DSC_DISABLE: + hwss_dsc_disable(params); + break; + case DCCG_SET_REF_DSCCLK: + hwss_dccg_set_ref_dscclk(params); + break; + case DPP_PG_CONTROL: + hwss_dpp_pg_control(params); + break; + case HUBP_PG_CONTROL: + hwss_hubp_pg_control(params); + break; + case HUBP_RESET: + hwss_hubp_reset(params); + break; + case DPP_RESET: + hwss_dpp_reset(params); + break; + case DPP_ROOT_CLOCK_CONTROL: + hwss_dpp_root_clock_control(params); + break; + case DC_IP_REQUEST_CNTL: + hwss_dc_ip_request_cntl(params); + break; + case DCCG_UPDATE_DPP_DTO: + hwss_dccg_update_dpp_dto(params); + break; + case HUBP_VTG_SEL: + hwss_hubp_vtg_sel(params); + break; + case HUBP_SETUP2: + hwss_hubp_setup2(params); + break; + case HUBP_SETUP: + hwss_hubp_setup(params); + break; + case HUBP_SET_UNBOUNDED_REQUESTING: + hwss_hubp_set_unbounded_requesting(params); + break; + case HUBP_SETUP_INTERDEPENDENT2: + hwss_hubp_setup_interdependent2(params); + break; + case HUBP_SETUP_INTERDEPENDENT: + hwss_hubp_setup_interdependent(params); + break; + case DPP_SET_CURSOR_MATRIX: + hwss_dpp_set_cursor_matrix(params); + break; + case MPC_UPDATE_BLENDING: + hwss_mpc_update_blending(params); + break; + case MPC_ASSERT_IDLE_MPCC: + hwss_mpc_assert_idle_mpcc(params); + break; + case MPC_INSERT_PLANE: + hwss_mpc_insert_plane(params); + break; + case DPP_SET_SCALER: + hwss_dpp_set_scaler(params); + break; + case HUBP_MEM_PROGRAM_VIEWPORT: + hwss_hubp_mem_program_viewport(params); + break; + case ABORT_CURSOR_OFFLOAD_UPDATE: + hwss_abort_cursor_offload_update(params); + break; + case SET_CURSOR_ATTRIBUTE: + hwss_set_cursor_attribute(params); + break; + case SET_CURSOR_POSITION: + hwss_set_cursor_position(params); + break; + case SET_CURSOR_SDR_WHITE_LEVEL: + hwss_set_cursor_sdr_white_level(params); + break; + case PROGRAM_OUTPUT_CSC: + hwss_program_output_csc(params); + break; + case HUBP_SET_BLANK: + hwss_hubp_set_blank(params); + break; + case PHANTOM_HUBP_POST_ENABLE: + hwss_phantom_hubp_post_enable(params); break; default: ASSERT(false); @@ -1011,6 +1321,338 @@ void hwss_execute_sequence(struct dc *dc, } } +/* + * Helper function to add OPTC pipe control lock to block sequence + */ +void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool lock) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.dc = dc; + 
seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.lock = lock; + seq_state->steps[*seq_state->num_steps].func = OPTC_PIPE_CONTROL_LOCK; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP set flip control GSL to block sequence + */ +void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool flip_immediate) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.flip_immediate = flip_immediate; + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP program triplebuffer to block sequence + */ +void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enableTripleBuffer) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.enableTripleBuffer = enableTripleBuffer; + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP update plane address to block sequence + */ +void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_PLANE_ADDR; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DPP set input transfer function to block sequence + */ +void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_plane_state *plane_state) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.plane_state = plane_state; + seq_state->steps[*seq_state->num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DPP program gamut remap to block sequence + */ +void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_gamut_remap_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_GAMUT_REMAP; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DPP program bias and scale to block sequence + */ +void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, struct pipe_ctx *pipe_ctx) +{ + if 
(*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_bias_and_scale_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add OPTC program manual trigger to block sequence + */ +void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_manual_trigger_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DPP set output transfer function to block sequence + */ +void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_stream_state *stream) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.stream = stream; + seq_state->steps[*seq_state->num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC update visual confirm to block sequence + */ +void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.mpcc_id = mpcc_id; + seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_VISUAL_CONFIRM; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC power on MPC mem PWR to block sequence + */ +void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state, + struct mpc *mpc, + int mpcc_id, + bool power_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = mpcc_id; + seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.power_on = power_on; + seq_state->steps[*seq_state->num_steps].func = MPC_POWER_ON_MPC_MEM_PWR; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC set output CSC to block sequence + */ +void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state, + struct mpc *mpc, + int opp_id, + const uint16_t *regval, + enum mpc_output_csc_mode ocsc_mode) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.opp_id = opp_id; + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.regval = regval; + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.ocsc_mode = ocsc_mode; + seq_state->steps[*seq_state->num_steps].func = MPC_SET_OUTPUT_CSC; 
+ (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC set OCSC default to block sequence + */ +void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state, + struct mpc *mpc, + int opp_id, + enum dc_color_space colorspace, + enum mpc_output_csc_mode ocsc_mode) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.opp_id = opp_id; + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.color_space = colorspace; + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.ocsc_mode = ocsc_mode; + seq_state->steps[*seq_state->num_steps].func = MPC_SET_OCSC_DEFAULT; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DMUB send DMCUB command to block sequence + */ +void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state, + struct dc_context *ctx, + union dmub_rb_cmd *cmd, + enum dm_dmub_wait_type wait_type) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.ctx = ctx; + seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.cmd = cmd; + seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.wait_type = wait_type; + seq_state->steps[*seq_state->num_steps].func = DMUB_SEND_DMCUB_CMD; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DMUB SubVP save surface address to block sequence + */ +void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state, + struct dc_dmub_srv *dc_dmub_srv, + struct dc_plane_address *addr, + uint8_t subvp_index) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc_dmub_srv; + seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.addr = addr; + seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.subvp_index = subvp_index; + seq_state->steps[*seq_state->num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP wait for DCC meta propagation to block sequence + */ +void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *top_pipe_to_program) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.top_pipe_to_program = top_pipe_to_program; + seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FOR_DCC_META_PROP; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP wait pipe read start to block sequence + */ +void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.hubp_wait_pipe_read_start_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_PIPE_READ_START; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HWS apply update flags for phantom to block sequence + */ +void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < 
MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.apply_update_flags_for_phantom_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HWS update phantom VP position to block sequence + */ +void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state, + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.context = context; + seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = HWS_UPDATE_PHANTOM_VP_POSITION; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add OPTC set ODM combine to block sequence + */ +void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state, + struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count, + int odm_slice_width, int last_odm_slice_width) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.tg = tg; + memcpy(seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_inst, opp_inst, sizeof(int) * MAX_PIPES); + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_head_count = opp_head_count; + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.odm_slice_width = odm_slice_width; + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.last_odm_slice_width = last_odm_slice_width; + seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_COMBINE; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add OPTC set ODM bypass to block sequence + */ +void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state, + struct timing_generator *tg, struct dc_crtc_timing *timing) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.timing = timing; + seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_BYPASS; + (*seq_state->num_steps)++; + } +} + void hwss_send_dmcub_cmd(union block_sequence_params *params) { struct dc_context *ctx = params->send_dmcub_cmd_params.ctx; @@ -1020,6 +1662,276 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params) dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type); } +/* + * Helper function to add TG program global sync to block sequence + */ +void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int vready_offset, + unsigned int vstartup_lines, + unsigned int vupdate_offset_pixels, + unsigned int vupdate_vupdate_width_pixels, + unsigned int pstate_keepout_start_lines) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vready_offset = vready_offset; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vstartup_lines = vstartup_lines; + 
seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_offset_pixels = vupdate_offset_pixels; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_vupdate_width_pixels = vupdate_vupdate_width_pixels; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.pstate_keepout_start_lines = pstate_keepout_start_lines; + seq_state->steps[*seq_state->num_steps].func = TG_PROGRAM_GLOBAL_SYNC; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add TG wait for state to block sequence + */ +void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state, + struct timing_generator *tg, + enum crtc_state state) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.state = state; + seq_state->steps[*seq_state->num_steps].func = TG_WAIT_FOR_STATE; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add TG set VTG params to block sequence + */ +void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state, + struct timing_generator *tg, + struct dc_crtc_timing *dc_crtc_timing, + bool program_fp2) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.timing = dc_crtc_timing; + seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.program_fp2 = program_fp2; + seq_state->steps[*seq_state->num_steps].func = TG_SET_VTG_PARAMS; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add TG setup vertical interrupt2 to block sequence + */ +void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state, + struct timing_generator *tg, int start_line) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.start_line = start_line; + seq_state->steps[*seq_state->num_steps].func = TG_SETUP_VERTICAL_INTERRUPT2; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add DPP set HDR multiplier to block sequence + */ +void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state, + struct dpp *dpp, uint32_t hw_mult) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.hw_mult = hw_mult; + seq_state->steps[*seq_state->num_steps].func = DPP_SET_HDR_MULTIPLIER; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP program DET size to block sequence + */ +void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + unsigned int hubp_inst, + unsigned int det_buffer_size_kb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubp_inst = hubp_inst; + seq_state->steps[*seq_state->num_steps].params.program_det_size_params.det_buffer_size_kb = det_buffer_size_kb; + seq_state->steps[*seq_state->num_steps].func = 
HUBP_PROGRAM_DET_SIZE; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_hubp_pipe_mcache_regs *mcache_regs) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.mcache_regs = mcache_regs; + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_MCACHE_ID; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + bool enable, + bool wait) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.enable = enable; + seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.wait = wait; + seq_state->steps[*seq_state->num_steps].func = HUBBUB_FORCE_PSTATE_CHANGE_CONTROL; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP program DET segments to block sequence + */ +void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + unsigned int hubp_inst, + unsigned int det_size) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubp_inst = hubp_inst; + seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.det_size = det_size; + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SEGMENTS; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add OPP set dynamic expansion to block sequence + */ +void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum dc_color_space color_space, + enum dc_color_depth color_depth, + enum signal_type signal) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_space = color_space; + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_depth = color_depth; + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.signal = signal; + seq_state->steps[*seq_state->num_steps].func = OPP_SET_DYN_EXPANSION; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add OPP program FMT to block sequence + */ +void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + struct bit_depth_reduction_params *fmt_bit_depth, + struct clamping_and_pixel_encoding_params *clamping) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.fmt_bit_depth = fmt_bit_depth; + seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.clamping = clamping; + seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_FMT; + (*seq_state->num_steps)++; + } +} 
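+ +/* + * Note: the hwss_add_*() helpers in this file all follow the same pattern: bounds-check + * *seq_state->num_steps against MAX_HWSS_BLOCK_SEQUENCE_SIZE, fill in the matching member + * of union block_sequence_params, set the step's func identifier, and advance the counter; + * hwss_execute_sequence() later dispatches each recorded step. Illustrative caller sketch + * only (seq_state, fmt_bit_depth and clamping are assumed caller-side variables): + * + *   hwss_add_opp_program_fmt(&seq_state, pipe_ctx->stream_res.opp, + *                            &fmt_bit_depth, &clamping); + */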
+ +void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum dc_pixel_encoding pixel_encoding, + bool is_otg_master) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL; + seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.pixel_encoding = pixel_encoding; + seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.is_otg_master = is_otg_master; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add ABM set pipe to block sequence + */ +void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = ABM_SET_PIPE; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add ABM set level to block sequence + */ +void hwss_add_abm_set_level(struct block_sequence_state *seq_state, + struct abm *abm, + uint32_t abm_level) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm = abm; + seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm_level = abm_level; + seq_state->steps[*seq_state->num_steps].func = ABM_SET_LEVEL; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add TG enable CRTC to block sequence + */ +void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_enable_crtc_params.tg = tg; + seq_state->steps[*seq_state->num_steps].func = TG_ENABLE_CRTC; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP wait flip pending to block sequence + */ +void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state, + struct hubp *hubp, + unsigned int timeout_us, + unsigned int polling_interval_us) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.timeout_us = timeout_us; + seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.polling_interval_us = polling_interval_us; + seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FLIP_PENDING; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add TG wait double buffer pending to block sequence + */ +void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state, + struct timing_generator *tg, + unsigned int timeout_us, + unsigned int polling_interval_us) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.timeout_us = timeout_us; + seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.polling_interval_us = 
polling_interval_us; + seq_state->steps[*seq_state->num_steps].func = TG_WAIT_DOUBLE_BUFFER_PENDING; + (*seq_state->num_steps)++; + } +} + void hwss_program_manual_trigger(union block_sequence_params *params) { struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx; @@ -1046,12 +1958,6 @@ void hwss_setup_dpp(union block_sequence_params *params) plane_state->color_space, NULL); } - - if (dpp && dpp->funcs->set_cursor_matrix) { - dpp->funcs->set_cursor_matrix(dpp, - plane_state->color_space, - plane_state->cursor_csc_color_matrix); - } } void hwss_program_bias_and_scale(union block_sequence_params *params) @@ -1062,9 +1968,8 @@ void hwss_program_bias_and_scale(union block_sequence_params *params) struct dc_bias_and_scale bns_params = plane_state->bias_and_scale; //TODO :for CNVC set scale and bias registers if necessary - if (dpp->funcs->dpp_program_bias_and_scale) { + if (dpp->funcs->dpp_program_bias_and_scale) dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); - } } void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params) @@ -1114,6 +2019,39 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params) dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index); } +void hwss_program_surface_config(union block_sequence_params *params) +{ + struct hubp *hubp = params->program_surface_config_params.hubp; + enum surface_pixel_format format = params->program_surface_config_params.format; + struct dc_tiling_info *tiling_info = params->program_surface_config_params.tiling_info; + struct plane_size size = params->program_surface_config_params.plane_size; + enum dc_rotation_angle rotation = params->program_surface_config_params.rotation; + struct dc_plane_dcc_param *dcc = params->program_surface_config_params.dcc; + bool horizontal_mirror = params->program_surface_config_params.horizontal_mirror; + int compat_level = params->program_surface_config_params.compat_level; + + hubp->funcs->hubp_program_surface_config( + hubp, + format, + tiling_info, + &size, + rotation, + dcc, + horizontal_mirror, + compat_level); + + hubp->power_gated = false; +} + +void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params) +{ + struct hubp *hubp = params->program_mcache_id_and_split_coordinate.hubp; + struct dml2_hubp_pipe_mcache_regs *mcache_regs = params->program_mcache_id_and_split_coordinate.mcache_regs; + + hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, mcache_regs); + +} + void get_surface_tile_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color) @@ -1188,6 +2126,7 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) { int i; + for (i = 0; i < MAX_PIPES; i++) { int count = 0; struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1264,3 +2203,1869 @@ void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_cont if (dc->hwss.program_outstanding_updates) dc->hwss.program_outstanding_updates(dc, dc_context); } + +void hwss_set_odm_combine(union block_sequence_params *params) +{ + struct timing_generator *tg = params->set_odm_combine_params.tg; + int *opp_inst = params->set_odm_combine_params.opp_inst; + int opp_head_count = params->set_odm_combine_params.opp_head_count; + int odm_slice_width = params->set_odm_combine_params.odm_slice_width; + int last_odm_slice_width = params->set_odm_combine_params.last_odm_slice_width; + + if (tg && tg->funcs->set_odm_combine) + 
tg->funcs->set_odm_combine(tg, opp_inst, opp_head_count, + odm_slice_width, last_odm_slice_width); +} + +void hwss_set_odm_bypass(union block_sequence_params *params) +{ + struct timing_generator *tg = params->set_odm_bypass_params.tg; + const struct dc_crtc_timing *timing = params->set_odm_bypass_params.timing; + + if (tg && tg->funcs->set_odm_bypass) + tg->funcs->set_odm_bypass(tg, timing); +} + +void hwss_opp_pipe_clock_control(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_pipe_clock_control_params.opp; + bool enable = params->opp_pipe_clock_control_params.enable; + + if (opp && opp->funcs->opp_pipe_clock_control) + opp->funcs->opp_pipe_clock_control(opp, enable); +} + +void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_program_left_edge_extra_pixel_params.opp; + enum dc_pixel_encoding pixel_encoding = params->opp_program_left_edge_extra_pixel_params.pixel_encoding; + bool is_otg_master = params->opp_program_left_edge_extra_pixel_params.is_otg_master; + + if (opp && opp->funcs->opp_program_left_edge_extra_pixel) + opp->funcs->opp_program_left_edge_extra_pixel(opp, pixel_encoding, is_otg_master); +} + +void hwss_dccg_set_dto_dscclk(union block_sequence_params *params) +{ + struct dccg *dccg = params->dccg_set_dto_dscclk_params.dccg; + int inst = params->dccg_set_dto_dscclk_params.inst; + int num_slices_h = params->dccg_set_dto_dscclk_params.num_slices_h; + + if (dccg && dccg->funcs->set_dto_dscclk) + dccg->funcs->set_dto_dscclk(dccg, inst, num_slices_h); +} + +void hwss_dsc_set_config(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_set_config_params.dsc; + struct dsc_config *dsc_cfg = params->dsc_set_config_params.dsc_cfg; + struct dsc_optc_config *dsc_optc_cfg = params->dsc_set_config_params.dsc_optc_cfg; + + if (dsc && dsc->funcs->dsc_set_config) + dsc->funcs->dsc_set_config(dsc, dsc_cfg, dsc_optc_cfg); +} + +void hwss_dsc_enable(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_enable_params.dsc; + int opp_inst = params->dsc_enable_params.opp_inst; + + if (dsc && dsc->funcs->dsc_enable) + dsc->funcs->dsc_enable(dsc, opp_inst); +} + +void hwss_tg_set_dsc_config(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_set_dsc_config_params.tg; + enum optc_dsc_mode optc_dsc_mode = OPTC_DSC_DISABLED; + uint32_t bytes_per_pixel = 0; + uint32_t slice_width = 0; + + if (params->tg_set_dsc_config_params.enable) { + struct dsc_optc_config *dsc_optc_cfg = params->tg_set_dsc_config_params.dsc_optc_cfg; + + if (dsc_optc_cfg) { + bytes_per_pixel = dsc_optc_cfg->bytes_per_pixel; + slice_width = dsc_optc_cfg->slice_width; + optc_dsc_mode = dsc_optc_cfg->is_pixel_format_444 ? 
+ OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; + } + } + + if (tg && tg->funcs->set_dsc_config) + tg->funcs->set_dsc_config(tg, optc_dsc_mode, bytes_per_pixel, slice_width); +} + +void hwss_dsc_disconnect(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_disconnect_params.dsc; + + if (dsc && dsc->funcs->dsc_disconnect) + dsc->funcs->dsc_disconnect(dsc); +} + +void hwss_dsc_read_state(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_read_state_params.dsc; + struct dcn_dsc_state *dsc_state = params->dsc_read_state_params.dsc_state; + + if (dsc && dsc->funcs->dsc_read_state) + dsc->funcs->dsc_read_state(dsc, dsc_state); +} + +void hwss_dsc_calculate_and_set_config(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->dsc_calculate_and_set_config_params.pipe_ctx; + struct pipe_ctx *top_pipe = pipe_ctx; + bool enable = params->dsc_calculate_and_set_config_params.enable; + int opp_cnt = params->dsc_calculate_and_set_config_params.opp_cnt; + + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + struct dc_stream_state *stream = pipe_ctx->stream; + + if (!dsc || !enable) + return; + + /* Calculate DSC configuration - extracted from dcn32_update_dsc_on_stream */ + struct dsc_config dsc_cfg; + + while (top_pipe->prev_odm_pipe) + top_pipe = top_pipe->prev_odm_pipe; + + dsc_cfg.pic_width = (stream->timing.h_addressable + top_pipe->dsc_padding_params.dsc_hactive_padding + + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = top_pipe->next_odm_pipe ? 
true : false; + dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = top_pipe->dsc_padding_params.dsc_hactive_padding; + + /* Set DSC configuration */ + if (dsc->funcs->dsc_set_config) + dsc->funcs->dsc_set_config(dsc, &dsc_cfg, + &params->dsc_calculate_and_set_config_params.dsc_optc_cfg); +} + +void hwss_dsc_enable_with_opp(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->dsc_enable_with_opp_params.pipe_ctx; + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + + if (dsc && dsc->funcs->dsc_enable) + dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); +} + +void hwss_tg_program_global_sync(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_program_global_sync_params.tg; + int vready_offset = params->tg_program_global_sync_params.vready_offset; + unsigned int vstartup_lines = params->tg_program_global_sync_params.vstartup_lines; + unsigned int vupdate_offset_pixels = params->tg_program_global_sync_params.vupdate_offset_pixels; + unsigned int vupdate_vupdate_width_pixels = params->tg_program_global_sync_params.vupdate_vupdate_width_pixels; + unsigned int pstate_keepout_start_lines = params->tg_program_global_sync_params.pstate_keepout_start_lines; + + if (tg->funcs->program_global_sync) { + tg->funcs->program_global_sync(tg, vready_offset, vstartup_lines, + vupdate_offset_pixels, vupdate_vupdate_width_pixels, pstate_keepout_start_lines); + } +} + +void hwss_tg_wait_for_state(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_wait_for_state_params.tg; + enum crtc_state state = params->tg_wait_for_state_params.state; + + if (tg->funcs->wait_for_state) + tg->funcs->wait_for_state(tg, state); +} + +void hwss_tg_set_vtg_params(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_set_vtg_params_params.tg; + struct dc_crtc_timing *timing = params->tg_set_vtg_params_params.timing; + bool program_fp2 = params->tg_set_vtg_params_params.program_fp2; + + if (tg->funcs->set_vtg_params) + tg->funcs->set_vtg_params(tg, timing, program_fp2); +} + +void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_setup_vertical_interrupt2_params.tg; + int start_line = params->tg_setup_vertical_interrupt2_params.start_line; + + if (tg->funcs->setup_vertical_interrupt2) + tg->funcs->setup_vertical_interrupt2(tg, start_line); +} + +void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_set_hdr_multiplier_params.dpp; + uint32_t hw_mult = params->dpp_set_hdr_multiplier_params.hw_mult; + + if (dpp->funcs->dpp_set_hdr_multiplier) + dpp->funcs->dpp_set_hdr_multiplier(dpp, hw_mult); +} + +void hwss_program_det_size(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->program_det_size_params.hubbub; + unsigned int hubp_inst = params->program_det_size_params.hubp_inst; + unsigned int det_buffer_size_kb = params->program_det_size_params.det_buffer_size_kb; + + if (hubbub->funcs->program_det_size) + hubbub->funcs->program_det_size(hubbub, hubp_inst, det_buffer_size_kb); +} + +void hwss_program_det_segments(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->program_det_segments_params.hubbub; + unsigned int hubp_inst = params->program_det_segments_params.hubp_inst; + unsigned int det_size = params->program_det_segments_params.det_size; + + if 
(hubbub->funcs->program_det_segments) + hubbub->funcs->program_det_segments(hubbub, hubp_inst, det_size); +} + +void hwss_opp_set_dyn_expansion(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_set_dyn_expansion_params.opp; + enum dc_color_space color_space = params->opp_set_dyn_expansion_params.color_space; + enum dc_color_depth color_depth = params->opp_set_dyn_expansion_params.color_depth; + enum signal_type signal = params->opp_set_dyn_expansion_params.signal; + + if (opp->funcs->opp_set_dyn_expansion) + opp->funcs->opp_set_dyn_expansion(opp, color_space, color_depth, signal); +} + +void hwss_opp_program_fmt(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_program_fmt_params.opp; + struct bit_depth_reduction_params *fmt_bit_depth = params->opp_program_fmt_params.fmt_bit_depth; + struct clamping_and_pixel_encoding_params *clamping = params->opp_program_fmt_params.clamping; + + if (opp->funcs->opp_program_fmt) + opp->funcs->opp_program_fmt(opp, fmt_bit_depth, clamping); +} + +void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_program_bit_depth_reduction_params.opp; + bool use_default_params = params->opp_program_bit_depth_reduction_params.use_default_params; + struct pipe_ctx *pipe_ctx = params->opp_program_bit_depth_reduction_params.pipe_ctx; + struct bit_depth_reduction_params bit_depth_params; + + if (use_default_params) + memset(&bit_depth_params, 0, sizeof(bit_depth_params)); + else + resource_build_bit_depth_reduction_params(pipe_ctx->stream, &bit_depth_params); + + if (opp->funcs->opp_program_bit_depth_reduction) + opp->funcs->opp_program_bit_depth_reduction(opp, &bit_depth_params); +} + +void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_set_disp_pattern_generator_params.opp; + enum controller_dp_test_pattern test_pattern = params->opp_set_disp_pattern_generator_params.test_pattern; + enum controller_dp_color_space color_space = params->opp_set_disp_pattern_generator_params.color_space; + enum dc_color_depth color_depth = params->opp_set_disp_pattern_generator_params.color_depth; + struct tg_color *solid_color = params->opp_set_disp_pattern_generator_params.use_solid_color ? 
+ &params->opp_set_disp_pattern_generator_params.solid_color : NULL; + int width = params->opp_set_disp_pattern_generator_params.width; + int height = params->opp_set_disp_pattern_generator_params.height; + int offset = params->opp_set_disp_pattern_generator_params.offset; + + if (opp && opp->funcs->opp_set_disp_pattern_generator) { + opp->funcs->opp_set_disp_pattern_generator(opp, test_pattern, color_space, + color_depth, solid_color, width, height, offset); + } +} + +void hwss_set_abm_pipe(union block_sequence_params *params) +{ + struct dc *dc = params->set_abm_pipe_params.dc; + struct pipe_ctx *pipe_ctx = params->set_abm_pipe_params.pipe_ctx; + + dc->hwss.set_pipe(pipe_ctx); +} + +void hwss_set_abm_level(union block_sequence_params *params) +{ + struct abm *abm = params->set_abm_level_params.abm; + unsigned int abm_level = params->set_abm_level_params.abm_level; + + if (abm->funcs->set_abm_level) + abm->funcs->set_abm_level(abm, abm_level); +} + +void hwss_set_abm_immediate_disable(union block_sequence_params *params) +{ + struct dc *dc = params->set_abm_immediate_disable_params.dc; + struct pipe_ctx *pipe_ctx = params->set_abm_immediate_disable_params.pipe_ctx; + + if (dc && dc->hwss.set_abm_immediate_disable) + dc->hwss.set_abm_immediate_disable(pipe_ctx); +} + +void hwss_mpc_remove_mpcc(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_remove_mpcc_params.mpc; + struct mpc_tree *mpc_tree_params = params->mpc_remove_mpcc_params.mpc_tree_params; + struct mpcc *mpcc_to_remove = params->mpc_remove_mpcc_params.mpcc_to_remove; + + mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove); +} + +void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_set_mpcc_disconnect_pending_params.opp; + int mpcc_inst = params->opp_set_mpcc_disconnect_pending_params.mpcc_inst; + bool pending = params->opp_set_mpcc_disconnect_pending_params.pending; + + opp->mpcc_disconnect_pending[mpcc_inst] = pending; +} + +void hwss_dc_set_optimized_required(union block_sequence_params *params) +{ + struct dc *dc = params->dc_set_optimized_required_params.dc; + bool optimized_required = params->dc_set_optimized_required_params.optimized_required; + + dc->optimized_required = optimized_required; +} + +void hwss_hubp_disconnect(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_disconnect_params.hubp; + + if (hubp->funcs->hubp_disconnect) + hubp->funcs->hubp_disconnect(hubp); +} + +void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->hubbub_force_pstate_change_control_params.hubbub; + bool enable = params->hubbub_force_pstate_change_control_params.enable; + bool wait = params->hubbub_force_pstate_change_control_params.wait; + + if (hubbub->funcs->force_pstate_change_control) { + hubbub->funcs->force_pstate_change_control(hubbub, enable, wait); + /* Add delay when enabling pstate change control */ + if (enable) + udelay(500); + } +} + +void hwss_tg_enable_crtc(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_enable_crtc_params.tg; + + if (tg->funcs->enable_crtc) + tg->funcs->enable_crtc(tg); +} + +void hwss_tg_set_gsl(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_set_gsl_params.tg; + struct gsl_params *gsl = &params->tg_set_gsl_params.gsl; + + if (tg->funcs->set_gsl) + tg->funcs->set_gsl(tg, gsl); +} + +void hwss_tg_set_gsl_source_select(union block_sequence_params 
*params) +{ + struct timing_generator *tg = params->tg_set_gsl_source_select_params.tg; + int group_idx = params->tg_set_gsl_source_select_params.group_idx; + uint32_t gsl_ready_signal = params->tg_set_gsl_source_select_params.gsl_ready_signal; + + if (tg->funcs->set_gsl_source_select) + tg->funcs->set_gsl_source_select(tg, group_idx, gsl_ready_signal); +} + +void hwss_hubp_wait_flip_pending(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_wait_flip_pending_params.hubp; + unsigned int timeout_us = params->hubp_wait_flip_pending_params.timeout_us; + unsigned int polling_interval_us = params->hubp_wait_flip_pending_params.polling_interval_us; + int j = 0; + + for (j = 0; j < timeout_us / polling_interval_us + && hubp->funcs->hubp_is_flip_pending(hubp); j++) + udelay(polling_interval_us); +} + +void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_wait_double_buffer_pending_params.tg; + unsigned int timeout_us = params->tg_wait_double_buffer_pending_params.timeout_us; + unsigned int polling_interval_us = params->tg_wait_double_buffer_pending_params.polling_interval_us; + int j = 0; + + if (tg->funcs->get_optc_double_buffer_pending) { + for (j = 0; j < timeout_us / polling_interval_us + && tg->funcs->get_optc_double_buffer_pending(tg); j++) + udelay(polling_interval_us); + } +} + +void hwss_update_force_pstate(union block_sequence_params *params) +{ + struct dc *dc = params->update_force_pstate_params.dc; + struct dc_state *context = params->update_force_pstate_params.context; + struct dce_hwseq *hwseq = dc->hwseq; + + if (hwseq->funcs.update_force_pstate) + hwseq->funcs.update_force_pstate(dc, context); +} + +void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->hubbub_apply_dedcn21_147_wa_params.hubbub; + + hubbub->funcs->apply_DEDCN21_147_wa(hubbub); +} + +void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->hubbub_allow_self_refresh_control_params.hubbub; + bool allow = params->hubbub_allow_self_refresh_control_params.allow; + + hubbub->funcs->allow_self_refresh_control(hubbub, allow); + + if (!allow && params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied) + *params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = true; +} + +void hwss_tg_get_frame_count(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_get_frame_count_params.tg; + unsigned int *frame_count = params->tg_get_frame_count_params.frame_count; + + *frame_count = tg->funcs->get_frame_count(tg); +} + +void hwss_mpc_set_dwb_mux(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_set_dwb_mux_params.mpc; + int dwb_id = params->mpc_set_dwb_mux_params.dwb_id; + int mpcc_id = params->mpc_set_dwb_mux_params.mpcc_id; + + if (mpc->funcs->set_dwb_mux) + mpc->funcs->set_dwb_mux(mpc, dwb_id, mpcc_id); +} + +void hwss_mpc_disable_dwb_mux(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_disable_dwb_mux_params.mpc; + unsigned int dwb_id = params->mpc_disable_dwb_mux_params.dwb_id; + + if (mpc->funcs->disable_dwb_mux) + mpc->funcs->disable_dwb_mux(mpc, dwb_id); +} + +void hwss_mcif_wb_config_buf(union block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_config_buf_params.mcif_wb; + struct mcif_buf_params *mcif_buf_params = params->mcif_wb_config_buf_params.mcif_buf_params; + 
unsigned int dest_height = params->mcif_wb_config_buf_params.dest_height; + + if (mcif_wb->funcs->config_mcif_buf) + mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, dest_height); +} + +void hwss_mcif_wb_config_arb(union block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_config_arb_params.mcif_wb; + struct mcif_arb_params *mcif_arb_params = params->mcif_wb_config_arb_params.mcif_arb_params; + + if (mcif_wb->funcs->config_mcif_arb) + mcif_wb->funcs->config_mcif_arb(mcif_wb, mcif_arb_params); +} + +void hwss_mcif_wb_enable(union block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_enable_params.mcif_wb; + + if (mcif_wb->funcs->enable_mcif) + mcif_wb->funcs->enable_mcif(mcif_wb); +} + +void hwss_mcif_wb_disable(union block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_disable_params.mcif_wb; + + if (mcif_wb->funcs->disable_mcif) + mcif_wb->funcs->disable_mcif(mcif_wb); +} + +void hwss_dwbc_enable(union block_sequence_params *params) +{ + struct dwbc *dwb = params->dwbc_enable_params.dwb; + struct dc_dwb_params *dwb_params = params->dwbc_enable_params.dwb_params; + + if (dwb->funcs->enable) + dwb->funcs->enable(dwb, dwb_params); +} + +void hwss_dwbc_disable(union block_sequence_params *params) +{ + struct dwbc *dwb = params->dwbc_disable_params.dwb; + + if (dwb->funcs->disable) + dwb->funcs->disable(dwb); +} + +void hwss_dwbc_update(union block_sequence_params *params) +{ + struct dwbc *dwb = params->dwbc_update_params.dwb; + struct dc_dwb_params *dwb_params = params->dwbc_update_params.dwb_params; + + if (dwb->funcs->update) + dwb->funcs->update(dwb, dwb_params); +} + +void hwss_hubp_update_mall_sel(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_update_mall_sel_params.hubp; + uint32_t mall_sel = params->hubp_update_mall_sel_params.mall_sel; + bool cache_cursor = params->hubp_update_mall_sel_params.cache_cursor; + + if (hubp && hubp->funcs->hubp_update_mall_sel) + hubp->funcs->hubp_update_mall_sel(hubp, mall_sel, cache_cursor); +} + +void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_prepare_subvp_buffering_params.hubp; + bool enable = params->hubp_prepare_subvp_buffering_params.enable; + + if (hubp && hubp->funcs->hubp_prepare_subvp_buffering) + hubp->funcs->hubp_prepare_subvp_buffering(hubp, enable); +} + +void hwss_hubp_set_blank_en(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_blank_en_params.hubp; + bool enable = params->hubp_set_blank_en_params.enable; + + if (hubp && hubp->funcs->set_hubp_blank_en) + hubp->funcs->set_hubp_blank_en(hubp, enable); +} + +void hwss_hubp_disable_control(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_disable_control_params.hubp; + bool disable = params->hubp_disable_control_params.disable; + + if (hubp && hubp->funcs->hubp_disable_control) + hubp->funcs->hubp_disable_control(hubp, disable); +} + +void hwss_hubbub_soft_reset(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->hubbub_soft_reset_params.hubbub; + bool reset = params->hubbub_soft_reset_params.reset; + + if (hubbub) + params->hubbub_soft_reset_params.hubbub_soft_reset(hubbub, reset); +} + +void hwss_hubp_clk_cntl(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_clk_cntl_params.hubp; + bool enable = params->hubp_clk_cntl_params.enable; + + if (hubp && hubp->funcs->hubp_clk_cntl) { + hubp->funcs->hubp_clk_cntl(hubp, 
enable); + hubp->power_gated = !enable; + } +} + +void hwss_hubp_init(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_init_params.hubp; + + if (hubp && hubp->funcs->hubp_init) + hubp->funcs->hubp_init(hubp); +} + +void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_vm_system_aperture_settings_params.hubp; + struct vm_system_aperture_param apt; + + apt.sys_default = params->hubp_set_vm_system_aperture_settings_params.sys_default; + apt.sys_high = params->hubp_set_vm_system_aperture_settings_params.sys_high; + apt.sys_low = params->hubp_set_vm_system_aperture_settings_params.sys_low; + + if (hubp && hubp->funcs->hubp_set_vm_system_aperture_settings) + hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt); +} + +void hwss_hubp_set_flip_int(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_flip_int_params.hubp; + + if (hubp && hubp->funcs->hubp_set_flip_int) + hubp->funcs->hubp_set_flip_int(hubp); +} + +void hwss_dpp_dppclk_control(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_dppclk_control_params.dpp; + bool dppclk_div = params->dpp_dppclk_control_params.dppclk_div; + bool enable = params->dpp_dppclk_control_params.enable; + + if (dpp && dpp->funcs->dpp_dppclk_control) + dpp->funcs->dpp_dppclk_control(dpp, dppclk_div, enable); +} + +void hwss_disable_phantom_crtc(union block_sequence_params *params) +{ + struct timing_generator *tg = params->disable_phantom_crtc_params.tg; + + if (tg && tg->funcs->disable_phantom_crtc) + tg->funcs->disable_phantom_crtc(tg); +} + +void hwss_dsc_pg_status(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->dsc_pg_status_params.hws; + int dsc_inst = params->dsc_pg_status_params.dsc_inst; + + if (hws && hws->funcs.dsc_pg_status) + params->dsc_pg_status_params.is_ungated = hws->funcs.dsc_pg_status(hws, dsc_inst); +} + +void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_wait_disconnect_pending_clear_params.dsc; + + if (!params->dsc_wait_disconnect_pending_clear_params.is_ungated) + return; + if (*params->dsc_wait_disconnect_pending_clear_params.is_ungated == false) + return; + + if (dsc && dsc->funcs->dsc_wait_disconnect_pending_clear) + dsc->funcs->dsc_wait_disconnect_pending_clear(dsc); +} + +void hwss_dsc_disable(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_disable_params.dsc; + + if (!params->dsc_disable_params.is_ungated) + return; + if (*params->dsc_disable_params.is_ungated == false) + return; + + if (dsc && dsc->funcs->dsc_disable) + dsc->funcs->dsc_disable(dsc); +} + +void hwss_dccg_set_ref_dscclk(union block_sequence_params *params) +{ + struct dccg *dccg = params->dccg_set_ref_dscclk_params.dccg; + int dsc_inst = params->dccg_set_ref_dscclk_params.dsc_inst; + + if (!params->dccg_set_ref_dscclk_params.is_ungated) + return; + if (*params->dccg_set_ref_dscclk_params.is_ungated == false) + return; + + if (dccg && dccg->funcs->set_ref_dscclk) + dccg->funcs->set_ref_dscclk(dccg, dsc_inst); +} + +void hwss_dpp_pg_control(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->dpp_pg_control_params.hws; + unsigned int dpp_inst = params->dpp_pg_control_params.dpp_inst; + bool power_on = params->dpp_pg_control_params.power_on; + + if (hws->funcs.dpp_pg_control) + hws->funcs.dpp_pg_control(hws, dpp_inst, power_on); +} + +void 
hwss_hubp_pg_control(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->hubp_pg_control_params.hws; + unsigned int hubp_inst = params->hubp_pg_control_params.hubp_inst; + bool power_on = params->hubp_pg_control_params.power_on; + + if (hws->funcs.hubp_pg_control) + hws->funcs.hubp_pg_control(hws, hubp_inst, power_on); +} + +void hwss_hubp_reset(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_reset_params.hubp; + + if (hubp && hubp->funcs->hubp_reset) + hubp->funcs->hubp_reset(hubp); +} + +void hwss_dpp_reset(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_reset_params.dpp; + + if (dpp && dpp->funcs->dpp_reset) + dpp->funcs->dpp_reset(dpp); +} + +void hwss_dpp_root_clock_control(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->dpp_root_clock_control_params.hws; + unsigned int dpp_inst = params->dpp_root_clock_control_params.dpp_inst; + bool clock_on = params->dpp_root_clock_control_params.clock_on; + + if (hws->funcs.dpp_root_clock_control) + hws->funcs.dpp_root_clock_control(hws, dpp_inst, clock_on); +} + +void hwss_dc_ip_request_cntl(union block_sequence_params *params) +{ + struct dc *dc = params->dc_ip_request_cntl_params.dc; + bool enable = params->dc_ip_request_cntl_params.enable; + struct dce_hwseq *hws = dc->hwseq; + + if (hws->funcs.dc_ip_request_cntl) + hws->funcs.dc_ip_request_cntl(dc, enable); +} + +void hwss_dccg_update_dpp_dto(union block_sequence_params *params) +{ + struct dccg *dccg = params->dccg_update_dpp_dto_params.dccg; + int dpp_inst = params->dccg_update_dpp_dto_params.dpp_inst; + int dppclk_khz = params->dccg_update_dpp_dto_params.dppclk_khz; + + if (dccg && dccg->funcs->update_dpp_dto) + dccg->funcs->update_dpp_dto(dccg, dpp_inst, dppclk_khz); +} + +void hwss_hubp_vtg_sel(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_vtg_sel_params.hubp; + uint32_t otg_inst = params->hubp_vtg_sel_params.otg_inst; + + if (hubp && hubp->funcs->hubp_vtg_sel) + hubp->funcs->hubp_vtg_sel(hubp, otg_inst); +} + +void hwss_hubp_setup2(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_setup2_params.hubp; + struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup2_params.hubp_regs; + union dml2_global_sync_programming *global_sync = params->hubp_setup2_params.global_sync; + struct dc_crtc_timing *timing = params->hubp_setup2_params.timing; + + if (hubp && hubp->funcs->hubp_setup2) + hubp->funcs->hubp_setup2(hubp, hubp_regs, global_sync, timing); +} + +void hwss_hubp_setup(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_setup_params.hubp; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_params.dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_params.ttu_regs; + struct _vcs_dpi_display_rq_regs_st *rq_regs = params->hubp_setup_params.rq_regs; + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest = params->hubp_setup_params.pipe_dest; + + if (hubp && hubp->funcs->hubp_setup) + hubp->funcs->hubp_setup(hubp, dlg_regs, ttu_regs, rq_regs, pipe_dest); +} + +void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_unbounded_requesting_params.hubp; + bool unbounded_req = params->hubp_set_unbounded_requesting_params.unbounded_req; + + if (hubp && hubp->funcs->set_unbounded_requesting) + hubp->funcs->set_unbounded_requesting(hubp, unbounded_req); +} + +void hwss_hubp_setup_interdependent2(union 
block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_setup_interdependent2_params.hubp; + struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup_interdependent2_params.hubp_regs; + + if (hubp && hubp->funcs->hubp_setup_interdependent2) + hubp->funcs->hubp_setup_interdependent2(hubp, hubp_regs); +} + +void hwss_hubp_setup_interdependent(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_setup_interdependent_params.hubp; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_interdependent_params.dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_interdependent_params.ttu_regs; + + if (hubp && hubp->funcs->hubp_setup_interdependent) + hubp->funcs->hubp_setup_interdependent(hubp, dlg_regs, ttu_regs); +} + +void hwss_dpp_set_cursor_matrix(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_set_cursor_matrix_params.dpp; + enum dc_color_space color_space = params->dpp_set_cursor_matrix_params.color_space; + struct dc_csc_transform *cursor_csc_color_matrix = params->dpp_set_cursor_matrix_params.cursor_csc_color_matrix; + + if (dpp && dpp->funcs->set_cursor_matrix) + dpp->funcs->set_cursor_matrix(dpp, color_space, *cursor_csc_color_matrix); +} + +void hwss_mpc_update_mpcc(union block_sequence_params *params) +{ + struct dc *dc = params->mpc_update_mpcc_params.dc; + struct pipe_ctx *pipe_ctx = params->mpc_update_mpcc_params.pipe_ctx; + struct dce_hwseq *hws = dc->hwseq; + + if (hws->funcs.update_mpcc) + hws->funcs.update_mpcc(dc, pipe_ctx); +} + +void hwss_mpc_update_blending(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_update_blending_params.mpc; + struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_update_blending_params.blnd_cfg; + int mpcc_id = params->mpc_update_blending_params.mpcc_id; + + if (mpc && mpc->funcs->update_blending) + mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id); +} + +void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_assert_idle_mpcc_params.mpc; + int mpcc_id = params->mpc_assert_idle_mpcc_params.mpcc_id; + + if (mpc && mpc->funcs->wait_for_idle) + mpc->funcs->wait_for_idle(mpc, mpcc_id); +} + +void hwss_mpc_insert_plane(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_insert_plane_params.mpc; + struct mpc_tree *tree = params->mpc_insert_plane_params.mpc_tree_params; + struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_insert_plane_params.blnd_cfg; + struct mpcc_sm_cfg *sm_cfg = params->mpc_insert_plane_params.sm_cfg; + struct mpcc *insert_above_mpcc = params->mpc_insert_plane_params.insert_above_mpcc; + int mpcc_id = params->mpc_insert_plane_params.mpcc_id; + int dpp_id = params->mpc_insert_plane_params.dpp_id; + + if (mpc && mpc->funcs->insert_plane) + mpc->funcs->insert_plane(mpc, tree, blnd_cfg, sm_cfg, insert_above_mpcc, + dpp_id, mpcc_id); +} + +void hwss_dpp_set_scaler(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_set_scaler_params.dpp; + const struct scaler_data *scl_data = params->dpp_set_scaler_params.scl_data; + + if (dpp && dpp->funcs->dpp_set_scaler) + dpp->funcs->dpp_set_scaler(dpp, scl_data); +} + +void hwss_hubp_mem_program_viewport(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_mem_program_viewport_params.hubp; + const struct rect *viewport = params->hubp_mem_program_viewport_params.viewport; + const struct rect *viewport_c = params->hubp_mem_program_viewport_params.viewport_c; + + if (hubp && 
hubp->funcs->mem_program_viewport) + hubp->funcs->mem_program_viewport(hubp, viewport, viewport_c); +} + +void hwss_abort_cursor_offload_update(union block_sequence_params *params) +{ + struct dc *dc = params->abort_cursor_offload_update_params.dc; + struct pipe_ctx *pipe_ctx = params->abort_cursor_offload_update_params.pipe_ctx; + + if (dc && dc->hwss.abort_cursor_offload_update) + dc->hwss.abort_cursor_offload_update(dc, pipe_ctx); +} + +void hwss_set_cursor_attribute(union block_sequence_params *params) +{ + struct dc *dc = params->set_cursor_attribute_params.dc; + struct pipe_ctx *pipe_ctx = params->set_cursor_attribute_params.pipe_ctx; + + if (dc && dc->hwss.set_cursor_attribute) + dc->hwss.set_cursor_attribute(pipe_ctx); +} + +void hwss_set_cursor_position(union block_sequence_params *params) +{ + struct dc *dc = params->set_cursor_position_params.dc; + struct pipe_ctx *pipe_ctx = params->set_cursor_position_params.pipe_ctx; + + if (dc && dc->hwss.set_cursor_position) + dc->hwss.set_cursor_position(pipe_ctx); +} + +void hwss_set_cursor_sdr_white_level(union block_sequence_params *params) +{ + struct dc *dc = params->set_cursor_sdr_white_level_params.dc; + struct pipe_ctx *pipe_ctx = params->set_cursor_sdr_white_level_params.pipe_ctx; + + if (dc && dc->hwss.set_cursor_sdr_white_level) + dc->hwss.set_cursor_sdr_white_level(pipe_ctx); +} + +void hwss_program_output_csc(union block_sequence_params *params) +{ + struct dc *dc = params->program_output_csc_params.dc; + struct pipe_ctx *pipe_ctx = params->program_output_csc_params.pipe_ctx; + enum dc_color_space colorspace = params->program_output_csc_params.colorspace; + uint16_t *matrix = params->program_output_csc_params.matrix; + int opp_id = params->program_output_csc_params.opp_id; + + if (dc && dc->hwss.program_output_csc) + dc->hwss.program_output_csc(dc, pipe_ctx, colorspace, matrix, opp_id); +} + +void hwss_hubp_set_blank(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_blank_params.hubp; + bool blank = params->hubp_set_blank_params.blank; + + if (hubp && hubp->funcs->set_blank) + hubp->funcs->set_blank(hubp, blank); +} + +void hwss_phantom_hubp_post_enable(union block_sequence_params *params) +{ + struct hubp *hubp = params->phantom_hubp_post_enable_params.hubp; + + if (hubp && hubp->funcs->phantom_hubp_post_enable) + hubp->funcs->phantom_hubp_post_enable(hubp); +} + +void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, int inst, int num_slices_h) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DCCG_SET_DTO_DSCCLK; + seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.dccg = dccg; + seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.inst = inst; + seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.num_slices_h = num_slices_h; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_CALCULATE_AND_SET_CONFIG; + seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.enable = enable; + 
seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.opp_cnt = opp_cnt; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_REMOVE_MPCC; + seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc_tree_params = mpc_tree_params; + seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpcc_to_remove = mpcc_to_remove; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, int mpcc_inst, bool pending) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_SET_MPCC_DISCONNECT_PENDING; + seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.mpcc_inst = mpcc_inst; + seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.pending = pending; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_DISCONNECT; + seq_state->steps[*seq_state->num_steps].params.hubp_disconnect_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_ENABLE_WITH_OPP; + seq_state->steps[*seq_state->num_steps].params.dsc_enable_with_opp_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state, + struct timing_generator *tg, struct dsc_optc_config *dsc_optc_cfg, bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_SET_DSC_CONFIG; + seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.dsc_optc_cfg = dsc_optc_cfg; + seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_DISCONNECT; + seq_state->steps[*seq_state->num_steps].params.dsc_disconnect_params.dsc = dsc; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state, + struct dc *dc, bool optimized_required) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DC_SET_OPTIMIZED_REQUIRED; + seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.optimized_required = 
optimized_required; + (*seq_state->num_steps)++; + } +} + +void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = ABM_SET_IMMEDIATE_DISABLE; + seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, + enum dc_color_depth color_depth, + struct tg_color solid_color, + bool use_solid_color, + int width, + int height, + int offset) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_SET_DISP_PATTERN_GENERATOR; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.test_pattern = test_pattern; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_space = color_space; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_depth = color_depth; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.solid_color = solid_color; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.use_solid_color = use_solid_color; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.width = width; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.height = height; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.offset = offset; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC update blending to block sequence + */ +void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct mpcc_blnd_cfg blnd_cfg, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_BLENDING; + seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.blnd_cfg = blnd_cfg; + seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC insert plane to block sequence + */ +void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct mpc_tree *mpc_tree_params, + struct mpcc_blnd_cfg blnd_cfg, + struct mpcc_sm_cfg *sm_cfg, + struct mpcc *insert_above_mpcc, + int dpp_id, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_INSERT_PLANE; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc_tree_params = mpc_tree_params; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.blnd_cfg = blnd_cfg; + 
seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.sm_cfg = sm_cfg; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.insert_above_mpcc = insert_above_mpcc; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.dpp_id = dpp_id; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add MPC assert idle MPCC to block sequence + */ +void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_ASSERT_IDLE_MPCC; + seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +/* + * Helper function to add HUBP set blank to block sequence + */ +void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool blank) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.blank = blank; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool use_default_params, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_BIT_DEPTH_REDUCTION; + seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.use_default_params = use_default_params; + seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state, + struct dc *dc, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DC_IP_REQUEST_CNTL; + seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dwbc_update(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DWBC_UPDATE; + seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb = dwb; + seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb_params = dwb_params; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_buf_params *mcif_buf_params, + unsigned int dest_height) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_BUF; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_wb = mcif_wb; + 
seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_buf_params = mcif_buf_params; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.dest_height = dest_height; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_arb_params *mcif_arb_params) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_ARB; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_wb = mcif_wb; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_arb_params = mcif_arb_params; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_ENABLE; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_enable_params.mcif_wb = mcif_wb; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_DISABLE; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_disable_params.mcif_wb = mcif_wb; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + int dwb_id, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_SET_DWB_MUX; + seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.dwb_id = dwb_id; + seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + unsigned int dwb_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_DISABLE_DWB_MUX; + seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.dwb_id = dwb_id; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dwbc_enable(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DWBC_ENABLE; + seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb = dwb; + seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb_params = dwb_params; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dwbc_disable(struct block_sequence_state *seq_state, + struct dwbc *dwb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DWBC_DISABLE; + seq_state->steps[*seq_state->num_steps].params.dwbc_disable_params.dwb = dwb; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state, + struct timing_generator *tg, + struct gsl_params gsl) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL; + 
seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.gsl = gsl; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int group_idx, + uint32_t gsl_ready_signal) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL_SOURCE_SELECT; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.group_idx = group_idx; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.gsl_ready_signal = gsl_ready_signal; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t mall_sel, + bool cache_cursor) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_MALL_SEL; + seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.mall_sel = mall_sel; + seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.cache_cursor = cache_cursor; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_PREPARE_SUBVP_BUFFERING; + seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK_EN; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool disable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_DISABLE_CONTROL; + seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.disable = disable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset), + bool reset) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBBUB_SOFT_RESET; + seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub_soft_reset = hubbub_soft_reset; + seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.reset = reset; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_clk_cntl(struct 
block_sequence_state *seq_state, + struct hubp *hubp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_CLK_CNTL; + seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state, + struct dpp *dpp, + bool dppclk_div, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_DPPCLK_CONTROL; + seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dppclk_div = dppclk_div; + seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DISABLE_PHANTOM_CRTC; + seq_state->steps[*seq_state->num_steps].params.disable_phantom_crtc_params.tg = tg; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + int dsc_inst, + bool is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_PG_STATUS; + seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.dsc_inst = dsc_inst; + seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_WAIT_DISCONNECT_PENDING_CLEAR; + seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.dsc = dsc; + seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_disable(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_DISABLE; + seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.dsc = dsc; + seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dsc_inst, + bool *is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DCCG_SET_REF_DSCCLK; + seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dccg = dccg; + seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dsc_inst = dsc_inst; + seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void 
hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool clock_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_ROOT_CLOCK_CONTROL; + seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.dpp_inst = dpp_inst; + seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.clock_on = clock_on; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_PG_CONTROL; + seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.dpp_inst = dpp_inst; + seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.power_on = power_on; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_PG_CONTROL; + seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hubp_inst = hubp_inst; + seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.power_on = power_on; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_init(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_INIT; + seq_state->steps[*seq_state->num_steps].params.hubp_init_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_reset(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_RESET; + seq_state->steps[*seq_state->num_steps].params.hubp_reset_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_reset(struct block_sequence_state *seq_state, + struct dpp *dpp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_RESET; + seq_state->steps[*seq_state->num_steps].params.dpp_reset_params.dpp = dpp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_PIPE_CLOCK_CONTROL; + seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint64_t sys_default, + uint64_t sys_low, + uint64_t sys_high) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = 
HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS; + seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_default.quad_part = sys_default; + seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_low.quad_part = sys_low; + seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_high.quad_part = sys_high; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_INT; + seq_state->steps[*seq_state->num_steps].params.hubp_set_flip_int_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dpp_inst, + int dppclk_khz) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DCCG_UPDATE_DPP_DTO; + seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dccg = dccg; + seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dpp_inst = dpp_inst; + seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dppclk_khz = dppclk_khz; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t otg_inst) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_VTG_SEL; + seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.otg_inst = otg_inst; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_dchub_per_pipe_register_set *hubp_regs, + union dml2_global_sync_programming *global_sync, + struct dc_crtc_timing *timing) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP2; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp_regs = hubp_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.global_sync = global_sync; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.timing = timing; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs, + struct _vcs_dpi_display_rq_regs_st *rq_regs, + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.dlg_regs = dlg_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.ttu_regs = ttu_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.rq_regs = rq_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.pipe_dest = pipe_dest; + 
(*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool unbounded_req) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_UNBOUNDED_REQUESTING; + seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.unbounded_req = unbounded_req; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_dchub_per_pipe_register_set *hubp_regs) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT2; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp_regs = hubp_regs; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.dlg_regs = dlg_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.ttu_regs = ttu_regs; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state, + struct hubp *hubp, + enum surface_pixel_format format, + struct dc_tiling_info *tiling_info, + struct plane_size plane_size, + enum dc_rotation_angle rotation, + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror, + int compat_level) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_SURFACE_CONFIG; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.format = format; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.tiling_info = tiling_info; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.plane_size = plane_size; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.rotation = rotation; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.dcc = dcc; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.horizontal_mirror = horizontal_mirror; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.compat_level = compat_level; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_SETUP_DPP; + seq_state->steps[*seq_state->num_steps].params.setup_dpp_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state, + struct dpp *dpp, + 
enum dc_color_space color_space, + struct dc_csc_transform *cursor_csc_color_matrix) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_SET_CURSOR_MATRIX; + seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.color_space = color_space; + seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.cursor_csc_color_matrix = cursor_csc_color_matrix; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state, + struct dpp *dpp, + const struct scaler_data *scl_data) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_SET_SCALER; + seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.scl_data = scl_data; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state, + struct hubp *hubp, + const struct rect *viewport, + const struct rect *viewport_c) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_MEM_PROGRAM_VIEWPORT; + seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport = viewport; + seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport_c = viewport_c; + (*seq_state->num_steps)++; + } +} + +void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = ABORT_CURSOR_OFFLOAD_UPDATE; + seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_ATTRIBUTE; + seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_set_cursor_position(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_POSITION; + seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_SDR_WHITE_LEVEL; + seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.dc = dc; + 
seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_program_output_csc(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, + int opp_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = PROGRAM_OUTPUT_CSC; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.colorspace = colorspace; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.matrix = matrix; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.opp_id = opp_id; + (*seq_state->num_steps)++; + } +} + +void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = PHANTOM_HUBP_POST_ENABLE; + seq_state->steps[*seq_state->num_steps].params.phantom_hubp_post_enable_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_update_force_pstate(struct block_sequence_state *seq_state, + struct dc *dc, + struct dc_state *context) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = UPDATE_FORCE_PSTATE; + seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.context = context; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state, + struct hubbub *hubbub) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBBUB_APPLY_DEDCN21_147_WA; + seq_state->steps[*seq_state->num_steps].params.hubbub_apply_dedcn21_147_wa_params.hubbub = hubbub; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + bool allow, + bool *disallow_self_refresh_applied) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBBUB_ALLOW_SELF_REFRESH_CONTROL; + seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.allow = allow; + seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = disallow_self_refresh_applied; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state, + struct timing_generator *tg, + unsigned int *frame_count) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_GET_FRAME_COUNT; + seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.frame_count = frame_count; + (*seq_state->num_steps)++; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 
a180f68f711c..deb23d20bca6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -522,10 +522,10 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc) { struct link_encoder *link_enc = NULL; - enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS]; + enum engine_id encs_assigned[MAX_LINK_ENCODERS]; int i; - for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++) + for (i = 0; i < MAX_LINK_ENCODERS; i++) encs_assigned[i] = ENGINE_ID_UNKNOWN; /* Add assigned encoders to list. */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index bc5dedf5f60c..848c267ef11e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -95,10 +95,44 @@ #define DC_LOGGER \ dc->ctx->logger #define DC_LOGGER_INIT(logger) -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #define UNABLE_TO_SPLIT -1 +static void capture_pipe_topology_data(struct dc *dc, int plane_idx, int slice_idx, int stream_idx, + int dpp_inst, int opp_inst, int tg_inst, bool is_phantom_pipe) +{ + struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index]; + + if (current_snapshot->line_count >= MAX_PIPES) + return; + + current_snapshot->pipe_log_lines[current_snapshot->line_count].is_phantom_pipe = is_phantom_pipe; + current_snapshot->pipe_log_lines[current_snapshot->line_count].plane_idx = plane_idx; + current_snapshot->pipe_log_lines[current_snapshot->line_count].slice_idx = slice_idx; + current_snapshot->pipe_log_lines[current_snapshot->line_count].stream_idx = stream_idx; + current_snapshot->pipe_log_lines[current_snapshot->line_count].dpp_inst = dpp_inst; + current_snapshot->pipe_log_lines[current_snapshot->line_count].opp_inst = opp_inst; + current_snapshot->pipe_log_lines[current_snapshot->line_count].tg_inst = tg_inst; + + current_snapshot->line_count++; +} + +static void start_new_topology_snapshot(struct dc *dc, struct dc_state *state) +{ + // Move to next snapshot slot (circular buffer) + dc->debug_data.topology_history.current_snapshot_index = (dc->debug_data.topology_history.current_snapshot_index + 1) % MAX_TOPOLOGY_SNAPSHOTS; + + // Clear the new snapshot + struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index]; + memset(current_snapshot, 0, sizeof(*current_snapshot)); + + // Set metadata + current_snapshot->timestamp_us = dm_get_timestamp(dc->ctx); + current_snapshot->stream_count = state->stream_count; + current_snapshot->phantom_stream_count = state->phantom_stream_count; +} + enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) { enum dce_version dc_version = DCE_VERSION_UNKNOWN; @@ -446,6 +480,14 @@ bool resource_construct( DC_ERR("DC: failed to create stream_encoder!\n"); pool->stream_enc_count++; } + + for (i = 0; i < caps->num_analog_stream_encoder; i++) { + pool->stream_enc[caps->num_stream_encoder + i] = + create_funcs->create_stream_encoder(ENGINE_ID_DACA + i, ctx); + if (pool->stream_enc[caps->num_stream_encoder + i] == NULL) + DC_ERR("DC: failed to create analog stream_encoder %d!\n", i); + pool->stream_enc_count++; + } } pool->hpo_dp_stream_enc_count = 0; @@ -2303,10 +2345,11 @@ bool resource_is_odm_topology_changed(const struct pipe_ctx 
*otg_master_a, static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe, int stream_idx, int slice_idx, int plane_idx, int slice_count, - bool is_primary) + bool is_primary, bool is_phantom_pipe) { DC_LOGGER_INIT(dc->ctx->logger); + // new format for logging: bit storing code if (slice_idx == 0 && plane_idx == 0 && is_primary) { /* case 0 (OTG master pipe with plane) */ DC_LOG_DC(" | plane%d slice%d stream%d|", @@ -2315,6 +2358,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe, pipe->plane_res.dpp->inst, pipe->stream_res.opp->inst, pipe->stream_res.tg->inst); + capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } else if (slice_idx == 0 && plane_idx == -1) { /* case 1 (OTG master pipe without plane) */ DC_LOG_DC(" | slice%d stream%d|", @@ -2323,6 +2370,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe, pipe->stream_res.opp->inst, pipe->stream_res.opp->inst, pipe->stream_res.tg->inst); + capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } else if (slice_idx != 0 && plane_idx == 0 && is_primary) { /* case 2 (OPP head pipe with plane) */ DC_LOG_DC(" | plane%d slice%d | |", @@ -2330,27 +2381,43 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe, DC_LOG_DC(" |DPP%d----OPP%d----| |", pipe->plane_res.dpp->inst, pipe->stream_res.opp->inst); + capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } else if (slice_idx != 0 && plane_idx == -1) { /* case 3 (OPP head pipe without plane) */ DC_LOG_DC(" | slice%d | |", slice_idx); DC_LOG_DC(" |DPG%d----OPP%d----| |", pipe->plane_res.dpp->inst, pipe->stream_res.opp->inst); + capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } else if (slice_idx == slice_count - 1) { /* case 4 (DPP pipe in last slice) */ DC_LOG_DC(" | plane%d | |", plane_idx); DC_LOG_DC(" |DPP%d----| |", pipe->plane_res.dpp->inst); + capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } else { /* case 5 (DPP pipe not in last slice) */ DC_LOG_DC(" | plane%d | | |", plane_idx); DC_LOG_DC(" |DPP%d----| | |", pipe->plane_res.dpp->inst); + capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx, + pipe->plane_res.dpp->inst, + pipe->stream_res.opp->inst, + pipe->stream_res.tg->inst, is_phantom_pipe); } } static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state, - struct pipe_ctx *otg_master, int stream_idx) + struct pipe_ctx *otg_master, int stream_idx, bool is_phantom_pipe) { struct pipe_ctx *opp_heads[MAX_PIPES]; struct pipe_ctx *dpp_pipes[MAX_PIPES]; @@ -2376,12 +2443,12 @@ static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state, resource_log_pipe(dc, dpp_pipes[dpp_idx], stream_idx, slice_idx, plane_idx, slice_count, - is_primary); + is_primary, is_phantom_pipe); } } else { resource_log_pipe(dc, opp_heads[slice_idx], stream_idx, slice_idx, plane_idx, - slice_count, true); + slice_count, true, is_phantom_pipe); } } @@ -2412,6 +2479,10 @@ void resource_log_pipe_topology_update(struct dc *dc, 
struct dc_state *state) struct pipe_ctx *otg_master; int stream_idx, phantom_stream_idx; DC_LOGGER_INIT(dc->ctx->logger); + bool is_phantom_pipe = false; + + // Start a new snapshot for this topology update + start_new_topology_snapshot(dc, state); DC_LOG_DC(" pipe topology update"); DC_LOG_DC(" ________________________"); @@ -2425,9 +2496,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state) if (!otg_master) continue; - resource_log_pipe_for_stream(dc, state, otg_master, stream_idx); + resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe); } if (state->phantom_stream_count > 0) { + is_phantom_pipe = true; DC_LOG_DC(" | (phantom pipes) |"); for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) { if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN) @@ -2440,7 +2512,7 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state) if (!otg_master) continue; - resource_log_pipe_for_stream(dc, state, otg_master, stream_idx); + resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe); } } DC_LOG_DC(" |________________________|\n"); @@ -2690,17 +2762,40 @@ static inline int find_fixed_dio_link_enc(const struct dc_link *link) } static inline int find_free_dio_link_enc(const struct resource_context *res_ctx, - const struct dc_link *link, const struct resource_pool *pool) + const struct dc_link *link, const struct resource_pool *pool, struct dc_stream_state *stream) { - int i; + int i, j = -1; + int stream_enc_inst = -1; int enc_count = pool->dig_link_enc_count; - /* for dpia, check preferred encoder first and then the next one */ - for (i = 0; i < enc_count; i++) - if (res_ctx->dio_link_enc_ref_cnts[(link->dpia_preferred_eng_id + i) % enc_count] == 0) - break; + /* Find stream encoder instance for the stream */ + if (stream) { + for (i = 0; i < pool->pipe_count; i++) { + if ((res_ctx->pipe_ctx[i].stream == stream) && + (res_ctx->pipe_ctx[i].stream_res.stream_enc != NULL)) { + stream_enc_inst = res_ctx->pipe_ctx[i].stream_res.stream_enc->id; + break; + } + } + } - return (i >= 0 && i < enc_count) ? (link->dpia_preferred_eng_id + i) % enc_count : -1; + /* Assign dpia preferred > stream enc instance > available */ + for (i = 0; i < enc_count; i++) { + if (res_ctx->dio_link_enc_ref_cnts[i] == 0) { + if (j == -1) + j = i; + + if (link->dpia_preferred_eng_id == i) { + j = i; + break; + } + + if (stream_enc_inst == i) { + j = stream_enc_inst; + } + } + } + return j; } static inline void acquire_dio_link_enc( @@ -2781,7 +2876,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc, retain_dio_link_enc(res_ctx, enc_index); } else { if (stream->link->is_dig_mapping_flexible) - enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool); + enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool, stream); else { int link_index = 0; @@ -2791,7 +2886,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc, * one into the acquiring link. 
*/ if (enc_index >= 0 && is_dio_enc_acquired_by_other_link(stream->link, enc_index, &link_index)) { - int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool); + int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool, stream); if (new_enc_index >= 0) swap_dio_link_enc_to_muxable_ctx(context, pool, new_enc_index, enc_index); @@ -5201,7 +5296,7 @@ struct link_encoder *get_temp_dio_link_enc( enc_index = link->eng_id; if (enc_index < 0) - enc_index = find_free_dio_link_enc(res_ctx, link, pool); + enc_index = find_free_dio_link_enc(res_ctx, link, pool, NULL); if (enc_index >= 0) link_enc = pool->link_encoders[enc_index]; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index c61300a7cb1c..2de8ef4a58ec 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -35,8 +35,8 @@ #include "link_enc_cfg.h" #if defined(CONFIG_DRM_AMD_DC_FP) -#include "dml2/dml2_wrapper.h" -#include "dml2/dml2_internal_types.h" +#include "dml2_0/dml2_wrapper.h" +#include "dml2_0/dml2_internal_types.h" #endif #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 0a46e834357a..129cd5f84983 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -224,6 +224,14 @@ struct dc_stream_status *dc_stream_get_status( return dc_state_get_stream_status(dc->current_state, stream); } +const struct dc_stream_status *dc_stream_get_status_const( + const struct dc_stream_state *stream) +{ + struct dc *dc = stream->ctx->dc; + + return dc_state_get_stream_status(dc->current_state, stream); +} + void program_cursor_attributes( struct dc *dc, struct dc_stream_state *stream) @@ -231,6 +239,7 @@ void program_cursor_attributes( int i; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; + bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc); if (!stream) return; @@ -245,9 +254,14 @@ void program_cursor_attributes( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - dc->hwss.cursor_lock(dc, pipe_to_program, true); - if (pipe_to_program->next_odm_pipe) - dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true); + + if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) { + dc->hwss.begin_cursor_offload_update(dc, pipe_ctx); + } else { + dc->hwss.cursor_lock(dc, pipe_to_program, true); + if (pipe_to_program->next_odm_pipe) + dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true); + } } dc->hwss.set_cursor_attribute(pipe_ctx); @@ -255,12 +269,18 @@ void program_cursor_attributes( dc_send_update_cursor_info_to_dmu(pipe_ctx, i); if (dc->hwss.set_cursor_sdr_white_level) dc->hwss.set_cursor_sdr_white_level(pipe_ctx); + if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe) + dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx); } if (pipe_to_program) { - dc->hwss.cursor_lock(dc, pipe_to_program, false); - if (pipe_to_program->next_odm_pipe) - dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false); + if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) { + dc->hwss.commit_cursor_offload_update(dc, pipe_to_program); + } else { + dc->hwss.cursor_lock(dc, pipe_to_program, false); + if (pipe_to_program->next_odm_pipe) + dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false); + } } } @@ -366,6 +386,7 @@ void program_cursor_position( int i; struct 
resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; + bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc); if (!stream) return; @@ -384,16 +405,27 @@ void program_cursor_position( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - dc->hwss.cursor_lock(dc, pipe_to_program, true); + + if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) + dc->hwss.begin_cursor_offload_update(dc, pipe_ctx); + else + dc->hwss.cursor_lock(dc, pipe_to_program, true); } dc->hwss.set_cursor_position(pipe_ctx); + if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe) + dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx); + if (dc->ctx->dmub_srv) dc_send_update_cursor_info_to_dmu(pipe_ctx, i); } - if (pipe_to_program) - dc->hwss.cursor_lock(dc, pipe_to_program, false); + if (pipe_to_program) { + if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) + dc->hwss.commit_cursor_offload_update(dc, pipe_to_program); + else + dc->hwss.cursor_lock(dc, pipe_to_program, false); + } } bool dc_stream_set_cursor_position( @@ -860,9 +892,11 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) stream->sink->sink_signal != SIGNAL_TYPE_NONE) { DC_LOG_DC( - "\tdispname: %s signal: %x\n", + "\tsignal: %x dispname: %s manufacturer_id: 0x%x product_id: 0x%x\n", + stream->signal, stream->sink->edid_caps.display_name, - stream->signal); + stream->sink->edid_caps.manufacturer_id, + stream->sink->edid_caps.product_id); } } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 98f0b6b3c213..29edfa51ea2c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -42,7 +42,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "dmub/inc/dmub_cmd.h" @@ -54,8 +54,16 @@ struct abm_save_restore; struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; +struct dcn_hubbub_reg_state; +struct dcn_hubp_reg_state; +struct dcn_dpp_reg_state; +struct dcn_mpc_reg_state; +struct dcn_opp_reg_state; +struct dcn_dsc_reg_state; +struct dcn_optc_reg_state; +struct dcn_dccg_reg_state; -#define DC_VER "3.2.351" +#define DC_VER "3.2.359" /** * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC @@ -278,6 +286,15 @@ struct dc_scl_caps { bool sharpener_support; }; +struct dc_check_config { + /** + * max video plane width that can be safely assumed to be always + * supported by single DPP pipe. + */ + unsigned int max_optimizable_video_width; + bool enable_legacy_fast_update; +}; + struct dc_caps { uint32_t max_streams; uint32_t max_links; @@ -293,11 +310,6 @@ struct dc_caps { unsigned int max_cursor_size; unsigned int max_buffered_cursor_size; unsigned int max_video_width; - /* - * max video plane width that can be safely assumed to be always - * supported by single DPP pipe. 
- */ - unsigned int max_optimizable_video_width; unsigned int min_horizontal_blanking_period; int linear_pitch_alignment; bool dcc_const_color; @@ -455,6 +467,18 @@ enum surface_update_type { UPDATE_TYPE_FULL, /* may need to shuffle resources */ }; +enum dc_lock_descriptor { + LOCK_DESCRIPTOR_NONE = 0x0, + LOCK_DESCRIPTOR_STREAM = 0x1, + LOCK_DESCRIPTOR_LINK = 0x2, + LOCK_DESCRIPTOR_GLOBAL = 0x4, +}; + +struct surface_update_descriptor { + enum surface_update_type update_type; + enum dc_lock_descriptor lock_descriptor; +}; + /* Forward declaration*/ struct dc; struct dc_plane_state; @@ -530,6 +554,7 @@ struct dc_config { bool set_pipe_unlock_order; bool enable_dpia_pre_training; bool unify_link_enc_assignment; + bool enable_cursor_offload; struct spl_sharpness_range dcn_sharpness_range; struct spl_sharpness_range dcn_override_sharpness_range; }; @@ -849,8 +874,7 @@ union dpia_debug_options { uint32_t enable_force_tbt3_work_around:1; /* bit 4 */ uint32_t disable_usb4_pm_support:1; /* bit 5 */ uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */ - uint32_t enable_bw_allocation_mode:1; /* bit 7 */ - uint32_t reserved:24; + uint32_t reserved:25; } bits; uint32_t raw; }; @@ -875,6 +899,7 @@ struct dc_debug_data { uint32_t ltFailCount; uint32_t i2cErrorCount; uint32_t auxErrorCount; + struct pipe_topology_history topology_history; }; struct dc_phy_addr_space_config { @@ -1120,7 +1145,6 @@ struct dc_debug_options { uint32_t fpo_vactive_min_active_margin_us; uint32_t fpo_vactive_max_blank_us; bool enable_hpo_pg_support; - bool enable_legacy_fast_update; bool disable_dc_mode_overwrite; bool replay_skip_crtc_disabled; bool ignore_pg;/*do nothing, let pmfw control it*/ @@ -1152,7 +1176,6 @@ struct dc_debug_options { bool enable_ips_visual_confirm; unsigned int sharpen_policy; unsigned int scale_to_sharpness_policy; - bool skip_full_updated_if_possible; unsigned int enable_oled_edp_power_up_opt; bool enable_hblank_borrow; bool force_subvp_df_throttle; @@ -1164,6 +1187,7 @@ struct dc_debug_options { unsigned int auxless_alpm_lfps_t1t2_us; short auxless_alpm_lfps_t1t2_offset_us; bool disable_stutter_for_wm_program; + bool enable_block_sequence_programming; }; @@ -1702,6 +1726,7 @@ struct dc { struct dc_debug_options debug; struct dc_versions versions; struct dc_caps caps; + struct dc_check_config check_config; struct dc_cap_funcs cap_funcs; struct dc_config config; struct dc_bounding_box_overrides bb_overrides; @@ -1830,20 +1855,26 @@ struct dc_surface_update { }; struct dc_underflow_debug_data { - uint32_t otg_inst; - uint32_t otg_underflow; - uint32_t h_position; - uint32_t v_position; - uint32_t otg_frame_count; - struct dc_underflow_per_hubp_debug_data { - uint32_t hubp_underflow; - uint32_t hubp_in_blank; - uint32_t hubp_readline; - uint32_t det_config_error; - } hubps[MAX_PIPES]; - uint32_t curr_det_sizes[MAX_PIPES]; - uint32_t target_det_sizes[MAX_PIPES]; - uint32_t compbuf_config_error; + struct dcn_hubbub_reg_state *hubbub_reg_state; + struct dcn_hubp_reg_state *hubp_reg_state[MAX_PIPES]; + struct dcn_dpp_reg_state *dpp_reg_state[MAX_PIPES]; + struct dcn_mpc_reg_state *mpc_reg_state[MAX_PIPES]; + struct dcn_opp_reg_state *opp_reg_state[MAX_PIPES]; + struct dcn_dsc_reg_state *dsc_reg_state[MAX_PIPES]; + struct dcn_optc_reg_state *optc_reg_state[MAX_PIPES]; + struct dcn_dccg_reg_state *dccg_reg_state[MAX_PIPES]; +}; + +struct power_features { + bool ips; + bool rcg; + bool replay; + bool dds; + bool sprs; + bool psr; + bool fams; + bool mpo; + bool uclk_p_state; }; /* @@ -2688,6 +2719,13 @@ 
bool dc_process_dmub_aux_transfer_async(struct dc *dc, uint32_t link_index, struct aux_payload *payload); +/* + * smart power OLED Interfaces + */ +bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits, + uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline); +bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL); + /* Get dc link index from dpia port index */ uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, uint8_t dpia_port_index); @@ -2721,6 +2759,8 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context); bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index); +void dc_log_preos_dmcub_info(const struct dc *dc); + /* DSC Interfaces */ #include "dc_dsc.h" @@ -2736,7 +2776,7 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream); bool dc_is_cursor_limit_pending(struct dc *dc); -bool dc_can_clear_cursor_limit(struct dc *dc); +bool dc_can_clear_cursor_limit(const struct dc *dc); /** * dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data. @@ -2751,4 +2791,493 @@ bool dc_can_clear_cursor_limit(struct dc *dc); */ void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, struct dc_underflow_debug_data *out_data); +void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst, struct power_features *out_data); + +/** + * Software state variables used to program register fields across the display pipeline + */ +struct dc_register_software_state { + /* HUBP register programming variables for each pipe */ + struct { + bool valid_plane_state; + bool valid_stream; + bool min_dc_gfx_version9; + uint32_t vtg_sel; /* DCHUBP_CNTL->HUBP_VTG_SEL from pipe_ctx->stream_res.tg->inst */ + uint32_t hubp_clock_enable; /* HUBP_CLK_CNTL->HUBP_CLOCK_ENABLE from power management */ + uint32_t surface_pixel_format; /* DCSURF_SURFACE_CONFIG->SURFACE_PIXEL_FORMAT from plane_state->format */ + uint32_t rotation_angle; /* DCSURF_SURFACE_CONFIG->ROTATION_ANGLE from plane_state->rotation */ + uint32_t h_mirror_en; /* DCSURF_SURFACE_CONFIG->H_MIRROR_EN from plane_state->horizontal_mirror */ + uint32_t surface_dcc_en; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_EN from dcc->enable */ + uint32_t surface_size_width; /* HUBP_SIZE->SURFACE_SIZE_WIDTH from plane_size.surface_size.width */ + uint32_t surface_size_height; /* HUBP_SIZE->SURFACE_SIZE_HEIGHT from plane_size.surface_size.height */ + uint32_t pri_viewport_width; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_WIDTH from scaler_data.viewport.width */ + uint32_t pri_viewport_height; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_HEIGHT from scaler_data.viewport.height */ + uint32_t pri_viewport_x_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_X_START from scaler_data.viewport.x */ + uint32_t pri_viewport_y_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_Y_START from scaler_data.viewport.y */ + uint32_t cursor_enable; /* CURSOR_CONTROL->CURSOR_ENABLE from cursor_attributes.enable */ + uint32_t cursor_width; /* CURSOR_SETTINGS->CURSOR_WIDTH from cursor_position.width */ + uint32_t cursor_height; /* CURSOR_SETTINGS->CURSOR_HEIGHT from cursor_position.height */ + + /* Additional DCC configuration */ + uint32_t surface_dcc_ind_64b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_64B_BLK from dcc.independent_64b_blks */ + uint32_t surface_dcc_ind_128b_blk; /* 
DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_128B_BLK from dcc.independent_128b_blks */ + + /* Surface pitch configuration */ + uint32_t surface_pitch; /* DCSURF_SURFACE_PITCH->PITCH from plane_size.surface_pitch */ + uint32_t meta_pitch; /* DCSURF_SURFACE_PITCH->META_PITCH from dcc.meta_pitch */ + uint32_t chroma_pitch; /* DCSURF_SURFACE_PITCH_C->PITCH_C from plane_size.chroma_pitch */ + uint32_t meta_pitch_c; /* DCSURF_SURFACE_PITCH_C->META_PITCH_C from dcc.meta_pitch_c */ + + /* Surface addresses */ + uint32_t primary_surface_address_low; /* DCSURF_PRIMARY_SURFACE_ADDRESS->PRIMARY_SURFACE_ADDRESS from address.grph.addr.low_part */ + uint32_t primary_surface_address_high; /* DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH->PRIMARY_SURFACE_ADDRESS_HIGH from address.grph.addr.high_part */ + uint32_t primary_meta_surface_address_low; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS->PRIMARY_META_SURFACE_ADDRESS from address.grph.meta_addr.low_part */ + uint32_t primary_meta_surface_address_high; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH->PRIMARY_META_SURFACE_ADDRESS_HIGH from address.grph.meta_addr.high_part */ + + /* TMZ configuration */ + uint32_t primary_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_TMZ from address.tmz_surface */ + uint32_t primary_meta_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_META_SURFACE_TMZ from address.tmz_surface */ + + /* Tiling configuration */ + uint32_t sw_mode; /* DCSURF_TILING_CONFIG->SW_MODE from tiling_info.gfx9.swizzle */ + uint32_t num_pipes; /* DCSURF_ADDR_CONFIG->NUM_PIPES from tiling_info.gfx9.num_pipes */ + uint32_t num_banks; /* DCSURF_ADDR_CONFIG->NUM_BANKS from tiling_info.gfx9.num_banks */ + uint32_t pipe_interleave; /* DCSURF_ADDR_CONFIG->PIPE_INTERLEAVE from tiling_info.gfx9.pipe_interleave */ + uint32_t num_shader_engines; /* DCSURF_ADDR_CONFIG->NUM_SE from tiling_info.gfx9.num_shader_engines */ + uint32_t num_rb_per_se; /* DCSURF_ADDR_CONFIG->NUM_RB_PER_SE from tiling_info.gfx9.num_rb_per_se */ + uint32_t num_pkrs; /* DCSURF_ADDR_CONFIG->NUM_PKRS from tiling_info.gfx9.num_pkrs */ + + /* DML Request Size Configuration - Luma */ + uint32_t rq_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->CHUNK_SIZE from rq_regs.rq_regs_l.chunk_size */ + uint32_t rq_min_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_CHUNK_SIZE from rq_regs.rq_regs_l.min_chunk_size */ + uint32_t rq_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->META_CHUNK_SIZE from rq_regs.rq_regs_l.meta_chunk_size */ + uint32_t rq_min_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_META_CHUNK_SIZE from rq_regs.rq_regs_l.min_meta_chunk_size */ + uint32_t rq_dpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->DPTE_GROUP_SIZE from rq_regs.rq_regs_l.dpte_group_size */ + uint32_t rq_mpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->MPTE_GROUP_SIZE from rq_regs.rq_regs_l.mpte_group_size */ + uint32_t rq_swath_height_l; /* DCHUBP_REQ_SIZE_CONFIG->SWATH_HEIGHT_L from rq_regs.rq_regs_l.swath_height */ + uint32_t rq_pte_row_height_l; /* DCHUBP_REQ_SIZE_CONFIG->PTE_ROW_HEIGHT_L from rq_regs.rq_regs_l.pte_row_height */ + + /* DML Request Size Configuration - Chroma */ + uint32_t rq_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->CHUNK_SIZE_C from rq_regs.rq_regs_c.chunk_size */ + uint32_t rq_min_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_chunk_size */ + uint32_t rq_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->META_CHUNK_SIZE_C from rq_regs.rq_regs_c.meta_chunk_size */ + uint32_t rq_min_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_META_CHUNK_SIZE_C from 
rq_regs.rq_regs_c.min_meta_chunk_size */ + uint32_t rq_dpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->DPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.dpte_group_size */ + uint32_t rq_mpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.mpte_group_size */ + uint32_t rq_swath_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->SWATH_HEIGHT_C from rq_regs.rq_regs_c.swath_height */ + uint32_t rq_pte_row_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->PTE_ROW_HEIGHT_C from rq_regs.rq_regs_c.pte_row_height */ + + /* DML Expansion Modes */ + uint32_t drq_expansion_mode; /* DCN_EXPANSION_MODE->DRQ_EXPANSION_MODE from rq_regs.drq_expansion_mode */ + uint32_t prq_expansion_mode; /* DCN_EXPANSION_MODE->PRQ_EXPANSION_MODE from rq_regs.prq_expansion_mode */ + uint32_t mrq_expansion_mode; /* DCN_EXPANSION_MODE->MRQ_EXPANSION_MODE from rq_regs.mrq_expansion_mode */ + uint32_t crq_expansion_mode; /* DCN_EXPANSION_MODE->CRQ_EXPANSION_MODE from rq_regs.crq_expansion_mode */ + + /* DML DLG parameters - nominal */ + uint32_t dst_y_per_vm_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_VM_VBLANK from dlg_regs.dst_y_per_vm_vblank */ + uint32_t dst_y_per_row_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_ROW_VBLANK from dlg_regs.dst_y_per_row_vblank */ + uint32_t dst_y_per_vm_flip; /* NOM_PARAMETERS_1->DST_Y_PER_VM_FLIP from dlg_regs.dst_y_per_vm_flip */ + uint32_t dst_y_per_row_flip; /* NOM_PARAMETERS_1->DST_Y_PER_ROW_FLIP from dlg_regs.dst_y_per_row_flip */ + + /* DML prefetch settings */ + uint32_t dst_y_prefetch; /* PREFETCH_SETTINS->DST_Y_PREFETCH from dlg_regs.dst_y_prefetch */ + uint32_t vratio_prefetch; /* PREFETCH_SETTINS->VRATIO_PREFETCH from dlg_regs.vratio_prefetch */ + uint32_t vratio_prefetch_c; /* PREFETCH_SETTINS_C->VRATIO_PREFETCH_C from dlg_regs.vratio_prefetch_c */ + + /* TTU parameters */ + uint32_t qos_level_low_wm; /* TTU_CNTL1->QoSLevelLowWaterMark from ttu_regs.qos_level_low_wm */ + uint32_t qos_level_high_wm; /* TTU_CNTL1->QoSLevelHighWaterMark from ttu_regs.qos_level_high_wm */ + uint32_t qos_level_flip; /* TTU_CNTL2->QoS_LEVEL_FLIP_L from ttu_regs.qos_level_flip */ + uint32_t min_ttu_vblank; /* DCN_GLOBAL_TTU_CNTL->MIN_TTU_VBLANK from ttu_regs.min_ttu_vblank */ + } hubp[MAX_PIPES]; + + /* HUBBUB register programming variables */ + struct { + /* Individual DET buffer control per pipe - software state that programs DET registers */ + uint32_t det0_size; /* DCHUBBUB_DET0_CTRL->DET0_SIZE from hubbub->funcs->program_det_size(hubbub, 0, det_buffer_size_kb) */ + uint32_t det1_size; /* DCHUBBUB_DET1_CTRL->DET1_SIZE from hubbub->funcs->program_det_size(hubbub, 1, det_buffer_size_kb) */ + uint32_t det2_size; /* DCHUBBUB_DET2_CTRL->DET2_SIZE from hubbub->funcs->program_det_size(hubbub, 2, det_buffer_size_kb) */ + uint32_t det3_size; /* DCHUBBUB_DET3_CTRL->DET3_SIZE from hubbub->funcs->program_det_size(hubbub, 3, det_buffer_size_kb) */ + + /* Compression buffer control - software state that programs COMPBUF registers */ + uint32_t compbuf_size; /* DCHUBBUB_COMPBUF_CTRL->COMPBUF_SIZE from hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, safe_to_increase) */ + uint32_t compbuf_reserved_space_64b; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_64B from hubbub2->pixel_chunk_size / 32 */ + uint32_t compbuf_reserved_space_zs; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_ZS from hubbub2->pixel_chunk_size / 128 */ + } hubbub; + + /* DPP register programming variables for each pipe (simplified for available fields) */ + struct { + uint32_t dpp_clock_enable; /* DPP_CONTROL->DPP_CLOCK_ENABLE from 
dppclk_enable */ + + /* Recout (Rectangle of Interest) configuration */ + uint32_t recout_start_x; /* RECOUT_START->RECOUT_START_X from pipe_ctx->plane_res.scl_data.recout.x */ + uint32_t recout_start_y; /* RECOUT_START->RECOUT_START_Y from pipe_ctx->plane_res.scl_data.recout.y */ + uint32_t recout_width; /* RECOUT_SIZE->RECOUT_WIDTH from pipe_ctx->plane_res.scl_data.recout.width */ + uint32_t recout_height; /* RECOUT_SIZE->RECOUT_HEIGHT from pipe_ctx->plane_res.scl_data.recout.height */ + + /* MPC (Multiple Pipe/Plane Combiner) size configuration */ + uint32_t mpc_width; /* MPC_SIZE->MPC_WIDTH from pipe_ctx->plane_res.scl_data.h_active */ + uint32_t mpc_height; /* MPC_SIZE->MPC_HEIGHT from pipe_ctx->plane_res.scl_data.v_active */ + + /* DSCL mode configuration */ + uint32_t dscl_mode; /* SCL_MODE->DSCL_MODE from pipe_ctx->plane_res.scl_data.dscl_prog_data.dscl_mode */ + + /* Scaler ratios (simplified to integer parts) */ + uint32_t horz_ratio_int; /* SCL_HORZ_FILTER_SCALE_RATIO->SCL_H_SCALE_RATIO integer part from ratios.horz */ + uint32_t vert_ratio_int; /* SCL_VERT_FILTER_SCALE_RATIO->SCL_V_SCALE_RATIO integer part from ratios.vert */ + + /* Basic scaler taps */ + uint32_t h_taps; /* SCL_TAP_CONTROL->SCL_H_NUM_TAPS from taps.h_taps */ + uint32_t v_taps; /* SCL_TAP_CONTROL->SCL_V_NUM_TAPS from taps.v_taps */ + } dpp[MAX_PIPES]; + + /* DCCG register programming variables */ + struct { + /* Core Display Clock Control */ + uint32_t dispclk_khz; /* DENTIST_DISPCLK_CNTL->DENTIST_DISPCLK_WDIVIDER from clk_mgr.dispclk_khz */ + uint32_t dc_mem_global_pwr_req_dis; /* DC_MEM_GLOBAL_PWR_REQ_CNTL->DC_MEM_GLOBAL_PWR_REQ_DIS from memory power management settings */ + + /* DPP Clock Control - 4 fields per pipe */ + uint32_t dppclk_khz[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK_R_GATE_DISABLE from dpp_clocks[pipe] */ + uint32_t dppclk_enable[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK0_EN,DPPCLK1_EN,DPPCLK2_EN,DPPCLK3_EN from dccg31_update_dpp_dto() */ + uint32_t dppclk_dto_enable[MAX_PIPES]; /* DPPCLK_DTO_CTRL->DPPCLK_DTO_ENABLE from dccg->dpp_clock_gated[dpp_inst] state */ + uint32_t dppclk_dto_phase[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_PHASE from phase calculation req_dppclk/ref_dppclk */ + uint32_t dppclk_dto_modulo[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_MODULO from modulo = 0xff */ + + /* DSC Clock Control - 4 fields per DSC resource */ + uint32_t dscclk_khz[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK_DTO_ENABLE from dsc_clocks */ + uint32_t dscclk_dto_enable[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK0_DTO_ENABLE,DSCCLK1_DTO_ENABLE,DSCCLK2_DTO_ENABLE,DSCCLK3_DTO_ENABLE */ + uint32_t dscclk_dto_phase[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_PHASE from dccg31_enable_dscclk() */ + uint32_t dscclk_dto_modulo[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_MODULO from dccg31_enable_dscclk() */ + + /* Pixel Clock Control - per pipe */ + uint32_t pixclk_khz[MAX_PIPES]; /* PIXCLK_RESYNC_CNTL->PIXCLK_RESYNC_ENABLE from stream.timing.pix_clk_100hz */ + uint32_t otg_pixel_rate_div[MAX_PIPES]; /* OTG_PIXEL_RATE_DIV->OTG_PIXEL_RATE_DIV from OTG pixel rate divider control */ + uint32_t dtbclk_dto_enable[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_ENABLE from dccg31_set_dtbclk_dto() */ + uint32_t pipe_dto_src_sel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->PIPE_DTO_SRC_SEL from dccg31_set_dtbclk_dto() source selection */ + uint32_t dtbclk_dto_div[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_DIV from dtbdto_div calculation */ + uint32_t otg_add_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_ADD_PIXEL from 
dccg31_otg_add_pixel() */ + uint32_t otg_drop_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_DROP_PIXEL from dccg31_otg_drop_pixel() */ + + /* DTBCLK DTO Control - 4 DTOs */ + uint32_t dtbclk_dto_modulo[4]; /* DTBCLK_DTO0_MODULO->DTBCLK_DTO0_MODULO from dccg31_set_dtbclk_dto() modulo calculation */ + uint32_t dtbclk_dto_phase[4]; /* DTBCLK_DTO0_PHASE->DTBCLK_DTO0_PHASE from phase calculation pixclk_khz/ref_dtbclk_khz */ + uint32_t dtbclk_dto_dbuf_en; /* DTBCLK_DTO_DBUF_EN->DTBCLK DTO data buffer enable */ + + /* DP Stream Clock Control - 4 pipes */ + uint32_t dpstreamclk_enable[MAX_PIPES]; /* DPSTREAMCLK_CNTL->DPSTREAMCLK_PIPE0_EN,DPSTREAMCLK_PIPE1_EN,DPSTREAMCLK_PIPE2_EN,DPSTREAMCLK_PIPE3_EN */ + uint32_t dp_dto_modulo[4]; /* DP_DTO0_MODULO->DP_DTO0_MODULO from DP stream DTO programming */ + uint32_t dp_dto_phase[4]; /* DP_DTO0_PHASE->DP_DTO0_PHASE from DP stream DTO programming */ + uint32_t dp_dto_dbuf_en; /* DP_DTO_DBUF_EN->DP DTO data buffer enable */ + + /* PHY Symbol Clock Control - 5 PHYs (A,B,C,D,E) */ + uint32_t phy_symclk_force_en[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_EN from dccg31_set_physymclk() force_enable */ + uint32_t phy_symclk_force_src_sel[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_SRC_SEL from dccg31_set_physymclk() clk_src */ + uint32_t phy_symclk_gate_disable[5]; /* DCCG_GATE_DISABLE_CNTL2->PHYASYMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.physymclk */ + + /* SYMCLK32 SE Control - 4 instances */ + uint32_t symclk32_se_src_sel[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_SRC_SEL from dccg31_enable_symclk32_se() with get_phy_mux_symclk() mapping */ + uint32_t symclk32_se_enable[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_EN from dccg31_enable_symclk32_se() enable */ + uint32_t symclk32_se_gate_disable[4]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_SE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_se */ + + /* SYMCLK32 LE Control - 2 instances */ + uint32_t symclk32_le_src_sel[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_SRC_SEL from dccg31_enable_symclk32_le() phyd32clk source */ + uint32_t symclk32_le_enable[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_EN from dccg31_enable_symclk32_le() enable */ + uint32_t symclk32_le_gate_disable[2]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_LE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_le */ + + /* DPIA Clock Control */ + uint32_t dpiaclk_540m_dto_modulo; /* DPIACLK_540M_DTO_MODULO->DPIA 540MHz DTO modulo */ + uint32_t dpiaclk_540m_dto_phase; /* DPIACLK_540M_DTO_PHASE->DPIA 540MHz DTO phase */ + uint32_t dpiaclk_810m_dto_modulo; /* DPIACLK_810M_DTO_MODULO->DPIA 810MHz DTO modulo */ + uint32_t dpiaclk_810m_dto_phase; /* DPIACLK_810M_DTO_PHASE->DPIA 810MHz DTO phase */ + uint32_t dpiaclk_dto_cntl; /* DPIACLK_DTO_CNTL->DPIA clock DTO control */ + uint32_t dpiasymclk_cntl; /* DPIASYMCLK_CNTL->DPIA symbol clock control */ + + /* Clock Gating Control */ + uint32_t dccg_gate_disable_cntl; /* DCCG_GATE_DISABLE_CNTL->Clock gate disable control from dccg31_init() */ + uint32_t dpstreamclk_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */ + uint32_t dpstreamclk_root_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_ROOT_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */ + + /* VSync Control */ + uint32_t vsync_cnt_ctrl; /* DCCG_VSYNC_CNT_CTRL->VSync counter control */ + uint32_t vsync_cnt_int_ctrl; /* DCCG_VSYNC_CNT_INT_CTRL->VSync counter interrupt control */ + uint32_t vsync_otg_latch_value[6]; /* 
DCCG_VSYNC_OTG0_LATCH_VALUE->OTG0 VSync latch value (for OTG0-5) */ + + /* Time Base Control */ + uint32_t microsecond_time_base_div; /* MICROSECOND_TIME_BASE_DIV->Microsecond time base divider */ + uint32_t millisecond_time_base_div; /* MILLISECOND_TIME_BASE_DIV->Millisecond time base divider */ + } dccg; + + /* DSC essential configuration for underflow analysis */ + struct { + /* DSC active state - critical for bandwidth analysis */ + uint32_t dsc_clock_enable; /* DSC enabled - affects bandwidth requirements */ + + /* DSC configuration affecting bandwidth and timing */ + uint32_t dsc_num_slices_h; /* Horizontal slice count - affects throughput */ + uint32_t dsc_num_slices_v; /* Vertical slice count - affects throughput */ + uint32_t dsc_bits_per_pixel; /* Compression ratio - affects bandwidth */ + + /* OPP integration - affects pipeline flow */ + uint32_t dscrm_dsc_forward_enable; /* DSC forwarding to OPP enabled */ + uint32_t dscrm_dsc_opp_pipe_source; /* Which OPP receives DSC output */ + } dsc[MAX_PIPES]; + + /* MPC register programming variables */ + struct { + /* MPCC blending tree and mode control */ + uint32_t mpcc_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_MODE from blend_cfg.blend_mode */ + uint32_t mpcc_alpha_blend_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_BLND_MODE from blend_cfg.alpha_mode */ + uint32_t mpcc_alpha_multiplied_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_MULTIPLIED_MODE from blend_cfg.pre_multiplied_alpha */ + uint32_t mpcc_blnd_active_overlap_only[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BLND_ACTIVE_OVERLAP_ONLY from blend_cfg.overlap_only */ + uint32_t mpcc_global_alpha[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_ALPHA from blend_cfg.global_alpha */ + uint32_t mpcc_global_gain[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_GAIN from blend_cfg.global_gain */ + uint32_t mpcc_bg_bpc[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BG_BPC from background color depth */ + uint32_t mpcc_bot_gain_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BOT_GAIN_MODE from bottom layer gain control */ + + /* MPCC blending tree connections */ + uint32_t mpcc_bot_sel[MAX_PIPES]; /* MPCC_BOT_SEL->MPCC_BOT_SEL from mpcc_state->bot_sel */ + uint32_t mpcc_top_sel[MAX_PIPES]; /* MPCC_TOP_SEL->MPCC_TOP_SEL from mpcc_state->dpp_id */ + + /* MPCC output gamma control */ + uint32_t mpcc_ogam_mode[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_MODE from output gamma mode */ + uint32_t mpcc_ogam_select[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_SELECT from gamma LUT bank selection */ + uint32_t mpcc_ogam_pwl_disable[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_PWL_DISABLE from PWL control */ + + /* MPCC pipe assignment and status */ + uint32_t mpcc_opp_id[MAX_PIPES]; /* MPCC_OPP_ID->MPCC_OPP_ID from mpcc_state->opp_id */ + uint32_t mpcc_idle[MAX_PIPES]; /* MPCC_STATUS->MPCC_IDLE from mpcc idle status */ + uint32_t mpcc_busy[MAX_PIPES]; /* MPCC_STATUS->MPCC_BUSY from mpcc busy status */ + + /* MPC output processing */ + uint32_t mpc_out_csc_mode; /* MPC_OUT_CSC_COEF->MPC_OUT_CSC_MODE from output_csc */ + uint32_t mpc_out_gamma_mode; /* MPC_OUT_GAMMA_LUT->MPC_OUT_GAMMA_MODE from output_gamma */ + } mpc; + + /* OPP register programming variables for each pipe */ + struct { + /* Display Pattern Generator (DPG) Control - 19 fields from DPG_CONTROL register */ + uint32_t dpg_enable; /* DPG_CONTROL->DPG_EN from test_pattern parameter (enable/disable) */ + + /* Format Control (FMT) - 18 fields from FMT_CONTROL register */ + uint32_t fmt_pixel_encoding; /* FMT_CONTROL->FMT_PIXEL_ENCODING from clamping->pixel_encoding */ + uint32_t 
fmt_subsampling_mode; /* FMT_CONTROL->FMT_SUBSAMPLING_MODE from force_chroma_subsampling_1tap */ + uint32_t fmt_cbcr_bit_reduction_bypass; /* FMT_CONTROL->FMT_CBCR_BIT_REDUCTION_BYPASS from pixel_encoding bypass control */ + uint32_t fmt_stereosync_override; /* FMT_CONTROL->FMT_STEREOSYNC_OVERRIDE from stereo timing override */ + uint32_t fmt_spatial_dither_frame_counter_max; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX from fmt_bit_depth->flags */ + uint32_t fmt_spatial_dither_frame_counter_bit_swap; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP from dither control */ + uint32_t fmt_truncate_enable; /* FMT_CONTROL->FMT_TRUNCATE_EN from fmt_bit_depth->flags.TRUNCATE_ENABLED */ + uint32_t fmt_truncate_depth; /* FMT_CONTROL->FMT_TRUNCATE_DEPTH from fmt_bit_depth->flags.TRUNCATE_DEPTH */ + uint32_t fmt_truncate_mode; /* FMT_CONTROL->FMT_TRUNCATE_MODE from fmt_bit_depth->flags.TRUNCATE_MODE */ + uint32_t fmt_spatial_dither_enable; /* FMT_CONTROL->FMT_SPATIAL_DITHER_EN from fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED */ + uint32_t fmt_spatial_dither_mode; /* FMT_CONTROL->FMT_SPATIAL_DITHER_MODE from fmt_bit_depth->flags.SPATIAL_DITHER_MODE */ + uint32_t fmt_spatial_dither_depth; /* FMT_CONTROL->FMT_SPATIAL_DITHER_DEPTH from fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH */ + uint32_t fmt_temporal_dither_enable; /* FMT_CONTROL->FMT_TEMPORAL_DITHER_EN from fmt_bit_depth->flags.TEMPORAL_DITHER_ENABLED */ + uint32_t fmt_clamp_data_enable; /* FMT_CONTROL->FMT_CLAMP_DATA_EN from clamping->clamping_range enable */ + uint32_t fmt_clamp_color_format; /* FMT_CONTROL->FMT_CLAMP_COLOR_FORMAT from clamping->color_format */ + uint32_t fmt_dynamic_exp_enable; /* FMT_CONTROL->FMT_DYNAMIC_EXP_EN from color_sp/color_dpth/signal */ + uint32_t fmt_dynamic_exp_mode; /* FMT_CONTROL->FMT_DYNAMIC_EXP_MODE from color space mode mapping */ + uint32_t fmt_bit_depth_control; /* Legacy field - kept for compatibility */ + + /* OPP Pipe Control - 1 field from OPP_PIPE_CONTROL register */ + uint32_t opp_pipe_clock_enable; /* OPP_PIPE_CONTROL->OPP_PIPE_CLOCK_EN from enable parameter (bool) */ + + /* OPP CRC Control - 3 fields from OPP_PIPE_CRC_CONTROL register */ + uint32_t opp_crc_enable; /* OPP_PIPE_CRC_CONTROL->CRC_EN from CRC enable control */ + uint32_t opp_crc_select_source; /* OPP_PIPE_CRC_CONTROL->CRC_SELECT_SOURCE from CRC source selection */ + uint32_t opp_crc_stereo_cont; /* OPP_PIPE_CRC_CONTROL->CRC_STEREO_CONT from stereo continuous CRC */ + + /* Output Buffer (OPPBUF) Control - 6 fields from OPPBUF_CONTROL register */ + uint32_t oppbuf_active_width; /* OPPBUF_CONTROL->OPPBUF_ACTIVE_WIDTH from oppbuf_params->active_width */ + uint32_t oppbuf_pixel_repetition; /* OPPBUF_CONTROL->OPPBUF_PIXEL_REPETITION from oppbuf_params->pixel_repetition */ + uint32_t oppbuf_display_segmentation; /* OPPBUF_CONTROL->OPPBUF_DISPLAY_SEGMENTATION from oppbuf_params->mso_segmentation */ + uint32_t oppbuf_overlap_pixel_num; /* OPPBUF_CONTROL->OPPBUF_OVERLAP_PIXEL_NUM from oppbuf_params->mso_overlap_pixel_num */ + uint32_t oppbuf_3d_vact_space1_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE1_SIZE from 3D timing space1_size */ + uint32_t oppbuf_3d_vact_space2_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE2_SIZE from 3D timing space2_size */ + + /* DSC Forward Config - 3 fields from DSCRM_DSC_FORWARD_CONFIG register */ + uint32_t dscrm_dsc_forward_enable; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN from DSC forward enable control */ + uint32_t dscrm_dsc_opp_pipe_source; /* 
DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_OPP_PIPE_SOURCE from opp_pipe parameter */ + uint32_t dscrm_dsc_forward_enable_status; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN_STATUS from DSC forward status (read-only) */ + } opp[MAX_PIPES]; + + /* OPTC register programming variables for each pipe */ + struct { + uint32_t otg_master_inst; + + /* OTG_CONTROL register - 5 fields for OTG control */ + uint32_t otg_master_enable; /* OTG_CONTROL->OTG_MASTER_EN from timing enable/disable control */ + uint32_t otg_disable_point_cntl; /* OTG_CONTROL->OTG_DISABLE_POINT_CNTL from disable timing control */ + uint32_t otg_start_point_cntl; /* OTG_CONTROL->OTG_START_POINT_CNTL from start timing control */ + uint32_t otg_field_number_cntl; /* OTG_CONTROL->OTG_FIELD_NUMBER_CNTL from interlace field control */ + uint32_t otg_out_mux; /* OTG_CONTROL->OTG_OUT_MUX from output mux selection */ + + /* OTG Horizontal Timing - 7 fields */ + uint32_t otg_h_total; /* OTG_H_TOTAL->OTG_H_TOTAL from dc_crtc_timing->h_total */ + uint32_t otg_h_blank_start; /* OTG_H_BLANK_START_END->OTG_H_BLANK_START from dc_crtc_timing->h_front_porch */ + uint32_t otg_h_blank_end; /* OTG_H_BLANK_START_END->OTG_H_BLANK_END from dc_crtc_timing->h_addressable_video_pixel_width */ + uint32_t otg_h_sync_start; /* OTG_H_SYNC_A->OTG_H_SYNC_A_START from dc_crtc_timing->h_sync_width */ + uint32_t otg_h_sync_end; /* OTG_H_SYNC_A->OTG_H_SYNC_A_END from calculated sync end position */ + uint32_t otg_h_sync_polarity; /* OTG_H_SYNC_A_CNTL->OTG_H_SYNC_A_POL from dc_crtc_timing->flags.HSYNC_POSITIVE_POLARITY */ + uint32_t otg_h_timing_div_mode; /* OTG_H_TIMING_CNTL->OTG_H_TIMING_DIV_MODE from horizontal timing division mode */ + + /* OTG Vertical Timing - 7 fields */ + uint32_t otg_v_total; /* OTG_V_TOTAL->OTG_V_TOTAL from dc_crtc_timing->v_total */ + uint32_t otg_v_blank_start; /* OTG_V_BLANK_START_END->OTG_V_BLANK_START from dc_crtc_timing->v_front_porch */ + uint32_t otg_v_blank_end; /* OTG_V_BLANK_START_END->OTG_V_BLANK_END from dc_crtc_timing->v_addressable_video_line_width */ + uint32_t otg_v_sync_start; /* OTG_V_SYNC_A->OTG_V_SYNC_A_START from dc_crtc_timing->v_sync_width */ + uint32_t otg_v_sync_end; /* OTG_V_SYNC_A->OTG_V_SYNC_A_END from calculated sync end position */ + uint32_t otg_v_sync_polarity; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_A_POL from dc_crtc_timing->flags.VSYNC_POSITIVE_POLARITY */ + uint32_t otg_v_sync_mode; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_MODE from sync mode selection */ + + /* OTG DRR (Dynamic Refresh Rate) Control - 8 fields */ + uint32_t otg_v_total_max; /* OTG_V_TOTAL_MAX->OTG_V_TOTAL_MAX from drr_params->vertical_total_max */ + uint32_t otg_v_total_min; /* OTG_V_TOTAL_MIN->OTG_V_TOTAL_MIN from drr_params->vertical_total_min */ + uint32_t otg_v_total_mid; /* OTG_V_TOTAL_MID->OTG_V_TOTAL_MID from drr_params->vertical_total_mid */ + uint32_t otg_v_total_max_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MAX_SEL from DRR max selection enable */ + uint32_t otg_v_total_min_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MIN_SEL from DRR min selection enable */ + uint32_t otg_vtotal_mid_replacing_max_en; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_REPLACING_MAX_EN from DRR mid-frame enable */ + uint32_t otg_vtotal_mid_frame_num; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_FRAME_NUM from drr_params->vertical_total_mid_frame_num */ + uint32_t otg_set_v_total_min_mask; /* OTG_V_TOTAL_CONTROL->OTG_SET_V_TOTAL_MIN_MASK from DRR trigger mask */ + uint32_t otg_force_lock_on_event; /* OTG_V_TOTAL_CONTROL->OTG_FORCE_LOCK_ON_EVENT from DRR force lock control */ + + 
/* OPTC Data Source and ODM - 6 fields */ + uint32_t optc_seg0_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG0_SRC_SEL from opp_id[0] ODM segment 0 source */ + uint32_t optc_seg1_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG1_SRC_SEL from opp_id[1] ODM segment 1 source */ + uint32_t optc_seg2_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG2_SRC_SEL from opp_id[2] ODM segment 2 source */ + uint32_t optc_seg3_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG3_SRC_SEL from opp_id[3] ODM segment 3 source */ + uint32_t optc_num_of_input_segment; /* OPTC_DATA_SOURCE_SELECT->OPTC_NUM_OF_INPUT_SEGMENT from opp_cnt-1 number of input segments */ + uint32_t optc_mem_sel; /* OPTC_MEMORY_CONFIG->OPTC_MEM_SEL from memory_mask ODM memory selection */ + + /* OPTC Data Format and DSC - 4 fields */ + uint32_t optc_data_format; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DATA_FORMAT from data format selection */ + uint32_t optc_dsc_mode; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DSC_MODE from dsc_mode parameter */ + uint32_t optc_dsc_bytes_per_pixel; /* OPTC_BYTES_PER_PIXEL->OPTC_DSC_BYTES_PER_PIXEL from dsc_bytes_per_pixel parameter */ + uint32_t optc_segment_width; /* OPTC_WIDTH_CONTROL->OPTC_SEGMENT_WIDTH from segment_width parameter */ + uint32_t optc_dsc_slice_width; /* OPTC_WIDTH_CONTROL->OPTC_DSC_SLICE_WIDTH from dsc_slice_width parameter */ + + /* OPTC Clock and Underflow Control - 4 fields */ + uint32_t optc_input_pix_clk_en; /* OPTC_INPUT_CLOCK_CONTROL->OPTC_INPUT_PIX_CLK_EN from pixel clock enable */ + uint32_t optc_underflow_occurred_status; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_OCCURRED_STATUS from underflow status (read-only) */ + uint32_t optc_underflow_clear; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_CLEAR from underflow clear control */ + uint32_t otg_clock_enable; /* OTG_CLOCK_CONTROL->OTG_CLOCK_EN from OTG clock enable */ + uint32_t otg_clock_gate_dis; /* OTG_CLOCK_CONTROL->OTG_CLOCK_GATE_DIS from clock gate disable */ + + /* OTG Stereo and 3D Control - 6 fields */ + uint32_t otg_stereo_enable; /* OTG_STEREO_CONTROL->OTG_STEREO_EN from stereo enable control */ + uint32_t otg_stereo_sync_output_line_num; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_LINE_NUM from timing->stereo_3d_format line num */ + uint32_t otg_stereo_sync_output_polarity; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_POLARITY from stereo polarity control */ + uint32_t otg_3d_structure_en; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_EN from 3D structure enable */ + uint32_t otg_3d_structure_v_update_mode; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_V_UPDATE_MODE from 3D vertical update mode */ + uint32_t otg_3d_structure_stereo_sel_ovr; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_STEREO_SEL_OVR from 3D stereo selection override */ + uint32_t otg_interlace_enable; /* OTG_INTERLACE_CONTROL->OTG_INTERLACE_ENABLE from dc_crtc_timing->flags.INTERLACE */ + + /* OTG GSL (Global Sync Lock) Control - 5 fields */ + uint32_t otg_gsl0_en; /* OTG_GSL_CONTROL->OTG_GSL0_EN from GSL group 0 enable */ + uint32_t otg_gsl1_en; /* OTG_GSL_CONTROL->OTG_GSL1_EN from GSL group 1 enable */ + uint32_t otg_gsl2_en; /* OTG_GSL_CONTROL->OTG_GSL2_EN from GSL group 2 enable */ + uint32_t otg_gsl_master_en; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_EN from GSL master enable */ + uint32_t otg_gsl_master_mode; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_MODE from gsl_params->gsl_master mode */ + + /* OTG DRR Advanced Control - 4 fields */ + uint32_t otg_v_total_last_used_by_drr; /* OTG_DRR_CONTROL->OTG_V_TOTAL_LAST_USED_BY_DRR from last used DRR V_TOTAL (read-only) */ + 
uint32_t otg_drr_trigger_window_start_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_START_X from window_start parameter */ + uint32_t otg_drr_trigger_window_end_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_END_X from window_end parameter */ + uint32_t otg_drr_v_total_change_limit; /* OTG_DRR_V_TOTAL_CHANGE->OTG_DRR_V_TOTAL_CHANGE_LIMIT from limit parameter */ + + /* OTG DSC Position Control - 2 fields */ + uint32_t otg_dsc_start_position_x; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_X from DSC start X position */ + uint32_t otg_dsc_start_position_line_num; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_LINE_NUM from DSC start line number */ + + /* OTG Double Buffer Control - 2 fields */ + uint32_t otg_drr_timing_dbuf_update_mode; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_DRR_TIMING_DBUF_UPDATE_MODE from DRR double buffer mode */ + uint32_t otg_blank_data_double_buffer_en; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_BLANK_DATA_DOUBLE_BUFFER_EN from blank data double buffer enable */ + + /* OTG Vertical Interrupts - 6 fields */ + uint32_t otg_vertical_interrupt0_int_enable; /* OTG_VERTICAL_INTERRUPT0_CONTROL->OTG_VERTICAL_INTERRUPT0_INT_ENABLE from interrupt 0 enable */ + uint32_t otg_vertical_interrupt0_line_start; /* OTG_VERTICAL_INTERRUPT0_POSITION->OTG_VERTICAL_INTERRUPT0_LINE_START from start_line parameter */ + uint32_t otg_vertical_interrupt1_int_enable; /* OTG_VERTICAL_INTERRUPT1_CONTROL->OTG_VERTICAL_INTERRUPT1_INT_ENABLE from interrupt 1 enable */ + uint32_t otg_vertical_interrupt1_line_start; /* OTG_VERTICAL_INTERRUPT1_POSITION->OTG_VERTICAL_INTERRUPT1_LINE_START from start_line parameter */ + uint32_t otg_vertical_interrupt2_int_enable; /* OTG_VERTICAL_INTERRUPT2_CONTROL->OTG_VERTICAL_INTERRUPT2_INT_ENABLE from interrupt 2 enable */ + uint32_t otg_vertical_interrupt2_line_start; /* OTG_VERTICAL_INTERRUPT2_POSITION->OTG_VERTICAL_INTERRUPT2_LINE_START from start_line parameter */ + + /* OTG Global Sync Parameters - 6 fields */ + uint32_t otg_vready_offset; /* OTG_VREADY_PARAM->OTG_VREADY_OFFSET from vready_offset parameter */ + uint32_t otg_vstartup_start; /* OTG_VSTARTUP_PARAM->OTG_VSTARTUP_START from vstartup_start parameter */ + uint32_t otg_vupdate_offset; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_OFFSET from vupdate_offset parameter */ + uint32_t otg_vupdate_width; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_WIDTH from vupdate_width parameter */ + uint32_t master_update_lock_vupdate_keepout_start_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET from pstate_keepout start */ + uint32_t master_update_lock_vupdate_keepout_end_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET from pstate_keepout end */ + + /* OTG Manual Trigger Control - 11 fields */ + uint32_t otg_triga_source_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_SELECT from trigger A source selection */ + uint32_t otg_triga_source_pipe_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_PIPE_SELECT from trigger A pipe selection */ + uint32_t otg_triga_rising_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_RISING_EDGE_DETECT_CNTL from trigger A rising edge detect */ + uint32_t otg_triga_falling_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_FALLING_EDGE_DETECT_CNTL from trigger A falling edge detect */ + uint32_t otg_triga_polarity_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_POLARITY_SELECT from trigger A polarity selection */ + uint32_t otg_triga_frequency_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_FREQUENCY_SELECT from trigger A frequency selection */ + uint32_t otg_triga_delay; /* 
OTG_TRIGA_CNTL->OTG_TRIGA_DELAY from trigger A delay */ + uint32_t otg_triga_clear; /* OTG_TRIGA_CNTL->OTG_TRIGA_CLEAR from trigger A clear */ + uint32_t otg_triga_manual_trig; /* OTG_TRIGA_MANUAL_TRIG->OTG_TRIGA_MANUAL_TRIG from manual trigger A */ + uint32_t otg_trigb_source_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_SOURCE_SELECT from trigger B source selection */ + uint32_t otg_trigb_polarity_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_POLARITY_SELECT from trigger B polarity selection */ + uint32_t otg_trigb_manual_trig; /* OTG_TRIGB_MANUAL_TRIG->OTG_TRIGB_MANUAL_TRIG from manual trigger B */ + + /* OTG Static Screen and Update Control - 6 fields */ + uint32_t otg_static_screen_event_mask; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_EVENT_MASK from event_triggers parameter */ + uint32_t otg_static_screen_frame_count; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_FRAME_COUNT from num_frames parameter */ + uint32_t master_update_lock; /* OTG_MASTER_UPDATE_LOCK->MASTER_UPDATE_LOCK from update lock control */ + uint32_t master_update_mode; /* OTG_MASTER_UPDATE_MODE->MASTER_UPDATE_MODE from update mode selection */ + uint32_t otg_force_count_now_mode; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_MODE from force count mode */ + uint32_t otg_force_count_now_clear; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_CLEAR from force count clear */ + + /* VTG Control - 3 fields */ + uint32_t vtg0_enable; /* CONTROL->VTG0_ENABLE from VTG enable control */ + uint32_t vtg0_fp2; /* CONTROL->VTG0_FP2 from VTG front porch 2 */ + uint32_t vtg0_vcount_init; /* CONTROL->VTG0_VCOUNT_INIT from VTG vertical count init */ + + /* OTG Status (Read-Only) - 12 fields */ + uint32_t otg_v_blank; /* OTG_STATUS->OTG_V_BLANK from vertical blank status (read-only) */ + uint32_t otg_v_active_disp; /* OTG_STATUS->OTG_V_ACTIVE_DISP from vertical active display (read-only) */ + uint32_t otg_frame_count; /* OTG_STATUS_FRAME_COUNT->OTG_FRAME_COUNT from frame count (read-only) */ + uint32_t otg_horz_count; /* OTG_STATUS_POSITION->OTG_HORZ_COUNT from horizontal position (read-only) */ + uint32_t otg_vert_count; /* OTG_STATUS_POSITION->OTG_VERT_COUNT from vertical position (read-only) */ + uint32_t otg_horz_count_hv; /* OTG_STATUS_HV_COUNT->OTG_HORZ_COUNT from horizontal count (read-only) */ + uint32_t otg_vert_count_nom; /* OTG_STATUS_HV_COUNT->OTG_VERT_COUNT_NOM from vertical count nominal (read-only) */ + uint32_t otg_flip_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_FLIP_PENDING from flip pending status (read-only) */ + uint32_t otg_dc_reg_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_DC_REG_UPDATE_PENDING from DC register update pending (read-only) */ + uint32_t otg_cursor_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_CURSOR_UPDATE_PENDING from cursor update pending (read-only) */ + uint32_t otg_vupdate_keepout_status; /* OTG_PIPE_UPDATE_STATUS->OTG_VUPDATE_KEEPOUT_STATUS from VUPDATE keepout status (read-only) */ + } optc[MAX_PIPES]; + + /* Metadata */ + uint32_t active_pipe_count; + uint32_t active_stream_count; + bool state_valid; +}; + +/** + * dc_capture_register_software_state() - Capture software state for register programming + * @dc: DC context containing current display configuration + * @state: Pointer to dc_register_software_state structure to populate + * + * Extracts all software state variables that are used to program hardware register + * fields across the display driver pipeline. This provides a complete snapshot + * of the software configuration that drives hardware register programming. 
+ * + * The function traverses the DC context and extracts values from: + * - Stream configurations (timing, format, DSC settings) + * - Plane states (surface format, rotation, scaling, cursor) + * - Pipe contexts (resource allocation, blending, viewport) + * - Clock manager (display clocks, DPP clocks, pixel clocks) + * - Resource context (DET buffer allocation, ODM configuration) + * + * This is essential for underflow debugging as it captures the exact software + * state that determines how registers are programmed, allowing analysis of + * whether underflow is caused by incorrect register programming or timing issues. + * + * Return: true if state was successfully captured, false on error + */ +bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state); + #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 5fa5e2b63fb7..40d7a7d83c40 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -91,9 +91,17 @@ struct dc_vbios_funcs { struct device_id id); /* COMMANDS */ + enum bp_result (*select_crtc_source)( + struct dc_bios *bios, + struct bp_crtc_source_select *bp_params); enum bp_result (*encoder_control)( struct dc_bios *bios, struct bp_encoder_control *cntl); + enum bp_result (*dac_load_detection)( + struct dc_bios *bios, + enum engine_id engine_id, + enum dal_device_type device_type, + uint32_t enum_id); enum bp_result (*transmitter_control)( struct dc_bios *bios, struct bp_transmitter_control *cntl); @@ -165,6 +173,7 @@ struct dc_vbios_funcs { }; struct bios_registers { + uint32_t BIOS_SCRATCH_0; uint32_t BIOS_SCRATCH_3; uint32_t BIOS_SCRATCH_6; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 53a088ebddef..7b09af1cb306 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -442,7 +442,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru int i = 0, k = 0; int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it. 
uint8_t visual_confirm_enabled; - int pipe_idx = 0; struct dc_stream_status *stream_status = NULL; if (dc == NULL) @@ -457,7 +456,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled; if (should_manage_pstate) { - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (!pipe->stream) @@ -472,7 +471,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us; break; } - pipe_idx++; } } @@ -872,7 +870,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, bool enable) { uint8_t cmd_pipe_index = 0; - uint32_t i, pipe_idx; + uint32_t i; uint8_t subvp_count = 0; union dmub_rb_cmd cmd; struct pipe_ctx *subvp_pipes[2]; @@ -899,7 +897,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, if (enable) { // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); @@ -922,7 +920,6 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); } - pipe_idx++; } if (subvp_count == 2) { update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes); @@ -1174,6 +1171,100 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index); } +void dc_dmub_srv_cursor_offload_init(struct dc *dc) +{ + struct dmub_rb_cmd_cursor_offload_init *init; + struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; + union dmub_rb_cmd cmd; + + if (!dc->config.enable_cursor_offload) + return; + + if (!dc_dmub_srv->dmub->meta_info.feature_bits.bits.cursor_offload_v1_support) + return; + + if (!dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr || !dc_dmub_srv->dmub->cursor_offload_fb.cpu_addr) + return; + + if (!dc_dmub_srv->dmub->cursor_offload_v1) + return; + + if (!dc_dmub_srv->dmub->shared_state) + return; + + memset(&cmd, 0, sizeof(cmd)); + + init = &cmd.cursor_offload_init; + init->header.type = DMUB_CMD__CURSOR_OFFLOAD; + init->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_INIT; + init->header.payload_bytes = sizeof(init->init_data); + init->init_data.state_addr.quad_part = dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr; + init->init_data.state_size = dc_dmub_srv->dmub->cursor_offload_fb.size; + + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + dc_dmub_srv->cursor_offload_enabled = true; +} + +void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream, bool enable) +{ + struct pipe_ctx const *pipe_ctx; + struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl; + union dmub_rb_cmd cmd; + + if (!dc_dmub_srv_is_cursor_offload_enabled(dc)) + return; + + if (!stream) + return; + + pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); + if (!pipe_ctx || !pipe_ctx->stream_res.tg || pipe_ctx->stream != stream) + return; + + memset(&cmd, 0, sizeof(cmd)); + + cntl = &cmd.cursor_offload_stream_ctnl; + cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD; + 
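+ /*
+  * The payload filled in below carries the scanline period derived from
+  * the stream timing:
+  *
+  *   line_time_in_ns = 1 + (h_total * 1000000) / (pix_clk_100hz / 10)
+  *
+  * where pix_clk_100hz / 10 is the pixel clock in kHz. As an illustrative
+  * example (standard 1080p60 CEA numbers, not taken from this change):
+  * h_total = 2200 with a 148.5 MHz pixel clock gives
+  * 1 + 2200 * 1000000 / 148500, i.e. about 14815 ns per line.
+  * v_total_max is clamped so it is never below the nominal timing.v_total.
+  */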
cntl->header.sub_type = + enable ? DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE : DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE; + cntl->header.payload_bytes = sizeof(cntl->data); + + cntl->data.otg_inst = pipe_ctx->stream_res.tg->inst; + cntl->data.line_time_in_ns = 1u + (uint32_t)(div64_u64(stream->timing.h_total * 1000000ull, + stream->timing.pix_clk_100hz / 10)); + + cntl->data.v_total_max = stream->adjust.v_total_max > stream->timing.v_total ? + stream->adjust.v_total_max : + stream->timing.v_total; + + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, + enable ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT); +} + +void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe) +{ + struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl; + union dmub_rb_cmd cmd; + + if (!dc_dmub_srv_is_cursor_offload_enabled(dc)) + return; + + if (!pipe || !pipe->stream || !pipe->stream_res.tg) + return; + + memset(&cmd, 0, sizeof(cmd)); + + cntl = &cmd.cursor_offload_stream_ctnl; + cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD; + cntl->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM; + cntl->header.payload_bytes = sizeof(cntl->data); + cntl->data.otg_inst = pipe->stream_res.tg->inst; + + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); +} + bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) { struct dc_context *dc_ctx; @@ -1993,6 +2084,9 @@ bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv) struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; bool result; + if (!dc_dmub_srv->dmub->feature_caps.lsdma_support_in_dmu) + return false; + memset(&cmd, 0, sizeof(cmd)); cmd.cmd_common.header.type = DMUB_CMD__LSDMA; @@ -2231,6 +2325,11 @@ bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uin return result; } +bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc) +{ + return dc->ctx->dmub_srv && dc->ctx->dmub_srv->cursor_offload_enabled; +} + void dc_dmub_srv_release_hw(const struct dc *dc) { struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; @@ -2248,3 +2347,24 @@ void dc_dmub_srv_release_hw(const struct dc *dc) dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } + +void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return; + + dmub = dc_dmub_srv->dmub; + + if (dmub_srv_get_preos_info(dmub)) { + DC_LOG_DEBUG("%s: PreOS DMCUB Info", __func__); + DC_LOG_DEBUG("fw_version : 0x%08x", dmub->preos_info.fw_version); + DC_LOG_DEBUG("boot_options : 0x%08x", dmub->preos_info.boot_options); + DC_LOG_DEBUG("boot_status : 0x%08x", dmub->preos_info.boot_status); + DC_LOG_DEBUG("trace_buffer_phy_addr : 0x%016llx", dmub->preos_info.trace_buffer_phy_addr); + DC_LOG_DEBUG("trace_buffer_size_bytes : 0x%08x", dmub->preos_info.trace_buffer_size); + DC_LOG_DEBUG("fb_base : 0x%016llx", dmub->preos_info.fb_base); + DC_LOG_DEBUG("fb_offset : 0x%016llx", dmub->preos_info.fb_offset); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 7ef93444ef3c..72e0a41f39f0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -56,6 +56,7 @@ struct dc_dmub_srv { union dmub_shared_state_ips_driver_signals driver_signals; bool idle_allowed; bool needs_idle_wake; + bool cursor_offload_enabled; }; bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv); @@ -325,10 +326,52 @@ bool 
dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t struct dmub_ips_residency_info *driver_info, enum ips_residency_mode ips_mode); +/** + * dc_dmub_srv_cursor_offload_init() - Initializes cursor offload support in the DMUB firmware, if available. + * + * @dc: pointer to DC object + */ +void dc_dmub_srv_cursor_offload_init(struct dc *dc); + +/** + * dc_dmub_srv_control_cursor_offload() - Enables or disables cursor offloading for a stream. + * + * @dc: pointer to DC object + * @context: the DC context to reference for pipe allocations + * @stream: the stream to control + * @enable: true to enable cursor offload, false to disable + */ +void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream, bool enable); + +/** + * dc_dmub_srv_program_cursor_now() - Requests immediate cursor programming for a given pipe. + * + * @dc: pointer to DC object + * @pipe: top-most pipe for a stream. + */ +void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe); + +/** + * dc_dmub_srv_is_cursor_offload_enabled() - Checks if cursor offload has been enabled. + * + * @dc: pointer to DC object + * + * Return: true if cursor offload is enabled, false otherwise + */ +bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc); + /** * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required. * * @dc - pointer to DC object */ void dc_dmub_srv_release_hw(const struct dc *dc); + +/** + * dc_dmub_srv_log_preos_dmcub_info() - Logs pre-OS DMCUB firmware info. + * + * @dc_dmub_srv - pointer to the DC DMUB service + */ +void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index db669ccb1d58..79e1696def63 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -1157,6 +1157,16 @@ struct dprx_states { bool cable_id_written; }; +union dpcd_panel_replay_capability_supported { + struct { + unsigned char PANEL_REPLAY_SUPPORT :1; + unsigned char SELECTIVE_UPDATE_SUPPORT :1; + unsigned char EARLY_TRANSPORT_SUPPORT :1; + unsigned char RESERVED :5; + } bits; + unsigned char raw; +}; + enum dpcd_downstream_port_max_bpc { DOWN_STREAM_MAX_8BPC = 0, DOWN_STREAM_MAX_10BPC, @@ -1280,6 +1290,7 @@ struct dpcd_caps { struct edp_psr_info psr_info; struct replay_info pr_info; + union dpcd_panel_replay_capability_supported pr_caps_supported; uint16_t edp_oled_emission_rate; union dp_receive_port0_cap receive_port0_cap; /* Indicates the number of SST links supported by MSO (Multi-Stream Output) */ @@ -1346,6 +1357,31 @@ union dpcd_replay_configuration { unsigned char raw; }; +union panel_replay_enable_and_configuration_1 { + struct { + unsigned char PANEL_REPLAY_ENABLE :1; + unsigned char PANEL_REPLAY_CRC_ENABLE :1; + unsigned char IRQ_HPD_ASSDP_MISSING :1; + unsigned char IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR :1; + unsigned char IRQ_HPD_RFB_ERROR :1; + unsigned char IRQ_HPD_ACTIVE_FRAME_CRC_ERROR :1; + unsigned char PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE :1; + unsigned char PANEL_REPLAY_EARLY_TRANSPORT_ENABLE :1; + } bits; + unsigned char raw; +}; + +union panel_replay_enable_and_configuration_2 { + struct { + unsigned char SINK_REFRESH_RATE_UNLOCK_GRANTED :1; + unsigned char RESERVED :1; + unsigned char SU_Y_GRANULARITY_EXT_VALUE_ENABLED :1; + unsigned char SU_Y_GRANULARITY_EXT_VALUE :4; + unsigned char SU_REGION_SCAN_LINE_CAPTURE_INDICATION :1; + }
bits; + unsigned char raw; +}; + union dpcd_alpm_configuration { struct { unsigned char ENABLE : 1; diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index 55704d4457ef..37d1a79e8241 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -147,6 +147,8 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->prefer_easf = false; else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2) spl_in->disable_easf = true; + else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 3) + spl_in->override_easf = true; /* Translate adaptive sharpening preference */ unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness; unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 76cf9fdedab0..321cfe92d799 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -473,12 +473,11 @@ void dc_enable_stereo( /* Triggers multi-stream synchronization. */ void dc_trigger_sync(struct dc *dc, struct dc_state *context); -enum surface_update_type dc_check_update_surfaces_for_stream( - struct dc *dc, +struct surface_update_descriptor dc_check_update_surfaces_for_stream( + const struct dc_check_config *check_config, struct dc_surface_update *updates, int surface_count, - struct dc_stream_update *stream_update, - const struct dc_stream_status *stream_status); + struct dc_stream_update *stream_update); /** * Create a new default stream for the requested sink @@ -492,8 +491,8 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink); void dc_stream_retain(struct dc_stream_state *dc_stream); void dc_stream_release(struct dc_stream_state *dc_stream); -struct dc_stream_status *dc_stream_get_status( - struct dc_stream_state *dc_stream); +struct dc_stream_status *dc_stream_get_status(struct dc_stream_state *dc_stream); +const struct dc_stream_status *dc_stream_get_status_const(const struct dc_stream_state *dc_stream); /******************************************************************************* * Cursor interfaces - To manages the cursor within a stream diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index b5aa03a3e39c..f46039f64203 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -185,6 +185,10 @@ struct dc_panel_patch { unsigned int wait_after_dpcd_poweroff_ms; }; +/** + * struct dc_edid_caps - Capabilities read from EDID. + * @analog: Whether the monitor is analog. Used by DVI-I handling. 
+ */ struct dc_edid_caps { /* sink identification */ uint16_t manufacturer_id; @@ -212,6 +216,8 @@ struct dc_edid_caps { bool edid_hdmi; bool hdr_supported; bool rr_capable; + bool scdc_present; + bool analog; struct dc_panel_patch panel_patch; }; @@ -347,7 +353,8 @@ enum dc_connection_type { dc_connection_none, dc_connection_single, dc_connection_mst_branch, - dc_connection_sst_branch + dc_connection_sst_branch, + dc_connection_dac_load }; struct dc_csc_adjustments { @@ -934,6 +941,12 @@ enum dc_psr_version { DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF, }; +enum dc_replay_version { + DC_FREESYNC_REPLAY = 0, + DC_VESA_PANEL_REPLAY = 1, + DC_REPLAY_VERSION_UNSUPPORTED = 0XFF, +}; + /* Possible values of display_endpoint_id.endpoint */ enum display_endpoint_type { DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */ @@ -1086,6 +1099,7 @@ enum replay_FW_Message_type { Replay_Set_Residency_Frameupdate_Timer, Replay_Set_Pseudo_VTotal, Replay_Disabled_Adaptive_Sync_SDP, + Replay_Set_Version, Replay_Set_General_Cmd, }; @@ -1121,6 +1135,8 @@ union replay_low_refresh_rate_enable_options { }; struct replay_config { + /* Replay version */ + enum dc_replay_version replay_version; /* Replay feature is supported */ bool replay_supported; /* Replay caps support DPCD & EDID caps*/ @@ -1177,6 +1193,10 @@ struct replay_settings { uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; /* Defer Update Coasting vtotal table */ uint32_t defer_update_coasting_vtotal_table[PR_COASTING_TYPE_NUM]; + /* Skip frame number table */ + uint32_t frame_skip_number_table[PR_COASTING_TYPE_NUM]; + /* Defer skip frame number table */ + uint32_t defer_frame_skip_number_table[PR_COASTING_TYPE_NUM]; /* Maximum link off frame count */ uint32_t link_off_frame_count; /* Replay pseudo vtotal for low refresh rate*/ @@ -1185,6 +1205,8 @@ struct replay_settings { uint16_t last_pseudo_vtotal; /* Replay desync error */ uint32_t replay_desync_error_fail_count; + /* The frame skip number dal send to DMUB */ + uint16_t frame_skip_number; }; /* To split out "global" and "per-panel" config settings. 
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c index 5999b2da3a01..33d8bd91cb01 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c @@ -148,7 +148,7 @@ struct dccg *dccg2_create( const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask) { - struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC); + struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL); struct dccg *base; if (dccg_dcn == NULL) { diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h index a9b88f5e0c04..8bdffd9ff31b 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h @@ -425,7 +425,69 @@ struct dccg_mask { uint32_t SYMCLKD_CLOCK_ENABLE; \ uint32_t SYMCLKE_CLOCK_ENABLE; \ uint32_t DP_DTO_MODULO[MAX_PIPES]; \ - uint32_t DP_DTO_PHASE[MAX_PIPES] + uint32_t DP_DTO_PHASE[MAX_PIPES]; \ + uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL; \ + uint32_t DCCG_AUDIO_DTO0_MODULE; \ + uint32_t DCCG_AUDIO_DTO0_PHASE; \ + uint32_t DCCG_AUDIO_DTO1_MODULE; \ + uint32_t DCCG_AUDIO_DTO1_PHASE; \ + uint32_t DCCG_CAC_STATUS; \ + uint32_t DCCG_CAC_STATUS2; \ + uint32_t DCCG_DISP_CNTL_REG; \ + uint32_t DCCG_DS_CNTL; \ + uint32_t DCCG_DS_DTO_INCR; \ + uint32_t DCCG_DS_DTO_MODULO; \ + uint32_t DCCG_DS_HW_CAL_INTERVAL; \ + uint32_t DCCG_GTC_CNTL; \ + uint32_t DCCG_GTC_CURRENT; \ + uint32_t DCCG_GTC_DTO_INCR; \ + uint32_t DCCG_GTC_DTO_MODULO; \ + uint32_t DCCG_PERFMON_CNTL; \ + uint32_t DCCG_PERFMON_CNTL2; \ + uint32_t DCCG_SOFT_RESET; \ + uint32_t DCCG_TEST_CLK_SEL; \ + uint32_t DCCG_VSYNC_CNT_CTRL; \ + uint32_t DCCG_VSYNC_CNT_INT_CTRL; \ + uint32_t DCCG_VSYNC_OTG0_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG1_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG2_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG3_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG4_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG5_LATCH_VALUE; \ + uint32_t DISPCLK_CGTT_BLK_CTRL_REG; \ + uint32_t DP_DTO_DBUF_EN; \ + uint32_t DPIACLK_540M_DTO_MODULO; \ + uint32_t DPIACLK_540M_DTO_PHASE; \ + uint32_t DPIACLK_810M_DTO_MODULO; \ + uint32_t DPIACLK_810M_DTO_PHASE; \ + uint32_t DPIACLK_DTO_CNTL; \ + uint32_t DPIASYMCLK_CNTL; \ + uint32_t DPPCLK_CGTT_BLK_CTRL_REG; \ + uint32_t DPREFCLK_CGTT_BLK_CTRL_REG; \ + uint32_t DPREFCLK_CNTL; \ + uint32_t DTBCLK_DTO_DBUF_EN; \ + uint32_t FORCE_SYMCLK_DISABLE; \ + uint32_t HDMICHARCLK0_CLOCK_CNTL; \ + uint32_t MICROSECOND_TIME_BASE_DIV; \ + uint32_t MILLISECOND_TIME_BASE_DIV; \ + uint32_t OTG0_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG0_PIXEL_RATE_CNTL; \ + uint32_t OTG1_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG1_PIXEL_RATE_CNTL; \ + uint32_t OTG2_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG2_PIXEL_RATE_CNTL; \ + uint32_t OTG3_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG3_PIXEL_RATE_CNTL; \ + uint32_t PHYPLLA_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLB_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLC_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLD_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLE_PIXCLK_RESYNC_CNTL; \ + uint32_t REFCLK_CGTT_BLK_CTRL_REG; \ + uint32_t SOCCLK_CGTT_BLK_CTRL_REG; \ + uint32_t SYMCLK_CGTT_BLK_CTRL_REG; \ + uint32_t SYMCLK_PSP_CNTL + struct dccg_registers { DCCG_REG_VARIABLE_LIST; }; diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c index 8664f0c4c9b7..97df04b7e39d 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c @@ -709,6 +709,128 @@ void dccg31_otg_drop_pixel(struct dccg *dccg, OTG_DROP_PIXEL[otg_inst], 1); } +void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + dccg_reg_state->dc_mem_global_pwr_req_cntl = REG_READ(DC_MEM_GLOBAL_PWR_REQ_CNTL); + dccg_reg_state->dccg_audio_dtbclk_dto_modulo = REG_READ(DCCG_AUDIO_DTBCLK_DTO_MODULO); + dccg_reg_state->dccg_audio_dtbclk_dto_phase = REG_READ(DCCG_AUDIO_DTBCLK_DTO_PHASE); + dccg_reg_state->dccg_audio_dto_source = REG_READ(DCCG_AUDIO_DTO_SOURCE); + dccg_reg_state->dccg_audio_dto0_module = REG_READ(DCCG_AUDIO_DTO0_MODULE); + dccg_reg_state->dccg_audio_dto0_phase = REG_READ(DCCG_AUDIO_DTO0_PHASE); + dccg_reg_state->dccg_audio_dto1_module = REG_READ(DCCG_AUDIO_DTO1_MODULE); + dccg_reg_state->dccg_audio_dto1_phase = REG_READ(DCCG_AUDIO_DTO1_PHASE); + dccg_reg_state->dccg_cac_status = REG_READ(DCCG_CAC_STATUS); + dccg_reg_state->dccg_cac_status2 = REG_READ(DCCG_CAC_STATUS2); + dccg_reg_state->dccg_disp_cntl_reg = REG_READ(DCCG_DISP_CNTL_REG); + dccg_reg_state->dccg_ds_cntl = REG_READ(DCCG_DS_CNTL); + dccg_reg_state->dccg_ds_dto_incr = REG_READ(DCCG_DS_DTO_INCR); + dccg_reg_state->dccg_ds_dto_modulo = REG_READ(DCCG_DS_DTO_MODULO); + dccg_reg_state->dccg_ds_hw_cal_interval = REG_READ(DCCG_DS_HW_CAL_INTERVAL); + dccg_reg_state->dccg_gate_disable_cntl = REG_READ(DCCG_GATE_DISABLE_CNTL); + dccg_reg_state->dccg_gate_disable_cntl2 = REG_READ(DCCG_GATE_DISABLE_CNTL2); + dccg_reg_state->dccg_gate_disable_cntl3 = REG_READ(DCCG_GATE_DISABLE_CNTL3); + dccg_reg_state->dccg_gate_disable_cntl4 = REG_READ(DCCG_GATE_DISABLE_CNTL4); + dccg_reg_state->dccg_gate_disable_cntl5 = REG_READ(DCCG_GATE_DISABLE_CNTL5); + dccg_reg_state->dccg_gate_disable_cntl6 = REG_READ(DCCG_GATE_DISABLE_CNTL6); + dccg_reg_state->dccg_global_fgcg_rep_cntl = REG_READ(DCCG_GLOBAL_FGCG_REP_CNTL); + dccg_reg_state->dccg_gtc_cntl = REG_READ(DCCG_GTC_CNTL); + dccg_reg_state->dccg_gtc_current = REG_READ(DCCG_GTC_CURRENT); + dccg_reg_state->dccg_gtc_dto_incr = REG_READ(DCCG_GTC_DTO_INCR); + dccg_reg_state->dccg_gtc_dto_modulo = REG_READ(DCCG_GTC_DTO_MODULO); + dccg_reg_state->dccg_perfmon_cntl = REG_READ(DCCG_PERFMON_CNTL); + dccg_reg_state->dccg_perfmon_cntl2 = REG_READ(DCCG_PERFMON_CNTL2); + dccg_reg_state->dccg_soft_reset = REG_READ(DCCG_SOFT_RESET); + dccg_reg_state->dccg_test_clk_sel = REG_READ(DCCG_TEST_CLK_SEL); + dccg_reg_state->dccg_vsync_cnt_ctrl = REG_READ(DCCG_VSYNC_CNT_CTRL); + dccg_reg_state->dccg_vsync_cnt_int_ctrl = REG_READ(DCCG_VSYNC_CNT_INT_CTRL); + dccg_reg_state->dccg_vsync_otg0_latch_value = REG_READ(DCCG_VSYNC_OTG0_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg1_latch_value = REG_READ(DCCG_VSYNC_OTG1_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg2_latch_value = REG_READ(DCCG_VSYNC_OTG2_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg3_latch_value = REG_READ(DCCG_VSYNC_OTG3_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg4_latch_value = REG_READ(DCCG_VSYNC_OTG4_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg5_latch_value = REG_READ(DCCG_VSYNC_OTG5_LATCH_VALUE); + dccg_reg_state->dispclk_cgtt_blk_ctrl_reg = REG_READ(DISPCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->dispclk_freq_change_cntl = REG_READ(DISPCLK_FREQ_CHANGE_CNTL); + dccg_reg_state->dp_dto_dbuf_en = REG_READ(DP_DTO_DBUF_EN); + dccg_reg_state->dp_dto0_modulo = REG_READ(DP_DTO_MODULO[0]); + dccg_reg_state->dp_dto0_phase = 
REG_READ(DP_DTO_PHASE[0]); + dccg_reg_state->dp_dto1_modulo = REG_READ(DP_DTO_MODULO[1]); + dccg_reg_state->dp_dto1_phase = REG_READ(DP_DTO_PHASE[1]); + dccg_reg_state->dp_dto2_modulo = REG_READ(DP_DTO_MODULO[2]); + dccg_reg_state->dp_dto2_phase = REG_READ(DP_DTO_PHASE[2]); + dccg_reg_state->dp_dto3_modulo = REG_READ(DP_DTO_MODULO[3]); + dccg_reg_state->dp_dto3_phase = REG_READ(DP_DTO_PHASE[3]); + dccg_reg_state->dpiaclk_540m_dto_modulo = REG_READ(DPIACLK_540M_DTO_MODULO); + dccg_reg_state->dpiaclk_540m_dto_phase = REG_READ(DPIACLK_540M_DTO_PHASE); + dccg_reg_state->dpiaclk_810m_dto_modulo = REG_READ(DPIACLK_810M_DTO_MODULO); + dccg_reg_state->dpiaclk_810m_dto_phase = REG_READ(DPIACLK_810M_DTO_PHASE); + dccg_reg_state->dpiaclk_dto_cntl = REG_READ(DPIACLK_DTO_CNTL); + dccg_reg_state->dpiasymclk_cntl = REG_READ(DPIASYMCLK_CNTL); + dccg_reg_state->dppclk_cgtt_blk_ctrl_reg = REG_READ(DPPCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->dppclk_ctrl = REG_READ(DPPCLK_CTRL); + dccg_reg_state->dppclk_dto_ctrl = REG_READ(DPPCLK_DTO_CTRL); + dccg_reg_state->dppclk0_dto_param = REG_READ(DPPCLK_DTO_PARAM[0]); + dccg_reg_state->dppclk1_dto_param = REG_READ(DPPCLK_DTO_PARAM[1]); + dccg_reg_state->dppclk2_dto_param = REG_READ(DPPCLK_DTO_PARAM[2]); + dccg_reg_state->dppclk3_dto_param = REG_READ(DPPCLK_DTO_PARAM[3]); + dccg_reg_state->dprefclk_cgtt_blk_ctrl_reg = REG_READ(DPREFCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->dprefclk_cntl = REG_READ(DPREFCLK_CNTL); + dccg_reg_state->dpstreamclk_cntl = REG_READ(DPSTREAMCLK_CNTL); + dccg_reg_state->dscclk_dto_ctrl = REG_READ(DSCCLK_DTO_CTRL); + dccg_reg_state->dscclk0_dto_param = REG_READ(DSCCLK0_DTO_PARAM); + dccg_reg_state->dscclk1_dto_param = REG_READ(DSCCLK1_DTO_PARAM); + dccg_reg_state->dscclk2_dto_param = REG_READ(DSCCLK2_DTO_PARAM); + dccg_reg_state->dscclk3_dto_param = REG_READ(DSCCLK3_DTO_PARAM); + dccg_reg_state->dtbclk_dto_dbuf_en = REG_READ(DTBCLK_DTO_DBUF_EN); + dccg_reg_state->dtbclk_dto0_modulo = REG_READ(DTBCLK_DTO_MODULO[0]); + dccg_reg_state->dtbclk_dto0_phase = REG_READ(DTBCLK_DTO_PHASE[0]); + dccg_reg_state->dtbclk_dto1_modulo = REG_READ(DTBCLK_DTO_MODULO[1]); + dccg_reg_state->dtbclk_dto1_phase = REG_READ(DTBCLK_DTO_PHASE[1]); + dccg_reg_state->dtbclk_dto2_modulo = REG_READ(DTBCLK_DTO_MODULO[2]); + dccg_reg_state->dtbclk_dto2_phase = REG_READ(DTBCLK_DTO_PHASE[2]); + dccg_reg_state->dtbclk_dto3_modulo = REG_READ(DTBCLK_DTO_MODULO[3]); + dccg_reg_state->dtbclk_dto3_phase = REG_READ(DTBCLK_DTO_PHASE[3]); + dccg_reg_state->dtbclk_p_cntl = REG_READ(DTBCLK_P_CNTL); + dccg_reg_state->force_symclk_disable = REG_READ(FORCE_SYMCLK_DISABLE); + dccg_reg_state->hdmicharclk0_clock_cntl = REG_READ(HDMICHARCLK0_CLOCK_CNTL); + dccg_reg_state->hdmistreamclk_cntl = REG_READ(HDMISTREAMCLK_CNTL); + dccg_reg_state->hdmistreamclk0_dto_param = REG_READ(HDMISTREAMCLK0_DTO_PARAM); + dccg_reg_state->microsecond_time_base_div = REG_READ(MICROSECOND_TIME_BASE_DIV); + dccg_reg_state->millisecond_time_base_div = REG_READ(MILLISECOND_TIME_BASE_DIV); + dccg_reg_state->otg_pixel_rate_div = REG_READ(OTG_PIXEL_RATE_DIV); + dccg_reg_state->otg0_phypll_pixel_rate_cntl = REG_READ(OTG0_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg0_pixel_rate_cntl = REG_READ(OTG0_PIXEL_RATE_CNTL); + dccg_reg_state->otg1_phypll_pixel_rate_cntl = REG_READ(OTG1_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg1_pixel_rate_cntl = REG_READ(OTG1_PIXEL_RATE_CNTL); + dccg_reg_state->otg2_phypll_pixel_rate_cntl = REG_READ(OTG2_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg2_pixel_rate_cntl = 
REG_READ(OTG2_PIXEL_RATE_CNTL); + dccg_reg_state->otg3_phypll_pixel_rate_cntl = REG_READ(OTG3_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg3_pixel_rate_cntl = REG_READ(OTG3_PIXEL_RATE_CNTL); + dccg_reg_state->phyasymclk_clock_cntl = REG_READ(PHYASYMCLK_CLOCK_CNTL); + dccg_reg_state->phybsymclk_clock_cntl = REG_READ(PHYBSYMCLK_CLOCK_CNTL); + dccg_reg_state->phycsymclk_clock_cntl = REG_READ(PHYCSYMCLK_CLOCK_CNTL); + dccg_reg_state->phydsymclk_clock_cntl = REG_READ(PHYDSYMCLK_CLOCK_CNTL); + dccg_reg_state->phyesymclk_clock_cntl = REG_READ(PHYESYMCLK_CLOCK_CNTL); + dccg_reg_state->phyplla_pixclk_resync_cntl = REG_READ(PHYPLLA_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phypllb_pixclk_resync_cntl = REG_READ(PHYPLLB_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phypllc_pixclk_resync_cntl = REG_READ(PHYPLLC_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phyplld_pixclk_resync_cntl = REG_READ(PHYPLLD_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phyplle_pixclk_resync_cntl = REG_READ(PHYPLLE_PIXCLK_RESYNC_CNTL); + dccg_reg_state->refclk_cgtt_blk_ctrl_reg = REG_READ(REFCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->socclk_cgtt_blk_ctrl_reg = REG_READ(SOCCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->symclk_cgtt_blk_ctrl_reg = REG_READ(SYMCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->symclk_psp_cntl = REG_READ(SYMCLK_PSP_CNTL); + dccg_reg_state->symclk32_le_cntl = REG_READ(SYMCLK32_LE_CNTL); + dccg_reg_state->symclk32_se_cntl = REG_READ(SYMCLK32_SE_CNTL); + dccg_reg_state->symclka_clock_enable = REG_READ(SYMCLKA_CLOCK_ENABLE); + dccg_reg_state->symclkb_clock_enable = REG_READ(SYMCLKB_CLOCK_ENABLE); + dccg_reg_state->symclkc_clock_enable = REG_READ(SYMCLKC_CLOCK_ENABLE); + dccg_reg_state->symclkd_clock_enable = REG_READ(SYMCLKD_CLOCK_ENABLE); + dccg_reg_state->symclke_clock_enable = REG_READ(SYMCLKE_CLOCK_ENABLE); +} + static const struct dccg_funcs dccg31_funcs = { .update_dpp_dto = dccg31_update_dpp_dto, .get_dccg_ref_freq = dccg31_get_dccg_ref_freq, @@ -727,6 +849,7 @@ static const struct dccg_funcs dccg31_funcs = { .set_dispclk_change_mode = dccg31_set_dispclk_change_mode, .disable_dsc = dccg31_disable_dscclk, .enable_dsc = dccg31_enable_dscclk, + .dccg_read_reg_state = dccg31_read_reg_state, }; struct dccg *dccg31_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h index cd261051dc2c..bf659920d4cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h @@ -236,4 +236,6 @@ void dccg31_disable_dscclk(struct dccg *dccg, int inst); void dccg31_enable_dscclk(struct dccg *dccg, int inst); +void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state); + #endif //__DCN31_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c index 8f6edd8e9beb..ef3db6beba25 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c @@ -377,7 +377,8 @@ static const struct dccg_funcs dccg314_funcs = { .get_pixel_rate_div = dccg314_get_pixel_rate_div, .trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync, .set_valid_pixel_rate = dccg314_set_valid_pixel_rate, - .set_dtbclk_p_src = dccg314_set_dtbclk_p_src + .set_dtbclk_p_src = dccg314_set_dtbclk_p_src, + .dccg_read_reg_state = dccg31_read_reg_state }; struct dccg *dccg314_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h 
b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h index 60ea1d248deb..a609635f35db 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h @@ -74,8 +74,7 @@ SR(DCCG_GATE_DISABLE_CNTL3),\ SR(HDMISTREAMCLK0_DTO_PARAM),\ SR(OTG_PIXEL_RATE_DIV),\ - SR(DTBCLK_P_CNTL),\ - SR(DCCG_AUDIO_DTO_SOURCE) + SR(DTBCLK_P_CNTL) #define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c index c899c09ea31b..bd2f528137b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c @@ -1114,6 +1114,16 @@ static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg) if (dispclk_rdivider_value != 0) REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); } +static void dccg35_wait_for_dentist_change_done( + struct dccg *dccg) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL); + + REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value); + REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000); +} static void dcn35_set_dppclk_enable(struct dccg *dccg, uint32_t dpp_inst, uint32_t enable) @@ -1174,9 +1184,9 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, dcn35_set_dppclk_enable(dccg, dpp_inst, true); } else { dcn35_set_dppclk_enable(dccg, dpp_inst, false); - /*we have this in hwss: disable_plane*/ - //dccg35_set_dppclk_rcg(dccg, dpp_inst, true); + dccg35_set_dppclk_rcg(dccg, dpp_inst, true); } + udelay(10); dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; } @@ -1300,6 +1310,8 @@ static void dccg35_set_pixel_rate_div( BREAK_TO_DEBUGGER(); return; } + if (otg_inst < 4) + dccg35_wait_for_dentist_change_done(dccg); } static void dccg35_set_dtbclk_p_src( @@ -1664,7 +1676,7 @@ static void dccg35_dpp_root_clock_control( { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - if (dccg->dpp_clock_gated[dpp_inst] == clock_on) + if (dccg->dpp_clock_gated[dpp_inst] != clock_on) return; if (clock_on) { @@ -1682,9 +1694,12 @@ static void dccg35_dpp_root_clock_control( DPPCLK0_DTO_PHASE, 0, DPPCLK0_DTO_MODULO, 1); /*we have this in hwss: disable_plane*/ - //dccg35_set_dppclk_rcg(dccg, dpp_inst, true); + dccg35_set_dppclk_rcg(dccg, dpp_inst, true); } + // wait for clock to fully ramp + udelay(10); + dccg->dpp_clock_gated[dpp_inst] = !clock_on; DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on); } @@ -2438,6 +2453,7 @@ static const struct dccg_funcs dccg35_funcs = { .disable_symclk_se = dccg35_disable_symclk_se, .set_dtbclk_p_src = dccg35_set_dtbclk_p_src, .dccg_root_gate_disable_control = dccg35_root_gate_disable_control, + .dccg_read_reg_state = dccg31_read_reg_state, }; struct dccg *dccg35_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h index 51f98c5c51c4..7b9c36456cd9 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h @@ -41,8 +41,9 @@ SR(SYMCLKA_CLOCK_ENABLE),\ SR(SYMCLKB_CLOCK_ENABLE),\ SR(SYMCLKC_CLOCK_ENABLE),\ - SR(SYMCLKD_CLOCK_ENABLE),\ - SR(SYMCLKE_CLOCK_ENABLE) + SR(SYMCLKD_CLOCK_ENABLE), \ + SR(SYMCLKE_CLOCK_ENABLE),\ + SR(SYMCLK_PSP_CNTL) #define DCCG_MASK_SH_LIST_DCN35(mask_sh) \ 
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\ @@ -231,6 +232,14 @@ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\ struct dccg *dccg35_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index 0b8ed9b94d3c..663a18ee5162 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -886,6 +886,7 @@ static const struct dccg_funcs dccg401_funcs = { .enable_symclk_se = dccg401_enable_symclk_se, .disable_symclk_se = dccg401_disable_symclk_se, .set_dtbclk_p_src = dccg401_set_dtbclk_p_src, + .dccg_read_reg_state = dccg31_read_reg_state }; struct dccg *dccg401_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index a6006776333d..2dcf394edf22 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -283,7 +283,7 @@ struct abm *dce_abm_create( const struct dce_abm_shift *abm_shift, const struct dce_abm_mask *abm_mask) { - struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC); + struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL); if (abm_dce == NULL) { BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index eeed840073fe..fcad61c618a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -1143,7 +1143,8 @@ void dce_aud_wall_dto_setup( REG_UPDATE(DCCG_AUDIO_DTO1_PHASE, DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase); - REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, + if (aud->masks->DCCG_AUDIO_DTO2_USE_512FBR_DTO) + REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index a8e79104b684..5f8fba45d98d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -1126,7 +1126,7 @@ struct dmcu *dcn10_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { - struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); @@ -1147,7 +1147,7 @@ struct dmcu *dcn20_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { - struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); @@ -1168,7 +1168,7 @@ struct dmcu *dcn21_dmcu_create( const 
struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { - struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 0c50fe266c8a..87dbb8d7ed27 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -302,6 +302,10 @@ static void setup_panel_mode( if (ctx->dc->caps.psp_setup_panel_mode) return; + /* The code below is only applicable to encoders with a digital transmitter. */ + if (enc110->base.transmitter == TRANSMITTER_UNKNOWN) + return; + ASSERT(REG(DP_DPHY_INTERNAL_CTRL)); value = REG_READ(DP_DPHY_INTERNAL_CTRL); @@ -804,6 +808,33 @@ bool dce110_link_encoder_validate_dp_output( return true; } +static bool dce110_link_encoder_validate_rgb_output( + const struct dce110_link_encoder *enc110, + const struct dc_crtc_timing *crtc_timing) +{ + /* When the VBIOS doesn't specify any limits, use 400 MHz. + * The value comes from amdgpu_atombios_get_clock_info. + */ + uint32_t max_pixel_clock_khz = 400000; + + if (enc110->base.ctx->dc_bios->fw_info_valid && + enc110->base.ctx->dc_bios->fw_info.max_pixel_clock) { + max_pixel_clock_khz = + enc110->base.ctx->dc_bios->fw_info.max_pixel_clock; + } + + if (crtc_timing->pix_clk_100hz > max_pixel_clock_khz * 10) + return false; + + if (crtc_timing->display_color_depth != COLOR_DEPTH_888) + return false; + + if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB) + return false; + + return true; +} + void dce110_link_encoder_construct( struct dce110_link_encoder *enc110, const struct encoder_init_data *init_data, @@ -824,6 +855,7 @@ void dce110_link_encoder_construct( enc110->base.connector = init_data->connector; enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; + enc110->base.analog_engine = init_data->analog_engine; enc110->base.features = *enc_features; @@ -847,6 +879,11 @@ void dce110_link_encoder_construct( SIGNAL_TYPE_EDP | SIGNAL_TYPE_HDMI_TYPE_A; + if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII || + enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) && + enc110->base.analog_engine != ENGINE_ID_UNKNOWN) + enc110->base.output_signals |= SIGNAL_TYPE_RGB; + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer @@ -885,6 +922,13 @@ void dce110_link_encoder_construct( enc110->base.preferred_engine = ENGINE_ID_DIGG; break; default: + if (init_data->analog_engine != ENGINE_ID_UNKNOWN) { + /* The connector is analog-only, ie. 
VGA */ + enc110->base.preferred_engine = init_data->analog_engine; + enc110->base.output_signals = SIGNAL_TYPE_RGB; + enc110->base.transmitter = TRANSMITTER_UNKNOWN; + break; + } ASSERT_CRITICAL(false); enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; } @@ -939,6 +983,10 @@ bool dce110_link_encoder_validate_output_with_stream( is_valid = dce110_link_encoder_validate_dp_output( enc110, &stream->timing); break; + case SIGNAL_TYPE_RGB: + is_valid = dce110_link_encoder_validate_rgb_output( + enc110, &stream->timing); + break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_LVDS: is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB; @@ -969,6 +1017,10 @@ void dce110_link_encoder_hw_init( cntl.coherent = false; cntl.hpd_sel = enc110->base.hpd_source; + /* The code below is only applicable to encoders with a digital transmitter. */ + if (enc110->base.transmitter == TRANSMITTER_UNKNOWN) + return; + if (enc110->base.connector.id == CONNECTOR_ID_EDP) cntl.signal = SIGNAL_TYPE_EDP; @@ -1034,6 +1086,8 @@ void dce110_link_encoder_setup( /* DP MST */ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5); break; + case SIGNAL_TYPE_RGB: + break; default: ASSERT_CRITICAL(false); /* invalid mode ! */ @@ -1282,6 +1336,24 @@ void dce110_link_encoder_disable_output( struct bp_transmitter_control cntl = { 0 }; enum bp_result result; + switch (enc->analog_engine) { + case ENGINE_ID_DACA: + REG_UPDATE(DAC_ENABLE, DAC_ENABLE, 0); + break; + case ENGINE_ID_DACB: + /* DACB doesn't seem to be present on DCE6+, + * although there are references to it in the register file. + */ + DC_LOG_ERROR("%s DACB is unsupported\n", __func__); + break; + default: + break; + } + + /* The code below only applies to connectors that support digital signals. */ + if (enc->transmitter == TRANSMITTER_UNKNOWN) + return; + if (!dce110_is_dig_enabled(enc)) { /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ return; @@ -1726,6 +1798,7 @@ void dce60_link_encoder_construct( enc110->base.connector = init_data->connector; enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; + enc110->base.analog_engine = init_data->analog_engine; enc110->base.features = *enc_features; @@ -1749,6 +1822,11 @@ void dce60_link_encoder_construct( SIGNAL_TYPE_EDP | SIGNAL_TYPE_HDMI_TYPE_A; + if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII || + enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) && + enc110->base.analog_engine != ENGINE_ID_UNKNOWN) + enc110->base.output_signals |= SIGNAL_TYPE_RGB; + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer @@ -1787,6 +1865,13 @@ void dce60_link_encoder_construct( enc110->base.preferred_engine = ENGINE_ID_DIGG; break; default: + if (init_data->analog_engine != ENGINE_ID_UNKNOWN) { + /* The connector is analog-only, ie. 
VGA */ + enc110->base.preferred_engine = init_data->analog_engine; + enc110->base.output_signals = SIGNAL_TYPE_RGB; + enc110->base.transmitter = TRANSMITTER_UNKNOWN; + break; + } ASSERT_CRITICAL(false); enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index 261c70e01e33..c58b69bc319b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -101,18 +101,21 @@ SRI(DP_SEC_CNTL, DP, id), \ SRI(DP_VID_STREAM_CNTL, DP, id), \ SRI(DP_DPHY_FAST_TRAINING, DP, id), \ - SRI(DP_SEC_CNTL1, DP, id) + SRI(DP_SEC_CNTL1, DP, id), \ + SR(DAC_ENABLE) #endif #define LE_DCE80_REG_LIST(id)\ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ - LE_COMMON_REG_LIST_BASE(id) + LE_COMMON_REG_LIST_BASE(id), \ + SR(DAC_ENABLE) #define LE_DCE100_REG_LIST(id)\ LE_COMMON_REG_LIST_BASE(id), \ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ - SR(DCI_MEM_PWR_STATUS) + SR(DCI_MEM_PWR_STATUS), \ + SR(DAC_ENABLE) #define LE_DCE110_REG_LIST(id)\ LE_COMMON_REG_LIST_BASE(id), \ @@ -181,6 +184,9 @@ struct dce110_link_enc_registers { uint32_t DP_DPHY_BS_SR_SWAP_CNTL; uint32_t DP_DPHY_HBR2_PATTERN_CONTROL; uint32_t DP_SEC_CNTL1; + + /* DAC registers */ + uint32_t DAC_ENABLE; }; struct dce110_link_encoder { @@ -215,10 +221,6 @@ bool dce110_link_encoder_validate_dvi_output( enum signal_type signal, const struct dc_crtc_timing *crtc_timing); -bool dce110_link_encoder_validate_rgb_output( - const struct dce110_link_encoder *enc110, - const struct dc_crtc_timing *crtc_timing); - bool dce110_link_encoder_validate_dp_output( const struct dce110_link_encoder *enc110, const struct dc_crtc_timing *crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 1130d7619b26..574618d5d4a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -1567,3 +1567,17 @@ void dce110_stream_encoder_construct( enc110->se_shift = se_shift; enc110->se_mask = se_mask; } + +static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {}; + +void dce110_analog_stream_encoder_construct( + struct dce110_stream_encoder *enc110, + struct dc_context *ctx, + struct dc_bios *bp, + enum engine_id eng_id) +{ + enc110->base.funcs = &dce110_an_str_enc_funcs; + enc110->base.ctx = ctx; + enc110->base.id = eng_id; + enc110->base.bp = bp; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h index cc5020a8e1e1..068de1392121 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h @@ -708,6 +708,11 @@ void dce110_stream_encoder_construct( const struct dce_stream_encoder_shift *se_shift, const struct dce_stream_encoder_mask *se_mask); +void dce110_analog_stream_encoder_construct( + struct dce110_stream_encoder *enc110, + struct dc_context *ctx, + struct dc_bios *bp, + enum engine_id eng_id); void dce110_se_audio_mute_control( struct stream_encoder *enc, bool mute); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index d37ecfdde4f1..5bfa2b0d2afd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -61,10 
+61,9 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, dc_dmub_srv_wait_for_inbox0_ack(dmub_srv); } -bool should_use_dmub_lock(struct dc_link *link) +bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link) { - /* ASIC doesn't support DMUB */ - if (!link->ctx->dmub_srv) + if (!link) return false; if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) @@ -73,16 +72,38 @@ bool should_use_dmub_lock(struct dc_link *link) if (link->replay_settings.replay_feature_enabled) return true; - /* only use HW lock for PSR1 on single eDP */ if (link->psr_settings.psr_version == DC_PSR_VERSION_1) { struct dc_link *edp_links[MAX_NUM_EDP]; int edp_num; - dc_get_edp_links(link->dc, edp_links, &edp_num); - + dc_get_edp_links(dc, edp_links, &edp_num); if (edp_num == 1) return true; } - return false; } + +bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context) +{ + if (!context) + return false; + for (int i = 0; i < context->stream_count; i++) { + const struct dc_link *link = context->streams[i]->link; + + if (dmub_hw_lock_mgr_does_link_require_lock(dc, link)) + return true; + } + return false; +} + +bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link) +{ + /* ASIC doesn't support DMUB */ + if (!dc->ctx->dmub_srv) + return false; + + if (dc->ctx->dce_version >= DCN_VERSION_4_01) + return false; + + return dmub_hw_lock_mgr_does_link_require_lock(dc, link); +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h index 5a72b168fb4a..4c80ca8484ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h @@ -37,6 +37,16 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv, void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_cmd_lock_hw hw_lock_cmd); -bool should_use_dmub_lock(struct dc_link *link); +/** + * should_use_dmub_inbox1_lock() - Checks if the DMCUB hardware lock via inbox1 should be used. 
+ * + * @dc: pointer to DC object + * @link: optional pointer to the link object to check for enabled link features + * + * Return: true if the inbox1 lock should be used, false otherwise + */ +bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link); +bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link); +bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context); #endif /*_DMUB_HW_LOCK_MGR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index f9542edff14b..cf1372aaff6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -213,7 +213,8 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, */ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, uint32_t coasting_vtotal, - uint8_t panel_inst) + uint8_t panel_inst, + uint16_t frame_skip_number) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; @@ -227,6 +228,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data); pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF); pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16; + pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number; dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } @@ -283,7 +285,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, * Set REPLAY power optimization flags and coasting vtotal. */ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub, - unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal) + unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal, uint16_t frame_skip_number) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; @@ -301,6 +303,7 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm pCmd->replay_set_power_opt_data.panel_inst = panel_inst; pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF); pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16; + pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number; dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } @@ -384,6 +387,19 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_disabled_adaptive_sync_sdp.data.force_disabled = cmd_element->disabled_adaptive_sync_sdp_data.force_disabled; break; + case Replay_Set_Version: + //Header + cmd.replay_set_version.header.sub_type = + DMUB_CMD__REPLAY_SET_VERSION; + cmd.replay_set_version.header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_version) - + sizeof(struct dmub_cmd_header); + //Cmd Body + cmd.replay_set_version.replay_set_version_data.panel_inst = + cmd_element->version_data.panel_inst; + cmd.replay_set_version.replay_set_version_data.version = + cmd_element->version_data.version; + break; case Replay_Set_General_Cmd: //Header cmd.replay_set_general_cmd.header.sub_type = diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h index e6346c0ffc0e..07c79739a980 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h +++ 
b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h @@ -27,11 +27,12 @@ struct dmub_replay_funcs { void (*replay_send_cmd)(struct dmub_replay *dmub, enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element); void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint32_t coasting_vtotal, - uint8_t panel_inst); + uint8_t panel_inst, uint16_t frame_skip_number); void (*replay_residency)(struct dmub_replay *dmub, uint8_t panel_inst, uint32_t *residency, const bool is_start, const enum pr_residency_mode mode); void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub, - unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal); + unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal, + uint16_t frame_skip_number); }; struct dmub_replay *dmub_replay_create(struct dc_context *ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile deleted file mode 100644 index 4c21ce42054c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ /dev/null @@ -1,141 +0,0 @@ -# SPDX-License-Identifier: MIT */ -# -# Copyright 2023 Advanced Micro Devices, Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# Authors: AMD -# -# Makefile for dml2. 
- -dml2_ccflags := $(CC_FLAGS_FPU) -dml2_rcflags := $(CC_FLAGS_NO_FPU) - -ifneq ($(CONFIG_FRAME_WARN),0) - ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) - ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy) - frame_warn_limit := 4096 - else - frame_warn_limit := 3072 - endif - else - frame_warn_limit := 2048 - endif - - ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y) - frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit) - endif -endif - -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2 -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_dpmm/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_pmo/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_standalone_libraries/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/inc -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/inc -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/ - -CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag) -CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_ccflags) - -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_rcflags) - -DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \ - dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \ - dml_display_rq_dlg_calc.o - -AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DML2) - -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) 
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags) - -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags) - -DML21 := src/dml2_top/dml2_top_interfaces.o -DML21 += src/dml2_top/dml2_top_soc15.o -DML21 += src/dml2_core/dml2_core_dcn4.o -DML21 += src/dml2_core/dml2_core_utils.o -DML21 += src/dml2_core/dml2_core_factory.o -DML21 += src/dml2_core/dml2_core_dcn4_calcs.o -DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o -DML21 += src/dml2_dpmm/dml2_dpmm_factory.o -DML21 += src/dml2_mcg/dml2_mcg_dcn4.o -DML21 += src/dml2_mcg/dml2_mcg_factory.o -DML21 += src/dml2_pmo/dml2_pmo_dcn3.o -DML21 += src/dml2_pmo/dml2_pmo_factory.o -DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o -DML21 += src/dml2_standalone_libraries/lib_float_math.o -DML21 += dml21_translation_helper.o -DML21 += dml21_wrapper.o -DML21 += dml21_utils.o - -AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2/dml21/,$(DML21)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DML21) - diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile new file mode 100644 index 000000000000..97e068b6bf6b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile @@ -0,0 +1,140 @@ +# SPDX-License-Identifier: MIT */ +# +# Copyright 2023 Advanced Micro Devices, Inc. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Authors: AMD +# +# Makefile for dml2. + +dml2_ccflags := $(CC_FLAGS_FPU) +dml2_rcflags := $(CC_FLAGS_NO_FPU) + +ifneq ($(CONFIG_FRAME_WARN),0) + ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) + ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy) + frame_warn_limit := 4096 + else + frame_warn_limit := 3072 + endif + else + frame_warn_limit := 2056 + endif + + ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y) + frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit) + endif +endif + +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0 +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_core +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_mcg/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_dpmm/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_pmo/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/inc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/inc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/ + +CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_ccflags) + +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_rcflags) 
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_rcflags) + +DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \ + dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \ + dml_display_rq_dlg_calc.o + +AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2_0/,$(DML2)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DML2) + +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_ccflags) + +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_rcflags) + +DML21 := 
src/dml2_top/dml2_top_interfaces.o +DML21 += src/dml2_top/dml2_top_soc15.o +DML21 += src/dml2_core/dml2_core_dcn4.o +DML21 += src/dml2_core/dml2_core_utils.o +DML21 += src/dml2_core/dml2_core_factory.o +DML21 += src/dml2_core/dml2_core_dcn4_calcs.o +DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o +DML21 += src/dml2_dpmm/dml2_dpmm_factory.o +DML21 += src/dml2_mcg/dml2_mcg_dcn4.o +DML21 += src/dml2_mcg/dml2_mcg_factory.o +DML21 += src/dml2_pmo/dml2_pmo_dcn3.o +DML21 += src/dml2_pmo/dml2_pmo_factory.o +DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o +DML21 += src/dml2_standalone_libraries/lib_float_math.o +DML21 += dml21_translation_helper.o +DML21 += dml21_wrapper.o +DML21 += dml21_utils.o + +AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2_0/dml21/,$(DML21)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DML21) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h similarity index 93% rename from drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h index e450445bc05d..b954c9648fbe 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h @@ -53,17 +53,17 @@ typedef const void *const_pvoid; typedef const char *const_pchar; typedef struct rgba_struct { - uint8 a; - uint8 r; - uint8 g; - uint8 b; + uint8 a; + uint8 r; + uint8 g; + uint8 b; } rgba_t; typedef struct { - uint8 blue; - uint8 green; - uint8 red; - uint8 alpha; + uint8 blue; + uint8 green; + uint8 red; + uint8 alpha; } gen_color_t; typedef union { @@ -87,7 +87,7 @@ typedef union { } uintfloat64; #ifndef UNREFERENCED_PARAMETER -#define UNREFERENCED_PARAMETER(x) x = x +#define UNREFERENCED_PARAMETER(x) (x = x) #endif #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c index 4b9b2e84d381..c468f492b876 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c @@ -10205,6 +10205,7 @@ dml_bool_t dml_get_is_phantom_pipe(struct display_mode_lib_st *mode_lib, dml_uin return (mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange[plane_idx] == dml_use_mall_pstate_change_phantom_pipe); } + #define dml_get_per_surface_var_func(variable, type, interval_var) type dml_get_##variable(struct display_mode_lib_st *mode_lib, dml_uint_t surface_idx) \ { \ dml_uint_t plane_idx; \ @@ -10333,3 +10334,4 @@ dml_get_per_surface_var_func(bigk_fragment_size, dml_uint_t, mode_lib->mp.BIGK_F dml_get_per_surface_var_func(dpte_bytes_per_row, dml_uint_t, mode_lib->mp.PixelPTEBytesPerRow); dml_get_per_surface_var_func(meta_bytes_per_row, dml_uint_t, mode_lib->mp.MetaRowByte); dml_get_per_surface_var_func(det_buffer_size_kbytes, dml_uint_t, mode_lib->ms.DETBufferSizeInKByte); + diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h rename to 
drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h index dbeb08466092..5b40dcdc4406 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h @@ -274,7 +274,6 @@ enum dml_clk_cfg_policy { dml_use_state_freq = 2 }; - struct soc_state_bounding_box_st { dml_float_t socclk_mhz; dml_float_t dscclk_mhz; @@ -1894,7 +1893,7 @@ struct display_mode_lib_scratch_st { struct CalculatePrefetchSchedule_params_st CalculatePrefetchSchedule_params; }; -/// @brief Represent the overall soc/ip enviroment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output +/// @brief Represent the overall soc/ip environment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output struct display_mode_lib_st { dml_uint_t project; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h similarity index 95% rename from drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h index 14d389525296..e574c81edf5e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h @@ -52,7 +52,7 @@ #define __DML_VBA_DEBUG__ #define __DML_VBA_ENABLE_INLINE_CHECK_ 0 #define __DML_VBA_MIN_VSTARTUP__ 9 //config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); /* Populate stream, plane mappings and other fields in display config. */ - DC_FP_START(); result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); - DC_FP_END(); if (!result) return false; @@ -281,9 +279,7 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); mode_support->dml2_instance = dml_init->dml2_instance; - DC_FP_START(); dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); - DC_FP_END(); dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; DC_FP_START(); is_supported = dml2_check_mode_supported(mode_support); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h index 793e1c038efd..16a4f97bca4e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
- #ifndef __DML_DML_DCN4_SOC_BB__ #define __DML_DML_DCN4_SOC_BB__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h similarity index 98% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h index 91955bbe24b8..bf57df42d1d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h @@ -46,7 +46,6 @@ struct dml2_display_dlg_regs { uint32_t dst_y_delta_drq_limit; uint32_t refcyc_per_vm_dmdata; uint32_t dmdata_dl_delta; - uint32_t dst_y_svp_drq_limit; // MRQ uint32_t refcyc_per_meta_chunk_vblank_l; @@ -122,6 +121,8 @@ struct dml2_display_rq_regs { uint32_t crq_expansion_mode; uint32_t plane1_base_address; uint32_t unbounded_request_enabled; + bool pte_buffer_mode; + bool force_one_row_for_frame; // MRQ uint32_t mrq_expansion_mode; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h similarity index 95% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h index e8dc6471c0be..35aa954248cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h @@ -49,6 +49,11 @@ enum dml2_source_format_class { dml2_422_packed_12 = 18 }; +enum dml2_sample_positioning { + dml2_interstitial = 0, + dml2_cosited = 1 +}; + enum dml2_rotation_angle { dml2_rotation_0 = 0, dml2_rotation_90 = 1, @@ -82,6 +87,15 @@ enum dml2_output_link_dp_rate { dml2_dp_rate_uhbr20 = 6 }; +enum dml2_pstate_type { + dml2_pstate_type_uclk = 0, + dml2_pstate_type_fclk = 1, + dml2_pstate_type_ppt = 2, + dml2_pstate_type_temp_read = 3, + dml2_pstate_type_dummy_pstate = 4, + dml2_pstate_type_count = 5 +}; + enum dml2_uclk_pstate_change_strategy { dml2_uclk_pstate_change_strategy_auto = 0, dml2_uclk_pstate_change_strategy_force_vactive = 1, @@ -222,7 +236,11 @@ struct dml2_composition_cfg { struct { bool enabled; + bool easf_enabled; + bool isharp_enabled; bool upsp_enabled; + enum dml2_sample_positioning upsp_sample_positioning; + unsigned int upsp_vtaps; struct { double h_ratio; double v_ratio; @@ -384,7 +402,7 @@ struct dml2_plane_parameters { // reserved_vblank_time_ns is the minimum time to reserve in vblank for Twait // The actual reserved vblank time used for the corresponding stream in mode_programming would be at least as much as this per-plane override. 
long reserved_vblank_time_ns; - unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay + unsigned int max_vactive_det_fill_delay_us[dml2_pstate_type_count]; // 0 = no reserved time, +ve = explicit max delay unsigned int gpuvm_min_page_size_kbytes; unsigned int hostvm_min_page_size_kbytes; @@ -413,7 +431,6 @@ struct dml2_stream_parameters { bool disable_dynamic_odm; bool disable_subvp; int minimum_vblank_idle_requirement_us; - bool minimize_active_latency_hiding; struct { struct { @@ -456,6 +473,7 @@ struct dml2_display_cfg { bool enable; bool value; } force_nom_det_size_kbytes; + bool mode_support_check_disable; bool mcache_admissibility_check_disable; bool surface_viewport_size_check_disable; @@ -478,7 +496,6 @@ struct dml2_display_cfg { bool synchronize_ddr_displays_for_uclk_pstate_change; bool max_outstanding_when_urgent_expected_disable; bool enable_subvp_implicit_pmo; //enables PMO to switch pipe uclk strategy to subvp, and generate phantom programming - unsigned int best_effort_min_active_latency_hiding_us; bool all_streams_blanked; } overrides; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h similarity index 94% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h index 176f55947664..1fbc520c2540 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h @@ -89,8 +89,8 @@ struct dml2_soc_qos_parameters { struct dml2_soc_power_management_parameters { double dram_clk_change_blackout_us; - double dram_clk_change_read_only_us; - double dram_clk_change_write_only_us; + double dram_clk_change_read_only_us; // deprecated + double dram_clk_change_write_only_us; // deprecated double fclk_change_blackout_us; double g7_ppt_blackout_us; double g7_temperature_read_blackout_us; @@ -145,6 +145,8 @@ struct dml2_soc_bb { struct dml2_soc_vmin_clock_limits vmin_limit; double lower_bound_bandwidth_dchub; + double fraction_of_urgent_bandwidth_nominal_target; + double fraction_of_urgent_bandwidth_flip_target; unsigned int dprefclk_mhz; unsigned int xtalclk_mhz; unsigned int pcie_refclk_mhz; @@ -170,6 +172,7 @@ struct dml2_soc_bb { struct dml2_ip_capabilities { unsigned int pipe_count; unsigned int otg_count; + unsigned int TDLUT_33cube_count; unsigned int num_dsc; unsigned int max_num_dp2p0_streams; unsigned int max_num_hdmi_frl_outputs; @@ -188,7 +191,9 @@ struct dml2_ip_capabilities { unsigned int subvp_prefetch_end_to_mall_start_us; unsigned int subvp_fw_processing_delay; unsigned int max_vactive_det_fill_delay_us; - + unsigned int ppt_max_allow_delay_us; + unsigned int temp_read_max_allow_delay_us; + unsigned int dummy_pstate_max_allow_delay_us; /* FAMS2 delays */ struct { unsigned int max_allow_delay_us; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h similarity index 98% rename from 
drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h index 41adb1104d0f..452e4a2e72c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h @@ -70,6 +70,8 @@ struct dml2_pmo_options { bool disable_dyn_odm; bool disable_dyn_odm_for_multi_stream; bool disable_dyn_odm_for_stream_with_svp; + struct dml2_pmo_pstate_strategy *override_strategy_lists[DML2_MAX_PLANES]; + unsigned int num_override_strategies_per_list[DML2_MAX_PLANES]; }; struct dml2_options { @@ -310,6 +312,7 @@ struct dml2_mode_support_info { bool NumberOfOTGSupport; bool NumberOfHDMIFRLSupport; bool NumberOfDP2p0Support; + bool NumberOfTDLUT33cubeSupport; bool WritebackScaleRatioAndTapsSupport; bool CursorSupport; bool PitchSupport; @@ -357,6 +360,8 @@ struct dml2_mode_support_info { unsigned int AlignedCPitch[DML2_MAX_PLANES]; bool g6_temp_read_support; bool temp_read_or_ppt_support; + bool qos_bandwidth_support; + bool dcfclk_support; }; // dml2_mode_support_info struct dml2_display_cfg_programming { @@ -671,6 +676,8 @@ struct dml2_display_cfg_programming { unsigned int PrefetchMode[DML2_MAX_PLANES]; // LEGACY_ONLY bool ROBUrgencyAvoidance; double LowestPrefetchMargin; + + unsigned int pstate_recout_reduction_lines[DML2_MAX_PLANES]; } misc; struct dml2_mode_support_info mode_support_info; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c index 6ee37386f672..eba948e187c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c @@ -28,6 +28,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = { .writeback_interface_buffer_size_kbytes = 90, //Number of pipes after DCN Pipe harvesting .max_num_dpp = 4, + .max_num_opp = 4, .max_num_otg = 4, .max_num_wb = 1, .max_dchub_pscl_bw_pix_per_clk = 4, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index bf62d42b3f78..a02e9fd6b5ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -1303,6 +1303,7 @@ static double TruncToValidBPP( MinDSCBPP = 8; MaxDSCBPP = 16; } else { + if (Output == dml2_hdmi || Output == dml2_hdmifrl) { NonDSCBPP0 = 24; NonDSCBPP1 = 24; @@ -1320,6 +1321,7 @@ static double TruncToValidBPP( MaxDSCBPP = 16; } } + if (Output == dml2_dp2p0) { MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128.0 
/ 132.0 * 383.0 / 384.0 * 65536.0 / 65540.0; } else if (DSCEnable && Output == dml2_dp) { @@ -4047,7 +4049,9 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode, bool UseDSC, unsigned int NumberOfDSCSlices, unsigned int TotalNumberOfActiveDPP, + unsigned int TotalNumberOfActiveOPP, unsigned int MaxNumDPP, + unsigned int MaxNumOPP, double DISPCLKRequired, unsigned int NumberOfDPPRequired, unsigned int MaxHActiveForDSC, @@ -4063,7 +4067,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode, if (DISPCLKRequired > MaxDispclk) return false; - if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP) + if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP || (TotalNumberOfActiveOPP + NumberOfDPPRequired) > MaxNumOPP) return false; if (are_odm_segments_symmetrical) { if (HActive % (NumberOfDPPRequired * pixels_per_clock_cycle)) @@ -4109,7 +4113,9 @@ static noinline_for_stack void CalculateODMMode( double MaxDispclk, bool DSCEnable, unsigned int TotalNumberOfActiveDPP, + unsigned int TotalNumberOfActiveOPP, unsigned int MaxNumDPP, + unsigned int MaxNumOPP, double PixelClock, unsigned int NumberOfDSCSlices, @@ -4179,7 +4185,9 @@ static noinline_for_stack void CalculateODMMode( UseDSC, NumberOfDSCSlices, TotalNumberOfActiveDPP, + TotalNumberOfActiveOPP, MaxNumDPP, + MaxNumOPP, DISPCLKRequired, NumberOfDPPRequired, MaxHActiveForDSC, @@ -6964,7 +6972,7 @@ static void calculate_bytes_to_fetch_required_to_hide_latency( stream_index = p->display_cfg->plane_descriptors[plane_index].stream_index; - dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us / + dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us[0] / ((double)p->display_cfg->stream_descriptors[stream_index].timing.h_total / (double)p->display_cfg->stream_descriptors[stream_index].timing.pixel_clock_khz * 1000.0)); @@ -7061,9 +7069,9 @@ static void calculate_excess_vactive_bandwidth_required( excess_vactive_fill_bw_l[plane_index] = 0.0; excess_vactive_fill_bw_c[plane_index] = 0.0; - if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us > 0) { - excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us; - excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us; + if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] > 0) { + excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk]; + excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk]; } } } @@ -8358,6 +8366,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params); mode_lib->ms.TotalNumberOfActiveDPP = 0; + mode_lib->ms.TotalNumberOfActiveOPP = 0; mode_lib->ms.support.TotalAvailablePipesSupport = true; for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { @@ -8393,7 +8402,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.max_dispclk_freq_mhz, false, // DSCEnable 
mode_lib->ms.TotalNumberOfActiveDPP, + mode_lib->ms.TotalNumberOfActiveOPP, mode_lib->ip.max_num_dpp, + mode_lib->ip.max_num_opp, ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000), mode_lib->ms.support.NumberOfDSCSlices[k], @@ -8412,7 +8423,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.max_dispclk_freq_mhz, true, // DSCEnable mode_lib->ms.TotalNumberOfActiveDPP, + mode_lib->ms.TotalNumberOfActiveOPP, mode_lib->ip.max_num_dpp, + mode_lib->ip.max_num_opp, ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000), mode_lib->ms.support.NumberOfDSCSlices[k], @@ -8516,20 +8529,23 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 1; + mode_lib->ms.NoOfOPP[k] = 1; if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 4; + mode_lib->ms.NoOfOPP[k] = 4; } else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 3; + mode_lib->ms.NoOfOPP[k] = 3; } else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 2; + mode_lib->ms.NoOfOPP[k] = 2; } else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 2) { mode_lib->ms.MPCCombine[k] = true; mode_lib->ms.NoOfDPP[k] = 2; - mode_lib->ms.TotalNumberOfActiveDPP++; } else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 1; @@ -8540,7 +8556,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) { mode_lib->ms.MPCCombine[k] = true; mode_lib->ms.NoOfDPP[k] = 2; - mode_lib->ms.TotalNumberOfActiveDPP++; } } #if defined(__DML_VBA_DEBUG__) @@ -8548,8 +8563,16 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out #endif } + mode_lib->ms.TotalNumberOfActiveDPP = 0; + mode_lib->ms.TotalNumberOfActiveOPP = 0; + for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { + mode_lib->ms.TotalNumberOfActiveDPP += mode_lib->ms.NoOfDPP[k]; + mode_lib->ms.TotalNumberOfActiveOPP += mode_lib->ms.NoOfOPP[k]; + } if (mode_lib->ms.TotalNumberOfActiveDPP > (unsigned int)mode_lib->ip.max_num_dpp) mode_lib->ms.support.TotalAvailablePipesSupport = false; + if (mode_lib->ms.TotalNumberOfActiveOPP > (unsigned int)mode_lib->ip.max_num_opp) + mode_lib->ms.support.TotalAvailablePipesSupport = false; mode_lib->ms.TotalNumberOfSingleDPPSurfaces = 0; @@ -9028,11 +9051,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->ms.SwathWidthC; calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->ms.SwathHeightY; calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->ms.SwathHeightC; - calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us; + 
calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us; /* outputs */ - calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l; - calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c; + calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk]; + calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk]; calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params); @@ -9040,8 +9063,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out calculate_excess_vactive_bandwidth_required( display_cfg, mode_lib->ms.num_active_planes, - s->pstate_bytes_required_l, - s->pstate_bytes_required_c, + s->pstate_bytes_required_l[dml2_pstate_type_uclk], + s->pstate_bytes_required_c[dml2_pstate_type_uclk], /* outputs */ mode_lib->ms.excess_vactive_fill_bw_l, mode_lib->ms.excess_vactive_fill_bw_c); @@ -9483,8 +9506,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out calculate_vactive_det_fill_latency( display_cfg, mode_lib->ms.num_active_planes, - s->pstate_bytes_required_l, - s->pstate_bytes_required_c, + s->pstate_bytes_required_l[dml2_pstate_type_uclk], + s->pstate_bytes_required_c[dml2_pstate_type_uclk], mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0, mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1, mode_lib->ms.vactive_sw_bw_l, @@ -9492,7 +9515,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.surface_avg_vactive_required_bw, mode_lib->ms.surface_peak_required_bw, /* outputs */ - mode_lib->ms.dram_change_vactive_det_fill_delay_us); + mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk]); #ifdef __DML_VBA_DEBUG__ DML_LOG_VERBOSE("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us); @@ -10986,11 +11009,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->mp.SwathWidthC; calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->mp.SwathHeightY; calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->mp.SwathHeightC; - calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us; + calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us; /* outputs */ - calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l; - calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c; + calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk]; + calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk]; calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params); @@ -10998,8 +11021,8 @@ static bool dml_core_mode_programming(struct 
dml2_core_calcs_mode_programming_ex calculate_excess_vactive_bandwidth_required( display_cfg, s->num_active_planes, - s->pstate_bytes_required_l, - s->pstate_bytes_required_c, + s->pstate_bytes_required_l[dml2_pstate_type_uclk], + s->pstate_bytes_required_c[dml2_pstate_type_uclk], /* outputs */ mode_lib->mp.excess_vactive_fill_bw_l, mode_lib->mp.excess_vactive_fill_bw_c); @@ -12756,7 +12779,7 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna { const struct dml2_plane_parameters *plane_descriptor = &display_cfg->display_config.plane_descriptors[plane_index]; const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[plane_descriptor->stream_index]; - const struct dml2_fams2_meta *stream_fams2_meta = &display_cfg->stage3.stream_fams2_meta[plane_descriptor->stream_index]; + const struct dml2_pstate_meta *stream_pstate_meta = &display_cfg->stage3.stream_pstate_meta[plane_descriptor->stream_index]; struct dmub_fams2_cmd_stream_static_base_state *base_programming = &fams2_base_programming->stream_v1.base; union dmub_fams2_cmd_stream_static_sub_state *sub_programming = &fams2_sub_programming->stream_v1.sub_state; @@ -12771,24 +12794,24 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna /* from display configuration */ base_programming->htotal = (uint16_t)stream_descriptor->timing.h_total; base_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total; - base_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal - + base_programming->vblank_start = (uint16_t)(stream_pstate_meta->nom_vtotal - stream_descriptor->timing.v_front_porch); - base_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal - + base_programming->vblank_end = (uint16_t)(stream_pstate_meta->nom_vtotal - stream_descriptor->timing.v_front_porch - stream_descriptor->timing.v_active); base_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled; /* from meta */ base_programming->otg_vline_time_ns = - (unsigned int)(stream_fams2_meta->otg_vline_time_us * 1000.0); - base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines; - base_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines; - base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines; - base_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal - + (unsigned int)(stream_pstate_meta->otg_vline_time_us * 1000.0); + base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_pstate_meta->scheduling_delay_otg_vlines; + base_programming->contention_delay_otg_vlines = (uint8_t)stream_pstate_meta->contention_delay_otg_vlines; + base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines; + base_programming->drr_keepout_otg_vline = (uint16_t)(stream_pstate_meta->nom_vtotal - stream_descriptor->timing.v_front_porch - - stream_fams2_meta->method_drr.programming_delay_otg_vlines); - base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines; - base_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal; + stream_pstate_meta->method_drr.programming_delay_otg_vlines); + base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_pstate_meta->allow_to_target_delay_otg_vlines; + base_programming->max_vtotal = 
(uint16_t)stream_pstate_meta->max_vtotal; /* from core */ base_programming->config.bits.min_ttu_vblank_usable = true; @@ -12807,11 +12830,11 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna /* legacy vactive */ base_programming->type = FAMS2_STREAM_TYPE_VACTIVE; sub_programming->legacy.vactive_det_fill_delay_otg_vlines = - (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_vactive.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_vactive.common.allow_end_otg_vline; base_programming->config.bits.clamp_vtotal_min = true; break; case dml2_pstate_method_vblank: @@ -12819,22 +12842,22 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna /* legacy vblank */ base_programming->type = FAMS2_STREAM_TYPE_VBLANK; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_vblank.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_vblank.common.allow_end_otg_vline; base_programming->config.bits.clamp_vtotal_min = true; break; case dml2_pstate_method_fw_drr: /* drr */ base_programming->type = FAMS2_STREAM_TYPE_DRR; sub_programming->drr.programming_delay_otg_vlines = - (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_drr.programming_delay_otg_vlines; sub_programming->drr.nom_stretched_vtotal = - (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal; + (uint16_t)stream_pstate_meta->method_drr.stretched_vtotal; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_drr.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_drr.common.allow_end_otg_vline; /* drr only clamps to vtotal min for single display */ base_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1; sub_programming->drr.only_stretch_if_required = true; @@ -12847,13 +12870,13 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna (uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0); sub_programming->subvp.vratio_denominator = 1000; sub_programming->subvp.programming_delay_otg_vlines = - (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_subvp.programming_delay_otg_vlines; sub_programming->subvp.prefetch_to_mall_otg_vlines = - (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines; sub_programming->subvp.phantom_vtotal = - (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal; + (uint16_t)stream_pstate_meta->method_subvp.phantom_vtotal; sub_programming->subvp.phantom_vactive = - 
(uint16_t)stream_fams2_meta->method_subvp.phantom_vactive; + (uint16_t)stream_pstate_meta->method_subvp.phantom_vactive; sub_programming->subvp.config.bits.is_multi_planar = plane_descriptor->surface.plane1.height > 0; sub_programming->subvp.config.bits.is_yuv420 = @@ -12862,9 +12885,9 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna plane_descriptor->pixel_format == dml2_420_12; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_subvp.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_subvp.common.allow_end_otg_vline; base_programming->config.bits.clamp_vtotal_min = true; break; case dml2_pstate_method_reserved_hw: @@ -12920,7 +12943,8 @@ void dml2_core_calcs_get_plane_support_info(const struct dml2_display_cfg *displ out->active_latency_hiding_us = (int)mode_lib->ms.VActiveLatencyHidingUs[plane_idx]; - out->dram_change_vactive_det_fill_delay_us = (unsigned int)math_ceil(mode_lib->ms.dram_change_vactive_det_fill_delay_us[plane_idx]); + out->vactive_det_fill_delay_us[dml2_pstate_type_uclk] = + (unsigned int)math_ceil(mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk][plane_idx]); } void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index) @@ -13001,7 +13025,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.mode_support_info.InvalidCombinationOfMALLUseForPState = mode_lib->ms.support.InvalidCombinationOfMALLUseForPState; out->informative.mode_support_info.ExceededMALLSize = mode_lib->ms.support.ExceededMALLSize; out->informative.mode_support_info.EnoughWritebackUnits = mode_lib->ms.support.EnoughWritebackUnits; - out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.temp_read_or_ppt_support; + out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.global_temp_read_or_ppt_supported; out->informative.mode_support_info.g6_temp_read_support = mode_lib->ms.support.g6_temp_read_support; out->informative.mode_support_info.ExceededMultistreamSlots = mode_lib->ms.support.ExceededMultistreamSlots; @@ -13027,7 +13051,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.mode_support_info.VRatioInPrefetchSupported = mode_lib->ms.support.VRatioInPrefetchSupported; out->informative.mode_support_info.DISPCLK_DPPCLK_Support = mode_lib->ms.support.DISPCLK_DPPCLK_Support; out->informative.mode_support_info.TotalAvailablePipesSupport = mode_lib->ms.support.TotalAvailablePipesSupport; + out->informative.mode_support_info.NumberOfTDLUT33cubeSupport = mode_lib->ms.support.NumberOfTDLUT33cubeSupport; out->informative.mode_support_info.ViewportSizeSupport = mode_lib->ms.support.ViewportSizeSupport; + out->informative.mode_support_info.qos_bandwidth_support = mode_lib->ms.support.qos_bandwidth_support; + out->informative.mode_support_info.dcfclk_support = mode_lib->ms.support.dcfclk_support; for (k = 0; k < out->display_config.num_planes; k++) { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h similarity index 
100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c similarity index 96% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c index 640087e862f8..cc4f0663c6d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c @@ -15,6 +15,8 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance memset(out, 0, sizeof(struct dml2_core_instance)); + out->project_id = project_id; + switch (project_id) { case dml2_project_dcn4x_stage1: result = false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h similarity index 98% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h index ffb8c09f37a5..1087a8c926ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h @@ -36,7 +36,9 @@ struct dml2_core_ip_params { unsigned int max_line_buffer_lines; unsigned int writeback_interface_buffer_size_kbytes; unsigned int max_num_dpp; + unsigned int max_num_opp; unsigned int max_num_otg; + unsigned int TDLUT_33cube_count; unsigned int max_num_wb; unsigned int max_dchub_pscl_bw_pix_per_clk; unsigned int max_pscl_lb_bw_pix_per_clk; @@ -46,6 +48,7 @@ struct dml2_core_ip_params { double max_vscl_ratio; unsigned int max_hscl_taps; unsigned int max_vscl_taps; + unsigned int odm_combine_support_mask; unsigned int num_dsc; unsigned int maximum_dsc_bits_per_component; unsigned int maximum_pixels_per_line_per_dsc_unit; @@ -82,7 +85,6 @@ struct dml2_core_ip_params { unsigned int subvp_swath_height_margin_lines; unsigned int subvp_fw_processing_delay_us; unsigned int subvp_pstate_allow_width_us; - // MRQ bool dcn_mrq_present; unsigned int zero_size_buffer_entries; @@ -103,6 +105,8 @@ struct dml2_core_internal_DmlPipe { unsigned int DPPPerSurface; bool ScalerEnabled; bool UPSPEnabled; + unsigned int UPSPVTaps; + enum dml2_sample_positioning UPSPSamplePositioning; enum dml2_rotation_angle RotationAngle; bool mirrored; unsigned int ViewportHeight; @@ -230,6 +234,7 @@ struct dml2_core_internal_mode_support_info { bool MSOOrODMSplitWithNonDPLink; bool NotEnoughLanesForMSO; bool NumberOfOTGSupport; + bool NumberOfTDLUT33cubeSupport; bool NumberOfHDMIFRLSupport; bool NumberOfDP2p0Support; bool WritebackScaleRatioAndTapsSupport; @@ -259,8 +264,11 @@ struct dml2_core_internal_mode_support_info { bool DCCMetaBufferSizeNotExceeded; enum 
dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES]; enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES]; + enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES]; + bool global_dram_clock_change_support_required; bool global_dram_clock_change_supported; bool global_fclk_change_supported; + bool global_temp_read_or_ppt_supported; bool USRRetrainingSupport; bool AvgBandwidthSupport; bool UrgVactiveBandwidthSupport; @@ -331,7 +339,6 @@ struct dml2_core_internal_mode_support_info { bool incorrect_imall_usage; bool g6_temp_read_support; - bool temp_read_or_ppt_support; struct dml2_core_internal_watermarks watermarks; bool dcfclk_support; @@ -566,6 +573,7 @@ struct dml2_core_internal_mode_support { enum dml2_odm_mode ODMMode[DML2_MAX_PLANES]; unsigned int SurfaceSizeInMALL[DML2_MAX_PLANES]; unsigned int NoOfDPP[DML2_MAX_PLANES]; + unsigned int NoOfOPP[DML2_MAX_PLANES]; bool MPCCombine[DML2_MAX_PLANES]; double dcfclk_deepsleep; double MinDPPCLKUsingSingleDPP[DML2_MAX_PLANES]; @@ -576,6 +584,7 @@ struct dml2_core_internal_mode_support { bool PTEBufferSizeNotExceeded[DML2_MAX_PLANES]; bool DCCMetaBufferSizeNotExceeded[DML2_MAX_PLANES]; unsigned int TotalNumberOfActiveDPP; + unsigned int TotalNumberOfActiveOPP; unsigned int TotalNumberOfSingleDPPSurfaces; unsigned int TotalNumberOfDCCActiveDPP; unsigned int Total3dlutActive; @@ -584,7 +593,7 @@ struct dml2_core_internal_mode_support { double VActiveLatencyHidingMargin[DML2_MAX_PLANES]; double VActiveLatencyHidingUs[DML2_MAX_PLANES]; unsigned int MaxVStartupLines[DML2_MAX_PLANES]; - double dram_change_vactive_det_fill_delay_us[DML2_MAX_PLANES]; + double pstate_vactive_det_fill_delay_us[dml2_pstate_type_count][DML2_MAX_PLANES]; unsigned int num_mcaches_l[DML2_MAX_PLANES]; unsigned int mcache_row_bytes_l[DML2_MAX_PLANES]; @@ -614,8 +623,8 @@ struct dml2_core_internal_mode_support { unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES]; unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_l[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_c[DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES]; unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES]; unsigned int cursor_bytes_per_line[DML2_MAX_PLANES]; @@ -639,7 +648,7 @@ struct dml2_core_internal_mode_support { unsigned int DSTYAfterScaler[DML2_MAX_PLANES]; unsigned int DSTXAfterScaler[DML2_MAX_PLANES]; - enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; + enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES]; }; /// @brief A mega structure that houses various info for model programming step. 
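The hunks above widen several per-plane arrays (pstate_bytes_required_l/c, pstate_vactive_det_fill_delay_us) by a leading dml2_pstate_type dimension, so callers now select the p-state domain first (e.g. uclk) and the plane second. Below is a minimal standalone C sketch of that indexing convention; it is illustrative only and not part of the patch — the enum ordering, the extra fclk entry and the DML2_MAX_PLANES value are assumptions made for the example, not the kernel definitions.

/*
 * Illustrative sketch of the [pstate type][plane] array shape introduced
 * by this patch. Enum values and DML2_MAX_PLANES are assumed here.
 */
#include <stdio.h>

#define DML2_MAX_PLANES 8 /* assumed for illustration */

enum dml2_pstate_type {            /* assumed ordering for illustration */
	dml2_pstate_type_uclk = 0,
	dml2_pstate_type_fclk,     /* hypothetical second domain */
	dml2_pstate_type_count,
};

/* mirrors the reshaped pstate_bytes_required_l: type-major, plane-minor */
static unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];

int main(void)
{
	unsigned int plane_idx = 0;

	/* callers pick the p-state domain first, then the plane */
	pstate_bytes_required_l[dml2_pstate_type_uclk][plane_idx] = 4096;

	printf("uclk bytes required for plane %u: %u\n", plane_idx,
	       pstate_bytes_required_l[dml2_pstate_type_uclk][plane_idx]);
	return 0;
}

The same shape is what lets the mode-support output report vactive_det_fill_delay_us[dml2_pstate_type_uclk] in the earlier hunk instead of carrying a separate dram_change_* field per plane.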
@@ -830,6 +839,7 @@ struct dml2_core_internal_mode_program { double max_urgent_latency_us; double df_response_time_us; + enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES]; // ------------------- // Output // ------------------- @@ -956,11 +966,12 @@ struct dml2_core_internal_mode_program { double MaxActiveFCLKChangeLatencySupported; bool USRRetrainingSupport; bool g6_temp_read_support; - bool temp_read_or_ppt_support; enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES]; enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES]; + enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES]; bool global_dram_clock_change_supported; bool global_fclk_change_supported; + bool global_temp_read_or_ppt_supported; double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES]; double WritebackAllowFCLKChangeEndPosition[DML2_MAX_PLANES]; double WritebackAllowDRAMClockChangeEndPosition[DML2_MAX_PLANES]; @@ -1127,8 +1138,8 @@ struct dml2_core_calcs_mode_support_locals { unsigned int cursor_bytes[DML2_MAX_PLANES]; bool stream_visited[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_l[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_c[DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES]; double prefetch_sw_bytes[DML2_MAX_PLANES]; double Tpre_rounded[DML2_MAX_PLANES]; @@ -1219,8 +1230,8 @@ struct dml2_core_calcs_mode_programming_locals { double Tr0_trips_flip_rounded[DML2_MAX_PLANES]; unsigned int per_pipe_flip_bytes[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_l[DML2_MAX_PLANES]; - unsigned int pstate_bytes_required_c[DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES]; double prefetch_sw_bytes[DML2_MAX_PLANES]; double Tpre_rounded[DML2_MAX_PLANES]; @@ -1306,7 +1317,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params { unsigned int HostVMMinPageSize; unsigned int DCCMetaBufferSizeBytes; bool mrq_present; - enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; + enum dml2_pstate_method *uclk_pstate_switch_modes; // Output bool *PTEBufferSizeNotExceeded; @@ -1733,10 +1744,12 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param unsigned int max_request_size_bytes; unsigned int *meta_row_height_l; unsigned int *meta_row_height_c; + enum dml2_pstate_method *uclk_pstate_switch_modes; // Output struct dml2_core_internal_watermarks *Watermark; enum dml2_pstate_change_support *DRAMClockChangeSupport; + bool *global_dram_clock_change_support_required; bool *global_dram_clock_change_supported; double *MaxActiveDRAMClockChangeLatencySupported; unsigned int *SubViewportLinesNeededInMALL; @@ -1747,10 +1760,10 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param double *VActiveLatencyHidingMargin; double *VActiveLatencyHidingUs; bool *g6_temp_read_support; - bool *temp_read_or_ppt_support; + enum dml2_pstate_change_support *temp_read_or_ppt_support; + bool *global_temp_read_or_ppt_supported; }; - struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params { const struct dml2_display_cfg *display_cfg; unsigned int ConfigReturnBufferSizeInKByte; @@ -2240,7 +2253,7 @@ struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params unsigned int *swath_width_c; unsigned int *swath_height_l; unsigned 
int *swath_height_c; - double latency_to_hide_us; + double latency_to_hide_us[DML2_MAX_PLANES]; /* outputs */ unsigned int *bytes_required_l; @@ -2308,6 +2321,7 @@ struct dml2_core_calcs_mode_support_ex { const struct dml2_display_cfg *in_display_cfg; const struct dml2_mcg_min_clock_table *min_clk_table; int min_clk_index; + enum dml2_project_id project_id; //unsigned int in_state_index; struct dml2_core_internal_mode_support_info *out_evaluation_info; }; @@ -2320,6 +2334,7 @@ struct dml2_core_calcs_mode_programming_ex { const struct dml2_mcg_min_clock_table *min_clk_table; const struct core_display_cfg_support_info *cfg_support_info; int min_clk_index; + enum dml2_project_id project_id; struct dml2_display_cfg_programming *programming; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c index 5f301befed16..b57d0f6ea6a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c @@ -306,6 +306,8 @@ void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mod DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize); if (!fail_only || support->g6_temp_read_support == 0) DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support); + if (!fail_only || (support->global_dram_clock_change_supported == 0 && support->global_dram_clock_change_support_required)) + DML_LOG_VERBOSE("DML: support: dram_clock_change_support = %d\n", support->global_dram_clock_change_supported); if (!fail_only || support->ImmediateFlipSupport == 0) DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport); if (!fail_only || support->LinkCapacitySupport == 0) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c diff --git 
a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h similarity index 97% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h index 02da6f45cbf7..f54fde8fba90 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h @@ -10,4 +10,4 @@ bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out); bool mcg_dcn4_unit_test(void); -#endif +#endif \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c similarity index 82% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c index d88b3e0082dd..c26e100fcaf2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c @@ -642,6 +642,11 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) int i = 0; struct 
dml2_pmo_instance *pmo = in_out->instance; + unsigned int base_list_size = 0; + const struct dml2_pmo_pstate_strategy *base_list = NULL; + unsigned int *expanded_list_size = NULL; + struct dml2_pmo_pstate_strategy *expanded_list = NULL; + pmo->soc_bb = in_out->soc_bb; pmo->ip_caps = in_out->ip_caps; pmo->mpc_combine_limit = 2; @@ -656,53 +661,71 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) pmo->options = in_out->options; /* generate permutations of p-state configs from base strategy list */ - for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) { - switch (i) { + for (i = 0; i < PMO_DCN4_MAX_DISPLAYS; i++) { + switch (i+1) { case 1: - DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = pmo->options->override_strategy_lists[i]; + base_list_size = pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_1_display; + base_list_size = base_strategy_list_1_display_size; + } + + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display; - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_1_display, - base_strategy_list_1_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); break; case 2: - DML_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = pmo->options->override_strategy_lists[i]; + base_list_size = pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_2_display; + base_list_size = base_strategy_list_2_display_size; + } + + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display; - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_2_display, - base_strategy_list_2_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); break; case 3: - DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = pmo->options->override_strategy_lists[i]; + base_list_size = pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_3_display; + base_list_size = base_strategy_list_3_display_size; + } + + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display; - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_3_display, - base_strategy_list_3_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); break; case 4: - DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = pmo->options->override_strategy_lists[i]; + base_list_size = 
pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_4_display; + base_list_size = base_strategy_list_4_display_size; + } + + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display; - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_4_display, - base_strategy_list_4_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); break; } + + DML_ASSERT(base_list_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + + /* populate list */ + pmo_dcn4_fams2_expand_base_pstate_strategies( + base_list, + base_list_size, + i + 1, + expanded_list, + expanded_list_size); } return true; @@ -1026,13 +1049,13 @@ static bool all_timings_support_vblank(const struct dml2_pmo_instance *pmo, return synchronizable; } -static unsigned int calc_svp_microschedule(const struct dml2_fams2_meta *fams2_meta) +static unsigned int calc_svp_microschedule(const struct dml2_pstate_meta *pstate_meta) { - return fams2_meta->contention_delay_otg_vlines + - fams2_meta->method_subvp.programming_delay_otg_vlines + - fams2_meta->method_subvp.phantom_vtotal + - fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + - fams2_meta->dram_clk_change_blackout_otg_vlines; + return pstate_meta->contention_delay_otg_vlines + + pstate_meta->method_subvp.programming_delay_otg_vlines + + pstate_meta->method_subvp.phantom_vtotal + + pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + + pstate_meta->blackout_otg_vlines; } static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo, @@ -1042,29 +1065,29 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo, unsigned int i; for (i = 0; i < DML2_MAX_PLANES; i++) { const struct dml2_stream_parameters *stream_descriptor; - const struct dml2_fams2_meta *stream_fams2_meta; + const struct dml2_pstate_meta *stream_pstate_meta; if (is_bit_set_in_bitfield(mask, i)) { stream_descriptor = &display_config->display_config.stream_descriptors[i]; - stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i]; + stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i]; if (!stream_descriptor->timing.drr_config.enabled) return false; /* cannot support required vtotal */ - if (stream_fams2_meta->method_drr.stretched_vtotal > stream_fams2_meta->max_vtotal) { + if (stream_pstate_meta->method_drr.stretched_vtotal > stream_pstate_meta->max_vtotal) { return false; } /* check rr is within bounds */ - if (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min || - stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) { + if (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min || + stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) { return false; } /* check required stretch is allowed */ if (stream_descriptor->timing.drr_config.max_instant_vtotal_delta > 0 && - stream_fams2_meta->method_drr.stretched_vtotal - stream_fams2_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) { + stream_pstate_meta->method_drr.stretched_vtotal - stream_pstate_meta->nom_vtotal > (int)stream_descriptor->timing.drr_config.max_instant_vtotal_delta) { return false; } } @@ -1079,7 +1102,7 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance 
*pmo, { const struct dml2_stream_parameters *stream_descriptor; const struct dml2_plane_parameters *plane_descriptor; - const struct dml2_fams2_meta *stream_fams2_meta; + const struct dml2_pstate_meta *stream_pstate_meta; unsigned int microschedule_vlines; unsigned int i; unsigned int mcaches_per_plane; @@ -1124,13 +1147,13 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo, for (i = 0; i < DML2_MAX_PLANES; i++) { if (is_bit_set_in_bitfield(mask, i)) { stream_descriptor = &display_config->display_config.stream_descriptors[i]; - stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i]; + stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i]; if (stream_descriptor->overrides.disable_subvp) { return false; } - microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_fams2_meta[i]); + microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_pstate_meta[i]); /* block if using an interlaced timing */ if (stream_descriptor->timing.interlaced) { @@ -1141,8 +1164,8 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo, * 2) refresh rate must be within the allowed bounds */ if (microschedule_vlines >= stream_descriptor->timing.v_active || - (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min || - stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) { + (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min || + stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) { return false; } } @@ -1232,43 +1255,43 @@ static bool all_planes_match_method(const struct display_configuation_with_meta } static void build_method_scheduling_params( - struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta, - struct dml2_fams2_meta *stream_fams2_meta) + struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta, + struct dml2_pstate_meta *stream_pstate_meta) { - stream_method_fams2_meta->allow_time_us = - (double)((int)stream_method_fams2_meta->allow_end_otg_vline - (int)stream_method_fams2_meta->allow_start_otg_vline) * - stream_fams2_meta->otg_vline_time_us; - if (stream_method_fams2_meta->allow_time_us >= stream_method_fams2_meta->period_us) { + stream_method_pstate_meta->allow_time_us = + (double)((int)stream_method_pstate_meta->allow_end_otg_vline - (int)stream_method_pstate_meta->allow_start_otg_vline) * + stream_pstate_meta->otg_vline_time_us; + if (stream_method_pstate_meta->allow_time_us >= stream_method_pstate_meta->period_us) { /* when allow wave overlaps an entire frame, it is always schedulable (DRR can do this)*/ - stream_method_fams2_meta->disallow_time_us = 0.0; + stream_method_pstate_meta->disallow_time_us = 0.0; } else { - stream_method_fams2_meta->disallow_time_us = - stream_method_fams2_meta->period_us - stream_method_fams2_meta->allow_time_us; + stream_method_pstate_meta->disallow_time_us = + stream_method_pstate_meta->period_us - stream_method_pstate_meta->allow_time_us; } } -static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta( +static struct dml2_pstate_per_method_common_meta *get_per_method_common_meta( struct dml2_pmo_instance *pmo, enum dml2_pstate_method stream_pstate_method, int stream_idx) { - struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL; + struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta = NULL; switch (stream_pstate_method) { case 
dml2_pstate_method_vactive: case dml2_pstate_method_fw_vactive_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common; + stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vactive.common; break; case dml2_pstate_method_vblank: case dml2_pstate_method_fw_vblank_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common; + stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vblank.common; break; case dml2_pstate_method_fw_svp: case dml2_pstate_method_fw_svp_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common; + stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_subvp.common; break; case dml2_pstate_method_fw_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common; + stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_drr.common; break; case dml2_pstate_method_reserved_hw: case dml2_pstate_method_reserved_fw: @@ -1277,10 +1300,10 @@ static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta( case dml2_pstate_method_count: case dml2_pstate_method_na: default: - stream_method_fams2_meta = NULL; + stream_method_pstate_meta = NULL; } - return stream_method_fams2_meta; + return stream_method_pstate_meta; } static bool is_timing_group_schedulable( @@ -1288,10 +1311,10 @@ static bool is_timing_group_schedulable( const struct display_configuation_with_meta *display_cfg, const struct dml2_pmo_pstate_strategy *pstate_strategy, const unsigned int timing_group_idx, - struct dml2_fams2_per_method_common_meta *group_fams2_meta) + struct dml2_pstate_per_method_common_meta *group_pstate_meta) { unsigned int i; - struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta; + struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta; unsigned int base_stream_idx = 0; struct dml2_pmo_scratch *s = &pmo->scratch; @@ -1305,31 +1328,31 @@ static bool is_timing_group_schedulable( } /* init allow start and end lines for timing group */ - stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx); - if (!stream_method_fams2_meta) + stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx); + if (!stream_method_pstate_meta) return false; - group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline; - group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline; - group_fams2_meta->period_us = stream_method_fams2_meta->period_us; + group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline; + group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline; + group_pstate_meta->period_us = stream_method_pstate_meta->period_us; for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) { if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) { - stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i); - if (!stream_method_fams2_meta) + stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i); + if 
(!stream_method_pstate_meta) continue; - if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) { + if (group_pstate_meta->allow_start_otg_vline < stream_method_pstate_meta->allow_start_otg_vline) { /* set group allow start to larger otg vline */ - group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline; + group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline; } - if (group_fams2_meta->allow_end_otg_vline > stream_method_fams2_meta->allow_end_otg_vline) { + if (group_pstate_meta->allow_end_otg_vline > stream_method_pstate_meta->allow_end_otg_vline) { /* set group allow end to smaller otg vline */ - group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline; + group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline; } /* check waveform still has positive width */ - if (group_fams2_meta->allow_start_otg_vline >= group_fams2_meta->allow_end_otg_vline) { + if (group_pstate_meta->allow_start_otg_vline >= group_pstate_meta->allow_end_otg_vline) { /* timing group is not schedulable */ return false; } @@ -1337,10 +1360,10 @@ static bool is_timing_group_schedulable( } /* calculate the rest of the meta */ - build_method_scheduling_params(group_fams2_meta, &pmo->scratch.pmo_dcn4.stream_fams2_meta[base_stream_idx]); + build_method_scheduling_params(group_pstate_meta, &pmo->scratch.pmo_dcn4.stream_pstate_meta[base_stream_idx]); - return group_fams2_meta->allow_time_us > 0.0 && - group_fams2_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us; + return group_pstate_meta->allow_time_us > 0.0 && + group_pstate_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us; } static bool is_config_schedulable( @@ -1354,7 +1377,7 @@ static bool is_config_schedulable( double max_allow_delay_us = 0.0; - memset(s->pmo_dcn4.group_common_fams2_meta, 0, sizeof(s->pmo_dcn4.group_common_fams2_meta)); + memset(s->pmo_dcn4.group_common_pstate_meta, 0, sizeof(s->pmo_dcn4.group_common_pstate_meta)); memset(s->pmo_dcn4.sorted_group_gtl_disallow_index, 0, sizeof(unsigned int) * DML2_MAX_PLANES); /* search for a general solution to the schedule */ @@ -1369,12 +1392,12 @@ static bool is_config_schedulable( for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) { s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i; s->pmo_dcn4.sorted_group_gtl_period_index[i] = i; - if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) { + if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_pstate_meta[i])) { /* synchronized timing group was not schedulable */ schedulable = false; break; } - max_allow_delay_us += s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us; + max_allow_delay_us += s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us; } if ((schedulable && s->pmo_dcn4.num_timing_groups <= 1) || !schedulable) { @@ -1391,8 +1414,8 @@ static bool is_config_schedulable( bool swapped = false; for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) { - double j_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us; - double jp1_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us; + double j_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us; + double 
jp1_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us; if (j_disallow_us < jp1_disallow_us) { /* swap as A < B */ swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j], @@ -1410,19 +1433,19 @@ static bool is_config_schedulable( * other display, or when >2 streams continue to halve the remaining allow time. */ for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) { - if (s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us <= 0.0) { + if (s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us <= 0.0) { /* this timing group always allows */ continue; } - double max_allow_time_us = s->pmo_dcn4.group_common_fams2_meta[i].allow_time_us; + double max_allow_time_us = s->pmo_dcn4.group_common_pstate_meta[i].allow_time_us; for (j = 0; j < s->pmo_dcn4.num_timing_groups; j++) { unsigned int sorted_j = s->pmo_dcn4.sorted_group_gtl_disallow_index[j]; /* stream can't overlap itself */ - if (i != sorted_j && s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us > 0.0) { + if (i != sorted_j && s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us > 0.0) { max_allow_time_us = math_min2( - s->pmo_dcn4.group_common_fams2_meta[sorted_j].allow_time_us, - (max_allow_time_us - s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us) / 2); + s->pmo_dcn4.group_common_pstate_meta[sorted_j].allow_time_us, + (max_allow_time_us - s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us) / 2); if (max_allow_time_us < 0.0) { /* failed exit early */ @@ -1450,8 +1473,8 @@ static bool is_config_schedulable( bool swapped = false; for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) { - double j_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us; - double jp1_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us; + double j_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us; + double jp1_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us; if (j_period_us < jp1_period_us) { /* swap as A < B */ swap(s->pmo_dcn4.sorted_group_gtl_period_index[j], @@ -1470,7 +1493,7 @@ static bool is_config_schedulable( unsigned int sorted_i = s->pmo_dcn4.sorted_group_gtl_period_index[i]; unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1]; - if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us || + if (s->pmo_dcn4.group_common_pstate_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_pstate_meta[sorted_ip1].period_us || (s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) { schedulable = false; break; @@ -1492,18 +1515,18 @@ static bool is_config_schedulable( /* default period_0 > period_1 */ unsigned int lrg_idx = 0; unsigned int sml_idx = 1; - if (s->pmo_dcn4.group_common_fams2_meta[0].period_us < s->pmo_dcn4.group_common_fams2_meta[1].period_us) { + if (s->pmo_dcn4.group_common_pstate_meta[0].period_us < s->pmo_dcn4.group_common_pstate_meta[1].period_us) { /* period_0 < period_1 */ lrg_idx = 1; sml_idx = 0; } - period_ratio = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us; - shift_per_period = s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio)); - max_shift_us = 
s->pmo_dcn4.group_common_fams2_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us; - max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us; + period_ratio = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us; + shift_per_period = s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio)); + max_shift_us = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us; + max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us; if (shift_per_period > 0.0 && - shift_per_period < s->pmo_dcn4.group_common_fams2_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us && + shift_per_period < s->pmo_dcn4.group_common_pstate_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us && max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) { schedulable = true; } @@ -1646,22 +1669,22 @@ static int get_vactive_pstate_margin(const struct display_configuation_with_meta return min_vactive_margin_us; } -static unsigned int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask) +static int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask) { unsigned char i; - unsigned int max_vactive_fill_us = 0; + int max_vactive_fill_us = 0; for (i = 0; i < DML2_MAX_PLANES; i++) { if (is_bit_set_in_bitfield(plane_mask, i)) { - if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us > max_vactive_fill_us) - max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us; + if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk] > max_vactive_fill_us) + max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk]; } } return max_vactive_fill_us; } -static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, +static void build_pstate_meta_per_stream(struct dml2_pmo_instance *pmo, struct display_configuation_with_meta *display_config, int stream_index) { @@ -1669,7 +1692,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, const struct dml2_stream_parameters *stream_descriptor = &display_config->display_config.stream_descriptors[stream_index]; const struct core_stream_support_info *stream_info = &display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index]; const struct dml2_timing_cfg *timing = &stream_descriptor->timing; - struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index]; + struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index]; /* worst case all other streams require some programming at the same time, 0 if only 1 stream */ unsigned int contention_delay_us = (ip_caps->fams2.vertical_interrupt_ack_delay_us + @@ -1677,142 +1700,142 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, (display_config->display_config.num_streams - 1); /* common */ - stream_fams2_meta->valid = true; 
- stream_fams2_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0; - stream_fams2_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active; - stream_fams2_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 / - (stream_fams2_meta->nom_vtotal * timing->h_total); - stream_fams2_meta->nom_frame_time_us = - (double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us; - stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active; + stream_pstate_meta->valid = true; + stream_pstate_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0; + stream_pstate_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active; + stream_pstate_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 / + (stream_pstate_meta->nom_vtotal * timing->h_total); + stream_pstate_meta->nom_frame_time_us = + (double)stream_pstate_meta->nom_vtotal * stream_pstate_meta->otg_vline_time_us; + stream_pstate_meta->vblank_start = timing->v_blank_end + timing->v_active; if (stream_descriptor->timing.drr_config.enabled == true) { if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) { - stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / + stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / ((double)stream_descriptor->timing.drr_config.min_refresh_uhz * stream_descriptor->timing.h_total) * 1e9); } else { /* assume min of 48Hz */ - stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / + stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / (48000000.0 * stream_descriptor->timing.h_total) * 1e9); } } else { - stream_fams2_meta->max_vtotal = stream_fams2_meta->nom_vtotal; + stream_pstate_meta->max_vtotal = stream_pstate_meta->nom_vtotal; } - stream_fams2_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 / - (stream_fams2_meta->max_vtotal * timing->h_total); - stream_fams2_meta->max_frame_time_us = - (double)stream_fams2_meta->max_vtotal * stream_fams2_meta->otg_vline_time_us; + stream_pstate_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 / + (stream_pstate_meta->max_vtotal * timing->h_total); + stream_pstate_meta->max_frame_time_us = + (double)stream_pstate_meta->max_vtotal * stream_pstate_meta->otg_vline_time_us; - stream_fams2_meta->scheduling_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->contention_delay_otg_vlines = - (unsigned int)math_ceil(contention_delay_us / stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->scheduling_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->contention_delay_otg_vlines = + (unsigned int)math_ceil(contention_delay_us / stream_pstate_meta->otg_vline_time_us); /* worst case allow to target needs to account for all streams' 
allow events overlapping, and 1 line for error */ - stream_fams2_meta->allow_to_target_delay_otg_vlines = - (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_fams2_meta->otg_vline_time_us)) + 1; - stream_fams2_meta->min_allow_width_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->allow_to_target_delay_otg_vlines = + (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_pstate_meta->otg_vline_time_us)) + 1; + stream_pstate_meta->min_allow_width_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_pstate_meta->otg_vline_time_us); /* this value should account for urgent latency */ - stream_fams2_meta->dram_clk_change_blackout_otg_vlines = + stream_pstate_meta->blackout_otg_vlines = (unsigned int)math_ceil(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us / - stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->otg_vline_time_us); /* scheduling params should be built based on the worst case for allow_time:disallow_time */ /* vactive */ if (display_config->display_config.num_streams == 1) { /* for single stream, guarantee at least an instant of allow */ - stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor( + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor( math_max2(0.0, - timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines)); + timing->v_active - math_max2(1.0, stream_pstate_meta->min_allow_width_otg_vlines) - stream_pstate_meta->blackout_otg_vlines)); } else { /* for multi stream, bound to a max fill time defined by IP caps */ - stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = - (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = + (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_pstate_meta->otg_vline_time_us); } - stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us = stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_fams2_meta->otg_vline_time_us; + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us = stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_pstate_meta->otg_vline_time_us; - if (stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) { - stream_fams2_meta->method_vactive.common.allow_start_otg_vline = - timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; - stream_fams2_meta->method_vactive.common.allow_end_otg_vline = - stream_fams2_meta->vblank_start - - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; + if (stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) { + stream_pstate_meta->method_vactive.common.allow_start_otg_vline = + timing->v_blank_end + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; + stream_pstate_meta->method_vactive.common.allow_end_otg_vline = + stream_pstate_meta->vblank_start - + stream_pstate_meta->blackout_otg_vlines; } else { - 
stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0; - stream_fams2_meta->method_vactive.common.allow_end_otg_vline = 0; + stream_pstate_meta->method_vactive.common.allow_start_otg_vline = 0; + stream_pstate_meta->method_vactive.common.allow_end_otg_vline = 0; } - stream_fams2_meta->method_vactive.common.period_us = stream_fams2_meta->nom_frame_time_us; - build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta); + stream_pstate_meta->method_vactive.common.period_us = stream_pstate_meta->nom_frame_time_us; + build_method_scheduling_params(&stream_pstate_meta->method_vactive.common, stream_pstate_meta); /* vblank */ - stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start; - stream_fams2_meta->method_vblank.common.allow_end_otg_vline = - stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1; - stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us; - build_method_scheduling_params(&stream_fams2_meta->method_vblank.common, stream_fams2_meta); + stream_pstate_meta->method_vblank.common.allow_start_otg_vline = stream_pstate_meta->vblank_start; + stream_pstate_meta->method_vblank.common.allow_end_otg_vline = + stream_pstate_meta->method_vblank.common.allow_start_otg_vline + 1; + stream_pstate_meta->method_vblank.common.period_us = stream_pstate_meta->nom_frame_time_us; + build_method_scheduling_params(&stream_pstate_meta->method_vblank.common, stream_pstate_meta); /* subvp */ - stream_fams2_meta->method_subvp.programming_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_subvp.phantom_vactive = - stream_fams2_meta->allow_to_target_delay_otg_vlines + - stream_fams2_meta->min_allow_width_otg_vlines + + stream_pstate_meta->method_subvp.programming_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_subvp.phantom_vactive = + stream_pstate_meta->allow_to_target_delay_otg_vlines + + stream_pstate_meta->min_allow_width_otg_vlines + stream_info->phantom_min_v_active; - stream_fams2_meta->method_subvp.phantom_vfp = - stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines; + stream_pstate_meta->method_subvp.phantom_vfp = + stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines; /* phantom vtotal = v_bp(vstartup) + v_sync(1) + v_fp(throttle_delay) + v_active(allow_to_target + min_allow + min_vactive)*/ - stream_fams2_meta->method_subvp.phantom_vtotal = + stream_pstate_meta->method_subvp.phantom_vtotal = stream_info->phantom_v_startup + - stream_fams2_meta->method_subvp.phantom_vfp + + stream_pstate_meta->method_subvp.phantom_vfp + 1 + - 
stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines + - stream_fams2_meta->method_subvp.phantom_vactive; - stream_fams2_meta->method_subvp.common.allow_start_otg_vline = + stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines + + stream_pstate_meta->method_subvp.phantom_vactive; + stream_pstate_meta->method_subvp.common.allow_start_otg_vline = stream_descriptor->timing.v_blank_end + - stream_fams2_meta->contention_delay_otg_vlines + - stream_fams2_meta->method_subvp.programming_delay_otg_vlines + - stream_fams2_meta->method_subvp.phantom_vtotal + - stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + - stream_fams2_meta->allow_to_target_delay_otg_vlines; - stream_fams2_meta->method_subvp.common.allow_end_otg_vline = - stream_fams2_meta->vblank_start - - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; - stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us; - build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta); + stream_pstate_meta->contention_delay_otg_vlines + + stream_pstate_meta->method_subvp.programming_delay_otg_vlines + + stream_pstate_meta->method_subvp.phantom_vtotal + + stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + + stream_pstate_meta->allow_to_target_delay_otg_vlines; + stream_pstate_meta->method_subvp.common.allow_end_otg_vline = + stream_pstate_meta->vblank_start - + stream_pstate_meta->blackout_otg_vlines; + stream_pstate_meta->method_subvp.common.period_us = stream_pstate_meta->nom_frame_time_us; + build_method_scheduling_params(&stream_pstate_meta->method_subvp.common, stream_pstate_meta); /* drr */ - stream_fams2_meta->method_drr.programming_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_drr.common.allow_start_otg_vline = - stream_fams2_meta->vblank_start + - stream_fams2_meta->allow_to_target_delay_otg_vlines; - stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us; + stream_pstate_meta->method_drr.programming_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_drr.common.allow_start_otg_vline = + stream_pstate_meta->vblank_start + + stream_pstate_meta->allow_to_target_delay_otg_vlines; + stream_pstate_meta->method_drr.common.period_us = stream_pstate_meta->nom_frame_time_us; if (display_config->display_config.num_streams <= 1) { /* only need to stretch vblank for blackout time */ - stream_fams2_meta->method_drr.stretched_vtotal = - stream_fams2_meta->nom_vtotal + - stream_fams2_meta->allow_to_target_delay_otg_vlines + - stream_fams2_meta->min_allow_width_otg_vlines + - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; + stream_pstate_meta->method_drr.stretched_vtotal = + stream_pstate_meta->nom_vtotal + + stream_pstate_meta->allow_to_target_delay_otg_vlines + + stream_pstate_meta->min_allow_width_otg_vlines + + stream_pstate_meta->blackout_otg_vlines; } else { /* multi display needs to always be schedulable */ - stream_fams2_meta->method_drr.stretched_vtotal = - stream_fams2_meta->nom_vtotal * 2 + - stream_fams2_meta->allow_to_target_delay_otg_vlines + - stream_fams2_meta->min_allow_width_otg_vlines + - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; + stream_pstate_meta->method_drr.stretched_vtotal = + stream_pstate_meta->nom_vtotal * 2 + + 
stream_pstate_meta->allow_to_target_delay_otg_vlines + + stream_pstate_meta->min_allow_width_otg_vlines + + stream_pstate_meta->blackout_otg_vlines; } - stream_fams2_meta->method_drr.common.allow_end_otg_vline = - stream_fams2_meta->method_drr.stretched_vtotal - - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; - build_method_scheduling_params(&stream_fams2_meta->method_drr.common, stream_fams2_meta); + stream_pstate_meta->method_drr.common.allow_end_otg_vline = + stream_pstate_meta->method_drr.stretched_vtotal - + stream_pstate_meta->blackout_otg_vlines; + build_method_scheduling_params(&stream_pstate_meta->method_drr.common, stream_pstate_meta); } static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo, @@ -1820,14 +1843,14 @@ static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo, int stream_index) { struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index]; - struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index]; + struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index]; stream_svp_meta->valid = true; /* PMO FAMS2 precaulcates these values */ - stream_svp_meta->v_active = stream_fams2_meta->method_subvp.phantom_vactive; - stream_svp_meta->v_front_porch = stream_fams2_meta->method_subvp.phantom_vfp; - stream_svp_meta->v_total = stream_fams2_meta->method_subvp.phantom_vtotal; + stream_svp_meta->v_active = stream_pstate_meta->method_subvp.phantom_vactive; + stream_svp_meta->v_front_porch = stream_pstate_meta->method_subvp.phantom_vfp; + stream_svp_meta->v_total = stream_pstate_meta->method_subvp.phantom_vtotal; } bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out) @@ -1879,7 +1902,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index); /* FAMS2 meta */ - build_fams2_meta_per_stream(pmo, display_config, stream_index); + build_pstate_meta_per_stream(pmo, display_config, stream_index); /* SVP meta */ build_subvp_meta_per_stream(pmo, display_config, stream_index); @@ -1939,9 +1962,6 @@ static void reset_display_configuration(struct display_configuation_with_meta *d for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) { display_config->stage3.stream_svp_meta[stream_index].valid = false; - - display_config->display_config.stream_descriptors[stream_index].overrides.minimize_active_latency_hiding = false; - display_config->display_config.overrides.best_effort_min_active_latency_hiding_us = 0; } for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) { @@ -1974,7 +1994,6 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta * plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr; display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr; - } } } @@ -2040,7 +2059,6 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met plane->overrides.reserved_vblank_time_ns); display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank; - } } } @@ -2055,6 +2073,7 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) { if 
(is_bit_set_in_bitfield(plane_mask, plane_index)) { plane = &display_config->display_config.plane_descriptors[plane_index]; + plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000); display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr; @@ -2076,8 +2095,8 @@ static void setup_planes_for_vactive_by_mask(struct display_configuation_with_me display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive; if (!pmo->options->disable_vactive_det_fill_bw_pad) { - display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us = - (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); + display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] = + (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); } } } @@ -2097,8 +2116,8 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr; if (!pmo->options->disable_vactive_det_fill_bw_pad) { - display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us = - (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); + display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] = + (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); } } } @@ -2144,9 +2163,9 @@ static bool setup_display_config(struct display_configuation_with_meta *display_ /* copy FAMS2 meta */ if (success) { display_config->stage3.fams2_required = fams2_required; - memcpy(&display_config->stage3.stream_fams2_meta, - &scratch->pmo_dcn4.stream_fams2_meta, - sizeof(struct dml2_fams2_meta) * DML2_MAX_PLANES); + memcpy(&display_config->stage3.stream_pstate_meta, + &scratch->pmo_dcn4.stream_pstate_meta, + sizeof(struct dml2_pstate_meta) * DML2_MAX_PLANES); } return success; @@ -2188,12 +2207,12 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp return false; for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) { - struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index]; + struct dml2_pstate_meta *stream_pstate_meta = &s->pmo_dcn4.stream_pstate_meta[stream_index]; if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive || s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) { if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) || - get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) { + get_vactive_det_fill_latency_delay_us(in_out->base_display_config, 
s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us) { p_state_supported = false; break; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h similarity index 97% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h index 7218de1824cc..b90f6263cd85 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h @@ -10,4 +10,4 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *out); -#endif +#endif \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h diff --git 
a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
similarity index 100%
rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
similarity index 100%
rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
similarity index 100%
rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
similarity index 93%
rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
index d52aa82283b3..1a6c0727cd2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
@@ -152,7 +152,7 @@ struct core_plane_support_info {
 int active_latency_hiding_us;
 int mall_svp_size_requirement_ways;
 int nominal_vblank_pstate_latency_hiding_us;
- unsigned int dram_change_vactive_det_fill_delay_us;
+ int vactive_det_fill_delay_us[dml2_pstate_type_count];
 };
 struct core_stream_support_info {
@@ -209,6 +209,7 @@ struct dml2_core_mode_support_result {
 unsigned int uclk_pstate_supported;
 unsigned int fclk_pstate_supported;
+ struct dml2_core_internal_watermarks watermarks;
 } global;
 struct {
@@ -255,56 +256,70 @@ struct dml2_implicit_svp_meta {
 unsigned long v_front_porch;
 };
-struct dml2_fams2_per_method_common_meta {
+struct dml2_pstate_per_method_common_meta {
 /* generic params */
- unsigned int allow_start_otg_vline;
- unsigned int allow_end_otg_vline;
+ int allow_start_otg_vline;
+ int allow_end_otg_vline;
 /* scheduling params */
 double allow_time_us;
 double disallow_time_us;
 double period_us;
 };
-struct dml2_fams2_meta {
+struct dml2_pstate_meta {
 bool valid;
 double otg_vline_time_us;
- unsigned int scheduling_delay_otg_vlines;
- unsigned int vertical_interrupt_ack_delay_otg_vlines;
- unsigned int allow_to_target_delay_otg_vlines;
- unsigned int contention_delay_otg_vlines;
- unsigned int min_allow_width_otg_vlines;
- unsigned int nom_vtotal;
- unsigned int vblank_start;
+ int scheduling_delay_otg_vlines;
+ int vertical_interrupt_ack_delay_otg_vlines;
+ int allow_to_target_delay_otg_vlines;
+ int contention_delay_otg_vlines;
+ int min_allow_width_otg_vlines;
+ int nom_vtotal;
+ int vblank_start;
 double nom_refresh_rate_hz;
 double nom_frame_time_us;
- unsigned int max_vtotal;
+ int max_vtotal;
 double min_refresh_rate_hz;
 double max_frame_time_us;
- unsigned int dram_clk_change_blackout_otg_vlines;
+ int blackout_otg_vlines;
+ int max_allow_delay_otg_vlines;
+ double nom_vblank_time_us;
 struct {
 double max_vactive_det_fill_delay_us;
- unsigned int max_vactive_det_fill_delay_otg_vlines;
- struct dml2_fams2_per_method_common_meta common;
+ double vactive_latency_hiding_us;
+ double reserved_vblank_required_us;
+ int max_vactive_det_fill_delay_otg_vlines;
+ int reserved_blank_required_vlines;
+ struct dml2_pstate_per_method_common_meta common;
 } method_vactive;
 struct {
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
 } method_vblank;
 struct {
- unsigned int programming_delay_otg_vlines;
- unsigned int df_throttle_delay_otg_vlines;
- unsigned int prefetch_to_mall_delay_otg_vlines;
+ int programming_delay_otg_vlines;
+ int df_throttle_delay_otg_vlines;
+ int prefetch_to_mall_delay_otg_vlines;
 unsigned long phantom_vactive;
 unsigned long phantom_vfp;
 unsigned long phantom_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
 } method_subvp;
 struct {
- unsigned int programming_delay_otg_vlines;
- unsigned int stretched_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ int programming_delay_otg_vlines;
+ int stretched_vtotal;
+ struct dml2_pstate_per_method_common_meta common;
 } method_drr;
 };
+/* mask of synchronized timings by stream index */
+struct dml2_pmo_synchronized_timing_groups {
+ unsigned int num_timing_groups;
+ unsigned int synchronized_timing_group_masks[DML2_MAX_PLANES];
+ bool group_is_drr_enabled[DML2_MAX_PLANES];
+ bool group_is_drr_active[DML2_MAX_PLANES];
+ double group_line_time_us[DML2_MAX_PLANES];
+};
+
 struct dml2_optimization_stage3_state {
 bool performed;
 bool success;
@@ -319,7 +334,7 @@ struct dml2_optimization_stage3_state {
 // Meta-data for FAMS2
 bool fams2_required;
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
 int min_clk_index_for_latency;
 };
@@ -472,6 +487,7 @@ struct dml2_core_scratch {
 };
 struct dml2_core_instance {
+ enum dml2_project_id project_id;
 struct dml2_mcg_min_clock_table *minimum_clock_table;
 struct dml2_core_internal_state_inputs inputs;
 struct dml2_core_internal_state_intermediates intermediates;
@@ -619,6 +635,12 @@ struct dml2_pmo_optimize_for_stutter_in_out {
 #define PMO_DCN4_MAX_NUM_VARIANTS 2
 #define PMO_DCN4_MAX_BASE_STRATEGIES 10
+struct dml2_scheduling_check_locals {
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
+};
+
 struct dml2_pmo_scratch {
 union {
 struct {
@@ -648,7 +670,7 @@ struct dml2_pmo_scratch {
 // Stores all the implicit SVP meta information indexed by stream index of the display
 // configuration under inspection, built at optimization stage init
 struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES];
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
 unsigned int optimal_vblank_reserved_time_for_stutter_us[DML2_PMO_STUTTER_CANDIDATE_LIST_SIZE];
 unsigned int num_stutter_candidates;
@@ -663,7 +685,7 @@ struct dml2_pmo_scratch {
 double group_line_time_us[DML2_MAX_PLANES];
 /* scheduling check locals */
- struct dml2_fams2_per_method_common_meta group_common_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
 unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
 unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
 double group_phase_offset[DML2_MAX_PLANES];
diff --git
a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h index 140ec01545db..55b3e3ca54f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h @@ -23,7 +23,7 @@ * Authors: AMD * */ - + #ifndef __DML2_INTERNAL_TYPES_H__ #define __DML2_INTERNAL_TYPES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c index c59f825cfae9..66040c877d68 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c @@ -24,6 +24,7 @@ * */ + #include "dml2_dc_types.h" #include "dml2_internal_types.h" #include "dml2_utils.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c index 3b866e876bf4..d834cb595afa 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c @@ -301,6 +301,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0; break; + case dml_project_dcn401: out->pct_ideal_fabric_bw_after_urgent = 76; //67; 
out->max_avg_sdp_bw_use_normal_percent = 75; //80; @@ -424,6 +425,8 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, p->in_states->state_array[1].dcfclk_mhz = 1434.0; p->in_states->state_array[1].dram_speed_mts = 1000 * transactions_per_mem_clock; break; + + case dml_project_dcn401: p->in_states->num_states = 2; transactions_per_mem_clock = 16; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h index f7d30b47beff..d459f93cf40b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h @@ -31,3 +31,4 @@ */ #include "os_types.h" #include "cmntypes.h" + diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h index 2a2f84e07ca8..7fadbe6d7af4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h @@ -23,6 
+23,7 @@ * Authors: AMD * */ + #ifndef __DML_LOGGING_H__ #define __DML_LOGGING_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c index 01480a04f85e..ce91e5d28956 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c @@ -199,6 +199,8 @@ void dpp_reset(struct dpp *dpp_base) memset(&dpp->scl_data, 0, sizeof(dpp->scl_data)); memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data)); + + dpp_base->cursor_offload = false; } @@ -484,10 +486,12 @@ void dpp1_set_cursor_position( cur_en = 0; /* not visible beyond top edge*/ if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) { - REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); - - dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + if (!dpp_base->cursor_offload) + REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); } + + dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en; } void dpp1_cnv_set_optional_cursor_attributes( @@ -497,8 +501,13 @@ void dpp1_cnv_set_optional_cursor_attributes( struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); if (attr) { - REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale); + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale); + } + + dpp_base->att.fp_scale_bias.bits.fp_bias = attr->bias; + dpp_base->att.fp_scale_bias.bits.fp_scale = attr->scale; } } diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h index f466182963f7..b12f34345a58 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h @@ -1348,7 +1348,8 @@ struct dcn_dpp_mask { uint32_t CURSOR0_COLOR1; \ uint32_t DPP_CONTROL; \ uint32_t CM_HDR_MULT_COEF; \ - uint32_t CURSOR0_FP_SCALE_BIAS; + uint32_t CURSOR0_FP_SCALE_BIAS; \ + uint32_t OBUF_CONTROL; struct dcn_dpp_registers { DPP_COMMON_REG_VARIABLE_LIST @@ -1450,7 +1451,6 @@ void dpp1_set_degamma( void dpp1_set_degamma_pwl(struct dpp *dpp_base, const struct pwl_params *params); - void dpp_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 4f569cd8a5d6..ef4a16117181 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -84,6 +84,22 @@ void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) } } +void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + dpp_reg_state->recout_start = REG_READ(RECOUT_START); + dpp_reg_state->recout_size = REG_READ(RECOUT_SIZE); + dpp_reg_state->scl_horz_filter_scale_ratio = REG_READ(SCL_HORZ_FILTER_SCALE_RATIO); + dpp_reg_state->scl_vert_filter_scale_ratio = REG_READ(SCL_VERT_FILTER_SCALE_RATIO); + dpp_reg_state->scl_mode = REG_READ(SCL_MODE); + dpp_reg_state->cm_control = REG_READ(CM_CONTROL); + dpp_reg_state->dpp_control = REG_READ(DPP_CONTROL); + dpp_reg_state->dscl_control = REG_READ(DSCL_CONTROL); + dpp_reg_state->obuf_control = REG_READ(OBUF_CONTROL); + dpp_reg_state->mpc_size = REG_READ(MPC_SIZE); +} + /*program post scaler scs block in dpp CM*/ void dpp3_program_post_csc( struct dpp 
*dpp_base, @@ -396,17 +412,21 @@ void dpp3_set_cursor_attributes( } } - REG_UPDATE_3(CURSOR0_CONTROL, - CUR0_MODE, color_format, - CUR0_EXPANSION_MODE, 0, - CUR0_ROM_EN, cur_rom_en); + if (!dpp_base->cursor_offload) + REG_UPDATE_3(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0, + CUR0_ROM_EN, cur_rom_en); if (color_format == CURSOR_MODE_MONO) { /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); + + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } } dpp_base->att.cur0_ctl.bits.expansion_mode = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h index f236824126e9..d4a70b4379ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h @@ -594,6 +594,8 @@ void dpp3_program_CM_dealpha( void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s); +void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state); + bool dpp3_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c index fa67e54bf94e..8a5aa5e86850 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c @@ -134,6 +134,7 @@ static struct dpp_funcs dcn32_dpp_funcs = { .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, + .dpp_read_reg_state = dpp30_read_reg_state, }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c index f7a373a3d70a..977d83bf7741 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -95,6 +95,7 @@ void dpp35_program_bias_and_scale_fcnv( static struct dpp_funcs dcn35_dpp_funcs = { .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, .dpp_read_state = dpp30_read_state, + .dpp_read_reg_state = dpp30_read_reg_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c index 36187f890d5d..96c2c853de42 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c @@ -248,6 +248,7 @@ static struct dpp_funcs dcn401_dpp_funcs = { .set_optional_cursor_attributes = dpp401_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_read_reg_state = dpp30_read_reg_state, .set_cursor_matrix = dpp401_set_cursor_matrix, }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c index 7aab77b58869..62bf7cea21d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c @@ -103,17 +103,21 @@ void dpp401_set_cursor_attributes( } } - REG_UPDATE_3(CURSOR0_CONTROL, - CUR0_MODE, color_format, - 
CUR0_EXPANSION_MODE, 0, - CUR0_ROM_EN, cur_rom_en); + if (!dpp_base->cursor_offload) + REG_UPDATE_3(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0, + CUR0_ROM_EN, cur_rom_en); if (color_format == CURSOR_MODE_MONO) { /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); + + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } } dpp_base->att.cur0_ctl.bits.expansion_mode = 0; @@ -132,10 +136,12 @@ void dpp401_set_cursor_position( uint32_t cur_en = pos->enable ? 1 : 0; if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) { - REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); - - dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + if (!dpp_base->cursor_offload) + REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); } + + dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en; } void dpp401_set_optional_cursor_attributes( @@ -145,10 +151,17 @@ void dpp401_set_optional_cursor_attributes( struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); if (attr) { - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale); + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale); + } + + dpp_base->att.fp_scale_bias_g_y.bits.fp_bias_g_y = attr->bias; + dpp_base->att.fp_scale_bias_g_y.bits.fp_scale_g_y = attr->scale; + dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb = attr->bias; + dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb = attr->scale; } } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c index 89f0d999bf35..242f1e6f0d8f 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c @@ -35,6 +35,7 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const static const struct dsc_funcs dcn20_dsc_funcs = { .dsc_get_enc_caps = dsc2_get_enc_caps, .dsc_read_state = dsc2_read_state, + .dsc_read_reg_state = dsc2_read_reg_state, .dsc_validate_stream = dsc2_validate_stream, .dsc_set_config = dsc2_set_config, .dsc_get_packed_pps = dsc2_get_packed_pps, @@ -155,6 +156,13 @@ void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source); } +void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state) +{ + struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + + dccg_reg_state->dsc_top_control = REG_READ(DSC_TOP_CONTROL); + dccg_reg_state->dscc_interrupt_control_status = REG_READ(DSCC_INTERRUPT_CONTROL_STATUS); +} bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg) { @@ -407,7 +415,7 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || 
dsc_reg_vals->num_slices_h > 1) ? 0xF : 0; // Need to find the ceiling value for the slice width - dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h; + dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dsc_padding + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h; // TODO: in addition to validating slice height (pic height must be divisible by slice height), // see what happens when the same condition doesn't apply for slice_width/pic_width. dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h index a9c04fc95bd1..2337c3a97235 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h @@ -606,6 +606,7 @@ bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, uint8_t *dsc_packed_pps); void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); +void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state); bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, struct dsc_optc_config *dsc_optc_cfg); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c index 6f4f5a3c4861..e712985f7abd 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c @@ -28,10 +28,11 @@ #include "reg_helper.h" static void dsc35_enable(struct display_stream_compressor *dsc, int opp_pipe); +static void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz); static const struct dsc_funcs dcn35_dsc_funcs = { - .dsc_get_enc_caps = dsc2_get_enc_caps, .dsc_read_state = dsc2_read_state, + .dsc_read_reg_state = dsc2_read_reg_state, .dsc_validate_stream = dsc2_validate_stream, .dsc_set_config = dsc2_set_config, .dsc_get_packed_pps = dsc2_get_packed_pps, @@ -39,6 +40,7 @@ static const struct dsc_funcs dcn35_dsc_funcs = { .dsc_disable = dsc2_disable, .dsc_disconnect = dsc2_disconnect, .dsc_wait_disconnect_pending_clear = dsc2_wait_disconnect_pending_clear, + .dsc_get_single_enc_caps = dsc35_get_single_enc_caps, }; /* Macro definitios for REG_SET macros*/ @@ -110,3 +112,31 @@ void dsc35_set_fgcg(struct dcn20_dsc *dsc20, bool enable) { REG_UPDATE(DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, !enable); } + +void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz) +{ + dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */ + + dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1; + + dsc_enc_caps->lb_bit_depth = 13; + dsc_enc_caps->is_block_pred_supported = true; + + dsc_enc_caps->color_formats.bits.RGB = 1; + dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; + dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; + + dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1; + 
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1; + dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1; + + dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000; + + dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */ + dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */ +} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c index 7bd92ae8b13e..c1bdbb38c690 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c @@ -26,6 +26,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = { .dsc_disconnect = dsc401_disconnect, .dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear, .dsc_get_single_enc_caps = dsc401_get_single_enc_caps, + .dsc_read_reg_state = dsc2_read_reg_state }; /* Macro definitios for REG_SET macros*/ diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h index b0bd1f9425b5..81c83d5fe042 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h @@ -41,6 +41,7 @@ struct dsc_config { enum dc_color_depth color_depth; /* Bits per component */ bool is_odm; struct dc_dsc_config dc_dsc_cfg; + uint32_t dsc_padding; }; @@ -65,6 +66,10 @@ struct dcn_dsc_state { uint32_t dsc_opp_source; }; +struct dcn_dsc_reg_state { + uint32_t dsc_top_control; + uint32_t dscc_interrupt_control_status; +}; /* DSC encoder capabilities * They differ from the DPCD DSC caps because they are based on AMD DSC encoder caps. @@ -99,6 +104,7 @@ struct dsc_enc_caps { struct dsc_funcs { void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); + void (*dsc_read_reg_state)(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state); bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, struct dsc_optc_config *dsc_optc_cfg); diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c index e7e5f6d4778e..181a93dc46e6 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c @@ -440,33 +440,15 @@ void hubbub3_init_watermarks(struct hubbub *hubbub) REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg); } -void hubbub3_get_det_sizes(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes) +void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - REG_GET_2(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, &curr_det_sizes[0], - DET0_SIZE, &target_det_sizes[0]); - - REG_GET_2(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, &curr_det_sizes[1], - DET1_SIZE, &target_det_sizes[1]); - - REG_GET_2(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, &curr_det_sizes[2], - DET2_SIZE, &target_det_sizes[2]); - - REG_GET_2(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, &curr_det_sizes[3], - DET3_SIZE, &target_det_sizes[3]); - -} - -uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - uint32_t compbuf_config_error = 0; - - 
REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, - &compbuf_config_error); - - return compbuf_config_error; + hubbub_reg_state->det0_ctrl = REG_READ(DCHUBBUB_DET0_CTRL); + hubbub_reg_state->det1_ctrl = REG_READ(DCHUBBUB_DET1_CTRL); + hubbub_reg_state->det2_ctrl = REG_READ(DCHUBBUB_DET2_CTRL); + hubbub_reg_state->det3_ctrl = REG_READ(DCHUBBUB_DET3_CTRL); + hubbub_reg_state->compbuf_ctrl = REG_READ(DCHUBBUB_COMPBUF_CTRL); } static const struct hubbub_funcs hubbub30_funcs = { @@ -486,8 +468,7 @@ static const struct hubbub_funcs hubbub30_funcs = { .force_pstate_change_control = hubbub3_force_pstate_change_control, .init_watermarks = hubbub3_init_watermarks, .hubbub_read_state = hubbub2_read_state, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state }; void hubbub3_construct(struct dcn20_hubbub *hubbub3, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h index 49a469969d36..9e14de3ccaee 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h @@ -133,10 +133,6 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub, void hubbub3_init_watermarks(struct hubbub *hubbub); -void hubbub3_get_det_sizes(struct hubbub *hubbub, - uint32_t *curr_det_sizes, - uint32_t *target_det_sizes); - -uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub); +void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state); #endif diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c index cdb20251a154..5a03758e3de6 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c @@ -933,8 +933,8 @@ int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub, dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config); } - - dcn21_dchvm_init(hubbub); + if (hubbub->funcs->dchvm_init) + hubbub->funcs->dchvm_init(hubbub); return NUM_VMID; } @@ -1071,8 +1071,8 @@ static const struct hubbub_funcs hubbub31_funcs = { .program_compbuf_size = dcn31_program_compbuf_size, .init_crb = dcn31_init_crb, .hubbub_read_state = hubbub2_read_state, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state, + .dchvm_init = dcn21_dchvm_init }; void hubbub31_construct(struct dcn20_hubbub *hubbub31, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c index 4d4ca6d77bbd..237331b35378 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c @@ -1037,8 +1037,7 @@ static const struct hubbub_funcs hubbub32_funcs = { .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, .set_request_limit = hubbub32_set_request_limit, .get_mall_en = hubbub32_get_mall_en, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state }; void hubbub32_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c index a443722a8632..43ba399f4822 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -549,6 +549,55 @@ void hubbub35_init(struct hubbub *hubbub)
 memset(&hubbub2->watermarks.a.cstate_pstate, 0, sizeof(hubbub2->watermarks.a.cstate_pstate));
 }
+void dcn35_dchvm_init(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t riommu_active;
+ int i;
+
+ //Init DCHVM block
+ REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
+
+ //Poll until RIOMMU_ACTIVE = 1
+ for (i = 0; i < 100; i++) {
+ REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
+
+ if (riommu_active)
+ break;
+ else
+ udelay(5);
+ }
+
+ if (riommu_active) {
+ // Disable gating and memory power requests
+ REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 1);
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 1,
+ HVM_DISPCLK_G_GATE_DIS, 1,
+ HVM_DCFCLK_R_GATE_DIS, 1,
+ HVM_DCFCLK_G_GATE_DIS, 1);
+
+ //Reflect the power status of DCHUBBUB
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
+
+ //Start rIOMMU prefetching
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
+
+ //Poll until HOSTVM_PREFETCH_DONE = 1
+ REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+
+ //Enable memory power requests
+ REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 0);
+ // Enable dynamic clock gating
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 0,
+ HVM_DISPCLK_G_GATE_DIS, 0,
+ HVM_DCFCLK_R_GATE_DIS, 0,
+ HVM_DCFCLK_G_GATE_DIS, 0);
+ hubbub->riommu_active = true;
+ }
+}
+
 /*static void hubbub35_set_request_limit(struct hubbub *hubbub,
 int memory_channel_count,
 int words_per_channel)
@@ -589,8 +638,8 @@ static const struct hubbub_funcs hubbub35_funcs = {
 .hubbub_read_state = hubbub2_read_state,
 .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
 .dchubbub_init = hubbub35_init,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
+ .dchvm_init = dcn35_dchvm_init
 };
 void hubbub35_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
index 23fecf88556c..9f65fff1bd4d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
@@ -168,4 +168,5 @@ void dcn35_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase);
 void dcn35_init_crb(struct hubbub *hubbub);
 void hubbub35_init(struct hubbub *hubbub);
+void dcn35_dchvm_init(struct hubbub *hubbub);
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index a36273a52880..d11afd1ce72a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1247,8 +1247,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
 .program_compbuf_segments = dcn401_program_compbuf_segments,
 .wait_for_det_update = dcn401_wait_for_det_update,
 .program_arbiter = dcn401_program_arbiter,
- .get_det_sizes = hubbub3_get_det_sizes,
- .compbuf_config_error = hubbub3_compbuf_config_error,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
 };
 void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c index 9b026600b90e..6378e3fd7249 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c @@ -550,6 +550,7 @@ void hubp_reset(struct hubp *hubp) { memset(&hubp->pos, 0, sizeof(hubp->pos)); memset(&hubp->att, 0, sizeof(hubp->att)); + hubp->cursor_offload = false; } void hubp1_program_surface_config( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h index cf2eb9793008..f2571076fc50 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h @@ -105,7 +105,9 @@ SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\ SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\ SRI(HUBP_CLK_CNTL, HUBP, id),\ - SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id) + SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id),\ + SRI(HUBP_MEASURE_WIN_CTRL_DCFCLK, HUBP, id),\ + SRI(HUBP_MEASURE_WIN_CTRL_DPPCLK, HUBP, id) /* Register address initialization macro for ASICs with VM */ #define HUBP_REG_LIST_DCN_VM(id)\ @@ -251,7 +253,19 @@ uint32_t CURSOR_HOT_SPOT; \ uint32_t CURSOR_DST_OFFSET; \ uint32_t HUBP_CLK_CNTL; \ - uint32_t HUBPRET_READ_LINE_VALUE + uint32_t HUBPRET_READ_LINE_VALUE; \ + uint32_t HUBP_MEASURE_WIN_CTRL_DCFCLK; \ + uint32_t HUBP_MEASURE_WIN_CTRL_DPPCLK; \ + uint32_t HUBPRET_INTERRUPT; \ + uint32_t HUBPRET_MEM_PWR_CTRL; \ + uint32_t HUBPRET_MEM_PWR_STATUS; \ + uint32_t HUBPRET_READ_LINE_CTRL0; \ + uint32_t HUBPRET_READ_LINE_CTRL1; \ + uint32_t HUBPRET_READ_LINE0; \ + uint32_t HUBPRET_READ_LINE1; \ + uint32_t HUBPREQ_MEM_PWR_CTRL; \ + uint32_t HUBPREQ_MEM_PWR_STATUS + #define HUBP_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -688,6 +702,123 @@ struct dcn_fl_regs_st { uint32_t lut_fl_mode; uint32_t lut_fl_format; }; +struct dcn_hubp_reg_state { + uint32_t hubp_cntl; + uint32_t mall_config; + uint32_t mall_sub_vp; + uint32_t hubp_req_size_config; + uint32_t hubp_req_size_config_c; + uint32_t vmpg_config; + uint32_t addr_config; + uint32_t pri_viewport_dimension; + uint32_t pri_viewport_dimension_c; + uint32_t pri_viewport_start; + uint32_t pri_viewport_start_c; + uint32_t sec_viewport_dimension; + uint32_t sec_viewport_dimension_c; + uint32_t sec_viewport_start; + uint32_t sec_viewport_start_c; + uint32_t surface_config; + uint32_t tiling_config; + uint32_t clk_cntl; + uint32_t mall_status; + uint32_t measure_win_ctrl_dcfclk; + uint32_t measure_win_ctrl_dppclk; + + uint32_t blank_offset_0; + uint32_t blank_offset_1; + uint32_t cursor_settings; + uint32_t dcn_cur0_ttu_cntl0; + uint32_t dcn_cur0_ttu_cntl1; + uint32_t dcn_cur1_ttu_cntl0; + uint32_t dcn_cur1_ttu_cntl1; + uint32_t dcn_dmdat_vm_cntl; + uint32_t dcn_expansion_mode; + uint32_t dcn_global_ttu_cntl; + uint32_t dcn_surf0_ttu_cntl0; + uint32_t dcn_surf0_ttu_cntl1; + uint32_t dcn_surf1_ttu_cntl0; + uint32_t dcn_surf1_ttu_cntl1; + uint32_t dcn_ttu_qos_wm; + uint32_t dcn_vm_mx_l1_tlb_cntl; + uint32_t dcn_vm_system_aperture_high_addr; + uint32_t dcn_vm_system_aperture_low_addr; + uint32_t dcsurf_flip_control; + uint32_t dcsurf_flip_control2; + uint32_t dcsurf_primary_meta_surface_address; + uint32_t dcsurf_primary_meta_surface_address_c; + uint32_t dcsurf_primary_meta_surface_address_high; + uint32_t dcsurf_primary_meta_surface_address_high_c; + uint32_t dcsurf_primary_surface_address; + uint32_t dcsurf_primary_surface_address_c; + uint32_t dcsurf_primary_surface_address_high; + 
uint32_t dcsurf_primary_surface_address_high_c; + uint32_t dcsurf_secondary_meta_surface_address; + uint32_t dcsurf_secondary_meta_surface_address_c; + uint32_t dcsurf_secondary_meta_surface_address_high; + uint32_t dcsurf_secondary_meta_surface_address_high_c; + uint32_t dcsurf_secondary_surface_address; + uint32_t dcsurf_secondary_surface_address_c; + uint32_t dcsurf_secondary_surface_address_high; + uint32_t dcsurf_secondary_surface_address_high_c; + uint32_t dcsurf_surface_control; + uint32_t dcsurf_surface_earliest_inuse; + uint32_t dcsurf_surface_earliest_inuse_c; + uint32_t dcsurf_surface_earliest_inuse_high; + uint32_t dcsurf_surface_earliest_inuse_high_c; + uint32_t dcsurf_surface_flip_interrupt; + uint32_t dcsurf_surface_inuse; + uint32_t dcsurf_surface_inuse_c; + uint32_t dcsurf_surface_inuse_high; + uint32_t dcsurf_surface_inuse_high_c; + uint32_t dcsurf_surface_pitch; + uint32_t dcsurf_surface_pitch_c; + uint32_t dst_after_scaler; + uint32_t dst_dimensions; + uint32_t dst_y_delta_drq_limit; + uint32_t flip_parameters_0; + uint32_t flip_parameters_1; + uint32_t flip_parameters_2; + uint32_t flip_parameters_3; + uint32_t flip_parameters_4; + uint32_t flip_parameters_5; + uint32_t flip_parameters_6; + uint32_t hubpreq_mem_pwr_ctrl; + uint32_t hubpreq_mem_pwr_status; + uint32_t nom_parameters_0; + uint32_t nom_parameters_1; + uint32_t nom_parameters_2; + uint32_t nom_parameters_3; + uint32_t nom_parameters_4; + uint32_t nom_parameters_5; + uint32_t nom_parameters_6; + uint32_t nom_parameters_7; + uint32_t per_line_delivery; + uint32_t per_line_delivery_pre; + uint32_t prefetch_settings; + uint32_t prefetch_settings_c; + uint32_t ref_freq_to_pix_freq; + uint32_t uclk_pstate_force; + uint32_t vblank_parameters_0; + uint32_t vblank_parameters_1; + uint32_t vblank_parameters_2; + uint32_t vblank_parameters_3; + uint32_t vblank_parameters_4; + uint32_t vblank_parameters_5; + uint32_t vblank_parameters_6; + uint32_t vmid_settings_0; + + uint32_t hubpret_control; + uint32_t hubpret_interrupt; + uint32_t hubpret_mem_pwr_ctrl; + uint32_t hubpret_mem_pwr_status; + uint32_t hubpret_read_line_ctrl0; + uint32_t hubpret_read_line_ctrl1; + uint32_t hubpret_read_line_status; + uint32_t hubpret_read_line_value; + uint32_t hubpret_read_line0; + uint32_t hubpret_read_line1; +}; struct dcn_hubp_state { struct _vcs_dpi_display_dlg_regs_st dlg_attr; @@ -718,7 +849,6 @@ struct dcn_hubp_state { uint32_t hubp_cntl; uint32_t flip_control; }; - struct dcn10_hubp { struct hubp base; struct dcn_hubp_state state; diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c index 91259b896e03..92288de4cc10 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c @@ -613,26 +613,28 @@ void hubp2_cursor_set_attributes( hubp->curs_attr = *attr; - REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, - CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); - REG_UPDATE(CURSOR_SURFACE_ADDRESS, - CURSOR_SURFACE_ADDRESS, attr->address.low_part); + if (!hubp->cursor_offload) { + REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, + CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); + REG_UPDATE(CURSOR_SURFACE_ADDRESS, + CURSOR_SURFACE_ADDRESS, attr->address.low_part); - REG_UPDATE_2(CURSOR_SIZE, - CURSOR_WIDTH, attr->width, - CURSOR_HEIGHT, attr->height); + REG_UPDATE_2(CURSOR_SIZE, + CURSOR_WIDTH, attr->width, + CURSOR_HEIGHT, attr->height); - REG_UPDATE_4(CURSOR_CONTROL, - CURSOR_MODE, attr->color_format, 
- CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, - CURSOR_PITCH, hw_pitch, - CURSOR_LINES_PER_CHUNK, lpc); + REG_UPDATE_4(CURSOR_CONTROL, + CURSOR_MODE, attr->color_format, + CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, + CURSOR_PITCH, hw_pitch, + CURSOR_LINES_PER_CHUNK, lpc); - REG_SET_2(CURSOR_SETTINGS, 0, - /* no shift of the cursor HDL schedule */ - CURSOR0_DST_Y_OFFSET, 0, - /* used to shift the cursor chunk request deadline */ - CURSOR0_CHUNK_HDL_ADJUST, 3); + REG_SET_2(CURSOR_SETTINGS, 0, + /* no shift of the cursor HDL schedule */ + CURSOR0_DST_Y_OFFSET, 0, + /* used to shift the cursor chunk request deadline */ + CURSOR0_CHUNK_HDL_ADJUST, 3); + } hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part; hubp->att.SURFACE_ADDR = attr->address.low_part; @@ -1059,23 +1061,28 @@ void hubp2_cursor_set_position( cur_en = 0; /* not visible beyond top edge*/ if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) { - if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) + bool cursor_not_programmed = hubp->att.SURFACE_ADDR == 0 && hubp->att.SURFACE_ADDR_HIGH == 0; + + if (cur_en && cursor_not_programmed) hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr); - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, cur_en); + if (!hubp->cursor_offload) + REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en); } - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, pos->x, - CURSOR_Y_POSITION, pos->y); + if (!hubp->cursor_offload) { + REG_SET_2(CURSOR_POSITION, 0, + CURSOR_X_POSITION, pos->x, + CURSOR_Y_POSITION, pos->y); - REG_SET_2(CURSOR_HOT_SPOT, 0, - CURSOR_HOT_SPOT_X, pos->x_hotspot, - CURSOR_HOT_SPOT_Y, pos->y_hotspot); + REG_SET_2(CURSOR_HOT_SPOT, 0, + CURSOR_HOT_SPOT_X, pos->x_hotspot, + CURSOR_HOT_SPOT_Y, pos->y_hotspot); + + REG_SET(CURSOR_DST_OFFSET, 0, + CURSOR_DST_X_OFFSET, dst_x_offset); + } - REG_SET(CURSOR_DST_OFFSET, 0, - CURSOR_DST_X_OFFSET, dst_x_offset); /* TODO Handle surface pixel formats other than 4:4:4 */ /* Cursor Position Register Config */ hubp->pos.cur_ctl.bits.cur_enable = cur_en; diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h index f325db555102..7062e6653062 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h @@ -145,7 +145,8 @@ uint32_t FLIP_PARAMETERS_2;\ uint32_t DCN_CUR1_TTU_CNTL0;\ uint32_t DCN_CUR1_TTU_CNTL1;\ - uint32_t VMID_SETTINGS_0 + uint32_t VMID_SETTINGS_0;\ + uint32_t DST_Y_DELTA_DRQ_LIMIT /*shared with dcn3.x*/ #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ @@ -176,7 +177,10 @@ uint32_t HUBP_3DLUT_CONTROL;\ uint32_t HUBP_3DLUT_DLG_PARAM;\ uint32_t DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE;\ - uint32_t DCHUBP_MCACHEID_CONFIG + uint32_t DCHUBP_MCACHEID_CONFIG;\ + uint32_t DCHUBP_MALL_SUB_VP;\ + uint32_t DCHUBP_ADDR_CONFIG;\ + uint32_t HUBP_MALL_STATUS #define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \ DCN_HUBP_REG_FIELD_BASE_LIST(type); \ diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c index e2740482e1cf..08ea0a1b9e7f 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c @@ -73,8 +73,6 @@ * On any mode switch, if the new reg values are smaller than the current values, * then update the regs with the new values. 
* - * Link to the ticket: http://ontrack-internal.amd.com/browse/DEDCN21-142 - * */ void apply_DEDCN21_142_wa_for_hostvm_deadline( struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c index 556214b2227d..0cc6f4558989 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c @@ -476,6 +476,126 @@ void hubp3_read_state(struct hubp *hubp) } +void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + reg_state->hubp_cntl = REG_READ(DCHUBP_CNTL); + reg_state->mall_config = REG_READ(DCHUBP_MALL_CONFIG); + reg_state->mall_sub_vp = REG_READ(DCHUBP_MALL_SUB_VP); + reg_state->hubp_req_size_config = REG_READ(DCHUBP_REQ_SIZE_CONFIG); + reg_state->hubp_req_size_config_c = REG_READ(DCHUBP_REQ_SIZE_CONFIG_C); + reg_state->vmpg_config = REG_READ(DCHUBP_VMPG_CONFIG); + reg_state->addr_config = REG_READ(DCSURF_ADDR_CONFIG); + reg_state->pri_viewport_dimension = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION); + reg_state->pri_viewport_dimension_c = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION_C); + reg_state->pri_viewport_start = REG_READ(DCSURF_PRI_VIEWPORT_START); + reg_state->pri_viewport_start_c = REG_READ(DCSURF_PRI_VIEWPORT_START_C); + reg_state->sec_viewport_dimension = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION); + reg_state->sec_viewport_dimension_c = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION_C); + reg_state->sec_viewport_start = REG_READ(DCSURF_SEC_VIEWPORT_START); + reg_state->sec_viewport_start_c = REG_READ(DCSURF_SEC_VIEWPORT_START_C); + reg_state->surface_config = REG_READ(DCSURF_SURFACE_CONFIG); + reg_state->tiling_config = REG_READ(DCSURF_TILING_CONFIG); + reg_state->clk_cntl = REG_READ(HUBP_CLK_CNTL); + reg_state->mall_status = REG_READ(HUBP_MALL_STATUS); + reg_state->measure_win_ctrl_dcfclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DCFCLK); + reg_state->measure_win_ctrl_dppclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DPPCLK); + + reg_state->blank_offset_0 = REG_READ(BLANK_OFFSET_0); + reg_state->blank_offset_1 = REG_READ(BLANK_OFFSET_1); + reg_state->cursor_settings = REG_READ(CURSOR_SETTINGS); + reg_state->dcn_cur0_ttu_cntl0 = REG_READ(DCN_CUR0_TTU_CNTL0); + reg_state->dcn_cur0_ttu_cntl1 = REG_READ(DCN_CUR0_TTU_CNTL1); + reg_state->dcn_cur1_ttu_cntl0 = REG_READ(DCN_CUR1_TTU_CNTL0); + reg_state->dcn_cur1_ttu_cntl1 = REG_READ(DCN_CUR1_TTU_CNTL1); + reg_state->dcn_dmdat_vm_cntl = REG_READ(DCN_DMDATA_VM_CNTL); + reg_state->dcn_expansion_mode = REG_READ(DCN_EXPANSION_MODE); + reg_state->dcn_global_ttu_cntl = REG_READ(DCN_GLOBAL_TTU_CNTL); + reg_state->dcn_surf0_ttu_cntl0 = REG_READ(DCN_SURF0_TTU_CNTL0); + reg_state->dcn_surf0_ttu_cntl1 = REG_READ(DCN_SURF0_TTU_CNTL1); + reg_state->dcn_surf1_ttu_cntl0 = REG_READ(DCN_SURF1_TTU_CNTL0); + reg_state->dcn_surf1_ttu_cntl1 = REG_READ(DCN_SURF1_TTU_CNTL1); + reg_state->dcn_ttu_qos_wm = REG_READ(DCN_TTU_QOS_WM); + reg_state->dcn_vm_mx_l1_tlb_cntl = REG_READ(DCN_VM_MX_L1_TLB_CNTL); + reg_state->dcn_vm_system_aperture_high_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR); + reg_state->dcn_vm_system_aperture_low_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_LOW_ADDR); + reg_state->dcsurf_flip_control = REG_READ(DCSURF_FLIP_CONTROL); + reg_state->dcsurf_flip_control2 = REG_READ(DCSURF_FLIP_CONTROL2); + reg_state->dcsurf_primary_meta_surface_address = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS); + reg_state->dcsurf_primary_meta_surface_address_c = 
REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C); + reg_state->dcsurf_primary_meta_surface_address_high = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_primary_meta_surface_address_high_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_primary_surface_address = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS); + reg_state->dcsurf_primary_surface_address_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_C); + reg_state->dcsurf_primary_surface_address_high = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_primary_surface_address_high_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_secondary_meta_surface_address = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS); + reg_state->dcsurf_secondary_meta_surface_address_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C); + reg_state->dcsurf_secondary_meta_surface_address_high = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_secondary_meta_surface_address_high_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_secondary_surface_address = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS); + reg_state->dcsurf_secondary_surface_address_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_C); + reg_state->dcsurf_secondary_surface_address_high = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_secondary_surface_address_high_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_surface_control = REG_READ(DCSURF_SURFACE_CONTROL); + reg_state->dcsurf_surface_earliest_inuse = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE); + reg_state->dcsurf_surface_earliest_inuse_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_C); + reg_state->dcsurf_surface_earliest_inuse_high = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH); + reg_state->dcsurf_surface_earliest_inuse_high_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C); + reg_state->dcsurf_surface_flip_interrupt = REG_READ(DCSURF_SURFACE_FLIP_INTERRUPT); + reg_state->dcsurf_surface_inuse = REG_READ(DCSURF_SURFACE_INUSE); + reg_state->dcsurf_surface_inuse_c = REG_READ(DCSURF_SURFACE_INUSE_C); + reg_state->dcsurf_surface_inuse_high = REG_READ(DCSURF_SURFACE_INUSE_HIGH); + reg_state->dcsurf_surface_inuse_high_c = REG_READ(DCSURF_SURFACE_INUSE_HIGH_C); + reg_state->dcsurf_surface_pitch = REG_READ(DCSURF_SURFACE_PITCH); + reg_state->dcsurf_surface_pitch_c = REG_READ(DCSURF_SURFACE_PITCH_C); + reg_state->dst_after_scaler = REG_READ(DST_AFTER_SCALER); + reg_state->dst_dimensions = REG_READ(DST_DIMENSIONS); + reg_state->dst_y_delta_drq_limit = REG_READ(DST_Y_DELTA_DRQ_LIMIT); + reg_state->flip_parameters_0 = REG_READ(FLIP_PARAMETERS_0); + reg_state->flip_parameters_1 = REG_READ(FLIP_PARAMETERS_1); + reg_state->flip_parameters_2 = REG_READ(FLIP_PARAMETERS_2); + reg_state->flip_parameters_3 = REG_READ(FLIP_PARAMETERS_3); + reg_state->flip_parameters_4 = REG_READ(FLIP_PARAMETERS_4); + reg_state->flip_parameters_5 = REG_READ(FLIP_PARAMETERS_5); + reg_state->flip_parameters_6 = REG_READ(FLIP_PARAMETERS_6); + reg_state->hubpreq_mem_pwr_ctrl = REG_READ(HUBPREQ_MEM_PWR_CTRL); + reg_state->hubpreq_mem_pwr_status = REG_READ(HUBPREQ_MEM_PWR_STATUS); + reg_state->nom_parameters_0 = REG_READ(NOM_PARAMETERS_0); + reg_state->nom_parameters_1 = REG_READ(NOM_PARAMETERS_1); + reg_state->nom_parameters_2 = REG_READ(NOM_PARAMETERS_2); + reg_state->nom_parameters_3 = REG_READ(NOM_PARAMETERS_3); + reg_state->nom_parameters_4 = REG_READ(NOM_PARAMETERS_4); + reg_state->nom_parameters_5 = 
REG_READ(NOM_PARAMETERS_5); + reg_state->nom_parameters_6 = REG_READ(NOM_PARAMETERS_6); + reg_state->nom_parameters_7 = REG_READ(NOM_PARAMETERS_7); + reg_state->per_line_delivery = REG_READ(PER_LINE_DELIVERY); + reg_state->per_line_delivery_pre = REG_READ(PER_LINE_DELIVERY_PRE); + reg_state->prefetch_settings = REG_READ(PREFETCH_SETTINGS); + reg_state->prefetch_settings_c = REG_READ(PREFETCH_SETTINGS_C); + reg_state->ref_freq_to_pix_freq = REG_READ(REF_FREQ_TO_PIX_FREQ); + reg_state->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE); + reg_state->vblank_parameters_0 = REG_READ(VBLANK_PARAMETERS_0); + reg_state->vblank_parameters_1 = REG_READ(VBLANK_PARAMETERS_1); + reg_state->vblank_parameters_2 = REG_READ(VBLANK_PARAMETERS_2); + reg_state->vblank_parameters_3 = REG_READ(VBLANK_PARAMETERS_3); + reg_state->vblank_parameters_4 = REG_READ(VBLANK_PARAMETERS_4); + reg_state->vblank_parameters_5 = REG_READ(VBLANK_PARAMETERS_5); + reg_state->vblank_parameters_6 = REG_READ(VBLANK_PARAMETERS_6); + reg_state->vmid_settings_0 = REG_READ(VMID_SETTINGS_0); + reg_state->hubpret_control = REG_READ(HUBPRET_CONTROL); + reg_state->hubpret_interrupt = REG_READ(HUBPRET_INTERRUPT); + reg_state->hubpret_mem_pwr_ctrl = REG_READ(HUBPRET_MEM_PWR_CTRL); + reg_state->hubpret_mem_pwr_status = REG_READ(HUBPRET_MEM_PWR_STATUS); + reg_state->hubpret_read_line_ctrl0 = REG_READ(HUBPRET_READ_LINE_CTRL0); + reg_state->hubpret_read_line_ctrl1 = REG_READ(HUBPRET_READ_LINE_CTRL1); + reg_state->hubpret_read_line_status = REG_READ(HUBPRET_READ_LINE_STATUS); + reg_state->hubpret_read_line_value = REG_READ(HUBPRET_READ_LINE_VALUE); + reg_state->hubpret_read_line0 = REG_READ(HUBPRET_READ_LINE0); + reg_state->hubpret_read_line1 = REG_READ(HUBPRET_READ_LINE1); +} + void hubp3_setup( struct hubp *hubp, struct _vcs_dpi_display_dlg_regs_st *dlg_attr, @@ -505,30 +625,6 @@ void hubp3_init(struct hubp *hubp) hubp_reset(hubp); } -uint32_t hubp3_get_current_read_line(struct hubp *hubp) -{ - uint32_t read_line = 0; - struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - - REG_GET(HUBPRET_READ_LINE_VALUE, - PIPE_READ_LINE, - &read_line); - - return read_line; -} - -unsigned int hubp3_get_underflow_status(struct hubp *hubp) -{ - uint32_t hubp_underflow = 0; - struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - - REG_GET(DCHUBP_CNTL, - HUBP_UNDERFLOW_STATUS, - &hubp_underflow); - - return hubp_underflow; -} - static struct hubp_funcs dcn30_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, @@ -558,8 +654,7 @@ static struct hubp_funcs dcn30_hubp_funcs = { .hubp_soft_reset = hubp1_soft_reset, .hubp_set_flip_int = hubp1_set_flip_int, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, + .hubp_read_reg_state = hubp3_read_reg_state }; bool hubp3_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h index 842f4eb72cc8..c767e9f4f9b3 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h @@ -296,6 +296,8 @@ void hubp3_dmdata_set_attributes( void hubp3_read_state(struct hubp *hubp); +void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state); + void hubp3_init(struct hubp *hubp); void hubp3_clear_tiling(struct hubp *hubp); diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c 
b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c index 47101847c2b7..189045f85039 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c @@ -110,9 +110,7 @@ static struct hubp_funcs dcn31_hubp_funcs = { .hubp_in_blank = hubp1_in_blank, .program_extended_blank = hubp31_program_extended_blank, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = hubp31_get_det_config_error, + .hubp_read_reg_state = hubp3_read_reg_state, }; bool hubp31_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c index a5f23bb2a76a..a781085b046b 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c @@ -118,29 +118,7 @@ void hubp32_cursor_set_attributes( uint32_t cursor_width = ((attr->width + 63) / 64) * 64; uint32_t cursor_height = attr->height; uint32_t cursor_size = cursor_width * cursor_height; - - hubp->curs_attr = *attr; - - REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, - CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); - REG_UPDATE(CURSOR_SURFACE_ADDRESS, - CURSOR_SURFACE_ADDRESS, attr->address.low_part); - - REG_UPDATE_2(CURSOR_SIZE, - CURSOR_WIDTH, attr->width, - CURSOR_HEIGHT, attr->height); - - REG_UPDATE_4(CURSOR_CONTROL, - CURSOR_MODE, attr->color_format, - CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, - CURSOR_PITCH, hw_pitch, - CURSOR_LINES_PER_CHUNK, lpc); - - REG_SET_2(CURSOR_SETTINGS, 0, - /* no shift of the cursor HDL schedule */ - CURSOR0_DST_Y_OFFSET, 0, - /* used to shift the cursor chunk request deadline */ - CURSOR0_CHUNK_HDL_ADJUST, 3); + bool use_mall_for_cursor; switch (attr->color_format) { case CURSOR_MODE_MONO: @@ -158,11 +136,49 @@ void hubp32_cursor_set_attributes( cursor_size *= 8; break; } + use_mall_for_cursor = cursor_size > 16384 ? 
1 : 0; - if (cursor_size > 16384) - REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true); - else - REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false); + hubp->curs_attr = *attr; + + if (!hubp->cursor_offload) { + REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, + CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); + REG_UPDATE(CURSOR_SURFACE_ADDRESS, + CURSOR_SURFACE_ADDRESS, attr->address.low_part); + + REG_UPDATE_2(CURSOR_SIZE, + CURSOR_WIDTH, attr->width, + CURSOR_HEIGHT, attr->height); + + REG_UPDATE_4(CURSOR_CONTROL, + CURSOR_MODE, attr->color_format, + CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, + CURSOR_PITCH, hw_pitch, + CURSOR_LINES_PER_CHUNK, lpc); + + REG_SET_2(CURSOR_SETTINGS, 0, + /* no shift of the cursor HDL schedule */ + CURSOR0_DST_Y_OFFSET, 0, + /* used to shift the cursor chunk request deadline */ + CURSOR0_CHUNK_HDL_ADJUST, 3); + + REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, use_mall_for_cursor); + } + hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part; + hubp->att.SURFACE_ADDR = attr->address.low_part; + hubp->att.size.bits.width = attr->width; + hubp->att.size.bits.height = attr->height; + hubp->att.cur_ctl.bits.mode = attr->color_format; + + hubp->cur_rect.w = attr->width; + hubp->cur_rect.h = attr->height; + + hubp->att.cur_ctl.bits.pitch = hw_pitch; + hubp->att.cur_ctl.bits.line_per_chunk = lpc; + hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION; + hubp->att.settings.bits.dst_y_offset = 0; + hubp->att.settings.bits.chunk_hdl_adjust = 3; + hubp->use_mall_for_cursor = use_mall_for_cursor; } void hubp32_init(struct hubp *hubp) { @@ -206,9 +222,7 @@ static struct hubp_funcs dcn32_hubp_funcs = { .hubp_update_mall_sel = hubp32_update_mall_sel, .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = hubp31_get_det_config_error, + .hubp_read_reg_state = hubp3_read_reg_state }; bool hubp32_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c index b140808f21af..79c583e258c7 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c @@ -209,6 +209,7 @@ static struct hubp_funcs dcn35_hubp_funcs = { .dmdata_load = hubp2_dmdata_load, .dmdata_status_done = hubp2_dmdata_status_done, .hubp_read_state = hubp3_read_state, + .hubp_read_reg_state = hubp3_read_reg_state, .hubp_clear_underflow = hubp2_clear_underflow, .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp35_init, @@ -218,9 +219,6 @@ static struct hubp_funcs dcn35_hubp_funcs = { .hubp_in_blank = hubp1_in_blank, .program_extended_blank = hubp31_program_extended_blank_value, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = hubp31_get_det_config_error, }; bool hubp35_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c index 0fcbc6a35be6..f01eae50d02f 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c @@ -783,21 +783,23 @@ void hubp401_cursor_set_position( 
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr); - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, cur_en); + if (!hubp->cursor_offload) + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, cur_en); } - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, x_pos, - CURSOR_Y_POSITION, y_pos); + if (!hubp->cursor_offload) { + REG_SET_2(CURSOR_POSITION, 0, + CURSOR_X_POSITION, x_pos, + CURSOR_Y_POSITION, y_pos); - REG_SET_2(CURSOR_HOT_SPOT, 0, - CURSOR_HOT_SPOT_X, pos->x_hotspot, - CURSOR_HOT_SPOT_Y, pos->y_hotspot); - - REG_SET(CURSOR_DST_OFFSET, 0, - CURSOR_DST_X_OFFSET, dst_x_offset); + REG_SET_2(CURSOR_HOT_SPOT, 0, + CURSOR_HOT_SPOT_X, pos->x_hotspot, + CURSOR_HOT_SPOT_Y, pos->y_hotspot); + REG_SET(CURSOR_DST_OFFSET, 0, + CURSOR_DST_X_OFFSET, dst_x_offset); + } /* Cursor Position Register Config */ hubp->pos.cur_ctl.bits.cur_enable = cur_en; hubp->pos.position.bits.x_pos = pos->x; @@ -1071,9 +1073,7 @@ static struct hubp_funcs dcn401_hubp_funcs = { .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done, .hubp_clear_tiling = hubp401_clear_tiling, .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = hubp31_get_det_config_error, + .hubp_read_reg_state = hubp3_read_reg_state }; bool hubp401_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h index fdabbeec8ffa..4570b8016de5 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h @@ -31,7 +31,7 @@ #include "dcn30/dcn30_hubp.h" #include "dcn31/dcn31_hubp.h" #include "dcn32/dcn32_hubp.h" -#include "dml2/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" #define HUBP_3DLUT_FL_REG_LIST_DCN401(inst)\ SRI_ARR_US(_3DLUT_FL_CONFIG, HUBP, inst),\ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index ebc220b29d14..8fe399939220 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -659,6 +659,20 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx) } } +static void +dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct dc_link *link = pipe_ctx->stream->link; + struct dc_bios *bios = link->ctx->dc_bios; + struct bp_encoder_control encoder_control = {0}; + + encoder_control.action = enable ? 
ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE; + encoder_control.engine_id = link->link_enc->analog_engine; + encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10; + + bios->funcs->encoder_control(bios, &encoder_control); +} + void dce110_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = @@ -688,6 +702,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) early_control = lane_count; tg->funcs->set_early_control(tg, early_control); + + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + dce110_dac_encoder_control(pipe_ctx, true); } static enum bp_result link_transmitter_control( @@ -1085,6 +1102,9 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) if (!pipe_ctx->stream) return; + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + return; + dc = pipe_ctx->stream->ctx->dc; clk_mgr = dc->clk_mgr; link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res); @@ -1121,6 +1141,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) if (!pipe_ctx || !pipe_ctx->stream) return; + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + return; + dc = pipe_ctx->stream->ctx->dc; clk_mgr = dc->clk_mgr; link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res); @@ -1195,6 +1218,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); } + + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + dce110_dac_encoder_control(pipe_ctx, false); } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, @@ -1580,6 +1606,51 @@ static enum dc_status dce110_enable_stream_timing( return DC_OK; } +static void +dce110_select_crtc_source(struct pipe_ctx *pipe_ctx) +{ + struct dc_link *link = pipe_ctx->stream->link; + struct dc_bios *bios = link->ctx->dc_bios; + struct bp_crtc_source_select crtc_source_select = {0}; + enum engine_id engine_id = link->link_enc->preferred_engine; + uint8_t bit_depth; + + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + engine_id = link->link_enc->analog_engine; + + switch (pipe_ctx->stream->timing.display_color_depth) { + case COLOR_DEPTH_UNDEFINED: + bit_depth = 0; + break; + case COLOR_DEPTH_666: + bit_depth = 6; + break; + default: + case COLOR_DEPTH_888: + bit_depth = 8; + break; + case COLOR_DEPTH_101010: + bit_depth = 10; + break; + case COLOR_DEPTH_121212: + bit_depth = 12; + break; + case COLOR_DEPTH_141414: + bit_depth = 14; + break; + case COLOR_DEPTH_161616: + bit_depth = 16; + break; + } + + crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst; + crtc_source_select.bit_depth = bit_depth; + crtc_source_select.engine_id = engine_id; + crtc_source_select.sink_signal = pipe_ctx->stream->signal; + + bios->funcs->select_crtc_source(bios, &crtc_source_select); +} + enum dc_status dce110_apply_single_controller_ctx_to_hw( struct pipe_ctx *pipe_ctx, struct dc_state *context, @@ -1599,6 +1670,10 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( hws->funcs.disable_stream_gating(dc, pipe_ctx); } + if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) { + dce110_select_crtc_source(pipe_ctx); + } + if (pipe_ctx->stream_res.audio != NULL) { struct audio_output audio_output = {0}; @@ -1678,7 +1753,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.tg->funcs->set_static_screen_control( pipe_ctx->stream_res.tg, event_triggers, 2); - if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) + if 
(!dc_is_virtual_signal(pipe_ctx->stream->signal) && + !dc_is_rgb_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); @@ -1912,6 +1988,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) bool can_apply_edp_fast_boot = false; bool can_apply_seamless_boot = false; bool keep_edp_vdd_on = false; + bool should_clean_dsc_block = true; struct dc_bios *dcb = dc->ctx->dc_bios; DC_LOGGER_INIT(); @@ -2004,9 +2081,15 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) power_down_all_hw_blocks(dc); /* DSC could be enabled on eDP during VBIOS post. - * To clean up dsc blocks if eDP is in link but not active. + * To clean up dsc blocks if all eDP dpms_off is true. */ - if (edp_link_with_sink && (edp_stream_num == 0)) + for (i = 0; i < edp_stream_num; i++) { + if (!edp_streams[i]->dpms_off) { + should_clean_dsc_block = false; + } + } + + if (should_clean_dsc_block) clean_up_dsc_blocks(dc); disable_vga_and_power_gate_all_controllers(dc); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index e9fe97f0c4ea..fa62e40a9858 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -2245,7 +2245,7 @@ void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock) if (lock) delay_cursor_until_vupdate(dc, pipe); - if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) { + if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -3090,6 +3090,9 @@ static void dcn10_update_dchubp_dpp( } if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + if (dc->hwss.abort_cursor_offload_update) + dc->hwss.abort_cursor_offload_update(dc, pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); dc->hwss.set_cursor_position(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 56c1ab6c7330..c8ff8ae85a03 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -1457,7 +1457,7 @@ void dcn20_pipe_control_lock( !flip_immediate) dcn20_setup_gsl_group_as_lock(dc, pipe, false); - if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) { + if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -1801,6 +1801,9 @@ void dcn20_update_dchubp_dpp( if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || pipe_ctx->update_flags.bits.scaler || viewport_changed == true) && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + if (dc->hwss.abort_cursor_offload_update) + dc->hwss.abort_cursor_offload_update(dc, pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); dc->hwss.set_cursor_position(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index e47ed5571dfd..81bcadf5e57e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -53,7 +53,8 @@ #include "link_service.h" #include "dc_state_priv.h" - +#define 
TO_DCN_DCCG(dccg)\ + container_of(dccg, struct dcn_dccg, base) #define DC_LOGGER_INIT(logger) @@ -1235,44 +1236,47 @@ void dcn30_get_underflow_debug_data(const struct dc *dc, { struct hubbub *hubbub = dc->res_pool->hubbub; - if (tg) { - uint32_t v_blank_start = 0, v_blank_end = 0; - - out_data->otg_inst = tg->inst; - - tg->funcs->get_scanoutpos(tg, - &v_blank_start, - &v_blank_end, - &out_data->h_position, - &out_data->v_position); - - out_data->otg_frame_count = tg->funcs->get_frame_count(tg); - - out_data->otg_underflow = tg->funcs->is_optc_underflow_occurred(tg); + if (hubbub) { + if (hubbub->funcs->hubbub_read_reg_state) { + hubbub->funcs->hubbub_read_reg_state(hubbub, out_data->hubbub_reg_state); + } } for (int i = 0; i < MAX_PIPES; i++) { struct hubp *hubp = dc->res_pool->hubps[i]; + struct dpp *dpp = dc->res_pool->dpps[i]; + struct output_pixel_processor *opp = dc->res_pool->opps[i]; + struct display_stream_compressor *dsc = dc->res_pool->dscs[i]; + struct mpc *mpc = dc->res_pool->mpc; + struct timing_generator *optc = dc->res_pool->timing_generators[i]; + struct dccg *dccg = dc->res_pool->dccg; - if (hubp) { - if (hubp->funcs->hubp_get_underflow_status) - out_data->hubps[i].hubp_underflow = hubp->funcs->hubp_get_underflow_status(hubp); + if (hubp) + if (hubp->funcs->hubp_read_reg_state) + hubp->funcs->hubp_read_reg_state(hubp, out_data->hubp_reg_state[i]); - if (hubp->funcs->hubp_in_blank) - out_data->hubps[i].hubp_in_blank = hubp->funcs->hubp_in_blank(hubp); + if (dpp) + if (dpp->funcs->dpp_read_reg_state) + dpp->funcs->dpp_read_reg_state(dpp, out_data->dpp_reg_state[i]); - if (hubp->funcs->hubp_get_current_read_line) - out_data->hubps[i].hubp_readline = hubp->funcs->hubp_get_current_read_line(hubp); + if (opp) + if (opp->funcs->opp_read_reg_state) + opp->funcs->opp_read_reg_state(opp, out_data->opp_reg_state[i]); - if (hubp->funcs->hubp_get_det_config_error) - out_data->hubps[i].det_config_error = hubp->funcs->hubp_get_det_config_error(hubp); - } + if (dsc) + if (dsc->funcs->dsc_read_reg_state) + dsc->funcs->dsc_read_reg_state(dsc, out_data->dsc_reg_state[i]); + + if (mpc) + if (mpc->funcs->mpc_read_reg_state) + mpc->funcs->mpc_read_reg_state(mpc, i, out_data->mpc_reg_state[i]); + + if (optc) + if (optc->funcs->optc_read_reg_state) + optc->funcs->optc_read_reg_state(optc, out_data->optc_reg_state[i]); + + if (dccg) + if (dccg->funcs->dccg_read_reg_state) + dccg->funcs->dccg_read_reg_state(dccg, out_data->dccg_reg_state[i]); } - - if (hubbub->funcs->get_det_sizes) - hubbub->funcs->get_det_sizes(hubbub, out_data->curr_det_sizes, out_data->target_det_sizes); - - if (hubbub->funcs->compbuf_config_error) - out_data->compbuf_config_error = hubbub->funcs->compbuf_config_error(hubbub); - } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index b822f2dffff0..d1ecdb92b072 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -710,7 +710,8 @@ bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx, panel_cntl->inst, panel_cntl->pwrseq_inst); - dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst); + if (backlight_level_params->control_type != BACKLIGHT_CONTROL_AMD_AUX) + dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index f925f669f2a4..4ee6ed610de0 
100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -108,6 +108,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index f39292952702..bf19ba65d09a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -1061,6 +1061,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; if (should_use_dto_dscclk) dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 05011061822c..7aa0f452e8f7 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -364,6 +364,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); @@ -816,8 +817,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) { struct dpp *dpp = pipe_ctx->plane_res.dpp; - struct dccg *dccg = dc->res_pool->dccg; - /* enable DCFCLK current DCHUB */ pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true); @@ -825,7 +824,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, /* initialize HUBP on power up */ pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp); /*make sure DPPCLK is on*/ - dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true); dpp->funcs->dpp_dppclk_control(dpp, false, true); /* make sure OPP_PIPE_CLOCK_EN = 1 */ pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control( @@ -859,7 +857,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; - struct dccg *dccg = dc->res_pool->dccg; dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx); @@ -878,7 +875,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->funcs->hubp_clk_cntl(hubp, false); dpp->funcs->dpp_dppclk_control(dpp, false, false); - dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false); hubp->power_gated = true; @@ -1592,3 +1588,141 @@ void dcn35_hardware_release(struct dc *dc) if (dc->hwss.hw_block_power_up) dc->hwss.hw_block_power_up(dc, &pg_update_state); } + +void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe) 
+{ + if (!dc_dmub_srv_is_cursor_offload_enabled(dc)) + return; + + /* + * Insert a blank update to modify the write index and set pipe_mask to 0. + * + * While the DMU is interlocked with driver full pipe programming via + * the DMU HW lock, if the cursor update begins to execute after a full + * pipe programming occurs there are two possible issues: + * + * 1. Outdated cursor information is programmed, replacing the current update + * 2. The cursor update in firmware holds the cursor lock, preventing + * the current update from being latched atomically in the same frame + * as the rest of the update. + * + * This blank update, treated as a no-op, will allow the firmware to skip + * the programming. + */ + + if (dc->hwss.begin_cursor_offload_update) + dc->hwss.begin_cursor_offload_update(dc, pipe); + + if (dc->hwss.commit_cursor_offload_update) + dc->hwss.commit_cursor_offload_update(dc, pipe); +} + +void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + uint32_t stream_idx, write_idx, payload_idx; + + if (!top_pipe) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_start = write_idx; + + if (pipe->plane_res.hubp) + pipe->plane_res.hubp->cursor_offload = true; + + if (pipe->plane_res.dpp) + pipe->plane_res.dpp->cursor_offload = true; +} + +void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + volatile struct dmub_shared_state_cursor_offload_stream_v1 *shared_stream; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + uint32_t stream_idx, write_idx, payload_idx; + + if (pipe->plane_res.hubp) + pipe->plane_res.hubp->cursor_offload = false; + + if (pipe->plane_res.dpp) + pipe->plane_res.dpp->cursor_offload = false; + + if (!top_pipe) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + shared_stream = &dc->ctx->dmub_srv->dmub->shared_state[DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1] + .data.cursor_offload_v1.offload_streams[stream_idx]; + + shared_stream->last_write_idx = write_idx; + + cs->offload_streams[stream_idx].write_idx = write_idx; + cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_finish = write_idx; +} + +void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + const struct hubp *hubp = pipe->plane_res.hubp; + const struct dpp *dpp = pipe->plane_res.dpp; + volatile struct dmub_cursor_offload_pipe_data_dcn30_v1 *p; + uint32_t stream_idx, write_idx, payload_idx; + + if (!top_pipe || !hubp || !dpp) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + p = 
&cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn30; + + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR; + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH; + p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width; + p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot; + p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk; + + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable; + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode; + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode; + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en; + p->CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000; + p->CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF; + p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS = dpp->att.fp_scale_bias.bits.fp_bias; + p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE = dpp->att.fp_scale_bias.bits.fp_scale; + + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset; + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust; + + cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx); +} + +void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream) +{ + dc_dmub_srv_control_cursor_offload(dc, context, stream, true); +} + +void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe) +{ + dc_dmub_srv_program_cursor_now(dc, pipe); +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 0b1d6f608edd..1ff41dba556c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -101,4 +101,12 @@ bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx); void dcn35_hardware_release(struct dc *dc); +void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream); +void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe); + #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c 
b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index f2f16a0bdb4f..5a66c9db2670 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -86,6 +86,12 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .set_cursor_position = dcn10_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .abort_cursor_offload_update = dcn35_abort_cursor_offload_update, + .begin_cursor_offload_update = dcn35_begin_cursor_offload_update, + .commit_cursor_offload_update = dcn35_commit_cursor_offload_update, + .update_cursor_offload_pipe = dcn35_update_cursor_offload_pipe, + .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update, + .program_cursor_offload_now = dcn35_program_cursor_offload_now, .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 68e48a2492c9..2fbc22afb89c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -26,9 +26,11 @@ #include "clk_mgr.h" #include "dsc.h" #include "link_service.h" +#include "custom_float.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn10/dcn10_cm_common.h" +#include "dcn10/dcn10_hubbub.h" #include "dcn20/dcn20_optc.h" #include "dcn30/dcn30_cm_common.h" #include "dcn32/dcn32_hwseq.h" @@ -36,6 +38,7 @@ #include "dcn401/dcn401_resource.h" #include "dc_state_priv.h" #include "link_enc_cfg.h" +#include "../hw_sequencer.h" #define DC_LOGGER_INIT(logger) @@ -1405,9 +1408,9 @@ void dcn401_prepare_bandwidth(struct dc *dc, } if (dc->debug.fams2_config.bits.enable) { - dcn401_fams2_global_control_lock(dc, context, true); + dcn401_dmub_hw_control_lock(dc, context, true); dcn401_fams2_update_config(dc, context, false); - dcn401_fams2_global_control_lock(dc, context, false); + dcn401_dmub_hw_control_lock(dc, context, false); } if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) { @@ -1426,9 +1429,9 @@ void dcn401_optimize_bandwidth( /* enable fams2 if needed */ if (dc->debug.fams2_config.bits.enable) { - dcn401_fams2_global_control_lock(dc, context, true); + dcn401_dmub_hw_control_lock(dc, context, true); dcn401_fams2_update_config(dc, context, true); - dcn401_fams2_global_control_lock(dc, context, false); + dcn401_dmub_hw_control_lock(dc, context, false); } /* program dchubbub watermarks */ @@ -1467,14 +1470,17 @@ void dcn401_optimize_bandwidth( } } -void dcn401_fams2_global_control_lock(struct dc *dc, +void dcn401_dmub_hw_control_lock(struct dc *dc, struct dc_state *context, bool lock) { /* use always for now */ union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; - if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable) + if (!dc->ctx || !dc->ctx->dmub_srv) + return; + + if (!dc->debug.fams2_config.bits.enable && !dc_dmub_srv_is_cursor_offload_enabled(dc)) return; hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; @@ -1484,12 +1490,12 @@ void dcn401_fams2_global_control_lock(struct dc *dc, dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); } -void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params) +void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params) { - struct dc *dc = 
params->fams2_global_control_lock_fast_params.dc; - bool lock = params->fams2_global_control_lock_fast_params.lock; + struct dc *dc = params->dmub_hw_control_lock_fast_params.dc; + bool lock = params->dmub_hw_control_lock_fast_params.lock; - if (params->fams2_global_control_lock_fast_params.is_required) { + if (params->dmub_hw_control_lock_fast_params.is_required) { union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; @@ -1596,6 +1602,143 @@ void dcn401_update_odm(struct dc *dc, struct dc_state *context, dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true); } +static void dcn401_add_dsc_sequence_for_odm_change(struct dc *dc, struct dc_state *context, + struct pipe_ctx *otg_master, struct block_sequence_state *seq_state) +{ + struct pipe_ctx *old_pipe; + struct pipe_ctx *new_pipe; + struct pipe_ctx *old_opp_heads[MAX_PIPES]; + struct pipe_ctx *old_otg_master; + int old_opp_head_count = 0; + int i; + + old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx]; + + if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) { + old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master, + &dc->current_state->res_ctx, + old_opp_heads); + } else { + old_otg_master = NULL; + } + + /* Process new DSC configuration if DSC is enabled */ + if (otg_master->stream_res.dsc && otg_master->stream->timing.flags.DSC) { + struct dc_stream_state *stream = otg_master->stream; + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + int last_dsc_calc = 0; + bool should_use_dto_dscclk = (dc->res_pool->dccg->funcs->set_dto_dscclk != NULL) && + stream->timing.pix_clk_100hz > 480000; + + /* Count ODM pipes */ + for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + int num_slices_h = stream->timing.dsc_cfg.num_slices_h / opp_cnt; + + /* Step 1: Set DTO DSCCLK for main DSC if needed */ + if (should_use_dto_dscclk) { + hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg, + otg_master->stream_res.dsc->inst, num_slices_h); + } + + /* Step 2: Calculate and set DSC config for main DSC */ + last_dsc_calc = *seq_state->num_steps; + hwss_add_dsc_calculate_and_set_config(seq_state, otg_master, true, opp_cnt); + + /* Step 3: Enable main DSC block */ + hwss_add_dsc_enable_with_opp(seq_state, otg_master); + + /* Step 4: Configure and enable ODM DSC blocks */ + for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { + if (!odm_pipe->stream_res.dsc) + continue; + + /* Set DTO DSCCLK for ODM DSC if needed */ + if (should_use_dto_dscclk) { + hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg, + odm_pipe->stream_res.dsc->inst, num_slices_h); + } + + /* Calculate and set DSC config for ODM DSC */ + last_dsc_calc = *seq_state->num_steps; + hwss_add_dsc_calculate_and_set_config(seq_state, odm_pipe, true, opp_cnt); + + /* Enable ODM DSC block */ + hwss_add_dsc_enable_with_opp(seq_state, odm_pipe); + } + + /* Step 5: Configure DSC in timing generator */ + hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, + &seq_state->steps[last_dsc_calc].params.dsc_calculate_and_set_config_params.dsc_optc_cfg, true); + } else if (otg_master->stream_res.dsc && !otg_master->stream->timing.flags.DSC) { + /* Disable DSC in OPTC */ + hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, NULL, false); + + hwss_add_dsc_disconnect(seq_state, otg_master->stream_res.dsc); + } + + /* Disable DSC for old pipes that no longer need it */ + if (old_otg_master && 
old_otg_master->stream_res.dsc) { + for (i = 0; i < old_opp_head_count; i++) { + old_pipe = old_opp_heads[i]; + new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx]; + + /* If old pipe had DSC but new pipe doesn't, disable the old DSC */ + if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) { + /* Then disconnect DSC block */ + hwss_add_dsc_disconnect(seq_state, old_pipe->stream_res.dsc); + } + } + } +} + +void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context, + struct pipe_ctx *otg_master, struct block_sequence_state *seq_state) +{ + struct pipe_ctx *opp_heads[MAX_PIPES]; + int opp_inst[MAX_PIPES] = {0}; + int opp_head_count; + int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false); + int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true); + int i; + + opp_head_count = resource_get_opp_heads_for_otg_master( + otg_master, &context->res_ctx, opp_heads); + + for (i = 0; i < opp_head_count; i++) + opp_inst[i] = opp_heads[i]->stream_res.opp->inst; + + /* Add ODM combine/bypass operation to sequence */ + if (opp_head_count > 1) { + hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg, opp_inst, + opp_head_count, odm_slice_width, last_odm_slice_width); + } else { + hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg, &otg_master->stream->timing); + } + + /* Add OPP operations to sequence */ + for (i = 0; i < opp_head_count; i++) { + /* Add OPP pipe clock control operation */ + hwss_add_opp_pipe_clock_control(seq_state, opp_heads[i]->stream_res.opp, true); + + /* Add OPP program left edge extra pixel operation */ + hwss_add_opp_program_left_edge_extra_pixel(seq_state, opp_heads[i]->stream_res.opp, + opp_heads[i]->stream->timing.pixel_encoding, resource_is_pipe_type(opp_heads[i], OTG_MASTER)); + } + + /* Add DSC update operations to sequence */ + dcn401_add_dsc_sequence_for_odm_change(dc, context, otg_master, seq_state); + + /* Add blank pixel data operation if needed */ + if (!resource_is_pipe_type(otg_master, DPP_PIPE)) { + if (dc->hwseq->funcs.blank_pixel_data_sequence) + dc->hwseq->funcs.blank_pixel_data_sequence( + dc, otg_master, true, seq_state); + } +} + void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { @@ -2084,6 +2227,157 @@ void dcn401_program_pipe( } } +/* + * dcn401_program_pipe_sequence - Sequence-based version of dcn401_program_pipe + * + * This function creates a sequence-based version of the original dcn401_program_pipe + * function. Instead of directly calling hardware programming functions, it appends + * sequence steps to the provided block_sequence array that can later be executed + * as part of hwss_execute_sequence. 
+ * + */ +void dcn401_program_pipe_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + + /* Only need to unblank on top pipe */ + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) { + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.odm || + pipe_ctx->stream->update_flags.bits.abm_level) { + if (dc->hwseq->funcs.blank_pixel_data_sequence) + dc->hwseq->funcs.blank_pixel_data_sequence(dc, pipe_ctx, + !pipe_ctx->plane_state || !pipe_ctx->plane_state->visible, + seq_state); + } + } + + /* Only update TG on top pipe */ + if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe + && !pipe_ctx->prev_odm_pipe) { + + /* Step 1: Program global sync */ + hwss_add_tg_program_global_sync(seq_state, pipe_ctx->stream_res.tg, + dcn401_calculate_vready_offset_for_group(pipe_ctx), + (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines, + (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels, + (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels, + (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines); + + /* Step 2: Wait for VACTIVE state (if not phantom pipe) */ + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) + hwss_add_tg_wait_for_state(seq_state, pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + + /* Step 3: Set VTG params */ + hwss_add_tg_set_vtg_params(seq_state, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true); + + /* Step 4: Setup vupdate interrupt (if available) */ + if (hws->funcs.setup_vupdate_interrupt) + dcn401_setup_vupdate_interrupt_sequence(dc, pipe_ctx, seq_state); + } + + if (pipe_ctx->update_flags.bits.odm) { + if (hws->funcs.update_odm_sequence) + hws->funcs.update_odm_sequence(dc, context, pipe_ctx, seq_state); + } + + if (pipe_ctx->update_flags.bits.enable) { + if (dc->hwss.enable_plane_sequence) + dc->hwss.enable_plane_sequence(dc, pipe_ctx, context, seq_state); + } + + if (pipe_ctx->update_flags.bits.det_size) { + if (dc->res_pool->hubbub->funcs->program_det_size) { + hwss_add_hubp_program_det_size(seq_state, dc->res_pool->hubbub, + pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb); + } + + if (dc->res_pool->hubbub->funcs->program_det_segments) { + hwss_add_hubp_program_det_segments(seq_state, dc->res_pool->hubbub, + pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size); + } + } + + if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw || + pipe_ctx->plane_state->update_flags.raw || + pipe_ctx->stream->update_flags.raw)) { + + if (dc->hwss.update_dchubp_dpp_sequence) + dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, seq_state); + } + + if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable || + pipe_ctx->plane_state->update_flags.bits.hdr_mult)) { + + hws->funcs.set_hdr_multiplier_sequence(pipe_ctx, seq_state); + } + + if (pipe_ctx->plane_state && + (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || + pipe_ctx->plane_state->update_flags.bits.gamma_change || + pipe_ctx->plane_state->update_flags.bits.lut_3d || + pipe_ctx->update_flags.bits.enable)) { + + hwss_add_dpp_set_input_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->plane_state); + } + + /* dcn10_translate_regamma_to_hw_format takes 750us to finish + * only do gamma programming for powering on, internal memcmp to avoid + * updating on slave planes + */ + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || 
+ pipe_ctx->stream->update_flags.bits.out_tf) { + hwss_add_dpp_set_output_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->stream); + } + + /* If the pipe has been enabled or has a different opp, we + * should reprogram the fmt. This deals with cases where + * interaction between mpc and odm combine on different streams + * causes a different pipe to be chosen to odm combine with. + */ + if (pipe_ctx->update_flags.bits.enable + || pipe_ctx->update_flags.bits.opp_changed) { + + hwss_add_opp_set_dyn_expansion(seq_state, pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601, + pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->signal); + + hwss_add_opp_program_fmt(seq_state, pipe_ctx->stream_res.opp, + &pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping); + } + + /* Set ABM pipe after other pipe configurations done */ + if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) { + if (pipe_ctx->stream_res.abm) { + hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx); + + hwss_add_abm_set_level(seq_state, pipe_ctx->stream_res.abm, pipe_ctx->stream->abm_level); + } + } + + if (pipe_ctx->update_flags.bits.test_pattern_changed) { + struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp; + + hwss_add_opp_program_bit_depth_reduction(seq_state, odm_opp, true, pipe_ctx); + + hwss_add_opp_set_disp_pattern_generator(seq_state, + odm_opp, + pipe_ctx->stream_res.test_pattern_params.test_pattern, + pipe_ctx->stream_res.test_pattern_params.color_space, + pipe_ctx->stream_res.test_pattern_params.color_depth, + (struct tg_color){0}, + false, + pipe_ctx->stream_res.test_pattern_params.width, + pipe_ctx->stream_res.test_pattern_params.height, + pipe_ctx->stream_res.test_pattern_params.offset); + } + +} + void dcn401_program_front_end_for_ctx( struct dc *dc, struct dc_state *context) @@ -2161,7 +2455,6 @@ void dcn401_program_front_end_for_ctx( && context->res_ctx.pipe_ctx[i].stream) hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); - /* Disconnect mpcc */ for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable @@ -2240,11 +2533,11 @@ void dcn401_program_front_end_for_ctx( /* Avoid underflow by check of pipe line read when adding 2nd plane. */ if (hws->wa.wait_hubpret_read_start_during_mpo_transition && - !pipe->top_pipe && - pipe->stream && - pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start && - dc->current_state->stream_status[0].plane_count == 1 && - context->stream_status[0].plane_count > 1) { + !pipe->top_pipe && + pipe->stream && + pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start && + dc->current_state->stream_status[0].plane_count == 1 && + context->stream_status[0].plane_count > 1) { pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp); } } @@ -2356,7 +2649,6 @@ void dcn401_post_unlock_program_front_end( */ if (hwseq->funcs.update_force_pstate) dc->hwseq->funcs.update_force_pstate(dc, context); - /* Only program the MALL registers after all the main and phantom pipes * are done programming.
*/ @@ -2670,3 +2962,1084 @@ void dcn401_plane_atomic_power_down(struct dc *dc, if (hws->funcs.dpp_root_clock_control) hws->funcs.dpp_root_clock_control(hws, dpp->inst, false); } + +void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + const struct hubp *hubp = pipe->plane_res.hubp; + const struct dpp *dpp = pipe->plane_res.dpp; + volatile struct dmub_cursor_offload_pipe_data_dcn401_v1 *p; + uint32_t stream_idx, write_idx, payload_idx; + + if (!top_pipe || !hubp || !dpp) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn401; + + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR; + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH; + p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width; + p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot; + p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk; + + p->CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable; + p->CM_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode; + p->CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode; + p->CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en; + p->CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000; + p->CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF; + + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y = + dpp->att.fp_scale_bias_g_y.bits.fp_bias_g_y; + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y = + dpp->att.fp_scale_bias_g_y.bits.fp_scale_g_y; + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB = + dpp->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb; + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB = + dpp->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb; + + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset; + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust; + p->HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR = hubp->use_mall_for_cursor; + + cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx); +} + +void dcn401_plane_atomic_power_down_sequence(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + 
uint32_t org_ip_request_cntl = 0; + + DC_LOGGER_INIT(dc->ctx->logger); + + /* Check and set DC_IP_REQUEST_CNTL if needed */ + if (REG(DC_IP_REQUEST_CNTL)) { + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + if (org_ip_request_cntl == 0) + hwss_add_dc_ip_request_cntl(seq_state, dc, true); + } + + /* DPP power gating control */ + hwss_add_dpp_pg_control(seq_state, hws, dpp->inst, false); + + /* HUBP power gating control */ + hwss_add_hubp_pg_control(seq_state, hws, hubp->inst, false); + + /* HUBP reset */ + hwss_add_hubp_reset(seq_state, hubp); + + /* DPP reset */ + hwss_add_dpp_reset(seq_state, dpp); + + /* Restore DC_IP_REQUEST_CNTL if it was originally 0 */ + if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL)) + hwss_add_dc_ip_request_cntl(seq_state, dc, false); + + DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst); + + /* DPP root clock control */ + hwss_add_dpp_root_clock_control(seq_state, hws, dpp->inst, false); +} + +/* trigger HW to start disconnect plane from stream on the next vsync using block sequence */ +void dcn401_plane_atomic_disconnect_sequence(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + struct hubp *hubp = pipe_ctx->plane_res.hubp; + int dpp_id = pipe_ctx->plane_res.dpp->inst; + struct mpc *mpc = dc->res_pool->mpc; + struct mpc_tree *mpc_tree_params; + struct mpcc *mpcc_to_remove = NULL; + struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; + + mpc_tree_params = &(opp->mpc_tree_params); + mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id); + + /*Already reset*/ + if (mpcc_to_remove == NULL) + return; + + /* Step 1: Remove MPCC from MPC tree */ + hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, mpcc_to_remove); + + // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle, + // so don't wait for MPCC_IDLE in the programming sequence + if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) { + /* Step 2: Set MPCC disconnect pending flag */ + hwss_add_opp_set_mpcc_disconnect_pending(seq_state, opp, pipe_ctx->plane_res.mpcc_inst, true); + } + + /* Step 3: Set optimized required flag */ + hwss_add_dc_set_optimized_required(seq_state, dc, true); + + /* Step 4: Disconnect HUBP if function exists */ + if (hubp->funcs->hubp_disconnect) + hwss_add_hubp_disconnect(seq_state, hubp); + + /* Step 5: Verify pstate change high if debug sanity checks are enabled */ + if (dc->debug.sanity_checks) + dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state); +} + +void dcn401_blank_pixel_data_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank, + struct block_sequence_state *seq_state) +{ + struct tg_color black_color = {0}; + struct stream_resource *stream_res = &pipe_ctx->stream_res; + struct dc_stream_state *stream = pipe_ctx->stream; + enum dc_color_space color_space = stream->output_color_space; + enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR; + enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; + struct pipe_ctx *odm_pipe; + struct rect odm_slice_src; + + if (stream->link->test_pattern_enabled) + return; + + /* get opp dpg blank color */ + color_space_to_black_color(dc, color_space, &black_color); + + if (blank) { + /* Set ABM immediate disable */ + hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx); + + if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) { + test_pattern = 
CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; + test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; + } + } else { + test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; + } + + odm_pipe = pipe_ctx; + + /* Set display pattern generator for all ODM pipes */ + while (odm_pipe->next_odm_pipe) { + odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe); + + hwss_add_opp_set_disp_pattern_generator(seq_state, + odm_pipe->stream_res.opp, + test_pattern, + test_pattern_color_space, + stream->timing.display_color_depth, + black_color, + true, + odm_slice_src.width, + odm_slice_src.height, + odm_slice_src.x); + + odm_pipe = odm_pipe->next_odm_pipe; + } + + /* Set display pattern generator for final ODM pipe */ + odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe); + + hwss_add_opp_set_disp_pattern_generator(seq_state, + odm_pipe->stream_res.opp, + test_pattern, + test_pattern_color_space, + stream->timing.display_color_depth, + black_color, + true, + odm_slice_src.width, + odm_slice_src.height, + odm_slice_src.x); + + /* Handle ABM level setting when not blanking */ + if (!blank) { + if (stream_res->abm) { + /* Set pipe for ABM */ + hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx); + + /* Set ABM level */ + hwss_add_abm_set_level(seq_state, stream_res->abm, stream->abm_level); + } + } +} + +void dcn401_program_all_writeback_pipes_in_tree_sequence( + struct dc *dc, + const struct dc_stream_state *stream, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + int i_wb, i_pipe; + + if (!stream || stream->num_wb_info > dc->res_pool->res_cap->num_dwb) + return; + + /* For each writeback pipe */ + for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) { + /* Get direct pointer to writeback info */ + struct dc_writeback_info *wb_info = (struct dc_writeback_info *)&stream->writeback_info[i_wb]; + int mpcc_inst = -1; + + if (wb_info->wb_enabled) { + /* Get the MPCC instance for writeback_source_plane */ + for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe]; + + if (!pipe_ctx->plane_state) + continue; + + if (pipe_ctx->plane_state == wb_info->writeback_source_plane) { + mpcc_inst = pipe_ctx->plane_res.mpcc_inst; + break; + } + } + + if (mpcc_inst == -1) { + /* Disable writeback pipe and disconnect from MPCC + * if source plane has been removed + */ + dcn401_disable_writeback_sequence(dc, wb_info, seq_state); + continue; + } + + ASSERT(wb_info->dwb_pipe_inst < dc->res_pool->res_cap->num_dwb); + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + + if (dwb->funcs->is_enabled(dwb)) { + /* Writeback pipe already enabled, only need to update */ + dcn401_update_writeback_sequence(dc, wb_info, context, seq_state); + } else { + /* Enable writeback pipe and connect to MPCC */ + dcn401_enable_writeback_sequence(dc, wb_info, context, mpcc_inst, seq_state); + } + } else { + /* Disable writeback pipe and disconnect from MPCC */ + dcn401_disable_writeback_sequence(dc, wb_info, seq_state); + } + } +} + +void dcn401_enable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + int mpcc_inst, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + struct mcif_wb *mcif_wb; + + if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb) + return; + + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; + + /* Update DWBC with new parameters */ + 
hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params); + + /* Configure MCIF_WB buffer settings */ + hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); + + /* Configure MCIF_WB arbitration */ + hwss_add_mcif_wb_config_arb(seq_state, mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); + + /* Enable MCIF_WB */ + hwss_add_mcif_wb_enable(seq_state, mcif_wb); + + /* Set DWB MUX to connect writeback to MPCC */ + hwss_add_mpc_set_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst, mpcc_inst); + + /* Enable DWBC */ + hwss_add_dwbc_enable(seq_state, dwb, &wb_info->dwb_params); +} + +void dcn401_disable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + struct mcif_wb *mcif_wb; + + if (wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb) + return; + + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; + + /* Disable DWBC */ + hwss_add_dwbc_disable(seq_state, dwb); + + /* Disable DWB MUX */ + hwss_add_mpc_disable_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst); + + /* Disable MCIF_WB */ + hwss_add_mcif_wb_disable(seq_state, mcif_wb); +} + +void dcn401_update_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + struct mcif_wb *mcif_wb; + + if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb) + return; + + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; + + /* Update writeback pipe */ + hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params); + + /* Update MCIF_WB buffer settings if needed */ + hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); +} + +static int find_free_gsl_group(const struct dc *dc) +{ + if (dc->res_pool->gsl_groups.gsl_0 == 0) + return 1; + if (dc->res_pool->gsl_groups.gsl_1 == 0) + return 2; + if (dc->res_pool->gsl_groups.gsl_2 == 0) + return 3; + + return 0; +} + +void dcn401_setup_gsl_group_as_lock_sequence( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable, + struct block_sequence_state *seq_state) +{ + struct gsl_params gsl; + int group_idx; + + memset(&gsl, 0, sizeof(struct gsl_params)); + + if (enable) { + /* return if group already assigned since GSL was set up + * for vsync flip, we would unassign so it can't be "left over" + */ + if (pipe_ctx->stream_res.gsl_group > 0) + return; + + group_idx = find_free_gsl_group(dc); + ASSERT(group_idx != 0); + pipe_ctx->stream_res.gsl_group = group_idx; + + /* set gsl group reg field and mark resource used */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 1; + dc->res_pool->gsl_groups.gsl_0 = 1; + break; + case 2: + gsl.gsl1_en = 1; + dc->res_pool->gsl_groups.gsl_1 = 1; + break; + case 3: + gsl.gsl2_en = 1; + dc->res_pool->gsl_groups.gsl_2 = 1; + break; + default: + BREAK_TO_DEBUGGER(); + return; // invalid case + } + gsl.gsl_master_en = 1; + } else { + group_idx = pipe_ctx->stream_res.gsl_group; + if (group_idx == 0) + return; // if not in use, just return + + pipe_ctx->stream_res.gsl_group = 0; + + /* unset gsl group reg field and mark resource free */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 0; + dc->res_pool->gsl_groups.gsl_0 = 0; + break; + case 2: + gsl.gsl1_en = 0; + 
dc->res_pool->gsl_groups.gsl_1 = 0; + break; + case 3: + gsl.gsl2_en = 0; + dc->res_pool->gsl_groups.gsl_2 = 0; + break; + default: + BREAK_TO_DEBUGGER(); + return; + } + gsl.gsl_master_en = 0; + } + + hwss_add_tg_set_gsl(seq_state, pipe_ctx->stream_res.tg, gsl); + hwss_add_tg_set_gsl_source_select(seq_state, pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); +} + +void dcn401_disable_plane_sequence( + struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM; + struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL; + + if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) + return; + + /* Wait for MPCC disconnect */ + if (dc->hwss.wait_for_mpcc_disconnect_sequence) + dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, pipe_ctx, seq_state); + + /* In flip immediate with pipe splitting case GSL is used for synchronization + * so we must disable it when the plane is disabled. + */ + if (pipe_ctx->stream_res.gsl_group != 0) + dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state); + + /* Update HUBP mall sel */ + if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs->hubp_update_mall_sel) + hwss_add_hubp_update_mall_sel(seq_state, pipe_ctx->plane_res.hubp, 0, false); + + /* Set flip control GSL */ + hwss_add_hubp_set_flip_control_gsl(seq_state, pipe_ctx->plane_res.hubp, false); + + /* HUBP clock control */ + hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, false); + + /* DPP clock control */ + hwss_add_dpp_dppclk_control(seq_state, pipe_ctx->plane_res.dpp, false, false); + + /* Plane atomic power down */ + if (dc->hwseq->funcs.plane_atomic_power_down_sequence) + dc->hwseq->funcs.plane_atomic_power_down_sequence(dc, pipe_ctx->plane_res.dpp, + pipe_ctx->plane_res.hubp, seq_state); + + pipe_ctx->stream = NULL; + memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res)); + memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res)); + pipe_ctx->top_pipe = NULL; + pipe_ctx->bottom_pipe = NULL; + pipe_ctx->prev_odm_pipe = NULL; + pipe_ctx->next_odm_pipe = NULL; + pipe_ctx->plane_state = NULL; + + /* Turn back off the phantom OTG after the phantom plane is fully disabled */ + if (is_phantom && tg && tg->funcs->disable_phantom_crtc) + hwss_add_disable_phantom_crtc(seq_state, tg); +} + +void dcn401_post_unlock_reset_opp_sequence( + struct dc *dc, + struct pipe_ctx *opp_head, + struct block_sequence_state *seq_state) +{ + struct display_stream_compressor *dsc = opp_head->stream_res.dsc; + struct dccg *dccg = dc->res_pool->dccg; + + /* Wait for all DPP pipes in current mpc blending tree completes double + * buffered disconnection before resetting OPP + */ + if (dc->hwss.wait_for_mpcc_disconnect_sequence) + dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, opp_head, seq_state); + + if (dsc) { + bool *is_ungated = NULL; + /* Check DSC power gate status */ + if (dc->hwseq && dc->hwseq->funcs.dsc_pg_status) + hwss_add_dsc_pg_status(seq_state, dc->hwseq, dsc->inst, false); + + /* Seamless update specific where we will postpone non + * double buffered DSCCLK disable logic in post unlock + * sequence after DSC is disconnected from OPP but not + * yet power gated. 
+ */ + + /* DSC wait disconnect pending clear */ + hwss_add_dsc_wait_disconnect_pending_clear(seq_state, dsc, is_ungated); + + /* DSC disable */ + hwss_add_dsc_disable(seq_state, dsc, is_ungated); + + /* Set reference DSCCLK */ + if (dccg && dccg->funcs->set_ref_dscclk) + hwss_add_dccg_set_ref_dscclk(seq_state, dccg, dsc->inst, 0); + } +} + +void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable) +{ + struct dce_hwseq *hws = dc->hwseq; + + if (REG(DC_IP_REQUEST_CNTL)) + REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, enable ? 1 : 0); +} + +void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + uint32_t org_ip_request_cntl = 0; + + if (!pipe_ctx->plane_res.dpp || !pipe_ctx->plane_res.hubp || !pipe_ctx->stream_res.opp) + return; + + if (REG(DC_IP_REQUEST_CNTL)) + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + + /* Step 1: DPP root clock control - enable clock */ + if (hws->funcs.dpp_root_clock_control) + hwss_add_dpp_root_clock_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true); + + /* Step 2: Enable DC IP request (if needed) */ + if (hws->funcs.dc_ip_request_cntl) + hwss_add_dc_ip_request_cntl(seq_state, dc, true); + + /* Step 3: DPP power gating control - power on */ + if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.dpp_pg_control) + hwss_add_dpp_pg_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true); + + /* Step 4: HUBP power gating control - power on */ + if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.hubp_pg_control) + hwss_add_hubp_pg_control(seq_state, hws, pipe_ctx->plane_res.hubp->inst, true); + + /* Step 5: Disable DC IP request (restore state) */ + if (org_ip_request_cntl == 0 && hws->funcs.dc_ip_request_cntl) + hwss_add_dc_ip_request_cntl(seq_state, dc, false); + + /* Step 6: HUBP clock control - enable DCFCLK */ + if (pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl) + hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, true); + + /* Step 7: HUBP initialization */ + if (pipe_ctx->plane_res.hubp->funcs->hubp_init) + hwss_add_hubp_init(seq_state, pipe_ctx->plane_res.hubp); + + /* Step 8: OPP pipe clock control - enable */ + if (pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control) + hwss_add_opp_pipe_clock_control(seq_state, pipe_ctx->stream_res.opp, true); + + /* Step 9: VM system aperture settings */ + if (dc->vm_pa_config.valid && pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings) { + hwss_add_hubp_set_vm_system_aperture_settings(seq_state, pipe_ctx->plane_res.hubp, 0, + dc->vm_pa_config.system_aperture.start_addr, dc->vm_pa_config.system_aperture.end_addr); + } + + /* Step 10: Flip interrupt setup */ + if (!pipe_ctx->top_pipe + && pipe_ctx->plane_state + && pipe_ctx->plane_state->flip_int_enabled + && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) { + hwss_add_hubp_set_flip_int(seq_state, pipe_ctx->plane_res.hubp); + } +} + +void dcn401_update_dchubp_dpp_sequence(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + struct hubp *hubp = pipe_ctx->plane_res.hubp; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + struct dccg *dccg = dc->res_pool->dccg; + bool viewport_changed = false; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx); + + if (!hubp || !dpp || !plane_state) + return; + + /* 
Step 1: DPP DPPCLK control */ + if (pipe_ctx->update_flags.bits.dppclk) + hwss_add_dpp_dppclk_control(seq_state, dpp, false, true); + + /* Step 2: DCCG update DPP DTO */ + if (pipe_ctx->update_flags.bits.enable) + hwss_add_dccg_update_dpp_dto(seq_state, dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz); + + /* Step 3: HUBP VTG selection */ + if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) { + hwss_add_hubp_vtg_sel(seq_state, hubp, pipe_ctx->stream_res.tg->inst); + + /* Step 4: HUBP setup (choose setup2 or setup) */ + if (hubp->funcs->hubp_setup2) { + hwss_add_hubp_setup2(seq_state, hubp, &pipe_ctx->hubp_regs, + &pipe_ctx->global_sync, &pipe_ctx->stream->timing); + } else if (hubp->funcs->hubp_setup) { + hwss_add_hubp_setup(seq_state, hubp, &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param); + } + } + + /* Step 5: Set unbounded requesting */ + if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting) + hwss_add_hubp_set_unbounded_requesting(seq_state, hubp, pipe_ctx->unbounded_req); + + /* Step 6: HUBP interdependent setup */ + if (pipe_ctx->update_flags.bits.hubp_interdependent) { + if (hubp->funcs->hubp_setup_interdependent2) + hwss_add_hubp_setup_interdependent2(seq_state, hubp, &pipe_ctx->hubp_regs); + else if (hubp->funcs->hubp_setup_interdependent) + hwss_add_hubp_setup_interdependent(seq_state, hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs); + } + + /* Step 7: DPP setup - input CSC and format setup */ + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.input_csc_change || + plane_state->update_flags.bits.color_space_change || + plane_state->update_flags.bits.coeff_reduction_change) { + hwss_add_dpp_setup_dpp(seq_state, pipe_ctx); + + /* Step 8: DPP cursor matrix setup */ + if (dpp->funcs->set_cursor_matrix) { + hwss_add_dpp_set_cursor_matrix(seq_state, dpp, plane_state->color_space, + &plane_state->cursor_csc_color_matrix); + } + + /* Step 9: DPP program bias and scale */ + if (dpp->funcs->dpp_program_bias_and_scale) + hwss_add_dpp_program_bias_and_scale(seq_state, pipe_ctx); + } + + /* Step 10: MPCC updates */ + if (pipe_ctx->update_flags.bits.mpcc || + pipe_ctx->update_flags.bits.plane_changed || + plane_state->update_flags.bits.global_alpha_change || + plane_state->update_flags.bits.per_pixel_alpha_change) { + + /* Check if update_mpcc_sequence is implemented and prefer it over single MPC_UPDATE_MPCC step */ + if (hws->funcs.update_mpcc_sequence) + hws->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state); + } + + /* Step 11: DPP scaler setup */ + if (pipe_ctx->update_flags.bits.scaler || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.position_change || + plane_state->update_flags.bits.per_pixel_alpha_change || + pipe_ctx->stream->update_flags.bits.scaling) { + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; + ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP); + hwss_add_dpp_set_scaler(seq_state, pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); + } + + /* Step 12: HUBP viewport programming */ + if (pipe_ctx->update_flags.bits.viewport || + (context == dc->current_state && plane_state->update_flags.bits.position_change) || + (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || + (context == dc->current_state && 
pipe_ctx->stream->update_flags.bits.scaling)) { + hwss_add_hubp_mem_program_viewport(seq_state, hubp, + &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c); + viewport_changed = true; + } + + /* Step 13: HUBP program mcache if available */ + if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate) + hwss_add_hubp_program_mcache_id(seq_state, hubp, &pipe_ctx->mcache_regs); + + /* Step 14: Cursor attribute setup */ + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || + pipe_ctx->update_flags.bits.scaler || viewport_changed == true) && + pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + + hwss_add_abort_cursor_offload_update(seq_state, dc, pipe_ctx); + + hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx); + + /* Step 15: Cursor position setup */ + hwss_add_set_cursor_position(seq_state, dc, pipe_ctx); + + /* Step 16: Cursor SDR white level */ + if (dc->hwss.set_cursor_sdr_white_level) + hwss_add_set_cursor_sdr_white_level(seq_state, dc, pipe_ctx); + } + + /* Step 17: Gamut remap and output CSC */ + if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || + pipe_ctx->update_flags.bits.plane_changed || + pipe_ctx->stream->update_flags.bits.gamut_remap || + plane_state->update_flags.bits.gamut_remap_change || + pipe_ctx->stream->update_flags.bits.out_csc) { + + /* Gamut remap */ + hwss_add_dpp_program_gamut_remap(seq_state, pipe_ctx); + + /* Output CSC */ + hwss_add_program_output_csc(seq_state, dc, pipe_ctx, pipe_ctx->stream->output_color_space, + pipe_ctx->stream->csc_color_matrix.matrix, hubp->opp_id); + } + + /* Step 18: HUBP surface configuration */ + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || + pipe_ctx->update_flags.bits.opp_changed || + plane_state->update_flags.bits.pixel_format_change || + plane_state->update_flags.bits.horizontal_mirror_change || + plane_state->update_flags.bits.rotation_change || + plane_state->update_flags.bits.swizzle_change || + plane_state->update_flags.bits.dcc_change || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.plane_size_change) { + struct plane_size size = plane_state->plane_size; + + size.surface_size = pipe_ctx->plane_res.scl_data.viewport; + hwss_add_hubp_program_surface_config(seq_state, hubp, + plane_state->format, &plane_state->tiling_info, size, + plane_state->rotation, &plane_state->dcc, + plane_state->horizontal_mirror, 0); + hubp->power_gated = false; + } + + /* Step 19: Update plane address (with SubVP support) */ + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || + plane_state->update_flags.bits.addr_update) { + + /* SubVP save surface address if needed */ + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_mall_type == SUBVP_MAIN) { + hwss_add_dmub_subvp_save_surf_addr(seq_state, dc->ctx->dmub_srv, + &pipe_ctx->plane_state->address, pipe_ctx->subvp_index); + } + + /* Update plane address */ + hwss_add_hubp_update_plane_addr(seq_state, dc, pipe_ctx); + } + + /* Step 20: HUBP set blank - enable plane */ + if (pipe_ctx->update_flags.bits.enable) + hwss_add_hubp_set_blank(seq_state, hubp, false); + + /* Step 21: Phantom HUBP post enable */ + if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable) + hwss_add_phantom_hubp_post_enable(seq_state, hubp); +} + +void dcn401_update_mpcc_sequence(struct dc *dc, + struct pipe_ctx *pipe_ctx, + 
struct block_sequence_state *seq_state) +{ + struct hubp *hubp = pipe_ctx->plane_res.hubp; + struct mpcc_blnd_cfg blnd_cfg = {0}; + bool per_pixel_alpha; + int mpcc_id; + struct mpcc *new_mpcc; + struct mpc *mpc = dc->res_pool->mpc; + struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); + + if (!hubp || !pipe_ctx->plane_state) + return; + + per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; + + /* Initialize blend configuration */ + blnd_cfg.overlap_only = false; + blnd_cfg.global_gain = 0xff; + + if (per_pixel_alpha) { + blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha; + if (pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } + } else { + blnd_cfg.pre_multiplied_alpha = false; + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + + if (pipe_ctx->plane_state->global_alpha) + blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; + else + blnd_cfg.global_alpha = 0xff; + + blnd_cfg.background_color_bpc = 4; + blnd_cfg.bottom_gain_mode = 0; + blnd_cfg.top_gain = 0x1f000; + blnd_cfg.bottom_inside_gain = 0x1f000; + blnd_cfg.bottom_outside_gain = 0x1f000; + + if (pipe_ctx->plane_state->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA) + blnd_cfg.pre_multiplied_alpha = false; + + /* MPCC instance is equal to HUBP instance */ + mpcc_id = hubp->inst; + + /* Step 1: Update blending if no full update needed */ + if (!pipe_ctx->plane_state->update_flags.bits.full_update && + !pipe_ctx->update_flags.bits.mpcc) { + + /* Update blending configuration */ + hwss_add_mpc_update_blending(seq_state, mpc, blnd_cfg, mpcc_id); + + /* Update visual confirm color */ + hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id); + return; + } + + /* Step 2: Get existing MPCC for DPP */ + new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); + + /* Step 3: Remove MPCC if being used */ + if (new_mpcc != NULL) { + hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, new_mpcc); + } else { + /* Step 4: Assert MPCC idle (debug only) */ + if (dc->debug.sanity_checks) + hwss_add_mpc_assert_idle_mpcc(seq_state, mpc, mpcc_id); + } + + /* Step 5: Insert new plane into MPC tree */ + hwss_add_mpc_insert_plane(seq_state, mpc, mpc_tree_params, blnd_cfg, NULL, NULL, hubp->inst, mpcc_id); + + /* Step 6: Update visual confirm color */ + hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id); + + /* Step 7: Set HUBP OPP and MPCC IDs */ + hubp->opp_id = pipe_ctx->stream_res.opp->inst; + hubp->mpcc_id = mpcc_id; +} + +static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst) +{ + int i; + + for (i = 0; i < res_pool->pipe_count; i++) { + if (res_pool->hubps[i]->inst == mpcc_inst) + return res_pool->hubps[i]; + } + ASSERT(false); + return NULL; +} + +void dcn401_wait_for_mpcc_disconnect_sequence( + struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + int mpcc_inst; + + if (dc->debug.sanity_checks) + dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state); + + if (!pipe_ctx->stream_res.opp) + return; + + for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { + if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) { + struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); 
+ + if (pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) { + hwss_add_mpc_assert_idle_mpcc(seq_state, res_pool->mpc, mpcc_inst); + } + pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; + if (hubp) + hwss_add_hubp_set_blank(seq_state, hubp, true); + } + } + + if (dc->debug.sanity_checks) + dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state); +} + +void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + struct timing_generator *tg = pipe_ctx->stream_res.tg; + int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); + + if (start_line < 0) + start_line = 0; + + if (tg->funcs->setup_vertical_interrupt2) + hwss_add_tg_setup_vertical_interrupt2(seq_state, tg, start_line); +} + +void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult; + uint32_t hw_mult = 0x1f000; // 1.0 default multiplier + struct custom_float_format fmt; + + fmt.exponenta_bits = 6; + fmt.mantissa_bits = 12; + fmt.sign = true; + + if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0 + convert_to_custom_float_format(multiplier, &fmt, &hw_mult); + + hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult); +} + +void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context, + struct block_sequence_state *seq_state) +{ + int i; + unsigned int num_ways = dcn401_calculate_cab_allocation(dc, context); + bool cache_cursor = false; + + // Don't force p-state disallow -- can't block dummy p-state + + // Update MALL_SEL register for each pipe (break down update_mall_sel call) + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct hubp *hubp = pipe->plane_res.hubp; + + if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) { + int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; + + switch (hubp->curs_attr.color_format) { + case CURSOR_MODE_MONO: + cursor_size /= 2; + break; + case CURSOR_MODE_COLOR_1BIT_AND: + case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: + case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: + cursor_size *= 4; + break; + + case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: + case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: + default: + cursor_size *= 8; + break; + } + + if (cursor_size > 16384) + cache_cursor = true; + + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + hwss_add_hubp_update_mall_sel(seq_state, hubp, 1, false); + } else { + // MALL not supported with Stereo3D + uint32_t mall_sel = (num_ways <= dc->caps.cache_num_ways && + pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED && + pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO && + !pipe->plane_state->address.tmz_surface) ? 
2 : 0; + hwss_add_hubp_update_mall_sel(seq_state, hubp, mall_sel, cache_cursor); + } + } + } + + // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct hubp *hubp = pipe->plane_res.hubp; + + if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) { + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) + hwss_add_hubp_prepare_subvp_buffering(seq_state, hubp, true); + } + } +} + +void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc, + struct block_sequence_state *seq_state) +{ + struct hubbub *hubbub = dc->res_pool->hubbub; + + if (!hubbub->funcs->verify_allow_pstate_change_high) + return; + + if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) { + /* Attempt hardware workaround force recovery */ + dcn401_hw_wa_force_recovery_sequence(dc, seq_state); + } +} + +bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc, + struct block_sequence_state *seq_state) +{ + struct hubp *hubp; + unsigned int i; + + if (!dc->debug.recovery_enabled) + return false; + + /* Step 1: Set HUBP_BLANK_EN=1 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->set_hubp_blank_en) + hwss_add_hubp_set_blank_en(seq_state, hubp, true); + } + } + + /* Step 2: DCHUBBUB_GLOBAL_SOFT_RESET=1 */ + hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, true); + + /* Step 3: Set HUBP_DISABLE=1 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->hubp_disable_control) + hwss_add_hubp_disable_control(seq_state, hubp, true); + } + } + + /* Step 4: Set HUBP_DISABLE=0 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->hubp_disable_control) + hwss_add_hubp_disable_control(seq_state, hubp, false); + } + } + + /* Step 5: DCHUBBUB_GLOBAL_SOFT_RESET=0 */ + hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, false); + + /* Step 6: Set HUBP_BLANK_EN=0 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->set_hubp_blank_en) + hwss_add_hubp_set_blank_en(seq_state, hubp, false); + } + } + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index 2621b7725267..f78162ab859b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -9,6 +9,7 @@ #include "dc.h" #include "dc_stream.h" #include "hw_sequencer_private.h" +#include "hwss/hw_sequencer.h" #include "dcn401/dcn401_dccg.h" struct dc; @@ -73,15 +74,17 @@ void dcn401_optimize_bandwidth( struct dc *dc, struct dc_state *context); -void dcn401_fams2_global_control_lock(struct dc *dc, +void dcn401_dmub_hw_control_lock(struct dc *dc, 
struct dc_state *context, bool lock); void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable); -void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params); +void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params); void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); void dcn401_hardware_release(struct dc *dc); void dcn401_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); +void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context, + struct pipe_ctx *otg_master, struct block_sequence_state *seq_state); void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy); void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock); @@ -97,6 +100,11 @@ void dcn401_program_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); +void dcn401_program_pipe_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx); void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context); void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context); @@ -109,5 +117,97 @@ void dcn401_detect_pipe_changes( void dcn401_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp); +void dcn401_plane_atomic_power_down_sequence(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp, + struct block_sequence_state *seq_state); +void dcn401_plane_atomic_disconnect_sequence(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); +void dcn401_blank_pixel_data_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank, + struct block_sequence_state *seq_state); void dcn401_initialize_min_clocks(struct dc *dc); +void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe); + +void dcn401_program_all_writeback_pipes_in_tree_sequence( + struct dc *dc, + const struct dc_stream_state *stream, + struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_enable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + int mpcc_inst, + struct block_sequence_state *seq_state); + +void dcn401_disable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct block_sequence_state *seq_state); + +void dcn401_update_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_setup_gsl_group_as_lock_sequence( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable, + struct block_sequence_state *seq_state); + +void dcn401_disable_plane_sequence( + struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_post_unlock_reset_opp_sequence( + struct dc *dc, + struct pipe_ctx *opp_head, + struct block_sequence_state *seq_state); + +void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable); + +void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); 
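/*
 * A minimal usage sketch for the *_sequence helpers declared above. Unlike the
 * existing non-sequence variants, these functions do not program registers
 * directly: every hwss_add_*() call they make appends one step to the
 * caller-provided struct block_sequence_state, to be replayed against the
 * hardware later in a single pass. The executor name
 * hwss_execute_block_sequence() and the zero-initialized stack allocation of
 * seq_state below are assumptions for illustration only; neither is defined
 * in this hunk.
 *
 *	struct block_sequence_state seq_state = {0};	// assumed: layout permits zero-init
 *
 *	// Build the per-pipe programming sequence without touching hardware.
 *	dcn401_enable_plane_sequence(dc, pipe_ctx, context, &seq_state);
 *	dcn401_update_dchubp_dpp_sequence(dc, pipe_ctx, context, &seq_state);
 *
 *	// Replay the accumulated steps in order.
 *	hwss_execute_block_sequence(dc, &seq_state);	// assumed executor, not part of this patch
 */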
+ +void dcn401_update_dchubp_dpp_sequence(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_update_mpcc_sequence(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_wait_for_mpcc_disconnect_sequence( + struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc, + struct block_sequence_state *seq_state); + +bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc, + struct block_sequence_state *seq_state); + #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c index d6e11b7e4fce..162096ce0bdf 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c @@ -9,6 +9,7 @@ #include "dcn30/dcn30_hwseq.h" #include "dcn31/dcn31_hwseq.h" #include "dcn32/dcn32_hwseq.h" +#include "dcn35/dcn35_hwseq.h" #include "dcn401/dcn401_hwseq.h" #include "dcn401_init.h" @@ -38,6 +39,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_plane_sequence = dcn401_disable_plane_sequence, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn401_interdependent_update_lock, .cursor_lock = dcn10_cursor_lock, @@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .wait_for_mpcc_disconnect_sequence = dcn401_wait_for_mpcc_disconnect_sequence, .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, @@ -60,6 +63,12 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .set_cursor_position = dcn401_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .abort_cursor_offload_update = dcn35_abort_cursor_offload_update, + .begin_cursor_offload_update = dcn35_begin_cursor_offload_update, + .commit_cursor_offload_update = dcn35_commit_cursor_offload_update, + .update_cursor_offload_pipe = dcn401_update_cursor_offload_pipe, + .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update, + .program_cursor_offload_now = dcn35_program_cursor_offload_now, .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, @@ -95,55 +104,70 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, .wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation, .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, - 
.fams2_global_control_lock = dcn401_fams2_global_control_lock, + .dmub_hw_control_lock = dcn401_dmub_hw_control_lock, .fams2_update_config = dcn401_fams2_update_config, - .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast, + .dmub_hw_control_lock_fast = dcn401_dmub_hw_control_lock_fast, .program_outstanding_updates = dcn401_program_outstanding_updates, .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, .detect_pipe_changes = dcn401_detect_pipe_changes, .enable_plane = dcn20_enable_plane, + .enable_plane_sequence = dcn401_enable_plane_sequence, .update_dchubp_dpp = dcn20_update_dchubp_dpp, + .update_dchubp_dpp_sequence = dcn401_update_dchubp_dpp_sequence, .post_unlock_reset_opp = dcn20_post_unlock_reset_opp, + .post_unlock_reset_opp_sequence = dcn401_post_unlock_reset_opp_sequence, .get_underflow_debug_data = dcn30_get_underflow_debug_data, }; static const struct hwseq_private_funcs dcn401_private_funcs = { .init_pipes = dcn10_init_pipes, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .plane_atomic_disconnect_sequence = dcn401_plane_atomic_disconnect_sequence, .update_mpcc = dcn20_update_mpcc, + .update_mpcc_sequence = dcn401_update_mpcc_sequence, .set_input_transfer_func = dcn32_set_input_transfer_func, .set_output_transfer_func = dcn401_set_output_transfer_func, .power_down = dce110_power_down, .enable_display_power_gating = dcn10_dummy_display_power_gating, .blank_pixel_data = dcn20_blank_pixel_data, + .blank_pixel_data_sequence = dcn401_blank_pixel_data_sequence, .reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap, .enable_stream_timing = dcn401_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .setup_vupdate_interrupt_sequence = dcn401_setup_vupdate_interrupt_sequence, .did_underflow_occur = dcn10_did_underflow_occur, .init_blank = dcn32_init_blank, .disable_vga = dcn20_disable_vga, .bios_golden_init = dcn10_bios_golden_init, .plane_atomic_disable = dcn20_plane_atomic_disable, .plane_atomic_power_down = dcn401_plane_atomic_power_down, + .plane_atomic_power_down_sequence = dcn401_plane_atomic_power_down_sequence, .enable_power_gating_plane = dcn32_enable_power_gating_plane, .hubp_pg_control = dcn32_hubp_pg_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .program_all_writeback_pipes_in_tree_sequence = dcn401_program_all_writeback_pipes_in_tree_sequence, .update_odm = dcn401_update_odm, + .update_odm_sequence = dcn401_update_odm_sequence, .dsc_pg_control = dcn32_dsc_pg_control, .dsc_pg_status = dcn32_dsc_pg_status, .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .set_hdr_multiplier_sequence = dcn401_set_hdr_multiplier_sequence, .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .verify_allow_pstate_change_high_sequence = dcn401_verify_allow_pstate_change_high_sequence, .wait_for_blank_complete = dcn20_wait_for_blank_complete, .dccg_init = dcn20_dccg_init, .set_mcm_luts = dcn401_set_mcm_luts, .program_mall_pipe_config = dcn32_program_mall_pipe_config, + .program_mall_pipe_config_sequence = dcn401_program_mall_pipe_config_sequence, .update_mall_sel = dcn32_update_mall_sel, .calculate_dccg_k1_k2_values = NULL, .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, .reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe, .populate_mcm_luts = NULL, .perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock, + .program_pipe_sequence = dcn401_program_pipe_sequence, + 
.dc_ip_request_cntl = dcn401_dc_ip_request_cntl, }; void dcn401_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 1723bbcf2c46..8ed9eea40c56 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -31,6 +31,8 @@ #include "inc/hw/opp.h" #include "inc/hw/link_encoder.h" #include "inc/core_status.h" +#include "inc/hw/hw_shared.h" +#include "dsc/dsc.h" struct pipe_ctx; struct dc_state; @@ -48,6 +50,8 @@ struct dc_dmub_cmd; struct pg_block_update; struct drr_params; struct dc_underflow_debug_data; +struct dsc_optc_config; +struct vm_system_aperture_param; struct subvp_pipe_control_lock_fast_params { struct dc *dc; @@ -62,7 +66,7 @@ struct pipe_control_lock_params { }; struct set_flip_control_gsl_params { - struct pipe_ctx *pipe_ctx; + struct hubp *hubp; bool flip_immediate; }; @@ -148,12 +152,587 @@ struct wait_for_dcc_meta_propagation_params { const struct pipe_ctx *top_pipe_to_program; }; -struct fams2_global_control_lock_fast_params { +struct dmub_hw_control_lock_fast_params { struct dc *dc; bool is_required; bool lock; }; +struct program_surface_config_params { + struct hubp *hubp; + enum surface_pixel_format format; + struct dc_tiling_info *tiling_info; + struct plane_size plane_size; + enum dc_rotation_angle rotation; + struct dc_plane_dcc_param *dcc; + bool horizontal_mirror; + int compat_level; +}; + +struct program_mcache_id_and_split_coordinate { + struct hubp *hubp; + struct dml2_hubp_pipe_mcache_regs *mcache_regs; +}; + +struct program_cursor_update_now_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct hubp_wait_pipe_read_start_params { + struct hubp *hubp; +}; + +struct apply_update_flags_for_phantom_params { + struct pipe_ctx *pipe_ctx; +}; + +struct update_phantom_vp_position_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_state *context; +}; + +struct set_odm_combine_params { + struct timing_generator *tg; + int opp_inst[MAX_PIPES]; + int opp_head_count; + int odm_slice_width; + int last_odm_slice_width; +}; + +struct set_odm_bypass_params { + struct timing_generator *tg; + const struct dc_crtc_timing *timing; +}; + +struct opp_pipe_clock_control_params { + struct output_pixel_processor *opp; + bool enable; +}; + +struct opp_program_left_edge_extra_pixel_params { + struct output_pixel_processor *opp; + enum dc_pixel_encoding pixel_encoding; + bool is_otg_master; +}; + +struct dccg_set_dto_dscclk_params { + struct dccg *dccg; + int inst; + int num_slices_h; +}; + +struct dsc_set_config_params { + struct display_stream_compressor *dsc; + struct dsc_config *dsc_cfg; + struct dsc_optc_config *dsc_optc_cfg; +}; + +struct dsc_enable_params { + struct display_stream_compressor *dsc; + int opp_inst; +}; + +struct tg_set_dsc_config_params { + struct timing_generator *tg; + struct dsc_optc_config *dsc_optc_cfg; + bool enable; +}; + +struct dsc_disconnect_params { + struct display_stream_compressor *dsc; +}; + +struct dsc_read_state_params { + struct display_stream_compressor *dsc; + struct dcn_dsc_state *dsc_state; +}; + +struct dsc_calculate_and_set_config_params { + struct pipe_ctx *pipe_ctx; + struct dsc_optc_config dsc_optc_cfg; + bool enable; + int opp_cnt; +}; + +struct dsc_enable_with_opp_params { + struct pipe_ctx *pipe_ctx; +}; + +struct program_tg_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_state *context; +}; + +struct 
tg_program_global_sync_params { + struct timing_generator *tg; + int vready_offset; + unsigned int vstartup_lines; + unsigned int vupdate_offset_pixels; + unsigned int vupdate_vupdate_width_pixels; + unsigned int pstate_keepout_start_lines; +}; + +struct tg_wait_for_state_params { + struct timing_generator *tg; + enum crtc_state state; +}; + +struct tg_set_vtg_params_params { + struct timing_generator *tg; + struct dc_crtc_timing *timing; + bool program_fp2; +}; + +struct tg_set_gsl_params { + struct timing_generator *tg; + struct gsl_params gsl; +}; + +struct tg_set_gsl_source_select_params { + struct timing_generator *tg; + int group_idx; + uint32_t gsl_ready_signal; +}; + +struct setup_vupdate_interrupt_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct tg_setup_vertical_interrupt2_params { + struct timing_generator *tg; + int start_line; +}; + +struct dpp_set_hdr_multiplier_params { + struct dpp *dpp; + uint32_t hw_mult; +}; + +struct program_det_size_params { + struct hubbub *hubbub; + unsigned int hubp_inst; + unsigned int det_buffer_size_kb; +}; + +struct program_det_segments_params { + struct hubbub *hubbub; + unsigned int hubp_inst; + unsigned int det_size; +}; + +struct update_dchubp_dpp_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_state *context; +}; + +struct opp_set_dyn_expansion_params { + struct output_pixel_processor *opp; + enum dc_color_space color_space; + enum dc_color_depth color_depth; + enum signal_type signal; +}; + +struct opp_program_fmt_params { + struct output_pixel_processor *opp; + struct bit_depth_reduction_params *fmt_bit_depth; + struct clamping_and_pixel_encoding_params *clamping; +}; + +struct opp_program_bit_depth_reduction_params { + struct output_pixel_processor *opp; + bool use_default_params; + struct pipe_ctx *pipe_ctx; +}; + +struct opp_set_disp_pattern_generator_params { + struct output_pixel_processor *opp; + enum controller_dp_test_pattern test_pattern; + enum controller_dp_color_space color_space; + enum dc_color_depth color_depth; + struct tg_color solid_color; + bool use_solid_color; + int width; + int height; + int offset; +}; + +struct set_abm_pipe_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_abm_level_params { + struct abm *abm; + unsigned int abm_level; +}; + +struct set_abm_immediate_disable_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_disp_pattern_generator_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + enum controller_dp_test_pattern test_pattern; + enum controller_dp_color_space color_space; + enum dc_color_depth color_depth; + const struct tg_color *solid_color; + int width; + int height; + int offset; +}; + +struct mpc_update_blending_params { + struct mpc *mpc; + struct mpcc_blnd_cfg blnd_cfg; + int mpcc_id; +}; + +struct mpc_assert_idle_mpcc_params { + struct mpc *mpc; + int mpcc_id; +}; + +struct mpc_insert_plane_params { + struct mpc *mpc; + struct mpc_tree *mpc_tree_params; + struct mpcc_blnd_cfg blnd_cfg; + struct mpcc_sm_cfg *sm_cfg; + struct mpcc *insert_above_mpcc; + int dpp_id; + int mpcc_id; +}; + +struct mpc_remove_mpcc_params { + struct mpc *mpc; + struct mpc_tree *mpc_tree_params; + struct mpcc *mpcc_to_remove; +}; + +struct opp_set_mpcc_disconnect_pending_params { + struct output_pixel_processor *opp; + int mpcc_inst; + bool pending; +}; + +struct dc_set_optimized_required_params { + struct dc *dc; + bool optimized_required; +}; + +struct hubp_disconnect_params { + struct hubp *hubp; +}; + +struct 
hubbub_force_pstate_change_control_params { + struct hubbub *hubbub; + bool enable; + bool wait; +}; + +struct tg_enable_crtc_params { + struct timing_generator *tg; +}; + +struct hubp_wait_flip_pending_params { + struct hubp *hubp; + unsigned int timeout_us; + unsigned int polling_interval_us; +}; + +struct tg_wait_double_buffer_pending_params { + struct timing_generator *tg; + unsigned int timeout_us; + unsigned int polling_interval_us; +}; + +struct update_force_pstate_params { + struct dc *dc; + struct dc_state *context; +}; + +struct hubbub_apply_dedcn21_147_wa_params { + struct hubbub *hubbub; +}; + +struct hubbub_allow_self_refresh_control_params { + struct hubbub *hubbub; + bool allow; + bool *disallow_self_refresh_applied; +}; + +struct tg_get_frame_count_params { + struct timing_generator *tg; + unsigned int *frame_count; +}; + +struct mpc_set_dwb_mux_params { + struct mpc *mpc; + int dwb_id; + int mpcc_id; +}; + +struct mpc_disable_dwb_mux_params { + struct mpc *mpc; + unsigned int dwb_id; +}; + +struct mcif_wb_config_buf_params { + struct mcif_wb *mcif_wb; + struct mcif_buf_params *mcif_buf_params; + unsigned int dest_height; +}; + +struct mcif_wb_config_arb_params { + struct mcif_wb *mcif_wb; + struct mcif_arb_params *mcif_arb_params; +}; + +struct mcif_wb_enable_params { + struct mcif_wb *mcif_wb; +}; + +struct mcif_wb_disable_params { + struct mcif_wb *mcif_wb; +}; + +struct dwbc_enable_params { + struct dwbc *dwb; + struct dc_dwb_params *dwb_params; +}; + +struct dwbc_disable_params { + struct dwbc *dwb; +}; + +struct dwbc_update_params { + struct dwbc *dwb; + struct dc_dwb_params *dwb_params; +}; + +struct hubp_update_mall_sel_params { + struct hubp *hubp; + uint32_t mall_sel; + bool cache_cursor; +}; + +struct hubp_prepare_subvp_buffering_params { + struct hubp *hubp; + bool enable; +}; + +struct hubp_set_blank_en_params { + struct hubp *hubp; + bool enable; +}; + +struct hubp_disable_control_params { + struct hubp *hubp; + bool disable; +}; + +struct hubbub_soft_reset_params { + struct hubbub *hubbub; + void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset); + bool reset; +}; + +struct hubp_clk_cntl_params { + struct hubp *hubp; + bool enable; +}; + +struct hubp_init_params { + struct hubp *hubp; +}; + +struct hubp_set_vm_system_aperture_settings_params { + struct hubp *hubp; + //struct vm_system_aperture_param apt; + PHYSICAL_ADDRESS_LOC sys_default; + PHYSICAL_ADDRESS_LOC sys_low; + PHYSICAL_ADDRESS_LOC sys_high; +}; + +struct hubp_set_flip_int_params { + struct hubp *hubp; +}; + +struct dpp_dppclk_control_params { + struct dpp *dpp; + bool dppclk_div; + bool enable; +}; + +struct disable_phantom_crtc_params { + struct timing_generator *tg; +}; + +struct dpp_pg_control_params { + struct dce_hwseq *hws; + unsigned int dpp_inst; + bool power_on; +}; + +struct hubp_pg_control_params { + struct dce_hwseq *hws; + unsigned int hubp_inst; + bool power_on; +}; + +struct hubp_reset_params { + struct hubp *hubp; +}; + +struct dpp_reset_params { + struct dpp *dpp; +}; + +struct dpp_root_clock_control_params { + struct dce_hwseq *hws; + unsigned int dpp_inst; + bool clock_on; +}; + +struct dc_ip_request_cntl_params { + struct dc *dc; + bool enable; +}; + +struct dsc_pg_status_params { + struct dce_hwseq *hws; + int dsc_inst; + bool is_ungated; +}; + +struct dsc_wait_disconnect_pending_clear_params { + struct display_stream_compressor *dsc; + bool *is_ungated; +}; + +struct dsc_disable_params { + struct display_stream_compressor *dsc; + bool *is_ungated; +}; + +struct 
dccg_set_ref_dscclk_params { + struct dccg *dccg; + int dsc_inst; + bool *is_ungated; +}; + +struct dccg_update_dpp_dto_params { + struct dccg *dccg; + int dpp_inst; + int dppclk_khz; +}; + +struct hubp_vtg_sel_params { + struct hubp *hubp; + uint32_t otg_inst; +}; + +struct hubp_setup2_params { + struct hubp *hubp; + struct dml2_dchub_per_pipe_register_set *hubp_regs; + union dml2_global_sync_programming *global_sync; + struct dc_crtc_timing *timing; +}; + +struct hubp_setup_params { + struct hubp *hubp; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs; + struct _vcs_dpi_display_rq_regs_st *rq_regs; + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest; +}; + +struct hubp_set_unbounded_requesting_params { + struct hubp *hubp; + bool unbounded_req; +}; + +struct hubp_setup_interdependent2_params { + struct hubp *hubp; + struct dml2_dchub_per_pipe_register_set *hubp_regs; +}; + +struct hubp_setup_interdependent_params { + struct hubp *hubp; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs; +}; + +struct dpp_set_cursor_matrix_params { + struct dpp *dpp; + enum dc_color_space color_space; + struct dc_csc_transform *cursor_csc_color_matrix; +}; + +struct mpc_update_mpcc_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct dpp_set_scaler_params { + struct dpp *dpp; + const struct scaler_data *scl_data; +}; + +struct hubp_mem_program_viewport_params { + struct hubp *hubp; + const struct rect *viewport; + const struct rect *viewport_c; +}; + +struct hubp_program_mcache_id_and_split_coordinate_params { + struct hubp *hubp; + struct mcache_regs_struct *mcache_regs; +}; + +struct abort_cursor_offload_update_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_cursor_attribute_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_cursor_position_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_cursor_sdr_white_level_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct program_output_csc_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + enum dc_color_space colorspace; + uint16_t *matrix; + int opp_id; +}; + +struct hubp_set_blank_params { + struct hubp *hubp; + bool blank; +}; + +struct phantom_hubp_post_enable_params { + struct hubp *hubp; +}; + union block_sequence_params { struct update_plane_addr_params update_plane_addr_params; struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params; @@ -173,7 +752,108 @@ union block_sequence_params { struct set_ocsc_default_params set_ocsc_default_params; struct subvp_save_surf_addr subvp_save_surf_addr; struct wait_for_dcc_meta_propagation_params wait_for_dcc_meta_propagation_params; - struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params; + struct dmub_hw_control_lock_fast_params dmub_hw_control_lock_fast_params; + struct program_surface_config_params program_surface_config_params; + struct program_mcache_id_and_split_coordinate program_mcache_id_and_split_coordinate; + struct program_cursor_update_now_params program_cursor_update_now_params; + struct hubp_wait_pipe_read_start_params hubp_wait_pipe_read_start_params; + struct apply_update_flags_for_phantom_params apply_update_flags_for_phantom_params; + struct update_phantom_vp_position_params update_phantom_vp_position_params; + struct set_odm_combine_params set_odm_combine_params; + struct set_odm_bypass_params set_odm_bypass_params; + struct 
opp_pipe_clock_control_params opp_pipe_clock_control_params; + struct opp_program_left_edge_extra_pixel_params opp_program_left_edge_extra_pixel_params; + struct dccg_set_dto_dscclk_params dccg_set_dto_dscclk_params; + struct dsc_set_config_params dsc_set_config_params; + struct dsc_enable_params dsc_enable_params; + struct tg_set_dsc_config_params tg_set_dsc_config_params; + struct dsc_disconnect_params dsc_disconnect_params; + struct dsc_read_state_params dsc_read_state_params; + struct dsc_calculate_and_set_config_params dsc_calculate_and_set_config_params; + struct dsc_enable_with_opp_params dsc_enable_with_opp_params; + struct program_tg_params program_tg_params; + struct tg_program_global_sync_params tg_program_global_sync_params; + struct tg_wait_for_state_params tg_wait_for_state_params; + struct tg_set_vtg_params_params tg_set_vtg_params_params; + struct tg_setup_vertical_interrupt2_params tg_setup_vertical_interrupt2_params; + struct dpp_set_hdr_multiplier_params dpp_set_hdr_multiplier_params; + struct tg_set_gsl_params tg_set_gsl_params; + struct tg_set_gsl_source_select_params tg_set_gsl_source_select_params; + struct setup_vupdate_interrupt_params setup_vupdate_interrupt_params; + struct program_det_size_params program_det_size_params; + struct program_det_segments_params program_det_segments_params; + struct update_dchubp_dpp_params update_dchubp_dpp_params; + struct opp_set_dyn_expansion_params opp_set_dyn_expansion_params; + struct opp_program_fmt_params opp_program_fmt_params; + struct opp_program_bit_depth_reduction_params opp_program_bit_depth_reduction_params; + struct opp_set_disp_pattern_generator_params opp_set_disp_pattern_generator_params; + struct set_abm_pipe_params set_abm_pipe_params; + struct set_abm_level_params set_abm_level_params; + struct set_abm_immediate_disable_params set_abm_immediate_disable_params; + struct set_disp_pattern_generator_params set_disp_pattern_generator_params; + struct mpc_remove_mpcc_params mpc_remove_mpcc_params; + struct opp_set_mpcc_disconnect_pending_params opp_set_mpcc_disconnect_pending_params; + struct dc_set_optimized_required_params dc_set_optimized_required_params; + struct hubp_disconnect_params hubp_disconnect_params; + struct hubbub_force_pstate_change_control_params hubbub_force_pstate_change_control_params; + struct tg_enable_crtc_params tg_enable_crtc_params; + struct hubp_wait_flip_pending_params hubp_wait_flip_pending_params; + struct tg_wait_double_buffer_pending_params tg_wait_double_buffer_pending_params; + struct update_force_pstate_params update_force_pstate_params; + struct hubbub_apply_dedcn21_147_wa_params hubbub_apply_dedcn21_147_wa_params; + struct hubbub_allow_self_refresh_control_params hubbub_allow_self_refresh_control_params; + struct tg_get_frame_count_params tg_get_frame_count_params; + struct mpc_set_dwb_mux_params mpc_set_dwb_mux_params; + struct mpc_disable_dwb_mux_params mpc_disable_dwb_mux_params; + struct mcif_wb_config_buf_params mcif_wb_config_buf_params; + struct mcif_wb_config_arb_params mcif_wb_config_arb_params; + struct mcif_wb_enable_params mcif_wb_enable_params; + struct mcif_wb_disable_params mcif_wb_disable_params; + struct dwbc_enable_params dwbc_enable_params; + struct dwbc_disable_params dwbc_disable_params; + struct dwbc_update_params dwbc_update_params; + struct hubp_update_mall_sel_params hubp_update_mall_sel_params; + struct hubp_prepare_subvp_buffering_params hubp_prepare_subvp_buffering_params; + struct hubp_set_blank_en_params hubp_set_blank_en_params; + struct 
hubp_disable_control_params hubp_disable_control_params; + struct hubbub_soft_reset_params hubbub_soft_reset_params; + struct hubp_clk_cntl_params hubp_clk_cntl_params; + struct hubp_init_params hubp_init_params; + struct hubp_set_vm_system_aperture_settings_params hubp_set_vm_system_aperture_settings_params; + struct hubp_set_flip_int_params hubp_set_flip_int_params; + struct dpp_dppclk_control_params dpp_dppclk_control_params; + struct disable_phantom_crtc_params disable_phantom_crtc_params; + struct dpp_pg_control_params dpp_pg_control_params; + struct hubp_pg_control_params hubp_pg_control_params; + struct hubp_reset_params hubp_reset_params; + struct dpp_reset_params dpp_reset_params; + struct dpp_root_clock_control_params dpp_root_clock_control_params; + struct dc_ip_request_cntl_params dc_ip_request_cntl_params; + struct dsc_pg_status_params dsc_pg_status_params; + struct dsc_wait_disconnect_pending_clear_params dsc_wait_disconnect_pending_clear_params; + struct dsc_disable_params dsc_disable_params; + struct dccg_set_ref_dscclk_params dccg_set_ref_dscclk_params; + struct dccg_update_dpp_dto_params dccg_update_dpp_dto_params; + struct hubp_vtg_sel_params hubp_vtg_sel_params; + struct hubp_setup2_params hubp_setup2_params; + struct hubp_setup_params hubp_setup_params; + struct hubp_set_unbounded_requesting_params hubp_set_unbounded_requesting_params; + struct hubp_setup_interdependent2_params hubp_setup_interdependent2_params; + struct hubp_setup_interdependent_params hubp_setup_interdependent_params; + struct dpp_set_cursor_matrix_params dpp_set_cursor_matrix_params; + struct mpc_update_mpcc_params mpc_update_mpcc_params; + struct mpc_update_blending_params mpc_update_blending_params; + struct mpc_assert_idle_mpcc_params mpc_assert_idle_mpcc_params; + struct mpc_insert_plane_params mpc_insert_plane_params; + struct dpp_set_scaler_params dpp_set_scaler_params; + struct hubp_mem_program_viewport_params hubp_mem_program_viewport_params; + struct abort_cursor_offload_update_params abort_cursor_offload_update_params; + struct set_cursor_attribute_params set_cursor_attribute_params; + struct set_cursor_position_params set_cursor_position_params; + struct set_cursor_sdr_white_level_params set_cursor_sdr_white_level_params; + struct program_output_csc_params program_output_csc_params; + struct hubp_set_blank_params hubp_set_blank_params; + struct phantom_hubp_post_enable_params phantom_hubp_post_enable_params; }; enum block_sequence_func { @@ -189,13 +869,111 @@ enum block_sequence_func { DPP_SETUP_DPP, DPP_PROGRAM_BIAS_AND_SCALE, DPP_SET_OUTPUT_TRANSFER_FUNC, + DPP_SET_HDR_MULTIPLIER, MPC_UPDATE_VISUAL_CONFIRM, MPC_POWER_ON_MPC_MEM_PWR, MPC_SET_OUTPUT_CSC, MPC_SET_OCSC_DEFAULT, DMUB_SUBVP_SAVE_SURF_ADDR, HUBP_WAIT_FOR_DCC_META_PROP, - DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST, + DMUB_HW_CONTROL_LOCK_FAST, + HUBP_PROGRAM_SURFACE_CONFIG, + HUBP_PROGRAM_MCACHE_ID, + PROGRAM_CURSOR_UPDATE_NOW, + HUBP_WAIT_PIPE_READ_START, + HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM, + HWS_UPDATE_PHANTOM_VP_POSITION, + OPTC_SET_ODM_COMBINE, + OPTC_SET_ODM_BYPASS, + OPP_PIPE_CLOCK_CONTROL, + OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL, + DCCG_SET_DTO_DSCCLK, + DSC_SET_CONFIG, + DSC_ENABLE, + TG_SET_DSC_CONFIG, + DSC_DISCONNECT, + DSC_READ_STATE, + DSC_CALCULATE_AND_SET_CONFIG, + DSC_ENABLE_WITH_OPP, + TG_PROGRAM_GLOBAL_SYNC, + TG_WAIT_FOR_STATE, + TG_SET_VTG_PARAMS, + TG_SETUP_VERTICAL_INTERRUPT2, + HUBP_PROGRAM_DET_SIZE, + HUBP_PROGRAM_DET_SEGMENTS, + OPP_SET_DYN_EXPANSION, + OPP_PROGRAM_FMT, + OPP_PROGRAM_BIT_DEPTH_REDUCTION, + 
OPP_SET_DISP_PATTERN_GENERATOR, + ABM_SET_PIPE, + ABM_SET_LEVEL, + ABM_SET_IMMEDIATE_DISABLE, + MPC_REMOVE_MPCC, + OPP_SET_MPCC_DISCONNECT_PENDING, + DC_SET_OPTIMIZED_REQUIRED, + HUBP_DISCONNECT, + HUBBUB_FORCE_PSTATE_CHANGE_CONTROL, + TG_ENABLE_CRTC, + TG_SET_GSL, + TG_SET_GSL_SOURCE_SELECT, + HUBP_WAIT_FLIP_PENDING, + TG_WAIT_DOUBLE_BUFFER_PENDING, + UPDATE_FORCE_PSTATE, + PROGRAM_MALL_PIPE_CONFIG, + HUBBUB_APPLY_DEDCN21_147_WA, + HUBBUB_ALLOW_SELF_REFRESH_CONTROL, + TG_GET_FRAME_COUNT, + MPC_SET_DWB_MUX, + MPC_DISABLE_DWB_MUX, + MCIF_WB_CONFIG_BUF, + MCIF_WB_CONFIG_ARB, + MCIF_WB_ENABLE, + MCIF_WB_DISABLE, + DWBC_ENABLE, + DWBC_DISABLE, + DWBC_UPDATE, + HUBP_UPDATE_MALL_SEL, + HUBP_PREPARE_SUBVP_BUFFERING, + HUBP_SET_BLANK_EN, + HUBP_DISABLE_CONTROL, + HUBBUB_SOFT_RESET, + HUBP_CLK_CNTL, + HUBP_INIT, + HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS, + HUBP_SET_FLIP_INT, + DPP_DPPCLK_CONTROL, + DISABLE_PHANTOM_CRTC, + DSC_PG_STATUS, + DSC_WAIT_DISCONNECT_PENDING_CLEAR, + DSC_DISABLE, + DCCG_SET_REF_DSCCLK, + DPP_PG_CONTROL, + HUBP_PG_CONTROL, + HUBP_RESET, + DPP_RESET, + DPP_ROOT_CLOCK_CONTROL, + DC_IP_REQUEST_CNTL, + DCCG_UPDATE_DPP_DTO, + HUBP_VTG_SEL, + HUBP_SETUP2, + HUBP_SETUP, + HUBP_SET_UNBOUNDED_REQUESTING, + HUBP_SETUP_INTERDEPENDENT2, + HUBP_SETUP_INTERDEPENDENT, + DPP_SET_CURSOR_MATRIX, + MPC_UPDATE_BLENDING, + MPC_ASSERT_IDLE_MPCC, + MPC_INSERT_PLANE, + DPP_SET_SCALER, + HUBP_MEM_PROGRAM_VIEWPORT, + ABORT_CURSOR_OFFLOAD_UPDATE, + SET_CURSOR_ATTRIBUTE, + SET_CURSOR_POSITION, + SET_CURSOR_SDR_WHITE_LEVEL, + PROGRAM_OUTPUT_CSC, + HUBP_SET_LEGACY_TILING_COMPAT_LEVEL, + HUBP_SET_BLANK, + PHANTOM_HUBP_POST_ENABLE, /* This must be the last value in this enum, add new ones above */ HWSS_BLOCK_SEQUENCE_FUNC_COUNT }; @@ -205,6 +983,11 @@ struct block_sequence { enum block_sequence_func func; }; +struct block_sequence_state { + struct block_sequence *steps; + unsigned int *num_steps; +}; + #define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES) struct hw_sequencer_funcs { @@ -222,6 +1005,8 @@ struct hw_sequencer_funcs { enum dc_status (*apply_ctx_to_hw)(struct dc *dc, struct dc_state *context); void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); + void (*disable_plane_sequence)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank); void (*apply_ctx_for_surface)(struct dc *dc, const struct dc_stream_state *stream, @@ -239,6 +1024,10 @@ struct hw_sequencer_funcs { void (*wait_for_mpcc_disconnect)(struct dc *dc, struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx); + void (*wait_for_mpcc_disconnect_sequence)(struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*edp_backlight_control)( struct dc_link *link, bool enable); @@ -310,6 +1099,13 @@ struct hw_sequencer_funcs { void (*set_cursor_position)(struct pipe_ctx *pipe); void (*set_cursor_attribute)(struct pipe_ctx *pipe); void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe); + void (*abort_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe); + void (*begin_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe); + void (*commit_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe); + void (*update_cursor_offload_pipe)(struct dc *dc, const struct pipe_ctx *pipe); + void (*notify_cursor_offload_drr_update)(struct dc *dc, struct 
dc_state *context, + const struct dc_stream_state *stream); + void (*program_cursor_offload_now)(struct dc *dc, const struct pipe_ctx *pipe); /* Colour Related */ void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx); @@ -452,13 +1248,13 @@ struct hw_sequencer_funcs { const struct dc_state *new_ctx); void (*wait_for_dcc_meta_propagation)(const struct dc *dc, const struct pipe_ctx *top_pipe_to_program); - void (*fams2_global_control_lock)(struct dc *dc, + void (*dmub_hw_control_lock)(struct dc *dc, struct dc_state *context, bool lock); void (*fams2_update_config)(struct dc *dc, struct dc_state *context, bool enable); - void (*fams2_global_control_lock_fast)(union block_sequence_params *params); + void (*dmub_hw_control_lock_fast)(union block_sequence_params *params); void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max); void (*program_outstanding_updates)(struct dc *dc, struct dc_state *context); @@ -471,11 +1267,23 @@ struct hw_sequencer_funcs { void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); + void (*enable_plane_sequence)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); void (*update_dchubp_dpp)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); + void (*update_dchubp_dpp_sequence)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); void (*post_unlock_reset_opp)(struct dc *dc, struct pipe_ctx *opp_head); + void (*post_unlock_reset_opp_sequence)( + struct dc *dc, + struct pipe_ctx *opp_head, + struct block_sequence_state *seq_state); void (*get_underflow_debug_data)(const struct dc *dc, struct timing_generator *tg, struct dc_underflow_debug_data *out_data); @@ -588,4 +1396,630 @@ void hwss_set_ocsc_default(union block_sequence_params *params); void hwss_subvp_save_surf_addr(union block_sequence_params *params); +void hwss_program_surface_config(union block_sequence_params *params); + +void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params); + +void hwss_set_odm_combine(union block_sequence_params *params); + +void hwss_set_odm_bypass(union block_sequence_params *params); + +void hwss_opp_pipe_clock_control(union block_sequence_params *params); + +void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params); + +void hwss_blank_pixel_data(union block_sequence_params *params); + +void hwss_dccg_set_dto_dscclk(union block_sequence_params *params); + +void hwss_dsc_set_config(union block_sequence_params *params); + +void hwss_dsc_enable(union block_sequence_params *params); + +void hwss_tg_set_dsc_config(union block_sequence_params *params); + +void hwss_dsc_disconnect(union block_sequence_params *params); + +void hwss_dsc_read_state(union block_sequence_params *params); + +void hwss_dsc_calculate_and_set_config(union block_sequence_params *params); + +void hwss_dsc_enable_with_opp(union block_sequence_params *params); + +void hwss_program_tg(union block_sequence_params *params); + +void hwss_tg_program_global_sync(union block_sequence_params *params); + +void hwss_tg_wait_for_state(union block_sequence_params *params); + +void hwss_tg_set_vtg_params(union block_sequence_params *params); + +void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params); + +void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params); + +void hwss_program_det_size(union block_sequence_params 
*params); + +void hwss_program_det_segments(union block_sequence_params *params); + +void hwss_opp_set_dyn_expansion(union block_sequence_params *params); + +void hwss_opp_program_fmt(union block_sequence_params *params); + +void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params); + +void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params); + +void hwss_set_abm_pipe(union block_sequence_params *params); + +void hwss_set_abm_level(union block_sequence_params *params); + +void hwss_set_abm_immediate_disable(union block_sequence_params *params); + +void hwss_mpc_remove_mpcc(union block_sequence_params *params); + +void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params); + +void hwss_dc_set_optimized_required(union block_sequence_params *params); + +void hwss_hubp_disconnect(union block_sequence_params *params); + +void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params); + +void hwss_tg_enable_crtc(union block_sequence_params *params); + +void hwss_tg_set_gsl(union block_sequence_params *params); + +void hwss_tg_set_gsl_source_select(union block_sequence_params *params); + +void hwss_hubp_wait_flip_pending(union block_sequence_params *params); + +void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params); + +void hwss_update_force_pstate(union block_sequence_params *params); + +void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params); + +void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params); + +void hwss_tg_get_frame_count(union block_sequence_params *params); + +void hwss_mpc_set_dwb_mux(union block_sequence_params *params); + +void hwss_mpc_disable_dwb_mux(union block_sequence_params *params); + +void hwss_mcif_wb_config_buf(union block_sequence_params *params); + +void hwss_mcif_wb_config_arb(union block_sequence_params *params); + +void hwss_mcif_wb_enable(union block_sequence_params *params); + +void hwss_mcif_wb_disable(union block_sequence_params *params); + +void hwss_dwbc_enable(union block_sequence_params *params); + +void hwss_dwbc_disable(union block_sequence_params *params); + +void hwss_dwbc_update(union block_sequence_params *params); + +void hwss_hubp_update_mall_sel(union block_sequence_params *params); + +void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params); + +void hwss_hubp_set_blank_en(union block_sequence_params *params); + +void hwss_hubp_disable_control(union block_sequence_params *params); + +void hwss_hubbub_soft_reset(union block_sequence_params *params); + +void hwss_hubp_clk_cntl(union block_sequence_params *params); + +void hwss_hubp_init(union block_sequence_params *params); + +void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params); + +void hwss_hubp_set_flip_int(union block_sequence_params *params); + +void hwss_dpp_dppclk_control(union block_sequence_params *params); + +void hwss_disable_phantom_crtc(union block_sequence_params *params); + +void hwss_dsc_pg_status(union block_sequence_params *params); + +void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params); + +void hwss_dsc_disable(union block_sequence_params *params); + +void hwss_dccg_set_ref_dscclk(union block_sequence_params *params); + +void hwss_dpp_pg_control(union block_sequence_params *params); + +void hwss_hubp_pg_control(union block_sequence_params *params); + +void hwss_hubp_reset(union block_sequence_params *params); + +void hwss_dpp_reset(union block_sequence_params 
*params); + +void hwss_dpp_root_clock_control(union block_sequence_params *params); + +void hwss_dc_ip_request_cntl(union block_sequence_params *params); + +void hwss_dccg_update_dpp_dto(union block_sequence_params *params); + +void hwss_hubp_vtg_sel(union block_sequence_params *params); + +void hwss_hubp_setup2(union block_sequence_params *params); + +void hwss_hubp_setup(union block_sequence_params *params); + +void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params); + +void hwss_hubp_setup_interdependent2(union block_sequence_params *params); + +void hwss_hubp_setup_interdependent(union block_sequence_params *params); + +void hwss_dpp_set_cursor_matrix(union block_sequence_params *params); + +void hwss_mpc_update_mpcc(union block_sequence_params *params); + +void hwss_mpc_update_blending(union block_sequence_params *params); + +void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params); + +void hwss_mpc_insert_plane(union block_sequence_params *params); + +void hwss_dpp_set_scaler(union block_sequence_params *params); + +void hwss_hubp_mem_program_viewport(union block_sequence_params *params); + +void hwss_abort_cursor_offload_update(union block_sequence_params *params); + +void hwss_set_cursor_attribute(union block_sequence_params *params); + +void hwss_set_cursor_position(union block_sequence_params *params); + +void hwss_set_cursor_sdr_white_level(union block_sequence_params *params); + +void hwss_program_output_csc(union block_sequence_params *params); + +void hwss_hubp_set_legacy_tiling_compat_level(union block_sequence_params *params); + +void hwss_hubp_set_blank(union block_sequence_params *params); + +void hwss_phantom_hubp_post_enable(union block_sequence_params *params); + +void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, bool lock); + +void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state, + struct hubp *hubp, bool flip_immediate); + +void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, bool enableTripleBuffer); + +void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_plane_state *plane_state); + +void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_stream_state *stream); + +void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, int mpcc_id); + +void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state, + struct mpc *mpc, int mpcc_id, bool power_on); + +void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state, + struct mpc *mpc, int opp_id, const uint16_t *regval, enum mpc_output_csc_mode ocsc_mode); + +void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state, + struct mpc *mpc, int opp_id, enum dc_color_space colorspace, enum 
mpc_output_csc_mode ocsc_mode); + +void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state, + struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); + +void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state, + struct dc_dmub_srv *dc_dmub_srv, struct dc_plane_address *addr, uint8_t subvp_index); + +void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *top_pipe_to_program); + +void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state, + struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); + +void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state, + struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count, + int odm_slice_width, int last_odm_slice_width); + +void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state, + struct timing_generator *optc, struct dc_crtc_timing *timing); + +void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int vready_offset, + unsigned int vstartup_lines, + unsigned int vupdate_offset_pixels, + unsigned int vupdate_vupdate_width_pixels, + unsigned int pstate_keepout_start_lines); + +void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state, + struct timing_generator *tg, enum crtc_state state); + +void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state, + struct timing_generator *tg, struct dc_crtc_timing *dc_crtc_timing, bool program_fp2); + +void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state, + struct timing_generator *tg, int start_line); + +void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state, + struct dpp *dpp, uint32_t hw_mult); + +void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state, + struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_buffer_size_kb); + +void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state, + struct hubp *hubp, struct dml2_hubp_pipe_mcache_regs *mcache_regs); + +void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, bool enable, bool wait); + +void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state, + struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_size); + +void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, enum dc_color_space color_sp, + enum dc_color_depth color_dpth, enum signal_type signal); + +void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, struct bit_depth_reduction_params *fmt_bit_depth, + struct clamping_and_pixel_encoding_params *clamping); + +void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx); + +void hwss_add_abm_set_level(struct block_sequence_state *seq_state, + struct abm *abm, uint32_t abm_level); + +void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg); + +void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state, + struct hubp *hubp, unsigned int 
timeout_us, unsigned int polling_interval_us); + +void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state, + struct timing_generator *tg, unsigned int timeout_us, unsigned int polling_interval_us); + +void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, int inst, int num_slices_h); + +void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt); + +void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove); + +void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, int mpcc_inst, bool pending); + +void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc); + +void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state, + struct dc *dc, bool optimized_required); + +void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx); + +void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, + enum dc_color_depth color_depth, + struct tg_color solid_color, + bool use_solid_color, + int width, + int height, + int offset); + +void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool use_default_params, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state, + struct dc *dc, + bool enable); + +void hwss_add_dwbc_update(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params); + +void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_buf_params *mcif_buf_params, + unsigned int dest_height); + +void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_arb_params *mcif_arb_params); + +void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb); + +void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb); + +void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + int dwb_id, + int mpcc_id); + +void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + unsigned int dwb_id); + +void hwss_add_dwbc_enable(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params); + +void hwss_add_dwbc_disable(struct block_sequence_state *seq_state, + struct dwbc *dwb); + +void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state, + struct timing_generator *tg, + struct gsl_params gsl); + +void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int group_idx, + uint32_t gsl_ready_signal); + +void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t mall_sel, + bool 
cache_cursor); + +void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable); + +void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable); + +void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool disable); + +void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset), + bool reset); + +void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable); + +void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state, + struct dpp *dpp, + bool dppclk_div, + bool enable); + +void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg); + +void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + int dsc_inst, + bool is_ungated); + +void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated); + +void hwss_add_dsc_disable(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated); + +void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dsc_inst, + bool *is_ungated); + +void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool clock_on); + +void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on); + +void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on); + +void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool blank); + +void hwss_add_hubp_init(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_hubp_reset(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_dpp_reset(struct block_sequence_state *seq_state, + struct dpp *dpp); + +void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool enable); + +void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint64_t sys_default, + uint64_t sys_low, + uint64_t sys_high); + +void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dpp_inst, + int dppclk_khz); + +void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t otg_inst); + +void hwss_add_hubp_setup2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_dchub_per_pipe_register_set *hubp_regs, + union dml2_global_sync_programming *global_sync, + struct dc_crtc_timing *timing); + +void hwss_add_hubp_setup(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs, + struct _vcs_dpi_display_rq_regs_st *rq_regs, + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); + +void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool 
unbounded_req); + +void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_dchub_per_pipe_register_set *hubp_regs); + +void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs); +void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state, + struct hubp *hubp, + enum surface_pixel_format format, + struct dc_tiling_info *tiling_info, + struct plane_size plane_size, + enum dc_rotation_angle rotation, + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror, + int compat_level); + +void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state, + struct dpp *dpp, + enum dc_color_space color_space, + struct dc_csc_transform *cursor_csc_color_matrix); + +void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct mpcc_blnd_cfg blnd_cfg, + int mpcc_id); + +void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, + int mpcc_id); + +void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct mpc_tree *mpc_tree_params, + struct mpcc_blnd_cfg blnd_cfg, + struct mpcc_sm_cfg *sm_cfg, + struct mpcc *insert_above_mpcc, + int dpp_id, + int mpcc_id); + +void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state, + struct dpp *dpp, + const struct scaler_data *scl_data); + +void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state, + struct hubp *hubp, + const struct rect *viewport, + const struct rect *viewport_c); + +void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx); + +void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx); + +void hwss_add_set_cursor_position(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx); + +void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx); + +void hwss_add_program_output_csc(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, + int opp_id); + +void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_update_force_pstate(struct block_sequence_state *seq_state, + struct dc *dc, + struct dc_state *context); + +void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state, + struct hubbub *hubbub); + +void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + bool allow, + bool *disallow_self_refresh_applied); + +void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state, + struct timing_generator *tg, + unsigned int *frame_count); + +void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state, + struct timing_generator *tg, + struct dsc_optc_config *dsc_optc_cfg, + bool enable); + +void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum dc_pixel_encoding pixel_encoding, + bool is_otg_master); + #endif /* __DC_HW_SEQUENCER_H__ */ diff 
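A minimal sketch of how the block-sequence helpers declared above are meant to compose, under two stated assumptions: that struct block_sequence pairs an enum block_sequence_func value with the matching member of union block_sequence_params, and that the hwss_add_*() helpers append one step to the caller's struct block_sequence_state. The name example_add_tg_enable_crtc() is hypothetical and only mirrors the declared hwss_add_tg_enable_crtc() signature; the real implementations are elsewhere in dc and are not shown here.

/* Hypothetical illustration only -- not part of the patch above. */
static void example_add_tg_enable_crtc(struct block_sequence_state *seq_state,
		struct timing_generator *tg)
{
	unsigned int i = *seq_state->num_steps;

	if (i >= MAX_HWSS_BLOCK_SEQUENCE_SIZE)
		return;

	/* Record which executor to run and the argument it needs. */
	seq_state->steps[i].func = TG_ENABLE_CRTC;
	seq_state->steps[i].params.tg_enable_crtc_params.tg = tg;
	*seq_state->num_steps = i + 1;
}

/*
 * A dispatcher can then replay the recorded steps by switching on func
 * and handing the per-step union to the matching hwss_*() executor:
 *
 *	for (i = 0; i < num_steps; i++) {
 *		switch (steps[i].func) {
 *		case TG_ENABLE_CRTC:
 *			hwss_tg_enable_crtc(&steps[i].params);
 *			break;
 *		...
 *		}
 *	}
 */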
--git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 1e2d247fbbac..406db231bc72 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -27,6 +27,7 @@ #define __DC_HW_SEQUENCER_PRIVATE_H__ #include "dc_types.h" +#include "hw_sequencer.h" enum pipe_gating_control { PIPE_GATING_CONTROL_DISABLE = 0, @@ -80,7 +81,13 @@ struct hwseq_private_funcs { void (*plane_atomic_disconnect)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); + void (*plane_atomic_disconnect_sequence)(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*update_mpcc_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); bool (*set_input_transfer_func)(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); @@ -97,6 +104,10 @@ struct hwseq_private_funcs { void (*blank_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank); + void (*blank_pixel_data_sequence)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank, + struct block_sequence_state *seq_state); enum dc_status (*enable_stream_timing)( struct pipe_ctx *pipe_ctx, struct dc_state *context, @@ -105,6 +116,8 @@ struct hwseq_private_funcs { bool enable); void (*setup_vupdate_interrupt)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*setup_vupdate_interrupt_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); void (*init_blank)(struct dc *dc, struct timing_generator *tg); void (*disable_vga)(struct dce_hwseq *hws); @@ -112,6 +125,10 @@ struct hwseq_private_funcs { void (*plane_atomic_power_down)(struct dc *dc, struct dpp *dpp, struct hubp *hubp); + void (*plane_atomic_power_down_sequence)(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp, + struct block_sequence_state *seq_state); void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx); void (*enable_power_gating_plane)(struct dce_hwseq *hws, bool enable); @@ -140,15 +157,31 @@ struct hwseq_private_funcs { unsigned int dsc_inst); void (*update_odm)(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); + void (*update_odm_sequence)(struct dc *dc, struct dc_state *context, + struct pipe_ctx *pipe_ctx, struct block_sequence_state *seq_state); void (*program_all_writeback_pipes_in_tree)(struct dc *dc, const struct dc_stream_state *stream, struct dc_state *context); + void (*program_all_writeback_pipes_in_tree_sequence)( + struct dc *dc, + const struct dc_stream_state *stream, + struct dc_state *context, + struct block_sequence_state *seq_state); bool (*s0i3_golden_init_wa)(struct dc *dc); void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx); + void (*set_hdr_multiplier_sequence)(struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*verify_allow_pstate_change_high)(struct dc *dc); + void (*verify_allow_pstate_change_high_sequence)(struct dc *dc, + struct block_sequence_state *seq_state); void (*program_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); + void (*program_pipe_sequence)( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); bool (*wait_for_blank_complete)(struct 
output_pixel_processor *opp); void (*dccg_init)(struct dce_hwseq *hws); bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx, @@ -163,6 +196,8 @@ struct hwseq_private_funcs { void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context); + void (*program_mall_pipe_config_sequence)(struct dc *dc, struct dc_state *context, + struct block_sequence_state *seq_state); void (*update_force_pstate)(struct dc *dc, struct dc_state *context); void (*update_mall_sel)(struct dc *dc, struct dc_state *context); unsigned int (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx, @@ -186,6 +221,7 @@ struct hwseq_private_funcs { void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx); void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only); void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*dc_ip_request_cntl)(struct dc *dc, bool enable); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index d11893f8c916..5ed2cd344804 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -58,8 +58,8 @@ #include "transform.h" #include "dpp.h" -#include "dml2/dml21/inc/dml_top_dchub_registers.h" -#include "dml2/dml21/inc/dml_top_types.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_types.h" struct resource_pool; struct dc_state; @@ -274,7 +274,7 @@ struct resource_pool { /* An array for accessing the link encoder objects that have been created. * Index in array corresponds to engine ID - viz. 0: ENGINE_ID_DIGA */ - struct link_encoder *link_encoders[MAX_DIG_LINK_ENCODERS]; + struct link_encoder *link_encoders[MAX_LINK_ENCODERS]; /* Number of DIG link encoder objects created - i.e. number of valid * entries in link_encoders array. 
*/ @@ -514,7 +514,7 @@ struct pipe_ctx { struct link_enc_cfg_context { enum link_enc_cfg_mode mode; struct link_enc_assignment link_enc_assignments[MAX_PIPES]; - enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS]; + enum engine_id link_enc_avail[MAX_LINK_ENCODERS]; struct link_enc_assignment transient_assignments[MAX_PIPES]; }; @@ -526,8 +526,8 @@ struct resource_context { uint8_t dp_clock_source_ref_count; bool is_dsc_acquired[MAX_PIPES]; struct link_enc_cfg_context link_enc_cfg_ctx; - unsigned int dio_link_enc_to_link_idx[MAX_DIG_LINK_ENCODERS]; - int dio_link_enc_ref_cnts[MAX_DIG_LINK_ENCODERS]; + unsigned int dio_link_enc_to_link_idx[MAX_LINK_ENCODERS]; + int dio_link_enc_ref_cnts[MAX_LINK_ENCODERS]; bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS]; unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS]; int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS]; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h index 45645f9fd86c..7ce2f417f86a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h @@ -57,9 +57,9 @@ struct cursor_attribute_cache_hubp { } size; union reg_cursor_settings_cfg { struct { - uint32_t dst_y_offset: 8; - uint32_t chunk_hdl_adjust: 2; - uint32_t reserved: 22; + uint32_t dst_y_offset: 8; + uint32_t chunk_hdl_adjust: 2; + uint32_t reserved: 22; } bits; uint32_t raw; } settings; @@ -83,12 +83,34 @@ union reg_cur0_control_cfg { } bits; uint32_t raw; }; + struct cursor_position_cache_dpp { union reg_cur0_control_cfg cur0_ctl; }; struct cursor_attribute_cache_dpp { union reg_cur0_control_cfg cur0_ctl; + union reg_cur0_fp_scale_bias { + struct { + uint32_t fp_bias: 16; + uint32_t fp_scale: 16; + } bits; + uint32_t raw; + } fp_scale_bias; + union reg_cur0_fp_scale_bias_g_y { + struct { + uint32_t fp_bias_g_y: 16; + uint32_t fp_scale_g_y: 16; + } bits; + uint32_t raw; + } fp_scale_bias_g_y; + union reg_cur0_fp_scale_bias_rb_crcb { + struct { + uint32_t fp_bias_rb_crcb: 16; + uint32_t fp_scale_rb_crcb: 16; + } bits; + uint32_t raw; + } fp_scale_bias_rb_crcb; }; struct cursor_attributes_cfg { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 61c4d2a7db1c..500a601e99b5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -71,6 +71,125 @@ enum pixel_rate_div { PIXEL_RATE_DIV_NA = 0xF }; +struct dcn_dccg_reg_state { + uint32_t dc_mem_global_pwr_req_cntl; + uint32_t dccg_audio_dtbclk_dto_modulo; + uint32_t dccg_audio_dtbclk_dto_phase; + uint32_t dccg_audio_dto_source; + uint32_t dccg_audio_dto0_module; + uint32_t dccg_audio_dto0_phase; + uint32_t dccg_audio_dto1_module; + uint32_t dccg_audio_dto1_phase; + uint32_t dccg_cac_status; + uint32_t dccg_cac_status2; + uint32_t dccg_disp_cntl_reg; + uint32_t dccg_ds_cntl; + uint32_t dccg_ds_dto_incr; + uint32_t dccg_ds_dto_modulo; + uint32_t dccg_ds_hw_cal_interval; + uint32_t dccg_gate_disable_cntl; + uint32_t dccg_gate_disable_cntl2; + uint32_t dccg_gate_disable_cntl3; + uint32_t dccg_gate_disable_cntl4; + uint32_t dccg_gate_disable_cntl5; + uint32_t dccg_gate_disable_cntl6; + uint32_t dccg_global_fgcg_rep_cntl; + uint32_t dccg_gtc_cntl; + uint32_t dccg_gtc_current; + uint32_t dccg_gtc_dto_incr; + uint32_t dccg_gtc_dto_modulo; + uint32_t dccg_perfmon_cntl; + uint32_t dccg_perfmon_cntl2; + uint32_t dccg_soft_reset; + uint32_t 
dccg_test_clk_sel; + uint32_t dccg_vsync_cnt_ctrl; + uint32_t dccg_vsync_cnt_int_ctrl; + uint32_t dccg_vsync_otg0_latch_value; + uint32_t dccg_vsync_otg1_latch_value; + uint32_t dccg_vsync_otg2_latch_value; + uint32_t dccg_vsync_otg3_latch_value; + uint32_t dccg_vsync_otg4_latch_value; + uint32_t dccg_vsync_otg5_latch_value; + uint32_t dispclk_cgtt_blk_ctrl_reg; + uint32_t dispclk_freq_change_cntl; + uint32_t dp_dto_dbuf_en; + uint32_t dp_dto0_modulo; + uint32_t dp_dto0_phase; + uint32_t dp_dto1_modulo; + uint32_t dp_dto1_phase; + uint32_t dp_dto2_modulo; + uint32_t dp_dto2_phase; + uint32_t dp_dto3_modulo; + uint32_t dp_dto3_phase; + uint32_t dpiaclk_540m_dto_modulo; + uint32_t dpiaclk_540m_dto_phase; + uint32_t dpiaclk_810m_dto_modulo; + uint32_t dpiaclk_810m_dto_phase; + uint32_t dpiaclk_dto_cntl; + uint32_t dpiasymclk_cntl; + uint32_t dppclk_cgtt_blk_ctrl_reg; + uint32_t dppclk_ctrl; + uint32_t dppclk_dto_ctrl; + uint32_t dppclk0_dto_param; + uint32_t dppclk1_dto_param; + uint32_t dppclk2_dto_param; + uint32_t dppclk3_dto_param; + uint32_t dprefclk_cgtt_blk_ctrl_reg; + uint32_t dprefclk_cntl; + uint32_t dpstreamclk_cntl; + uint32_t dscclk_dto_ctrl; + uint32_t dscclk0_dto_param; + uint32_t dscclk1_dto_param; + uint32_t dscclk2_dto_param; + uint32_t dscclk3_dto_param; + uint32_t dtbclk_dto_dbuf_en; + uint32_t dtbclk_dto0_modulo; + uint32_t dtbclk_dto0_phase; + uint32_t dtbclk_dto1_modulo; + uint32_t dtbclk_dto1_phase; + uint32_t dtbclk_dto2_modulo; + uint32_t dtbclk_dto2_phase; + uint32_t dtbclk_dto3_modulo; + uint32_t dtbclk_dto3_phase; + uint32_t dtbclk_p_cntl; + uint32_t force_symclk_disable; + uint32_t hdmicharclk0_clock_cntl; + uint32_t hdmistreamclk_cntl; + uint32_t hdmistreamclk0_dto_param; + uint32_t microsecond_time_base_div; + uint32_t millisecond_time_base_div; + uint32_t otg_pixel_rate_div; + uint32_t otg0_phypll_pixel_rate_cntl; + uint32_t otg0_pixel_rate_cntl; + uint32_t otg1_phypll_pixel_rate_cntl; + uint32_t otg1_pixel_rate_cntl; + uint32_t otg2_phypll_pixel_rate_cntl; + uint32_t otg2_pixel_rate_cntl; + uint32_t otg3_phypll_pixel_rate_cntl; + uint32_t otg3_pixel_rate_cntl; + uint32_t phyasymclk_clock_cntl; + uint32_t phybsymclk_clock_cntl; + uint32_t phycsymclk_clock_cntl; + uint32_t phydsymclk_clock_cntl; + uint32_t phyesymclk_clock_cntl; + uint32_t phyplla_pixclk_resync_cntl; + uint32_t phypllb_pixclk_resync_cntl; + uint32_t phypllc_pixclk_resync_cntl; + uint32_t phyplld_pixclk_resync_cntl; + uint32_t phyplle_pixclk_resync_cntl; + uint32_t refclk_cgtt_blk_ctrl_reg; + uint32_t socclk_cgtt_blk_ctrl_reg; + uint32_t symclk_cgtt_blk_ctrl_reg; + uint32_t symclk_psp_cntl; + uint32_t symclk32_le_cntl; + uint32_t symclk32_se_cntl; + uint32_t symclka_clock_enable; + uint32_t symclkb_clock_enable; + uint32_t symclkc_clock_enable; + uint32_t symclkd_clock_enable; + uint32_t symclke_clock_enable; +}; + struct dccg { struct dc_context *ctx; const struct dccg_funcs *funcs; @@ -81,7 +200,6 @@ struct dccg { //int audio_dtbclk_khz;/* TODO needs to be removed */ //int ref_dtbclk_khz;/* TODO needs to be removed */ }; - struct dtbclk_dto_params { const struct dc_crtc_timing *timing; int otg_inst; @@ -214,6 +332,7 @@ struct dccg_funcs { void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h); void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst); void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating); + void (*dccg_read_reg_state)(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state); }; #endif 
//__DAL_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 843a18287c83..1ddfa30411c8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -137,6 +137,14 @@ struct dcn_hubbub_state { uint32_t dram_state_cntl; }; +struct dcn_hubbub_reg_state { + uint32_t det0_ctrl; + uint32_t det1_ctrl; + uint32_t det2_ctrl; + uint32_t det3_ctrl; + uint32_t compbuf_ctrl; +}; + struct hubbub_system_latencies { uint32_t max_latency_ns; uint32_t avg_latency_ns; @@ -216,6 +224,8 @@ struct hubbub_funcs { void (*init_watermarks)(struct hubbub *hubbub); + void (*hubbub_read_reg_state)(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state); + /** * @program_det_size: * @@ -242,17 +252,39 @@ struct hubbub_funcs { void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase); void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst); bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower); - void (*get_det_sizes)(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes); - uint32_t (*compbuf_config_error)(struct hubbub *hubbub); - struct hubbub_perfmon_funcs{ - void (*start_system_latency_measurement)(struct hubbub *hubbub); - void (*get_system_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, struct hubbub_system_latencies *latencies); - void (*start_in_order_bandwidth_measurement)(struct hubbub *hubbub); - void (*get_in_order_bandwidth_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *bandwidth_mbps); - void (*start_urgent_ramp_latency_measurement)(struct hubbub *hubbub, const struct hubbub_urgent_latency_params *params); - void (*get_urgent_ramp_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *latency_ns); + void (*dchvm_init)(struct hubbub *hubbub); + + struct hubbub_perfmon_funcs { void (*reset)(struct hubbub *hubbub); + void (*start_measuring_max_memory_latency_ns)( + struct hubbub *hubbub); + uint32_t (*get_max_memory_latency_ns)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t *sample_count); + void (*start_measuring_average_memory_latency_ns)( + struct hubbub *hubbub); + uint32_t (*get_average_memory_latency_ns)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t *sample_count); + void (*start_measuring_urgent_ramp_latency_ns)( + struct hubbub *hubbub, + const struct hubbub_urgent_latency_params *params); + uint32_t (*get_urgent_ramp_latency_ns)(struct hubbub *hubbub, + uint32_t refclk_mhz); + void (*start_measuring_unbounded_bandwidth_mbps)( + struct hubbub *hubbub); + uint32_t (*get_unbounded_bandwidth_mbps)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t *duration_ns); + void (*start_measuring_average_bandwidth_mbps)( + struct hubbub *hubbub); + uint32_t (*get_average_bandwidth_mbps)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t min_duration_ns, + uint32_t *duration_ns); } perfmon; + + struct hubbub_qos_funcs { + void (*force_display_nominal_profile)(struct hubbub *hubbub); + void (*force_display_urgent_profile)(struct hubbub *hubbub); + void (*reset_display_qos_profile)(struct hubbub *hubbub); + } qos; }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 1b7c085dc2cc..d88b57d4f512 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ 
-65,7 +65,6 @@ union defer_reg_writes { } bits; uint32_t raw; }; - struct dpp { const struct dpp_funcs *funcs; struct dc_context *ctx; @@ -84,6 +83,7 @@ struct dpp { struct pwl_params shaper_params; bool cm_bypass_mode; + bool cursor_offload; struct cursor_position_cache_dpp pos; struct cursor_attribute_cache_dpp att; @@ -202,6 +202,19 @@ struct dcn_dpp_state { uint32_t gamcor_mode; }; +struct dcn_dpp_reg_state { + uint32_t recout_start; + uint32_t recout_size; + uint32_t scl_horz_filter_scale_ratio; + uint32_t scl_vert_filter_scale_ratio; + uint32_t scl_mode; + uint32_t cm_control; + uint32_t dpp_control; + uint32_t dscl_control; + uint32_t obuf_control; + uint32_t mpc_size; +}; + struct CM_bias_params { uint32_t cm_bias_cr_r; uint32_t cm_bias_y_g; @@ -225,6 +238,8 @@ struct dpp_funcs { void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s); + void (*dpp_read_reg_state)(struct dpp *dpp, struct dcn_dpp_reg_state *dpp_reg_state); + void (*dpp_reset)(struct dpp *dpp); void (*dpp_set_scaler)(struct dpp *dpp, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 2b874d2cc61c..a79019365af8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -41,8 +41,8 @@ #include "mem_input.h" #include "cursor_reg_cache.h" -#include "dml2/dml21/inc/dml_top_dchub_registers.h" -#include "dml2/dml21/inc/dml_top_types.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_types.h" #define OPP_ID_INVALID 0xf #define MAX_TTU 0xffffff @@ -126,11 +126,13 @@ struct hubp { int mpcc_id; struct dc_cursor_attributes curs_attr; struct dc_cursor_position curs_pos; + bool cursor_offload; bool power_gated; struct cursor_position_cache_hubp pos; struct cursor_attribute_cache_hubp att; struct cursor_rect cur_rect; + bool use_mall_for_cursor; }; struct surface_flip_registers { @@ -236,6 +238,7 @@ struct hubp_funcs { void (*hubp_clk_cntl)(struct hubp *hubp, bool enable); void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst); void (*hubp_read_state)(struct hubp *hubp); + void (*hubp_read_reg_state)(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state); void (*hubp_clear_underflow)(struct hubp *hubp); void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp); unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 62a39204fe0b..a61d12ec61bc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -51,11 +51,60 @@ #define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS) +/** + * define MAX_DIG_LINK_ENCODERS - maximum number of digital encoders + * + * Digital encoders are ENGINE_ID_DIGA...G; there are at most 7, + * although not every GPU may have that many. + */ #define MAX_DIG_LINK_ENCODERS 7 + +/** + * define MAX_DAC_LINK_ENCODERS - maximum number of analog link encoders + * + * Analog encoders are ENGINE_ID_DACA/B; there are at most 2, + * although not every GPU may have that many. Modern GPUs typically + * don't have analog encoders. + */ +#define MAX_DAC_LINK_ENCODERS 2 + +/** + * define MAX_LINK_ENCODERS - maximum number of link encoders in total + * + * This includes both analog and digital encoders. 
+ */ +#define MAX_LINK_ENCODERS (MAX_DIG_LINK_ENCODERS + MAX_DAC_LINK_ENCODERS) + #define MAX_DWB_PIPES 1 #define MAX_HPO_DP2_ENCODERS 4 #define MAX_HPO_DP2_LINK_ENCODERS 4 +/* Pipe topology snapshot structures */ +#define MAX_TOPOLOGY_SNAPSHOTS 4 + +struct pipe_topology_line { + bool is_phantom_pipe; + int plane_idx; + int slice_idx; + int stream_idx; + int dpp_inst; + int opp_inst; + int tg_inst; +}; + +struct pipe_topology_snapshot { + struct pipe_topology_line pipe_log_lines[MAX_PIPES]; + int line_count; + uint64_t timestamp_us; + int stream_count; + int phantom_stream_count; +}; + +struct pipe_topology_history { + struct pipe_topology_snapshot snapshots[MAX_TOPOLOGY_SNAPSHOTS]; + int current_snapshot_index; +}; + struct gamma_curve { uint32_t offset; uint32_t segments_num; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index 08c16ba52a51..df512920a9fa 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -47,6 +47,7 @@ struct encoder_init_data { enum hpd_source_id hpd_source; /* TODO: in DAL2, here was pointer to EventManagerInterface */ struct graphics_object_id encoder; + enum engine_id analog_engine; struct dc_context *ctx; enum transmitter transmitter; }; @@ -83,6 +84,7 @@ struct link_encoder { struct graphics_object_id connector; uint32_t output_signals; enum engine_id preferred_engine; + enum engine_id analog_engine; struct encoder_feature_support features; enum transmitter transmitter; enum hpd_source_id hpd_source; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index 42fbc70f7056..d468bc85566a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -29,7 +29,7 @@ #include "include/grph_object_id.h" #include "dml/display_mode_structs.h" -#include "dml2/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" struct dchub_init_data; struct cstate_pstate_watermarks_st { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 22960ee03dee..a8d1abe20f62 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -350,6 +350,15 @@ struct mpcc_state { struct mpc_rmcm_regs rmcm_regs; }; +struct dcn_mpc_reg_state { + uint32_t mpcc_bot_sel; + uint32_t mpcc_control; + uint32_t mpcc_status; + uint32_t mpcc_top_sel; + uint32_t mpcc_opp_id; + uint32_t mpcc_ogam_control; +}; + /** * struct mpc_funcs - funcs */ @@ -373,6 +382,24 @@ struct mpc_funcs { struct mpc *mpc, int mpcc_inst, struct mpcc_state *s); + /** + * @mpc_read_reg_state: + * + * Read MPC register state for debugging underflow purposes. 
+ * + * Parameters: + * + * - [in] mpc - MPC context + * - [out] reg_state - MPC register state structure + * + * Return: + * + * void + */ + void (*mpc_read_reg_state)( + struct mpc *mpc, + int mpcc_inst, + struct dcn_mpc_reg_state *mpc_reg_state); /** * @insert_plane: diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 747679cb4944..e1428a83ecbc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -297,6 +297,16 @@ struct oppbuf_params { uint32_t num_segment_padded_pixels; }; +struct dcn_opp_reg_state { + uint32_t dpg_control; + uint32_t fmt_control; + uint32_t oppbuf_control; + uint32_t opp_pipe_control; + uint32_t opp_pipe_crc_control; + uint32_t opp_abm_control; + uint32_t dscrm_dsc_forward_config; +}; + struct opp_funcs { @@ -368,6 +378,9 @@ struct opp_funcs { struct output_pixel_processor *opp, enum dc_pixel_encoding pixel_encoding, bool is_primary); + + void (*opp_read_reg_state)( + struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index f2de2cf23859..da7bf59c4b9d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -175,6 +175,135 @@ struct dcn_otg_state { uint32_t otg_double_buffer_control; }; +struct dcn_optc_reg_state { + uint32_t optc_bytes_per_pixel; + uint32_t optc_data_format_control; + uint32_t optc_data_source_select; + uint32_t optc_input_clock_control; + uint32_t optc_input_global_control; + uint32_t optc_input_spare_register; + uint32_t optc_memory_config; + uint32_t optc_rsmu_underflow; + uint32_t optc_underflow_threshold; + uint32_t optc_width_control; + + uint32_t otg_3d_structure_control; + uint32_t otg_clock_control; + uint32_t otg_control; + uint32_t otg_count_control; + uint32_t otg_count_reset; + uint32_t otg_crc_cntl; + uint32_t otg_crc_sig_blue_control_mask; + uint32_t otg_crc_sig_red_green_mask; + uint32_t otg_crc0_data_b; + uint32_t otg_crc0_data_rg; + uint32_t otg_crc0_windowa_x_control; + uint32_t otg_crc0_windowa_x_control_readback; + uint32_t otg_crc0_windowa_y_control; + uint32_t otg_crc0_windowa_y_control_readback; + uint32_t otg_crc0_windowb_x_control; + uint32_t otg_crc0_windowb_x_control_readback; + uint32_t otg_crc0_windowb_y_control; + uint32_t otg_crc0_windowb_y_control_readback; + uint32_t otg_crc1_data_b; + uint32_t otg_crc1_data_rg; + uint32_t otg_crc1_windowa_x_control; + uint32_t otg_crc1_windowa_x_control_readback; + uint32_t otg_crc1_windowa_y_control; + uint32_t otg_crc1_windowa_y_control_readback; + uint32_t otg_crc1_windowb_x_control; + uint32_t otg_crc1_windowb_x_control_readback; + uint32_t otg_crc1_windowb_y_control; + uint32_t otg_crc1_windowb_y_control_readback; + uint32_t otg_crc2_data_b; + uint32_t otg_crc2_data_rg; + uint32_t otg_crc3_data_b; + uint32_t otg_crc3_data_rg; + uint32_t otg_dlpc_control; + uint32_t otg_double_buffer_control; + uint32_t otg_drr_control2; + uint32_t otg_drr_control; + uint32_t otg_drr_timing_int_status; + uint32_t otg_drr_trigger_window; + uint32_t otg_drr_v_total_change; + uint32_t otg_drr_v_total_reach_range; + uint32_t otg_dsc_start_position; + uint32_t otg_force_count_now_cntl; + uint32_t otg_global_control0; + uint32_t otg_global_control1; + uint32_t otg_global_control2; + uint32_t otg_global_control3; + uint32_t otg_global_control4; + 
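/*
 * Editor's note, a minimal illustrative sketch (not part of this patch):
 * dcn_optc_reg_state and the other *_read_reg_state hooks added in this
 * series are raw register snapshots intended for underflow debugging. A
 * hypothetical consumer is sketched below; the surrounding call site is an
 * assumption, only the hook signature and field names come from this patch.
 *
 *	struct dcn_optc_reg_state optc_regs = {0};
 *
 *	if (tg->funcs->optc_read_reg_state) {
 *		tg->funcs->optc_read_reg_state(tg, &optc_regs);
 *		DC_LOG_DEBUG("OTG_CONTROL=0x%08x OTG_STATUS=0x%08x OPTC_UNDERFLOW_THRESHOLD=0x%08x",
 *			     optc_regs.otg_control, optc_regs.otg_status,
 *			     optc_regs.optc_underflow_threshold);
 *	}
 */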
uint32_t otg_global_sync_status; + uint32_t otg_gsl_control; + uint32_t otg_gsl_vsync_gap; + uint32_t otg_gsl_window_x; + uint32_t otg_gsl_window_y; + uint32_t otg_h_blank_start_end; + uint32_t otg_h_sync_a; + uint32_t otg_h_sync_a_cntl; + uint32_t otg_h_timing_cntl; + uint32_t otg_h_total; + uint32_t otg_interlace_control; + uint32_t otg_interlace_status; + uint32_t otg_interrupt_control; + uint32_t otg_long_vblank_status; + uint32_t otg_m_const_dto0; + uint32_t otg_m_const_dto1; + uint32_t otg_manual_force_vsync_next_line; + uint32_t otg_master_en; + uint32_t otg_master_update_lock; + uint32_t otg_master_update_mode; + uint32_t otg_nom_vert_position; + uint32_t otg_pipe_update_status; + uint32_t otg_pixel_data_readback0; + uint32_t otg_pixel_data_readback1; + uint32_t otg_request_control; + uint32_t otg_snapshot_control; + uint32_t otg_snapshot_frame; + uint32_t otg_snapshot_position; + uint32_t otg_snapshot_status; + uint32_t otg_spare_register; + uint32_t otg_static_screen_control; + uint32_t otg_status; + uint32_t otg_status_frame_count; + uint32_t otg_status_hv_count; + uint32_t otg_status_position; + uint32_t otg_status_vf_count; + uint32_t otg_stereo_control; + uint32_t otg_stereo_force_next_eye; + uint32_t otg_stereo_status; + uint32_t otg_trig_manual_control; + uint32_t otg_triga_cntl; + uint32_t otg_triga_manual_trig; + uint32_t otg_trigb_cntl; + uint32_t otg_trigb_manual_trig; + uint32_t otg_update_lock; + uint32_t otg_v_blank_start_end; + uint32_t otg_v_count_stop_control; + uint32_t otg_v_count_stop_control2; + uint32_t otg_v_sync_a; + uint32_t otg_v_sync_a_cntl; + uint32_t otg_v_total; + uint32_t otg_v_total_control; + uint32_t otg_v_total_int_status; + uint32_t otg_v_total_max; + uint32_t otg_v_total_mid; + uint32_t otg_v_total_min; + uint32_t otg_vert_sync_control; + uint32_t otg_vertical_interrupt0_control; + uint32_t otg_vertical_interrupt0_position; + uint32_t otg_vertical_interrupt1_control; + uint32_t otg_vertical_interrupt1_position; + uint32_t otg_vertical_interrupt2_control; + uint32_t otg_vertical_interrupt2_position; + uint32_t otg_vready_param; + uint32_t otg_vstartup_param; + uint32_t otg_vsync_nom_int_status; + uint32_t otg_vupdate_keepout; + uint32_t otg_vupdate_param; +}; + /** * struct timing_generator - Entry point to Output Timing Generator feature. 
*/ @@ -381,6 +510,7 @@ struct timing_generator_funcs { void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable); bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked); void (*read_otg_state)(struct timing_generator *tg, struct dcn_otg_state *s); + void (*optc_read_reg_state)(struct timing_generator *tg, struct dcn_optc_reg_state *optc_reg_state); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_service.h b/drivers/gpu/drm/amd/display/dc/inc/link_service.h index 1e34e84160aa..6f94e48a24d1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_service.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_service.h @@ -292,12 +292,12 @@ struct link_service { enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_data); bool (*edp_set_coasting_vtotal)( - struct dc_link *link, uint32_t coasting_vtotal); + struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number); bool (*edp_replay_residency)(const struct dc_link *link, unsigned int *residency, const bool is_start, const enum pr_residency_mode mode); bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link, - const unsigned int *power_opts, uint32_t coasting_vtotal); + const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number); bool (*edp_wait_for_t12)(struct dc_link *link); bool (*edp_is_ilr_optimization_required)(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 4e26a16a8743..79746d931471 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -49,6 +49,7 @@ struct resource_caps { int num_video_plane; int num_audio; int num_stream_encoder; + int num_analog_stream_encoder; int num_pll; int num_dwb; int num_ddc; diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 2676ae9f6fe8..1045c268672e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -877,7 +877,7 @@ bool dp_set_test_pattern( return false; if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) { - if (should_use_dmub_lock(pipe_ctx->stream->link)) { + if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -925,7 +925,7 @@ bool dp_set_test_pattern( CRTC_STATE_VACTIVE); if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) { - if (should_use_dmub_lock(pipe_ctx->stream->link)) { + if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c index 892907991f91..befa67b2b2ae 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c @@ -58,8 +58,9 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx) return; } - link_enc->funcs->connect_dig_be_to_fe(link_enc, - pipe_ctx->stream_res.stream_enc->id, true); + if (!dc_is_rgb_signal(pipe_ctx->stream->signal)) + link_enc->funcs->connect_dig_be_to_fe(link_enc, + pipe_ctx->stream_res.stream_enc->id, true); if (dc_is_dp_signal(pipe_ctx->stream->signal)) 
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); @@ -98,10 +99,13 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx) if (stream_enc->funcs->enable_stream) stream_enc->funcs->enable_stream(stream_enc, pipe_ctx->stream->signal, false); - link_enc->funcs->connect_dig_be_to_fe( - link_enc, - pipe_ctx->stream_res.stream_enc->id, - false); + + if (!dc_is_rgb_signal(pipe_ctx->stream->signal)) + link_enc->funcs->connect_dig_be_to_fe( + link_enc, + pipe_ctx->stream_res.stream_enc->id, + false); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence( pipe_ctx->stream->link, @@ -115,7 +119,8 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; - if (!dc_is_virtual_signal(stream->signal)) + if (!dc_is_virtual_signal(stream->signal) && + !dc_is_rgb_signal(stream->signal)) stream_encoder->funcs->setup_stereo_sync( stream_encoder, pipe_ctx->stream_res.tg->inst, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 1173c53359b0..6d31f4967f1a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -270,6 +270,10 @@ static void read_scdc_caps(struct ddc_service *ddc_service, uint8_t slave_address = HDMI_SCDC_ADDRESS; uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI; + if (ddc_service->link->local_sink && + !ddc_service->link->local_sink->edid_caps.scdc_present) + return; + link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte, sizeof(sink->scdc_caps.manufacturer_OUI.byte)); @@ -858,6 +862,94 @@ static void verify_link_capability(struct dc_link *link, struct dc_sink *sink, verify_link_capability_non_destructive(link); } +/** + * link_detect_evaluate_edid_header() - Evaluate if an EDID header is acceptable. + * + * Evaluates an 8-byte EDID header to check if it's good enough + * for the purpose of determining whether a display is connected + * without reading the full EDID. + * + * @edid_header: The first 8 bytes of the EDID read from DDC. + * + * Return: true if the header looks valid (>= 6 of 8 bytes match the + * expected 00/FF pattern), false otherwise. + */ +static bool link_detect_evaluate_edid_header(uint8_t edid_header[8]) +{ + int edid_header_score = 0; + int i; + + for (i = 0; i < 8; ++i) + edid_header_score += edid_header[i] == ((i == 0 || i == 7) ? 0x00 : 0xff); + + return edid_header_score >= 6; +} + +/** + * link_detect_ddc_probe() - Probe the DDC to see if a display is connected. + * + * Detect whether a display is connected to DDC without reading full EDID. + * Reads only the EDID header (the first 8 bytes of EDID) from DDC and + * evaluates whether that matches. + * + * @link: DC link whose DDC/I2C is probed for the EDID header. + * + * Return: true if the EDID header was read and passes validation, + * false otherwise. + */ +static bool link_detect_ddc_probe(struct dc_link *link) +{ + if (!link->ddc) + return false; + + uint8_t edid_header[8] = {0}; + bool ddc_probed = i2c_read(link->ddc, 0x50, edid_header, sizeof(edid_header)); + + if (!ddc_probed) + return false; + + if (!link_detect_evaluate_edid_header(edid_header)) + return false; + + return true; +} + +/** + * link_detect_dac_load_detect() - Performs DAC load detection. 
+ * + * Load detection can be used to detect the presence of an + * analog display when we can't read DDC. This causes a visible + * visual glitch so it should be used sparingly. + * + * @link: DC link to test using the DAC load-detect path. + * + * Return: true if the VBIOS load-detect call reports OK, false + * otherwise. + */ +static bool link_detect_dac_load_detect(struct dc_link *link) +{ + struct dc_bios *bios = link->ctx->dc_bios; + struct link_encoder *link_enc = link->link_enc; + enum engine_id engine_id = link_enc->preferred_engine; + enum dal_device_type device_type = DEVICE_TYPE_CRT; + enum bp_result bp_result; + uint32_t enum_id; + + switch (engine_id) { + case ENGINE_ID_DACB: + enum_id = 2; + break; + case ENGINE_ID_DACA: + default: + engine_id = ENGINE_ID_DACA; + enum_id = 1; + break; + } + + bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id); + return bp_result == BP_RESULT_OK; +} + /* * detect_link_and_local_sink() - Detect if a sink is attached to a given link * @@ -942,6 +1034,12 @@ static bool detect_link_and_local_sink(struct dc_link *link, break; } + case SIGNAL_TYPE_RGB: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_RGB; + break; + } + case SIGNAL_TYPE_LVDS: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; sink_caps.signal = SIGNAL_TYPE_LVDS; @@ -1066,7 +1164,30 @@ static bool detect_link_and_local_sink(struct dc_link *link, DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n"); break; case EDID_NO_RESPONSE: + /* Analog connectors without EDID: + * - old monitor that actually doesn't have EDID + * - cheap DVI-A cable or adapter that doesn't connect DDC + */ + if (dc_connector_supports_analog(link->link_id.id)) { + /* If we didn't do DAC load detection yet, do it now + * to verify there really is a display connected. + */ + if (link->type != dc_connection_dac_load && + !link_detect_dac_load_detect(link)) { + if (prev_sink) + dc_sink_release(prev_sink); + link_disconnect_sink(link); + return false; + } + + DC_LOG_INFO("%s detected analog display without EDID\n", __func__); + link->type = dc_connection_dac_load; + sink->edid_caps.analog = true; + break; + } + DC_LOG_ERROR("No EDID read.\n"); + /* * Abort detection for non-DP connectors if we have * no EDID @@ -1133,9 +1254,17 @@ static bool detect_link_and_local_sink(struct dc_link *link, sink = prev_sink; prev_sink = NULL; } - query_hdcp_capability(sink->sink_signal, link); + + if (!sink->edid_caps.analog) + query_hdcp_capability(sink->sink_signal, link); } + /* DVI-I connector connected to analog display. */ + if ((link->link_id.id == CONNECTOR_ID_DUAL_LINK_DVII || + link->link_id.id == CONNECTOR_ID_SINGLE_LINK_DVII) && + sink->edid_caps.analog) + sink->sink_signal = SIGNAL_TYPE_RGB; + /* HDMI-DVI Dongle */ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && !sink->edid_caps.edid_hdmi) @@ -1233,6 +1362,36 @@ static bool detect_link_and_local_sink(struct dc_link *link, return true; } +/** + * link_detect_analog() - Determines if an analog sink is connected. + * + * @link: DC link to evaluate (must support analog signalling). + * @type: Updated with the detected connection type: + * dc_connection_single (analog via DDC), + * dc_connection_dac_load (via load-detect), + * or dc_connection_none. + * + * Return: true if detection completed. + */ +static bool link_detect_analog(struct dc_link *link, enum dc_connection_type *type) +{ + /* Don't care about connectors that don't support an analog signal. 
*/ + ASSERT(dc_connector_supports_analog(link->link_id.id)); + + if (link_detect_ddc_probe(link)) { + *type = dc_connection_single; + return true; + } + + if (link_detect_dac_load_detect(link)) { + *type = dc_connection_dac_load; + return true; + } + + *type = dc_connection_none; + return true; +} + /* * link_detect_connection_type() - Determine if there is a sink connected * @@ -1249,6 +1408,17 @@ bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type * return true; } + /* Ignore the HPD pin (if any) for analog connectors. + * Instead rely on DDC and DAC. + * + * - VGA connectors don't have any HPD at all. + * - Some DVI-A cables don't connect the HPD pin. + * - Some DVI-A cables pull up the HPD pin. + * (So it's high even when no display is connected.) + */ + if (dc_connector_supports_analog(link->link_id.id)) + return link_detect_analog(link, type); + if (link->connector_signal == SIGNAL_TYPE_EDP) { /*in case it is not on*/ if (!link->dc->config.edp_no_power_sequencing) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index b66fbcb0040d..6ae134147617 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -841,6 +841,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; if (should_use_dto_dscclk) dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); @@ -970,6 +971,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi dsc_cfg.color_depth = stream->timing.display_color_depth; dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps)); @@ -2224,7 +2226,11 @@ static enum dc_status enable_link( { enum dc_status status = DC_ERROR_UNEXPECTED; struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; + struct dc_link *link = NULL; + + if (stream == NULL) + return DC_ERROR_UNEXPECTED; + link = stream->link; /* There's some scenarios where driver is unloaded with display * still enabled. 
When driver is reloaded, it may cause a display @@ -2256,6 +2262,9 @@ static enum dc_status enable_link( enable_link_lvds(pipe_ctx); status = DC_OK; break; + case SIGNAL_TYPE_RGB: + status = DC_OK; + break; case SIGNAL_TYPE_VIRTUAL: status = enable_link_virtual(pipe_ctx); break; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 31a73867cd4c..a6e2b0821969 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -451,6 +451,46 @@ static enum channel_id get_ddc_line(struct dc_link *link) return channel; } +static enum engine_id find_analog_engine(struct dc_link *link) +{ + struct dc_bios *bp = link->ctx->dc_bios; + struct graphics_object_id encoder = {0}; + enum bp_result bp_result = BP_RESULT_OK; + int i; + + for (i = 0; i < 3; i++) { + bp_result = bp->funcs->get_src_obj(bp, link->link_id, i, &encoder); + + if (bp_result != BP_RESULT_OK) + return ENGINE_ID_UNKNOWN; + + switch (encoder.id) { + case ENCODER_ID_INTERNAL_DAC1: + case ENCODER_ID_INTERNAL_KLDSCP_DAC1: + return ENGINE_ID_DACA; + case ENCODER_ID_INTERNAL_DAC2: + case ENCODER_ID_INTERNAL_KLDSCP_DAC2: + return ENGINE_ID_DACB; + } + } + + return ENGINE_ID_UNKNOWN; +} + +static bool transmitter_supported(const enum transmitter transmitter) +{ + return transmitter != TRANSMITTER_UNKNOWN && + transmitter != TRANSMITTER_NUTMEG_CRT && + transmitter != TRANSMITTER_TRAVIS_CRT && + transmitter != TRANSMITTER_TRAVIS_LCD; +} + +static bool analog_engine_supported(const enum engine_id engine_id) +{ + return engine_id == ENGINE_ID_DACA || + engine_id == ENGINE_ID_DACB; +} + static bool construct_phy(struct dc_link *link, const struct link_init_data *init_params) { @@ -482,10 +522,23 @@ static bool construct_phy(struct dc_link *link, link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index); + /* Determine early if the link has any supported encoders, + * so that we avoid initializing DDC and HPD, etc. 
+ */ + bp_funcs->get_src_obj(bios, link->link_id, 0, &enc_init_data.encoder); + enc_init_data.transmitter = translate_encoder_to_transmitter(enc_init_data.encoder); + enc_init_data.analog_engine = find_analog_engine(link); + link->ep_type = DISPLAY_ENDPOINT_PHY; DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id); + if (!transmitter_supported(enc_init_data.transmitter) && + !analog_engine_supported(enc_init_data.analog_engine)) { + DC_LOG_WARNING("link_id %d has unsupported encoder\n", link->link_id.id); + goto unsupported_fail; + } + if (bios->funcs->get_disp_connector_caps_info) { bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info); link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY; @@ -530,6 +583,9 @@ static bool construct_phy(struct dc_link *link, case CONNECTOR_ID_DUAL_LINK_DVII: link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK; break; + case CONNECTOR_ID_VGA: + link->connector_signal = SIGNAL_TYPE_RGB; + break; case CONNECTOR_ID_DISPLAY_PORT: case CONNECTOR_ID_MXM: case CONNECTOR_ID_USBC: @@ -611,16 +667,12 @@ static bool construct_phy(struct dc_link *link, dal_ddc_get_line(get_ddc_pin(link->ddc)); enc_init_data.ctx = dc_ctx; - bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, - &enc_init_data.encoder); enc_init_data.connector = link->link_id; enc_init_data.channel = get_ddc_line(link); enc_init_data.hpd_source = get_hpd_line(link); link->hpd_src = enc_init_data.hpd_source; - enc_init_data.transmitter = - translate_encoder_to_transmitter(enc_init_data.encoder); link->link_enc = link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data); @@ -735,6 +787,7 @@ static bool construct_phy(struct dc_link *link, link->psr_settings.psr_vtotal_control_support = false; link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED; DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__); return true; @@ -753,6 +806,7 @@ static bool construct_phy(struct dc_link *link, link->hpd_gpio = NULL; } +unsupported_fail: DC_LOG_DC("BIOS object table - %s failed.\n", __func__); return false; } @@ -816,9 +870,7 @@ static bool construct_dpia(struct dc_link *link, /* TODO: Create link encoder */ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; - - /* Some docks seem to NAK I2C writes to segment pointer with mot=0. 
*/ - link->wa_flags.dp_mot_reset_segment = true; + link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED; return true; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c index 267180e7bc48..5d2bcce2f669 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c @@ -549,7 +549,8 @@ void write_scdc_data(struct ddc_service *ddc_service, /*Lower than 340 Scramble bit from SCDC caps*/ if (ddc_service->link->local_sink && - ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) + (ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite || + !ddc_service->link->local_sink->edid_caps.scdc_present)) return; link_query_ddc_data(ddc_service, slave_address, &offset, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index eb262ce42e2d..ad90a0106938 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -357,7 +357,9 @@ bool dp_should_enable_fec(const struct dc_link *link) { bool force_disable = false; - if (link->fec_state == dc_link_fec_enabled) + if (link->dc->debug.disable_fec) + force_disable = true; + else if (link->fec_state == dc_link_fec_enabled) force_disable = false; else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && link->local_sink && @@ -424,6 +426,21 @@ static enum dc_link_rate get_link_rate_from_max_link_bw( return link_rate; } +static enum dc_lane_count get_lttpr_max_lane_count(struct dc_link *link) +{ + enum dc_lane_count lttpr_max_lane_count = LANE_COUNT_UNKNOWN; + + if (link->dpcd_caps.lttpr_caps.max_lane_count <= LANE_COUNT_DP_MAX) + lttpr_max_lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + + /* if bw_allocation is enabled and nrd_max_lane_count is set, use it */ + if (link->dpia_bw_alloc_config.bw_alloc_enabled && + link->dpia_bw_alloc_config.nrd_max_lane_count > 0) + lttpr_max_lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count; + + return lttpr_max_lane_count; +} + static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) { @@ -438,6 +455,11 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) break; } + /* if bw_allocation is enabled and nrd_max_link_rate is set, use it */ + if (link->dpia_bw_alloc_config.bw_alloc_enabled && + link->dpia_bw_alloc_config.nrd_max_link_rate > 0) + lttpr_max_link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate; + if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20) lttpr_max_link_rate = LINK_RATE_UHBR20; else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5) @@ -1846,6 +1868,12 @@ static bool retrieve_link_cap(struct dc_link *link) link->dpcd_caps.is_mst_capable = read_is_mst_supported(link); DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable)); + /* Some MST docks seem to NAK I2C writes to segment pointer with mot=0. 
*/ + if (link->dpcd_caps.is_mst_capable) + link->wa_flags.dp_mot_reset_segment = true; + else + link->wa_flags.dp_mot_reset_segment = false; + get_active_converter_info(ds_port.byte, link); dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); @@ -2064,6 +2092,11 @@ static bool retrieve_link_cap(struct dc_link *link) link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw, sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw)); + core_link_read_dpcd(link, + DP_PANEL_REPLAY_CAPABILITY_SUPPORT, + &link->dpcd_caps.pr_caps_supported.raw, + sizeof(link->dpcd_caps.pr_caps_supported.raw)); + /* Read DP tunneling information. */ status = dpcd_get_tunneling_device_data(link); if (status != DC_OK) @@ -2242,6 +2275,7 @@ const struct dc_link_settings *dp_get_verified_link_cap( struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) { struct dc_link_settings max_link_cap = {0}; + enum dc_lane_count lttpr_max_lane_count; enum dc_link_rate lttpr_max_link_rate; enum dc_link_rate cable_max_link_rate; struct resource_context *res_ctx = &link->dc->current_state->res_ctx; @@ -2306,8 +2340,11 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) /* Some LTTPR devices do not report valid DPCD revisions, if so, do not take it's link cap into consideration. */ if (link->dpcd_caps.lttpr_caps.revision.raw >= DPCD_REV_14) { - if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) - max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + lttpr_max_lane_count = get_lttpr_max_lane_count(link); + + if (lttpr_max_lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = lttpr_max_lane_count; + lttpr_max_link_rate = get_lttpr_max_link_rate(link); if (lttpr_max_link_rate < max_link_cap.link_rate) @@ -2413,6 +2450,11 @@ bool dp_verify_link_cap_with_retries( dp_trace_detect_lt_init(link); + DC_LOG_HW_LINK_TRAINING("%s: Link[%d] LinkRate=0x%x LaneCount=%d", + __func__, link->link_index, + known_limit_link_setting->link_rate, + known_limit_link_setting->lane_count); + if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && link->dc->debug.usbc_combo_phy_reset_wa) apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting); @@ -2449,6 +2491,11 @@ bool dp_verify_link_cap_with_retries( dp_trace_lt_fail_count_update(link, fail_count, true); dp_trace_set_lt_end_timestamp(link, true); + DC_LOG_HW_LINK_TRAINING("%s: Link[%d] Exit. 
is_success=%d fail_count=%d", + __func__, link->link_index, + success, + fail_count); + return success; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index 8a3c18ae97a7..c958d3f600c8 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -225,11 +225,6 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) bool ret = false; uint8_t val; - if (link->dc->debug.dpia_debug.bits.enable_bw_allocation_mode == false) { - DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode disabled", __func__, link->link_index); - return false; - } - val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ; if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) { @@ -273,17 +268,28 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) */ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status) { - link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); - if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) { DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)", __func__, link->link_index); - } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) { + } + + if (status & DP_TUNNELING_BW_REQUEST_FAILED) { DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw); - } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) { + } + + if (status & DP_TUNNELING_BW_ALLOC_CAP_CHANGED) { + link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link); + + DC_LOG_DEBUG("%s: Granularity changed on link(%d) new granularity=%d", + __func__, link->link_index, link->dpia_bw_alloc_config.bw_granularity); + } + + if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) { + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 693477413347..4b01ab0a5a7f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -398,10 +398,12 @@ bool dp_should_allow_hpd_rx_irq(const struct dc_link *link) * Don't handle RX IRQ unless one of following is met: * 1) The link is established (cur_link_settings != unknown) * 2) We know we're dealing with a branch device, SST or MST + * 3) The link is bw_alloc enabled. 
*/ if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || - is_dp_branch_device(link)) + is_dp_branch_device(link) || + link->dpia_bw_alloc_config.bw_alloc_enabled) return true; return false; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 5e806edbb9f6..c56e69eb27ef 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -949,7 +949,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, /* Set power optimization flag */ if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) { if (replay != NULL && link->replay_settings.replay_feature_enabled && - replay->funcs->replay_set_power_opt) { + replay->funcs->replay_set_power_opt) { replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst); link->replay_settings.replay_power_opt_active = *power_opts; } @@ -984,7 +984,117 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state) return true; } -bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream) +static bool edp_setup_panel_replay(struct dc_link *link, const struct dc_stream_state *stream) +{ + /* To-do: Setup Replay */ + struct dc *dc; + struct dmub_replay *replay; + int i; + unsigned int panel_inst; + struct replay_context replay_context = { 0 }; + unsigned int lineTimeInNs = 0; + + union panel_replay_enable_and_configuration_1 pr_config_1 = { 0 }; + union panel_replay_enable_and_configuration_2 pr_config_2 = { 0 }; + + union dpcd_alpm_configuration alpm_config; + + replay_context.controllerId = CONTROLLER_ID_UNDEFINED; + + if (!link) + return false; + + //Clear Panel Replay enable & config + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1, + (uint8_t *)&(pr_config_1.raw), sizeof(uint8_t)); + + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2, + (uint8_t *)&(pr_config_2.raw), sizeof(uint8_t)); + + if (!(link->replay_settings.config.replay_supported)) + return false; + + dc = link->ctx->dc; + + //not sure should keep or not + replay = dc->res_pool->replay; + + if (!replay) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel; + replay_context.digbe_inst = link->link_enc->transmitter; + replay_context.digfe_inst = link->link_enc->preferred_engine; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + /* dmcu -1 for all controller id values, + * therefore +1 here + */ + replay_context.controllerId = + dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; + break; + } + } + + lineTimeInNs = + ((stream->timing.h_total * 1000000) / + (stream->timing.pix_clk_100hz / 10)) + 1; + + replay_context.line_time_in_ns = lineTimeInNs; + + link->replay_settings.replay_feature_enabled = + replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst); + + if (link->replay_settings.replay_feature_enabled) { + pr_config_1.bits.PANEL_REPLAY_ENABLE = 1; + pr_config_1.bits.PANEL_REPLAY_CRC_ENABLE = 1; + pr_config_1.bits.IRQ_HPD_ASSDP_MISSING = 1; + pr_config_1.bits.IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR = 1; + pr_config_1.bits.IRQ_HPD_RFB_ERROR = 1; + pr_config_1.bits.IRQ_HPD_ACTIVE_FRAME_CRC_ERROR = 1; + 
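/*
 * Editor's note, an illustrative worked example (not part of this patch) for
 * the lineTimeInNs computation above: pix_clk_100hz is in units of 100 Hz, so
 * pix_clk_100hz / 10 is the pixel clock in kHz, and h_total * 1000000 divided
 * by it gives the line time in nanoseconds. Assuming a hypothetical
 * 1920x1080@60 timing with h_total = 2200 and pix_clk_100hz = 1485000
 * (148.5 MHz):
 *
 *	lineTimeInNs = (2200 * 1000000) / (1485000 / 10) + 1
 *	             = 2200000000 / 148500 + 1
 *	             = 14814 + 1 = 14815 ns
 */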
pr_config_1.bits.PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE = 1; + pr_config_1.bits.PANEL_REPLAY_EARLY_TRANSPORT_ENABLE = 1; + + pr_config_2.bits.SINK_REFRESH_RATE_UNLOCK_GRANTED = 0; + pr_config_2.bits.SU_Y_GRANULARITY_EXT_VALUE_ENABLED = 0; + pr_config_2.bits.SU_REGION_SCAN_LINE_CAPTURE_INDICATION = 0; + + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1, + (uint8_t *)&(pr_config_1.raw), sizeof(uint8_t)); + + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2, + (uint8_t *)&(pr_config_2.raw), sizeof(uint8_t)); + + //ALPM Setup + memset(&alpm_config, 0, sizeof(alpm_config)); + alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 1 : 0; + + if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) { + alpm_config.bits.ALPM_MODE_SEL = 1; + alpm_config.bits.ACDS_PERIOD_DURATION = 1; + } + + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_RECEIVER_ALPM_CONFIG, + &alpm_config.raw, + sizeof(alpm_config.raw)); + } + + return true; +} + +static bool edp_setup_freesync_replay(struct dc_link *link, const struct dc_stream_state *stream) { /* To-do: Setup Replay */ struct dc *dc; @@ -1080,6 +1190,18 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream return true; } +bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream) +{ + if (!link) + return false; + if (link->replay_settings.config.replay_version == DC_VESA_PANEL_REPLAY) + return edp_setup_panel_replay(link, stream); + else if (link->replay_settings.config.replay_version == DC_FREESYNC_REPLAY) + return edp_setup_freesync_replay(link, stream); + else + return false; +} + /* * This is general Interface for Replay to set an 32 bit variable to dmub * replay_FW_Message_type: Indicates which instruction or variable pass to DMUB @@ -1110,7 +1232,7 @@ bool edp_send_replay_cmd(struct dc_link *link, return true; } -bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1122,9 +1244,11 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; - if (coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) { - replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst); + if (coasting_vtotal && (link->replay_settings.coasting_vtotal != coasting_vtotal || + link->replay_settings.frame_skip_number != frame_skip_number)) { + replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst, frame_skip_number); link->replay_settings.coasting_vtotal = coasting_vtotal; + link->replay_settings.frame_skip_number = frame_skip_number; } return true; @@ -1152,7 +1276,7 @@ bool edp_replay_residency(const struct dc_link *link, } bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, - const unsigned int *power_opts, uint32_t coasting_vtotal) + const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1163,13 +1287,16 @@ bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, /* Only both power and coasting vtotal changed, this func could return true */ if (power_opts && 
link->replay_settings.replay_power_opt_active != *power_opts && - coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) { + (coasting_vtotal && + (link->replay_settings.coasting_vtotal != coasting_vtotal || + link->replay_settings.frame_skip_number != frame_skip_number))) { if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt_and_coasting_vtotal) { replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay, - *power_opts, panel_inst, coasting_vtotal); + *power_opts, panel_inst, coasting_vtotal, frame_skip_number); link->replay_settings.replay_power_opt_active = *power_opts; link->replay_settings.coasting_vtotal = coasting_vtotal; + link->replay_settings.frame_skip_number = frame_skip_number; } else return false; } else diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 62a6344e613e..dd79c7cd2828 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link, bool edp_send_replay_cmd(struct dc_link *link, enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_data); -bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal); +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number); bool edp_replay_residency(const struct dc_link *link, unsigned int *residency, const bool is_start, const enum pr_residency_mode mode); bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, - const unsigned int *power_opts, uint32_t coasting_vtotal); + const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number); bool edp_wait_for_t12(struct dc_link *link); bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c index 85298b8a1b5e..6bfd2c1294e5 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c @@ -1514,6 +1514,21 @@ static void mpc3_read_mpcc_state( MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut); } +void mpc3_read_reg_state( + struct mpc *mpc, + int mpcc_inst, struct dcn_mpc_reg_state *mpc_reg_state) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + mpc_reg_state->mpcc_bot_sel = REG_READ(MPCC_BOT_SEL[mpcc_inst]); + mpc_reg_state->mpcc_control = REG_READ(MPCC_CONTROL[mpcc_inst]); + mpc_reg_state->mpcc_ogam_control = REG_READ(MPCC_OGAM_CONTROL[mpcc_inst]); + mpc_reg_state->mpcc_opp_id = REG_READ(MPCC_OPP_ID[mpcc_inst]); + mpc_reg_state->mpcc_status = REG_READ(MPCC_STATUS[mpcc_inst]); + mpc_reg_state->mpcc_top_sel = REG_READ(MPCC_TOP_SEL[mpcc_inst]); + +} + static const struct mpc_funcs dcn30_mpc_funcs = { .read_mpcc_state = mpc3_read_mpcc_state, .insert_plane = mpc1_insert_plane, @@ -1544,6 +1559,7 @@ static const struct mpc_funcs dcn30_mpc_funcs = { .release_rmu = mpcc3_release_rmu, .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .mpc_read_reg_state = mpc3_read_reg_state, .set_bg_color = mpc1_set_bg_color, .set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode, }; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h 
b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h index 103f29900a2c..e2f147d17178 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h @@ -1096,6 +1096,11 @@ void mpc3_power_on_ogam_lut( struct mpc *mpc, int mpcc_id, bool power_on); +void mpc3_read_reg_state( + struct mpc *mpc, + int mpcc_inst, + struct dcn_mpc_reg_state *mpc_reg_state); + void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst); enum dc_lut_mode mpc3_get_ogam_current( diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c index 6f0e017a8ae2..83bbbf34bcac 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c @@ -1020,6 +1020,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = { .release_rmu = NULL, .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .mpc_read_reg_state = mpc3_read_reg_state, .set_bg_color = mpc1_set_bg_color, }; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c index e1a0308dee57..eeac13fdd6f5 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c @@ -598,6 +598,7 @@ static const struct mpc_funcs dcn401_mpc_funcs = { .release_rmu = NULL, .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .mpc_read_reg_state = mpc3_read_reg_state, .set_bg_color = mpc1_set_bg_color, .set_movable_cm_location = mpc401_set_movable_cm_location, .update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select, diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c index 71e9288d60ed..45d418636d0c 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c @@ -372,6 +372,17 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable) REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval); } + +void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state) +{ + struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); + + opp_reg_state->fmt_control = REG_READ(FMT_CONTROL); + opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL); + opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL); + opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL); +} + /*****************************************/ /* Constructor, Destructor */ /*****************************************/ @@ -392,7 +403,8 @@ static const struct opp_funcs dcn10_opp_funcs = { .opp_program_dpg_dimensions = NULL, .dpg_is_blanked = NULL, .dpg_is_pending = NULL, - .opp_destroy = opp1_destroy + .opp_destroy = opp1_destroy, + .opp_read_reg_state = opp1_read_reg_state }; void dcn10_opp_construct(struct dcn10_opp *oppn10, diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h index c87de68a509e..38d0d530a9b7 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h @@ -63,7 +63,8 @@ uint32_t OPPBUF_CONTROL1; \ uint32_t OPPBUF_3D_PARAMETERS_0; \ uint32_t OPPBUF_3D_PARAMETERS_1; \ - uint32_t OPP_PIPE_CONTROL + uint32_t OPP_PIPE_CONTROL; \ + uint32_t OPP_PIPE_CRC_CONTROL #define OPP_MASK_SH_LIST_DCN(mask_sh) \ 
OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \ @@ -153,7 +154,6 @@ struct dcn10_opp { const struct dcn10_opp_registers *regs; const struct dcn10_opp_shift *opp_shift; const struct dcn10_opp_mask *opp_mask; - bool is_write_to_ram_a_safe; }; @@ -188,4 +188,6 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable); void opp1_destroy(struct output_pixel_processor **opp); +void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c index f5fe0cac7cb0..ce826a5be4c7 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c @@ -377,6 +377,18 @@ uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp return 0; } +void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + + opp_reg_state->dpg_control = REG_READ(DPG_CONTROL); + opp_reg_state->fmt_control = REG_READ(FMT_CONTROL); + opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL); + opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL); + opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL); + opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG); +} + /*****************************************/ /* Constructor, Destructor */ /*****************************************/ @@ -395,6 +407,7 @@ static struct opp_funcs dcn20_opp_funcs = { .opp_destroy = opp1_destroy, .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, .opp_get_left_edge_extra_pixel_count = opp2_get_left_edge_extra_pixel_count, + .opp_read_reg_state = opp2_read_reg_state }; void dcn20_opp_construct(struct dcn20_opp *oppn20, diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h index 34936e6c49f3..fb0c047c1788 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h @@ -59,7 +59,8 @@ uint32_t DPG_COLOUR_G_Y; \ uint32_t DPG_COLOUR_R_CR; \ uint32_t DPG_RAMP_CONTROL; \ - uint32_t DPG_STATUS + uint32_t DPG_STATUS; \ + uint32_t DSCRM_DSC_FORWARD_CONFIG #define OPP_DPG_MASK_SH_LIST(mask_sh) \ OPP_SF(DPG0_DPG_CONTROL, DPG_EN, mask_sh), \ @@ -171,4 +172,7 @@ void opp2_program_left_edge_extra_pixel ( uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp, enum dc_pixel_encoding pixel_encoding, bool is_primary); + +void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c index 3542b51c9aac..e11c4e16402f 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c @@ -51,3 +51,16 @@ void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable) { REG_UPDATE(OPP_TOP_CLK_CONTROL, OPP_FGCG_REP_DIS, !enable); } + +void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + + opp_reg_state->dpg_control = REG_READ(DPG_CONTROL); + opp_reg_state->fmt_control = REG_READ(FMT_CONTROL); + opp_reg_state->opp_abm_control = REG_READ(OPP_ABM_CONTROL); + 
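/*
 * Editor's note, an illustrative sketch (not part of this patch): the OPP
 * readback is wired per generation (opp1_, opp2_, dcn35_), and registers such
 * as OPP_ABM_CONTROL or DSCRM_DSC_FORWARD_CONFIG exist only where the
 * corresponding register-list macro declares them, so callers should treat
 * the hook as optional. A hypothetical guarded consumer:
 *
 *	struct dcn_opp_reg_state opp_regs = {0};
 *
 *	if (opp->funcs->opp_read_reg_state) {
 *		opp->funcs->opp_read_reg_state(opp, &opp_regs);
 *		DC_LOG_DEBUG("OPP_PIPE_CONTROL=0x%08x DSCRM_DSC_FORWARD_CONFIG=0x%08x",
 *			     opp_regs.opp_pipe_control,
 *			     opp_regs.dscrm_dsc_forward_config);
 *	}
 */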
opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL); + opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL); + opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL); + opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG); +} diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h index a9a413527801..c6cace90e8f2 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h @@ -31,7 +31,8 @@ #define OPP_REG_VARIABLE_LIST_DCN3_5 \ OPP_REG_VARIABLE_LIST_DCN2_0; \ - uint32_t OPP_TOP_CLK_CONTROL + uint32_t OPP_TOP_CLK_CONTROL; \ + uint32_t OPP_ABM_CONTROL #define OPP_MASK_SH_LIST_DCN35(mask_sh) \ OPP_MASK_SH_LIST_DCN20(mask_sh), \ @@ -64,4 +65,5 @@ void dcn35_opp_construct(struct dcn20_opp *oppn20, void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable); +void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); #endif diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index 8b2a8455eb56..803bcc25601c 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -209,7 +209,43 @@ uint32_t OPTC_WIDTH_CONTROL2; \ uint32_t OTG_PSTATE_REGISTER; \ uint32_t OTG_PIPE_UPDATE_STATUS; \ - uint32_t INTERRUPT_DEST + uint32_t INTERRUPT_DEST; \ + uint32_t OPTC_INPUT_SPARE_REGISTER; \ + uint32_t OPTC_RSMU_UNDERFLOW; \ + uint32_t OPTC_UNDERFLOW_THRESHOLD; \ + uint32_t OTG_COUNT_CONTROL; \ + uint32_t OTG_COUNT_RESET; \ + uint32_t OTG_CRC_SIG_BLUE_CONTROL_MASK; \ + uint32_t OTG_CRC_SIG_RED_GREEN_MASK; \ + uint32_t OTG_DLPC_CONTROL; \ + uint32_t OTG_DRR_CONTROL2; \ + uint32_t OTG_DRR_TIMING_INT_STATUS; \ + uint32_t OTG_GLOBAL_CONTROL3; \ + uint32_t OTG_GLOBAL_SYNC_STATUS; \ + uint32_t OTG_GSL_VSYNC_GAP; \ + uint32_t OTG_INTERLACE_STATUS; \ + uint32_t OTG_INTERRUPT_CONTROL; \ + uint32_t OTG_LONG_VBLANK_STATUS; \ + uint32_t OTG_MANUAL_FORCE_VSYNC_NEXT_LINE; \ + uint32_t OTG_MASTER_EN; \ + uint32_t OTG_PIXEL_DATA_READBACK0; \ + uint32_t OTG_PIXEL_DATA_READBACK1; \ + uint32_t OTG_REQUEST_CONTROL; \ + uint32_t OTG_SNAPSHOT_CONTROL; \ + uint32_t OTG_SNAPSHOT_FRAME; \ + uint32_t OTG_SNAPSHOT_POSITION; \ + uint32_t OTG_SNAPSHOT_STATUS; \ + uint32_t OTG_SPARE_REGISTER; \ + uint32_t OTG_STATUS_HV_COUNT; \ + uint32_t OTG_STATUS_VF_COUNT; \ + uint32_t OTG_STEREO_FORCE_NEXT_EYE; \ + uint32_t OTG_TRIG_MANUAL_CONTROL; \ + uint32_t OTG_TRIGB_CNTL; \ + uint32_t OTG_TRIGB_MANUAL_TRIG; \ + uint32_t OTG_UPDATE_LOCK; \ + uint32_t OTG_V_TOTAL_INT_STATUS; \ + uint32_t OTG_VSYNC_NOM_INT_STATUS + struct dcn_optc_registers { OPTC_REG_VARIABLE_LIST_DCN; diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c index 4f1830ba619f..c6417538090f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c @@ -315,6 +315,136 @@ void optc31_read_otg_state(struct timing_generator *optc, s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL); } +void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + optc_reg_state->optc_bytes_per_pixel = REG_READ(OPTC_BYTES_PER_PIXEL); + optc_reg_state->optc_data_format_control = 
REG_READ(OPTC_DATA_FORMAT_CONTROL); + optc_reg_state->optc_data_source_select = REG_READ(OPTC_DATA_SOURCE_SELECT); + optc_reg_state->optc_input_clock_control = REG_READ(OPTC_INPUT_CLOCK_CONTROL); + optc_reg_state->optc_input_global_control = REG_READ(OPTC_INPUT_GLOBAL_CONTROL); + optc_reg_state->optc_input_spare_register = REG_READ(OPTC_INPUT_SPARE_REGISTER); + optc_reg_state->optc_memory_config = REG_READ(OPTC_MEMORY_CONFIG); + optc_reg_state->optc_rsmu_underflow = REG_READ(OPTC_RSMU_UNDERFLOW); + optc_reg_state->optc_underflow_threshold = REG_READ(OPTC_UNDERFLOW_THRESHOLD); + optc_reg_state->optc_width_control = REG_READ(OPTC_WIDTH_CONTROL); + optc_reg_state->otg_3d_structure_control = REG_READ(OTG_3D_STRUCTURE_CONTROL); + optc_reg_state->otg_clock_control = REG_READ(OTG_CLOCK_CONTROL); + optc_reg_state->otg_control = REG_READ(OTG_CONTROL); + optc_reg_state->otg_count_control = REG_READ(OTG_COUNT_CONTROL); + optc_reg_state->otg_count_reset = REG_READ(OTG_COUNT_RESET); + optc_reg_state->otg_crc_cntl = REG_READ(OTG_CRC_CNTL); + optc_reg_state->otg_crc_sig_blue_control_mask = REG_READ(OTG_CRC_SIG_BLUE_CONTROL_MASK); + optc_reg_state->otg_crc_sig_red_green_mask = REG_READ(OTG_CRC_SIG_RED_GREEN_MASK); + optc_reg_state->otg_crc0_data_b = REG_READ(OTG_CRC0_DATA_B); + optc_reg_state->otg_crc0_data_rg = REG_READ(OTG_CRC0_DATA_RG); + optc_reg_state->otg_crc0_windowa_x_control = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL); + optc_reg_state->otg_crc0_windowa_x_control_readback = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL_READBACK); + optc_reg_state->otg_crc0_windowa_y_control = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL); + optc_reg_state->otg_crc0_windowa_y_control_readback = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL_READBACK); + optc_reg_state->otg_crc0_windowb_x_control = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL); + optc_reg_state->otg_crc0_windowb_x_control_readback = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL_READBACK); + optc_reg_state->otg_crc0_windowb_y_control = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL); + optc_reg_state->otg_crc0_windowb_y_control_readback = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL_READBACK); + optc_reg_state->otg_crc1_data_b = REG_READ(OTG_CRC1_DATA_B); + optc_reg_state->otg_crc1_data_rg = REG_READ(OTG_CRC1_DATA_RG); + optc_reg_state->otg_crc1_windowa_x_control = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL); + optc_reg_state->otg_crc1_windowa_x_control_readback = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL_READBACK); + optc_reg_state->otg_crc1_windowa_y_control = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL); + optc_reg_state->otg_crc1_windowa_y_control_readback = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL_READBACK); + optc_reg_state->otg_crc1_windowb_x_control = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL); + optc_reg_state->otg_crc1_windowb_x_control_readback = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL_READBACK); + optc_reg_state->otg_crc1_windowb_y_control = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL); + optc_reg_state->otg_crc1_windowb_y_control_readback = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL_READBACK); + optc_reg_state->otg_crc2_data_b = REG_READ(OTG_CRC2_DATA_B); + optc_reg_state->otg_crc2_data_rg = REG_READ(OTG_CRC2_DATA_RG); + optc_reg_state->otg_crc3_data_b = REG_READ(OTG_CRC3_DATA_B); + optc_reg_state->otg_crc3_data_rg = REG_READ(OTG_CRC3_DATA_RG); + optc_reg_state->otg_dlpc_control = REG_READ(OTG_DLPC_CONTROL); + optc_reg_state->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL); + optc_reg_state->otg_drr_control2 = REG_READ(OTG_DRR_CONTROL2); + optc_reg_state->otg_drr_control = REG_READ(OTG_DRR_CONTROL); + optc_reg_state->otg_drr_timing_int_status 
= REG_READ(OTG_DRR_TIMING_INT_STATUS); + optc_reg_state->otg_drr_trigger_window = REG_READ(OTG_DRR_TRIGGER_WINDOW); + optc_reg_state->otg_drr_v_total_change = REG_READ(OTG_DRR_V_TOTAL_CHANGE); + optc_reg_state->otg_dsc_start_position = REG_READ(OTG_DSC_START_POSITION); + optc_reg_state->otg_force_count_now_cntl = REG_READ(OTG_FORCE_COUNT_NOW_CNTL); + optc_reg_state->otg_global_control0 = REG_READ(OTG_GLOBAL_CONTROL0); + optc_reg_state->otg_global_control1 = REG_READ(OTG_GLOBAL_CONTROL1); + optc_reg_state->otg_global_control2 = REG_READ(OTG_GLOBAL_CONTROL2); + optc_reg_state->otg_global_control3 = REG_READ(OTG_GLOBAL_CONTROL3); + optc_reg_state->otg_global_control4 = REG_READ(OTG_GLOBAL_CONTROL4); + optc_reg_state->otg_global_sync_status = REG_READ(OTG_GLOBAL_SYNC_STATUS); + optc_reg_state->otg_gsl_control = REG_READ(OTG_GSL_CONTROL); + optc_reg_state->otg_gsl_vsync_gap = REG_READ(OTG_GSL_VSYNC_GAP); + optc_reg_state->otg_gsl_window_x = REG_READ(OTG_GSL_WINDOW_X); + optc_reg_state->otg_gsl_window_y = REG_READ(OTG_GSL_WINDOW_Y); + optc_reg_state->otg_h_blank_start_end = REG_READ(OTG_H_BLANK_START_END); + optc_reg_state->otg_h_sync_a = REG_READ(OTG_H_SYNC_A); + optc_reg_state->otg_h_sync_a_cntl = REG_READ(OTG_H_SYNC_A_CNTL); + optc_reg_state->otg_h_timing_cntl = REG_READ(OTG_H_TIMING_CNTL); + optc_reg_state->otg_h_total = REG_READ(OTG_H_TOTAL); + optc_reg_state->otg_interlace_control = REG_READ(OTG_INTERLACE_CONTROL); + optc_reg_state->otg_interlace_status = REG_READ(OTG_INTERLACE_STATUS); + optc_reg_state->otg_interrupt_control = REG_READ(OTG_INTERRUPT_CONTROL); + optc_reg_state->otg_long_vblank_status = REG_READ(OTG_LONG_VBLANK_STATUS); + optc_reg_state->otg_m_const_dto0 = REG_READ(OTG_M_CONST_DTO0); + optc_reg_state->otg_m_const_dto1 = REG_READ(OTG_M_CONST_DTO1); + optc_reg_state->otg_manual_force_vsync_next_line = REG_READ(OTG_MANUAL_FORCE_VSYNC_NEXT_LINE); + optc_reg_state->otg_master_en = REG_READ(OTG_MASTER_EN); + optc_reg_state->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK); + optc_reg_state->otg_master_update_mode = REG_READ(OTG_MASTER_UPDATE_MODE); + optc_reg_state->otg_nom_vert_position = REG_READ(OTG_NOM_VERT_POSITION); + optc_reg_state->otg_pipe_update_status = REG_READ(OTG_PIPE_UPDATE_STATUS); + optc_reg_state->otg_pixel_data_readback0 = REG_READ(OTG_PIXEL_DATA_READBACK0); + optc_reg_state->otg_pixel_data_readback1 = REG_READ(OTG_PIXEL_DATA_READBACK1); + optc_reg_state->otg_request_control = REG_READ(OTG_REQUEST_CONTROL); + optc_reg_state->otg_snapshot_control = REG_READ(OTG_SNAPSHOT_CONTROL); + optc_reg_state->otg_snapshot_frame = REG_READ(OTG_SNAPSHOT_FRAME); + optc_reg_state->otg_snapshot_position = REG_READ(OTG_SNAPSHOT_POSITION); + optc_reg_state->otg_snapshot_status = REG_READ(OTG_SNAPSHOT_STATUS); + optc_reg_state->otg_spare_register = REG_READ(OTG_SPARE_REGISTER); + optc_reg_state->otg_static_screen_control = REG_READ(OTG_STATIC_SCREEN_CONTROL); + optc_reg_state->otg_status = REG_READ(OTG_STATUS); + optc_reg_state->otg_status_frame_count = REG_READ(OTG_STATUS_FRAME_COUNT); + optc_reg_state->otg_status_hv_count = REG_READ(OTG_STATUS_HV_COUNT); + optc_reg_state->otg_status_position = REG_READ(OTG_STATUS_POSITION); + optc_reg_state->otg_status_vf_count = REG_READ(OTG_STATUS_VF_COUNT); + optc_reg_state->otg_stereo_control = REG_READ(OTG_STEREO_CONTROL); + optc_reg_state->otg_stereo_force_next_eye = REG_READ(OTG_STEREO_FORCE_NEXT_EYE); + optc_reg_state->otg_stereo_status = REG_READ(OTG_STEREO_STATUS); + optc_reg_state->otg_trig_manual_control = 
REG_READ(OTG_TRIG_MANUAL_CONTROL); + optc_reg_state->otg_triga_cntl = REG_READ(OTG_TRIGA_CNTL); + optc_reg_state->otg_triga_manual_trig = REG_READ(OTG_TRIGA_MANUAL_TRIG); + optc_reg_state->otg_trigb_cntl = REG_READ(OTG_TRIGB_CNTL); + optc_reg_state->otg_trigb_manual_trig = REG_READ(OTG_TRIGB_MANUAL_TRIG); + optc_reg_state->otg_update_lock = REG_READ(OTG_UPDATE_LOCK); + optc_reg_state->otg_v_blank_start_end = REG_READ(OTG_V_BLANK_START_END); + optc_reg_state->otg_v_count_stop_control = REG_READ(OTG_V_COUNT_STOP_CONTROL); + optc_reg_state->otg_v_count_stop_control2 = REG_READ(OTG_V_COUNT_STOP_CONTROL2); + optc_reg_state->otg_v_sync_a = REG_READ(OTG_V_SYNC_A); + optc_reg_state->otg_v_sync_a_cntl = REG_READ(OTG_V_SYNC_A_CNTL); + optc_reg_state->otg_v_total = REG_READ(OTG_V_TOTAL); + optc_reg_state->otg_v_total_control = REG_READ(OTG_V_TOTAL_CONTROL); + optc_reg_state->otg_v_total_int_status = REG_READ(OTG_V_TOTAL_INT_STATUS); + optc_reg_state->otg_v_total_max = REG_READ(OTG_V_TOTAL_MAX); + optc_reg_state->otg_v_total_mid = REG_READ(OTG_V_TOTAL_MID); + optc_reg_state->otg_v_total_min = REG_READ(OTG_V_TOTAL_MIN); + optc_reg_state->otg_vert_sync_control = REG_READ(OTG_VERT_SYNC_CONTROL); + optc_reg_state->otg_vertical_interrupt0_control = REG_READ(OTG_VERTICAL_INTERRUPT0_CONTROL); + optc_reg_state->otg_vertical_interrupt0_position = REG_READ(OTG_VERTICAL_INTERRUPT0_POSITION); + optc_reg_state->otg_vertical_interrupt1_control = REG_READ(OTG_VERTICAL_INTERRUPT1_CONTROL); + optc_reg_state->otg_vertical_interrupt1_position = REG_READ(OTG_VERTICAL_INTERRUPT1_POSITION); + optc_reg_state->otg_vertical_interrupt2_control = REG_READ(OTG_VERTICAL_INTERRUPT2_CONTROL); + optc_reg_state->otg_vertical_interrupt2_position = REG_READ(OTG_VERTICAL_INTERRUPT2_POSITION); + optc_reg_state->otg_vready_param = REG_READ(OTG_VREADY_PARAM); + optc_reg_state->otg_vstartup_param = REG_READ(OTG_VSTARTUP_PARAM); + optc_reg_state->otg_vsync_nom_int_status = REG_READ(OTG_VSYNC_NOM_INT_STATUS); + optc_reg_state->otg_vupdate_keepout = REG_READ(OTG_VUPDATE_KEEPOUT); + optc_reg_state->otg_vupdate_param = REG_READ(OTG_VUPDATE_PARAM); +} + static const struct timing_generator_funcs dcn31_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -377,6 +507,7 @@ static const struct timing_generator_funcs dcn31_tg_funcs = { .init_odm = optc3_init_odm, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn31_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h index 0f72c274f40b..98f7d2e299c5 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h @@ -274,4 +274,6 @@ void optc3_init_odm(struct timing_generator *optc); void optc31_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s); +void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state); + #endif /* __DC_OPTC_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c index 4a2caca37255..43ff957288b2 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c @@ -256,6 +256,7 @@ static const struct timing_generator_funcs dcn314_tg_funcs 
= { .set_h_timing_div_manual_mode = optc314_set_h_timing_div_manual_mode, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn314_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index b2b226bcd871..3dcb0d0c931c 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -365,6 +365,7 @@ static const struct timing_generator_funcs dcn32_tg_funcs = { .get_otg_double_buffer_pending = optc3_get_otg_update_pending, .get_pipe_update_pending = optc3_get_pipe_update_pending, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn32_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c index 52d5ea98c86b..f699e95059f3 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c @@ -511,6 +511,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = { .set_long_vtotal = optc35_set_long_vtotal, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn35_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c index 5af13706e601..a8e978d1fae8 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c @@ -533,6 +533,7 @@ static const struct timing_generator_funcs dcn401_tg_funcs = { .set_vupdate_keepout = optc401_set_vupdate_keepout, .wait_update_lock_status = optc401_wait_update_lock_status, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn401_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index c4b4dc3ad8c9..d40d91ec2035 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -78,6 +78,7 @@ #endif #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -225,6 +226,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(4), link_regs(5), link_regs(6), + { .DAC_ENABLE = mmDAC_ENABLE }, }; #define stream_enc_regs(id)\ @@ -368,6 +370,7 @@ static const struct dce_abm_mask abm_mask = { #define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -375,6 +378,7 @@ static const struct bios_registers bios_regs = { static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, @@ -402,8 +406,10 @@ static const struct dc_plane_cap plane_cap = { } }; -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update 
= true, +static const struct dc_debug_options debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; #define CTX ctx @@ -484,6 +490,11 @@ static struct stream_encoder *dce100_stream_encoder_create( if (!enc110) return NULL; + if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); + return &enc110->base; + } + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; @@ -624,7 +635,20 @@ static struct link_encoder *dce100_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) + if (!enc110) + return NULL; + + if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[ENGINE_ID_DACA], + NULL, + NULL); + return &enc110->base; + } + + if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = @@ -952,6 +976,10 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( int i; int j = -1; struct dc_link *link = stream->link; + enum engine_id preferred_engine = link->link_enc->preferred_engine; + + if (dc_is_rgb_signal(stream->signal)) + preferred_engine = link->link_enc->analog_engine; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && @@ -960,8 +988,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( * in daisy chain use case */ j = i; - if (pool->stream_enc[i]->id == - link->link_enc->preferred_engine) + if (pool->stream_enc[i]->id == preferred_engine) return pool->stream_enc[i]; } } @@ -1093,6 +1120,7 @@ static bool dce100_resource_construct( dc->caps.disable_dp_clk_share = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index cccde5a6f3cd..cd54382c0af3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -82,6 +82,7 @@ #endif #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -377,6 +378,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -424,7 +426,9 @@ static const struct dc_plane_cap plane_cap = { 64 }; -static const struct dc_debug_options debug_defaults = { +static const struct dc_debug_options debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { .enable_legacy_fast_update = true, }; @@ -1376,6 +1380,7 @@ static bool dce110_resource_construct( dc->caps.is_apu = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c 
b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 869a8e515fc0..3f0a6bc4dcc2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -76,6 +76,7 @@ #endif #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -385,6 +386,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -429,8 +431,10 @@ static const struct dc_plane_cap plane_cap = { 64 }; -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update = true, +static const struct dc_debug_options debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; #define CTX ctx @@ -1247,6 +1251,7 @@ static bool dce112_resource_construct( dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index 540e04ec1e2d..b1570b6b1af3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -491,6 +491,7 @@ static struct dce_i2c_hw *dce120_i2c_hw_create( return dce_i2c_hw; } static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0 + NBIO_BASE(mmBIOS_SCRATCH_0_BASE_IDX), .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX), .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX) }; @@ -526,8 +527,11 @@ static const struct dc_plane_cap plane_cap = { }; static const struct dc_debug_options debug_defaults = { - .disable_clock_gate = true, - .enable_legacy_fast_update = true, + .disable_clock_gate = true, +}; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; static struct clock_source *dce120_clock_source_create( @@ -1089,6 +1093,7 @@ static bool dce120_resource_construct( dc->caps.psp_setup_panel_mode = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index b75be6ad64f6..f0152933bee2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -80,6 +80,7 @@ #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -240,7 +241,9 @@ static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(2), link_regs(3), link_regs(4), - link_regs(5) + link_regs(5), + {0}, + { .DAC_ENABLE = mmDAC_ENABLE }, }; #define stream_enc_regs(id)\ @@ -366,6 +369,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = 
mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -373,6 +377,7 @@ static const struct bios_registers bios_regs = { static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, @@ -382,6 +387,7 @@ static const struct resource_caps res_cap_61 = { .num_timing_generator = 4, .num_audio = 6, .num_stream_encoder = 6, + .num_analog_stream_encoder = 1, .num_pll = 3, .num_ddc = 6, }; @@ -389,6 +395,7 @@ static const struct resource_caps res_cap_61 = { static const struct resource_caps res_cap_64 = { .num_timing_generator = 2, .num_audio = 2, + .num_analog_stream_encoder = 1, .num_stream_encoder = 2, .num_pll = 3, .num_ddc = 2, @@ -599,6 +606,11 @@ static struct stream_encoder *dce60_stream_encoder_create( if (!enc110) return NULL; + if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); + return &enc110->base; + } + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); @@ -718,7 +730,20 @@ static struct link_encoder *dce60_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) + if (!enc110) + return NULL; + + if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[ENGINE_ID_DACA], + NULL, + NULL); + return &enc110->base; + } + + if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index 5b7769745202..8687104cabb7 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -78,6 +78,7 @@ #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -241,6 +242,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(4), link_regs(5), link_regs(6), + { .DAC_ENABLE = mmDAC_ENABLE }, }; #define stream_enc_regs(id)\ @@ -368,6 +370,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -375,6 +378,7 @@ static const struct bios_registers bios_regs = { static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, @@ -383,6 +387,7 @@ static const struct resource_caps res_cap = { static const struct resource_caps res_cap_81 = { .num_timing_generator = 4, .num_audio = 7, + .num_analog_stream_encoder = 1, .num_stream_encoder = 7, .num_pll = 3, .num_ddc = 6, @@ -391,6 +396,7 @@ static const struct resource_caps res_cap_81 = { static const struct resource_caps res_cap_83 = { .num_timing_generator = 2, .num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 2, .num_ddc = 2, @@ -418,8 +424,10 @@ static const struct dc_plane_cap plane_cap = { } }; -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update = true, +static const 
struct dc_debug_options debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; static const struct dce_dmcu_registers dmcu_regs = { @@ -605,6 +613,11 @@ static struct stream_encoder *dce80_stream_encoder_create( if (!enc110) return NULL; + if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); + return &enc110->base; + } + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); @@ -724,7 +737,20 @@ static struct link_encoder *dce80_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) + if (!enc110) + return NULL; + + if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[ENGINE_ID_DACA], + NULL, + NULL); + return &enc110->base; + } + + if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = @@ -919,6 +945,7 @@ static bool dce80_construct( dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * @@ -1320,6 +1347,7 @@ static bool dce83_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index 652c05c35494..f12367adf145 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -556,10 +556,13 @@ static const struct dc_debug_options debug_defaults_drv = { .recovery_enabled = false, /*enable this by default after testing.*/ .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static void dcn10_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN10_DPP(*dpp)); @@ -1395,6 +1398,8 @@ static bool dcn10_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 0; + dc->debug = debug_defaults_drv; + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 84b38d2d6967..6679c1a14f2f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -718,10 +718,13 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + void dcn20_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); @@ -733,7 +736,7 @@ struct dpp 
*dcn20_dpp_create( uint32_t inst) { struct dcn20_dpp *dpp = - kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL); if (!dpp) return NULL; @@ -751,7 +754,7 @@ struct input_pixel_processor *dcn20_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_ipp *ipp = - kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); @@ -768,7 +771,7 @@ struct output_pixel_processor *dcn20_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp = - kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); @@ -785,7 +788,7 @@ struct dce_aux *dcn20_aux_engine_create( uint32_t inst) { struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; @@ -823,7 +826,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create( uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; @@ -835,8 +838,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create( } struct mpc *dcn20_mpc_create(struct dc_context *ctx) { - struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), - GFP_ATOMIC); + struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), GFP_KERNEL); if (!mpc20) return NULL; @@ -853,8 +855,7 @@ struct mpc *dcn20_mpc_create(struct dc_context *ctx) struct hubbub *dcn20_hubbub_create(struct dc_context *ctx) { int i; - struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), - GFP_ATOMIC); + struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub) return NULL; @@ -882,7 +883,7 @@ struct timing_generator *dcn20_timing_generator_create( uint32_t instance) { struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_ATOMIC); + kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; @@ -962,7 +963,7 @@ static struct clock_source *dcn20_clock_source_create( bool dp_clk_src) { struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC); + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; @@ -1061,7 +1062,7 @@ struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); if (!dsc) { BREAK_TO_DEBUGGER(); @@ -1198,7 +1199,7 @@ struct hubp *dcn20_hubp_create( uint32_t inst) { struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); if (!hubp2) return NULL; @@ -1668,6 +1669,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg)) return false; @@ -2286,7 +2288,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) { - struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC); + struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); if (!pp_smu) return pp_smu; @@ -2472,6 +2474,7 @@ static bool dcn20_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->caps.dp_hdmi21_pcon_support = true; + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; @@ -2765,7 +2768,7 @@ struct resource_pool *dcn20_create_resource_pool( struct dc *dc) { struct dcn20_resource_pool *pool = - kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL); if (!pool) return NULL; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index e4a1338d21e0..055107843a70 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -614,10 +614,13 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_tri_buf = true, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static void dcn201_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN201_DPP(*dpp)); @@ -629,7 +632,7 @@ static struct dpp *dcn201_dpp_create( uint32_t inst) { struct dcn201_dpp *dpp = - kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_dpp), GFP_KERNEL); if (!dpp) return NULL; @@ -646,7 +649,7 @@ static struct input_pixel_processor *dcn201_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_ipp *ipp = - kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); if (!ipp) { return NULL; @@ -662,7 +665,7 @@ static struct output_pixel_processor *dcn201_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn201_opp *opp = - kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_opp), GFP_KERNEL); if (!opp) { return NULL; @@ -677,7 +680,7 @@ static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; @@ -710,7 +713,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; @@ -723,8 +726,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx, static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc) { - struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), - GFP_ATOMIC); + struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), GFP_KERNEL); if (!mpc201) 
return NULL; @@ -740,8 +742,7 @@ static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc) static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx) { - struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), - GFP_ATOMIC); + struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub) return NULL; @@ -759,7 +760,7 @@ static struct timing_generator *dcn201_timing_generator_create( uint32_t instance) { struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_ATOMIC); + kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; @@ -793,7 +794,7 @@ static struct link_encoder *dcn201_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); struct dcn10_link_encoder *enc10; if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) @@ -821,7 +822,7 @@ static struct clock_source *dcn201_clock_source_create( bool dp_clk_src) { struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC); + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; @@ -856,7 +857,7 @@ static struct stream_encoder *dcn201_stream_encoder_create( struct dc_context *ctx) { struct dcn10_stream_encoder *enc1 = - kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC); + kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); if (!enc1) return NULL; @@ -883,7 +884,7 @@ static const struct dce_hwseq_mask hwseq_mask = { static struct dce_hwseq *dcn201_hwseq_create( struct dc_context *ctx) { - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC); + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; @@ -983,7 +984,7 @@ static struct hubp *dcn201_hubp_create( uint32_t inst) { struct dcn201_hubp *hubp201 = - kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_hubp), GFP_KERNEL); if (!hubp201) return NULL; @@ -1153,6 +1154,7 @@ static bool dcn201_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->debug = debug_defaults_drv; + dc->check_config = config_defaults; /*a0 only, remove later*/ dc->work_arounds.no_connect_phy_config = true; @@ -1303,7 +1305,7 @@ struct resource_pool *dcn201_create_resource_pool( struct dc *dc) { struct dcn201_resource_pool *pool = - kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_resource_pool), GFP_KERNEL); if (!pool) return NULL; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 918742a42ded..2060acd5ae09 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -626,10 +626,13 @@ static const struct dc_debug_options debug_defaults_drv = { .usbc_combo_phy_reset_wa = true, .dmub_command_table = true, .use_max_lb = true, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1458,6 +1461,7 @@ static bool dcn21_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->caps.dp_hdmi21_pcon_support = true; + dc->check_config = config_defaults; if (dc->ctx->dce_environment == 
DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index ff63f59ff928..d0ebb733e802 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -727,10 +727,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = false, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -2374,6 +2377,7 @@ static bool dcn30_resource_construct( dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 82a205a7c25c..3ad6a3d4858e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -701,10 +701,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = false, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static void dcn301_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); @@ -1498,6 +1501,7 @@ static bool dcn301_resource_construct( bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 61623cb518d9..c0d4a1dc94f8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = false, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1290,6 +1293,7 @@ static bool dcn302_resource_construct( &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index 02b9a84f2db3..75e09c2c283e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = false, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1234,6 +1237,7 @@ static bool dcn303_resource_construct( bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 3ed7f50554e2..0d667b54ccf8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -888,12 +888,15 @@ static const struct dc_debug_options debug_defaults_drv = { } }, .disable_z10 = true, - .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1978,6 +1981,7 @@ static bool dcn31_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index d4917a35b991..3ccde75a4ecb 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -924,12 +924,15 @@ static const struct dc_debug_options debug_defaults_drv = { }, .seamless_boot_odm_combine = true, - .enable_legacy_fast_update = true, .using_dml2 = false, .disable_dsc_power_gate = true, .min_disp_clk_khz = 100000, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1910,6 +1913,7 @@ static bool dcn314_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 82cc78c291d8..4e962f522f1b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -887,9 +887,13 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, - .enable_legacy_fast_update = true, .psr_power_use_phy_fsm = 0, .using_dml2 = false, + .min_disp_clk_khz = 100000, +}; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, 
}; static const struct dc_panel_config panel_config_defaults = { @@ -1939,6 +1943,7 @@ static bool dcn315_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 636110e48d01..5a95dd54cb42 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -882,10 +882,13 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1815,6 +1818,7 @@ static bool dcn316_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 3965a7f1b64b..b276fec3e479 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -92,7 +92,7 @@ #include "dc_state_priv.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #define DC_LOGGER_INIT(logger) @@ -738,10 +738,13 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dp_plus_plus_wa = true, .fpo_vactive_min_active_margin_us = 200, .fpo_vactive_max_blank_us = 1000, - .enable_legacy_fast_update = false, .disable_stutter_for_wm_program = true }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static struct dce_aux *dcn32_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -1844,7 +1847,7 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc, dc_state_set_stream_cursor_subvp_limit(stream, context, true); status = DC_FAIL_HW_CURSOR_SUPPORT; } - }; + } } if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { @@ -2197,7 +2200,8 @@ static bool dcn32_resource_construct( dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/ dc->caps.max_cursor_size = 64; - dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4) + /* floor(sqrt(buf_size_bytes / bpp ) * bpp, fixed_req_size) / bpp = max_width */ + dc->caps.max_buffered_cursor_size = 64; // floor(sqrt(16 * 1024 / 4) * 4, 256) / 4 = 64 dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; @@ -2294,6 +2298,7 @@ static bool dcn32_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index ad214986f7ac..3466ca34c93f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -731,11 +731,14 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_subvp_high_refresh = false, .fpo_vactive_min_active_margin_us = 200, .fpo_vactive_max_blank_us = 1000, - .enable_legacy_fast_update = false, .disable_dc_mode_overwrite = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static struct dce_aux *dcn321_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -1797,6 +1800,7 @@ static bool dcn321_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index fff57f23f4f7..ef69898d2cc5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -33,7 +33,7 @@ #include "resource.h" #include "include/irq_service_interface.h" #include "dcn35_resource.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "dcn20/dcn20_resource.h" #include "dcn30/dcn30_resource.h" @@ -767,7 +767,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, - .enable_legacy_fast_update = true, .enable_single_display_2to1_odm_policy = true, .disable_idle_power_optimizations = false, .dmcub_emulation = false, @@ -788,6 +787,10 @@ static const struct dc_debug_options debug_defaults_drv = { .min_disp_clk_khz = 50000, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1946,6 +1949,7 @@ static bool dcn35_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 0abd163b425e..f3c614c4490c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -83,7 +83,7 @@ #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "link_enc_cfg.h" #define DC_LOGGER_INIT(logger) @@ -747,7 +747,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, - .enable_legacy_fast_update = true, .enable_single_display_2to1_odm_policy = true, .disable_idle_power_optimizations = false, .dmcub_emulation = false, @@ -768,6 +767,10 @@ static const struct dc_debug_options debug_defaults_drv = { .min_disp_clk_khz = 50000, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1917,6 +1920,7 @@ static bool dcn351_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git 
a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c index ca125ee6c2fb..6469d5fe2e6d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -11,7 +11,7 @@ #include "resource.h" #include "include/irq_service_interface.h" #include "dcn36_resource.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "dcn20/dcn20_resource.h" #include "dcn30/dcn30_resource.h" @@ -748,7 +748,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, - .enable_legacy_fast_update = true, .enable_single_display_2to1_odm_policy = true, .disable_idle_power_optimizations = false, .dmcub_emulation = false, @@ -769,6 +768,10 @@ static const struct dc_debug_options debug_defaults_drv = { .min_disp_clk_khz = 50000, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1918,6 +1921,7 @@ static bool dcn36_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index 1d18807e4749..875ae97489d3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -73,7 +73,7 @@ #include "dc_state_priv.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #define DC_LOGGER_INIT(logger) @@ -721,7 +721,6 @@ static const struct dc_debug_options debug_defaults_drv = { .alloc_extra_way_for_cursor = true, .min_prefetch_in_strobe_ns = 60000, // 60us .disable_unbounded_requesting = false, - .enable_legacy_fast_update = false, .dcc_meta_propagation_delay_us = 10, .fams_version = { .minor = 1, @@ -737,6 +736,10 @@ static const struct dc_debug_options debug_defaults_drv = { .force_cositing = CHROMA_COSITING_NONE + 1, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static struct dce_aux *dcn401_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -1668,7 +1671,7 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc, dc_state_set_stream_cursor_subvp_limit(stream, context, true); status = DC_FAIL_HW_CURSOR_SUPPORT; } - }; + } } if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { @@ -1995,6 +1998,7 @@ static bool dcn401_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h index 0fc66487d800..e1fa2e80a15a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h @@ -227,7 +227,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); #define LE_DCN401_REG_LIST_RI(id) \ LE_DCN3_REG_LIST_RI(id), \ SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \ - SRI_ARR(DIG_BE_CLK_CNTL, 
DIG, id) + SRI_ARR(DIG_BE_CLK_CNTL, DIG, id),\ + SR_ARR(DIO_CLK_CNTL, id) /* DPP */ #define DPP_REG_LIST_DCN401_COMMON_RI(id) \ diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h index 21d842857601..88c11b6be004 100644 --- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h +++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h @@ -9,7 +9,7 @@ #include "dc.h" #include "clk_mgr.h" #include "soc_and_ip_translator.h" -#include "dml2/dml21/inc/dml_top_soc_parameter_types.h" +#include "dml2_0/dml21/inc/dml_top_soc_parameter_types.h" void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator); diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c index b1fb0f8a253a..7a839984dbc0 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c @@ -1018,6 +1018,21 @@ static bool spl_get_optimal_number_of_taps( spl_scratch->scl_data.taps.h_taps_c = 6; spl_scratch->scl_data.taps.v_taps_c = 6; } + + /* Override mode: keep EASF enabled but use input taps if valid */ + if (spl_in->override_easf) { + spl_scratch->scl_data.taps.h_taps = (in_taps->h_taps != 0) ? in_taps->h_taps : spl_scratch->scl_data.taps.h_taps; + spl_scratch->scl_data.taps.v_taps = (in_taps->v_taps != 0) ? in_taps->v_taps : spl_scratch->scl_data.taps.v_taps; + spl_scratch->scl_data.taps.h_taps_c = (in_taps->h_taps_c != 0) ? in_taps->h_taps_c : spl_scratch->scl_data.taps.h_taps_c; + spl_scratch->scl_data.taps.v_taps_c = (in_taps->v_taps_c != 0) ? 
in_taps->v_taps_c : spl_scratch->scl_data.taps.v_taps_c; + + if ((spl_scratch->scl_data.taps.h_taps > 6) || (spl_scratch->scl_data.taps.v_taps > 6)) + skip_easf = true; + if ((spl_scratch->scl_data.taps.h_taps > 1) && (spl_scratch->scl_data.taps.h_taps % 2)) + spl_scratch->scl_data.taps.h_taps--; + if ((spl_scratch->scl_data.taps.h_taps_c > 1) && (spl_scratch->scl_data.taps.h_taps_c % 2)) + spl_scratch->scl_data.taps.h_taps_c--; + } } /*Ensure we can support the requested number of vtaps*/ diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h index 23d254dea18f..20e4e52a77ac 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h @@ -545,6 +545,7 @@ struct spl_in { enum linear_light_scaling lls_pref; // Linear Light Scaling bool prefer_easf; bool disable_easf; + bool override_easf; /* If true, keep EASF enabled but use provided in_taps */ struct spl_debug debug; bool is_fullscreen; bool is_hdr_on; diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 338fdc651f2c..9d0168986fe7 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -132,6 +132,7 @@ enum dmub_window_id { DMUB_WINDOW_IB_MEM, DMUB_WINDOW_SHARED_STATE, DMUB_WINDOW_LSDMA_BUFFER, + DMUB_WINDOW_CURSOR_OFFLOAD, DMUB_WINDOW_TOTAL, }; @@ -317,6 +318,7 @@ struct dmub_srv_hw_params { bool enable_non_transparent_setconfig; bool lower_hbr3_phy_ssc; bool override_hbr3_pll_vco; + bool disable_dpia_bw_allocation; }; /** @@ -361,6 +363,19 @@ struct dmub_diagnostic_data { uint8_t is_pwait : 1; }; +/** + * struct dmub_preos_info - preos fw info before loading post os fw. 
+ */ +struct dmub_preos_info { + uint64_t fb_base; + uint64_t fb_offset; + uint64_t trace_buffer_phy_addr; + uint32_t trace_buffer_size; + uint32_t fw_version; + uint32_t boot_status; + uint32_t boot_options; +}; + struct dmub_srv_inbox { /* generic status */ uint64_t num_submitted; @@ -486,6 +501,7 @@ struct dmub_srv_hw_funcs { uint32_t (*get_current_time)(struct dmub_srv *dmub); void (*get_diagnostic_data)(struct dmub_srv *dmub); + bool (*get_preos_fw_info)(struct dmub_srv *dmub); bool (*should_detect)(struct dmub_srv *dmub); void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx); @@ -535,7 +551,8 @@ struct dmub_srv_create_params { * @fw_version: the current firmware version, if any * @is_virtual: false if hardware support only * @shared_state: dmub shared state between firmware and driver - * @fw_state: dmub firmware state pointer + * @cursor_offload_v1: Cursor offload state + * @fw_state: dmub firmware state pointer (debug purpose only) */ struct dmub_srv { enum dmub_asic asic; @@ -544,7 +561,9 @@ struct dmub_srv { bool is_virtual; struct dmub_fb scratch_mem_fb; struct dmub_fb ib_mem_gart; + struct dmub_fb cursor_offload_fb; volatile struct dmub_shared_state_feature_block *shared_state; + volatile struct dmub_cursor_offload_v1 *cursor_offload_v1; volatile const struct dmub_fw_state *fw_state; /* private: internal use only */ @@ -583,6 +602,7 @@ struct dmub_srv { enum dmub_srv_power_state_type power_state; struct dmub_diagnostic_data debug; struct dmub_fb lsdma_rb_fb; + struct dmub_preos_info preos_info; }; /** @@ -1068,4 +1088,14 @@ enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub, */ enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub); +/** + * dmub_srv_get_preos_info() - retrieves preos fw info + * @dmub: the dmub service + * + * Return: + * true - preos fw info retrieved successfully + * false - preos fw info not retrieved successfully + */ +bool dmub_srv_get_preos_info(struct dmub_srv *dmub); + #endif /* _DMUB_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 92248224b713..3f2a0ed02c59 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -485,7 +485,19 @@ union replay_debug_flags { */ uint32_t enable_visual_confirm_debug : 1; - uint32_t reserved : 18; + /** + * 0x4000 (bit 14) + * @debug_log_enabled: Debug Log Enabled + */ + uint32_t debug_log_enabled : 1; + + /** + * 0x8000 (bit 15) + * @enable_sub_feature_visual_confirm: Enable Sub Feature Visual Confirm + */ + uint32_t enable_sub_feature_visual_confirm : 1; + + uint32_t reserved : 16; } bitfields; uint32_t u32All; @@ -593,6 +605,104 @@ union replay_hw_flags { uint32_t u32All; }; +/** + * Flags that can be set by driver to change some Panel Replay behaviour. + */ +union pr_debug_flags { + struct { + /** + * 0x1 (bit 0) + * Enable visual confirm in FW. + */ + uint32_t visual_confirm : 1; + + /** + * 0x2 (bit 1) + * @skip_crc: Set if need to skip CRC. 
+ */ + uint32_t skip_crc : 1; + + /** + * 0x4 (bit 2) + * @force_link_power_on: Force disable ALPM control + */ + uint32_t force_link_power_on : 1; + + /** + * 0x8 (bit 3) + * @force_phy_power_on: Force phy power on + */ + uint32_t force_phy_power_on : 1; + + /** + * 0x10 (bit 4) + * @skip_crtc_disabled: CRTC disable skipped + */ + uint32_t skip_crtc_disabled : 1; + + /* + * 0x20 (bit 5) + * @visual_confirm_rate_control: Enable Visual Confirm rate control detection + */ + uint32_t visual_confirm_rate_control : 1; + + uint32_t reserved : 26; + } bitfields; + + uint32_t u32All; +}; + +union pr_hw_flags { + struct { + /** + * @allow_alpm_fw_standby_mode: To indicate whether the + * ALPM FW standby mode is allowed + */ + uint32_t allow_alpm_fw_standby_mode : 1; + + /* + * @dsc_enable_status: DSC enable status in driver + */ + uint32_t dsc_enable_status : 1; + + /** + * @fec_enable_status: receive fec enable/disable status from driver + */ + uint32_t fec_enable_status : 1; + + /* + * @smu_optimizations_en: SMU power optimization. + * Only when active display is Replay capable and display enters Replay. + * Trigger interrupt to SMU to powerup/down. + */ + uint32_t smu_optimizations_en : 1; + + /** + * @phy_power_state: Indicates current phy power state + */ + uint32_t phy_power_state : 1; + + /** + * @link_power_state: Indicates current link power state + */ + uint32_t link_power_state : 1; + /** + * Use TPS3 signal when restore main link. + */ + uint32_t force_wakeup_by_tps3 : 1; + /** + * @is_alpm_initialized: Indicates whether ALPM is initialized + */ + uint32_t is_alpm_initialized : 1; + /** + * @alpm_mode: Indicates ALPM mode selected + */ + uint32_t alpm_mode : 2; + } bitfields; + + uint32_t u32All; +}; + union fw_assisted_mclk_switch_version { struct { uint8_t minor : 5; @@ -617,6 +727,7 @@ struct dmub_feature_caps { uint8_t replay_supported; uint8_t replay_reserved[3]; uint8_t abm_aux_backlight_support; + uint8_t lsdma_support_in_dmu; }; struct dmub_visual_confirm_color { @@ -629,6 +740,112 @@ struct dmub_visual_confirm_color { uint16_t panel_inst; }; +/** + * struct dmub_cursor_offload_pipe_data_dcn30_v1 - DCN30+ per pipe data. 
+ */ +struct dmub_cursor_offload_pipe_data_dcn30_v1 { + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS; + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16; + uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5; + uint32_t reserved0[4]; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1; + uint32_t CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24; + uint32_t CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24; + uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS : 16; + uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE : 16; + uint32_t reserved1[5]; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8; + uint32_t reserved2[3]; +}; + +/** + * struct dmub_cursor_offload_pipe_data_dcn401_v1 - DCN401 per pipe data. + */ +struct dmub_cursor_offload_pipe_data_dcn401_v1 { + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS; + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16; + uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5; + uint32_t reserved0[4]; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1; + uint32_t CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24; + uint32_t CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y : 16; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y : 16; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB : 16; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB : 16; + uint32_t reserved1[4]; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8; + uint32_t HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR : 1; + uint32_t reserved2[3]; +}; + +/** + * struct dmub_cursor_offload_pipe_data_v1 - Per pipe data for cursor offload.
+ */ +struct dmub_cursor_offload_pipe_data_v1 { + union { + struct dmub_cursor_offload_pipe_data_dcn30_v1 dcn30; /**< DCN30 cursor data. */ + struct dmub_cursor_offload_pipe_data_dcn401_v1 dcn401; /**< DCN401 cursor data. */ + uint8_t payload[96]; /**< Guarantees the cursor pipe data size per-pipe. */ + }; +}; + +/** + * struct dmub_cursor_offload_payload_data_v1 - A payload of stream data. + */ +struct dmub_cursor_offload_payload_data_v1 { + uint32_t write_idx_start; /**< Write index, updated before pipe_data is written. */ + uint32_t write_idx_finish; /**< Write index, updated after pipe_data is written. */ + uint32_t pipe_mask; /**< Mask of pipes to update. */ + uint32_t reserved; /**< Reserved for future use. */ + struct dmub_cursor_offload_pipe_data_v1 pipe_data[6]; /**< Per-pipe cursor data. */ +}; + +/** + * struct dmub_cursor_offload_stream_v1 - Per-stream data for cursor offload. + */ +struct dmub_cursor_offload_stream_v1 { + struct dmub_cursor_offload_payload_data_v1 payloads[4]; /**< A small buffer of cursor payloads. */ + uint32_t write_idx; /**< The index of the last written payload. */ +}; + +/** + * struct dmub_cursor_offload_v1 - Cursor offload feature state. + */ +struct dmub_cursor_offload_v1 { + struct dmub_cursor_offload_stream_v1 offload_streams[6]; /**< Per-stream cursor offload data */ +}; + //============================================================================== //================================================================= //============================================================================== @@ -648,7 +865,8 @@ struct dmub_visual_confirm_color { union dmub_fw_meta_feature_bits { struct { uint32_t shared_state_link_detection : 1; /**< 1 supports link detection via shared state */ - uint32_t reserved : 31; + uint32_t cursor_offload_v1_support: 1; /**< 1 supports cursor offload */ + uint32_t reserved : 30; } bits; /**< status bits */ uint32_t all; /**< 32-bit access to status bits */ }; @@ -813,6 +1031,28 @@ enum dmub_ips_comand_type { DMUB_CMD__IPS_QUERY_RESIDENCY_INFO = 1, }; +/** + * enum dmub_cursor_offload_comand_type - Cursor offload subcommands. + */ +enum dmub_cursor_offload_comand_type { + /** + * Initializes the cursor offload feature. + */ + DMUB_CMD__CURSOR_OFFLOAD_INIT = 0, + /** + * Enables cursor offloading for a stream and updates the timing parameters. + */ + DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE = 1, + /** + * Disables cursor offloading for a given stream. + */ + DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE = 2, + /** + * Programs the latest data for a given stream. + */ + DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM = 3, +}; + /** * union dmub_fw_boot_options - Boot option definitions for SCRATCH14 */ @@ -844,7 +1084,8 @@ union dmub_fw_boot_options { uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */ uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */ uint32_t override_hbr3_pll_vco: 1; /**< 1 to override the hbr3 pll vco to 0 */ - uint32_t reserved : 5; /**< reserved */ + uint32_t disable_dpia_bw_allocation: 1; /**< 1 to disable the USB4 DPIA BW allocation */ + uint32_t reserved : 4; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -877,6 +1118,7 @@ enum dmub_shared_state_feature_id { DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1, DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2, DMUB_SHARED_SHARE_FEATURE__DEBUG_SETUP = 3, + DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1 = 4, DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. 
*/ }; @@ -957,6 +1199,22 @@ struct dmub_shared_state_ips_driver { uint32_t reserved[61]; /**< Reversed, to be updated when adding new fields. */ }; /* 248-bytes, fixed */ +/** + * struct dmub_shared_state_cursor_offload_stream_v1 - Per-stream header metadata for cursor offload. + */ +struct dmub_shared_state_cursor_offload_stream_v1 { + uint32_t last_write_idx; /**< Last write index */ + uint8_t reserved[28]; /**< Reserved bytes. */ +}; /* 32-bytes, fixed */ + +/** + * struct dmub_shared_state_cursor_offload_v1 - Header metadata for cursor offload. + */ +struct dmub_shared_state_cursor_offload_v1 { + struct dmub_shared_state_cursor_offload_stream_v1 offload_streams[6]; /**< stream state, 32-bytes each */ + uint8_t reserved[56]; /**< reserved for future use */ +}; /* 248-bytes, fixed */ + /** * enum dmub_shared_state_feature_common - Generic payload. */ @@ -983,6 +1241,7 @@ struct dmub_shared_state_feature_block { struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */ struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */ struct dmub_shared_state_debug_setup debug_setup; /**< Debug setup */ + struct dmub_shared_state_cursor_offload_v1 cursor_offload_v1; /**< Cursor offload */ } data; /**< Shared state data. */ }; /* 256-bytes, fixed */ @@ -1572,6 +1831,25 @@ enum dmub_cmd_type { */ DMUB_CMD__IPS = 91, + /** + * Command type use for Cursor offload. + */ + DMUB_CMD__CURSOR_OFFLOAD = 92, + + /** + * Command type used for all SMART_POWER_OLED commands. + */ + DMUB_CMD__SMART_POWER_OLED = 93, + + /** + * Command type use for all Panel Replay commands. + */ + DMUB_CMD__PR = 94, + + + /** + * Command type use for VBIOS shared commands. + */ DMUB_CMD__VBIOS = 128, }; @@ -2369,6 +2647,7 @@ struct dmub_cmd_fams2_global_config { union dmub_cmd_fams2_config { struct dmub_cmd_fams2_global_config global; +// coverity[cert_dcl37_c_violation:FALSE] errno.h, stddef.h, stdint.h not included in atombios.h struct dmub_fams2_stream_static_state stream; //v0 union { struct dmub_fams2_cmd_stream_static_base_state base; @@ -3980,6 +4259,33 @@ enum replay_state { REPLAY_STATE_INVALID = 0xFF, }; +/** + * Definition of a panel replay state + */ +enum pr_state { + PR_STATE_0 = 0x00, // State 0 steady state + // Pending SDP and Unlock before back to State 0 + PR_STATE_0_PENDING_SDP_AND_UNLOCK = 0x01, + PR_STATE_1 = 0x10, // State 1 + PR_STATE_2 = 0x20, // State 2 steady state + // Pending frame transmission before transition to State 2 + PR_STATE_2_PENDING_FRAME_TRANSMISSION = 0x30, + // Active and Powered Up + PR_STATE_2_POWERED = 0x31, + // Active and Powered Down, but need to blank HUBP after DPG_EN latch + PR_STATE_2_PENDING_HUBP_BLANK = 0x32, + // Active and Pending Power Up + PR_STATE_2_PENDING_POWER_UP = 0x33, + // Active and Powered Up, Pending DPG latch + PR_STATE_2_PENDING_LOCK_FOR_DPG_POWER_ON = 0x34, + // Active and Powered Up, Pending SDP and Unlock + PR_STATE_2_PENDING_SDP_AND_UNLOCK = 0x35, + // Pending transmission of AS SDP for timing sync, but no rfb update + PR_STATE_2_PENDING_AS_SDP = 0x36, + // Invalid + PR_STATE_INVALID = 0xFF, +}; + /** * Replay command sub-types.
*/ @@ -4030,6 +4336,25 @@ enum dmub_cmd_replay_type { DMUB_CMD__REPLAY_SET_GENERAL_CMD = 16, }; +/* + * Panel Replay sub-types + */ +enum dmub_cmd_panel_replay_type { + DMUB_CMD__PR_ENABLE = 0, + DMUB_CMD__PR_COPY_SETTINGS = 1, + DMUB_CMD__PR_UPDATE_STATE = 2, + DMUB_CMD__PR_GENERAL_CMD = 3, +}; + +enum dmub_cmd_panel_replay_state_update_subtype { + PR_STATE_UPDATE_COASTING_VTOTAL = 0x1, + PR_STATE_UPDATE_SYNC_MODE = 0x2, +}; + +enum dmub_cmd_panel_replay_general_subtype { + PR_GENERAL_CMD_DEBUG_OPTION = 0x1, +}; + /** * Replay general command sub-types. */ @@ -4045,6 +4370,7 @@ enum dmub_cmd_replay_general_subtype { REPLAY_GENERAL_CMD_DISABLED_DESYNC_ERROR_DETECTION, REPLAY_GENERAL_CMD_UPDATE_ERROR_STATUS, REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE, + REPLAY_GENERAL_CMD_VIDEO_CONFERENCING, }; struct dmub_alpm_auxless_data { @@ -4182,17 +4508,13 @@ struct dmub_cmd_replay_set_version_data { */ uint8_t panel_inst; /** - * PSR version that FW should implement. + * Replay version that FW should implement. */ enum replay_version version; - /** - * PSR control version. - */ - uint8_t cmd_version; /** * Explicit padding to 4 byte boundary. */ - uint8_t pad[2]; + uint8_t pad[3]; }; /** @@ -4237,6 +4559,45 @@ enum replay_enable { REPLAY_ENABLE = 1, }; +/** + * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_OLED_ENABLE command. + */ +struct dmub_rb_cmd_smart_power_oled_enable_data { + /** + * SMART_POWER_OLED enable or disable. + */ + uint8_t enable; + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + + uint16_t peak_nits; + /** + * OTG HW instance. + */ + uint8_t otg_inst; + /** + * DIG FE HW instance. + */ + uint8_t digfe_inst; + /** + * DIG BE HW instance. + */ + uint8_t digbe_inst; + uint8_t debugcontrol; + /* + * vertical interrupt trigger line + */ + uint32_t triggerline; + + uint16_t fixed_max_cll; + + uint8_t pad[2]; +}; + /** * Data passed from driver to FW in a DMUB_CMD__REPLAY_ENABLE command. */ @@ -4408,9 +4769,9 @@ struct dmub_cmd_replay_set_coasting_vtotal_data { */ uint16_t coasting_vtotal_high; /** - * Explicit padding to 4 byte boundary. + * frame skip number. */ - uint8_t pad[2]; + uint16_t frame_skip_number; }; /** @@ -4570,6 +4931,58 @@ union dmub_replay_cmd_set { struct dmub_cmd_replay_set_general_cmd_data set_general_cmd_data; }; +/** + * SMART POWER OLED command sub-types. + */ +enum dmub_cmd_smart_power_oled_type { + + /** + * Enable/Disable SMART_POWER_OLED. + */ + DMUB_CMD__SMART_POWER_OLED_ENABLE = 1, + /** + * Get current MaxCLL value if SMART POWER OLED is enabled. + */ + DMUB_CMD__SMART_POWER_OLED_GETMAXCLL = 2, +}; + +/** + * Definition of a DMUB_CMD__SMART_POWER_OLED command. + */ +struct dmub_rb_cmd_smart_power_oled_enable { + /** + * Command header. + */ + struct dmub_cmd_header header; + + struct dmub_rb_cmd_smart_power_oled_enable_data data; +}; + +struct dmub_cmd_smart_power_oled_getmaxcll_input { + uint8_t panel_inst; + uint8_t pad[3]; +}; + +struct dmub_cmd_smart_power_oled_getmaxcll_output { + uint16_t current_max_cll; + uint8_t pad[2]; +}; + +/** + * Definition of a DMUB_CMD__SMART_POWER_OLED command. + */ +struct dmub_rb_cmd_smart_power_oled_getmaxcll { + struct dmub_cmd_header header; /**< Command header */ + /** + * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_OLED_GETMAXCLL command.
+ */ + union dmub_cmd_smart_power_oled_getmaxcll_data { + struct dmub_cmd_smart_power_oled_getmaxcll_input input; /**< Input */ + struct dmub_cmd_smart_power_oled_getmaxcll_output output; /**< Output */ + uint32_t output_raw; /**< Raw data output */ + } data; +}; + /** * Set of HW components that can be locked. * @@ -4652,6 +5065,7 @@ enum hw_lock_client { */ HW_LOCK_CLIENT_REPLAY = 4, HW_LOCK_CLIENT_FAMS2 = 5, + HW_LOCK_CLIENT_CURSOR_OFFLOAD = 6, /** * Invalid client. */ @@ -6063,6 +6477,257 @@ struct dmub_rb_cmd_ips_query_residency_info { struct dmub_cmd_ips_query_residency_info_data info_data; }; +/** + * struct dmub_cmd_cursor_offload_init_data - Payload for cursor offload init command. + */ +struct dmub_cmd_cursor_offload_init_data { + union dmub_addr state_addr; /**< State address for dmub_cursor_offload */ + uint32_t state_size; /**< State size for dmub_cursor_offload */ +}; + +/** + * struct dmub_rb_cmd_cursor_offload_init - Data for initializing cursor offload. + */ +struct dmub_rb_cmd_cursor_offload_init { + struct dmub_cmd_header header; + struct dmub_cmd_cursor_offload_init_data init_data; +}; + +/** + * struct dmub_cmd_cursor_offload_stream_data - Payload for cursor offload stream command. + */ +struct dmub_cmd_cursor_offload_stream_data { + uint32_t otg_inst: 4; /**< OTG instance to control */ + uint32_t reserved: 28; /**< Reserved for future use */ + uint32_t line_time_in_ns; /**< Line time in ns for the OTG */ + uint32_t v_total_max; /**< OTG v_total_max */ +}; + +/** + * struct dmub_rb_cmd_cursor_offload_stream_cntl - Controls a stream for cursor offload. + */ +struct dmub_rb_cmd_cursor_offload_stream_cntl { + struct dmub_cmd_header header; + struct dmub_cmd_cursor_offload_stream_data data; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__PR_ENABLE command. + */ +struct dmub_cmd_pr_enable_data { + /** + * Panel Replay enable or disable. + */ + uint8_t enable; + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Phy state to enter. + * Values to use are defined in dmub_phy_fsm_state + */ + uint8_t phy_fsm_state; + /** + * Phy rate for DP - RBR/HBR/HBR2/HBR3. + * Set this using enum phy_link_rate. + * This does not support HDMI/DP2 for now. + */ + uint8_t phy_rate; + /** + * @hpo_stream_enc_inst: HPO stream encoder instance + */ + uint8_t hpo_stream_enc_inst; + /** + * @hpo_link_enc_inst: HPO link encoder instance + */ + uint8_t hpo_link_enc_inst; + /** + * @pad: Align structure to 4 byte boundary. + */ + uint8_t pad[2]; +}; + +/** + * Definition of a DMUB_CMD__PR_ENABLE command. + * Panel Replay enable/disable is controlled using action in data. + */ +struct dmub_rb_cmd_pr_enable { + /** + * Command header. + */ + struct dmub_cmd_header header; + + struct dmub_cmd_pr_enable_data data; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__PR_COPY_SETTINGS command. + */ +struct dmub_cmd_pr_copy_settings_data { + /** + * Flags that can be set by driver to change some Panel Replay behaviour. + */ + union pr_debug_flags debug; + + /** + * @flags: Flags used to determine feature functionality. + */ + union pr_hw_flags flags; + + /** + * DPP HW instance. + */ + uint8_t dpp_inst; + /** + * OTG HW instance. + */ + uint8_t otg_inst; + /** + * DIG FE HW instance. + */ + uint8_t digfe_inst; + /** + * DIG BE HW instance. + */ + uint8_t digbe_inst; + /** + * AUX HW instance. + */ + uint8_t aux_inst; + /** + * Panel Instance.
+ * Panel instance to identify which psr_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Length of each horizontal line in ns. + */ + uint32_t line_time_in_ns; + /** + * PHY instance. + */ + uint8_t dpphy_inst; + /** + * Determines if SMU optimizations are enabled/disabled. + */ + uint8_t smu_optimizations_en; + /* + * Use FSM state for Replay power up/down + */ + uint8_t use_phy_fsm; + /* + * Use FSFT affected pixel clk + */ + uint32_t pix_clk_100hz; + /* + * Use Original pixel clock + */ + uint32_t sink_pix_clk_100hz; + /** + * Use for AUX-less ALPM LFPS wake operation + */ + struct dmub_alpm_auxless_data auxless_alpm_data; + /** + * @hpo_stream_enc_inst: HPO stream encoder instance + */ + uint8_t hpo_stream_enc_inst; + /** + * @hpo_link_enc_inst: HPO link encoder instance + */ + uint8_t hpo_link_enc_inst; + /** + * @pad: Align structure to 4 byte boundary. + */ + uint8_t pad[2]; +}; + +/** + * Definition of a DMUB_CMD__PR_COPY_SETTINGS command. + */ +struct dmub_rb_cmd_pr_copy_settings { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__PR_COPY_SETTINGS command. + */ + struct dmub_cmd_pr_copy_settings_data data; +}; + +struct dmub_cmd_pr_update_state_data { + /** + * Panel Instance. + * Panel instance to identify which psr_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + + uint8_t pad[3]; // align to 4-byte boundary + /* + * Update flags to control the update behavior. + */ + uint32_t update_flag; + /** + * state/data to set. + */ + uint32_t coasting_vtotal; + uint32_t sync_mode; +}; + +struct dmub_cmd_pr_general_cmd_data { + /** + * Panel Instance. + * Panel instance to identify which psr_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * subtype: PR general cmd sub type + */ + uint8_t subtype; + + uint8_t pad[2]; + /** + * config data by different subtypes + */ + union { + uint32_t u32All; + } data; +}; + +/** + * Definition of a DMUB_CMD__PR_UPDATE_STATE command. + */ +struct dmub_rb_cmd_pr_update_state { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__PR_UPDATE_STATE command. + */ + struct dmub_cmd_pr_update_state_data data; +}; + +/** + * Definition of a DMUB_CMD__PR_GENERAL_CMD command. + */ +struct dmub_rb_cmd_pr_general_cmd { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__PR_GENERAL_CMD command. + */ + struct dmub_cmd_pr_general_cmd_data data; +}; + /** * union dmub_rb_cmd - DMUB inbox command. */ @@ -6392,6 +7057,38 @@ union dmub_rb_cmd { struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl; struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info; + /** + * Definition of a DMUB_CMD__CURSOR_OFFLOAD_INIT command. + */ + struct dmub_rb_cmd_cursor_offload_init cursor_offload_init; + /** + * Definition of the DMUB_CMD__CURSOR_OFFLOAD control commands. + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_UPDATE_DRR + */ + struct dmub_rb_cmd_cursor_offload_stream_cntl cursor_offload_stream_ctnl; + /** + * Definition of a DMUB_CMD__SMART_POWER_OLED_ENABLE command. + */ + struct dmub_rb_cmd_smart_power_oled_enable smart_power_oled_enable; + /** + * Definition of a DMUB_CMD__SMART_POWER_OLED_GETMAXCLL command.
+ */ + struct dmub_rb_cmd_smart_power_oled_getmaxcll smart_power_oled_getmaxcll; + /* + * Definition of a DMUB_CMD__PR_COPY_SETTINGS command. + */ + struct dmub_rb_cmd_pr_copy_settings pr_copy_settings; + /** + * Definition of a DMUB_CMD__PR_ENABLE command. + */ + struct dmub_rb_cmd_pr_enable pr_enable; + + struct dmub_rb_cmd_pr_update_state pr_update_state; + + struct dmub_rb_cmd_pr_general_cmd pr_general_cmd; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 4777c7203b2c..cd04d7c756c3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -380,6 +380,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.override_hbr3_pll_vco = params->override_hbr3_pll_vco; boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0; + boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index ce041f6239dc..7e9856289910 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -89,50 +89,58 @@ static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in, void dmub_dcn32_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; - const uint32_t timeout = 100000; - uint32_t in_reset, is_enabled, scratch, i, pwait_mode; + const uint32_t timeout_us = 1 * 1000 * 1000; //1s + const uint32_t poll_delay_us = 1; //1us + uint32_t i = 0; + uint32_t enabled, in_reset, scratch, pwait_mode; - REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); - REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); + REG_GET(DMCUB_CNTL, + DMCUB_ENABLE, &enabled); + REG_GET(DMCUB_CNTL2, + DMCUB_SOFT_RESET, &in_reset); - if (in_reset == 0 && is_enabled != 0) { + if (enabled && in_reset == 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); - for (i = 0; i < timeout; ++i) { - if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) - break; - - udelay(1); - } - - for (i = 0; i < timeout; ++i) { + for (; i < timeout_us; i++) { scratch = REG_READ(DMCUB_SCRATCH7); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break; - udelay(1); + udelay(poll_delay_us); } - for (i = 0; i < timeout; ++i) { + for (; i < timeout_us; i++) { REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode); if (pwait_mode & (1 << 0)) break; - udelay(1); + udelay(poll_delay_us); } - /* Force reset in case we timed out, DMCUB is likely hung.
*/ } - if (is_enabled) { + if (enabled) { REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); udelay(1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); } + if (i >= timeout_us) { + /* timeout should never occur */ + BREAK_TO_DEBUGGER(); + } + + REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0); + REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); @@ -141,7 +149,7 @@ void dmub_dcn32_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); - /* Clear the GPINT command manually so we don't send anything during boot. */ + /* Clear the GPINT command manually so we don't reset again. */ cmd.all = 0; dmub->hw_funcs.set_gpint(dmub, cmd); } @@ -163,7 +171,9 @@ void dmub_dcn32_backdoor_load(struct dmub_srv *dmub, dmub_dcn32_get_fb_base_offset(dmub, &fb_base, &fb_offset); + /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); dmub_dcn32_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); @@ -193,7 +203,9 @@ void dmub_dcn32_backdoor_load_zfb_mode(struct dmub_srv *dmub, { union dmub_addr offset; + /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); offset = cw0->offset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 834e5434ccb8..e13557ed97be 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -418,6 +418,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.disable_sldo_opt = params->disable_sldo_opt; boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig; boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc; + boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } @@ -520,6 +521,45 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub) dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); } + +bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub) +{ + uint64_t region3_cw5_offset; + uint32_t top_addr, top_addr_enable, offset_low; + uint32_t offset_high, base_addr, fw_version; + bool is_vbios_fw = false; + + memset(&dmub->preos_info, 0, sizeof(dmub->preos_info)); + + fw_version = REG_READ(DMCUB_SCRATCH1); + is_vbios_fw = ((fw_version >> 6) & 0x01) ? 
true : false; + if (!is_vbios_fw) + return false; + + dmub->preos_info.boot_status = REG_READ(DMCUB_SCRATCH0); + dmub->preos_info.fw_version = REG_READ(DMCUB_SCRATCH1); + dmub->preos_info.boot_options = REG_READ(DMCUB_SCRATCH14); + REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS, + DMCUB_REGION3_CW5_ENABLE, &top_addr_enable); + if (top_addr_enable) { + dmub_dcn35_get_fb_base_offset(dmub, + &dmub->preos_info.fb_base, &dmub->preos_info.fb_offset); + offset_low = REG_READ(DMCUB_REGION3_CW5_OFFSET); + offset_high = REG_READ(DMCUB_REGION3_CW5_OFFSET_HIGH); + region3_cw5_offset = ((uint64_t)offset_high << 32) | offset_low; + dmub->preos_info.trace_buffer_phy_addr = region3_cw5_offset + - dmub->preos_info.fb_base + dmub->preos_info.fb_offset; + + REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS, + DMCUB_REGION3_CW5_TOP_ADDRESS, &top_addr); + base_addr = REG_READ(DMCUB_REGION3_CW5_BASE_ADDRESS) & 0x1FFFFFFF; + dmub->preos_info.trace_buffer_size = + (top_addr > base_addr) ? (top_addr - base_addr + 1) : 0; + } + + return true; +} + void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub) { /* DMCUB_REGION3_TMR_AXI_SPACE values: diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h index 39fcb7275da5..92e6695a2c9b 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h @@ -285,4 +285,6 @@ bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub); void dmub_srv_dcn35_regs_init(struct dmub_srv *dmub, struct dc_context *ctx); +bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub); + #endif /* _DMUB_DCN35_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c index b31adbd0d685..95542299e3b3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c @@ -81,7 +81,7 @@ void dmub_dcn401_reset(struct dmub_srv *dmub) dmub->hw_funcs.set_gpint(dmub, cmd); for (; i < timeout_us; i++) { - scratch = dmub->hw_funcs.get_gpint_response(dmub); + scratch = REG_READ(DMCUB_SCRATCH7); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break; @@ -97,11 +97,24 @@ void dmub_dcn401_reset(struct dmub_srv *dmub) } } + if (enabled) { + REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); + udelay(1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); + } + if (i >= timeout_us) { /* timeout should never occur */ BREAK_TO_DEBUGGER(); } + REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0); + REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); @@ -134,7 +147,6 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub, /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); @@ -168,7 +180,6 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub, /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); - 
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); offset = cw0->offset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index b17a19400c06..a6ae1d2e9685 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -66,7 +66,7 @@ #define DMUB_SCRATCH_MEM_SIZE (1024) /* Default indirect buffer size. */ -#define DMUB_IB_MEM_SIZE (1280) +#define DMUB_IB_MEM_SIZE (2560) /* Default LSDMA ring buffer size. */ #define DMUB_LSDMA_RB_SIZE (64 * 1024) @@ -359,6 +359,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->get_current_time = dmub_dcn35_get_current_time; funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data; + funcs->get_preos_fw_info = dmub_dcn35_get_preos_fw_info; funcs->init_reg_offsets = dmub_srv_dcn35_regs_init; if (asic == DMUB_ASIC_DCN351) @@ -564,10 +565,11 @@ enum dmub_status window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE; window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size; window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size; - window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE; + window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = dmub_align(DMUB_SCRATCH_MEM_SIZE, 64); window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE; window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size); window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE; + window_sizes[DMUB_WINDOW_CURSOR_OFFLOAD] = dmub_align(sizeof(struct dmub_cursor_offload_v1), 64); out->fb_size = dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB); @@ -652,21 +654,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX]; struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; - struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; - struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM]; struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE]; struct dmub_rb_init_params rb_params, outbox0_rb_params; struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6; struct dmub_region inbox1, outbox1, outbox0; + uint32_t i; + if (!dmub->sw_init) return DMUB_STATUS_INVALID; - if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb || - !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) { - ASSERT(0); - return DMUB_STATUS_INVALID; + for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) { + if (!params->fb[i]) { + ASSERT(0); + return DMUB_STATUS_INVALID; + } } dmub->fb_base = params->fb_base; @@ -748,9 +751,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->shared_state = shared_state_fb->cpu_addr; - dmub->scratch_mem_fb = *scratch_mem_fb; + dmub->scratch_mem_fb = *params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; + dmub->ib_mem_gart = *params->fb[DMUB_WINDOW_IB_MEM]; - dmub->ib_mem_gart = *ib_mem_gart; + dmub->cursor_offload_fb = *params->fb[DMUB_WINDOW_CURSOR_OFFLOAD]; + dmub->cursor_offload_v1 = (struct dmub_cursor_offload_v1 *)dmub->cursor_offload_fb.cpu_addr; if (dmub->hw_funcs.setup_windows) dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, ®ion6); @@ -1368,3 +1373,11 @@ enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub) return DMUB_STATUS_OK; } + +bool dmub_srv_get_preos_info(struct dmub_srv *dmub) +{ 
+ if (!dmub || !dmub->hw_funcs.get_preos_fw_info) + return false; + + return dmub->hw_funcs.get_preos_fw_info(dmub); +} diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h index 812377d9e48f..973b6bdbac63 100644 --- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h +++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h @@ -135,12 +135,8 @@ struct bp_external_encoder_control { struct bp_crtc_source_select { enum engine_id engine_id; enum controller_id controller_id; - /* from GPU Tx aka asic_signal */ - enum signal_type signal; - /* sink_signal may differ from asicSignal if Translator encoder */ enum signal_type sink_signal; - enum display_output_bit_depth display_output_bit_depth; - bool enable_dp_audio; + uint8_t bit_depth; }; struct bp_transmitter_control { @@ -166,6 +162,11 @@ struct bp_transmitter_control { bool single_pll_mode; }; +struct bp_load_detection_parameters { + enum engine_id engine_id; + uint16_t device_id; +}; + struct bp_hw_crtc_timing_parameters { enum controller_id controller_id; /* horizontal part */ diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index de8f3cfed6c8..07b937b92efc 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -30,6 +30,22 @@ #ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h #define DP_SINK_HW_REVISION_START 0x409 #endif +/* Panel Replay*/ +#ifndef DP_PANEL_REPLAY_CAPABILITY_SUPPORT // can remove this once the define gets into linux drm_dp_helper.h +#define DP_PANEL_REPLAY_CAPABILITY_SUPPORT 0x0b0 +#endif /* DP_PANEL_REPLAY_CAPABILITY_SUPPORT */ +#ifndef DP_PANEL_REPLAY_CAPABILITY // can remove this once the define gets into linux drm_dp_helper.h +#define DP_PANEL_REPLAY_CAPABILITY 0x0b1 +#endif /* DP_PANEL_REPLAY_CAPABILITY */ +#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 // can remove this once the define gets into linux drm_dp_helper.h +#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 0x1b0 +#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 */ +#ifndef DP_PANEL_REPLAY_ENABLE // can remove this once the define gets into linux drm_dp_helper.h +#define DP_PANEL_REPLAY_ENABLE (1 << 0) +#endif /* DP_PANEL_REPLAY_ENABLE */ +#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 // can remove this once the define gets into linux drm_dp_helper.h +#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 0x1b1 +#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 */ enum dpcd_revision { DPCD_REV_10 = 0x10, diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index cc467031651d..38a77fa9b4af 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -169,6 +169,7 @@ struct dc_firmware_info { uint32_t engine_clk_ss_percentage; } feature; + uint32_t max_pixel_clock; /* in KHz */ uint32_t default_display_engine_pll_frequency; /* in KHz */ uint32_t external_clock_source_frequency_for_dp; /* in KHz */ uint32_t smu_gpu_pll_output_freq; /* in KHz */ diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h index 54e33062b3c0..1386fa124e85 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_id.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h @@ -310,4 
+310,11 @@ static inline bool dal_graphics_object_id_equal( } return false; } + +static inline bool dc_connector_supports_analog(const enum connector_id conn) +{ + return conn == CONNECTOR_ID_VGA || + conn == CONNECTOR_ID_SINGLE_LINK_DVII || + conn == CONNECTOR_ID_DUAL_LINK_DVII; +} #endif diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index a10d6b988aab..3a2c2d2fb629 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -118,6 +118,18 @@ static inline bool dc_is_dvi_signal(enum signal_type signal) } } +/** + * dc_is_rgb_signal() - Whether the signal is analog RGB. + * + * Returns whether the given signal type is an analog RGB signal + * that is used with a DAC on VGA or DVI-I connectors. + * Not to be confused with other uses of "RGB", such as RGB color space. + */ +static inline bool dc_is_rgb_signal(enum signal_type signal) +{ + return (signal == SIGNAL_TYPE_RGB); +} + static inline bool dc_is_tmds_signal(enum signal_type signal) { switch (signal) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index c760216a6240..ca402ddcdacc 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -354,7 +354,7 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, /* reset retry counters */ reset_retry_counts(hdcp); - /* reset error trace */ + /* reset trace */ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); /* add display to connection */ @@ -400,7 +400,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, /* clear retry counters */ reset_retry_counts(hdcp); - /* reset error trace */ + /* reset trace */ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); /* remove display */ @@ -464,7 +464,7 @@ enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp, /* clear retry counters */ reset_retry_counts(hdcp); - /* reset error trace */ + /* reset trace */ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); /* set new adjustment */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index a37634942b07..26a351a184f3 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -88,6 +88,7 @@ struct mod_hdcp_transition_input_hdcp2 { uint8_t lc_init_write; uint8_t l_prime_available_poll; uint8_t l_prime_read; + uint8_t l_prime_combo_read; uint8_t l_prime_validation; uint8_t eks_prepare; uint8_t eks_write; @@ -508,7 +509,7 @@ static inline void set_auth_complete(struct mod_hdcp *hdcp, struct mod_hdcp_output *output) { output->auth_complete = 1; - mod_hdcp_log_ddc_trace(hdcp); + HDCP_AUTH_COMPLETE_TRACE(hdcp); } /* connection topology helpers */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 8bc377560787..1bbd728d4345 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -29,6 +29,7 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) { uint64_t n = 0; uint8_t count = 0; + enum mod_hdcp_status status; u8 bksv[sizeof(n)] = { }; memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv)); @@ -38,8 +39,14 @@ static inline enum 
mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) count++; n &= (n - 1); } - return (count == 20) ? MOD_HDCP_STATUS_SUCCESS : - MOD_HDCP_STATUS_HDCP1_INVALID_BKSV; + + if (count == 20) { + hdcp->connection.trace.hdcp1.attempt_count++; + status = MOD_HDCP_STATUS_SUCCESS; + } else { + status = MOD_HDCP_STATUS_HDCP1_INVALID_BKSV; + } + return status; } static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp) @@ -135,6 +142,8 @@ static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; + hdcp->connection.trace.hdcp1.downstream_device_count = get_device_count(hdcp); + /* Some MST display may choose to report the internal panel as an HDCP RX. * To update this condition with 1(because the immediate repeater's internal * panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index bb8ae80b37f8..27500abf9fee 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -48,6 +48,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; + struct mod_hdcp_trace *trace = &hdcp->connection.trace; if (is_dp_hdcp(hdcp)) status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) && @@ -55,9 +56,14 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp) MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; else - status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ? - MOD_HDCP_STATUS_SUCCESS : - MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; + status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi + & HDCP_2_2_HDMI_SUPPORT_MASK) + ? MOD_HDCP_STATUS_SUCCESS + : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; + + if (status == MOD_HDCP_STATUS_SUCCESS) + trace->hdcp2.attempt_count++; + return status; } @@ -201,10 +207,17 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { + struct mod_hdcp_trace *trace = &hdcp->connection.trace; + /* Avoid device count == 0 to do authentication */ if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; + trace->hdcp2.downstream_device_count = get_device_count(hdcp); + trace->hdcp2.hdcp1_device_downstream = + HDCP_2_2_HDCP1_DEVICE_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]); + trace->hdcp2.hdcp2_legacy_device_downstream = + HDCP_2_2_HDCP_2_0_REP_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]); /* Some MST display may choose to report the internal panel as an HDCP RX. */ /* To update this condition with 1(because the immediate repeater's internal */ /* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). 
*/ @@ -452,54 +465,11 @@ static enum mod_hdcp_status validate_h_prime(struct mod_hdcp *hdcp, return status; } -static enum mod_hdcp_status locality_check_sw(struct mod_hdcp *hdcp, - struct mod_hdcp_event_context *event_ctx, - struct mod_hdcp_transition_input_hdcp2 *input) -{ - enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; - - if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init, - &input->lc_init_write, &status, - hdcp, "lc_init_write")) - goto out; - if (is_dp_hdcp(hdcp)) - msleep(16); - else - if (!mod_hdcp_execute_and_set(poll_l_prime_available, - &input->l_prime_available_poll, &status, - hdcp, "l_prime_available_poll")) - goto out; - if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime, - &input->l_prime_read, &status, - hdcp, "l_prime_read")) - goto out; -out: - return status; -} - -static enum mod_hdcp_status locality_check_fw(struct mod_hdcp *hdcp, - struct mod_hdcp_event_context *event_ctx, - struct mod_hdcp_transition_input_hdcp2 *input) -{ - enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; - - if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw, - &input->l_prime_read, &status, - hdcp, "l_prime_read")) - goto out; - -out: - return status; -} - static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp, struct mod_hdcp_event_context *event_ctx, struct mod_hdcp_transition_input_hdcp2 *input) { enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; - const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c - && hdcp->config.ddc.funcs.atomic_write_poll_read_aux - && !hdcp->connection.link.adjust.hdcp2.force_sw_locality_check; if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { event_ctx->unexpected_event = 1; @@ -511,9 +481,28 @@ static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp, hdcp, "lc_init_prepare")) goto out; - status = (use_fw ? 
locality_check_fw : locality_check_sw)(hdcp, event_ctx, input); - if (status != MOD_HDCP_STATUS_SUCCESS) - goto out; + if (hdcp->connection.link.adjust.hdcp2.use_fw_locality_check) { + if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw, + &input->l_prime_combo_read, &status, + hdcp, "l_prime_combo_read")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init, + &input->lc_init_write, &status, + hdcp, "lc_init_write")) + goto out; + if (is_dp_hdcp(hdcp)) + msleep(16); + else + if (!mod_hdcp_execute_and_set(poll_l_prime_available, + &input->l_prime_available_poll, &status, + hdcp, "l_prime_available_poll")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime, + &input->l_prime_read, &status, + hdcp, "l_prime_read")) + goto out; + } if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime, &input->l_prime_validation, &status, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c index 89ffb89e1932..9316312a4df5 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c @@ -184,31 +184,33 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK); break; - case H2_A2_LOCALITY_CHECK: { - const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c - && !adjust->hdcp2.force_sw_locality_check; - - /* - * 1A-05: consider disconnection after LC init a failure - * 1A-13-1: consider invalid l' a failure - * 1A-13-2: consider l' timeout a failure - */ + case H2_A2_LOCALITY_CHECK: + /* 1A-05: consider disconnection after LC init a failure */ if (hdcp->state.stay_count > 10 || - input->lc_init_prepare != PASS || - (!use_fw && input->lc_init_write != PASS) || - (!use_fw && input->l_prime_available_poll != PASS)) { + input->lc_init_prepare != PASS) { fail_and_restart_in_ms(0, &status, output); break; - } else if (input->l_prime_read != PASS) { - if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) { - adjust->hdcp2.force_sw_locality_check = true; + } else if (adjust->hdcp2.use_fw_locality_check && + input->l_prime_combo_read != PASS) { + /* 1A-13-2: consider l' timeout a failure */ + if (adjust->hdcp2.use_sw_locality_fallback) { + /* switch to software locality check */ + adjust->hdcp2.use_fw_locality_check = 0; callback_in_ms(0, output); + increment_stay_counter(hdcp); break; } - + fail_and_restart_in_ms(0, &status, output); + break; + } else if (!adjust->hdcp2.use_fw_locality_check && + (input->lc_init_write != PASS || + input->l_prime_available_poll != PASS || + input->l_prime_read != PASS)) { + /* 1A-13-2: consider l' timeout a failure */ fail_and_restart_in_ms(0, &status, output); break; } else if (input->l_prime_validation != PASS) { + /* 1A-13-1: consider invalid l' a failure */ callback_in_ms(0, output); increment_stay_counter(hdcp); break; @@ -216,7 +218,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER); break; - } case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: if (input->eks_prepare != PASS || input->eks_write != PASS) { @@ -510,26 +511,29 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK); break; - case D2_A2_LOCALITY_CHECK: { - const bool use_fw = 
hdcp->config.ddc.funcs.atomic_write_poll_read_aux - && !adjust->hdcp2.force_sw_locality_check; - + case D2_A2_LOCALITY_CHECK: if (hdcp->state.stay_count > 10 || - input->lc_init_prepare != PASS || - (!use_fw && input->lc_init_write != PASS)) { - /* 1A-12: consider invalid l' a failure */ + input->lc_init_prepare != PASS) { fail_and_restart_in_ms(0, &status, output); break; - } else if (input->l_prime_read != PASS) { - if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) { - adjust->hdcp2.force_sw_locality_check = true; + } else if (adjust->hdcp2.use_fw_locality_check && + input->l_prime_combo_read != PASS) { + if (adjust->hdcp2.use_sw_locality_fallback) { + /* switch to software locality check */ + adjust->hdcp2.use_fw_locality_check = 0; callback_in_ms(0, output); + increment_stay_counter(hdcp); break; } - + fail_and_restart_in_ms(0, &status, output); + break; + } else if (!adjust->hdcp2.use_fw_locality_check && + (input->lc_init_write != PASS || + input->l_prime_read != PASS)) { fail_and_restart_in_ms(0, &status, output); break; } else if (input->l_prime_validation != PASS) { + /* 1A-12: consider invalid l' a failure */ callback_in_ms(0, output); increment_stay_counter(hdcp); break; @@ -537,7 +541,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER); break; - } case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: if (input->eks_prepare != PASS || input->eks_write != PASS) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c index 2e6408579194..0ca39873f807 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -758,6 +758,6 @@ enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp) { const bool success = (is_dp_hdcp(hdcp) ? write_stall_read_lc_fw_aux : write_poll_read_lc_fw_i2c)(hdcp); - return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; + return success ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_LOCALITY_COMBO_READ_FAILURE; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c index 6b3b5f610907..5cb979c2cf8c 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c @@ -125,129 +125,11 @@ void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp) } } +#define CASE_FORMAT(entry) case entry: return #entry; char *mod_hdcp_status_to_str(int32_t status) { switch (status) { - case MOD_HDCP_STATUS_SUCCESS: - return "MOD_HDCP_STATUS_SUCCESS"; - case MOD_HDCP_STATUS_FAILURE: - return "MOD_HDCP_STATUS_FAILURE"; - case MOD_HDCP_STATUS_RESET_NEEDED: - return "MOD_HDCP_STATUS_RESET_NEEDED"; - case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND: - return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND"; - case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND: - return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND"; - case MOD_HDCP_STATUS_INVALID_STATE: - return "MOD_HDCP_STATUS_INVALID_STATE"; - case MOD_HDCP_STATUS_NOT_IMPLEMENTED: - return "MOD_HDCP_STATUS_NOT_IMPLEMENTED"; - case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE: - return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE"; - case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE: - return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE"; - case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE: - return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE"; - case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE: - return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER: - return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER"; - case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE: - return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE"; - case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING: - return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING"; - case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED: - return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED"; - case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY: - return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY"; - case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED: - return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED"; - case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE: - return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED: - return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED"; - case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE: - return 
"MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE"; - case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV: - return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV"; - case MOD_HDCP_STATUS_DDC_FAILURE: - return "MOD_HDCP_STATUS_DDC_FAILURE"; - case MOD_HDCP_STATUS_INVALID_OPERATION: - return "MOD_HDCP_STATUS_INVALID_OPERATION"; - case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE: - return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE"; - case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING: - return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING"; - case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING: - return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING"; - case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING: - return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED: - return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING: - return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED: - return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED"; - case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY: - return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY"; - case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING: - return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING"; - case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST: - return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST"; - case MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE"; - case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE: - return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE"; - case MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE: - return "MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE"; + MOD_HDCP_STATUS_LIST(CASE_FORMAT) default: return "MOD_HDCP_STATUS_UNKNOWN"; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index 1d83c1b9da10..26553aa4c5ca 100644 --- 
a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -31,6 +31,7 @@ #define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) #define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__) #define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__) +#define HDCP_LOG_TRA(hdcp) do {} while (0) /* default logs */ #define HDCP_ERROR_TRACE(hdcp, status) \ @@ -131,4 +132,9 @@ HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \ } while (0) +#define HDCP_AUTH_COMPLETE_TRACE(hdcp) do { \ + mod_hdcp_log_ddc_trace(hdcp); \ + HDCP_LOG_TRA(hdcp); \ +} while (0) + #endif // MOD_HDCP_LOG_H_ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index b51ddf2846df..835467225458 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -35,69 +35,74 @@ struct mod_hdcp; #define MAX_NUM_OF_DISPLAYS 6 #define MAX_NUM_OF_ATTEMPTS 4 #define MAX_NUM_OF_ERROR_TRACE 10 +#define MOD_HDCP_STATUS_LIST(FORMAT) \ + FORMAT(MOD_HDCP_STATUS_SUCCESS) \ + FORMAT(MOD_HDCP_STATUS_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_RESET_NEEDED) \ + FORMAT(MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND) \ + FORMAT(MOD_HDCP_STATUS_DISPLAY_NOT_FOUND) \ + FORMAT(MOD_HDCP_STATUS_INVALID_STATE) \ + FORMAT(MOD_HDCP_STATUS_NOT_IMPLEMENTED) \ + FORMAT(MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP1_INVALID_BKSV) \ + FORMAT(MOD_HDCP_STATUS_DDC_FAILURE) /* TODO: specific errors */ \ + FORMAT(MOD_HDCP_STATUS_INVALID_OPERATION) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE) \ + 
FORMAT(MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE) \ + FORMAT(MOD_HDCP_STATUS_HDCP2_LOCALITY_COMBO_READ_FAILURE) + +#define ENUM_FORMAT(entry) entry, /* detailed return status */ enum mod_hdcp_status { - MOD_HDCP_STATUS_SUCCESS = 0, - MOD_HDCP_STATUS_FAILURE, - MOD_HDCP_STATUS_RESET_NEEDED, - MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND, - MOD_HDCP_STATUS_DISPLAY_NOT_FOUND, - MOD_HDCP_STATUS_INVALID_STATE, - MOD_HDCP_STATUS_NOT_IMPLEMENTED, - MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE, - MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE, - MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE, - MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE, - MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE, - MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE, - MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE, - MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER, - MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE, - MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING, - MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE, - MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED, - MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY, - MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE, - MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED, - MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE, - MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE, - MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE, - MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE, - MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE, - MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE, - MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED, - MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE, - MOD_HDCP_STATUS_HDCP1_INVALID_BKSV, - MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */ - MOD_HDCP_STATUS_INVALID_OPERATION, - MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE, - MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE, - MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE, - MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE, - MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING, - MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING, - MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING, - MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE, - MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED, - MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE, - MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE, - MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE, - MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING, - MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE, - MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE, - MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE, - MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY, - MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE, - MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED, - MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE, - MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING, - MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE, - 
MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE, - MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST, - MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE, - MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE, - MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE, + MOD_HDCP_STATUS_LIST(ENUM_FORMAT) }; struct mod_hdcp_displayport { @@ -214,8 +219,9 @@ struct mod_hdcp_link_adjustment_hdcp2 { uint8_t force_type : 2; uint8_t force_no_stored_km : 1; uint8_t increase_h_prime_timeout: 1; - uint8_t force_sw_locality_check : 1; - uint8_t reserved : 2; + uint8_t use_fw_locality_check : 1; + uint8_t use_sw_locality_fallback: 1; + uint8_t reserved : 1; }; struct mod_hdcp_link_adjustment { @@ -230,9 +236,23 @@ struct mod_hdcp_error { uint8_t state_id; }; +struct mod_hdcp1_trace { + uint8_t attempt_count; + uint8_t downstream_device_count; +}; + +struct mod_hdcp2_trace { + uint8_t attempt_count; + uint8_t downstream_device_count; + uint8_t hdcp1_device_downstream; + uint8_t hdcp2_legacy_device_downstream; +}; + struct mod_hdcp_trace { struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE]; uint8_t error_count; + struct mod_hdcp1_trace hdcp1; + struct mod_hdcp2_trace hdcp2; }; enum mod_hdcp_encryption_status { @@ -303,10 +323,6 @@ struct mod_hdcp_display_query { struct mod_hdcp_config { struct mod_hdcp_psp psp; struct mod_hdcp_ddc ddc; - struct { - uint8_t lc_enable_sw_fallback : 1; - uint8_t reserved : 7; - } debug; uint8_t index; }; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 29ccd3532d13..fd139b219bf9 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -975,6 +975,34 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, return true; } +void set_replay_frame_skip_number(struct dc_link *link, + enum replay_coasting_vtotal_type type, + uint32_t coasting_vtotal_refresh_rate_mhz, + uint32_t flicker_free_refresh_rate_mhz, + bool is_defer) +{ + uint32_t *frame_skip_number_array = NULL; + uint32_t frame_skip_number = 0; + + if (link == NULL || flicker_free_refresh_rate_mhz == 0 || coasting_vtotal_refresh_rate_mhz == 0) + return; + + if (is_defer) + frame_skip_number_array = link->replay_settings.defer_frame_skip_number_table; + else + frame_skip_number_array = link->replay_settings.frame_skip_number_table; + + if (frame_skip_number_array == NULL) + return; + + frame_skip_number = coasting_vtotal_refresh_rate_mhz / flicker_free_refresh_rate_mhz; + + if (frame_skip_number >= 1) + frame_skip_number_array[type] = frame_skip_number - 1; + else + frame_skip_number_array[type] = 0; +} + void set_replay_defer_update_coasting_vtotal(struct dc_link *link, enum replay_coasting_vtotal_type type, uint32_t vtotal) @@ -987,6 +1015,8 @@ void update_replay_coasting_vtotal_from_defer(struct dc_link *link, { link->replay_settings.coasting_vtotal_table[type] = link->replay_settings.defer_update_coasting_vtotal_table[type]; + link->replay_settings.frame_skip_number_table[type] = + link->replay_settings.defer_frame_skip_number_table[type]; } void set_replay_coasting_vtotal(struct dc_link *link, @@ -1007,6 +1037,9 @@ void calculate_replay_link_off_frame_count(struct dc_link *link, uint8_t max_link_off_frame_count = 0; uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0; + if (!link || link->replay_settings.config.replay_version != DC_FREESYNC_REPLAY) + return; + max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line; 
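The hdcp_log.c and mod_hdcp.h hunks above replace the hand-maintained status enum and its parallel switch/case string table with a single MOD_HDCP_STATUS_LIST X-macro, expanded once with ENUM_FORMAT and once with CASE_FORMAT so the enum and its printable names can no longer drift apart. A minimal standalone sketch of the same X-macro pattern, using made-up example names that are not part of this patch:

#include <stdio.h>

/* Single source of truth: the list expands differently per FORMAT macro. */
#define EXAMPLE_STATUS_LIST(FORMAT) \
	FORMAT(EXAMPLE_STATUS_SUCCESS) \
	FORMAT(EXAMPLE_STATUS_FAILURE) \
	FORMAT(EXAMPLE_STATUS_RESET_NEEDED)

/* Expansion 1: enumerator list. */
#define ENUM_ENTRY(entry) entry,
/* Expansion 2: switch cases returning the stringized enumerator name. */
#define CASE_ENTRY(entry) case entry: return #entry;

enum example_status {
	EXAMPLE_STATUS_LIST(ENUM_ENTRY)
};

static const char *example_status_to_str(int status)
{
	switch (status) {
	EXAMPLE_STATUS_LIST(CASE_ENTRY)
	default:
		return "EXAMPLE_STATUS_UNKNOWN";
	}
}

int main(void)
{
	/* Prints "EXAMPLE_STATUS_FAILURE". */
	printf("%s\n", example_status_to_str(EXAMPLE_STATUS_FAILURE));
	return 0;
}

Because both expansions come from the same list, adding a new status (such as MOD_HDCP_STATUS_HDCP2_LOCALITY_COMBO_READ_FAILURE in this patch) only needs one new FORMAT(...) line in the header.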
pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index 391209a3bf29..87d31d9dce5a 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -60,6 +60,11 @@ void set_replay_coasting_vtotal(struct dc_link *link, void set_replay_defer_update_coasting_vtotal(struct dc_link *link, enum replay_coasting_vtotal_type type, uint32_t vtotal); +void set_replay_frame_skip_number(struct dc_link *link, + enum replay_coasting_vtotal_type type, + uint32_t coasting_vtotal_refresh_rate_Mhz, + uint32_t flicker_free_refresh_rate_Mhz, + bool is_defer); void update_replay_coasting_vtotal_from_defer(struct dc_link *link, enum replay_coasting_vtotal_type type); void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal); diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 75efda2969cf..17945094a138 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -109,6 +109,7 @@ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_VPE, AMD_IP_BLOCK_TYPE_UMSCH_MM, AMD_IP_BLOCK_TYPE_ISP, + AMD_IP_BLOCK_TYPE_RAS, AMD_IP_BLOCK_TYPE_NUM, }; diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h index 2176548e9203..9778822dd2a0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h @@ -60,5 +60,10 @@ #define mmVCE_VCPU_CACHE_SIZE1 0x800C #define mmVCE_VCPU_CACHE_SIZE2 0x800E #define mmVCE_VCPU_CNTL 0x8005 +#define mmVCE_VCPU_SCRATCH7 0x8037 +#define mmVCE_FW_REG_STATUS 0x8384 +#define mmVCE_LMI_FW_PERIODIC_CTRL 0x8388 +#define mmVCE_LMI_FW_START_KEYSEL 0x8386 + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h index ea5b26b11cb1..1f82d6f5abde 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h @@ -61,6 +61,8 @@ #define VCE_RB_WPTR__RB_WPTR__SHIFT 0x00000004 #define VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK 0x00000001L #define VCE_SOFT_RESET__ECPU_SOFT_RESET__SHIFT 0x00000000 +#define VCE_SOFT_RESET__FME_SOFT_RESET_MASK 0x00000004L +#define VCE_SOFT_RESET__FME_SOFT_RESET__SHIFT 0x00000002 #define VCE_STATUS__JOB_BUSY_MASK 0x00000001L #define VCE_STATUS__JOB_BUSY__SHIFT 0x00000000 #define VCE_STATUS__UENC_BUSY_MASK 0x00000100L @@ -95,5 +97,13 @@ #define VCE_VCPU_CNTL__CLK_EN__SHIFT 0x00000000 #define VCE_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00040000L #define VCE_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x00000012 +#define VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK 0x00010000 +#define VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_SHIFT 0x00000010 +#define VCE_FW_REG_STATUS__BUSY_MASK 0x0000001 +#define VCE_FW_REG_STATUS__BUSY__SHIFT 0x0000001 +#define VCE_FW_REG_STATUS__PASS_MASK 0x0000008 +#define VCE_FW_REG_STATUS__PASS__SHIFT 0x0000003 +#define VCE_FW_REG_STATUS__DONE_MASK 0x0000800 +#define VCE_FW_REG_STATUS__DONE__SHIFT 0x000000b #endif diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 2b0cdb2a2775..2366e68262e6 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ 
-454,7 +454,7 @@ struct amd_pm_funcs { bool gate, int inst); int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id); - int (*set_power_limit)(void *handle, uint32_t n); + int (*set_power_limit)(void *handle, uint32_t limit_type, uint32_t n); int (*get_power_limit)(void *handle, uint32_t *limit, enum pp_power_limit_level pp_limit_level, enum pp_power_type power_type); @@ -532,6 +532,110 @@ struct metrics_table_header { uint8_t content_revision; }; +enum amdgpu_metrics_attr_id { + AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HOTSPOT, + AMDGPU_METRICS_ATTR_ID_TEMPERATURE_MEM, + AMDGPU_METRICS_ATTR_ID_TEMPERATURE_VRSOC, + AMDGPU_METRICS_ATTR_ID_CURR_SOCKET_POWER, + AMDGPU_METRICS_ATTR_ID_AVERAGE_GFX_ACTIVITY, + AMDGPU_METRICS_ATTR_ID_AVERAGE_UMC_ACTIVITY, + AMDGPU_METRICS_ATTR_ID_MEM_MAX_BANDWIDTH, + AMDGPU_METRICS_ATTR_ID_ENERGY_ACCUMULATOR, + AMDGPU_METRICS_ATTR_ID_SYSTEM_CLOCK_COUNTER, + AMDGPU_METRICS_ATTR_ID_ACCUMULATION_COUNTER, + AMDGPU_METRICS_ATTR_ID_PROCHOT_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_PPT_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_SOCKET_THM_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_VR_THM_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_HBM_THM_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_GFXCLK_LOCK_STATUS, + AMDGPU_METRICS_ATTR_ID_PCIE_LINK_WIDTH, + AMDGPU_METRICS_ATTR_ID_PCIE_LINK_SPEED, + AMDGPU_METRICS_ATTR_ID_XGMI_LINK_WIDTH, + AMDGPU_METRICS_ATTR_ID_XGMI_LINK_SPEED, + AMDGPU_METRICS_ATTR_ID_GFX_ACTIVITY_ACC, + AMDGPU_METRICS_ATTR_ID_MEM_ACTIVITY_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_INST, + AMDGPU_METRICS_ATTR_ID_PCIE_L0_TO_RECOV_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_ROVER_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_NAK_SENT_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_NAK_RCVD_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_XGMI_READ_DATA_ACC, + AMDGPU_METRICS_ATTR_ID_XGMI_WRITE_DATA_ACC, + AMDGPU_METRICS_ATTR_ID_XGMI_LINK_STATUS, + AMDGPU_METRICS_ATTR_ID_FIRMWARE_TIMESTAMP, + AMDGPU_METRICS_ATTR_ID_CURRENT_GFXCLK, + AMDGPU_METRICS_ATTR_ID_CURRENT_SOCCLK, + AMDGPU_METRICS_ATTR_ID_CURRENT_VCLK0, + AMDGPU_METRICS_ATTR_ID_CURRENT_DCLK0, + AMDGPU_METRICS_ATTR_ID_CURRENT_UCLK, + AMDGPU_METRICS_ATTR_ID_NUM_PARTITION, + AMDGPU_METRICS_ATTR_ID_PCIE_LC_PERF_OTHER_END_RECOVERY, + AMDGPU_METRICS_ATTR_ID_GFX_BUSY_INST, + AMDGPU_METRICS_ATTR_ID_JPEG_BUSY, + AMDGPU_METRICS_ATTR_ID_VCN_BUSY, + AMDGPU_METRICS_ATTR_ID_GFX_BUSY_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_PPT_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_THM_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC, + AMDGPU_METRICS_ATTR_ID_MAX, +}; + +enum amdgpu_metrics_attr_type { + AMDGPU_METRICS_TYPE_U8, + AMDGPU_METRICS_TYPE_S8, + AMDGPU_METRICS_TYPE_U16, + AMDGPU_METRICS_TYPE_S16, + AMDGPU_METRICS_TYPE_U32, + AMDGPU_METRICS_TYPE_S32, + AMDGPU_METRICS_TYPE_U64, + AMDGPU_METRICS_TYPE_S64, + AMDGPU_METRICS_TYPE_MAX, +}; + +enum amdgpu_metrics_attr_unit { + /* None */ + AMDGPU_METRICS_UNIT_NONE, + /* MHz*/ + AMDGPU_METRICS_UNIT_CLOCK_1, + /* Degree Celsius*/ + AMDGPU_METRICS_UNIT_TEMP_1, + /* Watts*/ + AMDGPU_METRICS_UNIT_POWER_1, + /* In nanoseconds*/ + AMDGPU_METRICS_UNIT_TIME_1, + /* In 10 nanoseconds*/ + AMDGPU_METRICS_UNIT_TIME_2, + /* Speed in GT/s */ + AMDGPU_METRICS_UNIT_SPEED_1, + /* Speed in 0.1 GT/s */ + AMDGPU_METRICS_UNIT_SPEED_2, + /* Bandwidth GB/s */ + AMDGPU_METRICS_UNIT_BW_1, + /* Data in KB */ + AMDGPU_METRICS_UNIT_DATA_1, + /* Percentage */ + 
AMDGPU_METRICS_UNIT_PERCENT, + AMDGPU_METRICS_UNIT_MAX, +}; + +#define AMDGPU_METRICS_ATTR_UNIT_MASK 0xFF000000 +#define AMDGPU_METRICS_ATTR_UNIT_SHIFT 24 +#define AMDGPU_METRICS_ATTR_TYPE_MASK 0x00F00000 +#define AMDGPU_METRICS_ATTR_TYPE_SHIFT 20 +#define AMDGPU_METRICS_ATTR_ID_MASK 0x000FFC00 +#define AMDGPU_METRICS_ATTR_ID_SHIFT 10 +#define AMDGPU_METRICS_ATTR_INST_MASK 0x000003FF +#define AMDGPU_METRICS_ATTR_INST_SHIFT 0 + +#define AMDGPU_METRICS_ENC_ATTR(unit, type, id, inst) \ + (((u64)(unit) << AMDGPU_METRICS_ATTR_UNIT_SHIFT) | \ + ((u64)(type) << AMDGPU_METRICS_ATTR_TYPE_SHIFT) | \ + ((u64)(id) << AMDGPU_METRICS_ATTR_ID_SHIFT) | (inst)) + /* * gpu_metrics_v1_0 is not recommended as it's not naturally aligned. * Use gpu_metrics_v1_1 or later instead. @@ -1221,6 +1325,19 @@ struct gpu_metrics_v1_8 { uint32_t pcie_lc_perf_other_end_recovery; }; +struct gpu_metrics_attr { + /* Field type encoded with AMDGPU_METRICS_ENC_ATTR */ + uint64_t attr_encoding; + /* Attribute value, depends on attr_encoding */ + void *attr_value; +}; + +struct gpu_metrics_v1_9 { + struct metrics_table_header common_header; + int attr_count; + struct gpu_metrics_attr metrics_attrs[]; +}; + /* * gpu_metrics_v2_0 is not recommended as it's not naturally aligned. * Use gpu_metrics_v2_1 or later instead. @@ -1703,4 +1820,10 @@ struct amdgpu_partition_metrics_v1_0 { uint64_t gfx_below_host_limit_total_acc[MAX_XCC]; }; +struct amdgpu_partition_metrics_v1_1 { + struct metrics_table_header common_header; + int attr_count; + struct gpu_metrics_attr metrics_attrs[]; +}; + #endif diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index ab1cfc92dbeb..f9629d42ada2 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -345,7 +345,8 @@ union MESAPI__REMOVE_QUEUE { uint32_t unmap_kiq_utility_queue : 1; uint32_t preempt_legacy_gfx_queue : 1; uint32_t unmap_legacy_queue : 1; - uint32_t reserved : 28; + uint32_t remove_queue_after_reset : 1; + uint32_t reserved : 27; }; struct MES_API_STATUS api_status; diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h index 69611c7e30e3..2f12cba4eb66 100644 --- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h @@ -399,7 +399,8 @@ union MESAPI__REMOVE_QUEUE { uint32_t unmap_kiq_utility_queue : 1; uint32_t preempt_legacy_gfx_queue : 1; uint32_t unmap_legacy_queue : 1; - uint32_t reserved : 28; + uint32_t remove_queue_after_reset : 1; + uint32_t reserved : 27; }; struct MES_API_STATUS api_status; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index bc29a923fa6e..79b174e5326d 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -1187,8 +1187,11 @@ int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int ret = 0; - if (!pp_funcs->get_pp_table) - return 0; + if (!table) + return -EINVAL; + + if (amdgpu_sriov_vf(adev) || !pp_funcs->get_pp_table || adev->scpm_enabled) + return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, @@ -1598,6 +1601,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, } int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, + uint32_t limit_type, uint32_t limit) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -1608,7 
+1612,7 @@ int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, mutex_lock(&adev->pm.mutex); ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, - limit); + limit_type, limit); mutex_unlock(&adev->pm.mutex); return ret; @@ -1714,7 +1718,10 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int ret = 0; - if (!pp_funcs->set_pp_table) + if (!buf || !size) + return -EINVAL; + + if (amdgpu_sriov_vf(adev) || !pp_funcs->set_pp_table || adev->scpm_enabled) return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); @@ -2121,3 +2128,10 @@ ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id, return ret; } + +const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev) +{ + void *pp_handle = adev->powerplay.pp_handle; + + return smu_get_ras_smu_driver(pp_handle); +} diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index a7e6d7854b7b..65296a819e6a 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -108,8 +108,9 @@ const char * const amdgpu_pp_profile_name[] = { static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm) { bool runpm_check = runpm ? adev->in_runpm : false; + bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT); - if (amdgpu_in_reset(adev)) + if (amdgpu_in_reset(adev) || !full_init) return -EBUSY; if (adev->in_suspend && !runpm_check) @@ -173,7 +174,6 @@ static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev) */ static inline void amdgpu_pm_put_access(struct amdgpu_device *adev) { - pm_runtime_mark_last_busy(adev->dev); pm_runtime_put_autosuspend(adev->dev); } @@ -2506,7 +2506,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, .attr_update = pp_dpm_clk_default_attr_update), AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, @@ -2638,6 +2638,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_table)) { + int ret; + char *tmp = NULL; + + ret = amdgpu_dpm_get_pp_table(adev, &tmp); + if (ret == -EOPNOTSUPP || !tmp) + *states = ATTR_STATE_UNSUPPORTED; + else + *states = ATTR_STATE_SUPPORTED; } switch (gc_ver) { @@ -3372,7 +3381,9 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev, to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ? "fastPPT" : "slowPPT"); else - return sysfs_emit(buf, "PPT\n"); + return sysfs_emit(buf, "%s\n", + to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ? 
+ "PPT1" : "PPT"); } static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, @@ -3390,13 +3401,12 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, return err; value = value / 1000000; /* convert to Watt */ - value |= limit_type << 24; err = amdgpu_pm_get_access(adev); if (err < 0) return err; - err = amdgpu_dpm_set_power_limit(adev, value); + err = amdgpu_dpm_set_power_limit(adev, limit_type, value); amdgpu_pm_put_access(adev); @@ -3578,7 +3588,6 @@ static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_m static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0); static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0); -static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1); static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1); static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1); static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1); @@ -3627,7 +3636,6 @@ static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_power1_cap.dev_attr.attr, &sensor_dev_attr_power1_cap_default.dev_attr.attr, &sensor_dev_attr_power1_label.dev_attr.attr, - &sensor_dev_attr_power2_average.dev_attr.attr, &sensor_dev_attr_power2_cap_max.dev_attr.attr, &sensor_dev_attr_power2_cap_min.dev_attr.attr, &sensor_dev_attr_power2_cap.dev_attr.attr, @@ -3826,13 +3834,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, return 0; /* only Vangogh has fast PPT limit and power labels */ - if (!(gc_ver == IP_VERSION(10, 3, 1)) && - (attr == &sensor_dev_attr_power2_average.dev_attr.attr || - attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr || + if ((attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr || attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr || attr == &sensor_dev_attr_power2_cap.dev_attr.attr || attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr || - attr == &sensor_dev_attr_power2_label.dev_attr.attr)) + attr == &sensor_dev_attr_power2_label.dev_attr.attr) && + (amdgpu_dpm_get_power_limit(adev, &tmp, + PP_PWR_LIMIT_MAX, + PP_PWR_TYPE_FAST) == -EOPNOTSUPP)) return 0; return effective_mode; diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index af48aead12f7..aa3f427819a0 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -551,7 +551,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, enum pp_power_limit_level pp_limit_level, enum pp_power_type power_type); int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, - uint32_t limit); + uint32_t limit_type, uint32_t limit); int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev); int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, struct seq_file *m); @@ -612,5 +612,6 @@ int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask); bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev); bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev, enum smu_temp_metric_type type); +const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c 
b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 3a9522c17fee..1f539cc65f41 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -2558,18 +2558,13 @@ static int si_enable_power_containment(struct amdgpu_device *adev, if (enable) { if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive); - if (smc_result != PPSMC_Result_OK) { + if (smc_result != PPSMC_Result_OK) ret = -EINVAL; - ni_pi->pc_enabled = false; - } else { - ni_pi->pc_enabled = true; - } } } else { smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive); if (smc_result != PPSMC_Result_OK) ret = -EINVAL; - ni_pi->pc_enabled = false; } } @@ -7051,13 +7046,20 @@ static void si_set_vce_clock(struct amdgpu_device *adev, if ((old_rps->evclk != new_rps->evclk) || (old_rps->ecclk != new_rps->ecclk)) { /* Turn the clocks on when encoding, off otherwise */ + dev_dbg(adev->dev, "set VCE clocks: %u, %u\n", new_rps->evclk, new_rps->ecclk); + if (new_rps->evclk || new_rps->ecclk) { - /* Place holder for future VCE1.0 porting to amdgpu - vce_v1_0_enable_mgcg(adev, false, false);*/ + amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk); + amdgpu_device_ip_set_clockgating_state( + adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE); + amdgpu_device_ip_set_powergating_state( + adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_UNGATE); } else { - /* Place holder for future VCE1.0 porting to amdgpu - vce_v1_0_enable_mgcg(adev, true, false); - amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);*/ + amdgpu_device_ip_set_powergating_state( + adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state( + adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE); + amdgpu_asic_set_vce_clocks(adev, 0, 0); } } } @@ -7509,8 +7511,6 @@ static int si_dpm_init(struct amdgpu_device *adev) pi->pasi = CYPRESS_HASI_DFLT; pi->vrc = SISLANDS_VRC_DFLT; - pi->gfx_clock_gating = true; - eg_pi->sclk_deep_sleep = true; si_pi->sclk_deep_sleep_above_low = false; @@ -7521,7 +7521,6 @@ static int si_dpm_init(struct amdgpu_device *adev) eg_pi->dynamic_ac_timing = true; - eg_pi->light_sleep = true; #if defined(CONFIG_ACPI) eg_pi->pcie_performance_request = amdgpu_acpi_is_pcie_performance_request_supported(adev); @@ -7582,6 +7581,7 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle, } else { pl = &ps->performance_levels[current_index]; seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + seq_printf(m, "vce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk); seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h index 11cb7874a6bb..3aed75fbf913 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h @@ -38,11 +38,7 @@ #define MC_ARB_DRAM_TIMING2_2 0xa00 #define MC_ARB_DRAM_TIMING2_3 0xa01 -#define MAX_NO_OF_MVDD_VALUES 2 -#define MAX_NO_VREG_STEPS 32 #define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 -#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32 -#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 #define RV770_ASI_DFLT 1000 #define CYPRESS_HASI_DFLT 400000 #define PCIE_PERF_REQ_PECI_GEN1 2 @@ -51,11 +47,6 @@ #define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */ #define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 
khz */ -#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16 - -#define RV770_SMC_TABLE_ADDRESS 0xB000 -#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3 - #define SMC_STROBE_RATIO 0x0F #define SMC_STROBE_ENABLE 0x10 @@ -64,27 +55,6 @@ #define SMC_MC_RTT_ENABLE 0x04 #define SMC_MC_STUTTER_EN 0x08 -#define RV770_SMC_VOLTAGEMASK_VDDC 0 -#define RV770_SMC_VOLTAGEMASK_MVDD 1 -#define RV770_SMC_VOLTAGEMASK_VDDCI 2 -#define RV770_SMC_VOLTAGEMASK_MAX 4 - -#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 -#define NISLANDS_SMC_STROBE_RATIO 0x0F -#define NISLANDS_SMC_STROBE_ENABLE 0x10 - -#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01 -#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02 -#define NISLANDS_SMC_MC_RTT_ENABLE 0x04 -#define NISLANDS_SMC_MC_STUTTER_EN 0x08 - -#define MAX_NO_VREG_STEPS 32 - -#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0 -#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1 -#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2 -#define NISLANDS_SMC_VOLTAGEMASK_MAX 4 - #define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0 #define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1 #define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2 @@ -219,32 +189,6 @@ enum si_cac_config_reg_type SISLANDS_CACCONFIG_MAX }; -enum si_power_level { - SI_POWER_LEVEL_LOW = 0, - SI_POWER_LEVEL_MEDIUM = 1, - SI_POWER_LEVEL_HIGH = 2, - SI_POWER_LEVEL_CTXSW = 3, -}; - -enum si_td { - SI_TD_AUTO, - SI_TD_UP, - SI_TD_DOWN, -}; - -enum si_display_watermark { - SI_DISPLAY_WATERMARK_LOW = 0, - SI_DISPLAY_WATERMARK_HIGH = 1, -}; - -enum si_display_gap -{ - SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, - SI_PM_DISPLAY_GAP_VBLANK = 1, - SI_PM_DISPLAY_GAP_WATERMARK = 2, - SI_PM_DISPLAY_GAP_IGNORE = 3, -}; - extern const struct amdgpu_ip_block_version si_smu_ip_block; struct ni_leakage_coeffients @@ -258,56 +202,6 @@ struct ni_leakage_coeffients u32 t_ref; }; -struct SMC_Evergreen_MCRegisterAddress -{ - uint16_t s0; - uint16_t s1; -}; - -typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress; - -struct evergreen_mc_reg_entry { - u32 mclk_max; - u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -struct evergreen_mc_reg_table { - u8 last; - u8 num_entries; - u16 valid_flag; - struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -struct SMC_Evergreen_MCRegisterSet -{ - uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet; - -struct SMC_Evergreen_MCRegisters -{ - uint8_t last; - uint8_t reserved[3]; - SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; - SMC_Evergreen_MCRegisterSet data[5]; -}; - -typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; - -struct SMC_NIslands_MCRegisterSet -{ - uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet; - -struct ni_mc_reg_entry { - u32 mclk_max; - u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - struct SMC_NIslands_MCRegisterAddress { uint16_t s0; @@ -316,257 +210,20 @@ struct SMC_NIslands_MCRegisterAddress typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress; -struct SMC_NIslands_MCRegisters -{ - uint8_t last; - uint8_t reserved[3]; - SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; - SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; -}; - -typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters; - -struct evergreen_ulv_param 
{ - bool supported; - struct rv7xx_pl *pl; -}; - -struct evergreen_arb_registers { - u32 mc_arb_dram_timing; - u32 mc_arb_dram_timing2; - u32 mc_arb_rfsh_rate; - u32 mc_arb_burst_time; -}; - -struct at { - u32 rlp; - u32 rmp; - u32 lhp; - u32 lmp; -}; - -struct ni_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_func_cntl_4; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_ad_func_cntl; - u32 mpll_ad_func_cntl_2; - u32 mpll_dq_func_cntl; - u32 mpll_dq_func_cntl_2; - u32 mpll_ss1; - u32 mpll_ss2; -}; - -struct RV770_SMC_SCLK_VALUE -{ - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t sclk_value; -}; - -typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE; - -struct RV770_SMC_MCLK_VALUE -{ - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL_2; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL_2; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE; - - -struct RV730_SMC_MCLK_VALUE -{ - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL2; - uint32_t vMPLL_FUNC_CNTL3; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE; - -struct RV770_SMC_VOLTAGE_VALUE -{ - uint16_t value; - uint8_t index; - uint8_t padding; -}; - -typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE; - -union RV7XX_SMC_MCLK_VALUE -{ - RV770_SMC_MCLK_VALUE mclk770; - RV730_SMC_MCLK_VALUE mclk730; -}; - -typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE; - -struct RV770_SMC_HW_PERFORMANCE_LEVEL -{ - uint8_t arbValue; - union{ - uint8_t seqValue; - uint8_t ACIndex; - }; - uint8_t displayWatermark; - uint8_t gen2PCIE; - uint8_t gen2XSP; - uint8_t backbias; - uint8_t strobeMode; - uint8_t mcFlags; - uint32_t aT; - uint32_t bSP; - RV770_SMC_SCLK_VALUE sclk; - RV7XX_SMC_MCLK_VALUE mclk; - RV770_SMC_VOLTAGE_VALUE vddc; - RV770_SMC_VOLTAGE_VALUE mvdd; - RV770_SMC_VOLTAGE_VALUE vddci; - uint8_t reserved1; - uint8_t reserved2; - uint8_t stateFlags; - uint8_t padding; -}; - -typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL; - -struct RV770_SMC_SWSTATE -{ - uint8_t flags; - uint8_t padding1; - uint8_t padding2; - uint8_t padding3; - RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; -}; - -typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE; - -struct RV770_SMC_VOLTAGEMASKTABLE -{ - uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX]; - uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX]; -}; - -typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE; - -struct RV770_SMC_STATETABLE -{ - uint8_t thermalProtectType; - uint8_t systemFlags; - uint8_t maxVDDCIndexInPPTable; - uint8_t extraFlags; - uint8_t highSMIO[MAX_NO_VREG_STEPS]; - uint32_t lowSMIO[MAX_NO_VREG_STEPS]; - RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable; - RV770_SMC_SWSTATE initialState; - RV770_SMC_SWSTATE ACPIState; - RV770_SMC_SWSTATE driverState; - RV770_SMC_SWSTATE ULVState; -}; - -typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE; - -struct vddc_table_entry { - u16 vddc; - u8 vddc_index; - u8 high_smio; - u32 low_smio; -}; - -struct 
rv770_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mpll_ad_func_cntl; - u32 mpll_ad_func_cntl_2; - u32 mpll_dq_func_cntl; - u32 mpll_dq_func_cntl_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_ss1; - u32 mpll_ss2; -}; - -struct rv730_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_func_cntl; - u32 mpll_func_cntl2; - u32 mpll_func_cntl3; - u32 mpll_ss; - u32 mpll_ss2; -}; - -union r7xx_clock_registers { - struct rv770_clock_registers rv770; - struct rv730_clock_registers rv730; -}; - struct rv7xx_power_info { /* flags */ - bool mem_gddr5; - bool pcie_gen2; - bool dynamic_pcie_gen2; - bool acpi_pcie_gen2; - bool boot_in_gen2; bool voltage_control; /* vddc */ bool mvdd_control; bool sclk_ss; bool mclk_ss; bool dynamic_ss; - bool gfx_clock_gating; - bool mg_clock_gating; - bool mgcgtssm; - bool power_gating; bool thermal_protection; - bool display_gap; - bool dcodt; - bool ulps; - /* registers */ - union r7xx_clock_registers clk_regs; - u32 s0_vid_lower_smio_cntl; /* voltage */ - u32 vddc_mask_low; - u32 mvdd_mask_low; u32 mvdd_split_frequency; - u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES]; u16 max_vddc; u16 max_vddc_in_table; u16 min_vddc_in_table; - struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS]; - u8 valid_vddc_entries; - /* dc odt */ - u32 mclk_odt_threshold; - u8 odt_value_0[2]; - u8 odt_value_1[2]; /* stored values */ - u32 boot_sclk; u16 acpi_vddc; u32 ref_div; u32 active_auto_throttle_sources; @@ -582,17 +239,6 @@ struct rv7xx_power_info { u32 asi; u32 pasi; u32 vrc; - u32 restricted_levels; - u32 rlp; - u32 rmp; - u32 lhp; - u32 lmp; - /* smc offsets */ - u16 state_table_start; - u16 soft_regs_start; - u16 sram_end; - /* scratch structs */ - RV770_SMC_STATETABLE smc_statetable; }; enum si_pcie_gen { @@ -611,44 +257,12 @@ struct rv7xx_pl { enum si_pcie_gen pcie_gen; /* si+ only */ }; -struct rv7xx_ps { - struct rv7xx_pl high; - struct rv7xx_pl medium; - struct rv7xx_pl low; - bool dc_compatible; -}; - struct si_ps { u16 performance_level_count; bool dc_compatible; struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; }; -struct ni_mc_reg_table { - u8 last; - u8 num_entries; - u16 valid_flag; - struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -struct ni_cac_data -{ - struct ni_leakage_coeffients leakage_coefficients; - u32 i_leakage; - s32 leakage_minimum_temperature; - u32 pwr_const; - u32 dc_cac_value; - u32 bif_cac_value; - u32 lkge_pwr; - u8 mc_wr_weight; - u8 mc_rd_weight; - u8 allow_ovrflw; - u8 num_win_tdp; - u8 l2num_win_tdp; - u8 lts_truncate_n; -}; - struct evergreen_power_info { /* must be first! 
*/ struct rv7xx_power_info rv7xx; @@ -657,203 +271,33 @@ struct evergreen_power_info { bool dynamic_ac_timing; bool abm; bool mcls; - bool light_sleep; - bool memory_transition; bool pcie_performance_request; - bool pcie_performance_request_registered; bool sclk_deep_sleep; - bool dll_default_on; - bool ls_clock_gating; bool smu_uvd_hs; bool uvd_enabled; /* stored values */ u16 acpi_vddci; - u8 mvdd_high_index; - u8 mvdd_low_index; u32 mclk_edc_wr_enable_threshold; - struct evergreen_mc_reg_table mc_reg_table; struct atom_voltage_table vddc_voltage_table; struct atom_voltage_table vddci_voltage_table; - struct evergreen_arb_registers bootup_arb_registers; - struct evergreen_ulv_param ulv; - struct at ats[2]; - /* smc offsets */ - u16 mc_reg_table_start; struct amdgpu_ps current_rps; - struct rv7xx_ps current_ps; struct amdgpu_ps requested_rps; - struct rv7xx_ps requested_ps; }; -struct PP_NIslands_Dpm2PerfLevel -{ - uint8_t MaxPS; - uint8_t TgtAct; - uint8_t MaxPS_StepInc; - uint8_t MaxPS_StepDec; - uint8_t PSST; - uint8_t NearTDPDec; - uint8_t AboveSafeInc; - uint8_t BelowSafeInc; - uint8_t PSDeltaLimit; - uint8_t PSDeltaWin; - uint8_t Reserved[6]; -}; - -typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel; - -struct PP_NIslands_DPM2Parameters -{ - uint32_t TDPLimit; - uint32_t NearTDPLimit; - uint32_t SafePowerLimit; - uint32_t PowerBoostLimit; -}; -typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters; - -struct NISLANDS_SMC_SCLK_VALUE -{ - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t sclk_value; -}; - -typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE; - -struct NISLANDS_SMC_MCLK_VALUE -{ - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL_2; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL_2; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE; - -struct NISLANDS_SMC_VOLTAGE_VALUE -{ - uint16_t value; - uint8_t index; - uint8_t padding; -}; - -typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE; - -struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL -{ - uint8_t arbValue; - uint8_t ACIndex; - uint8_t displayWatermark; - uint8_t gen2PCIE; - uint8_t reserved1; - uint8_t reserved2; - uint8_t strobeMode; - uint8_t mcFlags; - uint32_t aT; - uint32_t bSP; - NISLANDS_SMC_SCLK_VALUE sclk; - NISLANDS_SMC_MCLK_VALUE mclk; - NISLANDS_SMC_VOLTAGE_VALUE vddc; - NISLANDS_SMC_VOLTAGE_VALUE mvdd; - NISLANDS_SMC_VOLTAGE_VALUE vddci; - NISLANDS_SMC_VOLTAGE_VALUE std_vddc; - uint32_t powergate_en; - uint8_t hUp; - uint8_t hDown; - uint8_t stateFlags; - uint8_t arbRefreshState; - uint32_t SQPowerThrottle; - uint32_t SQPowerThrottle_2; - uint32_t reserved[2]; - PP_NIslands_Dpm2PerfLevel dpm2; -}; - -typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL; - -struct NISLANDS_SMC_SWSTATE -{ - uint8_t flags; - uint8_t levelCount; - uint8_t padding2; - uint8_t padding3; - NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[]; -}; - -typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE; - -struct NISLANDS_SMC_VOLTAGEMASKTABLE -{ - uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; - uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; -}; - 
-typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE; - -#define NISLANDS_MAX_NO_VREG_STEPS 32 - -struct NISLANDS_SMC_STATETABLE -{ - uint8_t thermalProtectType; - uint8_t systemFlags; - uint8_t maxVDDCIndexInPPTable; - uint8_t extraFlags; - uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS]; - uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS]; - NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; - PP_NIslands_DPM2Parameters dpm2Params; - NISLANDS_SMC_SWSTATE initialState; - NISLANDS_SMC_SWSTATE ACPIState; - NISLANDS_SMC_SWSTATE ULVState; - NISLANDS_SMC_SWSTATE driverState; - NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1]; -}; - -typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE; - struct ni_power_info { /* must be first! */ struct evergreen_power_info eg; - struct ni_clock_registers clock_registers; - struct ni_mc_reg_table mc_reg_table; u32 mclk_rtt_mode_threshold; /* flags */ - bool use_power_boost_limit; bool support_cac_long_term_average; bool cac_enabled; bool cac_configuration_required; bool driver_calculate_cac_leakage; - bool pc_enabled; bool enable_power_containment; bool enable_cac; bool enable_sq_ramping; - /* smc offsets */ - u16 arb_table_start; - u16 fan_table_start; - u16 cac_table_start; - u16 spll_table_start; - /* CAC stuff */ - struct ni_cac_data cac_data; - u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS]; - const struct ni_cac_weights *cac_weights; - u8 lta_window_size; - u8 lts_truncate; struct si_ps current_ps; struct si_ps requested_ps; - /* scratch structs */ - SMC_NIslands_MCRegisters smc_mc_reg_table; - NISLANDS_SMC_STATETABLE smc_statetable; }; struct si_cac_config_reg @@ -952,7 +396,6 @@ struct si_leakage_voltage struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT]; }; - struct si_ulv_param { bool supported; u32 cg_ulv_control; diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 554492dfa3c0..3aaf3dd71868 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -20,7 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* */ -#include "pp_debug.h" #include #include #include @@ -28,12 +27,10 @@ #include #include #include "amd_shared.h" -#include "amd_powerplay.h" #include "power_state.h" #include "amdgpu.h" #include "hwmgr.h" #include "amdgpu_dpm_internal.h" -#include "amdgpu_display.h" static const struct amd_pm_funcs pp_dpm_funcs; @@ -634,9 +631,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table) { struct pp_hwmgr *hwmgr = handle; - if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table) + if (!hwmgr || !hwmgr->pm_en || !table) return -EINVAL; + if (!hwmgr->soft_pp_table) + return -EOPNOTSUPP; + *table = (char *)hwmgr->soft_pp_table; return hwmgr->soft_pp_table_size; } @@ -955,7 +955,7 @@ static int pp_dpm_switch_power_profile(void *handle, return 0; } -static int pp_set_power_limit(void *handle, uint32_t limit) +static int pp_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit) { struct pp_hwmgr *hwmgr = handle; uint32_t max_power_limit; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c index ac9ec8257f82..38e19e5cad4d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c @@ -139,7 +139,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id, NULL); - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -164,7 +164,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c index f9c0f117725d..0bf1bf5528c2 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c @@ -60,7 +60,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id, NULL); - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -90,7 +90,7 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c index d3ff6a831ed5..e2ba593faa5d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c @@ -68,7 +68,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", return -EINVAL); - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -98,7 +98,7 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, 
priv->smu_tables.entry[table_id].size); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c index a5c95b180672..e3515156d26f 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c @@ -192,7 +192,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", return ret); - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -223,7 +223,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, @@ -256,7 +256,7 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, @@ -306,7 +306,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr, "[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!", return ret); - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 244b8c364d45..f51fa265230b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -68,7 +68,7 @@ static int smu_handle_task(struct smu_context *smu, static int smu_reset(struct smu_context *smu); static int smu_set_fan_speed_pwm(void *handle, u32 speed); static int smu_set_fan_control_mode(void *handle, u32 value); -static int smu_set_power_limit(void *handle, uint32_t limit); +static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit); static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state); @@ -508,11 +508,14 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu) /* Enable restore flag */ smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; - /* set the user dpm power limit */ - if (smu->user_dpm_profile.power_limit) { - ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); + /* set the user dpm power limits */ + for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) { + if (!smu->user_dpm_profile.power_limits[i]) + continue; + ret = smu_set_power_limit(smu, i, + smu->user_dpm_profile.power_limits[i]); if (ret) - dev_err(smu->adev->dev, "Failed to set power limit value\n"); + dev_err(smu->adev->dev, "Failed to set %d power limit value\n", i); } /* set the user dpm clock configurations */ @@ -609,6 +612,17 @@ bool 
is_support_cclk_dpm(struct amdgpu_device *adev) return true; } +int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg, + uint32_t param, uint32_t *read_arg) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + int ret = -EOPNOTSUPP; + + if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg) + ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg); + + return ret; +} static int smu_sys_get_pp_table(void *handle, char **table) @@ -620,7 +634,7 @@ static int smu_sys_get_pp_table(void *handle, return -EOPNOTSUPP; if (!smu_table->power_play_table && !smu_table->hardcode_pptable) - return -EINVAL; + return -EOPNOTSUPP; if (smu_table->hardcode_pptable) *table = smu_table->hardcode_pptable; @@ -1655,9 +1669,12 @@ static int smu_smc_hw_setup(struct smu_context *smu) if (adev->in_suspend && smu_is_dpm_running(smu)) { dev_info(adev->dev, "dpm has been enabled\n"); ret = smu_system_features_control(smu, true); - if (ret) + if (ret) { dev_err(adev->dev, "Failed system features control!\n"); - return ret; + return ret; + } + + return smu_enable_thermal_alert(smu); } break; default: @@ -2231,7 +2248,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) int ret; struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (amdgpu_sriov_multi_vf_mode(adev)) return 0; @@ -2263,18 +2279,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) adev->pm.dpm_enabled = true; - if (smu->current_power_limit) { - ret = smu_set_power_limit(smu, smu->current_power_limit); - if (ret && ret != -EOPNOTSUPP) - return ret; - } - - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) { - ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0); - if (ret) - return ret; - } - dev_info(adev->dev, "SMU is resumed successfully!\n"); return 0; @@ -2802,6 +2806,17 @@ const struct amdgpu_ip_block_version smu_v14_0_ip_block = { .funcs = &smu_ip_funcs, }; +const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle) +{ + struct smu_context *smu = (struct smu_context *)handle; + const struct ras_smu_drv *tmp = NULL; + int ret; + + ret = smu_get_ras_smu_drv(smu, &tmp); + + return ret ? 
NULL : tmp; +} + static int smu_load_microcode(void *handle) { struct smu_context *smu = handle; @@ -2895,6 +2910,9 @@ int smu_get_power_limit(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; + if (!limit) + return -EINVAL; + switch (pp_power_type) { case PP_PWR_TYPE_SUSTAINED: limit_type = SMU_DEFAULT_PPT_LIMIT; @@ -2926,6 +2944,8 @@ int smu_get_power_limit(void *handle, if (limit_type != SMU_DEFAULT_PPT_LIMIT) { if (smu->ppt_funcs->get_ppt_limit) ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); + else + return -EOPNOTSUPP; } else { switch (limit_level) { case SMU_PPT_LIMIT_CURRENT: @@ -2964,37 +2984,34 @@ int smu_get_power_limit(void *handle, return ret; } -static int smu_set_power_limit(void *handle, uint32_t limit) +static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit) { struct smu_context *smu = handle; - uint32_t limit_type = limit >> 24; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - limit &= (1<<24)-1; - if (limit_type != SMU_DEFAULT_PPT_LIMIT) - if (smu->ppt_funcs->set_power_limit) - return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); - - if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) { - dev_err(smu->adev->dev, - "New power limit (%d) is out of range [%d,%d]\n", - limit, smu->min_power_limit, smu->max_power_limit); - return -EINVAL; + if (limit_type == SMU_DEFAULT_PPT_LIMIT) { + if (!limit) + limit = smu->current_power_limit; + if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) { + dev_err(smu->adev->dev, + "New power limit (%d) is out of range [%d,%d]\n", + limit, smu->min_power_limit, smu->max_power_limit); + return -EINVAL; + } } - if (!limit) - limit = smu->current_power_limit; - if (smu->ppt_funcs->set_power_limit) { ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); - if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) - smu->user_dpm_profile.power_limit = limit; + if (ret) + return ret; + if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) + smu->user_dpm_profile.power_limits[limit_type] = limit; } - return ret; + return 0; } static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 582c186d8b62..8815fc70b63b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -212,6 +212,7 @@ enum smu_power_src_type { enum smu_ppt_limit_type { SMU_DEFAULT_PPT_LIMIT = 0, SMU_FAST_PPT_LIMIT, + SMU_LIMIT_TYPE_COUNT, }; enum smu_ppt_limit_level { @@ -231,7 +232,7 @@ enum smu_memory_pool_size { struct smu_user_dpm_profile { uint32_t fan_mode; - uint32_t power_limit; + uint32_t power_limits[SMU_LIMIT_TYPE_COUNT]; uint32_t fan_speed_pwm; uint32_t fan_speed_rpm; uint32_t flags; @@ -1521,6 +1522,21 @@ struct pptable_funcs { */ ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id, void *table); + /** + * @ras_send_msg: Send a message with a parameter from Ras + * &msg: Type of message. + * ¶m: Message parameter. + * &read_arg: SMU response (optional). 
+ */ + int (*ras_send_msg)(struct smu_context *smu, + enum smu_message_type msg, uint32_t param, uint32_t *read_arg); + + + /** + * @get_ras_smu_drv: Get RAS smu driver interface + * Return: ras_smu_drv * + */ + int (*get_ras_smu_drv)(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv); }; typedef enum { @@ -1785,7 +1801,10 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, int level); ssize_t smu_get_pm_policy_info(struct smu_context *smu, enum pp_pm_policy p_type, char *sysbuf); +const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle); +int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg, + uint32_t param, uint32_t *readarg); #endif void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h index bf6aa9620911..dd30d96e1ca2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h @@ -87,7 +87,7 @@ typedef enum { /*37*/ FEATURE_DVO = 37, /*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38, /*39*/ FEATURE_GLOBAL_DPM = 39, -/*40*/ FEATURE_NODE_POWER_MANAGER = 40, +/*40*/ FEATURE_HROM_EN = 40, /*41*/ NUM_FEATURES = 41 } FEATURE_LIST_e; @@ -189,7 +189,7 @@ typedef enum { SVI_MAX_TEMP_ENTRIES, // 13 } SVI_TEMP_e; -#define SMU_METRICS_TABLE_VERSION 0x14 +#define SMU_METRICS_TABLE_VERSION 0x15 #define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1 @@ -367,6 +367,11 @@ typedef struct { //Node Power Limit uint32_t MaxNodePowerLimit; + + // PPT1 Configuration + uint32_t PPT1Max; + uint32_t PPT1Min; + uint32_t PPT1Default; } StaticMetricsTable_t; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h index 4b066c42e0ec..d09b6ae9827e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h @@ -105,23 +105,21 @@ #define PPSMC_MSG_UpdatePccWaitDecMaxStr 0x4C #define PPSMC_MSG_ResetSDMA 0x4D #define PPSMC_MSG_GetRasTableVersion 0x4E -#define PPSMC_MSG_GetRmaStatus 0x4F -#define PPSMC_MSG_GetErrorCount 0x50 -#define PPSMC_MSG_GetBadPageCount 0x51 -#define PPSMC_MSG_GetBadPageInfo 0x52 -#define PPSMC_MSG_GetBadPagePaAddrLoHi 0x53 -#define PPSMC_MSG_SetTimestampLoHi 0x54 -#define PPSMC_MSG_GetTimestampLoHi 0x55 -#define PPSMC_MSG_GetRasPolicy 0x56 -#define PPSMC_MSG_DumpErrorRecord 0x57 +#define PPSMC_MSG_GetBadPageCount 0x50 +#define PPSMC_MSG_GetBadPageMcaAddress 0x51 +#define PPSMC_MSG_SetTimestamp 0x53 +#define PPSMC_MSG_SetTimestampHi 0x54 +#define PPSMC_MSG_GetTimestamp 0x55 +#define PPSMC_MSG_GetBadPageIpIdLoHi 0x57 #define PPSMC_MSG_EraseRasTable 0x58 #define PPSMC_MSG_GetStaticMetricsTable 0x59 #define PPSMC_MSG_ResetVfArbitersByIndex 0x5A -#define PPSMC_MSG_GetBadPageSeverity 0x5B #define PPSMC_MSG_GetSystemMetricsTable 0x5C #define PPSMC_MSG_GetSystemMetricsVersion 0x5D #define PPSMC_MSG_ResetVCN 0x5E -#define PPSMC_Message_Count 0x5F +#define PPSMC_MSG_SetFastPptLimit 0x5F +#define PPSMC_MSG_GetFastPptLimit 0x60 +#define PPSMC_Message_Count 0x61 //PPSMC Reset Types for driver msg argument #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index 2256c77da636..9b71a8afdd35 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -279,7 +279,16 @@ __SMU_DUMMY_MAP(ResetSDMA), \ __SMU_DUMMY_MAP(ResetVCN), \ __SMU_DUMMY_MAP(GetStaticMetricsTable), \ - __SMU_DUMMY_MAP(GetSystemMetricsTable), + __SMU_DUMMY_MAP(GetSystemMetricsTable), \ + __SMU_DUMMY_MAP(GetRASTableVersion), \ + __SMU_DUMMY_MAP(GetBadPageCount), \ + __SMU_DUMMY_MAP(GetBadPageMcaAddr), \ + __SMU_DUMMY_MAP(SetTimestamp), \ + __SMU_DUMMY_MAP(GetTimestamp), \ + __SMU_DUMMY_MAP(GetBadPageIpid), \ + __SMU_DUMMY_MAP(EraseRasTable), \ + __SMU_DUMMY_MAP(SetFastPptLimit), \ + __SMU_DUMMY_MAP(GetFastPptLimit), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type @@ -458,7 +467,8 @@ enum smu_clk_type { __SMU_DUMMY_MAP(GFX_EDC_XVMIN), \ __SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \ __SMU_DUMMY_MAP(FAN_ABNORMAL), \ - __SMU_DUMMY_MAP(PIT), + __SMU_DUMMY_MAP(PIT), \ + __SMU_DUMMY_MAP(HROM_EN), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index 9548bd3c624b..55401e6b2b0b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -291,11 +291,12 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int ret = 0, size = 0; + int ret = 0, size = 0, start_offset = 0; uint32_t cur_value = 0; int i; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -353,7 +354,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, return ret; } - return size; + return size - start_offset; } static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 0028f10ead42..7c9f77124ab2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1469,7 +1469,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { uint16_t *curve_settings; - int i, levels, size = 0, ret = 0; + int i, levels, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1484,6 +1484,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, uint32_t min_value, max_value; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_GFXCLK: @@ -1497,11 +1498,11 @@ static int navi10_print_clk_levels(struct smu_context *smu, case SMU_DCEFCLK: ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value); if (ret) - return size; + return size - start_offset; ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count); if (ret) - return size; + return size - start_offset; ret = navi10_is_support_fine_grained_dpm(smu, clk_type); if (ret < 0) @@ -1511,7 +1512,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, for (i = 0; i < count; i++) { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value); if (ret) - return size; + return size - start_offset; size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, cur_value == value ? 
"*" : ""); @@ -1519,10 +1520,10 @@ static int navi10_print_clk_levels(struct smu_context *smu, } else { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]); if (ret) - return size; + return size - start_offset; ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]); if (ret) - return size; + return size - start_offset; freq_values[1] = cur_value; mark_index = cur_value == freq_values[0] ? 0 : @@ -1653,7 +1654,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int navi10_force_clk_levels(struct smu_context *smu, @@ -2888,7 +2889,7 @@ static int navi10_set_dummy_pstates_table_location(struct smu_context *smu) dummy_table += 0x1000; } - amdgpu_asic_flush_hdp(smu->adev, NULL); + amdgpu_hdp_flush(smu->adev, NULL); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 31c2c0386b1f..774283ac7827 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1281,7 +1281,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings; OverDriveTable_t *od_table = (OverDriveTable_t *)table_context->overdrive_table; - int i, size = 0, ret = 0; + int i, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1289,6 +1289,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, uint32_t min_value, max_value; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_GFXCLK: @@ -1434,7 +1435,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } static int sienna_cichlid_force_clk_levels(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 78e4186d06cc..b0d6487171d7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1022,7 +1022,12 @@ int smu_v11_0_enable_thermal_alert(struct smu_context *smu) int smu_v11_0_disable_thermal_alert(struct smu_context *smu) { - return amdgpu_irq_put(smu->adev, &smu->irq_source, 0); + int ret = 0; + + if (smu->smu_table.thermal_controller_type) + ret = amdgpu_irq_put(smu->adev, &smu->irq_source, 0); + + return ret; } static uint16_t convert_to_vddc(uint8_t vid) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 0708d0f0938b..9626da2dba58 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -565,7 +565,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_legacy_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -576,6 +576,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { 
case SMU_OD_SCLK: @@ -658,7 +659,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int vangogh_print_clk_levels(struct smu_context *smu, @@ -666,7 +667,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, { DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; uint32_t min, max; @@ -678,6 +679,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -779,7 +781,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int vangogh_common_print_clk_levels(struct smu_context *smu, @@ -2311,8 +2313,7 @@ static int vangogh_get_power_limit(struct smu_context *smu, uint32_t *max_power_limit, uint32_t *min_power_limit) { - struct smu_11_5_power_context *power_context = - smu->smu_power.power_context; + struct smu_11_5_power_context *power_context = smu->smu_power.power_context; uint32_t ppt_limit; int ret = 0; @@ -2348,12 +2349,11 @@ static int vangogh_get_power_limit(struct smu_context *smu, } static int vangogh_get_ppt_limit(struct smu_context *smu, - uint32_t *ppt_limit, - enum smu_ppt_limit_type type, - enum smu_ppt_limit_level level) + uint32_t *ppt_limit, + enum smu_ppt_limit_type type, + enum smu_ppt_limit_level level) { - struct smu_11_5_power_context *power_context = - smu->smu_power.power_context; + struct smu_11_5_power_context *power_context = smu->smu_power.power_context; if (!power_context) return -EOPNOTSUPP; @@ -2402,7 +2402,6 @@ static int vangogh_set_power_limit(struct smu_context *smu, smu->current_power_limit = ppt_limit; break; case SMU_FAST_PPT_LIMIT: - ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24); if (ppt_limit > power_context->max_fast_ppt_limit) { dev_err(smu->adev->dev, "New power limit (%d) is over the max allowed %d\n", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 3baf20f4c373..eaa9ea162f16 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) static int renoir_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; bool cur_value_match_level = false; @@ -506,6 +506,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_RANGE: @@ -550,7 +551,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, i == 2 ? 
"*" : ""); } - return size; + return size - start_offset; case SMU_SOCCLK: count = NUM_SOCCLK_DPM_LEVELS; cur_value = metrics.ClockFrequency[CLOCK_SOCCLK]; @@ -607,7 +608,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index c1062e5f0393..677781060246 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1195,15 +1195,16 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, struct smu_13_0_dpm_table *single_dpm_table; struct smu_13_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1534,7 +1535,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c index cb3fea9e8cf3..9e635f733fbf 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -34,6 +34,7 @@ #include "amdgpu_fru_eeprom.h" #include #include "smu_cmn.h" +#include "amdgpu_ras.h" #undef MP1_Public #undef smnMP1_FIRMWARE_FLAGS @@ -58,7 +59,7 @@ #define NUM_JPEG_RINGS_FW 10 #define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \ - (ARRAY_SIZE(gpu_metrics->xcp_stats[0].jpeg_busy) / 4) + (ARRAY_SIZE(gpu_metrics->jpeg_busy) / 4) const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = { SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION), @@ -81,6 +82,7 @@ const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK), SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK), SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT), + SMU_13_0_12_FEA_MAP(SMU_FEATURE_HROM_EN_BIT, FEATURE_HROM_EN), }; const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = { @@ -139,6 +141,15 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0), MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1), MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1), + MSG_MAP(GetRASTableVersion, PPSMC_MSG_GetRasTableVersion, 0), + MSG_MAP(GetBadPageCount, PPSMC_MSG_GetBadPageCount, 0), + MSG_MAP(GetBadPageMcaAddr, PPSMC_MSG_GetBadPageMcaAddress, 0), + MSG_MAP(SetTimestamp, PPSMC_MSG_SetTimestamp, 0), + MSG_MAP(GetTimestamp, PPSMC_MSG_GetTimestamp, 0), + MSG_MAP(GetBadPageIpid, PPSMC_MSG_GetBadPageIpIdLoHi, 0), + MSG_MAP(EraseRasTable, PPSMC_MSG_EraseRasTable, 0), + MSG_MAP(SetFastPptLimit, PPSMC_MSG_SetFastPptLimit, 1), + MSG_MAP(GetFastPptLimit, PPSMC_MSG_GetFastPptLimit, 1), }; int smu_v13_0_12_tables_init(struct smu_context *smu) @@ -345,6 +356,12 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) if (smu_v13_0_6_cap_supported(smu, 
SMU_CAP(NPM_METRICS))) pptable->MaxNodePowerLimit = SMUQ10_ROUND(static_metrics->MaxNodePowerLimit); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) && + static_metrics->PPT1Max) { + pptable->PPT1Max = static_metrics->PPT1Max; + pptable->PPT1Min = static_metrics->PPT1Min; + pptable->PPT1Default = static_metrics->PPT1Default; + } smu_v13_0_12_init_xgmi_data(smu, static_metrics); pptable->Init = true; } @@ -449,7 +466,7 @@ static int smu_v13_0_12_get_system_metrics_table(struct smu_context *smu) return ret; } - amdgpu_asic_invalidate_hdp(smu->adev, NULL); + amdgpu_hdp_invalidate(smu->adev, NULL); smu_table_cache_update_time(sys_table, jiffies); memcpy(sys_table->cache.buffer, table->cpu_addr, smu_v13_0_12_get_system_metrics_size()); @@ -719,15 +736,14 @@ static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu, ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics) { const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW; - struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct smu_v13_0_6_partition_metrics *xcp_metrics; struct amdgpu_device *adev = smu->adev; MetricsTable_t *metrics; int inst, j, k, idx; u32 inst_mask; metrics = (MetricsTable_t *)smu_metrics; - xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table; - smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table; amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); idx = 0; for_each_inst(k, inst_mask) { @@ -772,22 +788,17 @@ ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp return sizeof(*xcp_metrics); } -ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics) +void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, + void *smu_metrics, + struct smu_v13_0_6_gpu_metrics *gpu_metrics) { - struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_8 *gpu_metrics = - (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; - int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; + int ret = 0, xcc_id, inst, i, j; u8 num_jpeg_rings_gpu_metrics; MetricsTable_t *metrics; - struct amdgpu_xcp *xcp; - u32 inst_mask; metrics = (MetricsTable_t *)smu_metrics; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); - gpu_metrics->temperature_hotspot = SMUQ10_ROUND(metrics->MaxSocketTemperature); /* Individual HBM stack temperature is not reported */ @@ -877,60 +888,186 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void gpu_metrics->xgmi_link_status[j] = ret; } - gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; - num_jpeg_rings_gpu_metrics = NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics); - for_each_xcp(adev->xcp_mgr, xcp, i) { - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - /* Both JPEG and VCN has same instances */ - inst = GET_INST(VCN, k); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + inst = GET_INST(VCN, i); - for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) { - gpu_metrics->xcp_stats[i].jpeg_busy - [(idx * num_jpeg_rings_gpu_metrics) + j] = - SMUQ10_ROUND(metrics->JpegBusy - [(inst * NUM_JPEG_RINGS_FW) + j]); - } - gpu_metrics->xcp_stats[i].vcn_busy[idx] = - SMUQ10_ROUND(metrics->VcnBusy[inst]); - idx++; + for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) { + gpu_metrics->jpeg_busy[(i * num_jpeg_rings_gpu_metrics) + + j] = + SMUQ10_ROUND( + metrics->JpegBusy[(inst * + 
NUM_JPEG_RINGS_FW) + + j]); } + gpu_metrics->vcn_busy[i] = SMUQ10_ROUND(metrics->VcnBusy[inst]); + } - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - inst = GET_INST(GC, k); - gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = - SMUQ10_ROUND(metrics->GfxBusy[inst]); - gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = - SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); - if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { - gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = - SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]); - } - idx++; - } + for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) { + inst = GET_INST(GC, i); + gpu_metrics->gfx_busy_inst[i] = + SMUQ10_ROUND(metrics->GfxBusy[inst]); + gpu_metrics->gfx_busy_acc[i] = + SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); + if (smu_v13_0_6_cap_supported(smu, + SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics + ->gfx_below_host_limit_ppt_acc[i] = SMUQ10_ROUND( + metrics->GfxclkBelowHostLimitPptAcc[inst]); + gpu_metrics + ->gfx_below_host_limit_thm_acc[i] = SMUQ10_ROUND( + metrics->GfxclkBelowHostLimitThmAcc[inst]); + gpu_metrics->gfx_low_utilization_acc[i] = SMUQ10_ROUND( + metrics->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->gfx_below_host_limit_total_acc + [i] = SMUQ10_ROUND( + metrics->GfxclkBelowHostLimitTotalAcc[inst]); + }; } gpu_metrics->xgmi_link_width = metrics->XgmiWidth; gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate; gpu_metrics->firmware_timestamp = metrics->Timestamp; - - *table = (void *)gpu_metrics; - - return sizeof(*gpu_metrics); } const struct smu_temp_funcs smu_v13_0_12_temp_funcs = { .temp_metrics_is_supported = smu_v13_0_12_is_temp_metrics_supported, .get_temp_metrics = smu_v13_0_12_get_temp_metrics, }; + +static int smu_v13_0_12_get_ras_table_version(struct amdgpu_device *adev, + uint32_t *table_version) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetRASTableVersion, 0, table_version); +} + +static int smu_v13_0_12_get_badpage_count(struct amdgpu_device *adev, uint32_t *count, + uint32_t timeout) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint64_t end, now; + int ret = 0; + + now = (uint64_t)ktime_to_ms(ktime_get()); + end = now + timeout; + do { + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageCount, 0, count); + /* eeprom is not ready */ + if (ret != -EBUSY) + return ret; + mdelay(10); + now = (uint64_t)ktime_to_ms(ktime_get()); + } while (now < end); + + dev_err(adev->dev, + "smu get bad page count timeout!\n"); + return ret; +} + +static int smu_v13_0_12_set_timestamp(struct amdgpu_device *adev, uint64_t timestamp) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetTimestamp, (uint32_t)timestamp, 0); +} + +static int smu_v13_0_12_get_timestamp(struct amdgpu_device *adev, + uint16_t index, uint64_t *timestamp) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t temp; + int ret; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetTimestamp, index, &temp); + if (!ret) + *timestamp = temp; + + 
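Note on the bad-page count query above: the firmware legitimately answers -EBUSY while its RAS EEPROM copy is not ready, so the driver retries until the caller-supplied timeout (in ms) expires, sleeping 10 ms between attempts; a matching smu_cmn change later in this patch stops logging that busy response as an error. A reduced, hedged sketch of the retry loop follows; query_fw(), now_ms() and sleep_ms() are hypothetical stand-ins, not driver symbols.

#include <errno.h>
#include <stdint.h>

/* Hypothetical stand-ins (illustration only, not driver functions). */
extern int query_fw(uint32_t *count);     /* sends the GetBadPageCount message */
extern uint64_t now_ms(void);             /* monotonic millisecond clock */
extern void sleep_ms(unsigned int ms);    /* short delay between retries */

static int poll_badpage_count(uint32_t *count, unsigned int timeout_ms)
{
	uint64_t end = now_ms() + timeout_ms;
	int ret;

	do {
		ret = query_fw(count);
		if (ret != -EBUSY)
			return ret;       /* success, or a real error */
		sleep_ms(10);             /* EEPROM not ready yet, back off */
	} while (now_ms() < end);

	/* Timed out: the driver logs the failure and hands the last (busy)
	 * status back to the caller. */
	return ret;
}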
return ret; +} + +static int smu_v13_0_12_get_badpage_ipid(struct amdgpu_device *adev, + uint16_t index, uint64_t *ipid) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t temp_arg, temp_ipid_lo, temp_ipid_high; + int ret; + + temp_arg = index | (1 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_lo); + if (ret) + return ret; + + temp_arg = index | (2 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_high); + if (!ret) + *ipid = (uint64_t)temp_ipid_high << 32 | temp_ipid_lo; + return ret; +} + +static int smu_v13_0_12_erase_ras_table(struct amdgpu_device *adev, + uint32_t *result) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_EraseRasTable, 0, result); +} + +static int smu_v13_0_12_get_badpage_mca_addr(struct amdgpu_device *adev, + uint16_t index, uint64_t *mca_addr) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t temp_arg, temp_addr_lo, temp_addr_high; + int ret; + + temp_arg = index | (1 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_lo); + if (ret) + return ret; + + temp_arg = index | (2 << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_high); + if (!ret) + *mca_addr = (uint64_t)temp_addr_high << 32 | temp_addr_lo; + return ret; +} + +static const struct ras_eeprom_smu_funcs smu_v13_0_12_eeprom_smu_funcs = { + .get_ras_table_version = smu_v13_0_12_get_ras_table_version, + .get_badpage_count = smu_v13_0_12_get_badpage_count, + .get_badpage_mca_addr = smu_v13_0_12_get_badpage_mca_addr, + .set_timestamp = smu_v13_0_12_set_timestamp, + .get_timestamp = smu_v13_0_12_get_timestamp, + .get_badpage_ipid = smu_v13_0_12_get_badpage_ipid, + .erase_ras_table = smu_v13_0_12_erase_ras_table, +}; + +static void smu_v13_0_12_ras_smu_feature_flags(struct amdgpu_device *adev, uint64_t *flags) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + + if (!flags) + return; + + *flags = 0ULL; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(RAS_EEPROM))) + *flags |= RAS_SMU_FEATURE_BIT__RAS_EEPROM; + +} + +const struct ras_smu_drv smu_v13_0_12_ras_smu_drv = { + .smu_eeprom_funcs = &smu_v13_0_12_eeprom_smu_funcs, + .ras_smu_feature_flags = smu_v13_0_12_ras_smu_feature_flags, +}; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index b081ae3e8f43..6908f9930f16 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -497,11 +497,12 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu, static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -565,7 +566,7 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v13_0_4_read_sensor(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index f5db181ef489..4576bf008b22 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -861,11 +861,12 @@ static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu, static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min = 0, max = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -928,7 +929,7 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 285cf7979693..44e1cd821eec 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -356,6 +356,9 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu) if (fw_ver > 0x04560900) smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET)); + if (fw_ver >= 0x04560D00) + smu_v13_0_6_cap_set(smu, SMU_CAP(FAST_PPT)); + if (fw_ver >= 0x04560700) { if (fw_ver >= 0x04560900) { smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS)); @@ -450,7 +453,8 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu) ((pgm == 4) && (fw_ver >= 0x4557000))) smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); - if ((pgm == 0) && (fw_ver >= 0x00558200)) + if ((pgm == 0 && fw_ver >= 0x00558200) || + (pgm == 7 && fw_ver >= 0x07551400)) smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET)); } @@ -548,7 +552,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; - void *gpu_metrics_table __free(kfree) = NULL; + struct smu_v13_0_6_gpu_metrics *gpu_metrics; void *driver_pptable __free(kfree) = NULL; void *metrics_table __free(kfree) = NULL; struct amdgpu_device *adev = smu->adev; @@ -578,24 +582,28 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8); - gpu_metrics_table = - kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); - if (!gpu_metrics_table) - return -ENOMEM; - driver_pptable = kzalloc(sizeof(struct PPTable_t), GFP_KERNEL); if (!driver_pptable) return -ENOMEM; + ret = smu_table_cache_init(smu, SMU_TABLE_SMU_METRICS, + sizeof(struct smu_v13_0_6_gpu_metrics), 1); + if (ret) + return ret; + + gpu_metrics = (struct smu_v13_0_6_gpu_metrics + *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer); + + smu_v13_0_6_gpu_metrics_init(gpu_metrics, 1, 9); if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) { ret = smu_v13_0_12_tables_init(smu); - if (ret) + if (ret) { + smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS); return ret; + } } - smu_table->gpu_metrics_table = no_free_ptr(gpu_metrics_table); smu_table->metrics_table = no_free_ptr(metrics_table); smu_table->driver_pptable = no_free_ptr(driver_pptable); @@ -731,6 +739,7 @@ static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu) { if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) smu_v13_0_12_tables_fini(smu); + smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS); return smu_v13_0_fini_smc_tables(smu); } @@ -765,7 +774,7 @@ int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, return 
ret; } - amdgpu_asic_invalidate_hdp(smu->adev, NULL); + amdgpu_hdp_invalidate(smu->adev, NULL); memcpy(smu_table->metrics_table, table->cpu_addr, table_size); smu_table->metrics_time = jiffies; @@ -844,12 +853,23 @@ int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu) return ret; } - amdgpu_asic_invalidate_hdp(smu->adev, NULL); + amdgpu_hdp_invalidate(smu->adev, NULL); memcpy(smu_table->metrics_table, table->cpu_addr, table_size); return 0; } +static void smu_v13_0_6_update_caps(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) && + !pptable->PPT1Max) + smu_v13_0_6_cap_clear(smu, SMU_CAP(FAST_PPT)); +} + static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; @@ -866,8 +886,12 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) uint8_t max_width; if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && - smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) - return smu_v13_0_12_setup_driver_pptable(smu); + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + ret = smu_v13_0_12_setup_driver_pptable(smu); + if (ret) + return ret; + goto out; + } /* Store one-time values in driver PPTable */ if (!pptable->Init) { @@ -947,7 +971,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) smu_v13_0_6_fill_static_metrics_table(smu, static_metrics); } } - +out: + smu_v13_0_6_update_caps(smu); return 0; } @@ -1393,7 +1418,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size, return -EINVAL; if (curr_clk < SMU_13_0_6_DSCLK_THRESHOLD) { - size = sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk); + size += sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk); for (i = 0; i < clocks.num_levels; i++) size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i, clocks.data[i].clocks_in_khz / @@ -1428,7 +1453,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size, static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, enum smu_clk_type type, char *buf) { - int now, size = 0; + int now, size = 0, start_offset = 0; int ret = 0; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; struct smu_13_0_dpm_table *single_dpm_table; @@ -1437,10 +1462,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, uint32_t min_clk, max_clk; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } dpm_context = smu_dpm->dpm_context; @@ -1512,9 +1538,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.uclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "mclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "mclk"); + if (ret < 0) + return ret; + size += ret; + break; case SMU_SOCCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now); @@ -1526,9 +1556,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.soc_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "socclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "socclk"); + if (ret < 0) + 
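The start_offset bookkeeping introduced in this print_clk_levels() (and repeated in every other print_clk_levels() touched by this series) exists because smu_cmn_get_sysfs_buf() may leave size at a non-zero offset, presumably so callers can concatenate the output of several clock types into one page; each implementation must therefore report only the bytes it added itself. A minimal user-space sketch of the arithmetic follows; fill_header() and emit_clk_levels() are hypothetical stand-ins, not driver functions.

#include <stdio.h>

#define BUF_SZ 4096

/* Stand-in for smu_cmn_get_sysfs_buf(): the buffer may already hold text,
 * so size starts at a non-zero offset. */
static void fill_header(char *buf, int *size)
{
	*size += snprintf(buf + *size, BUF_SZ - *size, "OD_SCLK:\n");
}

static int emit_clk_levels(char *buf, int size)
{
	int start_offset;

	fill_header(buf, &size);
	start_offset = size;

	size += snprintf(buf + size, BUF_SZ - size, "0: 500Mhz *\n");
	size += snprintf(buf + size, BUF_SZ - size, "1: 1100Mhz\n");

	/* Report only what this function emitted; bytes before start_offset
	 * already belong to the caller's running total. */
	return size - start_offset;
}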
return ret; + size += ret; + break; case SMU_FCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK, &now); @@ -1540,9 +1574,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.fclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "fclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "fclk"); + if (ret < 0) + return ret; + size += ret; + break; case SMU_VCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK, &now); @@ -1554,9 +1592,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.vclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "vclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "vclk"); + if (ret < 0) + return ret; + size += ret; + break; case SMU_DCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK, &now); @@ -1568,14 +1610,18 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, single_dpm_table = &(dpm_context->dpm_tables.dclk_table); - return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, - now, "dclk"); + ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table, + now, "dclk"); + if (ret < 0) + return ret; + size += ret; + break; default: break; } - return size; + return size - start_offset; } static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max, @@ -1845,7 +1891,7 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu, if (current_power_limit) *current_power_limit = power_limit; if (default_power_limit) - *default_power_limit = power_limit; + *default_power_limit = pptable->MaxSocketPowerLimit; if (max_power_limit) { *max_power_limit = pptable->MaxSocketPowerLimit; @@ -1860,9 +1906,66 @@ static int smu_v13_0_6_set_power_limit(struct smu_context *smu, enum smu_ppt_limit_type limit_type, uint32_t limit) { + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + int ret; + + if (limit_type == SMU_FAST_PPT_LIMIT) { + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT))) + return -EOPNOTSUPP; + if (limit > pptable->PPT1Max || limit < pptable->PPT1Min) { + dev_err(smu->adev->dev, + "New power limit (%d) should be between min %d max %d\n", + limit, pptable->PPT1Min, pptable->PPT1Max); + return -EINVAL; + } + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetFastPptLimit, + limit, NULL); + if (ret) + dev_err(smu->adev->dev, "Set fast PPT limit failed!\n"); + return ret; + } + return smu_v13_0_set_power_limit(smu, limit_type, limit); } +static int smu_v13_0_6_get_ppt_limit(struct smu_context *smu, + uint32_t *ppt_limit, + enum smu_ppt_limit_type type, + enum smu_ppt_limit_level level) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + int ret = 0; + + if (type == SMU_FAST_PPT_LIMIT) { + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT))) + return -EOPNOTSUPP; + switch (level) { + case SMU_PPT_LIMIT_MAX: + *ppt_limit = pptable->PPT1Max; + break; + case SMU_PPT_LIMIT_CURRENT: + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPptLimit, ppt_limit); + if (ret) + dev_err(smu->adev->dev, "Get fast PPT limit failed!\n"); + break; + case SMU_PPT_LIMIT_DEFAULT: + *ppt_limit = pptable->PPT1Default; + break; + case SMU_PPT_LIMIT_MIN: + *ppt_limit = pptable->PPT1Min; + break; 
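The fast-PPT handling here relies on the earlier smu_set_power_limit() change that makes limit_type an explicit argument; before this series the type rode in bits 31:24 of the limit value (see the removed limit >> 24 and (1<<24)-1 masking, and the removed SMU_FAST_PPT_LIMIT << 24 strip in vangogh_set_power_limit()). A small, hedged sketch of that retired encoding, shown only to clarify what the new two-argument convention replaces; split_legacy_limit() is illustrative, not a driver function.

#include <stdint.h>

/* Legacy packing: limit type in bits 31:24, watt value in bits 23:0.
 * The new code simply passes the two values as separate arguments and
 * records the result in user_dpm_profile.power_limits[limit_type]. */
static void split_legacy_limit(uint32_t packed, uint32_t *type, uint32_t *watts)
{
	*type  = packed >> 24;
	*watts = packed & ((1u << 24) - 1);
}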
+ default: + return -EOPNOTSUPP; + } + return ret; + } + return -EOPNOTSUPP; +} + static int smu_v13_0_6_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -2383,7 +2486,7 @@ static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu, memcpy(table->cpu_addr, table_data, table_size); /* Flush hdp cache */ - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction, NULL); @@ -2627,7 +2730,7 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, { const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; int version = smu_v13_0_6_get_metrics_version(smu); - struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct smu_v13_0_6_partition_metrics *xcp_metrics; MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; struct amdgpu_device *adev = smu->adev; int ret, inst, i, j, k, idx; @@ -2647,8 +2750,8 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, if (i == adev->xcp_mgr->num_xcps) return -EINVAL; - xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table; - smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table; + smu_v13_0_6_partition_metrics_init(xcp_metrics, 1, 1); metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); if (!metrics_v0) @@ -2740,18 +2843,16 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_8 *gpu_metrics = - (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; + struct smu_table *tables = smu_table->tables; + struct smu_v13_0_6_gpu_metrics *gpu_metrics; int version = smu_v13_0_6_get_metrics_version(smu); MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; - int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; + int ret = 0, xcc_id, inst, i, j; MetricsTableV1_t *metrics_v1; MetricsTableV2_t *metrics_v2; - struct amdgpu_xcp *xcp; u16 link_width_level; u8 num_jpeg_rings; - u32 inst_mask; bool per_inst; metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); @@ -2759,16 +2860,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table if (ret) return ret; - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == - IP_VERSION(13, 0, 12) && - smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) - return smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0); + metrics_v2 = (MetricsTableV2_t *)metrics_v0; + gpu_metrics = (struct smu_v13_0_6_gpu_metrics + *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer); + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0, + gpu_metrics); + goto fill; + } metrics_v1 = (MetricsTableV1_t *)metrics_v0; metrics_v2 = (MetricsTableV2_t *)metrics_v0; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); - gpu_metrics->temperature_hotspot = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)); /* Individual HBM stack temperature is not reported */ @@ -2889,55 +2994,49 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->xgmi_link_status[j] = ret; } - gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; - per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); 
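From here on, both gpu-metrics paths in this patch stop grouping the busy counters by partition (the old xcp_stats[] blocks) and instead fill flat per-instance arrays in the new smu_v13_0_6_gpu_metrics structure: VCN and XCC counters are indexed directly by instance, and JPEG counters use instance * num_jpeg_rings + ring. A hedged indexing sketch; jpeg_busy_at() is illustrative only, not a driver helper.

#include <stdint.h>

/* One contiguous row of num_jpeg_rings entries per JPEG instance. */
static uint16_t jpeg_busy_at(const uint16_t *jpeg_busy, unsigned int inst,
			     unsigned int ring, unsigned int num_jpeg_rings)
{
	return jpeg_busy[inst * num_jpeg_rings + ring];
}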
num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; - for_each_xcp(adev->xcp_mgr, xcp, i) { - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - /* Both JPEG and VCN has same instances */ - inst = GET_INST(VCN, k); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + inst = GET_INST(JPEG, i); + for (j = 0; j < num_jpeg_rings; ++j) + gpu_metrics->jpeg_busy[(i * num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD( + JpegBusy, + version)[(inst * num_jpeg_rings) + j]); + } + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + inst = GET_INST(VCN, i); + gpu_metrics->vcn_busy[i] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); + } - for (j = 0; j < num_jpeg_rings; ++j) { - gpu_metrics->xcp_stats[i].jpeg_busy - [(idx * num_jpeg_rings) + j] = - SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, version) - [(inst * num_jpeg_rings) + j]); - } - gpu_metrics->xcp_stats[i].vcn_busy[idx] = - SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); - idx++; - - } - - if (per_inst) { - amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); - idx = 0; - for_each_inst(k, inst_mask) { - inst = GET_INST(GC, k); - gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] = - SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); - gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = - SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc, - version)[inst]); - if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { - gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkLowUtilizationAcc[inst]); - gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = - SMUQ10_ROUND - (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]); - } - idx++; + if (per_inst) { + for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) { + inst = GET_INST(GC, i); + gpu_metrics->gfx_busy_inst[i] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); + gpu_metrics->gfx_busy_acc[i] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusyAcc, + version)[inst]); + if (smu_v13_0_6_cap_supported( + smu, SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics->gfx_below_host_limit_ppt_acc + [i] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitPptAcc + [inst]); + gpu_metrics->gfx_below_host_limit_thm_acc + [i] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitThmAcc + [inst]); + gpu_metrics->gfx_low_utilization_acc + [i] = SMUQ10_ROUND( + metrics_v0 + ->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->gfx_below_host_limit_total_acc + [i] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitTotalAcc + [inst]); } } } @@ -2947,7 +3046,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version); - *table = (void *)gpu_metrics; +fill: + *table = tables[SMU_TABLE_SMU_METRICS].cache.buffer; return sizeof(*gpu_metrics); } @@ -3226,6 +3326,24 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask) return ret; } +static int smu_v13_0_6_ras_send_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t param, uint32_t *read_arg) +{ + int ret; + + switch (msg) { + case SMU_MSG_QueryValidMcaCount: + case SMU_MSG_QueryValidMcaCeCount: + case SMU_MSG_McaBankDumpDW: + case SMU_MSG_McaBankCeDumpDW: + case 
SMU_MSG_ClearMcaOnRead: + ret = smu_cmn_send_smc_msg_with_param(smu, msg, param, read_arg); + break; + default: + ret = -EPERM; + } + + return ret; +} static int smu_v13_0_6_post_init(struct smu_context *smu) { @@ -3863,6 +3981,29 @@ static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu) == IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL; } +static int smu_v13_0_6_get_ras_smu_drv(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv) +{ + if (!ras_smu_drv) + return -EINVAL; + + if (amdgpu_sriov_vf(smu->adev)) + return -EOPNOTSUPP; + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_HROM_EN_BIT)) + smu_v13_0_6_cap_set(smu, SMU_CAP(RAS_EEPROM)); + + switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 12): + *ras_smu_drv = &smu_v13_0_12_ras_smu_drv; + break; + default: + *ras_smu_drv = NULL; + break; + } + + return 0; +} + static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -3894,6 +4035,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .get_enabled_mask = smu_v13_0_6_get_enabled_mask, .feature_is_enabled = smu_cmn_feature_is_enabled, .set_power_limit = smu_v13_0_6_set_power_limit, + .get_ppt_limit = smu_v13_0_6_get_ppt_limit, .set_xgmi_pstate = smu_v13_0_set_xgmi_pstate, .register_irq_handler = smu_v13_0_6_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, @@ -3921,6 +4063,8 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .reset_sdma = smu_v13_0_6_reset_sdma, .dpm_reset_vcn = smu_v13_0_6_reset_vcn, .post_init = smu_v13_0_6_post_init, + .ras_send_msg = smu_v13_0_6_ras_send_msg, + .get_ras_smu_drv = smu_v13_0_6_get_ras_smu_drv, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index 7ef5f3e66c27..6cbdd7c5ded9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -50,6 +50,9 @@ struct PPTable_t { uint32_t MinLclkDpmRange; uint64_t PublicSerialNumber_AID; uint32_t MaxNodePowerLimit; + uint32_t PPT1Max; + uint32_t PPT1Min; + uint32_t PPT1Default; bool Init; }; @@ -72,9 +75,18 @@ enum smu_v13_0_6_caps { SMU_CAP(PLDM_VERSION), SMU_CAP(TEMP_METRICS), SMU_CAP(NPM_METRICS), + SMU_CAP(RAS_EEPROM), + SMU_CAP(FAST_PPT), SMU_CAP(ALL), }; +#define SMU_13_0_6_NUM_XGMI_LINKS 8 +#define SMU_13_0_6_MAX_GFX_CLKS 8 +#define SMU_13_0_6_MAX_CLKS 4 +#define SMU_13_0_6_MAX_XCC 8 +#define SMU_13_0_6_MAX_VCN 4 +#define SMU_13_0_6_MAX_JPEG 40 + extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); @@ -87,7 +99,6 @@ size_t smu_v13_0_12_get_system_metrics_size(void); int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value); -ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics); ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics); @@ -99,4 +110,156 @@ int smu_v13_0_12_get_npm_data(struct smu_context *smu, extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; extern 
const struct smu_temp_funcs smu_v13_0_12_temp_funcs; +extern const struct ras_smu_drv smu_v13_0_12_ras_smu_drv; + +#if defined(SWSMU_CODE_LAYER_L2) +#include "smu_cmn.h" + +/* SMUv 13.0.6 GPU metrics*/ +#define SMU_13_0_6_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \ + SMU_SCALAR(SMU_MATTR(TEMPERATURE_HOTSPOT), SMU_MUNIT(TEMP_1), \ + SMU_MTYPE(U16), temperature_hotspot); \ + SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1), \ + SMU_MTYPE(U16), temperature_mem); \ + SMU_SCALAR(SMU_MATTR(TEMPERATURE_VRSOC), SMU_MUNIT(TEMP_1), \ + SMU_MTYPE(U16), temperature_vrsoc); \ + SMU_SCALAR(SMU_MATTR(CURR_SOCKET_POWER), SMU_MUNIT(POWER_1), \ + SMU_MTYPE(U16), curr_socket_power); \ + SMU_SCALAR(SMU_MATTR(AVERAGE_GFX_ACTIVITY), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U16), average_gfx_activity); \ + SMU_SCALAR(SMU_MATTR(AVERAGE_UMC_ACTIVITY), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U16), average_umc_activity); \ + SMU_SCALAR(SMU_MATTR(MEM_MAX_BANDWIDTH), SMU_MUNIT(BW_1), \ + SMU_MTYPE(U64), mem_max_bandwidth); \ + SMU_SCALAR(SMU_MATTR(ENERGY_ACCUMULATOR), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), energy_accumulator); \ + SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \ + SMU_MTYPE(U64), system_clock_counter); \ + SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), accumulation_counter); \ + SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), prochot_residency_acc); \ + SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), ppt_residency_acc); \ + SMU_SCALAR(SMU_MATTR(SOCKET_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), socket_thm_residency_acc); \ + SMU_SCALAR(SMU_MATTR(VR_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), vr_thm_residency_acc); \ + SMU_SCALAR(SMU_MATTR(HBM_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), hbm_thm_residency_acc); \ + SMU_SCALAR(SMU_MATTR(GFXCLK_LOCK_STATUS), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), gfxclk_lock_status); \ + SMU_SCALAR(SMU_MATTR(PCIE_LINK_WIDTH), SMU_MUNIT(NONE), \ + SMU_MTYPE(U16), pcie_link_width); \ + SMU_SCALAR(SMU_MATTR(PCIE_LINK_SPEED), SMU_MUNIT(SPEED_2), \ + SMU_MTYPE(U16), pcie_link_speed); \ + SMU_SCALAR(SMU_MATTR(XGMI_LINK_WIDTH), SMU_MUNIT(NONE), \ + SMU_MTYPE(U16), xgmi_link_width); \ + SMU_SCALAR(SMU_MATTR(XGMI_LINK_SPEED), SMU_MUNIT(SPEED_1), \ + SMU_MTYPE(U16), xgmi_link_speed); \ + SMU_SCALAR(SMU_MATTR(GFX_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), gfx_activity_acc); \ + SMU_SCALAR(SMU_MATTR(MEM_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), mem_activity_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_ACC), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U64), pcie_bandwidth_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_INST), SMU_MUNIT(BW_1), \ + SMU_MTYPE(U64), pcie_bandwidth_inst); \ + SMU_SCALAR(SMU_MATTR(PCIE_L0_TO_RECOV_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), pcie_l0_to_recov_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), pcie_replay_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_ROVER_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), pcie_replay_rover_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_NAK_SENT_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), pcie_nak_sent_count_acc); \ + SMU_SCALAR(SMU_MATTR(PCIE_NAK_RCVD_COUNT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U32), pcie_nak_rcvd_count_acc); \ + SMU_ARRAY(SMU_MATTR(XGMI_READ_DATA_ACC), SMU_MUNIT(DATA_1), \ + SMU_MTYPE(U64), xgmi_read_data_acc, \ + SMU_13_0_6_NUM_XGMI_LINKS); \ + SMU_ARRAY(SMU_MATTR(XGMI_WRITE_DATA_ACC), 
SMU_MUNIT(DATA_1), \ + SMU_MTYPE(U64), xgmi_write_data_acc, \ + SMU_13_0_6_NUM_XGMI_LINKS); \ + SMU_ARRAY(SMU_MATTR(XGMI_LINK_STATUS), SMU_MUNIT(NONE), \ + SMU_MTYPE(U16), xgmi_link_status, \ + SMU_13_0_6_NUM_XGMI_LINKS); \ + SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \ + SMU_MTYPE(U64), firmware_timestamp); \ + SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_GFX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_uclk); \ + SMU_SCALAR(SMU_MATTR(PCIE_LC_PERF_OTHER_END_RECOVERY), \ + SMU_MUNIT(NONE), SMU_MTYPE(U32), \ + pcie_lc_perf_other_end_recovery); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + jpeg_busy, SMU_13_0_6_MAX_JPEG); \ + SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + vcn_busy, SMU_13_0_6_MAX_VCN); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \ + gfx_busy_acc, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_low_utilization_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \ + SMU_13_0_6_MAX_XCC); + +DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_gpu_metrics, SMU_13_0_6_METRICS_FIELDS); +void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, + void *smu_metrics, + struct smu_v13_0_6_gpu_metrics *gpu_metrics); + +#define SMU_13_0_6_PARTITION_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \ + SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \ + SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \ + SMU_MTYPE(U16), current_uclk); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \ + SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + jpeg_busy, SMU_13_0_6_MAX_JPEG); \ + SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \ + vcn_busy, SMU_13_0_6_MAX_VCN); \ + SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \ + gfx_busy_acc, SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \ + SMU_13_0_6_MAX_XCC); \ + 
SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_low_utilization_acc, \ + SMU_13_0_6_MAX_XCC); \ + SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \ + SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \ + SMU_13_0_6_MAX_XCC); + +DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_partition_metrics, + SMU_13_0_6_PARTITION_METRICS_FIELDS); + +#endif /* SWSMU_CODE_LAYER_L2 */ + #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c96fa5e49ed6..a3fc35b9011e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -1184,15 +1184,16 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, struct smu_13_0_dpm_table *single_dpm_table; struct smu_13_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1523,7 +1524,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 73b4506ef5a8..5d7e671fa3c3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1041,12 +1041,13 @@ static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu, static int yellow_carp_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; uint32_t clk_limit = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -1111,7 +1112,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } static int yellow_carp_force_clk_levels(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index fe00c84b1cc6..b1bd946d8e30 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -1132,11 +1132,12 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, ret = 0, size = 0; + int i, idx, ret = 0, size = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -1202,7 +1203,7 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, diff --git 
a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 086501cc5213..2cea688c604f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -1056,15 +1056,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, struct smu_14_0_dpm_table *single_dpm_table; struct smu_14_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1374,7 +1375,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v14_0_2_force_clk_levels(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index a8961a8f5c42..4040ff926544 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -164,9 +164,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, msg_index, param, message); break; case SMU_RESP_BUSY_OTHER: - dev_err_ratelimited(adev->dev, - "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s", - msg_index, param, message); + /* It is normal for SMU_MSG_GetBadPageCount to return busy + * so don't print error at this case. + */ + if (msg != SMU_MSG_GetBadPageCount) + dev_err_ratelimited(adev->dev, + "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s", + msg_index, param, message); break; case SMU_RESP_DEBUG_END: dev_err_ratelimited(adev->dev, @@ -980,7 +984,7 @@ int smu_cmn_update_table(struct smu_context *smu, * Flush hdp cache: to guard the content seen by * GPU is consitent with CPU. */ - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_hdp_flush(adev, NULL); } ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ? 
@@ -992,7 +996,7 @@ int smu_cmn_update_table(struct smu_context *smu, return ret; if (!drv2smu) { - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_hdp_invalidate(adev, NULL); memcpy(table_data, table->cpu_addr, table_size); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 0ae91c8b6d72..8d7c4814c68f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -202,5 +202,72 @@ void smu_cmn_get_backend_workload_mask(struct smu_context *smu, u32 workload_mask, u32 *backend_workload_mask); +/*SMU gpu metrics */ + +/* Attribute ID mapping */ +#define SMU_MATTR(X) AMDGPU_METRICS_ATTR_ID_##X +/* Type ID mapping */ +#define SMU_MTYPE(X) AMDGPU_METRICS_TYPE_##X +/* Unit ID mapping */ +#define SMU_MUNIT(X) AMDGPU_METRICS_UNIT_##X + +/* Map TYPEID to C type */ +#define SMU_CTYPE(TYPEID) SMU_CTYPE_##TYPEID + +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U8 u8 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S8 s8 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U16 u16 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S16 s16 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U32 u32 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S32 s32 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U64 u64 +#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S64 s64 + +/* struct members */ +#define SMU_METRICS_SCALAR(ID, UNIT, TYPEID, NAME) \ + u64 NAME##_ftype; \ + SMU_CTYPE(TYPEID) NAME + +#define SMU_METRICS_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \ + u64 NAME##_ftype; \ + SMU_CTYPE(TYPEID) NAME[SIZE] + +/* Init functions for scalar/array fields - init to 0xFFs */ +#define SMU_METRICS_INIT_SCALAR(ID, UNIT, TYPEID, NAME) \ + do { \ + obj->NAME##_ftype = \ + AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, 1); \ + obj->NAME = (SMU_CTYPE(TYPEID)) ~0; \ + count++; \ + } while (0) + +#define SMU_METRICS_INIT_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \ + do { \ + obj->NAME##_ftype = \ + AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, SIZE); \ + memset(obj->NAME, 0xFF, sizeof(obj->NAME)); \ + count++; \ + } while (0) + +/* Declare Metrics Class and Template object */ +#define DECLARE_SMU_METRICS_CLASS(CLASSNAME, SMU_METRICS_FIELD_LIST) \ + struct __packed CLASSNAME { \ + struct metrics_table_header header; \ + int attr_count; \ + SMU_METRICS_FIELD_LIST(SMU_METRICS_SCALAR, SMU_METRICS_ARRAY); \ + }; \ + static inline void CLASSNAME##_init(struct CLASSNAME *obj, \ + uint8_t frev, uint8_t crev) \ + { \ + int count = 0; \ + memset(obj, 0xFF, sizeof(*obj)); \ + obj->header.format_revision = frev; \ + obj->header.content_revision = crev; \ + obj->header.structure_size = sizeof(*obj); \ + SMU_METRICS_FIELD_LIST(SMU_METRICS_INIT_SCALAR, \ + SMU_METRICS_INIT_ARRAY) \ + obj->attr_count = count; \ + } + #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index c09ecf1a68a0..34f6b4b1c3ba 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -100,6 +100,7 @@ #define smu_is_asic_wbrf_supported(smu) smu_ppt_funcs(is_asic_wbrf_supported, false, smu) #define smu_enable_uclk_shadow(smu, enable) smu_ppt_funcs(enable_uclk_shadow, 0, smu, enable) #define smu_set_wbrf_exclusion_ranges(smu, freq_band_range) smu_ppt_funcs(set_wbrf_exclusion_ranges, -EOPNOTSUPP, smu, freq_band_range) +#define smu_get_ras_smu_drv(smu, ras_smu_drv) smu_ppt_funcs(get_ras_smu_drv, -EOPNOTSUPP, smu, ras_smu_drv) #endif #endif diff --git a/drivers/gpu/drm/amd/ras/Makefile b/drivers/gpu/drm/amd/ras/Makefile new file mode 100644 index 
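To make the metrics X-macro machinery above concrete, here is a minimal sketch of how a field list is written and what DECLARE_SMU_METRICS_CLASS() turns it into. The macros are the ones defined above; the field-list name, member names and frev/crev values are invented for illustration, with attribute IDs borrowed from the partition-metrics list earlier in this series.

#define DEMO_METRICS_FIELDS(SCALAR, ARRAY) \
	SCALAR(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \
	       SMU_MTYPE(U64), demo_acc); \
	ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \
	      SMU_MTYPE(U64), demo_acc_per_xcc, SMU_13_0_6_MAX_XCC);

DECLARE_SMU_METRICS_CLASS(demo_metrics, DEMO_METRICS_FIELDS);

/* The declaration above expands, roughly, to:
 *
 *	struct __packed demo_metrics {
 *		struct metrics_table_header header;
 *		int attr_count;
 *		u64 demo_acc_ftype;		u64 demo_acc;
 *		u64 demo_acc_per_xcc_ftype;	u64 demo_acc_per_xcc[SMU_13_0_6_MAX_XCC];
 *	};
 *
 * plus demo_metrics_init(obj, frev, crev), which fills every data field with
 * 0xFF, encodes unit/type/attr-id/element-count into each *_ftype word and
 * records the number of attributes in attr_count.
 */

A caller would then declare a struct demo_metrics and call demo_metrics_init() on it before filling in the live values.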
000000000000..bbdaba811d34 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/Makefile @@ -0,0 +1,34 @@ +# +# Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +ifeq ($(AMD_GPU_RAS_MGR),) + AMD_GPU_RAS_MGR := ras_mgr +endif + +subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/rascore +subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/$(AMD_GPU_RAS_MGR) + +RAS_LIBS = $(AMD_GPU_RAS_MGR) rascore + +AMD_RAS = $(addsuffix /Makefile, $(addprefix $(AMD_GPU_RAS_FULL_PATH)/,$(RAS_LIBS))) + +include $(AMD_RAS) + diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/Makefile b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile new file mode 100644 index 000000000000..5e5a2cfa4068 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile @@ -0,0 +1,33 @@ +# Copyright 2025 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
+ +RAS_MGR_FILES = amdgpu_ras_sys.o \ + amdgpu_ras_mgr.o \ + amdgpu_ras_eeprom_i2c.o \ + amdgpu_ras_mp1_v13_0.o \ + amdgpu_ras_cmd.o \ + amdgpu_ras_process.o \ + amdgpu_ras_nbio_v7_9.o + + +RAS_MGR = $(addprefix $(AMD_GPU_RAS_PATH)/ras_mgr/, $(RAS_MGR_FILES)) + +AMD_GPU_RAS_FILES += $(RAS_MGR) + diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c new file mode 100644 index 000000000000..78419b7f7729 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include "amdgpu.h" +#include "amdgpu_ras.h" +#include "ras_sys.h" +#include "amdgpu_ras_cmd.h" +#include "amdgpu_ras_mgr.h" + +/* inject address is 52 bits */ +#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) + +#define AMDGPU_RAS_TYPE_RASCORE 0x1 +#define AMDGPU_RAS_TYPE_AMDGPU 0x2 +#define AMDGPU_RAS_TYPE_VF 0x3 + +static int amdgpu_ras_trigger_error_prepare(struct ras_core_context *ras_core, + struct ras_cmd_inject_error_req *block_info) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret; + + if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) { + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + RAS_DEV_WARN(adev, "Failed to disallow df cstate"); + + ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DISALLOW); + if (ret && (ret != -EOPNOTSUPP)) + RAS_DEV_WARN(adev, "Failed to disallow XGMI power down"); + } + + return 0; +} + +static int amdgpu_ras_trigger_error_end(struct ras_core_context *ras_core, + struct ras_cmd_inject_error_req *block_info) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret; + + if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) { + if (amdgpu_ras_intr_triggered()) + return 0; + + ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DEFAULT); + if (ret && (ret != -EOPNOTSUPP)) + RAS_DEV_WARN(adev, "Failed to allow XGMI power down"); + + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + RAS_DEV_WARN(adev, "Failed to allow df cstate"); + } + + return 0; +} + +static uint64_t local_addr_to_xgmi_global_addr(struct ras_core_context *ras_core, + uint64_t addr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi; + + return (addr + xgmi->physical_node_id * 
xgmi->node_segment_size); +} + +static int amdgpu_ras_inject_error(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ras_cmd_inject_error_req *req = + (struct ras_cmd_inject_error_req *)cmd->input_buff_raw; + int ret = RAS_CMD__ERROR_GENERIC; + + if (req->block_id == RAS_BLOCK_ID__UMC) { + if (amdgpu_ras_mgr_check_retired_addr(adev, req->address)) { + RAS_DEV_WARN(ras_core->dev, + "RAS WARN: inject: 0x%llx has already been marked as bad!\n", + req->address); + return RAS_CMD__ERROR_ACCESS_DENIED; + } + + if ((req->address >= adev->gmc.mc_vram_size && + adev->gmc.mc_vram_size) || + (req->address >= RAS_UMC_INJECT_ADDR_LIMIT)) { + RAS_DEV_WARN(adev, "RAS WARN: input address 0x%llx is invalid.", + req->address); + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + } + + /* Calculate XGMI relative offset */ + if (adev->gmc.xgmi.num_physical_nodes > 1 && + req->block_id != RAS_BLOCK_ID__GFX) { + req->address = local_addr_to_xgmi_global_addr(ras_core, req->address); + } + } + + amdgpu_ras_trigger_error_prepare(ras_core, req); + ret = rascore_handle_cmd(ras_core, cmd, data); + amdgpu_ras_trigger_error_end(ras_core, req); + if (ret) { + RAS_DEV_ERR(adev, "ras inject block %u failed %d\n", req->block_id, ret); + ret = RAS_CMD__ERROR_ACCESS_DENIED; + } + + + return ret; +} + +static int amdgpu_ras_get_ras_safe_fb_addr_ranges(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ras_cmd_dev_handle *input_data = + (struct ras_cmd_dev_handle *)cmd->input_buff_raw; + struct ras_cmd_ras_safe_fb_address_ranges_rsp *ranges = + (struct ras_cmd_ras_safe_fb_address_ranges_rsp *)cmd->output_buff_raw; + struct amdgpu_mem_partition_info *mem_ranges; + uint32_t i = 0; + + if (cmd->input_size != sizeof(*input_data)) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + mem_ranges = adev->gmc.mem_partitions; + for (i = 0; i < adev->gmc.num_mem_partitions; i++) { + ranges->range[i].start = mem_ranges[i].range.fpfn << AMDGPU_GPU_PAGE_SHIFT; + ranges->range[i].size = mem_ranges[i].size; + ranges->range[i].idx = i; + } + + ranges->num_ranges = adev->gmc.num_mem_partitions; + + ranges->version = 0; + cmd->output_size = sizeof(struct ras_cmd_ras_safe_fb_address_ranges_rsp); + + return RAS_CMD__SUCCESS; +} + +static int ras_translate_fb_address(struct ras_core_context *ras_core, + enum ras_fb_addr_type src_type, + enum ras_fb_addr_type dest_type, + union ras_translate_fb_address *src_addr, + union ras_translate_fb_address *dest_addr) +{ + uint64_t soc_phy_addr; + int ret = RAS_CMD__SUCCESS; + + /* Does not need to be queued as event as this is a SW translation */ + switch (src_type) { + case RAS_FB_ADDR_SOC_PHY: + soc_phy_addr = src_addr->soc_phy_addr; + break; + case RAS_FB_ADDR_BANK: + ret = ras_cmd_translate_bank_to_soc_pa(ras_core, + src_addr->bank_addr, &soc_phy_addr); + if (ret) + return RAS_CMD__ERROR_GENERIC; + break; + default: + return RAS_CMD__ERROR_INVALID_CMD; + } + + switch (dest_type) { + case RAS_FB_ADDR_SOC_PHY: + dest_addr->soc_phy_addr = soc_phy_addr; + break; + case RAS_FB_ADDR_BANK: + ret = ras_cmd_translate_soc_pa_to_bank(ras_core, + soc_phy_addr, &dest_addr->bank_addr); + if (ret) + return RAS_CMD__ERROR_GENERIC; + break; + default: + return RAS_CMD__ERROR_INVALID_CMD; + } + + return ret; +} + +static int amdgpu_ras_translate_fb_address(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void 
*data) +{ + struct ras_cmd_translate_fb_address_req *req_buff = + (struct ras_cmd_translate_fb_address_req *)cmd->input_buff_raw; + struct ras_cmd_translate_fb_address_rsp *rsp_buff = + (struct ras_cmd_translate_fb_address_rsp *)cmd->output_buff_raw; + int ret = RAS_CMD__ERROR_GENERIC; + + if (cmd->input_size != sizeof(struct ras_cmd_translate_fb_address_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if ((req_buff->src_addr_type >= RAS_FB_ADDR_UNKNOWN) || + (req_buff->dest_addr_type >= RAS_FB_ADDR_UNKNOWN)) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + ret = ras_translate_fb_address(ras_core, req_buff->src_addr_type, + req_buff->dest_addr_type, &req_buff->trans_addr, &rsp_buff->trans_addr); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + rsp_buff->version = 0; + cmd->output_size = sizeof(struct ras_cmd_translate_fb_address_rsp); + + return RAS_CMD__SUCCESS; +} + +static struct ras_cmd_func_map amdgpu_ras_cmd_maps[] = { + {RAS_CMD__INJECT_ERROR, amdgpu_ras_inject_error}, + {RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES, amdgpu_ras_get_ras_safe_fb_addr_ranges}, + {RAS_CMD__TRANSLATE_FB_ADDRESS, amdgpu_ras_translate_fb_address}, +}; + +int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_func_map *ras_cmd = NULL; + int i, res; + + for (i = 0; i < ARRAY_SIZE(amdgpu_ras_cmd_maps); i++) { + if (cmd->cmd_id == amdgpu_ras_cmd_maps[i].cmd_id) { + ras_cmd = &amdgpu_ras_cmd_maps[i]; + break; + } + } + + if (ras_cmd) + res = ras_cmd->func(ras_core, cmd, NULL); + else + res = RAS_CMD__ERROR_UKNOWN_CMD; + + return res; +} + +int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd) +{ + struct ras_core_context *cmd_core = ras_core; + int timeout = 60; + int res; + + cmd->cmd_res = RAS_CMD__ERROR_INVALID_CMD; + cmd->output_size = 0; + + if (!ras_core_is_enabled(cmd_core)) + return RAS_CMD__ERROR_ACCESS_DENIED; + + while (ras_core_gpu_in_reset(cmd_core)) { + msleep(1000); + if (!timeout--) + return RAS_CMD__ERROR_TIMEOUT; + } + + res = amdgpu_ras_handle_cmd(cmd_core, cmd, NULL); + if (res == RAS_CMD__ERROR_UKNOWN_CMD) + res = rascore_handle_cmd(cmd_core, cmd, NULL); + + cmd->cmd_res = res; + + if (cmd->output_size > cmd->output_buf_size) { + RAS_DEV_ERR(cmd_core->dev, + "Output size 0x%x exceeds output buffer size 0x%x!\n", + cmd->output_size, cmd->output_buf_size); + return RAS_CMD__SUCCESS_EXEED_BUFFER; + } + + return RAS_CMD__SUCCESS; +} diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h new file mode 100644 index 000000000000..5973b156cc85 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
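A minimal sketch of how an additional amdgpu-side command would plug into the dispatch above: a handler with the ras_cmd_func_map signature plus one more entry in amdgpu_ras_cmd_maps[]; any command id not found there falls through to rascore_handle_cmd(). The handler body below is only a placeholder, and wiring up RAS_CMD__TRANSLATE_MEMORY_FD (declared in amdgpu_ras_cmd.h but not handled by this patch) is an assumption made for illustration:

static int amdgpu_ras_translate_memory_fd(struct ras_core_context *ras_core,
					  struct ras_cmd_ctx *cmd, void *data)
{
	/* A real handler would parse cmd->input_buff_raw, fill
	 * cmd->output_buff_raw and set cmd->output_size; this placeholder
	 * only validates the input size.
	 */
	if (cmd->input_size != sizeof(struct ras_cmd_translate_memory_fd_req))
		return RAS_CMD__ERROR_INVALID_INPUT_SIZE;

	cmd->output_size = 0;
	return RAS_CMD__SUCCESS;
}

/* ...and the matching table entry:
 *	{RAS_CMD__TRANSLATE_MEMORY_FD, amdgpu_ras_translate_memory_fd},
 */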
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __AMDGPU_RAS_CMD_H__ +#define __AMDGPU_RAS_CMD_H__ +#include "ras.h" + +enum amdgpu_ras_cmd_id { + RAS_CMD__AMDGPU_BEGIN = RAS_CMD_ID_AMDGPU_START, + RAS_CMD__TRANSLATE_MEMORY_FD, + RAS_CMD__AMDGPU_SUPPORTED_MAX = RAS_CMD_ID_AMDGPU_END, +}; + +struct ras_cmd_translate_memory_fd_req { + struct ras_cmd_dev_handle dev; + uint32_t type; + uint32_t fd; + uint64_t address; + uint32_t reserved[4]; +}; + +struct ras_cmd_translate_memory_fd_rsp { + uint32_t version; + uint32_t padding; + uint64_t start; + uint64_t size; + uint32_t reserved[2]; +}; + +int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data); +int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd); + +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c new file mode 100644 index 000000000000..3ed3ff42b7e1 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "amdgpu.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_ras_eeprom.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_eeprom_i2c.h" +#include "ras_eeprom.h" + +/* These are memory addresses as would be seen by one or more EEPROM + * chips strung on the I2C bus, usually by manipulating pins 1-3 of a + * set of EEPROM devices. They form a continuous memory space. + * + * The I2C device address includes the device type identifier, 1010b, + * which is a reserved value and indicates that this is an I2C EEPROM + * device. It also includes the top 3 bits of the 19 bit EEPROM memory + * address, namely bits 18, 17, and 16. This makes up the 7 bit + * address sent on the I2C bus with bit 0 being the direction bit, + * which is not represented here, and sent by the hardware directly. 
+ * + * For instance, + * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0. + * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h. + * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h. + * Depending on the size of the I2C EEPROM device(s), bits 18:16 may + * address memory in a device or a device on the I2C bus, depending on + * the status of pins 1-3. See top of amdgpu_eeprom.c. + * + * The RAS table lives either at address 0 or address 40000h of EEPROM. + */ +#define EEPROM_I2C_MADDR_0 0x0 +#define EEPROM_I2C_MADDR_4 0x40000 + +#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF)) +#define to_amdgpu_ras(x) (container_of(x, struct amdgpu_ras, eeprom_control)) + +#define EEPROM_PAGE_BITS 8 +#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS) +#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1) + +#define EEPROM_OFFSET_SIZE 2 + +static int ras_eeprom_i2c_config(struct ras_core_context *ras_core) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + u8 i2c_addr; + + if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) { + /* The address given by VBIOS is an 8-bit, wire-format + * address, i.e. the most significant byte. + * + * Normalize it to a 19-bit EEPROM address. Remove the + * device type identifier and make it a 7-bit address; + * then make it a 19-bit EEPROM address. See top of + * amdgpu_eeprom.c. + */ + i2c_addr = (i2c_addr & 0x0F) >> 1; + control->i2c_address = ((u32) i2c_addr) << 16; + return 0; + } + + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 5): + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 10): + case IP_VERSION(13, 0, 12): + case IP_VERSION(13, 0, 14): + control->i2c_address = EEPROM_I2C_MADDR_4; + return 0; + default: + return -ENODATA; + } + return -ENODATA; +} + +static int ras_eeprom_i2c_xfer(struct ras_core_context *ras_core, u32 eeprom_addr, + u8 *eeprom_buf, u32 buf_size, bool read) +{ + struct i2c_adapter *i2c_adap = ras_core->ras_eeprom.i2c_adapter; + u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE]; + struct i2c_msg msgs[] = { + { + .flags = 0, + .len = EEPROM_OFFSET_SIZE, + .buf = eeprom_offset_buf, + }, + { + .flags = read ? I2C_M_RD : 0, + }, + }; + const u8 *p = eeprom_buf; + int r; + u16 len; + + for (r = 0; buf_size > 0; + buf_size -= len, eeprom_addr += len, eeprom_buf += len) { + /* Set the EEPROM address we want to write to/read from. + */ + msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr); + msgs[1].addr = msgs[0].addr; + msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff; + msgs[0].buf[1] = eeprom_addr & 0xff; + + if (!read) { + /* Write the maximum amount of data, without + * crossing the device's page boundary, as per + * its spec. Partial page writes are allowed, + * starting at any location within the page, + * so long as the page boundary isn't crossed + * over (actually the page pointer rolls + * over). + * + * As per the AT24CM02 EEPROM spec, after + * writing into a page, the I2C driver should + * terminate the transfer, i.e. in + * "i2c_transfer()" below, with a STOP + * condition, so that the self-timed write + * cycle begins. This is implied for the + * "i2c_transfer()" abstraction. + */ + len = min(EEPROM_PAGE_SIZE - (eeprom_addr & EEPROM_PAGE_MASK), + buf_size); + } else { + /* Reading from the EEPROM has no limitation + * on the number of bytes read from the EEPROM + * device--they are simply sequenced out. + * Keep in mind that i2c_msg.len is u16 type. 
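 * As a worked illustration of the address handling in this transfer loop
 * (the example values are picked here, not taken from the code): for
 * eeprom_addr = 0x40000, bits 18:16 are 100b, so MAKE_I2C_ADDR() yields
 * (0xA << 3) | 0x4 = 0x54 and the two offset bytes sent first are 0x00 0x00.
 * For a write starting at eeprom_addr = 0x400F0 with buf_size = 64, the page
 * clamp limits the first transfer to EEPROM_PAGE_SIZE - 0xF0 = 16 bytes; the
 * remaining 48 bytes go out in the next loop iteration starting at 0x40100.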
+ */ + len = min(U16_MAX, buf_size); + } + msgs[1].len = len; + msgs[1].buf = eeprom_buf; + + + /* This constitutes a START-STOP transaction. + */ + r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs)); + if (r != ARRAY_SIZE(msgs)) + break; + + if (!read) { + /* According to EEPROM specs the length of the + * self-writing cycle, tWR (tW), is 10 ms. + * + * TODO: Use polling on ACK, aka Acknowledge + * Polling, to minimize waiting for the + * internal write cycle to complete, as it is + * usually smaller than tWR (tW). + */ + msleep(10); + } + } + + return r < 0 ? r : eeprom_buf - p; +} + +const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func = { + .eeprom_i2c_xfer = ras_eeprom_i2c_xfer, + .update_eeprom_i2c_config = ras_eeprom_i2c_config, +}; diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h new file mode 100644 index 000000000000..3b5878605411 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __AMDGPU_RAS_EEPROM_I2C_H__ +#define __AMDGPU_RAS_EEPROM_I2C_H__ +#include "ras.h" + +extern const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func; +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c new file mode 100644 index 000000000000..afe8135b6258 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "amdgpu_reset.h" +#include "amdgpu_xgmi.h" +#include "ras_sys.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_cmd.h" +#include "amdgpu_ras_process.h" +#include "amdgpu_ras_eeprom_i2c.h" +#include "amdgpu_ras_mp1_v13_0.h" +#include "amdgpu_ras_nbio_v7_9.h" + +#define MAX_SOCKET_NUM_PER_HIVE 8 +#define MAX_AID_NUM_PER_SOCKET 4 +#define MAX_XCD_NUM_PER_AID 2 + +/* typical ECC bad page rate is 1 bad page per 100MB VRAM */ +#define TYPICAL_ECC_BAD_PAGE_RATE (100ULL * SZ_1M) + +#define COUNT_BAD_PAGE_THRESHOLD(size) (((size) >> 21) << 4) + +/* Reserve 8 physical dram rows for possible retirement. + * In the worst case, it will lose 8 * 2MB of memory in the vram domain + */ +#define RAS_RESERVED_VRAM_SIZE_DEFAULT (16ULL << 20) + + +static void ras_mgr_init_event_mgr(struct ras_event_manager *mgr) +{ + struct ras_event_state *event_state; + int i; + + memset(mgr, 0, sizeof(*mgr)); + atomic64_set(&mgr->seqno, 0); + + for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) { + event_state = &mgr->event_state[i]; + event_state->last_seqno = RAS_EVENT_INVALID_ID; + atomic64_set(&event_state->count, 0); + } +} + +static void amdgpu_ras_mgr_init_event_mgr(struct ras_core_context *ras_core) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_event_manager *event_mgr; + struct amdgpu_hive_info *hive; + + hive = amdgpu_get_xgmi_hive(adev); + event_mgr = hive ? &hive->event_mgr : &ras_mgr->ras_event_mgr; + + /* init event manager with node 0 on xgmi system */ + if (!amdgpu_reset_in_recovery(adev)) { + if (!hive || adev->gmc.xgmi.node_id == 0) + ras_mgr_init_event_mgr(event_mgr); + } + + if (hive) + amdgpu_put_xgmi_hive(hive); +} + +static int amdgpu_ras_mgr_init_aca_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_aca_config *aca_cfg = &config->aca_cfg; + + aca_cfg->socket_num_per_hive = MAX_SOCKET_NUM_PER_HIVE; + aca_cfg->aid_num_per_socket = MAX_AID_NUM_PER_SOCKET; + aca_cfg->xcd_num_per_aid = MAX_XCD_NUM_PER_AID; + + return 0; +} + +static int amdgpu_ras_mgr_init_eeprom_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_eeprom_config *eeprom_cfg = &config->eeprom_cfg; + + eeprom_cfg->eeprom_sys_fn = &amdgpu_ras_eeprom_i2c_sys_func; + eeprom_cfg->eeprom_i2c_adapter = adev->pm.ras_eeprom_i2c_bus; + if (eeprom_cfg->eeprom_i2c_adapter) { + const struct i2c_adapter_quirks *quirks = + ((struct i2c_adapter *)eeprom_cfg->eeprom_i2c_adapter)->quirks; + + if (quirks) { + eeprom_cfg->max_i2c_read_len = quirks->max_read_len; + eeprom_cfg->max_i2c_write_len = quirks->max_write_len; + } + } + + /* + * amdgpu_bad_page_threshold is used to configure + * the threshold for the number of bad pages. + * -1: Threshold is set to default value + * Driver will issue a warning message when threshold is reached + * and continue runtime services. + * 0: Disable bad page retirement + * Driver will not retire bad pages + * which is intended for debugging purposes. + * -2: Threshold is determined by a formula + * that assumes 1 bad page per 100M of local memory. + * Driver will continue runtime services when threshold is reached.
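 * For a rough illustration (the board size here is hypothetical): with -2 on
 * a 64 GiB VRAM device the threshold becomes 64 GiB / 100 MiB = ~655 records,
 * while -1 uses COUNT_BAD_PAGE_THRESHOLD(RAS_RESERVED_VRAM_SIZE_DEFAULT),
 * i.e. ((16 MiB >> 21) << 4) = 128 records.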
+ * 0 < threshold < max number of bad page records in EEPROM, + * A user-defined threshold is set + * Driver will halt runtime services when this custom threshold is reached. + */ + if (amdgpu_bad_page_threshold == NONSTOP_OVER_THRESHOLD) + eeprom_cfg->eeprom_record_threshold_count = + div64_u64(adev->gmc.mc_vram_size, TYPICAL_ECC_BAD_PAGE_RATE); + else if (amdgpu_bad_page_threshold == WARN_NONSTOP_OVER_THRESHOLD) + eeprom_cfg->eeprom_record_threshold_count = + COUNT_BAD_PAGE_THRESHOLD(RAS_RESERVED_VRAM_SIZE_DEFAULT); + else + eeprom_cfg->eeprom_record_threshold_count = amdgpu_bad_page_threshold; + + eeprom_cfg->eeprom_record_threshold_config = amdgpu_bad_page_threshold; + + return 0; +} + +static int amdgpu_ras_mgr_init_mp1_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_mp1_config *mp1_cfg = &config->mp1_cfg; + int ret = 0; + + switch (config->mp1_ip_version) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 12): + mp1_cfg->mp1_sys_fn = &amdgpu_ras_mp1_sys_func_v13_0; + break; + default: + RAS_DEV_ERR(adev, + "The mp1(0x%x) ras config is not right!\n", + config->mp1_ip_version); + ret = -EINVAL; + break; + } + + return ret; +} + +static int amdgpu_ras_mgr_init_nbio_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_nbio_config *nbio_cfg = &config->nbio_cfg; + int ret = 0; + + switch (config->nbio_ip_version) { + case IP_VERSION(7, 9, 0): + case IP_VERSION(7, 9, 1): + nbio_cfg->nbio_sys_fn = &amdgpu_ras_nbio_sys_func_v7_9; + break; + default: + RAS_DEV_ERR(adev, + "The nbio(0x%x) ras config is not right!\n", + config->nbio_ip_version); + ret = -EINVAL; + break; + } + + return ret; +} + +static int amdgpu_ras_mgr_get_ras_psp_system_status(struct ras_core_context *ras_core, + struct ras_psp_sys_status *status) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ta_context *context = &adev->psp.ras_context.context; + + status->initialized = context->initialized; + status->session_id = context->session_id; + status->psp_cmd_mutex = &adev->psp.mutex; + + return 0; +} + +static int amdgpu_ras_mgr_get_ras_ta_init_param(struct ras_core_context *ras_core, + struct ras_ta_init_param *ras_ta_param) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + uint32_t nps_mode; + + if (amdgpu_ras_is_poison_mode_supported(adev)) + ras_ta_param->poison_mode_en = 1; + + if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) + ras_ta_param->dgpu_mode = 1; + + ras_ta_param->xcc_mask = adev->gfx.xcc_mask; + ras_ta_param->channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; + + ras_ta_param->active_umc_mask = adev->umc.active_mask; + + if (!amdgpu_ras_mgr_get_curr_nps_mode(adev, &nps_mode)) + ras_ta_param->nps_mode = nps_mode; + + return 0; +} + +const struct ras_psp_sys_func amdgpu_ras_psp_sys_func = { + .get_ras_psp_system_status = amdgpu_ras_mgr_get_ras_psp_system_status, + .get_ras_ta_init_param = amdgpu_ras_mgr_get_ras_ta_init_param, +}; + +static int amdgpu_ras_mgr_init_psp_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_psp_config *psp_cfg = &config->psp_cfg; + + psp_cfg->psp_sys_fn = &amdgpu_ras_psp_sys_func; + + return 0; +} + +static int amdgpu_ras_mgr_init_umc_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_umc_config *umc_cfg = &config->umc_cfg; + + umc_cfg->umc_vram_type = adev->gmc.vram_type; + + return 0; +} + +static struct ras_core_context 
*amdgpu_ras_mgr_create_ras_core(struct amdgpu_device *adev) +{ + struct ras_core_config init_config; + + memset(&init_config, 0, sizeof(init_config)); + + init_config.umc_ip_version = amdgpu_ip_version(adev, UMC_HWIP, 0); + init_config.mp1_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0); + init_config.gfx_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0); + init_config.nbio_ip_version = amdgpu_ip_version(adev, NBIO_HWIP, 0); + init_config.psp_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0); + + if (init_config.umc_ip_version == IP_VERSION(12, 0, 0) || + init_config.umc_ip_version == IP_VERSION(12, 5, 0)) + init_config.aca_ip_version = IP_VERSION(1, 0, 0); + + init_config.sys_fn = &amdgpu_ras_sys_fn; + init_config.ras_eeprom_supported = true; + init_config.poison_supported = + amdgpu_ras_is_poison_mode_supported(adev); + + amdgpu_ras_mgr_init_aca_config(adev, &init_config); + amdgpu_ras_mgr_init_eeprom_config(adev, &init_config); + amdgpu_ras_mgr_init_mp1_config(adev, &init_config); + amdgpu_ras_mgr_init_nbio_config(adev, &init_config); + amdgpu_ras_mgr_init_psp_config(adev, &init_config); + amdgpu_ras_mgr_init_umc_config(adev, &init_config); + + return ras_core_create(&init_config); +} + +static int amdgpu_ras_mgr_sw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_ras_mgr *ras_mgr; + int ret = 0; + + /* Disabled by default */ + con->uniras_enabled = false; + + /* Enabled only in debug mode */ + if (adev->debug_enable_ras_aca) { + con->uniras_enabled = true; + RAS_DEV_INFO(adev, "Debug amdgpu uniras!"); + } + + if (!con->uniras_enabled) + return 0; + + ras_mgr = kzalloc(sizeof(*ras_mgr), GFP_KERNEL); + if (!ras_mgr) + return -EINVAL; + + con->ras_mgr = ras_mgr; + ras_mgr->adev = adev; + + ras_mgr->ras_core = amdgpu_ras_mgr_create_ras_core(adev); + if (!ras_mgr->ras_core) { + RAS_DEV_ERR(adev, "Failed to create ras core!\n"); + ret = -EINVAL; + goto err; + } + + ras_mgr->ras_core->dev = adev; + + amdgpu_ras_process_init(adev); + ras_core_sw_init(ras_mgr->ras_core); + amdgpu_ras_mgr_init_event_mgr(ras_mgr->ras_core); + return 0; + +err: + kfree(ras_mgr); + return ret; +} + +static int amdgpu_ras_mgr_sw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_ras_mgr *ras_mgr = (struct amdgpu_ras_mgr *)con->ras_mgr; + + if (!con->uniras_enabled) + return 0; + + if (!ras_mgr) + return 0; + + amdgpu_ras_process_fini(adev); + ras_core_sw_fini(ras_mgr->ras_core); + ras_core_destroy(ras_mgr->ras_core); + ras_mgr->ras_core = NULL; + + kfree(con->ras_mgr); + con->ras_mgr = NULL; + + return 0; +} + +static int amdgpu_ras_mgr_hw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + int ret; + + if (!con->uniras_enabled) + return 0; + + if (!ras_mgr || !ras_mgr->ras_core) + return -EINVAL; + + ret = ras_core_hw_init(ras_mgr->ras_core); + if (ret) { + RAS_DEV_ERR(adev, "Failed to initialize ras core!\n"); + return ret; + } + + ras_mgr->ras_is_ready = true; + + amdgpu_enable_uniras(adev, true); + + RAS_DEV_INFO(adev, "AMDGPU RAS Is Ready.\n"); + return 0; +} + +static int amdgpu_ras_mgr_hw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras *con = 
amdgpu_ras_get_context(adev); + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!con->uniras_enabled) + return 0; + + if (!ras_mgr || !ras_mgr->ras_core) + return -EINVAL; + + ras_core_hw_fini(ras_mgr->ras_core); + + ras_mgr->ras_is_ready = false; + + return 0; +} + +struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context(struct amdgpu_device *adev) +{ + if (!adev || !adev->psp.ras_context.ras) + return NULL; + + return (struct amdgpu_ras_mgr *)adev->psp.ras_context.ras->ras_mgr; +} + +static const struct amd_ip_funcs __maybe_unused ras_v1_0_ip_funcs = { + .name = "ras_v1_0", + .sw_init = amdgpu_ras_mgr_sw_init, + .sw_fini = amdgpu_ras_mgr_sw_fini, + .hw_init = amdgpu_ras_mgr_hw_init, + .hw_fini = amdgpu_ras_mgr_hw_fini, +}; + +const struct amdgpu_ip_block_version ras_v1_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_RAS, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &ras_v1_0_ip_funcs, +}; + +int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr || !ras_mgr->ras_core) + return -EPERM; + + if (amdgpu_sriov_vf(adev)) + return -EPERM; + + RAS_DEV_INFO(adev, "Enable amdgpu unified ras!"); + return ras_core_set_status(ras_mgr->ras_core, enable); +} + +bool amdgpu_uniras_enabled(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr || !ras_mgr->ras_core) + return false; + + if (amdgpu_sriov_vf(adev)) + return false; + + return ras_core_is_enabled(ras_mgr->ras_core); +} + +static bool amdgpu_ras_mgr_is_ready(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (ras_mgr && ras_mgr->ras_core && ras_mgr->ras_is_ready && + ras_core_is_ready(ras_mgr->ras_core)) + return true; + + return false; +} + +int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + return ras_core_handle_nbio_irq(ras_mgr->ras_core, data); +} + +uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev, + enum ras_seqno_type seqno_type) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + int ret; + uint64_t seq_no; + + if (!amdgpu_ras_mgr_is_ready(adev) || + (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)) + return 0; + + seq_no = ras_core_gen_seqno(ras_mgr->ras_core, seqno_type); + + if ((seqno_type == RAS_SEQNO_TYPE_DE) || + (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)) { + ret = ras_core_put_seqno(ras_mgr->ras_core, seqno_type, seq_no); + if (ret) + RAS_DEV_WARN(adev, "There are too many ras interrupts!"); + } + + return seq_no; +} + +int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_ih_info *ih_info = (struct ras_ih_info *)data; + uint64_t seq_no = 0; + int ret = 0; + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + if (ih_info && (ih_info->block == AMDGPU_RAS_BLOCK__UMC)) { + if (ras_mgr->ras_core->poison_supported) { + seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_DE); + RAS_DEV_INFO(adev, + "{%llu} RAS poison is created, no user action is needed.\n", + seq_no); + } + + ret = amdgpu_ras_process_handle_umc_interrupt(adev, ih_info); + } else if (ras_mgr->ras_core->poison_supported) { + ret = amdgpu_ras_process_handle_unexpected_interrupt(adev, ih_info); 
+ } else { + RAS_DEV_WARN(adev, + "No RAS interrupt handler for non-UMC block with poison disabled.\n"); + } + + return ret; +} + +int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data) +{ + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + return amdgpu_ras_process_handle_consumption_interrupt(adev, data); +} + +int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + return ras_core_update_ecc_info(ras_mgr->ras_core); +} + +int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + con->gpu_reset_flags |= flags; + return amdgpu_ras_reset_gpu(adev); +} + +bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return false; + + return ras_eeprom_check_safety_watermark(ras_mgr->ras_core); +} + +int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev, + uint32_t *nps_mode) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + uint32_t mode; + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EINVAL; + + mode = ras_core_get_curr_nps_mode(ras_mgr->ras_core); + if (!mode || mode > AMDGPU_NPS8_PARTITION_MODE) + return -EINVAL; + + *nps_mode = mode; + + return 0; +} + +bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev, + uint64_t addr) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return false; + + return ras_umc_check_retired_addr(ras_mgr->ras_core, addr); +} + +bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr || !ras_mgr->ras_core || !ras_mgr->ras_is_ready) + return false; + + return ras_core_gpu_is_rma(ras_mgr->ras_core); +} + +int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev, + uint32_t cmd_id, void *input, uint32_t input_size, + void *output, uint32_t out_size) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_cmd_ctx *cmd_ctx; + uint32_t ctx_buf_size = PAGE_SIZE; + int ret; + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + cmd_ctx = kzalloc(ctx_buf_size, GFP_KERNEL); + if (!cmd_ctx) + return -ENOMEM; + + cmd_ctx->cmd_id = cmd_id; + + memcpy(cmd_ctx->input_buff_raw, input, input_size); + cmd_ctx->input_size = input_size; + cmd_ctx->output_buf_size = ctx_buf_size - sizeof(*cmd_ctx); + + ret = amdgpu_ras_submit_cmd(ras_mgr->ras_core, cmd_ctx); + if (!ret && !cmd_ctx->cmd_res && output && (out_size == cmd_ctx->output_size)) + memcpy(output, cmd_ctx->output_buff_raw, cmd_ctx->output_size); + + kfree(cmd_ctx); + + return ret; +} + +int amdgpu_ras_mgr_pre_reset(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_mgr_is_ready(adev)) { + RAS_DEV_ERR(adev, "Invalid ras suspend!\n"); + return -EPERM; + } + + amdgpu_ras_process_pre_reset(adev); + return 0; +} + +int amdgpu_ras_mgr_post_reset(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_mgr_is_ready(adev)) { + RAS_DEV_ERR(adev, "Invalid ras resume!\n"); + return -EPERM; + } + + amdgpu_ras_process_post_reset(adev); + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h new file mode 
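A minimal caller-side sketch of the command entry point above, issuing the safe-FB-ranges query wired up in amdgpu_ras_cmd.c. The request/response types are the ones used by that handler (their definitions live in the rascore headers), adev is an existing amdgpu_device, and success is assumed to be reported as 0:

	struct ras_cmd_dev_handle req = {};
	struct ras_cmd_ras_safe_fb_address_ranges_rsp rsp = {};
	int err;

	err = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES,
					    &req, sizeof(req), &rsp, sizeof(rsp));
	if (!err)
		dev_info(adev->dev, "%u safe FB ranges reported\n", rsp.num_ranges);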
100644 index 000000000000..8fb7eb4b8f13 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __AMDGPU_RAS_MGR_H__ +#define __AMDGPU_RAS_MGR_H__ +#include "ras.h" +#include "amdgpu_ras_process.h" + +enum ras_ih_type { + RAS_IH_NONE, + RAS_IH_FROM_BLOCK_CONTROLLER, + RAS_IH_FROM_CONSUMER_CLIENT, + RAS_IH_FROM_FATAL_ERROR, +}; + +struct ras_ih_info { + uint32_t block; + union { + struct amdgpu_iv_entry iv_entry; + struct { + uint16_t pasid; + uint32_t reset; + pasid_notify pasid_fn; + void *data; + }; + }; +}; + +struct amdgpu_ras_mgr { + struct amdgpu_device *adev; + struct ras_core_context *ras_core; + struct delayed_work retire_page_dwork; + struct ras_event_manager ras_event_mgr; + uint64_t last_poison_consumption_seqno; + bool ras_is_ready; + + bool is_paused; + struct completion ras_event_done; +}; + +extern const struct amdgpu_ip_block_version ras_v1_0_ip_block; + +struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context( + struct amdgpu_device *adev); +int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable); +bool amdgpu_uniras_enabled(struct amdgpu_device *adev); +int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data); +int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data); +int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data); +int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev); +int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags); +uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev, + enum ras_seqno_type seqno_type); +bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev); +int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev, uint32_t *nps_mode); +bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev, + uint64_t addr); +bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev); +int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev, + uint32_t cmd_id, void *input, uint32_t input_size, + void *output, uint32_t out_size); +int amdgpu_ras_mgr_pre_reset(struct amdgpu_device *adev); +int amdgpu_ras_mgr_post_reset(struct amdgpu_device *adev); +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c new file 
mode 100644 index 000000000000..79a51b1603ac --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu_smu.h" +#include "amdgpu_reset.h" +#include "amdgpu_ras_mp1_v13_0.h" + +#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A +#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B + +static int mp1_v13_0_get_valid_bank_count(struct ras_core_context *ras_core, + u32 msg, u32 *count) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + u32 smu_msg; + int ret = 0; + + if (!count) + return -EINVAL; + + smu_msg = (msg == RAS_MP1_MSG_QueryValidMcaCeCount) ? + SMU_MSG_QueryValidMcaCeCount : SMU_MSG_QueryValidMcaCount; + + if (down_read_trylock(&adev->reset_domain->sem)) { + ret = amdgpu_smu_ras_send_msg(adev, smu_msg, 0, count); + up_read(&adev->reset_domain->sem); + } else { + ret = -RAS_CORE_GPU_IN_MODE1_RESET; + } + + if (ret) + *count = 0; + + return ret; +} + +static int mp1_v13_0_dump_valid_bank(struct ras_core_context *ras_core, + u32 msg, u32 idx, u32 reg_idx, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + uint32_t data[2] = {0, 0}; + uint32_t param; + int ret = 0; + int i, offset; + u32 smu_msg = (msg == RAS_MP1_MSG_McaBankCeDumpDW) ? 
+ SMU_MSG_McaBankCeDumpDW : SMU_MSG_McaBankDumpDW; + + if (down_read_trylock(&adev->reset_domain->sem)) { + offset = reg_idx * 8; + for (i = 0; i < ARRAY_SIZE(data); i++) { + param = ((idx & 0xffff) << 16) | ((offset + (i << 2)) & 0xfffc); + ret = amdgpu_smu_ras_send_msg(adev, smu_msg, param, &data[i]); + if (ret) { + RAS_DEV_ERR(adev, "ACA failed to read register[%d], offset:0x%x\n", + reg_idx, offset); + break; + } + } + up_read(&adev->reset_domain->sem); + + if (!ret) + *val = (uint64_t)data[1] << 32 | data[0]; + } else { + ret = -RAS_CORE_GPU_IN_MODE1_RESET; + } + + return ret; +} + +const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0 = { + .mp1_get_valid_bank_count = mp1_v13_0_get_valid_bank_count, + .mp1_dump_valid_bank = mp1_v13_0_dump_valid_bank, +}; + diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h new file mode 100644 index 000000000000..71c614ae1ae4 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __AMDGPU_RAS_MP1_V13_0_H__ +#define __AMDGPU_RAS_MP1_V13_0_H__ +#include "ras.h" + +extern const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c new file mode 100644 index 000000000000..2783f5875c7c --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
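To illustrate the bank-dump parameter encoding in mp1_v13_0_dump_valid_bank() above (the bank and register indices are invented for the example):

/* With idx = 3 and reg_idx = 2, offset = reg_idx * 8 = 16, so the two SMU
 * reads use param = (3 << 16) | 0x10 = 0x00030010 and
 * param = (3 << 16) | 0x14 = 0x00030014; the low dword lands in data[0],
 * the high dword in data[1], and the 64-bit MCA register is reassembled as
 *	*val = ((u64)data[1] << 32) | data[0];
 */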
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_nbio_v7_9.h" +#include "nbio/nbio_7_9_0_offset.h" +#include "nbio/nbio_7_9_0_sh_mask.h" +#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" + +static int nbio_v7_9_set_ras_controller_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + /* Dummy function, there is no initialization operation in the driver */ + + return 0; +} + +static int nbio_v7_9_process_ras_controller_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for ras_controller_irq should be written + * to the BIF ring instead of the general iv ring. However, due to a known bif ring + * hw bug, it has to be disabled. There is no chance the process function + * will be invoked. Just leave it as a dummy one. + */ + return 0; +} + +static int nbio_v7_9_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + /* Dummy function, there is no initialization operation in the driver */ + + return 0; +} + +static int nbio_v7_9_process_err_event_athub_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for err_event_athub_irq should be written + * to the BIF ring instead of the general iv ring. However, due to a known bif ring + * hw bug, it has to be disabled. There is no chance the process function + * will be invoked. Just leave it as a dummy one.
+ */ + return 0; +} + +static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_controller_irq_funcs = { + .set = nbio_v7_9_set_ras_controller_irq_state, + .process = nbio_v7_9_process_ras_controller_irq, +}; + +static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_err_event_athub_irq_funcs = { + .set = nbio_v7_9_set_ras_err_event_athub_irq_state, + .process = nbio_v7_9_process_err_event_athub_irq, +}; + +static int nbio_v7_9_init_ras_controller_interrupt(struct ras_core_context *ras_core, bool state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int r; + + /* init the irq funcs */ + adev->nbio.ras_controller_irq.funcs = + &nbio_v7_9_ras_controller_irq_funcs; + adev->nbio.ras_controller_irq.num_types = 1; + + /* register ras controller interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT, + &adev->nbio.ras_controller_irq); + + return r; +} + +static int nbio_v7_9_init_ras_err_event_athub_interrupt(struct ras_core_context *ras_core, + bool state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int r; + + /* init the irq funcs */ + adev->nbio.ras_err_event_athub_irq.funcs = + &nbio_v7_9_ras_err_event_athub_irq_funcs; + adev->nbio.ras_err_event_athub_irq.num_types = 1; + + /* register ras err event athub interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT, + &adev->nbio.ras_err_event_athub_irq); + + return r; +} + +const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9 = { + .set_ras_controller_irq_state = nbio_v7_9_init_ras_controller_interrupt, + .set_ras_err_event_athub_irq_state = nbio_v7_9_init_ras_err_event_athub_interrupt, +}; diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h new file mode 100644 index 000000000000..272259e9a0e7 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
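The two registration helpers above are exposed only through the amdgpu_ras_nbio_sys_func_v7_9 table; the core-side caller is not part of this hunk. The snippet below is a hedged sketch of how such a caller could walk the ras_nbio_sys_func callbacks supplied via ras_nbio_config (see ras.h later in this series); the function name is hypothetical.

/* Illustrative only: hypothetical core-side enable path for the NBIO RAS IRQs. */
static int example_enable_nbio_ras_irqs(struct ras_core_context *ras_core)
{
	const struct ras_nbio_sys_func *fn =
		ras_core->config->nbio_cfg.nbio_sys_fn;	/* e.g. &amdgpu_ras_nbio_sys_func_v7_9 */
	int r;

	r = fn->set_ras_controller_irq_state(ras_core, true);
	if (r)
		return r;

	return fn->set_ras_err_event_athub_irq_state(ras_core, true);
}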
+ * + */ + +#ifndef __AMDGPU_RAS_NBIO_V7_9_H__ +#define __AMDGPU_RAS_NBIO_V7_9_H__ + +extern const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9; + +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c new file mode 100644 index 000000000000..5782c007de71 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "amdgpu.h" +#include "amdgpu_reset.h" +#include "amdgpu_xgmi.h" +#include "ras_sys.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_process.h" + +#define RAS_MGR_RETIRE_PAGE_INTERVAL 100 +#define RAS_EVENT_PROCESS_TIMEOUT 1200 + +static void ras_process_retire_page_dwork(struct work_struct *work) +{ + struct amdgpu_ras_mgr *ras_mgr = + container_of(work, struct amdgpu_ras_mgr, retire_page_dwork.work); + struct amdgpu_device *adev = ras_mgr->adev; + int ret; + + if (amdgpu_ras_is_rma(adev)) + return; + + /* If gpu reset is ongoing, delay retiring the bad pages */ + if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) { + schedule_delayed_work(&ras_mgr->retire_page_dwork, + msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL * 3)); + return; + } + + ret = ras_umc_handle_bad_pages(ras_mgr->ras_core, NULL); + if (!ret) + schedule_delayed_work(&ras_mgr->retire_page_dwork, + msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL)); +} + +int amdgpu_ras_process_init(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + ras_mgr->is_paused = false; + init_completion(&ras_mgr->ras_event_done); + + INIT_DELAYED_WORK(&ras_mgr->retire_page_dwork, ras_process_retire_page_dwork); + + return 0; +} + +int amdgpu_ras_process_fini(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + ras_mgr->is_paused = false; + /* Save all cached bad pages to eeprom */ + flush_delayed_work(&ras_mgr->retire_page_dwork); + cancel_delayed_work_sync(&ras_mgr->retire_page_dwork); + return 0; +} + +int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr->ras_core) + return -EINVAL; + + return ras_process_add_interrupt_req(ras_mgr->ras_core, NULL, true); +} + +int 
amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev, void *data) +{ + amdgpu_ras_set_fed(adev, true); + return amdgpu_ras_mgr_reset_gpu(adev, AMDGPU_RAS_GPU_RESET_MODE1_RESET); +} + +int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_ih_info *ih_info = (struct ras_ih_info *)data; + struct ras_event_req req; + uint64_t seqno; + + if (!ih_info) + return -EINVAL; + + memset(&req, 0, sizeof(req)); + req.block = ih_info->block; + req.data = ih_info->data; + req.pasid = ih_info->pasid; + req.pasid_fn = ih_info->pasid_fn; + req.reset = ih_info->reset; + + seqno = ras_core_get_seqno(ras_mgr->ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, false); + + /* When the ACA register cannot be read from FW, the poison + * consumption seqno in the fifo will not pop up, so it is + * necessary to check whether the seqno is the previous seqno. + */ + if (seqno == ras_mgr->last_poison_consumption_seqno) { + /* Pop and discard the previous seqno */ + ras_core_get_seqno(ras_mgr->ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, true); + seqno = ras_core_get_seqno(ras_mgr->ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, false); + } + ras_mgr->last_poison_consumption_seqno = seqno; + req.seqno = seqno; + + return ras_process_add_interrupt_req(ras_mgr->ras_core, &req, false); +} + +int amdgpu_ras_process_begin(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (ras_mgr->is_paused) + return -EAGAIN; + + reinit_completion(&ras_mgr->ras_event_done); + return 0; +} + +int amdgpu_ras_process_end(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + complete(&ras_mgr->ras_event_done); + return 0; +} + +int amdgpu_ras_process_pre_reset(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + long rc; + + if (!ras_mgr || !ras_mgr->ras_core) + return -EINVAL; + + if (!ras_mgr->ras_core->is_initialized) + return -EPERM; + + ras_mgr->is_paused = true; + + /* Wait for RAS event processing to complete */ + rc = wait_for_completion_interruptible_timeout(&ras_mgr->ras_event_done, + msecs_to_jiffies(RAS_EVENT_PROCESS_TIMEOUT)); + if (rc <= 0) + RAS_DEV_WARN(adev, "Waiting for ras process to complete %s\n", + rc ? "interrupted" : "timeout"); + + flush_delayed_work(&ras_mgr->retire_page_dwork); + return 0; +} + +int amdgpu_ras_process_post_reset(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr || !ras_mgr->ras_core) + return -EINVAL; + + if (!ras_mgr->ras_core->is_initialized) + return -EPERM; + + ras_mgr->is_paused = false; + + schedule_delayed_work(&ras_mgr->retire_page_dwork, 0); + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h new file mode 100644 index 000000000000..d55cdaeac441 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. 
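The consumption-interrupt handler above peeks at the head of the poison-consumption sequence-number fifo (pop == false) and discards the entry only when it equals the previously recorded value, guarding against a stale seqno left behind when the ACA registers could not be read from FW. A compact sketch of that peek-then-pop pattern, with stand-ins for the real fifo accessors:

/* Sketch of the dedup step in amdgpu_ras_process_handle_consumption_interrupt;
 * peek()/pop() stand in for ras_core_get_seqno(..., false / true). */
static u64 example_dedup_seqno(u64 *last_seqno,
			       u64 (*peek)(void), u64 (*pop)(void))
{
	u64 seqno = peek();

	if (seqno == *last_seqno) {
		/* the previous event never consumed its entry; drop it and re-peek */
		pop();
		seqno = peek();
	}

	*last_seqno = seqno;
	return seqno;
}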
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __AMDGPU_RAS_PROCESS_H__ +#define __AMDGPU_RAS_PROCESS_H__ +#include "ras_process.h" +#include "amdgpu_ras_mgr.h" + +enum ras_ih_type; +int amdgpu_ras_process_init(struct amdgpu_device *adev); +int amdgpu_ras_process_fini(struct amdgpu_device *adev); +int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev, + void *data); +int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev, + void *data); +int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev, + void *data); +int amdgpu_ras_process_begin(struct amdgpu_device *adev); +int amdgpu_ras_process_end(struct amdgpu_device *adev); +int amdgpu_ras_process_pre_reset(struct amdgpu_device *adev); +int amdgpu_ras_process_post_reset(struct amdgpu_device *adev); +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c new file mode 100644 index 000000000000..45ed8c3b5563 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
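The pre/post reset hooks declared above are intended to bracket a GPU reset: the pre-reset side pauses RAS processing, waits for the in-flight event to signal ras_event_done and flushes the page-retirement work, while the post-reset side unpauses and re-arms it. A hedged sketch of a caller; the reset helper named here is a placeholder, not a real amdgpu entry point.

/* Illustrative reset flow; example_do_gpu_reset() is hypothetical. */
static int example_reset_with_ras_quiesce(struct amdgpu_device *adev)
{
	int r;

	amdgpu_ras_process_pre_reset(adev);	/* pause and drain RAS event work */
	r = example_do_gpu_reset(adev);		/* placeholder for the actual reset */
	amdgpu_ras_process_post_reset(adev);	/* resume and re-arm retirement work */

	return r;
}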
+ * + */ +#include "ras_sys.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras.h" +#include "amdgpu_reset.h" + +static int amdgpu_ras_sys_detect_fatal_event(struct ras_core_context *ras_core, void *data) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret; + uint64_t seq_no; + + ret = amdgpu_ras_global_ras_isr(adev); + if (ret) + return ret; + + seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_UE); + RAS_DEV_INFO(adev, + "{%llu} Uncorrectable hardware error(ERREVENT_ATHUB_INTERRUPT) detected!\n", + seq_no); + + return amdgpu_ras_process_handle_unexpected_interrupt(adev, data); +} + +static int amdgpu_ras_sys_poison_consumption_event(struct ras_core_context *ras_core, + void *data) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ras_event_req *req = (struct ras_event_req *)data; + pasid_notify pasid_fn; + + if (!req) + return -EINVAL; + + if (req->pasid_fn) { + pasid_fn = (pasid_notify)req->pasid_fn; + pasid_fn(adev, req->pasid, req->data); + } + + return 0; +} + +static int amdgpu_ras_sys_gen_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t *seqno) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_event_manager *event_mgr; + struct ras_event_state *event_state; + struct amdgpu_hive_info *hive; + enum ras_event_type event_type; + uint64_t seq_no; + + if (!ras_mgr || !seqno || + (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)) + return -EINVAL; + + switch (seqno_type) { + case RAS_SEQNO_TYPE_UE: + event_type = RAS_EVENT_TYPE_FATAL; + break; + case RAS_SEQNO_TYPE_CE: + case RAS_SEQNO_TYPE_DE: + event_type = RAS_EVENT_TYPE_POISON_CREATION; + break; + case RAS_SEQNO_TYPE_POISON_CONSUMPTION: + event_type = RAS_EVENT_TYPE_POISON_CONSUMPTION; + break; + default: + event_type = RAS_EVENT_TYPE_INVALID; + break; + } + + hive = amdgpu_get_xgmi_hive(adev); + event_mgr = hive ? 
&hive->event_mgr : &ras_mgr->ras_event_mgr; + event_state = &event_mgr->event_state[event_type]; + if ((event_type == RAS_EVENT_TYPE_FATAL) && amdgpu_ras_in_recovery(adev)) { + seq_no = event_state->last_seqno; + } else { + seq_no = atomic64_inc_return(&event_mgr->seqno); + event_state->last_seqno = seq_no; + atomic64_inc(&event_state->count); + } + amdgpu_put_xgmi_hive(hive); + + *seqno = seq_no; + return 0; + +} + +static int amdgpu_ras_sys_event_notifier(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(ras_core->dev); + int ret = 0; + + switch (event_id) { + case RAS_EVENT_ID__BAD_PAGE_DETECTED: + schedule_delayed_work(&ras_mgr->retire_page_dwork, 0); + break; + case RAS_EVENT_ID__POISON_CONSUMPTION: + amdgpu_ras_sys_poison_consumption_event(ras_core, data); + break; + case RAS_EVENT_ID__RESERVE_BAD_PAGE: + ret = amdgpu_ras_reserve_page(ras_core->dev, *(uint64_t *)data); + break; + case RAS_EVENT_ID__FATAL_ERROR_DETECTED: + ret = amdgpu_ras_sys_detect_fatal_event(ras_core, data); + break; + case RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM: + ret = amdgpu_dpm_send_hbm_bad_pages_num(ras_core->dev, *(uint32_t *)data); + break; + case RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP: + ret = amdgpu_dpm_send_hbm_bad_channel_flag(ras_core->dev, *(uint32_t *)data); + break; + case RAS_EVENT_ID__DEVICE_RMA: + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_RMA, NULL, NULL); + ret = amdgpu_dpm_send_rma_reason(ras_core->dev); + break; + case RAS_EVENT_ID__RESET_GPU: + ret = amdgpu_ras_mgr_reset_gpu(ras_core->dev, *(uint32_t *)data); + break; + case RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN: + ret = amdgpu_ras_process_begin(ras_core->dev); + break; + case RAS_EVENT_ID__RAS_EVENT_PROC_END: + ret = amdgpu_ras_process_end(ras_core->dev); + break; + default: + RAS_DEV_WARN(ras_core->dev, "Invalid ras notify event:%d\n", event_id); + break; + } + + return ret; +} + +static u64 amdgpu_ras_sys_get_utc_second_timestamp(struct ras_core_context *ras_core) +{ + return ktime_get_real_seconds(); +} + +static int amdgpu_ras_sys_check_gpu_status(struct ras_core_context *ras_core, + uint32_t *status) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + uint32_t gpu_status = 0; + + if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) + gpu_status |= RAS_GPU_STATUS__IN_RESET; + + if (amdgpu_sriov_vf(adev)) + gpu_status |= RAS_GPU_STATUS__IS_VF; + + *status = gpu_status; + + return 0; +} + +static int amdgpu_ras_sys_get_device_system_info(struct ras_core_context *ras_core, + struct device_system_info *dev_info) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + + dev_info->device_id = adev->pdev->device; + dev_info->vendor_id = adev->pdev->vendor; + dev_info->socket_id = adev->smuio.funcs->get_socket_id(adev); + + return 0; +} + +static int amdgpu_ras_sys_gpu_reset_lock(struct ras_core_context *ras_core, + bool down, bool try) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret = 0; + + if (down && try) + ret = down_read_trylock(&adev->reset_domain->sem); + else if (down) + down_read(&adev->reset_domain->sem); + else + up_read(&adev->reset_domain->sem); + + return ret; +} + +static bool amdgpu_ras_sys_detect_ras_interrupt(struct ras_core_context *ras_core) +{ + return !!atomic_read(&amdgpu_ras_in_intr); +} + +static int amdgpu_ras_sys_get_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem) +{ + struct 
amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct psp_context *psp = &adev->psp; + struct psp_ring *psp_ring; + struct ta_mem_context *mem_ctx; + + if (mem_type == GPU_MEM_TYPE_RAS_PSP_RING) { + psp_ring = &psp->km_ring; + gpu_mem->mem_bo = adev->firmware.rbuf; + gpu_mem->mem_size = psp_ring->ring_size; + gpu_mem->mem_mc_addr = psp_ring->ring_mem_mc_addr; + gpu_mem->mem_cpu_addr = psp_ring->ring_mem; + } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_CMD) { + gpu_mem->mem_bo = psp->cmd_buf_bo; + gpu_mem->mem_size = PSP_CMD_BUFFER_SIZE; + gpu_mem->mem_mc_addr = psp->cmd_buf_mc_addr; + gpu_mem->mem_cpu_addr = psp->cmd_buf_mem; + } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_FENCE) { + gpu_mem->mem_bo = psp->fence_buf_bo; + gpu_mem->mem_size = PSP_FENCE_BUFFER_SIZE; + gpu_mem->mem_mc_addr = psp->fence_buf_mc_addr; + gpu_mem->mem_cpu_addr = psp->fence_buf; + } else if (mem_type == GPU_MEM_TYPE_RAS_TA_FW) { + gpu_mem->mem_bo = psp->fw_pri_bo; + gpu_mem->mem_size = PSP_1_MEG; + gpu_mem->mem_mc_addr = psp->fw_pri_mc_addr; + gpu_mem->mem_cpu_addr = psp->fw_pri_buf; + } else if (mem_type == GPU_MEM_TYPE_RAS_TA_CMD) { + mem_ctx = &psp->ras_context.context.mem_context; + gpu_mem->mem_bo = mem_ctx->shared_bo; + gpu_mem->mem_size = mem_ctx->shared_mem_size; + gpu_mem->mem_mc_addr = mem_ctx->shared_mc_addr; + gpu_mem->mem_cpu_addr = mem_ctx->shared_buf; + } else { + return -EINVAL; + } + + if (!gpu_mem->mem_bo || !gpu_mem->mem_size || + !gpu_mem->mem_mc_addr || !gpu_mem->mem_cpu_addr) { + RAS_DEV_ERR(ras_core->dev, "The ras psp gpu memory is invalid!\n"); + return -ENOMEM; + } + + return 0; +} + +static int amdgpu_ras_sys_put_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem) +{ + + return 0; +} + +const struct ras_sys_func amdgpu_ras_sys_fn = { + .ras_notifier = amdgpu_ras_sys_event_notifier, + .get_utc_second_timestamp = amdgpu_ras_sys_get_utc_second_timestamp, + .gen_seqno = amdgpu_ras_sys_gen_seqno, + .check_gpu_status = amdgpu_ras_sys_check_gpu_status, + .get_device_system_info = amdgpu_ras_sys_get_device_system_info, + .gpu_reset_lock = amdgpu_ras_sys_gpu_reset_lock, + .detect_ras_interrupt = amdgpu_ras_sys_detect_ras_interrupt, + .get_gpu_mem = amdgpu_ras_sys_get_gpu_mem, + .put_gpu_mem = amdgpu_ras_sys_put_gpu_mem, +}; diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h new file mode 100644 index 000000000000..8156531a7b63 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
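The amdgpu_ras_sys_fn table above is the amdgpu-side glue that the shared rascore code calls back through. The core-side dispatch is not shown in this patch, so the snippet below is only an assumption of how ras_core_event_notify() might route into the registered table.

/* Hedged sketch of the core-side dispatch into the registered sys_fn table. */
static int example_event_notify(struct ras_core_context *ras_core,
				enum ras_notify_event event_id, void *data)
{
	if (!ras_core->sys_fn || !ras_core->sys_fn->ras_notifier)
		return -EOPNOTSUPP;

	return ras_core->sys_fn->ras_notifier(ras_core, event_id, data);
}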
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_SYS_H__ +#define __RAS_SYS_H__ +#include +#include +#include +#include +#include "amdgpu.h" + +#define RAS_DEV_ERR(device, fmt, ...) \ + do { \ + if (device) \ + dev_err(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_ERR fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_DEV_WARN(device, fmt, ...) \ + do { \ + if (device) \ + dev_warn(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_WARNING fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_DEV_INFO(device, fmt, ...) \ + do { \ + if (device) \ + dev_info(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_INFO fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_DEV_DBG(device, fmt, ...) \ + do { \ + if (device) \ + dev_dbg(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_INFO(fmt, ...) printk(KERN_INFO fmt, ##__VA_ARGS__) + +#define RAS_DEV_RREG32_SOC15(dev, ip, inst, reg) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ + 0, ip##_HWIP, inst); \ +}) + +#define RAS_DEV_WREG32_SOC15(dev, ip, inst, reg, value) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \ + value, 0, ip##_HWIP, inst); \ +}) + +/* GET_INST returns the physical instance corresponding to a logical instance */ +#define RAS_GET_INST(dev, ip, inst) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + adev->ip_map.logical_to_dev_inst ? \ + adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst) : inst; \ +}) + +#define RAS_GET_MASK(dev, ip, mask) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + (adev->ip_map.logical_to_dev_mask ? \ + adev->ip_map.logical_to_dev_mask(adev, ip##_HWIP, mask) : mask); \ +}) + +static inline void *ras_radix_tree_delete_iter(struct radix_tree_root *root, void *iter) +{ + return radix_tree_delete(root, ((struct radix_tree_iter *)iter)->index); +} + +static inline long ras_wait_event_interruptible_timeout(void *wq_head, + int (*condition)(void *param), void *param, unsigned int timeout) +{ + return wait_event_interruptible_timeout(*(wait_queue_head_t *)wq_head, + condition(param), timeout); +} + +extern const struct ras_sys_func amdgpu_ras_sys_fn; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/Makefile b/drivers/gpu/drm/amd/ras/rascore/Makefile index e69de29bb2d1..e826a1f86424 100644 --- a/drivers/gpu/drm/amd/ras/rascore/Makefile +++ b/drivers/gpu/drm/amd/ras/rascore/Makefile @@ -0,0 +1,44 @@ +# +# Copyright 2025 Advanced Micro Devices, Inc. 
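The RAS_DEV_* wrappers defined in ras_sys.h above fall back to plain printk() when no device pointer is supplied, so shared rascore code can log before it is bound to an amdgpu_device. For example:

/* Both forms are valid with the wrappers above. */
RAS_DEV_INFO(ras_core->dev, "retired %u bad pages\n", count);
RAS_DEV_INFO(NULL, "ras core context not bound to a device yet\n");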
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +RAS_CORE_FILES = ras_core.o \ + ras_mp1.o \ + ras_mp1_v13_0.o \ + ras_aca.o \ + ras_aca_v1_0.o \ + ras_eeprom.o \ + ras_umc.o \ + ras_umc_v12_0.o \ + ras_cmd.o \ + ras_gfx.o \ + ras_gfx_v9_0.o \ + ras_process.o \ + ras_nbio.o \ + ras_nbio_v7_9.o \ + ras_log_ring.o \ + ras_cper.o \ + ras_psp.o \ + ras_psp_v13_0.o + + +RAS_CORE = $(addprefix $(AMD_GPU_RAS_PATH)/rascore/,$(RAS_CORE_FILES)) + +AMD_GPU_RAS_FILES += $(RAS_CORE) diff --git a/drivers/gpu/drm/amd/ras/rascore/ras.h b/drivers/gpu/drm/amd/ras/rascore/ras.h new file mode 100644 index 000000000000..3396b2e0949d --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras.h @@ -0,0 +1,370 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_H__ +#define __RAS_H__ +#include "ras_sys.h" +#include "ras_umc.h" +#include "ras_aca.h" +#include "ras_eeprom.h" +#include "ras_core_status.h" +#include "ras_process.h" +#include "ras_gfx.h" +#include "ras_cmd.h" +#include "ras_nbio.h" +#include "ras_mp1.h" +#include "ras_psp.h" +#include "ras_log_ring.h" + +#define RAS_HW_ERR "[Hardware Error]: " + +#define RAS_GPU_PAGE_SHIFT 12 +#define RAS_ADDR_TO_PFN(addr) ((addr) >> RAS_GPU_PAGE_SHIFT) +#define RAS_PFN_TO_ADDR(pfn) ((pfn) << RAS_GPU_PAGE_SHIFT) + +#define RAS_CORE_RESET_GPU 0x10000 + +#define GPU_RESET_CAUSE_POISON (RAS_CORE_RESET_GPU | 0x0001) +#define GPU_RESET_CAUSE_FATAL (RAS_CORE_RESET_GPU | 0x0002) +#define GPU_RESET_CAUSE_RMA (RAS_CORE_RESET_GPU | 0x0004) + +enum ras_block_id { + RAS_BLOCK_ID__UMC = 0, + RAS_BLOCK_ID__SDMA, + RAS_BLOCK_ID__GFX, + RAS_BLOCK_ID__MMHUB, + RAS_BLOCK_ID__ATHUB, + RAS_BLOCK_ID__PCIE_BIF, + RAS_BLOCK_ID__HDP, + RAS_BLOCK_ID__XGMI_WAFL, + RAS_BLOCK_ID__DF, + RAS_BLOCK_ID__SMN, + RAS_BLOCK_ID__SEM, + RAS_BLOCK_ID__MP0, + RAS_BLOCK_ID__MP1, + RAS_BLOCK_ID__FUSE, + RAS_BLOCK_ID__MCA, + RAS_BLOCK_ID__VCN, + RAS_BLOCK_ID__JPEG, + RAS_BLOCK_ID__IH, + RAS_BLOCK_ID__MPIO, + + RAS_BLOCK_ID__LAST +}; + +enum ras_ecc_err_type { + RAS_ECC_ERR__NONE = 0, + RAS_ECC_ERR__PARITY = 1, + RAS_ECC_ERR__SINGLE_CORRECTABLE = 2, + RAS_ECC_ERR__MULTI_UNCORRECTABLE = 4, + RAS_ECC_ERR__POISON = 8, +}; + +enum ras_err_type { + RAS_ERR_TYPE__UE = 0, + RAS_ERR_TYPE__CE, + RAS_ERR_TYPE__DE, + RAS_ERR_TYPE__LAST +}; + +enum ras_seqno_type { + RAS_SEQNO_TYPE_INVALID = 0, + RAS_SEQNO_TYPE_UE, + RAS_SEQNO_TYPE_CE, + RAS_SEQNO_TYPE_DE, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, + RAS_SEQNO_TYPE_COUNT_MAX, +}; + +enum ras_seqno_fifo { + SEQNO_FIFO_INVALID = 0, + SEQNO_FIFO_POISON_CREATION, + SEQNO_FIFO_POISON_CONSUMPTION, + SEQNO_FIFO_COUNT_MAX +}; + +enum ras_notify_event { + RAS_EVENT_ID__NONE, + RAS_EVENT_ID__BAD_PAGE_DETECTED, + RAS_EVENT_ID__POISON_CONSUMPTION, + RAS_EVENT_ID__RESERVE_BAD_PAGE, + RAS_EVENT_ID__DEVICE_RMA, + RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM, + RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP, + RAS_EVENT_ID__FATAL_ERROR_DETECTED, + RAS_EVENT_ID__RESET_GPU, + RAS_EVENT_ID__RESET_VF, + RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN, + RAS_EVENT_ID__RAS_EVENT_PROC_END, +}; + +enum ras_gpu_status { + RAS_GPU_STATUS__NOT_READY = 0, + RAS_GPU_STATUS__READY = 0x1, + RAS_GPU_STATUS__IN_RESET = 0x2, + RAS_GPU_STATUS__IS_RMA = 0x4, + RAS_GPU_STATUS__IS_VF = 0x8, +}; + +struct ras_core_context; +struct ras_bank_ecc; +struct ras_umc; +struct ras_aca; +struct ras_process; +struct ras_nbio; +struct ras_log_ring; +struct ras_psp; + +struct ras_mp1_sys_func { + int (*mp1_get_valid_bank_count)(struct ras_core_context *ras_core, + u32 msg, u32 *count); + int (*mp1_dump_valid_bank)(struct ras_core_context *ras_core, + u32 msg, u32 idx, u32 reg_idx, u64 *val); +}; + +struct ras_eeprom_sys_func { + int (*eeprom_i2c_xfer)(struct ras_core_context *ras_core, + u32 eeprom_addr, u8 *eeprom_buf, u32 buf_size, bool read); + int (*update_eeprom_i2c_config)(struct ras_core_context *ras_core); +}; + +struct ras_nbio_sys_func { + int (*set_ras_controller_irq_state)(struct ras_core_context *ras_core, + bool state); + int (*set_ras_err_event_athub_irq_state)(struct ras_core_context *ras_core, + bool state); +}; + +struct ras_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + long tm_year; +}; + +struct device_system_info { + uint32_t device_id; + uint32_t vendor_id; + uint32_t socket_id; +}; + +enum gpu_mem_type { + 
GPU_MEM_TYPE_DEFAULT, + GPU_MEM_TYPE_RAS_PSP_RING, + GPU_MEM_TYPE_RAS_PSP_CMD, + GPU_MEM_TYPE_RAS_PSP_FENCE, + GPU_MEM_TYPE_RAS_TA_FW, + GPU_MEM_TYPE_RAS_TA_CMD, +}; + +struct ras_psp_sys_func { + int (*get_ras_psp_system_status)(struct ras_core_context *ras_core, + struct ras_psp_sys_status *status); + int (*get_ras_ta_init_param)(struct ras_core_context *ras_core, + struct ras_ta_init_param *ras_ta_param); +}; + +struct ras_sys_func { + int (*gpu_reset_lock)(struct ras_core_context *ras_core, + bool down, bool try); + int (*check_gpu_status)(struct ras_core_context *ras_core, + uint32_t *status); + int (*gen_seqno)(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t *seqno); + int (*async_handle_ras_event)(struct ras_core_context *ras_core, void *data); + int (*ras_notifier)(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data); + u64 (*get_utc_second_timestamp)(struct ras_core_context *ras_core); + int (*get_device_system_info)(struct ras_core_context *ras_core, + struct device_system_info *dev_info); + bool (*detect_ras_interrupt)(struct ras_core_context *ras_core); + int (*get_gpu_mem)(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); + int (*put_gpu_mem)(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); +}; + +struct ras_ecc_count { + uint64_t new_ce_count; + uint64_t total_ce_count; + uint64_t new_ue_count; + uint64_t total_ue_count; + uint64_t new_de_count; + uint64_t total_de_count; +}; + +struct ras_bank_ecc { + uint32_t nps; + uint64_t seq_no; + uint64_t status; + uint64_t ipid; + uint64_t addr; +}; + +struct ras_bank_ecc_node { + struct list_head node; + struct ras_bank_ecc ecc; +}; + +struct ras_aca_config { + u32 socket_num_per_hive; + u32 aid_num_per_socket; + u32 xcd_num_per_aid; +}; + +struct ras_mp1_config { + const struct ras_mp1_sys_func *mp1_sys_fn; +}; + +struct ras_nbio_config { + const struct ras_nbio_sys_func *nbio_sys_fn; +}; + +struct ras_psp_config { + const struct ras_psp_sys_func *psp_sys_fn; +}; + +struct ras_umc_config { + uint32_t umc_vram_type; +}; + +struct ras_eeprom_config { + const struct ras_eeprom_sys_func *eeprom_sys_fn; + int eeprom_record_threshold_config; + uint32_t eeprom_record_threshold_count; + void *eeprom_i2c_adapter; + u32 eeprom_i2c_addr; + u32 eeprom_i2c_port; + u16 max_i2c_read_len; + u16 max_i2c_write_len; +}; + +struct ras_core_config { + u32 aca_ip_version; + u32 umc_ip_version; + u32 mp1_ip_version; + u32 gfx_ip_version; + u32 nbio_ip_version; + u32 psp_ip_version; + + bool poison_supported; + bool ras_eeprom_supported; + const struct ras_sys_func *sys_fn; + + struct ras_aca_config aca_cfg; + struct ras_mp1_config mp1_cfg; + struct ras_nbio_config nbio_cfg; + struct ras_psp_config psp_cfg; + struct ras_eeprom_config eeprom_cfg; + struct ras_umc_config umc_cfg; +}; + +struct ras_core_context { + void *dev; + struct ras_core_config *config; + u32 socket_num_per_hive; + u32 aid_num_per_socket; + u32 xcd_num_per_aid; + int max_ue_banks_per_query; + int max_ce_banks_per_query; + struct ras_aca ras_aca; + + bool ras_eeprom_supported; + struct ras_eeprom_control ras_eeprom; + + struct ras_psp ras_psp; + struct ras_umc ras_umc; + struct ras_nbio ras_nbio; + struct ras_gfx ras_gfx; + struct ras_mp1 ras_mp1; + struct ras_process ras_proc; + struct ras_cmd_mgr ras_cmd; + struct ras_log_ring ras_log_ring; + + const struct ras_sys_func *sys_fn; + + /* is poison mode supported */ + bool 
poison_supported; + + bool is_rma; + bool is_initialized; + + struct kfifo de_seqno_fifo; + struct kfifo consumption_seqno_fifo; + spinlock_t seqno_lock; + + bool ras_core_enabled; +}; + +struct ras_core_context *ras_core_create(struct ras_core_config *init_config); +void ras_core_destroy(struct ras_core_context *ras_core); +int ras_core_sw_init(struct ras_core_context *ras_core); +int ras_core_sw_fini(struct ras_core_context *ras_core); +int ras_core_hw_init(struct ras_core_context *ras_core); +int ras_core_hw_fini(struct ras_core_context *ras_core); +bool ras_core_is_ready(struct ras_core_context *ras_core); +uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type); +uint64_t ras_core_get_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, bool pop); + +int ras_core_put_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t seqno); + +int ras_core_update_ecc_info(struct ras_core_context *ras_core); +int ras_core_query_block_ecc_data(struct ras_core_context *ras_core, + enum ras_block_id block, struct ras_ecc_count *ecc_count); + +bool ras_core_gpu_in_reset(struct ras_core_context *ras_core); +bool ras_core_gpu_is_rma(struct ras_core_context *ras_core); +bool ras_core_gpu_is_vf(struct ras_core_context *ras_core); +bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data); +int ras_core_handle_fatal_error(struct ras_core_context *ras_core); + +uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core); +const char *ras_core_get_ras_block_name(enum ras_block_id block_id); +int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core, + uint64_t timestamp, struct ras_time *tm); + +int ras_core_set_status(struct ras_core_context *ras_core, bool enable); +bool ras_core_is_enabled(struct ras_core_context *ras_core); +uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core); +int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core, + uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa); +bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core); +int ras_core_get_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); +int ras_core_put_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); +bool ras_core_check_safety_watermark(struct ras_core_context *ras_core); +int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core); +void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core); +void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core); +int ras_core_event_notify(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data); +int ras_core_get_device_system_info(struct ras_core_context *ras_core, + struct device_system_info *dev_info); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c new file mode 100644 index 000000000000..e433c70d2989 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c @@ -0,0 +1,672 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
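The lifecycle entry points declared above (ras_core_create/sw_init/hw_init and their fini/destroy counterparts) suggest a conventional layered bring-up. The ordering below is an assumption based on the names, not taken from this patch, and is shown only as a sketch with minimal error handling.

/* Hypothetical bring-up/tear-down pairing for a ras_core_context. */
static struct ras_core_context *example_ras_core_bringup(struct ras_core_config *cfg)
{
	struct ras_core_context *core = ras_core_create(cfg);

	if (!core)
		return NULL;

	if (ras_core_sw_init(core))
		goto err_destroy;
	if (ras_core_hw_init(core))
		goto err_sw_fini;

	return core;

err_sw_fini:
	ras_core_sw_fini(core);
err_destroy:
	ras_core_destroy(core);
	return NULL;
}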
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_aca.h" +#include "ras_aca_v1_0.h" +#include "ras_mp1_v13_0.h" + +#define ACA_MARK_FATAL_FLAG 0x100 +#define ACA_MARK_UE_READ_FLAG 0x1 + +#define blk_name(block_id) ras_core_get_ras_block_name(block_id) + +static struct aca_regs_dump { + const char *name; + int reg_idx; +} aca_regs[] = { + {"CONTROL", ACA_REG_IDX__CTL}, + {"STATUS", ACA_REG_IDX__STATUS}, + {"ADDR", ACA_REG_IDX__ADDR}, + {"MISC", ACA_REG_IDX__MISC0}, + {"CONFIG", ACA_REG_IDX__CONFG}, + {"IPID", ACA_REG_IDX__IPID}, + {"SYND", ACA_REG_IDX__SYND}, + {"DESTAT", ACA_REG_IDX__DESTAT}, + {"DEADDR", ACA_REG_IDX__DEADDR}, + {"CONTROL_MASK", ACA_REG_IDX__CTL_MASK}, +}; + + +static void aca_report_ecc_info(struct ras_core_context *ras_core, + u64 seq_no, u32 blk, u32 skt, u32 aid, + struct aca_aid_ecc *aid_ecc, + struct aca_bank_ecc *new_ecc) +{ + struct aca_ecc_count ecc_count = {0}; + + ecc_count.new_ue_count = new_ecc->ue_count; + ecc_count.new_de_count = new_ecc->de_count; + ecc_count.new_ce_count = new_ecc->ce_count; + if (blk == RAS_BLOCK_ID__GFX) { + struct aca_ecc_count *xcd_ecc; + int xcd_id; + + for (xcd_id = 0; xcd_id < aid_ecc->xcd.xcd_num; xcd_id++) { + xcd_ecc = &aid_ecc->xcd.xcd[xcd_id].ecc_err; + ecc_count.total_ue_count += xcd_ecc->total_ue_count; + ecc_count.total_de_count += xcd_ecc->total_de_count; + ecc_count.total_ce_count += xcd_ecc->total_ce_count; + } + } else { + ecc_count.total_ue_count = aid_ecc->ecc_err.total_ue_count; + ecc_count.total_de_count = aid_ecc->ecc_err.total_de_count; + ecc_count.total_ce_count = aid_ecc->ecc_err.total_ce_count; + } + + if (ecc_count.new_ue_count) { + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u new uncorrectable hardware errors detected in %s block\n", + seq_no, skt, aid, ecc_count.new_ue_count, blk_name(blk)); + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u uncorrectable hardware errors detected in total in %s block\n", + seq_no, skt, aid, ecc_count.total_ue_count, blk_name(blk)); + } + + if (ecc_count.new_de_count) { + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u new %s detected in %s block\n", + seq_no, skt, aid, ecc_count.new_de_count, + (blk == RAS_BLOCK_ID__UMC) ? 
+ "deferred hardware errors" : "poison consumption", + blk_name(blk)); + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u %s detected in total in %s block\n", + seq_no, skt, aid, ecc_count.total_de_count, + (blk == RAS_BLOCK_ID__UMC) ? + "deferred hardware errors" : "poison consumption", + blk_name(blk)); + } + + if (ecc_count.new_ce_count) { + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u new correctable hardware errors detected in %s block\n", + seq_no, skt, aid, ecc_count.new_ce_count, blk_name(blk)); + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u correctable hardware errors detected in total in %s block\n", + seq_no, skt, aid, ecc_count.total_ce_count, blk_name(blk)); + } +} + +static void aca_bank_log(struct ras_core_context *ras_core, + int idx, int total, struct aca_bank_reg *bank, + struct aca_bank_ecc *bank_ecc) +{ + int i; + + RAS_DEV_INFO(ras_core->dev, + "{%llu}" RAS_HW_ERR "Accelerator Check Architecture events logged\n", + bank->seq_no); + /* plus 1 for output format, e.g: ACA[08/08]: xxxx */ + for (i = 0; i < ARRAY_SIZE(aca_regs); i++) + RAS_DEV_INFO(ras_core->dev, + "{%llu}" RAS_HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n", + bank->seq_no, idx + 1, total, + aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]); +} + +static void aca_log_bank_data(struct ras_core_context *ras_core, + struct aca_bank_reg *bank, struct aca_bank_ecc *bank_ecc, + struct ras_log_batch_tag *batch) +{ + if (bank_ecc->ue_count) + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_UE, bank->regs, batch); + else if (bank_ecc->de_count) + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_DE, bank->regs, batch); + else + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_CE, bank->regs, batch); +} + +static int aca_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count) +{ + return ras_mp1_get_bank_count(ras_core, type, count); +} + +static bool aca_match_bank(struct aca_block *aca_blk, struct aca_bank_reg *bank) +{ + const struct aca_bank_hw_ops *bank_ops; + + if (!aca_blk->blk_info) + return false; + + bank_ops = &aca_blk->blk_info->bank_ops; + if (!bank_ops->bank_match) + return false; + + return bank_ops->bank_match(aca_blk, bank); +} + +static int aca_parse_bank(struct ras_core_context *ras_core, + struct aca_block *aca_blk, + struct aca_bank_reg *bank, + struct aca_bank_ecc *ecc) +{ + const struct aca_bank_hw_ops *bank_ops = &aca_blk->blk_info->bank_ops; + + if (!bank_ops || !bank_ops->bank_parse) + return -RAS_CORE_NOT_SUPPORTED; + + return bank_ops->bank_parse(ras_core, aca_blk, bank, ecc); +} + +static int aca_check_block_ecc_info(struct ras_core_context *ras_core, + struct aca_block *aca_blk, struct aca_ecc_info *info) +{ + if (info->socket_id >= aca_blk->ecc.socket_num_per_hive) { + RAS_DEV_ERR(ras_core->dev, + "Socket id (%d) is out of config! max:%u\n", + info->socket_id, aca_blk->ecc.socket_num_per_hive); + return -ENODATA; + } + + if (info->die_id >= aca_blk->ecc.socket[info->socket_id].aid_num) { + RAS_DEV_ERR(ras_core->dev, + "Die id (%d) is out of config! max:%u\n", + info->die_id, aca_blk->ecc.socket[info->socket_id].aid_num); + return -ENODATA; + } + + if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX) && + (info->xcd_id >= + aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num)) { + RAS_DEV_ERR(ras_core->dev, + "Xcd id (%d) is out of config! 
max:%u\n", + info->xcd_id, + aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num); + return -ENODATA; + } + + return 0; +} + +static int aca_log_bad_bank(struct ras_core_context *ras_core, + struct aca_block *aca_blk, struct aca_bank_reg *bank, + struct aca_bank_ecc *bank_ecc) +{ + struct aca_ecc_info *info; + struct aca_ecc_count *ecc_err; + struct aca_aid_ecc *aid_ecc; + int ret; + + info = &bank_ecc->bank_info; + + ret = aca_check_block_ecc_info(ras_core, aca_blk, info); + if (ret) + return ret; + + mutex_lock(&ras_core->ras_aca.aca_lock); + aid_ecc = &aca_blk->ecc.socket[info->socket_id].aid[info->die_id]; + if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX) + ecc_err = &aid_ecc->xcd.xcd[info->xcd_id].ecc_err; + else + ecc_err = &aid_ecc->ecc_err; + + ecc_err->new_ce_count += bank_ecc->ce_count; + ecc_err->total_ce_count += bank_ecc->ce_count; + ecc_err->new_ue_count += bank_ecc->ue_count; + ecc_err->total_ue_count += bank_ecc->ue_count; + ecc_err->new_de_count += bank_ecc->de_count; + ecc_err->total_de_count += bank_ecc->de_count; + mutex_unlock(&ras_core->ras_aca.aca_lock); + + if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC) && + bank_ecc->de_count) { + struct ras_bank_ecc ras_ecc = {0}; + + ras_ecc.nps = ras_core_get_curr_nps_mode(ras_core); + ras_ecc.addr = bank_ecc->bank_info.addr; + ras_ecc.ipid = bank_ecc->bank_info.ipid; + ras_ecc.status = bank_ecc->bank_info.status; + ras_ecc.seq_no = bank->seq_no; + + if (ras_core_gpu_in_reset(ras_core)) + ras_umc_log_bad_bank_pending(ras_core, &ras_ecc); + else + ras_umc_log_bad_bank(ras_core, &ras_ecc); + } + + aca_report_ecc_info(ras_core, + bank->seq_no, aca_blk->blk_info->ras_block_id, info->socket_id, info->die_id, + &aca_blk->ecc.socket[info->socket_id].aid[info->die_id], bank_ecc); + + return 0; +} + +static struct aca_block *aca_get_bank_aca_block(struct ras_core_context *ras_core, + struct aca_bank_reg *bank) +{ + int i = 0; + + for (i = 0; i < RAS_BLOCK_ID__LAST; i++) + if (aca_match_bank(&ras_core->ras_aca.aca_blk[i], bank)) + return &ras_core->ras_aca.aca_blk[i]; + + return NULL; +} + +static int aca_dump_bank(struct ras_core_context *ras_core, u32 ecc_type, + int idx, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + int i, ret, reg_cnt; + + reg_cnt = min_t(int, 16, ARRAY_SIZE(bank->regs)); + for (i = 0; i < reg_cnt; i++) { + ret = ras_mp1_dump_bank(ras_core, ecc_type, idx, i, &bank->regs[i]); + if (ret) + return ret; + } + + return 0; +} + +static uint64_t aca_get_bank_seqno(struct ras_core_context *ras_core, + enum ras_err_type err_type, struct aca_block *aca_blk, + struct aca_bank_ecc *bank_ecc) +{ + uint64_t seq_no = 0; + + if (bank_ecc->de_count) { + if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC) + seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_DE, true); + else + seq_no = ras_core_get_seqno(ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, true); + } else if (bank_ecc->ue_count) { + seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_UE, true); + } else { + seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_CE, true); + } + + return seq_no; +} + +static bool aca_dup_update_ue_in_fatal(struct ras_core_context *ras_core, + u32 ecc_type) +{ + struct ras_aca *aca = &ras_core->ras_aca; + + if (ecc_type != RAS_ERR_TYPE__UE) + return false; + + if (aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) { + if (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG) + return true; + + aca->ue_updated_mark |= ACA_MARK_UE_READ_FLAG; + } + + return false; +} + +void 
ras_aca_mark_fatal_flag(struct ras_core_context *ras_core) +{ + struct ras_aca *aca = &ras_core->ras_aca; + + if (!aca) + return; + + aca->ue_updated_mark |= ACA_MARK_FATAL_FLAG; +} + +void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core) +{ + struct ras_aca *aca = &ras_core->ras_aca; + + if (!aca) + return; + + if ((aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) && + (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG)) + aca->ue_updated_mark = 0; +} + +static int aca_banks_update(struct ras_core_context *ras_core, + u32 ecc_type, void *data) +{ + struct aca_bank_reg bank; + struct aca_block *aca_blk; + struct aca_bank_ecc bank_ecc; + struct ras_log_batch_tag *batch_tag = NULL; + u32 count = 0; + int ret = 0; + int i; + + mutex_lock(&ras_core->ras_aca.bank_op_lock); + + if (aca_dup_update_ue_in_fatal(ras_core, ecc_type)) + goto out; + + ret = aca_get_bank_count(ras_core, ecc_type, &count); + if (ret) + goto out; + + if (!count) + goto out; + + batch_tag = ras_log_ring_create_batch_tag(ras_core); + for (i = 0; i < count; i++) { + memset(&bank, 0, sizeof(bank)); + ret = aca_dump_bank(ras_core, ecc_type, i, &bank); + if (ret) + break; + + bank.ecc_type = ecc_type; + + memset(&bank_ecc, 0, sizeof(bank_ecc)); + aca_blk = aca_get_bank_aca_block(ras_core, &bank); + if (aca_blk) + ret = aca_parse_bank(ras_core, aca_blk, &bank, &bank_ecc); + + bank.seq_no = aca_get_bank_seqno(ras_core, ecc_type, aca_blk, &bank_ecc); + + aca_log_bank_data(ras_core, &bank, &bank_ecc, batch_tag); + aca_bank_log(ras_core, i, count, &bank, &bank_ecc); + + if (!ret && aca_blk) + ret = aca_log_bad_bank(ras_core, aca_blk, &bank, &bank_ecc); + + if (ret) + break; + } + ras_log_ring_destroy_batch_tag(ras_core, batch_tag); + +out: + mutex_unlock(&ras_core->ras_aca.bank_op_lock); + return ret; +} + +int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 type, void *data) +{ + /* Update aca bank to aca source error_cache first */ + return aca_banks_update(ras_core, type, data); +} + +static struct aca_block *ras_aca_get_block_handle(struct ras_core_context *ras_core, uint32_t blk) +{ + return &ras_core->ras_aca.aca_blk[blk]; +} + +static int ras_aca_clear_block_ecc_count(struct ras_core_context *ras_core, u32 blk) +{ + struct aca_block *aca_blk; + struct aca_aid_ecc *aid_ecc; + int skt, aid, xcd; + + mutex_lock(&ras_core->ras_aca.aca_lock); + aca_blk = ras_aca_get_block_handle(ras_core, blk); + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) { + aid_ecc = &aca_blk->ecc.socket[skt].aid[aid]; + if (blk == RAS_BLOCK_ID__GFX) { + for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++) + memset(&aid_ecc->xcd.xcd[xcd], + 0, sizeof(struct aca_xcd_ecc)); + } else { + memset(&aid_ecc->ecc_err, 0, sizeof(aid_ecc->ecc_err)); + } + } + } + mutex_unlock(&ras_core->ras_aca.aca_lock); + + return 0; +} + +int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core) +{ + enum ras_block_id blk; + int ret; + + for (blk = RAS_BLOCK_ID__UMC; blk < RAS_BLOCK_ID__LAST; blk++) { + ret = ras_aca_clear_block_ecc_count(ras_core, blk); + if (ret) + break; + } + + return ret; +} + +int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk) +{ + struct aca_block *aca_blk; + int skt, aid, xcd; + struct aca_ecc_count *ecc_err; + struct aca_aid_ecc *aid_ecc; + + mutex_lock(&ras_core->ras_aca.aca_lock); + aca_blk = ras_aca_get_block_handle(ras_core, blk); + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < 
aca_blk->ecc.socket[skt].aid_num; aid++) { + aid_ecc = &aca_blk->ecc.socket[skt].aid[aid]; + if (blk == RAS_BLOCK_ID__GFX) { + for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++) { + ecc_err = &aid_ecc->xcd.xcd[xcd].ecc_err; + ecc_err->new_ce_count = 0; + ecc_err->new_ue_count = 0; + ecc_err->new_de_count = 0; + } + } else { + ecc_err = &aid_ecc->ecc_err; + ecc_err->new_ce_count = 0; + ecc_err->new_ue_count = 0; + ecc_err->new_de_count = 0; + } + } + } + mutex_unlock(&ras_core->ras_aca.aca_lock); + + return 0; +} + +static int ras_aca_get_block_each_aid_ecc_count(struct ras_core_context *ras_core, + u32 blk, u32 skt, u32 aid, u32 xcd, + struct aca_ecc_count *ecc_count) +{ + struct aca_block *aca_blk; + struct aca_ecc_count *ecc_err; + + aca_blk = ras_aca_get_block_handle(ras_core, blk); + if (blk == RAS_BLOCK_ID__GFX) + ecc_err = &aca_blk->ecc.socket[skt].aid[aid].xcd.xcd[xcd].ecc_err; + else + ecc_err = &aca_blk->ecc.socket[skt].aid[aid].ecc_err; + + ecc_count->new_ce_count = ecc_err->new_ce_count; + ecc_count->total_ce_count = ecc_err->total_ce_count; + ecc_count->new_ue_count = ecc_err->new_ue_count; + ecc_count->total_ue_count = ecc_err->total_ue_count; + ecc_count->new_de_count = ecc_err->new_de_count; + ecc_count->total_de_count = ecc_err->total_de_count; + + return 0; +} + +static inline void _add_ecc_count(struct aca_ecc_count *des, struct aca_ecc_count *src) +{ + des->new_ce_count += src->new_ce_count; + des->total_ce_count += src->total_ce_count; + des->new_ue_count += src->new_ue_count; + des->total_ue_count += src->total_ue_count; + des->new_de_count += src->new_de_count; + des->total_de_count += src->total_de_count; +} + +static const struct ras_aca_ip_func *aca_get_ip_func( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(1, 0, 0): + return &ras_aca_func_v1_0; + default: + RAS_DEV_ERR(ras_core->dev, + "ACA ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core, + u32 blk, void *data) +{ + struct ras_ecc_count *err_data = (struct ras_ecc_count *)data; + struct aca_block *aca_blk; + int skt, aid, xcd; + struct aca_ecc_count ecc_xcd; + struct aca_ecc_count ecc_aid; + struct aca_ecc_count ecc; + + if (blk >= RAS_BLOCK_ID__LAST) + return -EINVAL; + + if (!err_data) + return -EINVAL; + + aca_blk = ras_aca_get_block_handle(ras_core, blk); + memset(&ecc, 0, sizeof(ecc)); + + mutex_lock(&ras_core->ras_aca.aca_lock); + if (blk == RAS_BLOCK_ID__GFX) { + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) { + memset(&ecc_aid, 0, sizeof(ecc_aid)); + for (xcd = 0; + xcd < aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num; + xcd++) { + memset(&ecc_xcd, 0, sizeof(ecc_xcd)); + if (ras_aca_get_block_each_aid_ecc_count(ras_core, + blk, skt, aid, xcd, &ecc_xcd)) + continue; + _add_ecc_count(&ecc_aid, &ecc_xcd); + } + _add_ecc_count(&ecc, &ecc_aid); + } + } + } else { + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) { + memset(&ecc_aid, 0, sizeof(ecc_aid)); + if (ras_aca_get_block_each_aid_ecc_count(ras_core, + blk, skt, aid, 0, &ecc_aid)) + continue; + _add_ecc_count(&ecc, &ecc_aid); + } + } + } + + err_data->new_ce_count = ecc.new_ce_count; + err_data->total_ce_count = ecc.total_ce_count; + err_data->new_ue_count = ecc.new_ue_count; + err_data->total_ue_count = ecc.total_ue_count; + 
err_data->new_de_count = ecc.new_de_count; + err_data->total_de_count = ecc.total_de_count; + mutex_unlock(&ras_core->ras_aca.aca_lock); + + return 0; +} + +int ras_aca_sw_init(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + struct ras_aca_config *aca_cfg = &ras_core->config->aca_cfg; + struct aca_block *aca_blk; + uint32_t socket_num_per_hive; + uint32_t aid_num_per_socket; + uint32_t xcd_num_per_aid; + int blk, skt, aid; + + socket_num_per_hive = aca_cfg->socket_num_per_hive; + aid_num_per_socket = aca_cfg->aid_num_per_socket; + xcd_num_per_aid = aca_cfg->xcd_num_per_aid; + + if (!xcd_num_per_aid || !aid_num_per_socket || + (socket_num_per_hive > MAX_SOCKET_NUM_PER_HIVE) || + (aid_num_per_socket > MAX_AID_NUM_PER_SOCKET) || + (xcd_num_per_aid > MAX_XCD_NUM_PER_AID)) { + RAS_DEV_ERR(ras_core->dev, "Invalid ACA system configuration: %d, %d, %d\n", + socket_num_per_hive, aid_num_per_socket, xcd_num_per_aid); + return -EINVAL; + } + + memset(ras_aca, 0, sizeof(*ras_aca)); + + for (blk = 0; blk < RAS_BLOCK_ID__LAST; blk++) { + aca_blk = &ras_aca->aca_blk[blk]; + aca_blk->ecc.socket_num_per_hive = socket_num_per_hive; + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + aca_blk->ecc.socket[skt].aid_num = aid_num_per_socket; + if (blk == RAS_BLOCK_ID__GFX) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) + aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num = + xcd_num_per_aid; + } + } + } + + mutex_init(&ras_aca->aca_lock); + mutex_init(&ras_aca->bank_op_lock); + + return 0; +} + +int ras_aca_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + + mutex_destroy(&ras_aca->aca_lock); + mutex_destroy(&ras_aca->bank_op_lock); + + return 0; +} + +int ras_aca_hw_init(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + struct aca_block *aca_blk; + const struct ras_aca_ip_func *ip_func; + int i; + + ras_aca->aca_ip_version = ras_core->config->aca_ip_version; + ip_func = aca_get_ip_func(ras_core, ras_aca->aca_ip_version); + if (!ip_func) + return -EINVAL; + + for (i = 0; i < ip_func->block_num; i++) { + aca_blk = &ras_aca->aca_blk[ip_func->block_info[i]->ras_block_id]; + aca_blk->blk_info = ip_func->block_info[i]; + } + + ras_aca->ue_updated_mark = 0; + + return 0; +} + +int ras_aca_hw_fini(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + + ras_aca->ue_updated_mark = 0; + + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h new file mode 100644 index 000000000000..f61b02a5f0fc --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
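ras_aca_get_block_ecc_count() above folds the per-socket/per-AID (and, for GFX, per-XCD) counters into a single ras_ecc_count. A short usage sketch for querying the aggregated UMC counters:

/* Example query of the aggregated UMC error counters. */
struct ras_ecc_count umc_ecc = {0};

if (!ras_aca_get_block_ecc_count(ras_core, RAS_BLOCK_ID__UMC, &umc_ecc))
	RAS_DEV_INFO(ras_core->dev, "UMC: %llu new CE, %llu new UE, %llu new DE\n",
		     umc_ecc.new_ce_count, umc_ecc.new_ue_count, umc_ecc.new_de_count);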
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_ACA_H__ +#define __RAS_ACA_H__ +#include "ras.h" + +#define MAX_SOCKET_NUM_PER_HIVE 8 +#define MAX_AID_NUM_PER_SOCKET 4 +#define MAX_XCD_NUM_PER_AID 2 +#define MAX_ACA_RAS_BLOCK 20 + +#define ACA_ERROR__UE_MASK (0x1 << RAS_ERR_TYPE__UE) +#define ACA_ERROR__CE_MASK (0x1 << RAS_ERR_TYPE__CE) +#define ACA_ERROR__DE_MASK (0x1 << RAS_ERR_TYPE__DE) + +enum ras_aca_reg_idx { + ACA_REG_IDX__CTL = 0, + ACA_REG_IDX__STATUS = 1, + ACA_REG_IDX__ADDR = 2, + ACA_REG_IDX__MISC0 = 3, + ACA_REG_IDX__CONFG = 4, + ACA_REG_IDX__IPID = 5, + ACA_REG_IDX__SYND = 6, + ACA_REG_IDX__DESTAT = 8, + ACA_REG_IDX__DEADDR = 9, + ACA_REG_IDX__CTL_MASK = 10, + ACA_REG_MAX_COUNT = 16, +}; + +struct ras_core_context; +struct aca_block; + +struct aca_bank_reg { + u32 ecc_type; + u64 seq_no; + u64 regs[ACA_REG_MAX_COUNT]; +}; + +enum aca_ecc_hwip { + ACA_ECC_HWIP__UNKNOWN = -1, + ACA_ECC_HWIP__PSP = 0, + ACA_ECC_HWIP__UMC, + ACA_ECC_HWIP__SMU, + ACA_ECC_HWIP__PCS_XGMI, + ACA_ECC_HWIP_COUNT, +}; + +struct aca_ecc_info { + int die_id; + int socket_id; + int xcd_id; + int hwid; + int mcatype; + uint64_t status; + uint64_t ipid; + uint64_t addr; +}; + +struct aca_bank_ecc { + struct aca_ecc_info bank_info; + u32 ce_count; + u32 ue_count; + u32 de_count; +}; + +struct aca_ecc_count { + u32 new_ce_count; + u32 total_ce_count; + u32 new_ue_count; + u32 total_ue_count; + u32 new_de_count; + u32 total_de_count; +}; + +struct aca_xcd_ecc { + struct aca_ecc_count ecc_err; +}; + +struct aca_aid_ecc { + union { + struct aca_xcd { + struct aca_xcd_ecc xcd[MAX_XCD_NUM_PER_AID]; + u32 xcd_num; + } xcd; + struct aca_ecc_count ecc_err; + }; +}; + +struct aca_socket_ecc { + struct aca_aid_ecc aid[MAX_AID_NUM_PER_SOCKET]; + u32 aid_num; +}; + +struct aca_block_ecc { + struct aca_socket_ecc socket[MAX_SOCKET_NUM_PER_HIVE]; + u32 socket_num_per_hive; +}; + +struct aca_bank_hw_ops { + bool (*bank_match)(struct aca_block *ras_blk, void *data); + int (*bank_parse)(struct ras_core_context *ras_core, + struct aca_block *aca_blk, void *data, void *buf); +}; + +struct aca_block_info { + char name[32]; + u32 ras_block_id; + enum aca_ecc_hwip hwip; + struct aca_bank_hw_ops bank_ops; + u32 mask; +}; + +struct aca_block { + const struct aca_block_info *blk_info; + struct aca_block_ecc ecc; +}; + +struct ras_aca_ip_func { + uint32_t block_num; + const struct aca_block_info **block_info; +}; + +struct ras_aca { + uint32_t aca_ip_version; + const struct ras_aca_ip_func *ip_func; + struct mutex aca_lock; + struct mutex bank_op_lock; + struct aca_block aca_blk[MAX_ACA_RAS_BLOCK]; + uint32_t ue_updated_mark; +}; + +int ras_aca_sw_init(struct ras_core_context *ras_core); +int ras_aca_sw_fini(struct ras_core_context *ras_core); +int ras_aca_hw_init(struct ras_core_context *ras_core); +int ras_aca_hw_fini(struct ras_core_context *ras_core); +int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core, u32 blk, void *data); +int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk); +int 
ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core); +int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 ecc_type, void *data); +void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core); +void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c new file mode 100644 index 000000000000..29df98948703 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c @@ -0,0 +1,379 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_aca.h" +#include "ras_core_status.h" +#include "ras_aca_v1_0.h" + +struct ras_aca_hwip { + int hwid; + int mcatype; +}; + +static struct ras_aca_hwip aca_hwid_mcatypes[ACA_ECC_HWIP_COUNT] = { + [ACA_ECC_HWIP__SMU] = {0x01, 0x01}, + [ACA_ECC_HWIP__PCS_XGMI] = {0x50, 0x00}, + [ACA_ECC_HWIP__UMC] = {0x96, 0x00}, +}; + +static int aca_decode_bank_info(struct aca_block *aca_blk, + struct aca_bank_reg *bank, struct aca_ecc_info *info) +{ + u64 ipid; + u32 instidhi, instidlo; + + ipid = bank->regs[ACA_REG_IDX__IPID]; + info->hwid = ACA_REG_IPID_HARDWAREID(ipid); + info->mcatype = ACA_REG_IPID_MCATYPE(ipid); + /* + * Unified DieID Format: SAASS. A:AID, S:Socket. + * Unified DieID[4:4] = InstanceId[0:0] + * Unified DieID[0:3] = InstanceIdHi[0:3] + */ + instidhi = ACA_REG_IPID_INSTANCEIDHI(ipid); + instidlo = ACA_REG_IPID_INSTANCEIDLO(ipid); + info->die_id = ((instidhi >> 2) & 0x03); + info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03); + + if ((aca_blk->blk_info->hwip == ACA_ECC_HWIP__SMU) && + (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX)) + info->xcd_id = + ((instidlo & GENMASK_ULL(31, 1)) == mmSMNAID_XCD0_MCA_SMU) ? 
0 : 1; + + return 0; +} + +static bool aca_check_bank_hwip(struct aca_bank_reg *bank, enum aca_ecc_hwip type) +{ + struct ras_aca_hwip *hwip; + int hwid, mcatype; + u64 ipid; + + if (!bank || (type == ACA_ECC_HWIP__UNKNOWN)) + return false; + + hwip = &aca_hwid_mcatypes[type]; + if (!hwip->hwid) + return false; + + ipid = bank->regs[ACA_REG_IDX__IPID]; + hwid = ACA_REG_IPID_HARDWAREID(ipid); + mcatype = ACA_REG_IPID_MCATYPE(ipid); + + return hwip->hwid == hwid && hwip->mcatype == mcatype; +} + +static bool aca_match_bank_default(struct aca_block *aca_blk, void *data) +{ + return aca_check_bank_hwip((struct aca_bank_reg *)data, aca_blk->blk_info->hwip); +} + +static bool aca_match_gfx_bank(struct aca_block *aca_blk, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + u32 instlo; + + if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip)) + return false; + + instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]); + instlo &= GENMASK_ULL(31, 1); + switch (instlo) { + case mmSMNAID_XCD0_MCA_SMU: + case mmSMNAID_XCD1_MCA_SMU: + case mmSMNXCD_XCD0_MCA_SMU: + return true; + default: + break; + } + + return false; +} + +static bool aca_match_sdma_bank(struct aca_block *aca_blk, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + /* CODE_SDMA0 - CODE_SDMA4, reference to smu driver if header file */ + static int sdma_err_codes[] = { 33, 34, 35, 36 }; + u32 instlo; + int errcode, i; + + if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip)) + return false; + + instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]); + instlo &= GENMASK_ULL(31, 1); + if (instlo != mmSMNAID_AID0_MCA_SMU) + return false; + + errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]); + errcode &= 0xff; + + /* Check SDMA error codes */ + for (i = 0; i < ARRAY_SIZE(sdma_err_codes); i++) { + if (errcode == sdma_err_codes[i]) + return true; + } + + return false; +} + +static bool aca_match_mmhub_bank(struct aca_block *aca_blk, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + /* reference to smu driver if header file */ + const int mmhub_err_codes[] = { + 0, 1, 2, 3, 4, /* CODE_DAGB0 - 4 */ + 5, 6, 7, 8, 9, /* CODE_EA0 - 4 */ + 10, /* CODE_UTCL2_ROUTER */ + 11, /* CODE_VML2 */ + 12, /* CODE_VML2_WALKER */ + 13, /* CODE_MMCANE */ + }; + u32 instlo; + int errcode, i; + + if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip)) + return false; + + instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]); + instlo &= GENMASK_ULL(31, 1); + if (instlo != mmSMNAID_AID0_MCA_SMU) + return false; + + errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]); + errcode &= 0xff; + + /* Check MMHUB error codes */ + for (i = 0; i < ARRAY_SIZE(mmhub_err_codes); i++) { + if (errcode == mmhub_err_codes[i]) + return true; + } + + return false; +} + +static bool aca_check_umc_de(struct ras_core_context *ras_core, uint64_t mc_umc_status) +{ + return (ras_core->poison_supported && + ACA_REG_STATUS_VAL(mc_umc_status) && + ACA_REG_STATUS_DEFERRED(mc_umc_status)); +} + +static bool aca_check_umc_ue(struct ras_core_context *ras_core, uint64_t mc_umc_status) +{ + if (aca_check_umc_de(ras_core, mc_umc_status)) + return false; + + return (ACA_REG_STATUS_VAL(mc_umc_status) && + (ACA_REG_STATUS_PCC(mc_umc_status) || + ACA_REG_STATUS_UC(mc_umc_status) || + ACA_REG_STATUS_TCC(mc_umc_status))); +} + +static bool aca_check_umc_ce(struct ras_core_context *ras_core, uint64_t mc_umc_status) +{ + if (aca_check_umc_de(ras_core, 
mc_umc_status)) + return false; + + return (ACA_REG_STATUS_VAL(mc_umc_status) && + (ACA_REG_STATUS_CECC(mc_umc_status) || + (ACA_REG_STATUS_UECC(mc_umc_status) && + ACA_REG_STATUS_UC(mc_umc_status) == 0) || + /* Identify data parity error in replay mode */ + ((ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0x5 || + ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0xb) && + !(aca_check_umc_ue(ras_core, mc_umc_status))))); +} + +static int aca_parse_umc_bank(struct ras_core_context *ras_core, + struct aca_block *ras_blk, void *data, void *buf) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf; + struct aca_ecc_info bank_info; + uint32_t ext_error_code; + uint64_t status0; + + status0 = bank->regs[ACA_REG_IDX__STATUS]; + if (!ACA_REG_STATUS_VAL(status0)) + return 0; + + memset(&bank_info, 0, sizeof(bank_info)); + aca_decode_bank_info(ras_blk, bank, &bank_info); + memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info)); + ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS]; + ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID]; + ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR]; + + ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status0); + + if (aca_check_umc_de(ras_core, status0)) + ecc->de_count = 1; + else if (aca_check_umc_ue(ras_core, status0)) + ecc->ue_count = ext_error_code ? + 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]); + else if (aca_check_umc_ce(ras_core, status0)) + ecc->ce_count = ext_error_code ? + 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]); + + return 0; +} + +static bool aca_check_bank_is_de(struct ras_core_context *ras_core, + uint64_t status) +{ + return (ACA_REG_STATUS_POISON(status) || + ACA_REG_STATUS_DEFERRED(status)); +} + +static int aca_parse_bank_default(struct ras_core_context *ras_core, + struct aca_block *ras_blk, + void *data, void *buf) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf; + struct aca_ecc_info bank_info; + u64 misc0 = bank->regs[ACA_REG_IDX__MISC0]; + u64 status = bank->regs[ACA_REG_IDX__STATUS]; + + memset(&bank_info, 0, sizeof(bank_info)); + aca_decode_bank_info(ras_blk, bank, &bank_info); + memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info)); + ecc->bank_info.status = status; + ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID]; + ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR]; + + if (aca_check_bank_is_de(ras_core, status)) { + ecc->de_count = 1; + } else { + if (bank->ecc_type == RAS_ERR_TYPE__UE) + ecc->ue_count = 1; + else if (bank->ecc_type == RAS_ERR_TYPE__CE) + ecc->ce_count = ACA_REG_MISC0_ERRCNT(misc0); + } + + return 0; +} + +static int aca_parse_xgmi_bank(struct ras_core_context *ras_core, + struct aca_block *ras_blk, + void *data, void *buf) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf; + struct aca_ecc_info bank_info; + u64 status, count; + int ext_error_code; + + memset(&bank_info, 0, sizeof(bank_info)); + aca_decode_bank_info(ras_blk, bank, &bank_info); + memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info)); + ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS]; + ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID]; + ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR]; + + status = bank->regs[ACA_REG_IDX__STATUS]; + ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status); + + count = ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]); + if (bank->ecc_type == RAS_ERR_TYPE__UE) { + if 
(ext_error_code != 0 && ext_error_code != 9) + count = 0ULL; + ecc->ue_count = count; + } else if (bank->ecc_type == RAS_ERR_TYPE__CE) { + count = ext_error_code == 6 ? count : 0ULL; + ecc->ce_count = count; + } + + return 0; +} + +static const struct aca_block_info aca_v1_0_umc = { + .name = "umc", + .ras_block_id = RAS_BLOCK_ID__UMC, + .hwip = ACA_ECC_HWIP__UMC, + .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK | ACA_ERROR__DE_MASK, + .bank_ops = { + .bank_match = aca_match_bank_default, + .bank_parse = aca_parse_umc_bank, + }, +}; + +static const struct aca_block_info aca_v1_0_gfx = { + .name = "gfx", + .ras_block_id = RAS_BLOCK_ID__GFX, + .hwip = ACA_ECC_HWIP__SMU, + .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK, + .bank_ops = { + .bank_match = aca_match_gfx_bank, + .bank_parse = aca_parse_bank_default, + }, +}; + +static const struct aca_block_info aca_v1_0_sdma = { + .name = "sdma", + .ras_block_id = RAS_BLOCK_ID__SDMA, + .hwip = ACA_ECC_HWIP__SMU, + .mask = ACA_ERROR__UE_MASK, + .bank_ops = { + .bank_match = aca_match_sdma_bank, + .bank_parse = aca_parse_bank_default, + }, +}; + +static const struct aca_block_info aca_v1_0_mmhub = { + .name = "mmhub", + .ras_block_id = RAS_BLOCK_ID__MMHUB, + .hwip = ACA_ECC_HWIP__SMU, + .mask = ACA_ERROR__UE_MASK, + .bank_ops = { + .bank_match = aca_match_mmhub_bank, + .bank_parse = aca_parse_bank_default, + }, +}; + +static const struct aca_block_info aca_v1_0_xgmi = { + .name = "xgmi", + .ras_block_id = RAS_BLOCK_ID__XGMI_WAFL, + .hwip = ACA_ECC_HWIP__PCS_XGMI, + .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK, + .bank_ops = { + .bank_match = aca_match_bank_default, + .bank_parse = aca_parse_xgmi_bank, + }, +}; + +static const struct aca_block_info *aca_block_info_v1_0[] = { + &aca_v1_0_umc, + &aca_v1_0_gfx, + &aca_v1_0_sdma, + &aca_v1_0_mmhub, + &aca_v1_0_xgmi, +}; + +const struct ras_aca_ip_func ras_aca_func_v1_0 = { + .block_num = ARRAY_SIZE(aca_block_info_v1_0), + .block_info = aca_block_info_v1_0, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h new file mode 100644 index 000000000000..40e5d94b037f --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_ACA_V1_0_H__ +#define __RAS_ACA_V1_0_H__ +#include "ras.h" + +#define ACA__REG__FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l) +#define ACA_REG_STATUS_VAL(x) ACA__REG__FIELD(x, 63, 63) +#define ACA_REG_STATUS_OVERFLOW(x) ACA__REG__FIELD(x, 62, 62) +#define ACA_REG_STATUS_UC(x) ACA__REG__FIELD(x, 61, 61) +#define ACA_REG_STATUS_EN(x) ACA__REG__FIELD(x, 60, 60) +#define ACA_REG_STATUS_MISCV(x) ACA__REG__FIELD(x, 59, 59) +#define ACA_REG_STATUS_ADDRV(x) ACA__REG__FIELD(x, 58, 58) +#define ACA_REG_STATUS_PCC(x) ACA__REG__FIELD(x, 57, 57) +#define ACA_REG_STATUS_ERRCOREIDVAL(x) ACA__REG__FIELD(x, 56, 56) +#define ACA_REG_STATUS_TCC(x) ACA__REG__FIELD(x, 55, 55) +#define ACA_REG_STATUS_SYNDV(x) ACA__REG__FIELD(x, 53, 53) +#define ACA_REG_STATUS_CECC(x) ACA__REG__FIELD(x, 46, 46) +#define ACA_REG_STATUS_UECC(x) ACA__REG__FIELD(x, 45, 45) +#define ACA_REG_STATUS_DEFERRED(x) ACA__REG__FIELD(x, 44, 44) +#define ACA_REG_STATUS_POISON(x) ACA__REG__FIELD(x, 43, 43) +#define ACA_REG_STATUS_SCRUB(x) ACA__REG__FIELD(x, 40, 40) +#define ACA_REG_STATUS_ERRCOREID(x) ACA__REG__FIELD(x, 37, 32) +#define ACA_REG_STATUS_ADDRLSB(x) ACA__REG__FIELD(x, 29, 24) +#define ACA_REG_STATUS_ERRORCODEEXT(x) ACA__REG__FIELD(x, 21, 16) +#define ACA_REG_STATUS_ERRORCODE(x) ACA__REG__FIELD(x, 15, 0) + +#define ACA_REG_IPID_MCATYPE(x) ACA__REG__FIELD(x, 63, 48) +#define ACA_REG_IPID_INSTANCEIDHI(x) ACA__REG__FIELD(x, 47, 44) +#define ACA_REG_IPID_HARDWAREID(x) ACA__REG__FIELD(x, 43, 32) +#define ACA_REG_IPID_INSTANCEIDLO(x) ACA__REG__FIELD(x, 31, 0) + +#define ACA_REG_MISC0_VALID(x) ACA__REG__FIELD(x, 63, 63) +#define ACA_REG_MISC0_OVRFLW(x) ACA__REG__FIELD(x, 48, 48) +#define ACA_REG_MISC0_ERRCNT(x) ACA__REG__FIELD(x, 43, 32) + +#define ACA_REG_SYND_ERRORINFORMATION(x) ACA__REG__FIELD(x, 17, 0) + +/* NOTE: The following codes refers to the smu header file */ +#define ACA_EXTERROR_CODE_CE 0x3a +#define ACA_EXTERROR_CODE_FAULT 0x3b + +#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */ +#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */ +#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */ +#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */ + +extern const struct ras_aca_ip_func ras_aca_func_v1_0; +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c new file mode 100644 index 000000000000..94e6d7420d94 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_cmd.h" + +#define RAS_CMD_MAJOR_VERSION 6 +#define RAS_CMD_MINOR_VERSION 0 +#define RAS_CMD_VERSION (((RAS_CMD_MAJOR_VERSION) << 10) | (RAS_CMD_MINOR_VERSION)) + +static int ras_cmd_add_device(struct ras_core_context *ras_core) +{ + INIT_LIST_HEAD(&ras_core->ras_cmd.head); + ras_core->ras_cmd.ras_core = ras_core; + ras_core->ras_cmd.dev_handle = (uintptr_t)ras_core ^ RAS_CMD_DEV_HANDLE_MAGIC; + return 0; +} + +static int ras_cmd_remove_device(struct ras_core_context *ras_core) +{ + memset(&ras_core->ras_cmd, 0, sizeof(ras_core->ras_cmd)); + return 0; +} + +static int ras_get_block_ecc_info(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_block_ecc_info_req *input_data = + (struct ras_cmd_block_ecc_info_req *)cmd->input_buff_raw; + struct ras_cmd_block_ecc_info_rsp *output_data = + (struct ras_cmd_block_ecc_info_rsp *)cmd->output_buff_raw; + struct ras_ecc_count err_data; + int ret; + + if (cmd->input_size != sizeof(struct ras_cmd_block_ecc_info_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + memset(&err_data, 0, sizeof(err_data)); + ret = ras_aca_get_block_ecc_count(ras_core, input_data->block_id, &err_data); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + output_data->ce_count = err_data.total_ce_count; + output_data->ue_count = err_data.total_ue_count; + output_data->de_count = err_data.total_de_count; + + cmd->output_size = sizeof(struct ras_cmd_block_ecc_info_rsp); + return RAS_CMD__SUCCESS; +} + +static void ras_cmd_update_bad_page_info(struct ras_cmd_bad_page_record *ras_cmd_record, + struct eeprom_umc_record *record) +{ + ras_cmd_record->retired_page = record->cur_nps_retired_row_pfn; + ras_cmd_record->ts = record->ts; + ras_cmd_record->err_type = record->err_type; + ras_cmd_record->mem_channel = record->mem_channel; + ras_cmd_record->mcumc_id = record->mcumc_id; + ras_cmd_record->address = record->address; + ras_cmd_record->bank = record->bank; + ras_cmd_record->valid = 1; +} + +static int ras_cmd_get_group_bad_pages(struct ras_core_context *ras_core, + uint32_t group_index, struct ras_cmd_bad_pages_info_rsp *output_data) +{ + struct eeprom_umc_record record; + struct ras_cmd_bad_page_record *ras_cmd_record; + uint32_t i = 0, bp_cnt = 0, group_cnt = 0; + + output_data->bp_in_group = 0; + output_data->group_index = 0; + + bp_cnt = ras_umc_get_badpage_count(ras_core); + if (bp_cnt) { + output_data->group_index = group_index; + group_cnt = bp_cnt / RAS_CMD_MAX_BAD_PAGES_PER_GROUP + + ((bp_cnt % RAS_CMD_MAX_BAD_PAGES_PER_GROUP) ? 
1 : 0); + + if (group_index >= group_cnt) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + i = group_index * RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + for (; + i < bp_cnt && output_data->bp_in_group < RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + i++) { + if (ras_umc_get_badpage_record(ras_core, i, &record)) + return RAS_CMD__ERROR_GENERIC; + + ras_cmd_record = &output_data->records[i % RAS_CMD_MAX_BAD_PAGES_PER_GROUP]; + + memset(ras_cmd_record, 0, sizeof(*ras_cmd_record)); + ras_cmd_update_bad_page_info(ras_cmd_record, &record); + output_data->bp_in_group++; + } + } + output_data->bp_total_cnt = bp_cnt; + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_bad_pages(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_bad_pages_info_req *input_data = + (struct ras_cmd_bad_pages_info_req *)cmd->input_buff_raw; + struct ras_cmd_bad_pages_info_rsp *output_data = + (struct ras_cmd_bad_pages_info_rsp *)cmd->output_buff_raw; + int ret; + + if (cmd->input_size != sizeof(struct ras_cmd_bad_pages_info_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + ret = ras_cmd_get_group_bad_pages(ras_core, input_data->group_index, output_data); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + output_data->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_bad_pages_info_rsp); + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_clear_bad_page_info(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + if (cmd->input_size != sizeof(struct ras_cmd_dev_handle)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if (ras_eeprom_reset_table(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + if (ras_umc_clean_badpage_data(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_reset_all_error_counts(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + if (cmd->input_size != sizeof(struct ras_cmd_dev_handle)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if (ras_aca_clear_all_blocks_ecc_count(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + if (ras_umc_clear_logged_ecc(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_cper_snapshot(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_cper_snapshot_rsp *output_data = + (struct ras_cmd_cper_snapshot_rsp *)cmd->output_buff_raw; + struct ras_log_batch_overview overview; + + if (cmd->input_size != sizeof(struct ras_cmd_cper_snapshot_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + ras_log_ring_get_batch_overview(ras_core, &overview); + + output_data->total_cper_num = overview.logged_batch_count; + output_data->start_cper_id = overview.first_batch_id; + output_data->latest_cper_id = overview.last_batch_id; + + output_data->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_cper_snapshot_rsp); + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_cper_records(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_cper_record_req *req = + (struct ras_cmd_cper_record_req *)cmd->input_buff_raw; + struct ras_cmd_cper_record_rsp *rsp = + (struct ras_cmd_cper_record_rsp *)cmd->output_buff_raw; + struct ras_log_info *trace[MAX_RECORD_PER_BATCH] = {0}; + struct ras_log_batch_overview overview; + uint32_t offset = 0, real_data_len = 0; + uint64_t batch_id; + uint8_t *buffer; + int ret = 0, i, count; + + if (cmd->input_size != sizeof(struct ras_cmd_cper_record_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if 
(!req->buf_size || !req->buf_ptr || !req->cper_num) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + buffer = kzalloc(req->buf_size, GFP_KERNEL); + if (!buffer) + return RAS_CMD__ERROR_GENERIC; + + ras_log_ring_get_batch_overview(ras_core, &overview); + for (i = 0; i < req->cper_num; i++) { + batch_id = req->cper_start_id + i; + if (batch_id >= overview.last_batch_id) + break; + + count = ras_log_ring_get_batch_records(ras_core, batch_id, trace, + ARRAY_SIZE(trace)); + if (count > 0) { + ret = ras_cper_generate_cper(ras_core, trace, count, + &buffer[offset], req->buf_size - offset, &real_data_len); + if (ret) + break; + + offset += real_data_len; + } + } + + if ((ret && (ret != -ENOMEM)) || + copy_to_user(u64_to_user_ptr(req->buf_ptr), buffer, offset)) { + kfree(buffer); + return RAS_CMD__ERROR_GENERIC; + } + + rsp->real_data_size = offset; + rsp->real_cper_num = i; + rsp->remain_num = (ret == -ENOMEM) ? (req->cper_num - i) : 0; + rsp->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_cper_record_rsp); + + kfree(buffer); + + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_batch_trace_snapshot(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_batch_trace_snapshot_rsp *rsp = + (struct ras_cmd_batch_trace_snapshot_rsp *)cmd->output_buff_raw; + struct ras_log_batch_overview overview; + + + if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_snapshot_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + ras_log_ring_get_batch_overview(ras_core, &overview); + + rsp->total_batch_num = overview.logged_batch_count; + rsp->start_batch_id = overview.first_batch_id; + rsp->latest_batch_id = overview.last_batch_id; + rsp->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_batch_trace_snapshot_rsp); + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_batch_trace_records(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_batch_trace_record_req *input_data = + (struct ras_cmd_batch_trace_record_req *)cmd->input_buff_raw; + struct ras_cmd_batch_trace_record_rsp *output_data = + (struct ras_cmd_batch_trace_record_rsp *)cmd->output_buff_raw; + struct ras_log_batch_overview overview; + struct ras_log_info *trace_arry[MAX_RECORD_PER_BATCH] = {0}; + struct ras_log_info *record; + int i, j, count = 0, offset = 0; + uint64_t id; + bool completed = false; + + if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_record_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if ((!input_data->batch_num) || (input_data->batch_num > RAS_CMD_MAX_BATCH_NUM)) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + ras_log_ring_get_batch_overview(ras_core, &overview); + if ((input_data->start_batch_id < overview.first_batch_id) || + (input_data->start_batch_id >= overview.last_batch_id)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + for (i = 0; i < input_data->batch_num; i++) { + id = input_data->start_batch_id + i; + if (id >= overview.last_batch_id) { + completed = true; + break; + } + + count = ras_log_ring_get_batch_records(ras_core, + id, trace_arry, ARRAY_SIZE(trace_arry)); + if (count > 0) { + if ((offset + count) > RAS_CMD_MAX_TRACE_NUM) + break; + for (j = 0; j < count; j++) { + record = &output_data->records[offset + j]; + record->seqno = trace_arry[j]->seqno; + record->timestamp = trace_arry[j]->timestamp; + record->event = trace_arry[j]->event; + memcpy(&record->aca_reg, + &trace_arry[j]->aca_reg, sizeof(trace_arry[j]->aca_reg)); + } + } else { + count = 0; + } + + output_data->batchs[i].batch_id = 
id; + output_data->batchs[i].offset = offset; + output_data->batchs[i].trace_num = count; + offset += count; + } + + output_data->start_batch_id = input_data->start_batch_id; + output_data->real_batch_num = i; + output_data->remain_num = completed ? 0 : (input_data->batch_num - i); + output_data->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_batch_trace_record_rsp); + + return RAS_CMD__SUCCESS; +} + +static enum ras_ta_block __get_ras_ta_block(enum ras_block_id block) +{ + switch (block) { + case RAS_BLOCK_ID__UMC: + return RAS_TA_BLOCK__UMC; + case RAS_BLOCK_ID__SDMA: + return RAS_TA_BLOCK__SDMA; + case RAS_BLOCK_ID__GFX: + return RAS_TA_BLOCK__GFX; + case RAS_BLOCK_ID__MMHUB: + return RAS_TA_BLOCK__MMHUB; + case RAS_BLOCK_ID__ATHUB: + return RAS_TA_BLOCK__ATHUB; + case RAS_BLOCK_ID__PCIE_BIF: + return RAS_TA_BLOCK__PCIE_BIF; + case RAS_BLOCK_ID__HDP: + return RAS_TA_BLOCK__HDP; + case RAS_BLOCK_ID__XGMI_WAFL: + return RAS_TA_BLOCK__XGMI_WAFL; + case RAS_BLOCK_ID__DF: + return RAS_TA_BLOCK__DF; + case RAS_BLOCK_ID__SMN: + return RAS_TA_BLOCK__SMN; + case RAS_BLOCK_ID__SEM: + return RAS_TA_BLOCK__SEM; + case RAS_BLOCK_ID__MP0: + return RAS_TA_BLOCK__MP0; + case RAS_BLOCK_ID__MP1: + return RAS_TA_BLOCK__MP1; + case RAS_BLOCK_ID__FUSE: + return RAS_TA_BLOCK__FUSE; + case RAS_BLOCK_ID__MCA: + return RAS_TA_BLOCK__MCA; + case RAS_BLOCK_ID__VCN: + return RAS_TA_BLOCK__VCN; + case RAS_BLOCK_ID__JPEG: + return RAS_TA_BLOCK__JPEG; + default: + return RAS_TA_BLOCK__UMC; + } +} + +static enum ras_ta_error_type __get_ras_ta_err_type(enum ras_ecc_err_type error) +{ + switch (error) { + case RAS_ECC_ERR__NONE: + return RAS_TA_ERROR__NONE; + case RAS_ECC_ERR__PARITY: + return RAS_TA_ERROR__PARITY; + case RAS_ECC_ERR__SINGLE_CORRECTABLE: + return RAS_TA_ERROR__SINGLE_CORRECTABLE; + case RAS_ECC_ERR__MULTI_UNCORRECTABLE: + return RAS_TA_ERROR__MULTI_UNCORRECTABLE; + case RAS_ECC_ERR__POISON: + return RAS_TA_ERROR__POISON; + default: + return RAS_TA_ERROR__NONE; + } +} + +static int ras_cmd_inject_error(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_inject_error_req *req = + (struct ras_cmd_inject_error_req *)cmd->input_buff_raw; + struct ras_cmd_inject_error_rsp *output_data = + (struct ras_cmd_inject_error_rsp *)cmd->output_buff_raw; + int ret = 0; + struct ras_ta_trigger_error_input block_info = { + .block_id = __get_ras_ta_block(req->block_id), + .sub_block_index = req->subblock_id, + .inject_error_type = __get_ras_ta_err_type(req->error_type), + .address = req->address, + .value = req->method, + }; + + ret = ras_psp_trigger_error(ras_core, &block_info, req->instance_mask); + if (!ret) { + output_data->version = 0; + output_data->address = block_info.address; + cmd->output_size = sizeof(struct ras_cmd_inject_error_rsp); + } else { + RAS_DEV_ERR(ras_core->dev, "ras inject block %u failed %d\n", req->block_id, ret); + ret = RAS_CMD__ERROR_ACCESS_DENIED; + } + + return ret; +} + +static struct ras_cmd_func_map ras_cmd_maps[] = { + {RAS_CMD__INJECT_ERROR, ras_cmd_inject_error}, + {RAS_CMD__GET_BLOCK_ECC_STATUS, ras_get_block_ecc_info}, + {RAS_CMD__GET_BAD_PAGES, ras_cmd_get_bad_pages}, + {RAS_CMD__CLEAR_BAD_PAGE_INFO, ras_cmd_clear_bad_page_info}, + {RAS_CMD__RESET_ALL_ERROR_COUNTS, ras_cmd_reset_all_error_counts}, + {RAS_CMD__GET_CPER_SNAPSHOT, ras_cmd_get_cper_snapshot}, + {RAS_CMD__GET_CPER_RECORD, ras_cmd_get_cper_records}, + {RAS_CMD__GET_BATCH_TRACE_SNAPSHOT, ras_cmd_get_batch_trace_snapshot}, + {RAS_CMD__GET_BATCH_TRACE_RECORD, 
ras_cmd_get_batch_trace_records}, +}; + +int rascore_handle_cmd(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_func_map *ras_cmd = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(ras_cmd_maps); i++) { + if (cmd->cmd_id == ras_cmd_maps[i].cmd_id) { + ras_cmd = &ras_cmd_maps[i]; + break; + } + } + + if (!ras_cmd) + return RAS_CMD__ERROR_UKNOWN_CMD; + + return ras_cmd->func(ras_core, cmd, data); +} + +int ras_cmd_init(struct ras_core_context *ras_core) +{ + return ras_cmd_add_device(ras_core); +} + +int ras_cmd_fini(struct ras_core_context *ras_core) +{ + ras_cmd_remove_device(ras_core); + return 0; +} + +int ras_cmd_query_interface_info(struct ras_core_context *ras_core, + struct ras_query_interface_info_rsp *rsp) +{ + rsp->ras_cmd_major_ver = RAS_CMD_MAJOR_VERSION; + rsp->ras_cmd_minor_ver = RAS_CMD_MINOR_VERSION; + + return 0; +} + +int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core, + uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr) +{ + struct umc_bank_addr umc_bank = {0}; + int ret; + + ret = ras_umc_translate_soc_pa_and_bank(ras_core, &soc_pa, &umc_bank, false); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + bank_addr->stack_id = umc_bank.stack_id; + bank_addr->bank_group = umc_bank.bank_group; + bank_addr->bank = umc_bank.bank; + bank_addr->row = umc_bank.row; + bank_addr->column = umc_bank.column; + bank_addr->channel = umc_bank.channel; + bank_addr->subchannel = umc_bank.subchannel; + + return 0; +} + +int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core, + struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa) +{ + struct umc_bank_addr umc_bank = {0}; + + umc_bank.stack_id = bank_addr.stack_id; + umc_bank.bank_group = bank_addr.bank_group; + umc_bank.bank = bank_addr.bank; + umc_bank.row = bank_addr.row; + umc_bank.column = bank_addr.column; + umc_bank.channel = bank_addr.channel; + umc_bank.subchannel = bank_addr.subchannel; + + return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, &umc_bank, true); +} + +uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core) +{ + return ras_core->ras_cmd.dev_handle; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h new file mode 100644 index 000000000000..48a0715eb821 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h @@ -0,0 +1,426 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_CMD_H__ +#define __RAS_CMD_H__ +#include "ras.h" +#include "ras_eeprom.h" +#include "ras_log_ring.h" +#include "ras_cper.h" + +#define RAS_CMD_DEV_HANDLE_MAGIC 0xFEEDAD00UL + +#define RAS_CMD_MAX_IN_SIZE 256 +#define RAS_CMD_MAX_GPU_NUM 32 +#define RAS_CMD_MAX_BAD_PAGES_PER_GROUP 32 + +/* position of instance value in sub_block_index of + * ta_ras_trigger_error_input, the sub block uses lower 12 bits + */ +#define RAS_TA_INST_MASK 0xfffff000 +#define RAS_TA_INST_SHIFT 0xc + +enum ras_cmd_interface_type { + RAS_CMD_INTERFACE_TYPE_NONE, + RAS_CMD_INTERFACE_TYPE_AMDGPU, + RAS_CMD_INTERFACE_TYPE_VF, + RAS_CMD_INTERFACE_TYPE_PF, +}; + +enum ras_cmd_id_range { + RAS_CMD_ID_COMMON_START = 0, + RAS_CMD_ID_COMMON_END = 0x10000, + RAS_CMD_ID_AMDGPU_START = RAS_CMD_ID_COMMON_END, + RAS_CMD_ID_AMDGPU_END = 0x20000, + RAS_CMD_ID_MXGPU_START = RAS_CMD_ID_AMDGPU_END, + RAS_CMD_ID_MXGPU_END = 0x30000, + RAS_CMD_ID_MXGPU_VF_START = RAS_CMD_ID_MXGPU_END, + RAS_CMD_ID_MXGPU_VF_END = 0x40000, +}; + +enum ras_cmd_id { + RAS_CMD__BEGIN = RAS_CMD_ID_COMMON_START, + RAS_CMD__QUERY_INTERFACE_INFO, + RAS_CMD__GET_DEVICES_INFO, + RAS_CMD__GET_BLOCK_ECC_STATUS, + RAS_CMD__INJECT_ERROR, + RAS_CMD__GET_BAD_PAGES, + RAS_CMD__CLEAR_BAD_PAGE_INFO, + RAS_CMD__RESET_ALL_ERROR_COUNTS, + RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES, + RAS_CMD__TRANSLATE_FB_ADDRESS, + RAS_CMD__GET_LINK_TOPOLOGY, + RAS_CMD__GET_CPER_SNAPSHOT, + RAS_CMD__GET_CPER_RECORD, + RAS_CMD__GET_BATCH_TRACE_SNAPSHOT, + RAS_CMD__GET_BATCH_TRACE_RECORD, + RAS_CMD__SUPPORTED_MAX = RAS_CMD_ID_COMMON_END, +}; + +enum ras_cmd_response { + RAS_CMD__SUCCESS = 0, + RAS_CMD__SUCCESS_EXEED_BUFFER, + RAS_CMD__ERROR_UKNOWN_CMD, + RAS_CMD__ERROR_INVALID_CMD, + RAS_CMD__ERROR_VERSION, + RAS_CMD__ERROR_INVALID_INPUT_SIZE, + RAS_CMD__ERROR_INVALID_INPUT_DATA, + RAS_CMD__ERROR_DRV_INIT_FAIL, + RAS_CMD__ERROR_ACCESS_DENIED, + RAS_CMD__ERROR_GENERIC, + RAS_CMD__ERROR_TIMEOUT, +}; + +enum ras_error_type { + RAS_TYPE_ERROR__NONE = 0, + RAS_TYPE_ERROR__PARITY = 1, + RAS_TYPE_ERROR__SINGLE_CORRECTABLE = 2, + RAS_TYPE_ERROR__MULTI_UNCORRECTABLE = 4, + RAS_TYPE_ERROR__POISON = 8, +}; + +struct ras_core_context; +struct ras_cmd_ctx; + +struct ras_cmd_mgr { + struct list_head head; + struct ras_core_context *ras_core; + uint64_t dev_handle; +}; + +struct ras_cmd_func_map { + uint32_t cmd_id; + int (*func)(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data); +}; + +struct ras_device_bdf { + union { + struct { + uint32_t function : 3; + uint32_t device : 5; + uint32_t bus : 8; + uint32_t domain : 16; + }; + uint32_t u32_all; + }; +}; + +struct ras_cmd_param { + uint32_t idx_vf; + void *data; +}; + +#pragma pack(push, 8) +struct ras_cmd_ctx { + uint32_t magic; + union { + struct { + uint16_t ras_cmd_minor_ver : 10; + uint16_t ras_cmd_major_ver : 6; + }; + uint16_t ras_cmd_ver; + }; + union { + struct { + uint16_t plat_major_ver : 10; + uint16_t plat_minor_ver : 6; + }; + uint16_t plat_ver; + }; + uint32_t cmd_id; + uint32_t cmd_res; + uint32_t input_size; + uint32_t output_size; + uint32_t output_buf_size; + uint32_t reserved[5]; + uint8_t input_buff_raw[RAS_CMD_MAX_IN_SIZE]; + uint8_t output_buff_raw[]; +}; + +struct ras_cmd_dev_handle { + uint64_t dev_handle; +}; + 
+struct ras_cmd_block_ecc_info_req { + struct ras_cmd_dev_handle dev; + uint32_t block_id; + uint32_t subblock_id; + uint32_t reserved[4]; +}; + +struct ras_cmd_block_ecc_info_rsp { + uint32_t version; + uint32_t ce_count; + uint32_t ue_count; + uint32_t de_count; + uint32_t reserved[6]; +}; + +struct ras_cmd_inject_error_req { + struct ras_cmd_dev_handle dev; + uint32_t block_id; + uint32_t subblock_id; + uint64_t address; + uint32_t error_type; + uint32_t instance_mask; + union { + struct { + /* vf index */ + uint64_t vf_idx : 6; + /* method of error injection. i.e persistent, coherent etc */ + uint64_t method : 10; + uint64_t rsv : 48; + }; + uint64_t value; + }; + uint32_t reserved[8]; +}; + +struct ras_cmd_inject_error_rsp { + uint32_t version; + uint32_t reserved[5]; + uint64_t address; +}; + +struct ras_cmd_dev_info { + uint64_t dev_handle; + uint32_t location_id; + uint32_t ecc_enabled; + uint32_t ecc_supported; + uint32_t vf_num; + uint32_t asic_type; + uint32_t oam_id; + uint32_t reserved[8]; +}; + +struct ras_cmd_devices_info_rsp { + uint32_t version; + uint32_t dev_num; + uint32_t reserved[6]; + struct ras_cmd_dev_info devs[RAS_CMD_MAX_GPU_NUM]; +}; + +struct ras_cmd_bad_page_record { + union { + uint64_t address; + uint64_t offset; + }; + uint64_t retired_page; + uint64_t ts; + + uint32_t err_type; + + union { + unsigned char bank; + unsigned char cu; + }; + + unsigned char mem_channel; + unsigned char mcumc_id; + + unsigned char valid; + unsigned char reserved[8]; +}; + +struct ras_cmd_bad_pages_info_req { + struct ras_cmd_dev_handle device; + uint32_t group_index; + uint32_t reserved[5]; +}; + +struct ras_cmd_bad_pages_info_rsp { + uint32_t version; + uint32_t group_index; + uint32_t bp_in_group; + uint32_t bp_total_cnt; + uint32_t reserved[4]; + struct ras_cmd_bad_page_record records[RAS_CMD_MAX_BAD_PAGES_PER_GROUP]; +}; + +struct ras_query_interface_info_req { + uint32_t reserved[8]; +}; + +struct ras_query_interface_info_rsp { + uint32_t version; + uint32_t ras_cmd_major_ver; + uint32_t ras_cmd_minor_ver; + uint32_t plat_major_ver; + uint32_t plat_minor_ver; + uint8_t interface_type; + uint8_t rsv[3]; + uint32_t reserved[8]; +}; + +#define RAS_MAX_NUM_SAFE_RANGES 64 +struct ras_cmd_ras_safe_fb_address_ranges_rsp { + uint32_t version; + uint32_t num_ranges; + uint32_t reserved[4]; + struct { + uint64_t start; + uint64_t size; + uint32_t idx; + uint32_t reserved[3]; + } range[RAS_MAX_NUM_SAFE_RANGES]; +}; + +enum ras_fb_addr_type { + RAS_FB_ADDR_SOC_PHY, /* SPA */ + RAS_FB_ADDR_BANK, + RAS_FB_ADDR_VF_PHY, /* GPA */ + RAS_FB_ADDR_UNKNOWN +}; + +struct ras_fb_bank_addr { + uint32_t stack_id; /* SID */ + uint32_t bank_group; + uint32_t bank; + uint32_t row; + uint32_t column; + uint32_t channel; + uint32_t subchannel; /* Also called Pseudochannel (PC) */ + uint32_t reserved[3]; +}; + +struct ras_fb_vf_phy_addr { + uint32_t vf_idx; + uint32_t reserved; + uint64_t addr; +}; + +union ras_translate_fb_address { + struct ras_fb_bank_addr bank_addr; + uint64_t soc_phy_addr; + struct ras_fb_vf_phy_addr vf_phy_addr; +}; + +struct ras_cmd_translate_fb_address_req { + struct ras_cmd_dev_handle dev; + enum ras_fb_addr_type src_addr_type; + enum ras_fb_addr_type dest_addr_type; + union ras_translate_fb_address trans_addr; +}; + +struct ras_cmd_translate_fb_address_rsp { + uint32_t version; + uint32_t reserved[5]; + union ras_translate_fb_address trans_addr; +}; + +struct ras_dev_link_topology_req { + struct ras_cmd_dev_handle src; + struct ras_cmd_dev_handle dst; +}; + +struct 
ras_dev_link_topology_rsp { + uint32_t version; + uint32_t link_status; /* HW status of the link */ + uint32_t link_type; /* type of the link */ + uint32_t num_hops; /* number of hops */ + uint32_t reserved[8]; +}; + +struct ras_cmd_cper_snapshot_req { + struct ras_cmd_dev_handle dev; +}; + +struct ras_cmd_cper_snapshot_rsp { + uint32_t version; + uint32_t reserved[4]; + uint32_t total_cper_num; + uint64_t start_cper_id; + uint64_t latest_cper_id; +}; + +struct ras_cmd_cper_record_req { + struct ras_cmd_dev_handle dev; + uint64_t cper_start_id; + uint32_t cper_num; + uint32_t buf_size; + uint64_t buf_ptr; + uint32_t reserved[4]; +}; + +struct ras_cmd_cper_record_rsp { + uint32_t version; + uint32_t real_data_size; + uint32_t real_cper_num; + uint32_t remain_num; + uint32_t reserved[4]; +}; + +struct ras_cmd_batch_trace_snapshot_req { + struct ras_cmd_dev_handle dev; +}; + +struct ras_cmd_batch_trace_snapshot_rsp { + uint32_t version; + uint32_t reserved[4]; + uint32_t total_batch_num; + uint64_t start_batch_id; + uint64_t latest_batch_id; +}; + +struct ras_cmd_batch_trace_record_req { + struct ras_cmd_dev_handle dev; + uint64_t start_batch_id; + uint32_t batch_num; + uint32_t reserved[5]; +}; + +struct batch_ras_trace_info { + uint64_t batch_id; + uint16_t offset; + uint8_t trace_num; + uint8_t rsv; + uint32_t reserved; +}; + +#define RAS_CMD_MAX_BATCH_NUM 300 +#define RAS_CMD_MAX_TRACE_NUM 300 +struct ras_cmd_batch_trace_record_rsp { + uint32_t version; + uint16_t real_batch_num; + uint16_t remain_num; + uint64_t start_batch_id; + uint32_t reserved[2]; + struct batch_ras_trace_info batchs[RAS_CMD_MAX_BATCH_NUM]; + struct ras_log_info records[RAS_CMD_MAX_TRACE_NUM]; +}; + +#pragma pack(pop) + +int ras_cmd_init(struct ras_core_context *ras_core); +int ras_cmd_fini(struct ras_core_context *ras_core); +int rascore_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data); +uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core); +int ras_cmd_query_interface_info(struct ras_core_context *ras_core, + struct ras_query_interface_info_rsp *rsp); +int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core, + uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr); +int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core, + struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_core.c b/drivers/gpu/drm/amd/ras/rascore/ras_core.c new file mode 100644 index 000000000000..01122b55c98a --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_core.c @@ -0,0 +1,603 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_core_status.h" + +#define RAS_SEQNO_FIFO_SIZE (128 * sizeof(uint64_t)) + +#define IS_LEAP_YEAR(x) ((x % 4 == 0 && x % 100 != 0) || x % 400 == 0) + +static const char * const ras_block_name[] = { + "umc", + "sdma", + "gfx", + "mmhub", + "athub", + "pcie_bif", + "hdp", + "xgmi_wafl", + "df", + "smn", + "sem", + "mp0", + "mp1", + "fuse", + "mca", + "vcn", + "jpeg", + "ih", + "mpio", +}; + +const char *ras_core_get_ras_block_name(enum ras_block_id block_id) +{ + if (block_id >= ARRAY_SIZE(ras_block_name)) + return ""; + + return ras_block_name[block_id]; +} + +int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core, + uint64_t timestamp, struct ras_time *tm) +{ + int days_in_month[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + uint64_t month = 0, day = 0, hour = 0, minute = 0, second = 0; + uint32_t year = 0; + int seconds_per_day = 24 * 60 * 60; + int seconds_per_hour = 60 * 60; + int seconds_per_minute = 60; + int days; + /* div64_u64_rem() stores a 64-bit remainder, so this must not be an int */ + uint64_t remaining_seconds; + + days = div64_u64_rem(timestamp, seconds_per_day, &remaining_seconds); + + /* utc_timestamp follows the Unix epoch */ + year = 1970; + while (days >= 365) { + if (IS_LEAP_YEAR(year)) { + if (days < 366) + break; + days -= 366; + } else { + days -= 365; + } + year++; + } + + days_in_month[1] += IS_LEAP_YEAR(year); + + month = 0; + while (days >= days_in_month[month]) { + days -= days_in_month[month]; + month++; + } + month++; + day = days + 1; + + if (remaining_seconds) { + hour = remaining_seconds / seconds_per_hour; + minute = (remaining_seconds % seconds_per_hour) / seconds_per_minute; + second = remaining_seconds % seconds_per_minute; + } + + tm->tm_year = year; + tm->tm_mon = month; + tm->tm_mday = day; + tm->tm_hour = hour; + tm->tm_min = minute; + tm->tm_sec = second; + + return 0; +} + +bool ras_core_gpu_in_reset(struct ras_core_context *ras_core) +{ + uint32_t status = 0; + + if (ras_core->sys_fn && + ras_core->sys_fn->check_gpu_status) + ras_core->sys_fn->check_gpu_status(ras_core, &status); + + return (status & RAS_GPU_STATUS__IN_RESET) ? true : false; +} + +bool ras_core_gpu_is_vf(struct ras_core_context *ras_core) +{ + uint32_t status = 0; + + if (ras_core->sys_fn && + ras_core->sys_fn->check_gpu_status) + ras_core->sys_fn->check_gpu_status(ras_core, &status); + + return (status & RAS_GPU_STATUS__IS_VF) ? true : false; +} + +bool ras_core_gpu_is_rma(struct ras_core_context *ras_core) +{ + if (!ras_core) + return false; + + return ras_core->is_rma; +} + +static int ras_core_seqno_fifo_write(struct ras_core_context *ras_core, + enum ras_seqno_fifo fifo_type, uint64_t seqno) +{ + int ret = 0; + struct kfifo *seqno_fifo = NULL; + + if (fifo_type == SEQNO_FIFO_POISON_CREATION) + seqno_fifo = &ras_core->de_seqno_fifo; + else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION) + seqno_fifo = &ras_core->consumption_seqno_fifo; + + if (seqno_fifo) + ret = kfifo_in_spinlocked(seqno_fifo, + &seqno, sizeof(seqno), &ras_core->seqno_lock); + + return ret ?
0 : -EINVAL; +} + +static int ras_core_seqno_fifo_read(struct ras_core_context *ras_core, + enum ras_seqno_fifo fifo_type, uint64_t *seqno, bool pop) +{ + int ret = 0; + struct kfifo *seqno_fifo = NULL; + + if (fifo_type == SEQNO_FIFO_POISON_CREATION) + seqno_fifo = &ras_core->de_seqno_fifo; + else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION) + seqno_fifo = &ras_core->consumption_seqno_fifo; + + if (seqno_fifo) { + if (pop) + ret = kfifo_out_spinlocked(seqno_fifo, + seqno, sizeof(*seqno), &ras_core->seqno_lock); + else + ret = kfifo_out_peek(seqno_fifo, seqno, sizeof(*seqno)); + } + + return ret ? 0 : -EINVAL; +} + +uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type type) +{ + uint64_t seqno = 0; + + if (ras_core->sys_fn && + ras_core->sys_fn->gen_seqno) + ras_core->sys_fn->gen_seqno(ras_core, type, &seqno); + + return seqno; +} + +int ras_core_put_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t seqno) +{ + int ret = 0; + + if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX) + return -EINVAL; + + if (seqno_type == RAS_SEQNO_TYPE_DE) + ret = ras_core_seqno_fifo_write(ras_core, + SEQNO_FIFO_POISON_CREATION, seqno); + else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION) + ret = ras_core_seqno_fifo_write(ras_core, + SEQNO_FIFO_POISON_CONSUMPTION, seqno); + else + ret = -EINVAL; + + return ret; +} + +uint64_t ras_core_get_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, bool pop) +{ + uint64_t seq_no; + int ret = -ENODATA; + + if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX) + return 0; + + if (seqno_type == RAS_SEQNO_TYPE_DE) + ret = ras_core_seqno_fifo_read(ras_core, + SEQNO_FIFO_POISON_CREATION, &seq_no, pop); + else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION) + ret = ras_core_seqno_fifo_read(ras_core, + SEQNO_FIFO_POISON_CONSUMPTION, &seq_no, pop); + + if (ret) + seq_no = ras_core_gen_seqno(ras_core, seqno_type); + + return seq_no; +} + +static int ras_core_eeprom_recovery(struct ras_core_context *ras_core) +{ + int count; + int ret; + + count = ras_eeprom_get_record_count(ras_core); + if (!count) + return 0; + + /* Avoid bad page to be loaded again after gpu reset */ + if (ras_umc_get_saved_eeprom_count(ras_core) >= count) + return 0; + + ret = ras_umc_load_bad_pages(ras_core); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "ras_umc_load_bad_pages failed: %d\n", ret); + return ret; + } + + ras_eeprom_sync_info(ras_core); + + return ret; +} + +struct ras_core_context *ras_core_create(struct ras_core_config *init_config) +{ + struct ras_core_context *ras_core; + struct ras_core_config *config; + + ras_core = kzalloc(sizeof(*ras_core), GFP_KERNEL); + if (!ras_core) + return NULL; + + config = kzalloc(sizeof(*config), GFP_KERNEL); + if (!config) { + kfree(ras_core); + return NULL; + } + + memcpy(config, init_config, sizeof(*config)); + ras_core->config = config; + + return ras_core; +} + +void ras_core_destroy(struct ras_core_context *ras_core) +{ + if (ras_core) + kfree(ras_core->config); + + kfree(ras_core); +} + +int ras_core_sw_init(struct ras_core_context *ras_core) +{ + int ret; + + if (!ras_core->config) { + RAS_DEV_ERR(ras_core->dev, "No ras core config!\n"); + return -EINVAL; + } + + ras_core->sys_fn = ras_core->config->sys_fn; + if (!ras_core->sys_fn) + return -EINVAL; + + ret = kfifo_alloc(&ras_core->de_seqno_fifo, + RAS_SEQNO_FIFO_SIZE, GFP_KERNEL); + if (ret) + return ret; + + ret = kfifo_alloc(&ras_core->consumption_seqno_fifo, + RAS_SEQNO_FIFO_SIZE, GFP_KERNEL); + if (ret) + return 
ret; + + spin_lock_init(&ras_core->seqno_lock); + + ret = ras_aca_sw_init(ras_core); + if (ret) + return ret; + + ret = ras_umc_sw_init(ras_core); + if (ret) + return ret; + + ret = ras_cmd_init(ras_core); + if (ret) + return ret; + + ret = ras_log_ring_sw_init(ras_core); + if (ret) + return ret; + + ret = ras_psp_sw_init(ras_core); + if (ret) + return ret; + + return 0; +} + +int ras_core_sw_fini(struct ras_core_context *ras_core) +{ + kfifo_free(&ras_core->de_seqno_fifo); + kfifo_free(&ras_core->consumption_seqno_fifo); + + ras_psp_sw_fini(ras_core); + ras_log_ring_sw_fini(ras_core); + ras_cmd_fini(ras_core); + ras_umc_sw_fini(ras_core); + ras_aca_sw_fini(ras_core); + + return 0; +} + +int ras_core_hw_init(struct ras_core_context *ras_core) +{ + int ret; + + ras_core->ras_eeprom_supported = + ras_core->config->ras_eeprom_supported; + + ras_core->poison_supported = ras_core->config->poison_supported; + + ret = ras_psp_hw_init(ras_core); + if (ret) + return ret; + + ret = ras_aca_hw_init(ras_core); + if (ret) + goto init_err1; + + ret = ras_mp1_hw_init(ras_core); + if (ret) + goto init_err2; + + ret = ras_nbio_hw_init(ras_core); + if (ret) + goto init_err3; + + ret = ras_umc_hw_init(ras_core); + if (ret) + goto init_err4; + + ret = ras_gfx_hw_init(ras_core); + if (ret) + goto init_err5; + + ret = ras_eeprom_hw_init(ras_core); + if (ret) + goto init_err6; + + ret = ras_core_eeprom_recovery(ras_core); + if (ret) { + RAS_DEV_ERR(ras_core->dev, + "Failed to recover ras core, ret:%d\n", ret); + goto init_err6; + } + + ret = ras_eeprom_check_storage_status(ras_core); + if (ret) + goto init_err6; + + ret = ras_process_init(ras_core); + if (ret) + goto init_err7; + + ras_core->is_initialized = true; + + return 0; + +init_err7: + ras_eeprom_hw_fini(ras_core); +init_err6: + ras_gfx_hw_fini(ras_core); +init_err5: + ras_umc_hw_fini(ras_core); +init_err4: + ras_nbio_hw_fini(ras_core); +init_err3: + ras_mp1_hw_fini(ras_core); +init_err2: + ras_aca_hw_fini(ras_core); +init_err1: + ras_psp_hw_fini(ras_core); + return ret; +} + +int ras_core_hw_fini(struct ras_core_context *ras_core) +{ + ras_core->is_initialized = false; + + ras_process_fini(ras_core); + ras_eeprom_hw_fini(ras_core); + ras_gfx_hw_fini(ras_core); + ras_nbio_hw_fini(ras_core); + ras_umc_hw_fini(ras_core); + ras_mp1_hw_fini(ras_core); + ras_aca_hw_fini(ras_core); + ras_psp_hw_fini(ras_core); + + return 0; +} + +bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data) +{ + return ras_nbio_handle_irq_error(ras_core, data); +} + +int ras_core_handle_fatal_error(struct ras_core_context *ras_core) +{ + int ret = 0; + + ras_aca_mark_fatal_flag(ras_core); + + ret = ras_core_event_notify(ras_core, + RAS_EVENT_ID__FATAL_ERROR_DETECTED, NULL); + + return ret; +} + +uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core) +{ + if (ras_core->ras_nbio.ip_func && + ras_core->ras_nbio.ip_func->get_memory_partition_mode) + return ras_core->ras_nbio.ip_func->get_memory_partition_mode(ras_core); + + RAS_DEV_ERR(ras_core->dev, "Failed to get gpu memory nps mode!\n"); + return 0; +} + +int ras_core_update_ecc_info(struct ras_core_context *ras_core) +{ + int ret; + + ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__CE, NULL); + if (!ret) + ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__UE, NULL); + + return ret; +} + +int ras_core_query_block_ecc_data(struct ras_core_context *ras_core, + enum ras_block_id block, struct ras_ecc_count *ecc_count) +{ + int ret; + + if (!ecc_count || (block >= RAS_BLOCK_ID__LAST) || !ras_core) +
return -EINVAL;
+
+	ret = ras_aca_get_block_ecc_count(ras_core, block, ecc_count);
+	if (!ret)
+		ras_aca_clear_block_new_ecc_count(ras_core, block);
+
+	return ret;
+}
+
+int ras_core_set_status(struct ras_core_context *ras_core, bool enable)
+{
+	ras_core->ras_core_enabled = enable;
+
+	return 0;
+}
+
+bool ras_core_is_enabled(struct ras_core_context *ras_core)
+{
+	return ras_core->ras_core_enabled;
+}
+
+uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core)
+{
+	if (ras_core && ras_core->sys_fn &&
+	    ras_core->sys_fn->get_utc_second_timestamp)
+		return ras_core->sys_fn->get_utc_second_timestamp(ras_core);
+
+	RAS_DEV_ERR(ras_core->dev, "Failed to get system timestamp!\n");
+	return 0;
+}
+
+int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+		uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa)
+{
+	if (!ras_core || !soc_pa || !bank_addr)
+		return -EINVAL;
+
+	return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, bank_addr, bank_to_pa);
+}
+
+bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core)
+{
+	if (ras_core && ras_core->sys_fn &&
+	    ras_core->sys_fn->detect_ras_interrupt)
+		return ras_core->sys_fn->detect_ras_interrupt(ras_core);
+
+	RAS_DEV_ERR(ras_core->dev, "Failed to detect ras interrupt!\n");
+	return false;
+}
+
+int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
+		enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+	if (ras_core->sys_fn && ras_core->sys_fn->get_gpu_mem)
+		return ras_core->sys_fn->get_gpu_mem(ras_core, mem_type, gpu_mem);
+
+	RAS_DEV_ERR(ras_core->dev, "No get_gpu_mem system API configured!\n");
+	return -EACCES;
+}
+
+int ras_core_put_gpu_mem(struct ras_core_context *ras_core,
+		enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+	if (ras_core->sys_fn && ras_core->sys_fn->put_gpu_mem)
+		return ras_core->sys_fn->put_gpu_mem(ras_core, mem_type, gpu_mem);
+
+	RAS_DEV_ERR(ras_core->dev, "No put_gpu_mem system API configured!\n");
+	return -EACCES;
+}
+
+bool ras_core_is_ready(struct ras_core_context *ras_core)
+{
+	return ras_core ?
ras_core->is_initialized : false; +} + +bool ras_core_check_safety_watermark(struct ras_core_context *ras_core) +{ + return ras_eeprom_check_safety_watermark(ras_core); +} + +int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core) +{ + if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock) + return ras_core->sys_fn->gpu_reset_lock(ras_core, true, true); + + return 1; +} + +void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core) +{ + if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock) + ras_core->sys_fn->gpu_reset_lock(ras_core, true, false); +} + +void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core) +{ + if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock) + ras_core->sys_fn->gpu_reset_lock(ras_core, false, false); +} + +int ras_core_event_notify(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data) +{ + if (ras_core && ras_core->sys_fn && + ras_core->sys_fn->ras_notifier) + return ras_core->sys_fn->ras_notifier(ras_core, event_id, data); + + return -RAS_CORE_NOT_SUPPORTED; +} + +int ras_core_get_device_system_info(struct ras_core_context *ras_core, + struct device_system_info *dev_info) +{ + if (ras_core && ras_core->sys_fn && + ras_core->sys_fn->get_device_system_info) + return ras_core->sys_fn->get_device_system_info(ras_core, dev_info); + + return -RAS_CORE_NOT_SUPPORTED; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.c b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c new file mode 100644 index 000000000000..0fc7522b7ab6 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_core_status.h" +#include "ras_log_ring.h" +#include "ras_cper.h" + +static const struct ras_cper_guid MCE = CPER_NOTIFY__MCE; +static const struct ras_cper_guid CMC = CPER_NOTIFY__CMC; +static const struct ras_cper_guid BOOT = BOOT__TYPE; + +static const struct ras_cper_guid CRASHDUMP = GPU__CRASHDUMP; +static const struct ras_cper_guid RUNTIME = GPU__NONSTANDARD_ERROR; + +static void cper_get_timestamp(struct ras_core_context *ras_core, + struct ras_cper_timestamp *timestamp, uint64_t utc_second_timestamp) +{ + struct ras_time tm = {0}; + + ras_core_convert_timestamp_to_time(ras_core, utc_second_timestamp, &tm); + timestamp->seconds = tm.tm_sec; + timestamp->minutes = tm.tm_min; + timestamp->hours = tm.tm_hour; + timestamp->flag = 0; + timestamp->day = tm.tm_mday; + timestamp->month = tm.tm_mon; + timestamp->year = tm.tm_year % 100; + timestamp->century = tm.tm_year / 100; +} + +static void fill_section_hdr(struct ras_core_context *ras_core, + struct cper_section_hdr *hdr, enum ras_cper_type type, + enum ras_cper_severity sev, struct ras_log_info *trace) +{ + struct device_system_info dev_info = {0}; + char record_id[32]; + + hdr->signature[0] = 'C'; + hdr->signature[1] = 'P'; + hdr->signature[2] = 'E'; + hdr->signature[3] = 'R'; + hdr->revision = CPER_HDR__REV_1; + hdr->signature_end = 0xFFFFFFFF; + hdr->error_severity = (sev == RAS_CPER_SEV_RMA ? RAS_CPER_SEV_FATAL_UE : sev); + + hdr->valid_bits.platform_id = 1; + hdr->valid_bits.timestamp = 1; + + ras_core_get_device_system_info(ras_core, &dev_info); + + cper_get_timestamp(ras_core, &hdr->timestamp, trace->timestamp); + + snprintf(record_id, sizeof(record_id), "%d:%llX", dev_info.socket_id, + RAS_LOG_SEQNO_TO_BATCH_IDX(trace->seqno)); + memcpy(hdr->record_id, record_id, 8); + + snprintf(hdr->platform_id, 16, "0x%04X:0x%04X", + dev_info.vendor_id, dev_info.device_id); + /* pmfw version should be part of creator_id according to CPER spec */ + snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID__AMDGPU); + + switch (type) { + case RAS_CPER_TYPE_BOOT: + hdr->notify_type = BOOT; + break; + case RAS_CPER_TYPE_FATAL: + case RAS_CPER_TYPE_RMA: + hdr->notify_type = MCE; + break; + case RAS_CPER_TYPE_RUNTIME: + if (sev == RAS_CPER_SEV_NON_FATAL_CE) + hdr->notify_type = CMC; + else + hdr->notify_type = MCE; + break; + default: + RAS_DEV_ERR(ras_core->dev, "Unknown CPER Type\n"); + break; + } +} + +static int fill_section_descriptor(struct ras_core_context *ras_core, + struct cper_section_descriptor *descriptor, + enum ras_cper_severity sev, + struct ras_cper_guid sec_type, + uint32_t section_offset, + uint32_t section_length) +{ + struct device_system_info dev_info = {0}; + + descriptor->revision_minor = CPER_SEC__MINOR_REV_1; + descriptor->revision_major = CPER_SEC__MAJOR_REV_22; + descriptor->sec_offset = section_offset; + descriptor->sec_length = section_length; + descriptor->valid_bits.fru_text = 1; + descriptor->flag_bits.primary = 1; + descriptor->severity = (sev == RAS_CPER_SEV_RMA ? 
RAS_CPER_SEV_FATAL_UE : sev); + descriptor->sec_type = sec_type; + + ras_core_get_device_system_info(ras_core, &dev_info); + + snprintf(descriptor->fru_text, 20, "OAM%d", dev_info.socket_id); + + if (sev == RAS_CPER_SEV_RMA) + descriptor->flag_bits.exceed_err_threshold = 1; + + if (sev == RAS_CPER_SEV_NON_FATAL_UE) + descriptor->flag_bits.latent_err = 1; + + return 0; +} + +static int fill_section_fatal(struct ras_core_context *ras_core, + struct cper_section_fatal *fatal, struct ras_log_info *trace) +{ + fatal->data.reg_ctx_type = CPER_CTX_TYPE__CRASH; + fatal->data.reg_arr_size = sizeof(fatal->data.reg); + + fatal->data.reg.status = trace->aca_reg.regs[RAS_CPER_ACA_REG_STATUS]; + fatal->data.reg.addr = trace->aca_reg.regs[RAS_CPER_ACA_REG_ADDR]; + fatal->data.reg.ipid = trace->aca_reg.regs[RAS_CPER_ACA_REG_IPID]; + fatal->data.reg.synd = trace->aca_reg.regs[RAS_CPER_ACA_REG_SYND]; + + return 0; +} + +static int fill_section_runtime(struct ras_core_context *ras_core, + struct cper_section_runtime *runtime, struct ras_log_info *trace, + enum ras_cper_severity sev) +{ + runtime->hdr.valid_bits.err_info_cnt = 1; + runtime->hdr.valid_bits.err_context_cnt = 1; + + runtime->descriptor.error_type = RUNTIME; + runtime->descriptor.ms_chk_bits.err_type_valid = 1; + if (sev == RAS_CPER_SEV_RMA) { + runtime->descriptor.valid_bits.ms_chk = 1; + runtime->descriptor.ms_chk_bits.err_type = 1; + runtime->descriptor.ms_chk_bits.pcc = 1; + } + + runtime->reg.reg_ctx_type = CPER_CTX_TYPE__CRASH; + runtime->reg.reg_arr_size = sizeof(runtime->reg.reg_dump); + + runtime->reg.reg_dump[RAS_CPER_ACA_REG_CTL] = trace->aca_reg.regs[ACA_REG_IDX__CTL]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_STATUS] = trace->aca_reg.regs[ACA_REG_IDX__STATUS]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_ADDR] = trace->aca_reg.regs[ACA_REG_IDX__ADDR]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_MISC0] = trace->aca_reg.regs[ACA_REG_IDX__MISC0]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_CONFIG] = trace->aca_reg.regs[ACA_REG_IDX__CONFG]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_IPID] = trace->aca_reg.regs[ACA_REG_IDX__IPID]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_SYND] = trace->aca_reg.regs[ACA_REG_IDX__SYND]; + + return 0; +} + +static int cper_generate_runtime_record(struct ras_core_context *ras_core, + struct cper_section_hdr *hdr, struct ras_log_info **trace_arr, uint32_t arr_num, + enum ras_cper_severity sev) +{ + struct cper_section_descriptor *descriptor; + struct cper_section_runtime *runtime; + int i; + + fill_section_hdr(ras_core, hdr, RAS_CPER_TYPE_RUNTIME, sev, trace_arr[0]); + hdr->record_length = RAS_HDR_LEN + ((RAS_SEC_DESC_LEN + RAS_NONSTD_SEC_LEN) * arr_num); + hdr->sec_cnt = arr_num; + for (i = 0; i < arr_num; i++) { + descriptor = (struct cper_section_descriptor *)((uint8_t *)hdr + + RAS_SEC_DESC_OFFSET(i)); + runtime = (struct cper_section_runtime *)((uint8_t *)hdr + + RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i)); + + fill_section_descriptor(ras_core, descriptor, sev, RUNTIME, + RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i), + sizeof(struct cper_section_runtime)); + fill_section_runtime(ras_core, runtime, trace_arr[i], sev); + } + + return 0; +} + +static int cper_generate_fatal_record(struct ras_core_context *ras_core, + uint8_t *buffer, struct ras_log_info **trace_arr, uint32_t arr_num) +{ + struct ras_cper_fatal_record record = {0}; + int i = 0; + + for (i = 0; i < arr_num; i++) { + fill_section_hdr(ras_core, &record.hdr, RAS_CPER_TYPE_FATAL, + RAS_CPER_SEV_FATAL_UE, trace_arr[i]); + record.hdr.record_length = RAS_HDR_LEN + 
RAS_SEC_DESC_LEN + RAS_FATAL_SEC_LEN; + record.hdr.sec_cnt = 1; + + fill_section_descriptor(ras_core, &record.descriptor, RAS_CPER_SEV_FATAL_UE, + CRASHDUMP, offsetof(struct ras_cper_fatal_record, fatal), + sizeof(struct cper_section_fatal)); + + fill_section_fatal(ras_core, &record.fatal, trace_arr[i]); + + memcpy(buffer + (i * record.hdr.record_length), + &record, record.hdr.record_length); + } + + return 0; +} + +static int cper_get_record_size(enum ras_cper_type type, uint16_t section_count) +{ + int size = 0; + + size += RAS_HDR_LEN; + size += (RAS_SEC_DESC_LEN * section_count); + + switch (type) { + case RAS_CPER_TYPE_RUNTIME: + case RAS_CPER_TYPE_RMA: + size += (RAS_NONSTD_SEC_LEN * section_count); + break; + case RAS_CPER_TYPE_FATAL: + size += (RAS_FATAL_SEC_LEN * section_count); + size += (RAS_HDR_LEN * (section_count - 1)); + break; + case RAS_CPER_TYPE_BOOT: + size += (RAS_BOOT_SEC_LEN * section_count); + break; + default: + /* should never reach here */ + break; + } + + return size; +} + +static enum ras_cper_type cper_ras_log_event_to_cper_type(enum ras_log_event event) +{ + switch (event) { + case RAS_LOG_EVENT_UE: + return RAS_CPER_TYPE_FATAL; + case RAS_LOG_EVENT_DE: + case RAS_LOG_EVENT_CE: + case RAS_LOG_EVENT_POISON_CREATION: + case RAS_LOG_EVENT_POISON_CONSUMPTION: + return RAS_CPER_TYPE_RUNTIME; + case RAS_LOG_EVENT_RMA: + return RAS_CPER_TYPE_RMA; + default: + /* should never reach here */ + return RAS_CPER_TYPE_RUNTIME; + } +} + +int ras_cper_generate_cper(struct ras_core_context *ras_core, + struct ras_log_info **trace_list, uint32_t count, + uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len) +{ + uint8_t *buffer = buf; + uint64_t buf_size = buf_len; + int record_size, saved_size = 0; + struct cper_section_hdr *hdr; + + /* All the batch traces share the same event */ + record_size = cper_get_record_size( + cper_ras_log_event_to_cper_type(trace_list[0]->event), count); + + if ((record_size + saved_size) > buf_size) + return -ENOMEM; + + hdr = (struct cper_section_hdr *)(buffer + saved_size); + + switch (trace_list[0]->event) { + case RAS_LOG_EVENT_RMA: + cper_generate_runtime_record(ras_core, hdr, trace_list, count, RAS_CPER_SEV_RMA); + break; + case RAS_LOG_EVENT_DE: + cper_generate_runtime_record(ras_core, + hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_UE); + break; + case RAS_LOG_EVENT_CE: + cper_generate_runtime_record(ras_core, + hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_CE); + break; + case RAS_LOG_EVENT_UE: + cper_generate_fatal_record(ras_core, buffer + saved_size, trace_list, count); + break; + default: + RAS_DEV_WARN(ras_core->dev, "Unprocessed trace event: %d\n", trace_list[0]->event); + break; + } + + saved_size += record_size; + + *real_data_len = saved_size; + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.h b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h new file mode 100644 index 000000000000..076c1883c1ce --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_CPER_H__ +#define __RAS_CPER_H__ + +#define CPER_UUID_MAX_SIZE 16 +struct ras_cper_guid { + uint8_t b[CPER_UUID_MAX_SIZE]; +}; + +#define CPER_GUID__INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ + ((struct ras_cper_guid) \ + {{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ + (b) & 0xff, ((b) >> 8) & 0xff, \ + (c) & 0xff, ((c) >> 8) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +#define CPER_HDR__REV_1 (0x100) +#define CPER_SEC__MINOR_REV_1 (0x01) +#define CPER_SEC__MAJOR_REV_22 (0x22) +#define CPER_OAM_MAX_COUNT (8) + +#define CPER_CTX_TYPE__CRASH (1) +#define CPER_CTX_TYPE__BOOT (9) + +#define CPER_CREATOR_ID__AMDGPU "amdgpu" + +#define CPER_NOTIFY__MCE \ + CPER_GUID__INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \ + 0xE1, 0x49, 0x13, 0xBB) +#define CPER_NOTIFY__CMC \ + CPER_GUID__INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \ + 0xEB, 0xD4, 0xF8, 0x90) +#define BOOT__TYPE \ + CPER_GUID__INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \ + 0xD4, 0x64, 0xB3, 0x8F) + +#define GPU__CRASHDUMP \ + CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0xB0, 0xD0, 0x73, 0x65, \ + 0x72, 0x5F, 0xD6, 0xAE) +#define GPU__NONSTANDARD_ERROR \ + CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0x81, 0xA2, 0xAC, 0x69, \ + 0x17, 0x80, 0x55, 0x1D) +#define PROC_ERR__SECTION_TYPE \ + CPER_GUID__INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \ + 0x24, 0x2B, 0x6E, 0x1D) + +enum ras_cper_type { + RAS_CPER_TYPE_RUNTIME, + RAS_CPER_TYPE_FATAL, + RAS_CPER_TYPE_BOOT, + RAS_CPER_TYPE_RMA, +}; + +enum ras_cper_severity { + RAS_CPER_SEV_NON_FATAL_UE = 0, + RAS_CPER_SEV_FATAL_UE = 1, + RAS_CPER_SEV_NON_FATAL_CE = 2, + RAS_CPER_SEV_RMA = 3, + + RAS_CPER_SEV_UNUSED = 10, +}; + +enum ras_cper_aca_reg { + RAS_CPER_ACA_REG_CTL = 0, + RAS_CPER_ACA_REG_STATUS = 1, + RAS_CPER_ACA_REG_ADDR = 2, + RAS_CPER_ACA_REG_MISC0 = 3, + RAS_CPER_ACA_REG_CONFIG = 4, + RAS_CPER_ACA_REG_IPID = 5, + RAS_CPER_ACA_REG_SYND = 6, + RAS_CPER_ACA_REG_DESTAT = 8, + RAS_CPER_ACA_REG_DEADDR = 9, + RAS_CPER_ACA_REG_MASK = 10, + + RAS_CPER_ACA_REG_COUNT = 16, +}; + +#pragma pack(push, 1) + +struct ras_cper_timestamp { + uint8_t seconds; + uint8_t minutes; + uint8_t hours; + uint8_t flag; + uint8_t day; + uint8_t month; + uint8_t year; + uint8_t century; +}; + +struct cper_section_hdr { + char signature[4]; /* "CPER" */ + uint16_t revision; + uint32_t signature_end; /* 0xFFFFFFFF 
*/ + uint16_t sec_cnt; + enum ras_cper_severity error_severity; + union { + struct { + uint32_t platform_id : 1; + uint32_t timestamp : 1; + uint32_t partition_id : 1; + uint32_t reserved : 29; + } valid_bits; + uint32_t valid_mask; + }; + uint32_t record_length; /* Total size of CPER Entry */ + struct ras_cper_timestamp timestamp; + char platform_id[16]; + struct ras_cper_guid partition_id; /* Reserved */ + char creator_id[16]; + struct ras_cper_guid notify_type; /* CMC, MCE */ + char record_id[8]; /* Unique CPER Entry ID */ + uint32_t flags; /* Reserved */ + uint64_t persistence_info; /* Reserved */ + uint8_t reserved[12]; /* Reserved */ +}; + +struct cper_section_descriptor { + uint32_t sec_offset; /* Offset from the start of CPER entry */ + uint32_t sec_length; + uint8_t revision_minor; /* CPER_SEC_MINOR_REV_1 */ + uint8_t revision_major; /* CPER_SEC_MAJOR_REV_22 */ + union { + struct { + uint8_t fru_id : 1; + uint8_t fru_text : 1; + uint8_t reserved : 6; + } valid_bits; + uint8_t valid_mask; + }; + uint8_t reserved; + union { + struct { + uint32_t primary : 1; + uint32_t reserved1 : 2; + uint32_t exceed_err_threshold : 1; + uint32_t latent_err : 1; + uint32_t reserved2 : 27; + } flag_bits; + uint32_t flag_mask; + }; + struct ras_cper_guid sec_type; + char fru_id[16]; + enum ras_cper_severity severity; + char fru_text[20]; +}; + +struct runtime_hdr { + union { + struct { + uint64_t apic_id : 1; + uint64_t fw_id : 1; + uint64_t err_info_cnt : 6; + uint64_t err_context_cnt : 6; + } valid_bits; + uint64_t valid_mask; + }; + uint64_t apic_id; + char fw_id[48]; +}; + +struct runtime_descriptor { + struct ras_cper_guid error_type; + union { + struct { + uint64_t ms_chk : 1; + uint64_t target_addr_id : 1; + uint64_t req_id : 1; + uint64_t resp_id : 1; + uint64_t instr_ptr : 1; + uint64_t reserved : 59; + } valid_bits; + uint64_t valid_mask; + }; + union { + struct { + uint64_t err_type_valid : 1; + uint64_t pcc_valid : 1; + uint64_t uncorr_valid : 1; + uint64_t precise_ip_valid : 1; + uint64_t restartable_ip_valid : 1; + uint64_t overflow_valid : 1; + uint64_t reserved1 : 10; + uint64_t err_type : 2; + uint64_t pcc : 1; + uint64_t uncorr : 1; + uint64_t precised_ip : 1; + uint64_t restartable_ip : 1; + uint64_t overflow : 1; + uint64_t reserved2 : 41; + } ms_chk_bits; + uint64_t ms_chk_mask; + }; + uint64_t target_addr_id; + uint64_t req_id; + uint64_t resp_id; + uint64_t instr_ptr; +}; + +struct runtime_error_reg { + uint16_t reg_ctx_type; + uint16_t reg_arr_size; + uint32_t msr_addr; + uint64_t mm_reg_addr; + uint64_t reg_dump[RAS_CPER_ACA_REG_COUNT]; +}; + +struct cper_section_runtime { + struct runtime_hdr hdr; + struct runtime_descriptor descriptor; + struct runtime_error_reg reg; +}; + +struct crashdump_hdr { + uint64_t reserved1; + uint64_t reserved2; + char fw_id[48]; + uint64_t reserved3[8]; +}; + +struct fatal_reg_info { + uint64_t status; + uint64_t addr; + uint64_t ipid; + uint64_t synd; +}; + +struct crashdump_fatal { + uint16_t reg_ctx_type; + uint16_t reg_arr_size; + uint32_t reserved1; + uint64_t reserved2; + struct fatal_reg_info reg; +}; + +struct crashdump_boot { + uint16_t reg_ctx_type; + uint16_t reg_arr_size; + uint32_t reserved1; + uint64_t reserved2; + uint64_t msg[CPER_OAM_MAX_COUNT]; +}; + +struct cper_section_fatal { + struct crashdump_hdr hdr; + struct crashdump_fatal data; +}; + +struct cper_section_boot { + struct crashdump_hdr hdr; + struct crashdump_boot data; +}; + +struct ras_cper_fatal_record { + struct cper_section_hdr hdr; + struct cper_section_descriptor 
descriptor;
+	struct cper_section_fatal	fatal;
+};
+#pragma pack(pop)
+
+#define RAS_HDR_LEN		(sizeof(struct cper_section_hdr))
+#define RAS_SEC_DESC_LEN	(sizeof(struct cper_section_descriptor))
+
+#define RAS_BOOT_SEC_LEN	(sizeof(struct cper_section_boot))
+#define RAS_FATAL_SEC_LEN	(sizeof(struct cper_section_fatal))
+#define RAS_NONSTD_SEC_LEN	(sizeof(struct cper_section_runtime))
+
+#define RAS_SEC_DESC_OFFSET(idx)	(RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (idx)))
+
+#define RAS_BOOT_SEC_OFFSET(count, idx) \
+	(RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (count)) + (RAS_BOOT_SEC_LEN * (idx)))
+#define RAS_FATAL_SEC_OFFSET(count, idx) \
+	(RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (count)) + (RAS_FATAL_SEC_LEN * (idx)))
+#define RAS_NONSTD_SEC_OFFSET(count, idx) \
+	(RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (count)) + (RAS_NONSTD_SEC_LEN * (idx)))
+
+struct ras_core_context;
+struct ras_log_info;
+int ras_cper_generate_cper(struct ras_core_context *ras_core,
+		struct ras_log_info **trace_list, uint32_t count,
+		uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c
new file mode 100644
index 000000000000..cd6b057bdaf3
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c
@@ -0,0 +1,1339 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras_eeprom.h"
+#include "ras.h"
+
+/* These are memory addresses as would be seen by one or more EEPROM
+ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
+ * set of EEPROM devices. They form a continuous memory space.
+ *
+ * The I2C device address includes the device type identifier, 1010b,
+ * which is a reserved value and indicates that this is an I2C EEPROM
+ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
+ * address, namely bits 18, 17, and 16. This makes up the 7 bit
+ * address sent on the I2C bus with bit 0 being the direction bit,
+ * which is not represented here, and sent by the hardware directly.
+ *
+ * For instance,
+ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
+ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
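+ *
+ * As an illustrative example (hypothetical offset), a two-byte offset of
+ * 1234h presented to device address 54h selects EEPROM memory byte
+ * 40000h + 1234h = 41234h.
+ *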
+ * Depending on the size of the I2C EEPROM device(s), bits 18:16 may
+ * address memory in a device or a device on the I2C bus, depending on
+ * the status of pins 1-3.
+ *
+ * The RAS table lives either at address 0 or address 40000h of EEPROM.
+ */
+#define EEPROM_I2C_MADDR_0	0x0
+#define EEPROM_I2C_MADDR_4	0x40000
+
+#define EEPROM_PAGE_BITS	8
+#define EEPROM_PAGE_SIZE	(1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK	(EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE	2
+#define MAKE_I2C_ADDR(_aa)	((0xA << 3) | (((_aa) >> 16) & 0xF))
+
+/*
+ * The two macros below represent the actual size in bytes that
+ * those entities occupy in the EEPROM memory.
+ * RAS_TABLE_RECORD_SIZE is different from sizeof(eeprom_umc_record), which
+ * uses uint64 to store 6-byte fields such as retired_row_pfn.
+ */
+#define RAS_TABLE_HEADER_SIZE	20
+#define RAS_TABLE_RECORD_SIZE	24
+
+/* Table hdr is 'AMDR' */
+#define RAS_TABLE_HDR_VAL	0x414d4452
+
+/* Bad GPU tag 'BADG' */
+#define RAS_TABLE_HDR_BAD	0x42414447
+
+/*
+ * EEPROM Table structure v1
+ * ---------------------------------
+ * |                               |
+ * |     EEPROM TABLE HEADER       |
+ * |      ( size 20 Bytes )        |
+ * |                               |
+ * ---------------------------------
+ * |                               |
+ * |    BAD PAGE RECORD AREA       |
+ * |                               |
+ * ---------------------------------
+ */
+
+/* Assume 2-Mbit size EEPROM and take up the whole space. */
+#define RAS_TBL_SIZE_BYTES	(256 * 1024)
+#define RAS_TABLE_START		0
+#define RAS_HDR_START		RAS_TABLE_START
+#define RAS_RECORD_START	(RAS_HDR_START + RAS_TABLE_HEADER_SIZE)
+#define RAS_MAX_RECORD_COUNT	((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \
+				 / RAS_TABLE_RECORD_SIZE)
+
+/*
+ * EEPROM Table structure v2.1
+ * ---------------------------------
+ * |                               |
+ * |     EEPROM TABLE HEADER       |
+ * |      ( size 20 Bytes )        |
+ * |                               |
+ * ---------------------------------
+ * |                               |
+ * |     EEPROM TABLE RAS INFO     |
+ * | (available info size 4 Bytes) |
+ * |  ( reserved size 252 Bytes )  |
+ * |                               |
+ * ---------------------------------
+ * |                               |
+ * |     BAD PAGE RECORD AREA      |
+ * |                               |
+ * ---------------------------------
+ */
+
+/* EEPROM Table V2_1 */
+#define RAS_TABLE_V2_1_INFO_SIZE	256
+#define RAS_TABLE_V2_1_INFO_START	RAS_TABLE_HEADER_SIZE
+#define RAS_RECORD_START_V2_1		(RAS_HDR_START + RAS_TABLE_HEADER_SIZE + \
+					 RAS_TABLE_V2_1_INFO_SIZE)
+#define RAS_MAX_RECORD_COUNT_V2_1	((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE - \
+					  RAS_TABLE_V2_1_INFO_SIZE) \
+					 / RAS_TABLE_RECORD_SIZE)
+
+/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
+ * offset off of RAS_TABLE_START. That is, this is something you can
+ * add to control->i2c_address, and then tell the I2C layer to read
+ * from/write to there. _N is the so called absolute index,
+ * because it starts right after the table header.
+ */
+#define RAS_INDEX_TO_OFFSET(_C, _N) ((_C)->ras_record_offset + \
+				     (_N) * RAS_TABLE_RECORD_SIZE)
+
+#define RAS_OFFSET_TO_INDEX(_C, _O) (((_O) - \
+				      (_C)->ras_record_offset) / RAS_TABLE_RECORD_SIZE)
+
+/* Given a 0-based relative record index, 0, 1, 2, ..., etc., off
+ * of "fri", return the absolute record index off of the end of
+ * the table header.
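+ *
+ * For example (illustrative values only): with ras_fri = 5 and
+ * ras_max_record_count = 8, relative index 4 maps to absolute index
+ * (4 + 5) % 8 = 1, i.e. the record wraps around to the start of the table.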
+ */ +#define RAS_RI_TO_AI(_C, _I) (((_I) + (_C)->ras_fri) % \ + (_C)->ras_max_record_count) + +#define RAS_NUM_RECS(_tbl_hdr) (((_tbl_hdr)->tbl_size - \ + RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE) + +#define RAS_NUM_RECS_V2_1(_tbl_hdr) (((_tbl_hdr)->tbl_size - \ + RAS_TABLE_HEADER_SIZE - \ + RAS_TABLE_V2_1_INFO_SIZE) / RAS_TABLE_RECORD_SIZE) + +#define to_ras_core_context(x) (container_of(x, struct ras_core_context, ras_eeprom)) + +static bool __is_ras_eeprom_supported(struct ras_core_context *ras_core) +{ + return ras_core->ras_eeprom_supported; +} + +static bool __get_eeprom_i2c_addr(struct ras_core_context *ras_core, + struct ras_eeprom_control *control) +{ + int ret = -EINVAL; + + if (control->sys_func && + control->sys_func->update_eeprom_i2c_config) + ret = control->sys_func->update_eeprom_i2c_config(ras_core); + else + RAS_DEV_WARN(ras_core->dev, + "No eeprom i2c system config!\n"); + + return !ret ? true : false; +} + +static int __ras_eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr, + u8 *eeprom_buf, u32 buf_size, bool read) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + int ret; + + if (control->sys_func && control->sys_func->eeprom_i2c_xfer) { + ret = control->sys_func->eeprom_i2c_xfer(ras_core, + eeprom_addr, eeprom_buf, buf_size, read); + + if ((ret > 0) && !read) { + /* According to EEPROM specs the length of the + * self-writing cycle, tWR (tW), is 10 ms. + * + * TODO: Use polling on ACK, aka Acknowledge + * Polling, to minimize waiting for the + * internal write cycle to complete, as it is + * usually smaller than tWR (tW). + */ + msleep(10); + } + + return ret; + } + + RAS_DEV_ERR(ras_core->dev, "Error: No eeprom i2c system xfer function!\n"); + return -EINVAL; +} + +static int __eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr, + u8 *eeprom_buf, u32 buf_size, bool read) +{ + u16 limit; + u16 ps; /* Partial size */ + int res = 0, r; + + if (read) + limit = ras_core->ras_eeprom.max_read_len; + else + limit = ras_core->ras_eeprom.max_write_len; + + if (limit && (limit <= EEPROM_OFFSET_SIZE)) { + RAS_DEV_ERR(ras_core->dev, + "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d", + eeprom_addr, buf_size, + read ? "read" : "write", EEPROM_OFFSET_SIZE); + return -EINVAL; + } + + ras_core_down_gpu_reset_lock(ras_core); + + if (limit == 0) { + res = __ras_eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, buf_size, read); + } else { + /* The "limit" includes all data bytes sent/received, + * which would include the EEPROM_OFFSET_SIZE bytes. + * Account for them here. + */ + limit -= EEPROM_OFFSET_SIZE; + for ( ; buf_size > 0; + buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) { + ps = (buf_size < limit) ? 
buf_size : limit; + + r = __ras_eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, ps, read); + if (r < 0) + break; + + res += r; + } + } + + ras_core_up_gpu_reset_lock(ras_core); + + return res; +} + +static int __eeprom_read(struct ras_core_context *ras_core, + u32 eeprom_addr, u8 *eeprom_buf, u32 bytes) +{ + return __eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, bytes, true); +} + +static int __eeprom_write(struct ras_core_context *ras_core, + u32 eeprom_addr, u8 *eeprom_buf, u32 bytes) +{ + return __eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, bytes, false); +} + +static void +__encode_table_header_to_buf(struct ras_eeprom_table_header *hdr, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + + pp[0] = cpu_to_le32(hdr->header); + pp[1] = cpu_to_le32(hdr->version); + pp[2] = cpu_to_le32(hdr->first_rec_offset); + pp[3] = cpu_to_le32(hdr->tbl_size); + pp[4] = cpu_to_le32(hdr->checksum); +} + +static void +__decode_table_header_from_buf(struct ras_eeprom_table_header *hdr, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + + hdr->header = le32_to_cpu(pp[0]); + hdr->version = le32_to_cpu(pp[1]); + hdr->first_rec_offset = le32_to_cpu(pp[2]); + hdr->tbl_size = le32_to_cpu(pp[3]); + hdr->checksum = le32_to_cpu(pp[4]); +} + +static int __write_table_header(struct ras_eeprom_control *control) +{ + u8 buf[RAS_TABLE_HEADER_SIZE]; + struct ras_core_context *ras_core = to_ras_core_context(control); + int res; + + memset(buf, 0, sizeof(buf)); + __encode_table_header_to_buf(&control->tbl_hdr, buf); + + /* i2c may be unstable in gpu reset */ + res = __eeprom_write(ras_core, + control->i2c_address + + control->ras_header_offset, + buf, RAS_TABLE_HEADER_SIZE); + + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "Failed to write EEPROM table header:%d\n", res); + } else if (res < RAS_TABLE_HEADER_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Short write:%d out of %d\n", res, RAS_TABLE_HEADER_SIZE); + res = -EIO; + } else { + res = 0; + } + + return res; +} + +static void +__encode_table_ras_info_to_buf(struct ras_eeprom_table_ras_info *rai, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + u32 tmp; + + tmp = ((uint32_t)(rai->rma_status) & 0xFF) | + (((uint32_t)(rai->health_percent) << 8) & 0xFF00) | + (((uint32_t)(rai->ecc_page_threshold) << 16) & 0xFFFF0000); + pp[0] = cpu_to_le32(tmp); +} + +static void +__decode_table_ras_info_from_buf(struct ras_eeprom_table_ras_info *rai, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + u32 tmp; + + tmp = le32_to_cpu(pp[0]); + rai->rma_status = tmp & 0xFF; + rai->health_percent = (tmp >> 8) & 0xFF; + rai->ecc_page_threshold = (tmp >> 16) & 0xFFFF; +} + +static int __write_table_ras_info(struct ras_eeprom_control *control) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + u8 *buf; + int res; + + buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); + if (!buf) { + RAS_DEV_ERR(ras_core->dev, + "Failed to alloc buf to write table ras info\n"); + return -ENOMEM; + } + + __encode_table_ras_info_to_buf(&control->tbl_rai, buf); + + /* i2c may be unstable in gpu reset */ + res = __eeprom_write(ras_core, + control->i2c_address + + control->ras_info_offset, + buf, RAS_TABLE_V2_1_INFO_SIZE); + + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "Failed to write EEPROM table ras info:%d\n", res); + } else if (res < RAS_TABLE_V2_1_INFO_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Short write:%d out of %d\n", res, RAS_TABLE_V2_1_INFO_SIZE); + res = -EIO; + } else { + res = 0; + } + + kfree(buf); + + return res; +} + +static u8 
__calc_hdr_byte_sum(const struct ras_eeprom_control *control)
+{
+	int ii;
+	u8 *pp, csum;
+	u32 sz;
+
+	/* Header checksum, skip checksum field in the calculation */
+	sz = sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum);
+	pp = (u8 *) &control->tbl_hdr;
+	csum = 0;
+	for (ii = 0; ii < sz; ii++, pp++)
+		csum += *pp;
+
+	return csum;
+}
+
+static u8 __calc_ras_info_byte_sum(const struct ras_eeprom_control *control)
+{
+	int ii;
+	u8 *pp, csum;
+	u32 sz;
+
+	sz = sizeof(control->tbl_rai);
+	pp = (u8 *) &control->tbl_rai;
+	csum = 0;
+	for (ii = 0; ii < sz; ii++, pp++)
+		csum += *pp;
+
+	return csum;
+}
+
+static int ras_eeprom_correct_header_tag(
+	struct ras_eeprom_control *control,
+	uint32_t header)
+{
+	struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+	u8 *hh;
+	int res;
+	u8 csum;
+
+	csum = -hdr->checksum;
+
+	hh = (void *) &hdr->header;
+	csum -= (hh[0] + hh[1] + hh[2] + hh[3]);
+	hh = (void *) &header;
+	csum += hh[0] + hh[1] + hh[2] + hh[3];
+	csum = -csum;
+	mutex_lock(&control->ras_tbl_mutex);
+	hdr->header = header;
+	hdr->checksum = csum;
+	res = __write_table_header(control);
+	mutex_unlock(&control->ras_tbl_mutex);
+
+	return res;
+}
+
+static void ras_set_eeprom_table_version(struct ras_eeprom_control *control)
+{
+	struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+
+	hdr->version = RAS_TABLE_VER_V3;
+}
+
+int ras_eeprom_reset_table(struct ras_core_context *ras_core)
+{
+	struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+	struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+	struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+	u8 csum;
+	int res;
+
+	mutex_lock(&control->ras_tbl_mutex);
+
+	hdr->header = RAS_TABLE_HDR_VAL;
+	ras_set_eeprom_table_version(control);
+
+	if (hdr->version >= RAS_TABLE_VER_V2_1) {
+		hdr->first_rec_offset = RAS_RECORD_START_V2_1;
+		hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
+			RAS_TABLE_V2_1_INFO_SIZE;
+		rai->rma_status = RAS_GPU_HEALTH_USABLE;
+		/**
+		 * GPU health represented as a percentage.
+		 * 0 means worst health, 100 means fully healthy.
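+		 *
+		 * For example (illustrative numbers): with a threshold of 256
+		 * records and 32 bad pages already retired, the value later
+		 * recomputed in ras_eeprom_update_header() would be
+		 * ((256 - 32) * 100) / 256 = 87.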
+		 */
+		rai->health_percent = 100;
+		/* ecc_page_threshold = 0 means disable bad page retirement */
+		rai->ecc_page_threshold = control->record_threshold_count;
+	} else {
+		hdr->first_rec_offset = RAS_RECORD_START;
+		hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+	}
+
+	csum = __calc_hdr_byte_sum(control);
+	if (hdr->version >= RAS_TABLE_VER_V2_1)
+		csum += __calc_ras_info_byte_sum(control);
+	csum = -csum;
+	hdr->checksum = csum;
+	res = __write_table_header(control);
+	if (!res && hdr->version > RAS_TABLE_VER_V1)
+		res = __write_table_ras_info(control);
+
+	control->ras_num_recs = 0;
+	control->ras_fri = 0;
+
+	control->bad_channel_bitmap = 0;
+	ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+			&control->ras_num_recs);
+	ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+			&control->bad_channel_bitmap);
+	control->update_channel_flag = false;
+
+	mutex_unlock(&control->ras_tbl_mutex);
+
+	return res;
+}
+
+static void
+__encode_table_record_to_buf(struct ras_eeprom_control *control,
+			     struct eeprom_umc_record *record,
+			     unsigned char *buf)
+{
+	__le64 tmp = 0;
+	int i = 0;
+
+	/* Next are all record fields according to EEPROM page spec in LE format */
+	buf[i++] = record->err_type;
+
+	buf[i++] = record->bank;
+
+	tmp = cpu_to_le64(record->ts);
+	memcpy(buf + i, &tmp, 8);
+	i += 8;
+
+	tmp = cpu_to_le64((record->offset & 0xffffffffffff));
+	memcpy(buf + i, &tmp, 6);
+	i += 6;
+
+	buf[i++] = record->mem_channel;
+	buf[i++] = record->mcumc_id;
+
+	tmp = cpu_to_le64((record->retired_row_pfn & 0xffffffffffff));
+	memcpy(buf + i, &tmp, 6);
+}
+
+static void
+__decode_table_record_from_buf(struct ras_eeprom_control *control,
+			       struct eeprom_umc_record *record,
+			       unsigned char *buf)
+{
+	__le64 tmp = 0;
+	int i = 0;
+
+	/* Next are all record fields according to EEPROM page spec in LE format */
+	record->err_type = buf[i++];
+
+	record->bank = buf[i++];
+
+	memcpy(&tmp, buf + i, 8);
+	record->ts = le64_to_cpu(tmp);
+	i += 8;
+
+	memcpy(&tmp, buf + i, 6);
+	record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
+	i += 6;
+
+	record->mem_channel = buf[i++];
+	record->mcumc_id = buf[i++];
+
+	memcpy(&tmp, buf + i, 6);
+	record->retired_row_pfn = (le64_to_cpu(tmp) & 0xffffffffffff);
+}
+
+bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core)
+{
+	struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+	bool ret = false;
+	int bad_page_count;
+
+	if (!__is_ras_eeprom_supported(ras_core) ||
+	    !control->record_threshold_config)
+		return false;
+
+	bad_page_count = ras_umc_get_badpage_count(ras_core);
+	if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD) {
+		if (bad_page_count > control->record_threshold_count)
+			RAS_DEV_WARN(ras_core->dev, "RAS records:%d exceed threshold:%d",
+				bad_page_count, control->record_threshold_count);
+
+		if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) ||
+		    (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) {
+			RAS_DEV_WARN(ras_core->dev,
+				"Please consult AMD Service Action Guide (SAG) for appropriate service procedures.\n");
+			ret = false;
+		} else {
+			ras_core->is_rma = true;
+			RAS_DEV_WARN(ras_core->dev,
+				"Please consider adjusting the customized threshold.\n");
+			ret = true;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * __ras_eeprom_write -- write indexed from buffer to EEPROM
+ * @control: pointer to control structure
+ * @buf: pointer to buffer containing data to write
+ * @fri: start writing at this index
+ * @num: number of records to write
+ *
+ * The caller must hold the table mutex
in @control. + * Return 0 on success, -errno otherwise. + */ +static int __ras_eeprom_write(struct ras_eeprom_control *control, + u8 *buf, const u32 fri, const u32 num) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + u32 buf_size; + int res; + + /* i2c may be unstable in gpu reset */ + buf_size = num * RAS_TABLE_RECORD_SIZE; + res = __eeprom_write(ras_core, + control->i2c_address + RAS_INDEX_TO_OFFSET(control, fri), + buf, buf_size); + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "Writing %d EEPROM table records error:%d\n", num, res); + } else if (res < buf_size) { + /* Short write, return error.*/ + RAS_DEV_ERR(ras_core->dev, + "Wrote %d records out of %d\n", + (res/RAS_TABLE_RECORD_SIZE), num); + res = -EIO; + } else { + res = 0; + } + + return res; +} + +static int ras_eeprom_append_table(struct ras_eeprom_control *control, + struct eeprom_umc_record *record, + const u32 num) +{ + u32 a, b, i; + u8 *buf, *pp; + int res; + + buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Encode all of them in one go. + */ + pp = buf; + for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) { + __encode_table_record_to_buf(control, &record[i], pp); + + /* update bad channel bitmap */ + if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) && + !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) { + control->bad_channel_bitmap |= 1 << record[i].mem_channel; + control->update_channel_flag = true; + } + } + + /* a, first record index to write into. + * b, last record index to write into. + * a = first index to read (fri) + number of records in the table, + * b = a + @num - 1. + * Let N = control->ras_max_num_record_count, then we have, + * case 0: 0 <= a <= b < N, + * just append @num records starting at a; + * case 1: 0 <= a < N <= b, + * append (N - a) records starting at a, and + * append the remainder, b % N + 1, starting at 0. + * case 2: 0 <= fri < N <= a <= b, then modulo N we get two subcases, + * case 2a: 0 <= a <= b < N + * append num records starting at a; and fix fri if b overwrote it, + * and since a <= b, if b overwrote it then a must've also, + * and if b didn't overwrite it, then a didn't also. + * case 2b: 0 <= b < a < N + * write num records starting at a, which wraps around 0=N + * and overwrite fri unconditionally. Now from case 2a, + * this means that b eclipsed fri to overwrite it and wrap + * around 0 again, i.e. b = 2N+r pre modulo N, so we unconditionally + * set fri = b + 1 (mod N). + * Now, since fri is updated in every case, except the trivial case 0, + * the number of records present in the table after writing, is, + * num_recs - 1 = b - fri (mod N), and we take the positive value, + * by adding an arbitrary multiple of N before taking the modulo N + * as shown below. + */ + a = control->ras_fri + control->ras_num_recs; + b = a + num - 1; + if (b < control->ras_max_record_count) { + res = __ras_eeprom_write(control, buf, a, num); + } else if (a < control->ras_max_record_count) { + u32 g0, g1; + + g0 = control->ras_max_record_count - a; + g1 = b % control->ras_max_record_count + 1; + res = __ras_eeprom_write(control, buf, a, g0); + if (res) + goto Out; + res = __ras_eeprom_write(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, + 0, g1); + if (res) + goto Out; + if (g1 > control->ras_fri) + control->ras_fri = g1 % control->ras_max_record_count; + } else { + a %= control->ras_max_record_count; + b %= control->ras_max_record_count; + + if (a <= b) { + /* Note that, b - a + 1 = num. 
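+			 *
+			 * For example (hypothetical values): with N = 4, fri = 3,
+			 * 2 records already stored and num = 2, a = 5 and b = 6
+			 * wrap to a = 1 and b = 2, the two records are written at
+			 * indices 1..2, fri stays at 3, and the table then holds
+			 * 4 records.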
*/ + res = __ras_eeprom_write(control, buf, a, num); + if (res) + goto Out; + if (b >= control->ras_fri) + control->ras_fri = (b + 1) % control->ras_max_record_count; + } else { + u32 g0, g1; + + /* b < a, which means, we write from + * a to the end of the table, and from + * the start of the table to b. + */ + g0 = control->ras_max_record_count - a; + g1 = b + 1; + res = __ras_eeprom_write(control, buf, a, g0); + if (res) + goto Out; + res = __ras_eeprom_write(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1); + if (res) + goto Out; + control->ras_fri = g1 % control->ras_max_record_count; + } + } + control->ras_num_recs = 1 + + (control->ras_max_record_count + b - control->ras_fri) + % control->ras_max_record_count; +Out: + kfree(buf); + return res; +} + +static int ras_eeprom_update_header(struct ras_eeprom_control *control) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + int threshold_config = control->record_threshold_config; + u8 *buf, *pp, csum; + u32 buf_size; + int bad_page_count; + int res; + + bad_page_count = ras_umc_get_badpage_count(ras_core); + /* Modify the header if it exceeds. + */ + if (threshold_config != 0 && + bad_page_count > control->record_threshold_count) { + RAS_DEV_WARN(ras_core->dev, + "Saved bad pages %d reaches threshold value %d\n", + bad_page_count, control->record_threshold_count); + control->tbl_hdr.header = RAS_TABLE_HDR_BAD; + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) { + control->tbl_rai.rma_status = RAS_GPU_RETIRED__ECC_REACH_THRESHOLD; + control->tbl_rai.health_percent = 0; + } + + if ((threshold_config != WARN_NONSTOP_OVER_THRESHOLD) && + (threshold_config != NONSTOP_OVER_THRESHOLD)) + ras_core->is_rma = true; + + /* ignore the -ENOTSUPP return value */ + ras_core_event_notify(ras_core, RAS_EVENT_ID__DEVICE_RMA, NULL); + } + + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) + control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + else + control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + control->tbl_hdr.checksum = 0; + + buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if (!buf) { + RAS_DEV_ERR(ras_core->dev, + "allocating memory for table of size %d bytes failed\n", + control->tbl_hdr.tbl_size); + res = -ENOMEM; + goto Out; + } + + res = __eeprom_read(ras_core, + control->i2c_address + + control->ras_record_offset, + buf, buf_size); + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "EEPROM failed reading records:%d\n", res); + goto Out; + } else if (res < buf_size) { + RAS_DEV_ERR(ras_core->dev, + "EEPROM read %d out of %d bytes\n", res, buf_size); + res = -EIO; + goto Out; + } + + /** + * bad page records have been stored in eeprom, + * now calculate gpu health percent + */ + if (threshold_config != 0 && + control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 && + bad_page_count <= control->record_threshold_count) + control->tbl_rai.health_percent = ((control->record_threshold_count - + bad_page_count) * 100) / control->record_threshold_count; + + /* Recalc the checksum. 
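+	 * The value stored below is the two's complement of the byte sum of
+	 * the header, the (v2.1+) ras info block and all records, so that
+	 * summing every byte of the table, including the checksum itself,
+	 * yields 0 modulo 256.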
+	 */
+	csum = 0;
+	for (pp = buf; pp < buf + buf_size; pp++)
+		csum += *pp;
+
+	csum += __calc_hdr_byte_sum(control);
+	if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+		csum += __calc_ras_info_byte_sum(control);
+	/* avoid sign extension when assigning to "checksum" */
+	csum = -csum;
+	control->tbl_hdr.checksum = csum;
+	res = __write_table_header(control);
+	if (!res && control->tbl_hdr.version > RAS_TABLE_VER_V1)
+		res = __write_table_ras_info(control);
+Out:
+	kfree(buf);
+	return res;
+}
+
+/**
+ * ras_eeprom_append -- append records to the EEPROM RAS table
+ * @ras_core: pointer to the RAS core context
+ * @record: array of records to append
+ * @num: number of records in @record array
+ *
+ * Append @num records to the table, calculate the checksum and write
+ * the table back to EEPROM. The number of records that can be appended
+ * is limited so that the total number of records stored in the table
+ * does not exceed control->ras_max_record_count.
+ *
+ * Return 0 on success or if EEPROM is not supported, -errno on error.
+ */
+int ras_eeprom_append(struct ras_core_context *ras_core,
+		struct eeprom_umc_record *record, const u32 num)
+{
+	struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+	int res;
+
+	if (!__is_ras_eeprom_supported(ras_core))
+		return 0;
+
+	if (num == 0) {
+		RAS_DEV_ERR(ras_core->dev, "will not append 0 records\n");
+		return -EINVAL;
+	} else if ((num + control->ras_num_recs) > control->ras_max_record_count) {
+		RAS_DEV_ERR(ras_core->dev,
+			"cannot append %d records, table size limit is %d\n",
+			num, control->ras_max_record_count);
+		return -EINVAL;
+	}
+
+	mutex_lock(&control->ras_tbl_mutex);
+	res = ras_eeprom_append_table(control, record, num);
+	if (!res)
+		res = ras_eeprom_update_header(control);
+
+	mutex_unlock(&control->ras_tbl_mutex);
+
+	return res;
+}
+
+/**
+ * __ras_eeprom_read -- read indexed from EEPROM into buffer
+ * @control: pointer to control structure
+ * @buf: pointer to buffer to read into
+ * @fri: first record index, start reading at this index, absolute index
+ * @num: number of records to read
+ *
+ * The caller must hold the table mutex in @control.
+ * Return 0 on success, -errno otherwise.
+ */
+static int __ras_eeprom_read(struct ras_eeprom_control *control,
+		u8 *buf, const u32 fri, const u32 num)
+{
+	struct ras_core_context *ras_core = to_ras_core_context(control);
+	u32 buf_size;
+	int res;
+
+	/* i2c may be unstable in gpu reset */
+	buf_size = num * RAS_TABLE_RECORD_SIZE;
+	res = __eeprom_read(ras_core,
+			control->i2c_address +
+			RAS_INDEX_TO_OFFSET(control, fri),
+			buf, buf_size);
+	if (res < 0) {
+		RAS_DEV_ERR(ras_core->dev,
+			"Reading %d EEPROM table records error:%d\n", num, res);
+	} else if (res < buf_size) {
+		/* Short read, return error.
+ */ + RAS_DEV_ERR(ras_core->dev, + "Read %d records out of %d\n", + (res/RAS_TABLE_RECORD_SIZE), num); + res = -EIO; + } else { + res = 0; + } + + return res; +} + +int ras_eeprom_read(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, const u32 num) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + int i, res; + u8 *buf, *pp; + u32 g0, g1; + + if (!__is_ras_eeprom_supported(ras_core)) + return 0; + + if (num == 0) { + RAS_DEV_ERR(ras_core->dev, "will not read 0 records\n"); + return -EINVAL; + } else if (num > control->ras_num_recs) { + RAS_DEV_ERR(ras_core->dev, + "too many records to read:%d available:%d\n", + num, control->ras_num_recs); + return -EINVAL; + } + + buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Determine how many records to read, from the first record + * index, fri, to the end of the table, and from the beginning + * of the table, such that the total number of records is + * @num, and we handle wrap around when fri > 0 and + * fri + num > RAS_MAX_RECORD_COUNT. + * + * First we compute the index of the last element + * which would be fetched from each region, + * g0 is in [fri, fri + num - 1], and + * g1 is in [0, RAS_MAX_RECORD_COUNT - 1]. + * Then, if g0 < RAS_MAX_RECORD_COUNT, the index of + * the last element to fetch, we set g0 to _the number_ + * of elements to fetch, @num, since we know that the last + * indexed to be fetched does not exceed the table. + * + * If, however, g0 >= RAS_MAX_RECORD_COUNT, then + * we set g0 to the number of elements to read + * until the end of the table, and g1 to the number of + * elements to read from the beginning of the table. + */ + g0 = control->ras_fri + num - 1; + g1 = g0 % control->ras_max_record_count; + if (g0 < control->ras_max_record_count) { + g0 = num; + g1 = 0; + } else { + g0 = control->ras_max_record_count - control->ras_fri; + g1 += 1; + } + + mutex_lock(&control->ras_tbl_mutex); + res = __ras_eeprom_read(control, buf, control->ras_fri, g0); + if (res) + goto Out; + if (g1) { + res = __ras_eeprom_read(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1); + if (res) + goto Out; + } + + res = 0; + + /* Read up everything? Then transform. + */ + pp = buf; + for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) { + __decode_table_record_from_buf(control, &record[i], pp); + + /* update bad channel bitmap */ + if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) && + !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) { + control->bad_channel_bitmap |= 1 << record[i].mem_channel; + control->update_channel_flag = true; + } + } +Out: + kfree(buf); + mutex_unlock(&control->ras_tbl_mutex); + + return res; +} + +uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + + /* get available eeprom table version first before eeprom table init */ + ras_set_eeprom_table_version(control); + + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) + return RAS_MAX_RECORD_COUNT_V2_1; + else + return RAS_MAX_RECORD_COUNT; +} + +/** + * __verify_ras_table_checksum -- verify the RAS EEPROM table checksum + * @control: pointer to control structure + * + * Check the checksum of the stored in EEPROM RAS table. + * + * Return 0 if the checksum is correct, + * positive if it is not correct, and + * -errno on I/O error. 
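+ *
+ * A return value of 0 therefore means that every byte of the table,
+ * including the stored checksum, sums to zero modulo 256, which is how
+ * a consistent table is written out by ras_eeprom_update_header() and
+ * ras_eeprom_reset_table().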
+ */ +static int __verify_ras_table_checksum(struct ras_eeprom_control *control) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + int buf_size, res; + u8 csum, *buf, *pp; + + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) + buf_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + else + buf_size = RAS_TABLE_HEADER_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + RAS_DEV_ERR(ras_core->dev, + "Out of memory checking RAS table checksum.\n"); + return -ENOMEM; + } + + res = __eeprom_read(ras_core, + control->i2c_address + + control->ras_header_offset, + buf, buf_size); + if (res < buf_size) { + RAS_DEV_ERR(ras_core->dev, + "Partial read for checksum, res:%d\n", res); + /* On partial reads, return -EIO. + */ + if (res >= 0) + res = -EIO; + goto Out; + } + + csum = 0; + for (pp = buf; pp < buf + buf_size; pp++) + csum += *pp; +Out: + kfree(buf); + return res < 0 ? res : csum; +} + +static int __read_table_ras_info(struct ras_eeprom_control *control) +{ + struct ras_eeprom_table_ras_info *rai = &control->tbl_rai; + struct ras_core_context *ras_core = to_ras_core_context(control); + unsigned char *buf; + int res; + + buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); + if (!buf) { + RAS_DEV_ERR(ras_core->dev, + "Failed to alloc buf to read EEPROM table ras info\n"); + return -ENOMEM; + } + + /** + * EEPROM table V2_1 supports ras info, + * read EEPROM table ras info + */ + res = __eeprom_read(ras_core, + control->i2c_address + control->ras_info_offset, + buf, RAS_TABLE_V2_1_INFO_SIZE); + if (res < RAS_TABLE_V2_1_INFO_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Failed to read EEPROM table ras info, res:%d\n", res); + res = res >= 0 ? -EIO : res; + goto Out; + } + + __decode_table_ras_info_from_buf(rai, buf); + +Out: + kfree(buf); + return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res; +} + +static int __check_ras_table_status(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 }; + struct ras_eeprom_table_header *hdr; + int res; + + hdr = &control->tbl_hdr; + + if (!__is_ras_eeprom_supported(ras_core)) + return 0; + + if (!__get_eeprom_i2c_addr(ras_core, control)) + return -EINVAL; + + control->ras_header_offset = RAS_HDR_START; + control->ras_info_offset = RAS_TABLE_V2_1_INFO_START; + mutex_init(&control->ras_tbl_mutex); + + /* Read the table header from EEPROM address */ + res = __eeprom_read(ras_core, + control->i2c_address + control->ras_header_offset, + buf, RAS_TABLE_HEADER_SIZE); + if (res < RAS_TABLE_HEADER_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Failed to read EEPROM table header, res:%d\n", res); + return res >= 0 ? 
-EIO : res; + } + + __decode_table_header_from_buf(hdr, buf); + + if (hdr->header != RAS_TABLE_HDR_VAL && + hdr->header != RAS_TABLE_HDR_BAD) { + RAS_DEV_INFO(ras_core->dev, "Creating a new EEPROM table"); + return ras_eeprom_reset_table(ras_core); + } + + switch (hdr->version) { + case RAS_TABLE_VER_V2_1: + case RAS_TABLE_VER_V3: + control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr); + control->ras_record_offset = RAS_RECORD_START_V2_1; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1; + break; + case RAS_TABLE_VER_V1: + control->ras_num_recs = RAS_NUM_RECS(hdr); + control->ras_record_offset = RAS_RECORD_START; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT; + break; + default: + RAS_DEV_ERR(ras_core->dev, + "RAS header invalid, unsupported version: %u", + hdr->version); + return -EINVAL; + } + + if (control->ras_num_recs > control->ras_max_record_count) { + RAS_DEV_ERR(ras_core->dev, + "RAS header invalid, records in header: %u max allowed :%u", + control->ras_num_recs, control->ras_max_record_count); + return -EINVAL; + } + + control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset); + + return 0; +} + +int ras_eeprom_check_storage_status(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + struct ras_eeprom_table_header *hdr; + int bad_page_count; + int res = 0; + + if (!__is_ras_eeprom_supported(ras_core)) + return 0; + + if (!__get_eeprom_i2c_addr(ras_core, control)) + return -EINVAL; + + hdr = &control->tbl_hdr; + + bad_page_count = ras_umc_get_badpage_count(ras_core); + if (hdr->header == RAS_TABLE_HDR_VAL) { + RAS_DEV_INFO(ras_core->dev, + "Found existing EEPROM table with %d records\n", + bad_page_count); + + if (hdr->version >= RAS_TABLE_VER_V2_1) { + res = __read_table_ras_info(control); + if (res) + return res; + } + + res = __verify_ras_table_checksum(control); + if (res) + RAS_DEV_ERR(ras_core->dev, + "RAS table incorrect checksum or error:%d\n", res); + + /* Warn if we are at 90% of the threshold or above + */ + if (10 * bad_page_count >= 9 * control->record_threshold_count) + RAS_DEV_WARN(ras_core->dev, + "RAS records:%u exceeds 90%% of threshold:%d\n", + bad_page_count, + control->record_threshold_count); + + } else if (hdr->header == RAS_TABLE_HDR_BAD && + control->record_threshold_config != 0) { + if (hdr->version >= RAS_TABLE_VER_V2_1) { + res = __read_table_ras_info(control); + if (res) + return res; + } + + res = __verify_ras_table_checksum(control); + if (res) + RAS_DEV_ERR(ras_core->dev, + "RAS Table incorrect checksum or error:%d\n", res); + + if (control->record_threshold_count >= bad_page_count) { + /* This means that, the threshold was increased since + * the last time the system was booted, and now, + * ras->record_threshold_count - control->num_recs > 0, + * so that at least one more record can be saved, + * before the page count threshold is reached. 
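+			 *
+			 * For example (hypothetical numbers): if the threshold was
+			 * raised from 16 to 64 while 16 pages are already retired,
+			 * the 'BADG' tag is rewritten back to 'AMDR' below and bad
+			 * page retirement can continue.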
+ */ + RAS_DEV_INFO(ras_core->dev, + "records:%d threshold:%d, resetting RAS table header signature", + bad_page_count, + control->record_threshold_count); + res = ras_eeprom_correct_header_tag(control, RAS_TABLE_HDR_VAL); + } else { + RAS_DEV_ERR(ras_core->dev, "RAS records:%d exceed threshold:%d", + bad_page_count, control->record_threshold_count); + if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) || + (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) { + RAS_DEV_WARN(ras_core->dev, + "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n"); + res = 0; + } else { + ras_core->is_rma = true; + RAS_DEV_ERR(ras_core->dev, + "User defined threshold is set, runtime service will be halt when threshold is reached\n"); + } + } + } + + return res < 0 ? res : 0; +} + +int ras_eeprom_hw_init(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control; + struct ras_eeprom_config *eeprom_cfg; + + if (!ras_core) + return -EINVAL; + + ras_core->is_rma = false; + + control = &ras_core->ras_eeprom; + + memset(control, 0, sizeof(*control)); + + eeprom_cfg = &ras_core->config->eeprom_cfg; + control->record_threshold_config = + eeprom_cfg->eeprom_record_threshold_config; + + control->record_threshold_count = ras_eeprom_max_record_count(ras_core); + if (eeprom_cfg->eeprom_record_threshold_count < + control->record_threshold_count) + control->record_threshold_count = + eeprom_cfg->eeprom_record_threshold_count; + + control->sys_func = eeprom_cfg->eeprom_sys_fn; + control->max_read_len = eeprom_cfg->max_i2c_read_len; + control->max_write_len = eeprom_cfg->max_i2c_write_len; + control->i2c_adapter = eeprom_cfg->eeprom_i2c_adapter; + control->i2c_port = eeprom_cfg->eeprom_i2c_port; + control->i2c_address = eeprom_cfg->eeprom_i2c_addr; + + control->update_channel_flag = false; + + return __check_ras_table_status(ras_core); +} + +int ras_eeprom_hw_fini(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control; + + if (!ras_core) + return -EINVAL; + + control = &ras_core->ras_eeprom; + mutex_destroy(&control->ras_tbl_mutex); + + return 0; +} + +uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core) +{ + if (!ras_core) + return 0; + + return ras_core->ras_eeprom.ras_num_recs; +} + +void ras_eeprom_sync_info(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control; + + if (!ras_core) + return; + + control = &ras_core->ras_eeprom; + ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM, + &control->ras_num_recs); + ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP, + &control->bad_channel_bitmap); +} + +enum ras_gpu_health_status + ras_eeprom_check_gpu_status(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + struct ras_eeprom_table_ras_info *rai = &control->tbl_rai; + + if (!__is_ras_eeprom_supported(ras_core) || + !control->record_threshold_config) + return RAS_GPU_HEALTH_NONE; + + if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD) + return RAS_GPU_IN_BAD_STATUS; + + return rai->rma_status; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h new file mode 100644 index 000000000000..2abe566c18b6 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_EEPROM_H__ +#define __RAS_EEPROM_H__ +#include "ras_sys.h" + +#define RAS_TABLE_VER_V1 0x00010000 +#define RAS_TABLE_VER_V2_1 0x00021000 +#define RAS_TABLE_VER_V3 0x00030000 + +#define NONSTOP_OVER_THRESHOLD -2 +#define WARN_NONSTOP_OVER_THRESHOLD -1 +#define DISABLE_RETIRE_PAGE 0 + +/* + * Bad address pfn : eeprom_umc_record.retired_row_pfn[39:0], + * nps mode: eeprom_umc_record.retired_row_pfn[47:40] + */ +#define EEPROM_RECORD_UMC_ADDR_MASK 0xFFFFFFFFFFULL +#define EEPROM_RECORD_UMC_NPS_MASK 0xFF0000000000ULL +#define EEPROM_RECORD_UMC_NPS_SHIFT 40 + +#define EEPROM_RECORD_UMC_NPS_MODE(RECORD) \ + (((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_NPS_MASK) >> \ + EEPROM_RECORD_UMC_NPS_SHIFT) + +#define EEPROM_RECORD_UMC_ADDR_PFN(RECORD) \ + ((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_ADDR_MASK) + +#define EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(RECORD, ADDR, NPS) \ +do { \ + uint64_t tmp = (NPS); \ + tmp = ((tmp << EEPROM_RECORD_UMC_NPS_SHIFT) & EEPROM_RECORD_UMC_NPS_MASK); \ + tmp |= (ADDR) & EEPROM_RECORD_UMC_ADDR_MASK; \ + (RECORD)->retired_row_pfn = tmp; \ +} while (0) + +enum ras_gpu_health_status { + RAS_GPU_HEALTH_NONE = 0, + RAS_GPU_HEALTH_USABLE = 1, + RAS_GPU_RETIRED__ECC_REACH_THRESHOLD = 2, + RAS_GPU_IN_BAD_STATUS = 3, +}; + +enum ras_eeprom_err_type { + RAS_EEPROM_ERR_NA, + RAS_EEPROM_ERR_RECOVERABLE, + RAS_EEPROM_ERR_NON_RECOVERABLE, + RAS_EEPROM_ERR_COUNT, +}; + +struct ras_eeprom_table_header { + uint32_t header; + uint32_t version; + uint32_t first_rec_offset; + uint32_t tbl_size; + uint32_t checksum; +} __packed; + +struct ras_eeprom_table_ras_info { + u8 rma_status; + u8 health_percent; + u16 ecc_page_threshold; + u32 padding[64 - 1]; +} __packed; + +struct ras_eeprom_control { + struct ras_eeprom_table_header tbl_hdr; + struct ras_eeprom_table_ras_info tbl_rai; + + /* record threshold */ + int record_threshold_config; + uint32_t record_threshold_count; + bool update_channel_flag; + + const struct ras_eeprom_sys_func *sys_func; + void *i2c_adapter; + u32 i2c_port; + u16 max_read_len; + u16 max_write_len; + + /* Base I2C EEPPROM 19-bit memory address, + * where the table is located. For more information, + * see top of amdgpu_eeprom.c. + */ + u32 i2c_address; + + /* The byte offset off of @i2c_address + * where the table header is found, + * and where the records start--always + * right after the header. 
+ */ + u32 ras_header_offset; + u32 ras_info_offset; + u32 ras_record_offset; + + /* Number of records in the table. + */ + u32 ras_num_recs; + + /* First record index to read, 0-based. + * Range is [0, num_recs-1]. This is + * an absolute index, starting right after + * the table header. + */ + u32 ras_fri; + + /* Maximum possible number of records + * we could store, i.e. the maximum capacity + * of the table. + */ + u32 ras_max_record_count; + + /* Protect table access via this mutex. + */ + struct mutex ras_tbl_mutex; + + /* Record channel info which occurred bad pages + */ + u32 bad_channel_bitmap; +}; + +/* + * Represents single table record. Packed to be easily serialized into byte + * stream. + */ +struct eeprom_umc_record { + + union { + uint64_t address; + uint64_t offset; + }; + + uint64_t retired_row_pfn; + uint64_t ts; + + enum ras_eeprom_err_type err_type; + + union { + unsigned char bank; + unsigned char cu; + }; + + unsigned char mem_channel; + unsigned char mcumc_id; + + /* The following variables will not be saved to eeprom. + */ + uint64_t cur_nps_retired_row_pfn; + uint32_t cur_nps_bank; + uint32_t cur_nps; +}; + +struct ras_core_context; +int ras_eeprom_hw_init(struct ras_core_context *ras_core); +int ras_eeprom_hw_fini(struct ras_core_context *ras_core); + +int ras_eeprom_reset_table(struct ras_core_context *ras_core); + +bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core); + +int ras_eeprom_read(struct ras_core_context *ras_core, + struct eeprom_umc_record *records, const u32 num); + +int ras_eeprom_append(struct ras_core_context *ras_core, + struct eeprom_umc_record *records, const u32 num); + +uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core); +uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core); +void ras_eeprom_sync_info(struct ras_core_context *ras_core); + +int ras_eeprom_check_storage_status(struct ras_core_context *ras_core); +enum ras_gpu_health_status + ras_eeprom_check_gpu_status(struct ras_core_context *ras_core); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c new file mode 100644 index 000000000000..f5ce28777705 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "ras.h" +#include "ras_gfx_v9_0.h" +#include "ras_gfx.h" +#include "ras_core_status.h" + +static const struct ras_gfx_ip_func *ras_gfx_get_ip_funcs( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(9, 4, 3): + case IP_VERSION(9, 4, 4): + case IP_VERSION(9, 5, 0): + return &gfx_ras_func_v9_0; + default: + RAS_DEV_ERR(ras_core->dev, + "GFX ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock) +{ + struct ras_gfx *gfx = &ras_core->ras_gfx; + + return gfx->ip_func->get_ta_subblock(ras_core, + error_type, subblock, ta_subblock); +} + +int ras_gfx_hw_init(struct ras_core_context *ras_core) +{ + struct ras_gfx *gfx = &ras_core->ras_gfx; + + gfx->gfx_ip_version = ras_core->config->gfx_ip_version; + + gfx->ip_func = ras_gfx_get_ip_funcs(ras_core, gfx->gfx_ip_version); + + return gfx->ip_func ? RAS_CORE_OK : -EINVAL; +} + +int ras_gfx_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h new file mode 100644 index 000000000000..8a42d69fb0ad --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_GFX_H__ +#define __RAS_GFX_H__ + +struct ras_gfx_ip_func { + int (*get_ta_subblock)(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock); +}; + +struct ras_gfx { + uint32_t gfx_ip_version; + const struct ras_gfx_ip_func *ip_func; +}; + +int ras_gfx_hw_init(struct ras_core_context *ras_core); +int ras_gfx_hw_fini(struct ras_core_context *ras_core); + +int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock); + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c new file mode 100644 index 000000000000..6213d3f125be --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c @@ -0,0 +1,426 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_gfx_v9_0.h" +#include "ras_core_status.h" + +enum ta_gfx_v9_subblock { + /*CPC*/ + TA_GFX_V9__GFX_CPC_INDEX_START = 0, + TA_GFX_V9__GFX_CPC_SCRATCH = TA_GFX_V9__GFX_CPC_INDEX_START, + TA_GFX_V9__GFX_CPC_UCODE, + TA_GFX_V9__GFX_DC_STATE_ME1, + TA_GFX_V9__GFX_DC_CSINVOC_ME1, + TA_GFX_V9__GFX_DC_RESTORE_ME1, + TA_GFX_V9__GFX_DC_STATE_ME2, + TA_GFX_V9__GFX_DC_CSINVOC_ME2, + TA_GFX_V9__GFX_DC_RESTORE_ME2, + TA_GFX_V9__GFX_CPC_INDEX_END = TA_GFX_V9__GFX_DC_RESTORE_ME2, + /* CPF*/ + TA_GFX_V9__GFX_CPF_INDEX_START, + TA_GFX_V9__GFX_CPF_ROQ_ME2 = TA_GFX_V9__GFX_CPF_INDEX_START, + TA_GFX_V9__GFX_CPF_ROQ_ME1, + TA_GFX_V9__GFX_CPF_TAG, + TA_GFX_V9__GFX_CPF_INDEX_END = TA_GFX_V9__GFX_CPF_TAG, + /* CPG*/ + TA_GFX_V9__GFX_CPG_INDEX_START, + TA_GFX_V9__GFX_CPG_DMA_ROQ = TA_GFX_V9__GFX_CPG_INDEX_START, + TA_GFX_V9__GFX_CPG_DMA_TAG, + TA_GFX_V9__GFX_CPG_TAG, + TA_GFX_V9__GFX_CPG_INDEX_END = TA_GFX_V9__GFX_CPG_TAG, + /* GDS*/ + TA_GFX_V9__GFX_GDS_INDEX_START, + TA_GFX_V9__GFX_GDS_MEM = TA_GFX_V9__GFX_GDS_INDEX_START, + TA_GFX_V9__GFX_GDS_INPUT_QUEUE, + TA_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM, + TA_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM, + TA_GFX_V9__GFX_GDS_OA_PIPE_MEM, + TA_GFX_V9__GFX_GDS_INDEX_END = TA_GFX_V9__GFX_GDS_OA_PIPE_MEM, + /* SPI*/ + TA_GFX_V9__GFX_SPI_SR_MEM, + /* SQ*/ + TA_GFX_V9__GFX_SQ_INDEX_START, + TA_GFX_V9__GFX_SQ_SGPR = TA_GFX_V9__GFX_SQ_INDEX_START, + TA_GFX_V9__GFX_SQ_LDS_D, + TA_GFX_V9__GFX_SQ_LDS_I, + TA_GFX_V9__GFX_SQ_VGPR, /* VGPR = SP*/ + TA_GFX_V9__GFX_SQ_INDEX_END = TA_GFX_V9__GFX_SQ_VGPR, + /* SQC (3 ranges)*/ + TA_GFX_V9__GFX_SQC_INDEX_START, + /* SQC range 0*/ + TA_GFX_V9__GFX_SQC_INDEX0_START = TA_GFX_V9__GFX_SQC_INDEX_START, + TA_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO = + TA_GFX_V9__GFX_SQC_INDEX0_START, + TA_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF, + TA_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO, + TA_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF, + TA_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO, + TA_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF, + TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + TA_GFX_V9__GFX_SQC_INDEX0_END = + TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + /* SQC range 1*/ + TA_GFX_V9__GFX_SQC_INDEX1_START, + TA_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM = + TA_GFX_V9__GFX_SQC_INDEX1_START, + TA_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM, + 
TA_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + TA_GFX_V9__GFX_SQC_INDEX1_END = + TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + /* SQC range 2*/ + TA_GFX_V9__GFX_SQC_INDEX2_START, + TA_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM = + TA_GFX_V9__GFX_SQC_INDEX2_START, + TA_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + TA_GFX_V9__GFX_SQC_INDEX2_END = + TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + TA_GFX_V9__GFX_SQC_INDEX_END = TA_GFX_V9__GFX_SQC_INDEX2_END, + /* TA*/ + TA_GFX_V9__GFX_TA_INDEX_START, + TA_GFX_V9__GFX_TA_FS_DFIFO = TA_GFX_V9__GFX_TA_INDEX_START, + TA_GFX_V9__GFX_TA_FS_AFIFO, + TA_GFX_V9__GFX_TA_FL_LFIFO, + TA_GFX_V9__GFX_TA_FX_LFIFO, + TA_GFX_V9__GFX_TA_FS_CFIFO, + TA_GFX_V9__GFX_TA_INDEX_END = TA_GFX_V9__GFX_TA_FS_CFIFO, + /* TCA*/ + TA_GFX_V9__GFX_TCA_INDEX_START, + TA_GFX_V9__GFX_TCA_HOLE_FIFO = TA_GFX_V9__GFX_TCA_INDEX_START, + TA_GFX_V9__GFX_TCA_REQ_FIFO, + TA_GFX_V9__GFX_TCA_INDEX_END = TA_GFX_V9__GFX_TCA_REQ_FIFO, + /* TCC (5 sub-ranges)*/ + TA_GFX_V9__GFX_TCC_INDEX_START, + /* TCC range 0*/ + TA_GFX_V9__GFX_TCC_INDEX0_START = TA_GFX_V9__GFX_TCC_INDEX_START, + TA_GFX_V9__GFX_TCC_CACHE_DATA = TA_GFX_V9__GFX_TCC_INDEX0_START, + TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1, + TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0, + TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1, + TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0, + TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1, + TA_GFX_V9__GFX_TCC_HIGH_RATE_TAG, + TA_GFX_V9__GFX_TCC_LOW_RATE_TAG, + TA_GFX_V9__GFX_TCC_INDEX0_END = TA_GFX_V9__GFX_TCC_LOW_RATE_TAG, + /* TCC range 1*/ + TA_GFX_V9__GFX_TCC_INDEX1_START, + TA_GFX_V9__GFX_TCC_IN_USE_DEC = TA_GFX_V9__GFX_TCC_INDEX1_START, + TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + TA_GFX_V9__GFX_TCC_INDEX1_END = + TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + /* TCC range 2*/ + TA_GFX_V9__GFX_TCC_INDEX2_START, + TA_GFX_V9__GFX_TCC_RETURN_DATA = TA_GFX_V9__GFX_TCC_INDEX2_START, + TA_GFX_V9__GFX_TCC_RETURN_CONTROL, + TA_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO, + TA_GFX_V9__GFX_TCC_WRITE_RETURN, + TA_GFX_V9__GFX_TCC_WRITE_CACHE_READ, + TA_GFX_V9__GFX_TCC_SRC_FIFO, + TA_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM, + TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + TA_GFX_V9__GFX_TCC_INDEX2_END = + TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + /* TCC range 3*/ + TA_GFX_V9__GFX_TCC_INDEX3_START, + TA_GFX_V9__GFX_TCC_LATENCY_FIFO = TA_GFX_V9__GFX_TCC_INDEX3_START, + TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + TA_GFX_V9__GFX_TCC_INDEX3_END = + TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + /* TCC range 4*/ + TA_GFX_V9__GFX_TCC_INDEX4_START, + TA_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN = + TA_GFX_V9__GFX_TCC_INDEX4_START, + TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + TA_GFX_V9__GFX_TCC_INDEX4_END = + TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + TA_GFX_V9__GFX_TCC_INDEX_END = TA_GFX_V9__GFX_TCC_INDEX4_END, + /* TCI*/ + TA_GFX_V9__GFX_TCI_WRITE_RAM, + /* TCP*/ + TA_GFX_V9__GFX_TCP_INDEX_START, + TA_GFX_V9__GFX_TCP_CACHE_RAM = TA_GFX_V9__GFX_TCP_INDEX_START, + TA_GFX_V9__GFX_TCP_LFIFO_RAM, + TA_GFX_V9__GFX_TCP_CMD_FIFO, + TA_GFX_V9__GFX_TCP_VM_FIFO, + TA_GFX_V9__GFX_TCP_DB_RAM, + TA_GFX_V9__GFX_TCP_UTCL1_LFIFO0, + TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + TA_GFX_V9__GFX_TCP_INDEX_END = 
TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + /* TD*/ + TA_GFX_V9__GFX_TD_INDEX_START, + TA_GFX_V9__GFX_TD_SS_FIFO_LO = TA_GFX_V9__GFX_TD_INDEX_START, + TA_GFX_V9__GFX_TD_SS_FIFO_HI, + TA_GFX_V9__GFX_TD_CS_FIFO, + TA_GFX_V9__GFX_TD_INDEX_END = TA_GFX_V9__GFX_TD_CS_FIFO, + /* EA (3 sub-ranges)*/ + TA_GFX_V9__GFX_EA_INDEX_START, + /* EA range 0*/ + TA_GFX_V9__GFX_EA_INDEX0_START = TA_GFX_V9__GFX_EA_INDEX_START, + TA_GFX_V9__GFX_EA_DRAMRD_CMDMEM = TA_GFX_V9__GFX_EA_INDEX0_START, + TA_GFX_V9__GFX_EA_DRAMWR_CMDMEM, + TA_GFX_V9__GFX_EA_DRAMWR_DATAMEM, + TA_GFX_V9__GFX_EA_RRET_TAGMEM, + TA_GFX_V9__GFX_EA_WRET_TAGMEM, + TA_GFX_V9__GFX_EA_GMIRD_CMDMEM, + TA_GFX_V9__GFX_EA_GMIWR_CMDMEM, + TA_GFX_V9__GFX_EA_GMIWR_DATAMEM, + TA_GFX_V9__GFX_EA_INDEX0_END = TA_GFX_V9__GFX_EA_GMIWR_DATAMEM, + /* EA range 1*/ + TA_GFX_V9__GFX_EA_INDEX1_START, + TA_GFX_V9__GFX_EA_DRAMRD_PAGEMEM = TA_GFX_V9__GFX_EA_INDEX1_START, + TA_GFX_V9__GFX_EA_DRAMWR_PAGEMEM, + TA_GFX_V9__GFX_EA_IORD_CMDMEM, + TA_GFX_V9__GFX_EA_IOWR_CMDMEM, + TA_GFX_V9__GFX_EA_IOWR_DATAMEM, + TA_GFX_V9__GFX_EA_GMIRD_PAGEMEM, + TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + TA_GFX_V9__GFX_EA_INDEX1_END = TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + /* EA range 2*/ + TA_GFX_V9__GFX_EA_INDEX2_START, + TA_GFX_V9__GFX_EA_MAM_D0MEM = TA_GFX_V9__GFX_EA_INDEX2_START, + TA_GFX_V9__GFX_EA_MAM_D1MEM, + TA_GFX_V9__GFX_EA_MAM_D2MEM, + TA_GFX_V9__GFX_EA_MAM_D3MEM, + TA_GFX_V9__GFX_EA_INDEX2_END = TA_GFX_V9__GFX_EA_MAM_D3MEM, + TA_GFX_V9__GFX_EA_INDEX_END = TA_GFX_V9__GFX_EA_INDEX2_END, + /* UTC VM L2 bank*/ + TA_GFX_V9__UTC_VML2_BANK_CACHE, + /* UTC VM walker*/ + TA_GFX_V9__UTC_VML2_WALKER, + /* UTC ATC L2 2MB cache*/ + TA_GFX_V9__UTC_ATCL2_CACHE_2M_BANK, + /* UTC ATC L2 4KB cache*/ + TA_GFX_V9__UTC_ATCL2_CACHE_4K_BANK, + TA_GFX_V9__GFX_MAX +}; + +struct ras_gfx_subblock_t { + unsigned char *name; + int ta_subblock; + int hw_supported_error_type; + int sw_supported_error_type; +}; + +#define RAS_GFX_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \ + [RAS_GFX_V9__##subblock] = { \ + #subblock, \ + TA_GFX_V9__##subblock, \ + ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \ + (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \ + } + +const struct ras_gfx_subblock_t ras_gfx_v9_0_subblocks[] = { + RAS_GFX_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0), + 
RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0, + 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0, + 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0), + 
RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0), +}; + +static int gfx_v9_0_get_ta_subblock(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock) +{ + const struct ras_gfx_subblock_t *gfx_subblock; + + if (subblock >= ARRAY_SIZE(ras_gfx_v9_0_subblocks)) + return -EINVAL; + + gfx_subblock = &ras_gfx_v9_0_subblocks[subblock]; + if (!gfx_subblock->name) + return -EPERM; + + if (!(gfx_subblock->hw_supported_error_type & error_type)) { + RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, hardware do not support type 0x%x\n", + gfx_subblock->name, error_type); + return -EPERM; + } + + if (!(gfx_subblock->sw_supported_error_type & error_type)) { + RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, driver do not support type 0x%x\n", + gfx_subblock->name, 
error_type); + return -EPERM; + } + + *ta_subblock = gfx_subblock->ta_subblock; + + return 0; +} + +const struct ras_gfx_ip_func gfx_ras_func_v9_0 = { + .get_ta_subblock = gfx_v9_0_get_ta_subblock, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h new file mode 100644 index 000000000000..659b56619747 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h @@ -0,0 +1,259 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_GFX_V9_0_H__ +#define __RAS_GFX_V9_0_H__ + +enum ras_gfx_v9_subblock { + /* CPC */ + RAS_GFX_V9__GFX_CPC_INDEX_START = 0, + RAS_GFX_V9__GFX_CPC_SCRATCH = + RAS_GFX_V9__GFX_CPC_INDEX_START, + RAS_GFX_V9__GFX_CPC_UCODE, + RAS_GFX_V9__GFX_DC_STATE_ME1, + RAS_GFX_V9__GFX_DC_CSINVOC_ME1, + RAS_GFX_V9__GFX_DC_RESTORE_ME1, + RAS_GFX_V9__GFX_DC_STATE_ME2, + RAS_GFX_V9__GFX_DC_CSINVOC_ME2, + RAS_GFX_V9__GFX_DC_RESTORE_ME2, + RAS_GFX_V9__GFX_CPC_INDEX_END = + RAS_GFX_V9__GFX_DC_RESTORE_ME2, + /* CPF */ + RAS_GFX_V9__GFX_CPF_INDEX_START, + RAS_GFX_V9__GFX_CPF_ROQ_ME2 = + RAS_GFX_V9__GFX_CPF_INDEX_START, + RAS_GFX_V9__GFX_CPF_ROQ_ME1, + RAS_GFX_V9__GFX_CPF_TAG, + RAS_GFX_V9__GFX_CPF_INDEX_END = RAS_GFX_V9__GFX_CPF_TAG, + /* CPG */ + RAS_GFX_V9__GFX_CPG_INDEX_START, + RAS_GFX_V9__GFX_CPG_DMA_ROQ = + RAS_GFX_V9__GFX_CPG_INDEX_START, + RAS_GFX_V9__GFX_CPG_DMA_TAG, + RAS_GFX_V9__GFX_CPG_TAG, + RAS_GFX_V9__GFX_CPG_INDEX_END = RAS_GFX_V9__GFX_CPG_TAG, + /* GDS */ + RAS_GFX_V9__GFX_GDS_INDEX_START, + RAS_GFX_V9__GFX_GDS_MEM = RAS_GFX_V9__GFX_GDS_INDEX_START, + RAS_GFX_V9__GFX_GDS_INPUT_QUEUE, + RAS_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM, + RAS_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM, + RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM, + RAS_GFX_V9__GFX_GDS_INDEX_END = + RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM, + /* SPI */ + RAS_GFX_V9__GFX_SPI_SR_MEM, + /* SQ */ + RAS_GFX_V9__GFX_SQ_INDEX_START, + RAS_GFX_V9__GFX_SQ_SGPR = RAS_GFX_V9__GFX_SQ_INDEX_START, + RAS_GFX_V9__GFX_SQ_LDS_D, + RAS_GFX_V9__GFX_SQ_LDS_I, + RAS_GFX_V9__GFX_SQ_VGPR, + RAS_GFX_V9__GFX_SQ_INDEX_END = RAS_GFX_V9__GFX_SQ_VGPR, + /* SQC (3 ranges) */ + RAS_GFX_V9__GFX_SQC_INDEX_START, + /* SQC range 0 */ + RAS_GFX_V9__GFX_SQC_INDEX0_START = + RAS_GFX_V9__GFX_SQC_INDEX_START, + RAS_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO = + RAS_GFX_V9__GFX_SQC_INDEX0_START, + RAS_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF, + RAS_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO, + 
RAS_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF, + RAS_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO, + RAS_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF, + RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + RAS_GFX_V9__GFX_SQC_INDEX0_END = + RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + /* SQC range 1 */ + RAS_GFX_V9__GFX_SQC_INDEX1_START, + RAS_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM = + RAS_GFX_V9__GFX_SQC_INDEX1_START, + RAS_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + RAS_GFX_V9__GFX_SQC_INDEX1_END = + RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + /* SQC range 2 */ + RAS_GFX_V9__GFX_SQC_INDEX2_START, + RAS_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM = + RAS_GFX_V9__GFX_SQC_INDEX2_START, + RAS_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + RAS_GFX_V9__GFX_SQC_INDEX2_END = + RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + RAS_GFX_V9__GFX_SQC_INDEX_END = + RAS_GFX_V9__GFX_SQC_INDEX2_END, + /* TA */ + RAS_GFX_V9__GFX_TA_INDEX_START, + RAS_GFX_V9__GFX_TA_FS_DFIFO = + RAS_GFX_V9__GFX_TA_INDEX_START, + RAS_GFX_V9__GFX_TA_FS_AFIFO, + RAS_GFX_V9__GFX_TA_FL_LFIFO, + RAS_GFX_V9__GFX_TA_FX_LFIFO, + RAS_GFX_V9__GFX_TA_FS_CFIFO, + RAS_GFX_V9__GFX_TA_INDEX_END = RAS_GFX_V9__GFX_TA_FS_CFIFO, + /* TCA */ + RAS_GFX_V9__GFX_TCA_INDEX_START, + RAS_GFX_V9__GFX_TCA_HOLE_FIFO = + RAS_GFX_V9__GFX_TCA_INDEX_START, + RAS_GFX_V9__GFX_TCA_REQ_FIFO, + RAS_GFX_V9__GFX_TCA_INDEX_END = + RAS_GFX_V9__GFX_TCA_REQ_FIFO, + /* TCC (5 sub-ranges) */ + RAS_GFX_V9__GFX_TCC_INDEX_START, + /* TCC range 0 */ + RAS_GFX_V9__GFX_TCC_INDEX0_START = + RAS_GFX_V9__GFX_TCC_INDEX_START, + RAS_GFX_V9__GFX_TCC_CACHE_DATA = + RAS_GFX_V9__GFX_TCC_INDEX0_START, + RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1, + RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0, + RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1, + RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0, + RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1, + RAS_GFX_V9__GFX_TCC_HIGH_RATE_TAG, + RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG, + RAS_GFX_V9__GFX_TCC_INDEX0_END = + RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG, + /* TCC range 1 */ + RAS_GFX_V9__GFX_TCC_INDEX1_START, + RAS_GFX_V9__GFX_TCC_IN_USE_DEC = + RAS_GFX_V9__GFX_TCC_INDEX1_START, + RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + RAS_GFX_V9__GFX_TCC_INDEX1_END = + RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + /* TCC range 2 */ + RAS_GFX_V9__GFX_TCC_INDEX2_START, + RAS_GFX_V9__GFX_TCC_RETURN_DATA = + RAS_GFX_V9__GFX_TCC_INDEX2_START, + RAS_GFX_V9__GFX_TCC_RETURN_CONTROL, + RAS_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO, + RAS_GFX_V9__GFX_TCC_WRITE_RETURN, + RAS_GFX_V9__GFX_TCC_WRITE_CACHE_READ, + RAS_GFX_V9__GFX_TCC_SRC_FIFO, + RAS_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM, + RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + RAS_GFX_V9__GFX_TCC_INDEX2_END = + RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + /* TCC range 3 */ + RAS_GFX_V9__GFX_TCC_INDEX3_START, + RAS_GFX_V9__GFX_TCC_LATENCY_FIFO = + RAS_GFX_V9__GFX_TCC_INDEX3_START, + RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + RAS_GFX_V9__GFX_TCC_INDEX3_END = + RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + /* TCC range 4 */ + 
RAS_GFX_V9__GFX_TCC_INDEX4_START, + RAS_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN = + RAS_GFX_V9__GFX_TCC_INDEX4_START, + RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + RAS_GFX_V9__GFX_TCC_INDEX4_END = + RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + RAS_GFX_V9__GFX_TCC_INDEX_END = + RAS_GFX_V9__GFX_TCC_INDEX4_END, + /* TCI */ + RAS_GFX_V9__GFX_TCI_WRITE_RAM, + /* TCP */ + RAS_GFX_V9__GFX_TCP_INDEX_START, + RAS_GFX_V9__GFX_TCP_CACHE_RAM = + RAS_GFX_V9__GFX_TCP_INDEX_START, + RAS_GFX_V9__GFX_TCP_LFIFO_RAM, + RAS_GFX_V9__GFX_TCP_CMD_FIFO, + RAS_GFX_V9__GFX_TCP_VM_FIFO, + RAS_GFX_V9__GFX_TCP_DB_RAM, + RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO0, + RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + RAS_GFX_V9__GFX_TCP_INDEX_END = + RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + /* TD */ + RAS_GFX_V9__GFX_TD_INDEX_START, + RAS_GFX_V9__GFX_TD_SS_FIFO_LO = + RAS_GFX_V9__GFX_TD_INDEX_START, + RAS_GFX_V9__GFX_TD_SS_FIFO_HI, + RAS_GFX_V9__GFX_TD_CS_FIFO, + RAS_GFX_V9__GFX_TD_INDEX_END = RAS_GFX_V9__GFX_TD_CS_FIFO, + /* EA (3 sub-ranges) */ + RAS_GFX_V9__GFX_EA_INDEX_START, + /* EA range 0 */ + RAS_GFX_V9__GFX_EA_INDEX0_START = + RAS_GFX_V9__GFX_EA_INDEX_START, + RAS_GFX_V9__GFX_EA_DRAMRD_CMDMEM = + RAS_GFX_V9__GFX_EA_INDEX0_START, + RAS_GFX_V9__GFX_EA_DRAMWR_CMDMEM, + RAS_GFX_V9__GFX_EA_DRAMWR_DATAMEM, + RAS_GFX_V9__GFX_EA_RRET_TAGMEM, + RAS_GFX_V9__GFX_EA_WRET_TAGMEM, + RAS_GFX_V9__GFX_EA_GMIRD_CMDMEM, + RAS_GFX_V9__GFX_EA_GMIWR_CMDMEM, + RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM, + RAS_GFX_V9__GFX_EA_INDEX0_END = + RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM, + /* EA range 1 */ + RAS_GFX_V9__GFX_EA_INDEX1_START, + RAS_GFX_V9__GFX_EA_DRAMRD_PAGEMEM = + RAS_GFX_V9__GFX_EA_INDEX1_START, + RAS_GFX_V9__GFX_EA_DRAMWR_PAGEMEM, + RAS_GFX_V9__GFX_EA_IORD_CMDMEM, + RAS_GFX_V9__GFX_EA_IOWR_CMDMEM, + RAS_GFX_V9__GFX_EA_IOWR_DATAMEM, + RAS_GFX_V9__GFX_EA_GMIRD_PAGEMEM, + RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + RAS_GFX_V9__GFX_EA_INDEX1_END = + RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + /* EA range 2 */ + RAS_GFX_V9__GFX_EA_INDEX2_START, + RAS_GFX_V9__GFX_EA_MAM_D0MEM = + RAS_GFX_V9__GFX_EA_INDEX2_START, + RAS_GFX_V9__GFX_EA_MAM_D1MEM, + RAS_GFX_V9__GFX_EA_MAM_D2MEM, + RAS_GFX_V9__GFX_EA_MAM_D3MEM, + RAS_GFX_V9__GFX_EA_INDEX2_END = + RAS_GFX_V9__GFX_EA_MAM_D3MEM, + RAS_GFX_V9__GFX_EA_INDEX_END = + RAS_GFX_V9__GFX_EA_INDEX2_END, + /* UTC VM L2 bank */ + RAS_GFX_V9__UTC_VML2_BANK_CACHE, + /* UTC VM walker */ + RAS_GFX_V9__UTC_VML2_WALKER, + /* UTC ATC L2 2MB cache */ + RAS_GFX_V9__UTC_ATCL2_CACHE_2M_BANK, + /* UTC ATC L2 4KB cache */ + RAS_GFX_V9__UTC_ATCL2_CACHE_4K_BANK, + RAS_GFX_V9__GFX_MAX +}; + +extern const struct ras_gfx_ip_func gfx_ras_func_v9_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c new file mode 100644 index 000000000000..0a838fdcb2f6 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_core_status.h" +#include "ras_log_ring.h" + +#define RAS_LOG_MAX_QUERY_SIZE 0xC000 +#define RAS_LOG_MEM_TEMP_SIZE 0x200 +#define RAS_LOG_MEMPOOL_SIZE \ + (RAS_LOG_MAX_QUERY_SIZE + RAS_LOG_MEM_TEMP_SIZE) + +#define BATCH_IDX_TO_TREE_IDX(batch_idx, sn) (((batch_idx) << 8) | (sn)) + +static const uint64_t ras_rma_aca_reg[ACA_REG_MAX_COUNT] = { + [ACA_REG_IDX__CTL] = 0x1, + [ACA_REG_IDX__STATUS] = 0xB000000000000137, + [ACA_REG_IDX__ADDR] = 0x0, + [ACA_REG_IDX__MISC0] = 0x0, + [ACA_REG_IDX__CONFG] = 0x1ff00000002, + [ACA_REG_IDX__IPID] = 0x9600000000, + [ACA_REG_IDX__SYND] = 0x0, +}; + +static uint64_t ras_log_ring_get_logged_ecc_count(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + uint64_t count = 0; + + if (log_ring->logged_ecc_count < 0) { + RAS_DEV_WARN(ras_core->dev, + "Error: the logged ras count should not less than 0!\n"); + count = 0; + } else { + count = log_ring->logged_ecc_count; + } + + if (count > RAS_LOG_MEMPOOL_SIZE) + RAS_DEV_WARN(ras_core->dev, + "Error: the logged ras count is out of range!\n"); + + return count; +} + +static int ras_log_ring_add_data(struct ras_core_context *ras_core, + struct ras_log_info *log, struct ras_log_batch_tag *batch_tag) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + unsigned long flags = 0; + int ret = 0; + + if (batch_tag && (batch_tag->sub_seqno >= MAX_RECORD_PER_BATCH)) { + RAS_DEV_ERR(ras_core->dev, + "Invalid batch sub seqno:%d, batch:0x%llx\n", + batch_tag->sub_seqno, batch_tag->batch_id); + return -EINVAL; + } + + spin_lock_irqsave(&log_ring->spin_lock, flags); + if (batch_tag) { + log->seqno = + BATCH_IDX_TO_TREE_IDX(batch_tag->batch_id, batch_tag->sub_seqno); + batch_tag->sub_seqno++; + } else { + log->seqno = BATCH_IDX_TO_TREE_IDX(log_ring->mono_upward_batch_id, 0); + log_ring->mono_upward_batch_id++; + } + ret = radix_tree_insert(&log_ring->ras_log_root, log->seqno, log); + if (!ret) + log_ring->logged_ecc_count++; + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + + if (ret) { + RAS_DEV_ERR(ras_core->dev, + "Failed to add ras log! 
seqno:0x%llx, ret:%d\n", + log->seqno, ret); + mempool_free(log, log_ring->ras_log_mempool); + } + + return ret; +} + +static int ras_log_ring_delete_data(struct ras_core_context *ras_core, uint32_t count) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + unsigned long flags = 0; + uint32_t i = 0, j = 0; + uint64_t batch_id, idx; + void *data; + int ret = -ENODATA; + + if (count > ras_log_ring_get_logged_ecc_count(ras_core)) + return -EINVAL; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + batch_id = log_ring->last_del_batch_id; + while (batch_id < log_ring->mono_upward_batch_id) { + for (j = 0; j < MAX_RECORD_PER_BATCH; j++) { + idx = BATCH_IDX_TO_TREE_IDX(batch_id, j); + data = radix_tree_delete(&log_ring->ras_log_root, idx); + if (data) { + mempool_free(data, log_ring->ras_log_mempool); + log_ring->logged_ecc_count--; + i++; + } + } + batch_id = ++log_ring->last_del_batch_id; + if (i >= count) { + ret = 0; + break; + } + } + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + + return ret; +} + +static void ras_log_ring_clear_log_tree(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + uint64_t batch_id, idx; + unsigned long flags = 0; + void *data; + int j; + + if ((log_ring->mono_upward_batch_id <= log_ring->last_del_batch_id) && + !log_ring->logged_ecc_count) + return; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + batch_id = log_ring->last_del_batch_id; + while (batch_id < log_ring->mono_upward_batch_id) { + for (j = 0; j < MAX_RECORD_PER_BATCH; j++) { + idx = BATCH_IDX_TO_TREE_IDX(batch_id, j); + data = radix_tree_delete(&log_ring->ras_log_root, idx); + if (data) { + mempool_free(data, log_ring->ras_log_mempool); + log_ring->logged_ecc_count--; + } + } + batch_id++; + } + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + +} + +int ras_log_ring_sw_init(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + + memset(log_ring, 0, sizeof(*log_ring)); + + log_ring->ras_log_mempool = mempool_create_kmalloc_pool( + RAS_LOG_MEMPOOL_SIZE, sizeof(struct ras_log_info)); + if (!log_ring->ras_log_mempool) + return -ENOMEM; + + INIT_RADIX_TREE(&log_ring->ras_log_root, GFP_KERNEL); + + spin_lock_init(&log_ring->spin_lock); + + return 0; +} + +int ras_log_ring_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + + ras_log_ring_clear_log_tree(ras_core); + log_ring->logged_ecc_count = 0; + log_ring->last_del_batch_id = 0; + log_ring->mono_upward_batch_id = 0; + + mempool_destroy(log_ring->ras_log_mempool); + + return 0; +} + +struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + struct ras_log_batch_tag *batch_tag; + unsigned long flags = 0; + + batch_tag = kzalloc(sizeof(*batch_tag), GFP_KERNEL); + if (!batch_tag) + return NULL; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + batch_tag->batch_id = log_ring->mono_upward_batch_id; + log_ring->mono_upward_batch_id++; + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + + batch_tag->sub_seqno = 0; + batch_tag->timestamp = ras_core_get_utc_second_timestamp(ras_core); + return batch_tag; +} + +void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core, + struct ras_log_batch_tag *batch_tag) +{ + kfree(batch_tag); +} + +void ras_log_ring_add_log_event(struct ras_core_context *ras_core, + enum ras_log_event event, void *data, struct ras_log_batch_tag 
*batch_tag) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + struct device_system_info dev_info = {0}; + struct ras_log_info *log; + uint64_t socket_id; + void *obj; + + obj = mempool_alloc_preallocated(log_ring->ras_log_mempool); + if (!obj || + (ras_log_ring_get_logged_ecc_count(ras_core) >= RAS_LOG_MEMPOOL_SIZE)) { + ras_log_ring_delete_data(ras_core, RAS_LOG_MEM_TEMP_SIZE); + if (!obj) + obj = mempool_alloc_preallocated(log_ring->ras_log_mempool); + } + + if (!obj) { + RAS_DEV_ERR(ras_core->dev, "ERROR: Failed to alloc ras log buffer!\n"); + return; + } + + log = (struct ras_log_info *)obj; + + memset(log, 0, sizeof(*log)); + log->timestamp = + batch_tag ? batch_tag->timestamp : ras_core_get_utc_second_timestamp(ras_core); + log->event = event; + + if (data) + memcpy(&log->aca_reg, data, sizeof(log->aca_reg)); + + if (event == RAS_LOG_EVENT_RMA) { + memcpy(&log->aca_reg, ras_rma_aca_reg, sizeof(log->aca_reg)); + ras_core_get_device_system_info(ras_core, &dev_info); + socket_id = dev_info.socket_id; + log->aca_reg.regs[ACA_REG_IDX__IPID] |= ((socket_id / 4) & 0x01); + log->aca_reg.regs[ACA_REG_IDX__IPID] |= (((socket_id % 4) & 0x3) << 44); + } + + ras_log_ring_add_data(ras_core, log, batch_tag); +} + +static struct ras_log_info *ras_log_ring_lookup_data(struct ras_core_context *ras_core, + uint64_t idx) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + unsigned long flags = 0; + void *data; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + data = radix_tree_lookup(&log_ring->ras_log_root, idx); + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + + return (struct ras_log_info *)data; +} + +int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_id, + struct ras_log_info **log_arr, uint32_t arr_num) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + uint32_t i, idx, count = 0; + void *data; + + if ((batch_id >= log_ring->mono_upward_batch_id) || + (batch_id < log_ring->last_del_batch_id)) + return -EINVAL; + + for (i = 0; i < MAX_RECORD_PER_BATCH; i++) { + idx = BATCH_IDX_TO_TREE_IDX(batch_id, i); + data = ras_log_ring_lookup_data(ras_core, idx); + if (data) { + log_arr[count++] = data; + if (count >= arr_num) + break; + } + } + + return count; +} + +int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core, + struct ras_log_batch_overview *overview) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + + overview->logged_batch_count = + log_ring->mono_upward_batch_id - log_ring->last_del_batch_id; + overview->last_batch_id = log_ring->mono_upward_batch_id; + overview->first_batch_id = log_ring->last_del_batch_id; + + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h new file mode 100644 index 000000000000..0ff6cc35678d --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_LOG_RING_H__ +#define __RAS_LOG_RING_H__ +#include "ras_aca.h" + +#define MAX_RECORD_PER_BATCH 32 + +#define RAS_LOG_SEQNO_TO_BATCH_IDX(seqno) ((seqno) >> 8) + +enum ras_log_event { + RAS_LOG_EVENT_NONE, + RAS_LOG_EVENT_UE, + RAS_LOG_EVENT_DE, + RAS_LOG_EVENT_CE, + RAS_LOG_EVENT_POISON_CREATION, + RAS_LOG_EVENT_POISON_CONSUMPTION, + RAS_LOG_EVENT_RMA, + RAS_LOG_EVENT_COUNT_MAX, +}; + +struct ras_aca_reg { + uint64_t regs[ACA_REG_MAX_COUNT]; +}; + +struct ras_log_info { + uint64_t seqno; + uint64_t timestamp; + enum ras_log_event event; + union { + struct ras_aca_reg aca_reg; + }; +}; + +struct ras_log_batch_tag { + uint64_t batch_id; + uint64_t timestamp; + uint32_t sub_seqno; +}; + +struct ras_log_ring { + void *ras_log_mempool; + struct radix_tree_root ras_log_root; + spinlock_t spin_lock; + uint64_t mono_upward_batch_id; + uint64_t last_del_batch_id; + int logged_ecc_count; +}; + +struct ras_log_batch_overview { + uint64_t first_batch_id; + uint64_t last_batch_id; + uint32_t logged_batch_count; +}; + +struct ras_core_context; + +int ras_log_ring_sw_init(struct ras_core_context *ras_core); +int ras_log_ring_sw_fini(struct ras_core_context *ras_core); + +struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core); +void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core, + struct ras_log_batch_tag *tag); +void ras_log_ring_add_log_event(struct ras_core_context *ras_core, + enum ras_log_event event, void *data, struct ras_log_batch_tag *tag); + +int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_idx, + struct ras_log_info **log_arr, uint32_t arr_num); + +int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core, + struct ras_log_batch_overview *overview); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c new file mode 100644 index 000000000000..f3321df85021 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras.h" +#include "ras_mp1.h" +#include "ras_mp1_v13_0.h" + +static const struct ras_mp1_ip_func *ras_mp1_get_ip_funcs( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 12): + return &mp1_ras_func_v13_0; + default: + RAS_DEV_ERR(ras_core->dev, + "MP1 ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_mp1_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + + return mp1->ip_func->get_valid_bank_count(ras_core, type, count); +} + +int ras_mp1_dump_bank(struct ras_core_context *ras_core, + u32 type, u32 idx, u32 reg_idx, u64 *val) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + + return mp1->ip_func->dump_valid_bank(ras_core, type, idx, reg_idx, val); +} + +int ras_mp1_hw_init(struct ras_core_context *ras_core) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + + mp1->mp1_ip_version = ras_core->config->mp1_ip_version; + mp1->sys_func = ras_core->config->mp1_cfg.mp1_sys_fn; + if (!mp1->sys_func) { + RAS_DEV_ERR(ras_core->dev, "RAS mp1 sys function not configured!\n"); + return -EINVAL; + } + + mp1->ip_func = ras_mp1_get_ip_funcs(ras_core, mp1->mp1_ip_version); + + return mp1->ip_func ? RAS_CORE_OK : -EINVAL; +} + +int ras_mp1_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h new file mode 100644 index 000000000000..de1d08286f41 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_MP1_H__ +#define __RAS_MP1_H__ +#include "ras.h" + +enum ras_err_type; +struct ras_mp1_ip_func { + int (*get_valid_bank_count)(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count); + int (*dump_valid_bank)(struct ras_core_context *ras_core, + enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val); +}; + +struct ras_mp1 { + uint32_t mp1_ip_version; + const struct ras_mp1_ip_func *ip_func; + const struct ras_mp1_sys_func *sys_func; +}; + +int ras_mp1_hw_init(struct ras_core_context *ras_core); +int ras_mp1_hw_fini(struct ras_core_context *ras_core); + +int ras_mp1_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count); + +int ras_mp1_dump_bank(struct ras_core_context *ras_core, + u32 ecc_type, u32 idx, u32 reg_idx, u64 *val); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c new file mode 100644 index 000000000000..310d39fc816b --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_mp1.h" +#include "ras_core_status.h" +#include "ras_mp1_v13_0.h" + +#define RAS_MP1_MSG_QueryValidMcaCount 0x36 +#define RAS_MP1_MSG_McaBankDumpDW 0x37 +#define RAS_MP1_MSG_ClearMcaOnRead 0x39 +#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A +#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B + +#define MAX_UE_BANKS_PER_QUERY 12 +#define MAX_CE_BANKS_PER_QUERY 12 + +static int mp1_v13_0_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + const struct ras_mp1_sys_func *sys_func = mp1->sys_func; + uint32_t bank_count = 0; + u32 msg; + int ret; + + if (!count) + return -EINVAL; + + if (!sys_func || !sys_func->mp1_get_valid_bank_count) + return -RAS_CORE_NOT_SUPPORTED; + + switch (type) { + case RAS_ERR_TYPE__UE: + msg = RAS_MP1_MSG_QueryValidMcaCount; + break; + case RAS_ERR_TYPE__CE: + case RAS_ERR_TYPE__DE: + msg = RAS_MP1_MSG_QueryValidMcaCeCount; + break; + default: + return -EINVAL; + } + + ret = sys_func->mp1_get_valid_bank_count(ras_core, msg, &bank_count); + if (!ret) { + if (((type == RAS_ERR_TYPE__UE) && (bank_count >= MAX_UE_BANKS_PER_QUERY)) || + ((type == RAS_ERR_TYPE__CE) && (bank_count >= MAX_CE_BANKS_PER_QUERY))) + return -EINVAL; + + *count = bank_count; + } + + return ret; +} + +static int mp1_v13_0_dump_bank(struct ras_core_context *ras_core, + enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + const struct ras_mp1_sys_func *sys_func = mp1->sys_func; + u32 msg; + + if (!sys_func || !sys_func->mp1_dump_valid_bank) + return -RAS_CORE_NOT_SUPPORTED; + + switch (type) { + case RAS_ERR_TYPE__UE: + msg = RAS_MP1_MSG_McaBankDumpDW; + break; + case RAS_ERR_TYPE__CE: + case RAS_ERR_TYPE__DE: + msg = RAS_MP1_MSG_McaBankCeDumpDW; + break; + default: + return -EINVAL; + } + + return sys_func->mp1_dump_valid_bank(ras_core, msg, idx, reg_idx, val); +} + +const struct ras_mp1_ip_func mp1_ras_func_v13_0 = { + .get_valid_bank_count = mp1_v13_0_get_bank_count, + .dump_valid_bank = mp1_v13_0_dump_bank, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h new file mode 100644 index 000000000000..2edfdb5f6a75 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __RAS_MP1_V13_0_H__ +#define __RAS_MP1_V13_0_H__ +#include "ras_mp1.h" + +extern const struct ras_mp1_ip_func mp1_ras_func_v13_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c new file mode 100644 index 000000000000..bfddd104d548 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras.h" +#include "ras_nbio.h" +#include "ras_nbio_v7_9.h" + +static const struct ras_nbio_ip_func *ras_nbio_get_ip_funcs( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(7, 9, 0): + case IP_VERSION(7, 9, 1): + return &ras_nbio_v7_9; + default: + RAS_DEV_ERR(ras_core->dev, + "NBIO ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_nbio_hw_init(struct ras_core_context *ras_core) +{ + struct ras_nbio *nbio = &ras_core->ras_nbio; + + nbio->nbio_ip_version = ras_core->config->nbio_ip_version; + nbio->sys_func = ras_core->config->nbio_cfg.nbio_sys_fn; + if (!nbio->sys_func) { + RAS_DEV_ERR(ras_core->dev, "RAS nbio sys function not configured!\n"); + return -EINVAL; + } + + nbio->ip_func = ras_nbio_get_ip_funcs(ras_core, nbio->nbio_ip_version); + if (!nbio->ip_func) + return -EINVAL; + + if (nbio->sys_func) { + if (nbio->sys_func->set_ras_controller_irq_state) + nbio->sys_func->set_ras_controller_irq_state(ras_core, true); + if (nbio->sys_func->set_ras_err_event_athub_irq_state) + nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, true); + } + + return 0; +} + +int ras_nbio_hw_fini(struct ras_core_context *ras_core) +{ + struct ras_nbio *nbio = &ras_core->ras_nbio; + + if (nbio->sys_func) { + if (nbio->sys_func->set_ras_controller_irq_state) + nbio->sys_func->set_ras_controller_irq_state(ras_core, false); + if (nbio->sys_func->set_ras_err_event_athub_irq_state) + nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, false); + } + + return 0; +} + +bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data) +{ + struct ras_nbio *nbio = &ras_core->ras_nbio; + + if (nbio->ip_func) { + if (nbio->ip_func->handle_ras_controller_intr_no_bifring) + nbio->ip_func->handle_ras_controller_intr_no_bifring(ras_core); + if (nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring) + 
nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring(ras_core); + } + + return true; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h new file mode 100644 index 000000000000..0a1313e59a02 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_NBIO_H__ +#define __RAS_NBIO_H__ +#include "ras.h" + +struct ras_core_context; + +struct ras_nbio_ip_func { + int (*handle_ras_controller_intr_no_bifring)(struct ras_core_context *ras_core); + int (*handle_ras_err_event_athub_intr_no_bifring)(struct ras_core_context *ras_core); + uint32_t (*get_memory_partition_mode)(struct ras_core_context *ras_core); +}; + +struct ras_nbio { + uint32_t nbio_ip_version; + const struct ras_nbio_ip_func *ip_func; + const struct ras_nbio_sys_func *sys_func; +}; + +int ras_nbio_hw_init(struct ras_core_context *ras_core); +int ras_nbio_hw_fini(struct ras_core_context *ras_core); +bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c new file mode 100644 index 000000000000..f17d708ec668 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras.h" +#include "ras_nbio_v7_9.h" + +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L + +#define regBIF_BX0_BIF_DOORBELL_INT_CNTL_BASE_IDX 2 +#define regBIF_BX0_BIF_DOORBELL_INT_CNTL 0x00fe + +#define regBIF_BX0_BIF_INTR_CNTL 0x0101 +#define regBIF_BX0_BIF_INTR_CNTL_BASE_IDX 2 + +/* BIF_BX0_BIF_INTR_CNTL */ +#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0 +#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L + +#define regBIF_BX_PF0_PARTITION_MEM_STATUS 0x0164 +#define regBIF_BX_PF0_PARTITION_MEM_STATUS_BASE_IDX 2 +/* BIF_BX_PF0_PARTITION_MEM_STATUS */ +#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE__SHIFT 0x0 +#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE__SHIFT 0x4 +#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE_MASK 0x0000000FL +#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE_MASK 0x00000FF0L + + +static int nbio_v7_9_handle_ras_controller_intr_no_bifring(struct ras_core_context *ras_core) +{ + uint32_t bif_doorbell_intr_cntl = 0; + + bif_doorbell_intr_cntl = + RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL); + + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, + RAS_CNTLR_INTERRUPT_CLEAR, 1); + + RAS_DEV_WREG32_SOC15(ras_core->dev, + NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + /* TODO: handle ras controller interrupt */ + } + + return 0; +} + +static int nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct ras_core_context *ras_core) +{ + uint32_t bif_doorbell_intr_cntl = 0; + int ret = 0; + + bif_doorbell_intr_cntl = + RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL); + + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, + RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1); + + RAS_DEV_WREG32_SOC15(ras_core->dev, + NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + ret = ras_core_handle_fatal_error(ras_core); + } + + return ret; +} + +static uint32_t nbio_v7_9_get_memory_partition_mode(struct ras_core_context *ras_core) +{ + uint32_t mem_status; + uint32_t mem_mode; + + mem_status = + RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, 
regBIF_BX_PF0_PARTITION_MEM_STATUS); + + /* Each bit represents a mode 1-8*/ + mem_mode = REG_GET_FIELD(mem_status, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE); + + return ffs(mem_mode); +} + +const struct ras_nbio_ip_func ras_nbio_v7_9 = { + .handle_ras_controller_intr_no_bifring = + nbio_v7_9_handle_ras_controller_intr_no_bifring, + .handle_ras_err_event_athub_intr_no_bifring = + nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring, + .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h new file mode 100644 index 000000000000..8711c82a927f --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_NBIO_V7_9_H__ +#define __RAS_NBIO_V7_9_H__ +#include "ras_nbio.h" + +extern const struct ras_nbio_ip_func ras_nbio_v7_9; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.c b/drivers/gpu/drm/amd/ras/rascore/ras_process.c new file mode 100644 index 000000000000..3267dcdb169c --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_process.h" + +#define RAS_EVENT_FIFO_SIZE (128 * sizeof(struct ras_event_req)) + +#define RAS_POLLING_ECC_TIMEOUT 300 + +static int ras_process_put_event(struct ras_core_context *ras_core, + struct ras_event_req *req) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + int ret; + + ret = kfifo_in_spinlocked(&ras_proc->event_fifo, + req, sizeof(*req), &ras_proc->fifo_spinlock); + if (!ret) { + RAS_DEV_ERR(ras_core->dev, "Poison message fifo is full!\n"); + return -ENOSPC; + } + + return 0; +} + +static int ras_process_add_reset_gpu_event(struct ras_core_context *ras_core, + uint32_t reset_cause) +{ + struct ras_event_req req = {0}; + + req.reset = reset_cause; + + return ras_process_put_event(ras_core, &req); +} + +static int ras_process_get_event(struct ras_core_context *ras_core, + struct ras_event_req *req) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + + return kfifo_out_spinlocked(&ras_proc->event_fifo, + req, sizeof(*req), &ras_proc->fifo_spinlock); +} + +static void ras_process_clear_event_fifo(struct ras_core_context *ras_core) +{ + struct ras_event_req req; + int ret; + + do { + ret = ras_process_get_event(ras_core, &req); + } while (ret); +} + +#define AMDGPU_RAS_WAITING_DATA_READY 200 +static int ras_process_umc_event(struct ras_core_context *ras_core, + uint32_t event_count) +{ + struct ras_ecc_count ecc_data; + int ret = 0; + uint32_t timeout = 0; + uint32_t detected_de_count = 0; + + do { + memset(&ecc_data, 0, sizeof(ecc_data)); + ret = ras_core_update_ecc_info(ras_core); + if (ret) + return ret; + + ret = ras_core_query_block_ecc_data(ras_core, RAS_BLOCK_ID__UMC, &ecc_data); + if (ret) + return ret; + + if (ecc_data.new_de_count) { + detected_de_count += ecc_data.new_de_count; + timeout = 0; + } else { + if (!timeout && event_count) + timeout = AMDGPU_RAS_WAITING_DATA_READY; + + if (timeout) { + if (!--timeout) + break; + + msleep(1); + } + } + } while (detected_de_count < event_count); + + if (detected_de_count && ras_core_gpu_is_rma(ras_core)) + ras_process_add_reset_gpu_event(ras_core, GPU_RESET_CAUSE_RMA); + + return 0; +} + +static int ras_process_non_umc_event(struct ras_core_context *ras_core) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + struct ras_event_req req; + uint32_t event_count = kfifo_len(&ras_proc->event_fifo); + uint32_t reset_flags = 0; + int ret = 0, i; + + for (i = 0; i < event_count; i++) { + memset(&req, 0, sizeof(req)); + ret = ras_process_get_event(ras_core, &req); + if (!ret) + continue; + + ras_core_event_notify(ras_core, + RAS_EVENT_ID__POISON_CONSUMPTION, &req); + + reset_flags |= req.reset; + + if (req.reset == GPU_RESET_CAUSE_RMA) + continue; + + if (req.reset) + RAS_DEV_INFO(ras_core->dev, + "{%llu} GPU reset for %s RAS poison consumption is issued!\n", + req.seqno, ras_core_get_ras_block_name(req.block)); + else + RAS_DEV_INFO(ras_core->dev, + "{%llu} %s RAS poison consumption is issued!\n", + req.seqno, ras_core_get_ras_block_name(req.block)); + } + + if (reset_flags) { + ret = ras_core_event_notify(ras_core, + RAS_EVENT_ID__RESET_GPU, &reset_flags); + if (!ret && (reset_flags & GPU_RESET_CAUSE_RMA)) + return -RAS_CORE_GPU_IN_MODE1_RESET; + } + + return ret; +} + +int ras_process_handle_ras_event(struct ras_core_context *ras_core) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + uint32_t umc_event_count; + int ret; + + ret = ras_core_event_notify(ras_core, + RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN, NULL); + if (ret) + return ret; + + 
ras_aca_clear_fatal_flag(ras_core);
+	ras_umc_log_pending_bad_bank(ras_core);
+
+	do {
+		umc_event_count = atomic_read(&ras_proc->umc_interrupt_count);
+		ret = ras_process_umc_event(ras_core, umc_event_count);
+		if (ret == -RAS_CORE_GPU_IN_MODE1_RESET)
+			break;
+
+		if (umc_event_count)
+			atomic_sub(umc_event_count, &ras_proc->umc_interrupt_count);
+	} while (atomic_read(&ras_proc->umc_interrupt_count));
+
+	if ((ret != -RAS_CORE_GPU_IN_MODE1_RESET) &&
+	    (kfifo_len(&ras_proc->event_fifo)))
+		ret = ras_process_non_umc_event(ras_core);
+
+	if (ret == -RAS_CORE_GPU_IN_MODE1_RESET) {
+		/* Clear poison fifo */
+		ras_process_clear_event_fifo(ras_core);
+		atomic_set(&ras_proc->umc_interrupt_count, 0);
+	}
+
+	ras_core_event_notify(ras_core,
+		RAS_EVENT_ID__RAS_EVENT_PROC_END, NULL);
+	return ret;
+}
+
+static int thread_wait_condition(void *param)
+{
+	struct ras_process *ras_proc = (struct ras_process *)param;
+
+	return (kthread_should_stop() ||
+		atomic_read(&ras_proc->ras_interrupt_req));
+}
+
+static int ras_process_thread(void *context)
+{
+	struct ras_core_context *ras_core = (struct ras_core_context *)context;
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+
+	while (!kthread_should_stop()) {
+		ras_wait_event_interruptible_timeout(&ras_proc->ras_process_wq,
+			thread_wait_condition, ras_proc,
+			msecs_to_jiffies(RAS_POLLING_ECC_TIMEOUT));
+
+		if (kthread_should_stop())
+			break;
+
+		if (!ras_core->is_initialized)
+			continue;
+
+		atomic_set(&ras_proc->ras_interrupt_req, 0);
+
+		if (ras_core_gpu_in_reset(ras_core))
+			continue;
+
+		if (ras_core->sys_fn && ras_core->sys_fn->async_handle_ras_event)
+			ras_core->sys_fn->async_handle_ras_event(ras_core, NULL);
+		else
+			ras_process_handle_ras_event(ras_core);
+	}
+
+	return 0;
+}
+
+int ras_process_init(struct ras_core_context *ras_core)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+	int ret;
+
+	ret = kfifo_alloc(&ras_proc->event_fifo, RAS_EVENT_FIFO_SIZE, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&ras_proc->fifo_spinlock);
+
+	init_waitqueue_head(&ras_proc->ras_process_wq);
+
+	ras_proc->ras_process_thread = kthread_run(ras_process_thread,
+			(void *)ras_core, "ras_process_thread");
+	if (IS_ERR(ras_proc->ras_process_thread)) {
+		/* kthread_run() returns an ERR_PTR on failure, never NULL */
+		RAS_DEV_ERR(ras_core->dev, "Failed to create ras_process_thread.\n");
+		ret = PTR_ERR(ras_proc->ras_process_thread);
+		ras_proc->ras_process_thread = NULL;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	ras_process_fini(ras_core);
+	return ret;
+}
+
+int ras_process_fini(struct ras_core_context *ras_core)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+
+	if (ras_proc->ras_process_thread) {
+		kthread_stop(ras_proc->ras_process_thread);
+		ras_proc->ras_process_thread = NULL;
+	}
+
+	kfifo_free(&ras_proc->event_fifo);
+
+	return 0;
+}
+
+static int ras_process_add_umc_interrupt_req(struct ras_core_context *ras_core,
+			struct ras_event_req *req)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+
+	atomic_inc(&ras_proc->umc_interrupt_count);
+	atomic_inc(&ras_proc->ras_interrupt_req);
+
+	wake_up(&ras_proc->ras_process_wq);
+	return 0;
+}
+
+static int ras_process_add_non_umc_interrupt_req(struct ras_core_context *ras_core,
+			struct ras_event_req *req)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+	int ret;
+
+	ret = ras_process_put_event(ras_core, req);
+	if (!ret) {
+		atomic_inc(&ras_proc->ras_interrupt_req);
+		wake_up(&ras_proc->ras_process_wq);
+	}
+
+	return ret;
+}
+
+int ras_process_add_interrupt_req(struct ras_core_context *ras_core,
+		struct ras_event_req *req, bool is_umc)
+{
+	int ret;
+
+	if (!ras_core)
+		return -EINVAL;
+
+	if
(!ras_core->is_initialized) + return -EPERM; + + if (is_umc) + ret = ras_process_add_umc_interrupt_req(ras_core, req); + else + ret = ras_process_add_non_umc_interrupt_req(ras_core, req); + + return ret; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.h b/drivers/gpu/drm/amd/ras/rascore/ras_process.h new file mode 100644 index 000000000000..28458b50510e --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_PROCESS_H__ +#define __RAS_PROCESS_H__ + +struct ras_event_req { + uint64_t seqno; + uint32_t idx_vf; + uint32_t block; + uint16_t pasid; + uint32_t reset; + void *pasid_fn; + void *data; +}; + +struct ras_process { + void *dev; + void *ras_process_thread; + wait_queue_head_t ras_process_wq; + atomic_t ras_interrupt_req; + atomic_t umc_interrupt_count; + struct kfifo event_fifo; + spinlock_t fifo_spinlock; +}; + +struct ras_core_context; +int ras_process_init(struct ras_core_context *ras_core); +int ras_process_fini(struct ras_core_context *ras_core); +int ras_process_handle_ras_event(struct ras_core_context *ras_core); +int ras_process_add_interrupt_req(struct ras_core_context *ras_core, + struct ras_event_req *req, bool is_umc); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c new file mode 100644 index 000000000000..ccdb42d2dd60 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c @@ -0,0 +1,750 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_ta_if.h" +#include "ras_psp.h" +#include "ras_psp_v13_0.h" + +/* position of instance value in sub_block_index of + * ta_ras_trigger_error_input, the sub block uses lower 12 bits + */ +#define RAS_TA_INST_MASK 0xfffff000 +#define RAS_TA_INST_SHIFT 0xc + +static const struct ras_psp_ip_func *ras_psp_get_ip_funcs( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 12): + return &ras_psp_v13_0; + default: + RAS_DEV_ERR(ras_core->dev, + "psp ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +static int ras_psp_sync_system_ras_psp_status(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx; + struct ras_psp_sys_status status = {0}; + int ret; + + if (psp->sys_func && psp->sys_func->get_ras_psp_system_status) { + ret = psp->sys_func->get_ras_psp_system_status(ras_core, &status); + if (ret) + return ret; + + if (status.initialized) { + ta_ctx->preload_ras_ta_enabled = true; + ta_ctx->ras_ta_initialized = status.initialized; + ta_ctx->session_id = status.session_id; + } + + psp_ctx->external_mutex = status.psp_cmd_mutex; + } + + return 0; +} + +static int ras_psp_get_ras_ta_init_param(struct ras_core_context *ras_core, + struct ras_ta_init_param *ras_ta_param) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + if (psp->sys_func && psp->sys_func->get_ras_ta_init_param) + return psp->sys_func->get_ras_ta_init_param(ras_core, ras_ta_param); + + RAS_DEV_ERR(ras_core->dev, "Not config get_ras_ta_init_param API!!\n"); + return -EACCES; +} + +static struct gpu_mem_block *ras_psp_get_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type) +{ + struct ras_psp *psp = &ras_core->ras_psp; + struct gpu_mem_block *gpu_mem = NULL; + int ret; + + switch (mem_type) { + case GPU_MEM_TYPE_RAS_PSP_RING: + gpu_mem = &psp->psp_ring.ras_ring_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_PSP_CMD: + gpu_mem = &psp->psp_ctx.psp_cmd_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_PSP_FENCE: + gpu_mem = &psp->psp_ctx.out_fence_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_TA_FW: + gpu_mem = &psp->ta_ctx.fw_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_TA_CMD: + gpu_mem = &psp->ta_ctx.cmd_gpu_mem; + break; + default: + return NULL; + } + + if (!gpu_mem->ref_count) { + ret = ras_core_get_gpu_mem(ras_core, mem_type, gpu_mem); + if (ret) + return NULL; + gpu_mem->mem_type = mem_type; + } + + gpu_mem->ref_count++; + + return gpu_mem; +} + +static int ras_psp_put_gpu_mem(struct ras_core_context *ras_core, + struct gpu_mem_block *gpu_mem) +{ + if (!gpu_mem) + return 0; + + gpu_mem->ref_count--; + + if (gpu_mem->ref_count > 0) { + return 0; + } else if (gpu_mem->ref_count < 0) { + RAS_DEV_WARN(ras_core->dev, + "Duplicate free gpu memory %u\n", gpu_mem->mem_type); + } else { + ras_core_put_gpu_mem(ras_core, gpu_mem->mem_type, gpu_mem); + memset(gpu_mem, 0, sizeof(*gpu_mem)); + } + + return 0; +} + +static void __acquire_psp_cmd_lock(struct ras_core_context *ras_core) +{ + struct ras_psp_ctx *psp_ctx = 
&ras_core->ras_psp.psp_ctx; + + if (psp_ctx->external_mutex) + mutex_lock(psp_ctx->external_mutex); + else + mutex_lock(&psp_ctx->internal_mutex); +} + +static void __release_psp_cmd_lock(struct ras_core_context *ras_core) +{ + struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx; + + if (psp_ctx->external_mutex) + mutex_unlock(psp_ctx->external_mutex); + else + mutex_unlock(&psp_ctx->internal_mutex); +} + +static uint32_t __get_ring_frame_slot(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + uint32_t ras_ring_wptr_dw; + + ras_ring_wptr_dw = psp->ip_func->psp_ras_ring_wptr_get(ras_core); + + return div64_u64((ras_ring_wptr_dw << 2), sizeof(struct psp_gfx_rb_frame)); +} + +static int __set_ring_frame_slot(struct ras_core_context *ras_core, + uint32_t slot) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + return psp->ip_func->psp_ras_ring_wptr_set(ras_core, + (slot * sizeof(struct psp_gfx_rb_frame)) >> 2); +} + +static int write_frame_to_ras_psp_ring(struct ras_core_context *ras_core, + struct psp_gfx_rb_frame *frame) +{ + struct gpu_mem_block *ring_mem; + struct psp_gfx_rb_frame *rb_frame; + uint32_t max_frame_slot; + uint32_t slot_idx; + uint32_t write_flush_read_back = 0; + int ret = 0; + + ring_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_RING); + if (!ring_mem) + return -ENOMEM; + + max_frame_slot = + div64_u64(ring_mem->mem_size, sizeof(struct psp_gfx_rb_frame)); + + rb_frame = + (struct psp_gfx_rb_frame *)ring_mem->mem_cpu_addr; + + slot_idx = __get_ring_frame_slot(ras_core); + if (slot_idx >= max_frame_slot) + slot_idx = 0; + + memcpy(&rb_frame[slot_idx], frame, sizeof(*frame)); + + /* Do a read to force the write of the frame before writing + * write pointer. + */ + write_flush_read_back = rb_frame[slot_idx].fence_value; + if (write_flush_read_back != frame->fence_value) { + RAS_DEV_ERR(ras_core->dev, + "Failed to submit ring cmd! 
cmd:0x%x:0x%x, fence:0x%x:0x%x value:%u, expected:%u\n", + rb_frame[slot_idx].cmd_buf_addr_hi, + rb_frame[slot_idx].cmd_buf_addr_lo, + rb_frame[slot_idx].fence_addr_hi, + rb_frame[slot_idx].fence_addr_lo, + write_flush_read_back, frame->fence_value); + ret = -EACCES; + goto err; + } + + slot_idx++; + + if (slot_idx >= max_frame_slot) + slot_idx = 0; + + __set_ring_frame_slot(ras_core, slot_idx); + +err: + ras_psp_put_gpu_mem(ras_core, ring_mem); + return ret; +} + +static int send_psp_cmd(struct ras_core_context *ras_core, + enum psp_gfx_cmd_id gfx_cmd_id, void *cmd_data, + uint32_t cmd_size, struct psp_cmd_resp *resp) +{ + struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx; + struct gpu_mem_block *psp_cmd_buf = NULL; + struct gpu_mem_block *psp_fence_buf = NULL; + struct psp_gfx_cmd_resp *gfx_cmd; + struct psp_gfx_rb_frame rb_frame; + int ret = 0; + int timeout = 1000; + + if (!cmd_data || (cmd_size > sizeof(union psp_gfx_commands)) || !resp) { + RAS_DEV_ERR(ras_core->dev, "Invalid RAS PSP command, id: %u\n", gfx_cmd_id); + return -EINVAL; + } + + __acquire_psp_cmd_lock(ras_core); + + psp_cmd_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_CMD); + if (!psp_cmd_buf) { + ret = -ENOMEM; + goto exit; + } + + psp_fence_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_FENCE); + if (!psp_fence_buf) { + ret = -ENOMEM; + goto exit; + } + + gfx_cmd = (struct psp_gfx_cmd_resp *)psp_cmd_buf->mem_cpu_addr; + memset(gfx_cmd, 0, sizeof(*gfx_cmd)); + gfx_cmd->cmd_id = gfx_cmd_id; + memcpy(&gfx_cmd->cmd, cmd_data, cmd_size); + + psp_ctx->in_fence_value++; + + memset(&rb_frame, 0, sizeof(rb_frame)); + rb_frame.cmd_buf_addr_hi = upper_32_bits(psp_cmd_buf->mem_mc_addr); + rb_frame.cmd_buf_addr_lo = lower_32_bits(psp_cmd_buf->mem_mc_addr); + rb_frame.fence_addr_hi = upper_32_bits(psp_fence_buf->mem_mc_addr); + rb_frame.fence_addr_lo = lower_32_bits(psp_fence_buf->mem_mc_addr); + rb_frame.fence_value = psp_ctx->in_fence_value; + + ret = write_frame_to_ras_psp_ring(ras_core, &rb_frame); + if (ret) { + psp_ctx->in_fence_value--; + goto exit; + } + + while (*((uint64_t *)psp_fence_buf->mem_cpu_addr) != + psp_ctx->in_fence_value) { + if (--timeout == 0) + break; + /* + * Shouldn't wait for timeout when err_event_athub occurs, + * because gpu reset thread triggered and lock resource should + * be released for psp resume sequence. 
+	 */
+		if (ras_core_ras_interrupt_detected(ras_core))
+			break;
+
+		msleep(2);
+	}
+
+	resp->status = gfx_cmd->resp.status;
+	resp->session_id = gfx_cmd->resp.session_id;
+
+exit:
+	ras_psp_put_gpu_mem(ras_core, psp_cmd_buf);
+	ras_psp_put_gpu_mem(ras_core, psp_fence_buf);
+
+	__release_psp_cmd_lock(ras_core);
+
+	return ret;
+}
+
+static void __check_ras_ta_cmd_resp(struct ras_core_context *ras_core,
+			struct ras_ta_cmd *ras_cmd)
+{
+
+	if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
+		RAS_DEV_WARN(ras_core->dev, "ECC switch disabled\n");
+		ras_cmd->ras_status = RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE;
+	} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
+		RAS_DEV_WARN(ras_core->dev, "RAS internal register access blocked\n");
+
+	switch (ras_cmd->ras_status) {
+	case RAS_TA_STATUS__ERROR_UNSUPPORTED_IP:
+		RAS_DEV_WARN(ras_core->dev,
+			"RAS WARNING: cmd failed due to unsupported ip\n");
+		break;
+	case RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
+		RAS_DEV_WARN(ras_core->dev,
+			"RAS WARNING: cmd failed due to unsupported error injection\n");
+		break;
+	case RAS_TA_STATUS__SUCCESS:
+		break;
+	case RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED:
+		if (ras_cmd->cmd_id == RAS_TA_CMD_ID__TRIGGER_ERROR)
+			RAS_DEV_WARN(ras_core->dev,
+				"RAS WARNING: Inject error to critical region is not allowed\n");
+		break;
+	default:
+		RAS_DEV_WARN(ras_core->dev,
+			"RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+		break;
+	}
+}
+
+static int send_ras_ta_runtime_cmd(struct ras_core_context *ras_core,
+		enum ras_ta_cmd_id cmd_id, void *in, uint32_t in_size,
+		void *out, uint32_t out_size)
+{
+	struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+	struct gpu_mem_block *cmd_mem;
+	struct ras_ta_cmd *ras_cmd;
+	struct psp_gfx_cmd_invoke_cmd invoke_cmd = {0};
+	struct psp_cmd_resp resp = {0};
+	int ret = 0;
+
+	if (!in || (in_size > sizeof(union ras_ta_cmd_input)) ||
+	    (cmd_id >= MAX_RAS_TA_CMD_ID)) {
+		RAS_DEV_ERR(ras_core->dev, "Invalid RAS TA command, id: %u\n", cmd_id);
+		return -EINVAL;
+	}
+
+	ras_psp_sync_system_ras_psp_status(ras_core);
+
+	cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD);
+	if (!cmd_mem)
+		return -ENOMEM;
+
+	if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) {
+		ret = -EACCES;
+		goto out;
+	}
+
+	ras_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr;
+
+	mutex_lock(&ta_ctx->ta_mutex);
+
+	memset(ras_cmd, 0, sizeof(*ras_cmd));
+	ras_cmd->cmd_id = cmd_id;
+	memcpy(&ras_cmd->ras_in_message, in, in_size);
+
+	invoke_cmd.ta_cmd_id = cmd_id;
+	invoke_cmd.session_id = ta_ctx->session_id;
+
+	ret = send_psp_cmd(ras_core, GFX_CMD_ID_INVOKE_CMD,
+			&invoke_cmd, sizeof(invoke_cmd), &resp);
+
+	/* If err_event_athub occurs, the error injection was successful;
+	 * however, the return status from the TA is no longer reliable.
+	 */
+	if (ras_core_ras_interrupt_detected(ras_core)) {
+		ret = 0;
+		goto unlock;
+	}
+
+	if (ret || resp.status) {
+		RAS_DEV_ERR(ras_core->dev,
+			"RAS: Failed to send psp cmd! 
ret:%d, status:%u\n", + ret, resp.status); + ret = -ESTRPIPE; + goto unlock; + } + + if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { + RAS_DEV_WARN(ras_core->dev, "RAS: Unsupported Interface\n"); + ret = -EINVAL; + goto unlock; + } + + if (!ras_cmd->ras_status && out && out_size) + memcpy(out, &ras_cmd->ras_out_message, out_size); + + __check_ras_ta_cmd_resp(ras_core, ras_cmd); + +unlock: + mutex_unlock(&ta_ctx->ta_mutex); + ras_core_up_gpu_reset_lock(ras_core); +out: + ras_psp_put_gpu_mem(ras_core, cmd_mem); + return ret; +} + +static int trigger_ras_ta_error(struct ras_core_context *ras_core, + struct ras_ta_trigger_error_input *info, uint32_t instance_mask) +{ + uint32_t dev_mask = 0; + + switch (info->block_id) { + case RAS_TA_BLOCK__GFX: + if (ras_gfx_get_ta_subblock(ras_core, info->inject_error_type, + info->sub_block_index, &info->sub_block_index)) + return -EINVAL; + + dev_mask = RAS_GET_MASK(ras_core->dev, GC, instance_mask); + break; + case RAS_TA_BLOCK__SDMA: + dev_mask = RAS_GET_MASK(ras_core->dev, SDMA0, instance_mask); + break; + case RAS_TA_BLOCK__VCN: + case RAS_TA_BLOCK__JPEG: + dev_mask = RAS_GET_MASK(ras_core->dev, VCN, instance_mask); + break; + default: + dev_mask = instance_mask; + break; + } + + /* reuse sub_block_index for backward compatibility */ + dev_mask <<= RAS_TA_INST_SHIFT; + dev_mask &= RAS_TA_INST_MASK; + info->sub_block_index |= dev_mask; + + return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__TRIGGER_ERROR, + info, sizeof(*info), NULL, 0); +} + +static int send_load_ta_fw_cmd(struct ras_core_context *ras_core, + struct ras_ta_ctx *ta_ctx) +{ + struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin; + struct gpu_mem_block *fw_mem; + struct gpu_mem_block *cmd_mem; + struct ras_ta_cmd *ta_cmd; + struct ras_ta_init_flags *ta_init_flags; + struct psp_gfx_cmd_load_ta psp_load_ta_cmd; + struct psp_cmd_resp resp = {0}; + struct ras_ta_image_header *fw_hdr = NULL; + int ret; + + fw_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_FW); + if (!fw_mem) + return -ENOMEM; + + cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD); + if (!cmd_mem) { + ret = -ENOMEM; + goto err; + } + + ret = ras_psp_get_ras_ta_init_param(ras_core, &ta_ctx->init_param); + if (ret) + goto err; + + if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) { + ret = -EACCES; + goto err; + } + + /* copy ras ta binary to shared gpu memory */ + memcpy(fw_mem->mem_cpu_addr, fw_bin->bin_addr, fw_bin->bin_size); + fw_mem->mem_size = fw_bin->bin_size; + + /* Initialize ras ta startup parameter */ + ta_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr; + ta_init_flags = &ta_cmd->ras_in_message.init_flags; + + ta_init_flags->poison_mode_en = ta_ctx->init_param.poison_mode_en; + ta_init_flags->dgpu_mode = ta_ctx->init_param.dgpu_mode; + ta_init_flags->xcc_mask = ta_ctx->init_param.xcc_mask; + ta_init_flags->channel_dis_num = ta_ctx->init_param.channel_dis_num; + ta_init_flags->nps_mode = ta_ctx->init_param.nps_mode; + ta_init_flags->active_umc_mask = ta_ctx->init_param.active_umc_mask; + + /* Setup load ras ta command */ + memset(&psp_load_ta_cmd, 0, sizeof(psp_load_ta_cmd)); + psp_load_ta_cmd.app_phy_addr_lo = lower_32_bits(fw_mem->mem_mc_addr); + psp_load_ta_cmd.app_phy_addr_hi = upper_32_bits(fw_mem->mem_mc_addr); + psp_load_ta_cmd.app_len = fw_mem->mem_size; + psp_load_ta_cmd.cmd_buf_phy_addr_lo = lower_32_bits(cmd_mem->mem_mc_addr); + psp_load_ta_cmd.cmd_buf_phy_addr_hi = upper_32_bits(cmd_mem->mem_mc_addr); + psp_load_ta_cmd.cmd_buf_len = cmd_mem->mem_size; + + ret = 
send_psp_cmd(ras_core, GFX_CMD_ID_LOAD_TA, + &psp_load_ta_cmd, sizeof(psp_load_ta_cmd), &resp); + if (!ret && !resp.status) { + /* Read TA version at FW offset 0x60 if TA version not found*/ + fw_hdr = (struct ras_ta_image_header *)fw_bin->bin_addr; + RAS_DEV_INFO(ras_core->dev, "PSP: RAS TA(version:%X.%X.%X.%X) is loaded.\n", + (fw_hdr->image_version >> 24) & 0xFF, (fw_hdr->image_version >> 16) & 0xFF, + (fw_hdr->image_version >> 8) & 0xFF, fw_hdr->image_version & 0xFF); + ta_ctx->ta_version = fw_hdr->image_version; + ta_ctx->session_id = resp.session_id; + ta_ctx->ras_ta_initialized = true; + } else { + RAS_DEV_ERR(ras_core->dev, + "Failed to load RAS TA! ret:%d, status:%d\n", ret, resp.status); + } + + ras_core_up_gpu_reset_lock(ras_core); + +err: + ras_psp_put_gpu_mem(ras_core, fw_mem); + ras_psp_put_gpu_mem(ras_core, cmd_mem); + return ret; +} + +static int load_ras_ta_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_load *ras_ta_load) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin; + int ret; + + fw_bin->bin_addr = ras_ta_load->bin_addr; + fw_bin->bin_size = ras_ta_load->bin_size; + fw_bin->fw_version = ras_ta_load->fw_version; + fw_bin->feature_version = ras_ta_load->feature_version; + + ret = send_load_ta_fw_cmd(ras_core, ta_ctx); + if (!ret) { + ras_ta_load->out_session_id = ta_ctx->session_id; + ras_ta_load->out_loaded_ta_version = ta_ctx->ta_version; + } + + return ret; +} + +static int unload_ras_ta_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_unload *ras_ta_unload) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct psp_gfx_cmd_unload_ta cmd_unload_ta = {0}; + struct psp_cmd_resp resp = {0}; + int ret; + + if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) + return -EACCES; + + cmd_unload_ta.session_id = ta_ctx->session_id; + ret = send_psp_cmd(ras_core, GFX_CMD_ID_UNLOAD_TA, + &cmd_unload_ta, sizeof(cmd_unload_ta), &resp); + if (ret || resp.status) { + RAS_DEV_ERR(ras_core->dev, + "Failed to unload RAS TA! 
ret:%d, status:%u\n", + ret, resp.status); + goto unlock; + } + + kfree(ta_ctx->fw_bin.bin_addr); + memset(&ta_ctx->fw_bin, 0, sizeof(ta_ctx->fw_bin)); + ta_ctx->ta_version = 0; + ta_ctx->ras_ta_initialized = false; + ta_ctx->session_id = 0; + +unlock: + ras_core_up_gpu_reset_lock(ras_core); + + return ret; +} + +int ras_psp_load_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_load *ras_ta_load) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct ras_psp_ta_unload ras_ta_unload = {0}; + int ret; + + if (ta_ctx->preload_ras_ta_enabled) + return 0; + + if (!ras_ta_load) + return -EINVAL; + + if (ta_ctx->ras_ta_initialized) { + ras_ta_unload.ras_session_id = ta_ctx->session_id; + ret = unload_ras_ta_firmware(ras_core, &ras_ta_unload); + if (ret) + return ret; + } + + return load_ras_ta_firmware(ras_core, ras_ta_load); +} + +int ras_psp_unload_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_unload *ras_ta_unload) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + + if (ta_ctx->preload_ras_ta_enabled) + return 0; + + if ((!ras_ta_unload) || + (ras_ta_unload->ras_session_id != ta_ctx->session_id)) + return -EINVAL; + + return unload_ras_ta_firmware(ras_core, ras_ta_unload); +} + +int ras_psp_trigger_error(struct ras_core_context *ras_core, + struct ras_ta_trigger_error_input *info, uint32_t instance_mask) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + + if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized) { + RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!"); + return -ENOEXEC; + } + + if (!info) + return -EINVAL; + + return trigger_ras_ta_error(ras_core, info, instance_mask); +} + +int ras_psp_query_address(struct ras_core_context *ras_core, + struct ras_ta_query_address_input *addr_in, + struct ras_ta_query_address_output *addr_out) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + + if (!ta_ctx->preload_ras_ta_enabled && + !ta_ctx->ras_ta_initialized) { + RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!"); + return -ENOEXEC; + } + + if (!addr_in || !addr_out) + return -EINVAL; + + return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS, + addr_in, sizeof(*addr_in), addr_out, sizeof(*addr_out)); +} + +int ras_psp_sw_init(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + memset(psp, 0, sizeof(*psp)); + + psp->sys_func = ras_core->config->psp_cfg.psp_sys_fn; + if (!psp->sys_func) { + RAS_DEV_ERR(ras_core->dev, "RAS psp sys function not configured!\n"); + return -EINVAL; + } + + mutex_init(&psp->psp_ctx.internal_mutex); + mutex_init(&psp->ta_ctx.ta_mutex); + + return 0; +} + +int ras_psp_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + mutex_destroy(&psp->psp_ctx.internal_mutex); + mutex_destroy(&psp->ta_ctx.ta_mutex); + + memset(psp, 0, sizeof(*psp)); + + return 0; +} + +int ras_psp_hw_init(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + psp->psp_ip_version = ras_core->config->psp_ip_version; + + psp->ip_func = ras_psp_get_ip_funcs(ras_core, psp->psp_ip_version); + if (!psp->ip_func) + return -EINVAL; + + /* After GPU reset, the system RAS PSP status may change. + * therefore, it is necessary to synchronize the system status again. 
+ */ + ras_psp_sync_system_ras_psp_status(ras_core); + + return 0; +} + +int ras_psp_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} + +bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core, + enum ras_ta_cmd_id cmd_id) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + bool ret = false; + + if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized) + return false; + + switch (cmd_id) { + case RAS_TA_CMD_ID__QUERY_ADDRESS: + /* Currently, querying the address from RAS TA is only supported + * when the RAS TA firmware is loaded during driver installation. + */ + if (ta_ctx->preload_ras_ta_enabled) + ret = true; + break; + case RAS_TA_CMD_ID__TRIGGER_ERROR: + ret = true; + break; + default: + ret = false; + break; + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h new file mode 100644 index 000000000000..71776fecfd66 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __RAS_PSP_H__ +#define __RAS_PSP_H__ +#include "ras.h" +#include "ras_ta_if.h" + +struct ras_core_context; +struct ras_ta_trigger_error_input; +struct ras_ta_query_address_input; +struct ras_ta_query_address_output; +enum ras_ta_cmd_id; + +struct ras_ta_image_header { + uint32_t reserved1[24]; + uint32_t image_version; /* [0x60] Off Chip Firmware Version */ + uint32_t reserved2[39]; +}; + +struct ras_psp_sys_status { + bool initialized; + uint32_t session_id; + void *psp_cmd_mutex; +}; + +struct ras_ta_init_param { + uint8_t poison_mode_en; + uint8_t dgpu_mode; + uint16_t xcc_mask; + uint8_t channel_dis_num; + uint8_t nps_mode; + uint32_t active_umc_mask; +}; + +struct gpu_mem_block { + uint32_t mem_type; + void *mem_bo; + uint64_t mem_mc_addr; + void *mem_cpu_addr; + uint32_t mem_size; + int ref_count; + void *private; +}; + +struct ras_psp_ip_func { + uint32_t (*psp_ras_ring_wptr_get)(struct ras_core_context *ras_core); + int (*psp_ras_ring_wptr_set)(struct ras_core_context *ras_core, uint32_t wptr); +}; + +struct ras_psp_ring { + struct gpu_mem_block ras_ring_gpu_mem; +}; + +struct psp_cmd_resp { + uint32_t status; + uint32_t session_id; +}; + +struct ras_psp_ctx { + void *external_mutex; + struct mutex internal_mutex; + uint64_t in_fence_value; + struct gpu_mem_block psp_cmd_gpu_mem; + struct gpu_mem_block out_fence_gpu_mem; +}; + +struct ras_ta_fw_bin { + uint32_t fw_version; + uint32_t feature_version; + uint32_t bin_size; + uint8_t *bin_addr; +}; + +struct ras_ta_ctx { + bool preload_ras_ta_enabled; + bool ras_ta_initialized; + uint32_t session_id; + uint32_t resp_status; + uint32_t ta_version; + struct mutex ta_mutex; + struct ras_ta_fw_bin fw_bin; + struct ras_ta_init_param init_param; + struct gpu_mem_block fw_gpu_mem; + struct gpu_mem_block cmd_gpu_mem; +}; + +struct ras_psp { + uint32_t psp_ip_version; + struct ras_psp_ring psp_ring; + struct ras_psp_ctx psp_ctx; + struct ras_ta_ctx ta_ctx; + const struct ras_psp_ip_func *ip_func; + const struct ras_psp_sys_func *sys_func; +}; + +struct ras_psp_ta_load { + uint32_t fw_version; + uint32_t feature_version; + uint32_t bin_size; + uint8_t *bin_addr; + uint64_t out_session_id; + uint32_t out_loaded_ta_version; +}; + +struct ras_psp_ta_unload { + uint64_t ras_session_id; +}; + +int ras_psp_sw_init(struct ras_core_context *ras_core); +int ras_psp_sw_fini(struct ras_core_context *ras_core); +int ras_psp_hw_init(struct ras_core_context *ras_core); +int ras_psp_hw_fini(struct ras_core_context *ras_core); +int ras_psp_load_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_load *ras_ta_load); +int ras_psp_unload_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_unload *ras_ta_unload); +int ras_psp_trigger_error(struct ras_core_context *ras_core, + struct ras_ta_trigger_error_input *info, uint32_t instance_mask); +int ras_psp_query_address(struct ras_core_context *ras_core, + struct ras_ta_query_address_input *addr_in, + struct ras_ta_query_address_output *addr_out); +bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core, + enum ras_ta_cmd_id cmd_id); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c new file mode 100644 index 000000000000..626cf39b75ac --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras.h" +#include "ras_psp_v13_0.h" + +#define regMP0_SMN_C2PMSG_67 0x0083 +#define regMP0_SMN_C2PMSG_67_BASE_IDX 0 + +static uint32_t ras_psp_v13_0_ring_wptr_get(struct ras_core_context *ras_core) +{ + return RAS_DEV_RREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67); +} + +static int ras_psp_v13_0_ring_wptr_set(struct ras_core_context *ras_core, uint32_t value) +{ + RAS_DEV_WREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67, value); + + return 0; +} + +const struct ras_psp_ip_func ras_psp_v13_0 = { + .psp_ras_ring_wptr_get = ras_psp_v13_0_ring_wptr_get, + .psp_ras_ring_wptr_set = ras_psp_v13_0_ring_wptr_set, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h new file mode 100644 index 000000000000..b705ffe38a12 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_PSP_V13_0_H__ +#define __RAS_PSP_V13_0_H__ +#include "ras_psp.h" + +extern const struct ras_psp_ip_func ras_psp_v13_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h new file mode 100644 index 000000000000..0921e36d3274 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _RAS_TA_IF_H +#define _RAS_TA_IF_H +#include "ras.h" + +#define RAS_TA_HOST_IF_VER 0 + +/* Responses have bit 31 set */ +#define RSP_ID_MASK (1U << 31) +#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK) + +/* invalid node instance value */ +#define RAS_TA_INV_NODE 0xffff + +/* RAS related enumerations */ +/**********************************************************/ +enum ras_ta_cmd_id { + RAS_TA_CMD_ID__ENABLE_FEATURES = 0, + RAS_TA_CMD_ID__DISABLE_FEATURES, + RAS_TA_CMD_ID__TRIGGER_ERROR, + RAS_TA_CMD_ID__QUERY_BLOCK_INFO, + RAS_TA_CMD_ID__QUERY_SUB_BLOCK_INFO, + RAS_TA_CMD_ID__QUERY_ADDRESS, + MAX_RAS_TA_CMD_ID +}; + +enum ras_ta_status { + RAS_TA_STATUS__SUCCESS = 0x0000, + RAS_TA_STATUS__RESET_NEEDED = 0xA001, + RAS_TA_STATUS__ERROR_INVALID_PARAMETER = 0xA002, + RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003, + RAS_TA_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004, + RAS_TA_STATUS__ERROR_INJECTION_FAILED = 0xA005, + RAS_TA_STATUS__ERROR_ASD_READ_WRITE = 0xA006, + RAS_TA_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007, + RAS_TA_STATUS__ERROR_TIMEOUT = 0xA008, + RAS_TA_STATUS__ERROR_BLOCK_DISABLED = 0XA009, + RAS_TA_STATUS__ERROR_GENERIC = 0xA00A, + RAS_TA_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B, + RAS_TA_STATUS__ERROR_GET_DEV_INFO = 0xA00C, + RAS_TA_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D, + RAS_TA_STATUS__ERROR_NOT_INITIALIZED = 0xA00E, + RAS_TA_STATUS__ERROR_TEE_INTERNAL = 0xA00F, + RAS_TA_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010, + RAS_TA_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011, + RAS_TA_STATUS__ERROR_RAS_READ_WRITE = 0xA012, + RAS_TA_STATUS__ERROR_NULL_PTR = 0xA013, + RAS_TA_STATUS__ERROR_UNSUPPORTED_IP = 0xA014, + RAS_TA_STATUS__ERROR_PCS_STATE_QUIET = 0xA015, + RAS_TA_STATUS__ERROR_PCS_STATE_ERROR = 0xA016, + RAS_TA_STATUS__ERROR_PCS_STATE_HANG = 0xA017, + RAS_TA_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018, + RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019, + RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED = 0xA01A +}; + 
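+/*
+ * Usage note (illustrative only): a host-side caller would typically treat
+ * a command as complete once the TA has echoed the command id with the
+ * response bit set and reported success, e.g.
+ *
+ *	done = (cmd->resp_id == RSP_ID(cmd->cmd_id)) &&
+ *	       (cmd->ras_status == RAS_TA_STATUS__SUCCESS);
+ *
+ * How the remaining ras_ta_status codes are handled is left to the caller.
+ */
+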
+enum ras_ta_block {
+	RAS_TA_BLOCK__UMC = 0,
+	RAS_TA_BLOCK__SDMA,
+	RAS_TA_BLOCK__GFX,
+	RAS_TA_BLOCK__MMHUB,
+	RAS_TA_BLOCK__ATHUB,
+	RAS_TA_BLOCK__PCIE_BIF,
+	RAS_TA_BLOCK__HDP,
+	RAS_TA_BLOCK__XGMI_WAFL,
+	RAS_TA_BLOCK__DF,
+	RAS_TA_BLOCK__SMN,
+	RAS_TA_BLOCK__SEM,
+	RAS_TA_BLOCK__MP0,
+	RAS_TA_BLOCK__MP1,
+	RAS_TA_BLOCK__FUSE,
+	RAS_TA_BLOCK__MCA,
+	RAS_TA_BLOCK__VCN,
+	RAS_TA_BLOCK__JPEG,
+	RAS_TA_BLOCK__IH,
+	RAS_TA_BLOCK__MPIO,
+	RAS_TA_BLOCK__MMSCH,
+	RAS_TA_NUM_BLOCK_MAX
+};
+
+enum ras_ta_mca_block {
+	RAS_TA_MCA_BLOCK__MP0 = 0,
+	RAS_TA_MCA_BLOCK__MP1 = 1,
+	RAS_TA_MCA_BLOCK__MPIO = 2,
+	RAS_TA_MCA_BLOCK__IOHC = 3,
+	RAS_TA_MCA_NUM_BLOCK_MAX
+};
+
+enum ras_ta_error_type {
+	RAS_TA_ERROR__NONE = 0,
+	RAS_TA_ERROR__PARITY = 1,
+	RAS_TA_ERROR__SINGLE_CORRECTABLE = 2,
+	RAS_TA_ERROR__MULTI_UNCORRECTABLE = 4,
+	RAS_TA_ERROR__POISON = 8,
+};
+
+enum ras_ta_address_type {
+	RAS_TA_MCA_TO_PA,
+	RAS_TA_PA_TO_MCA,
+};
+
+enum ras_ta_nps_mode {
+	RAS_TA_UNKNOWN_MODE = 0,
+	RAS_TA_NPS1_MODE = 1,
+	RAS_TA_NPS2_MODE = 2,
+	RAS_TA_NPS4_MODE = 4,
+	RAS_TA_NPS8_MODE = 8,
+};
+
+/* Input/output structures for RAS commands */
+/**********************************************************/
+
+struct ras_ta_enable_features_input {
+	enum ras_ta_block block_id;
+	enum ras_ta_error_type error_type;
+};
+
+struct ras_ta_disable_features_input {
+	enum ras_ta_block block_id;
+	enum ras_ta_error_type error_type;
+};
+
+struct ras_ta_trigger_error_input {
+	/* ras block, e.g. umc, gfx */
+	enum ras_ta_block block_id;
+
+	/* type of error, e.g. single_correctable */
+	enum ras_ta_error_type inject_error_type;
+
+	/* mem block, e.g. hbm, sram etc. */
+	uint32_t sub_block_index;
+
+	/* explicit address of error */
+	uint64_t address;
+
+	/* method of error injection, e.g. persistent, coherent etc.
*/ + uint64_t value; +}; + +struct ras_ta_init_flags { + uint8_t poison_mode_en; + uint8_t dgpu_mode; + uint16_t xcc_mask; + uint8_t channel_dis_num; + uint8_t nps_mode; + uint32_t active_umc_mask; +}; + +struct ras_ta_mca_addr { + uint64_t err_addr; + uint32_t ch_inst; + uint32_t umc_inst; + uint32_t node_inst; + uint32_t socket_id; +}; + +struct ras_ta_phy_addr { + uint64_t pa; + uint32_t bank; + uint32_t channel_idx; +}; + +struct ras_ta_query_address_input { + enum ras_ta_address_type addr_type; + struct ras_ta_mca_addr ma; + struct ras_ta_phy_addr pa; +}; + +struct ras_ta_output_flags { + uint8_t ras_init_success_flag; + uint8_t err_inject_switch_disable_flag; + uint8_t reg_access_failure_flag; +}; + +struct ras_ta_query_address_output { + /* don't use the flags here */ + struct ras_ta_output_flags flags; + struct ras_ta_mca_addr ma; + struct ras_ta_phy_addr pa; +}; + +/* Common input structure for RAS callbacks */ +/**********************************************************/ +union ras_ta_cmd_input { + struct ras_ta_init_flags init_flags; + struct ras_ta_enable_features_input enable_features; + struct ras_ta_disable_features_input disable_features; + struct ras_ta_trigger_error_input trigger_error; + struct ras_ta_query_address_input address; + uint32_t reserve_pad[256]; +}; + +union ras_ta_cmd_output { + struct ras_ta_output_flags flags; + struct ras_ta_query_address_output address; + uint32_t reserve_pad[256]; +}; + +struct ras_ta_cmd { + uint32_t cmd_id; + uint32_t resp_id; + uint32_t ras_status; + uint32_t if_version; + union ras_ta_cmd_input ras_in_message; + union ras_ta_cmd_output ras_out_message; +}; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c new file mode 100644 index 000000000000..4dae64c424a2 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c @@ -0,0 +1,707 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_umc.h" +#include "ras_umc_v12_0.h" + +#define MAX_ECC_NUM_PER_RETIREMENT 16 + +/* bad page timestamp format + * yy[31:27] mm[26:23] day[22:17] hh[16:12] mm[11:6] ss[5:0] + */ +#define EEPROM_TIMESTAMP_MINUTE 6 +#define EEPROM_TIMESTAMP_HOUR 12 +#define EEPROM_TIMESTAMP_DAY 17 +#define EEPROM_TIMESTAMP_MONTH 23 +#define EEPROM_TIMESTAMP_YEAR 27 + +static uint64_t ras_umc_get_eeprom_timestamp(struct ras_core_context *ras_core) +{ + struct ras_time tm = {0}; + uint64_t utc_timestamp = 0; + uint64_t eeprom_timestamp = 0; + + utc_timestamp = ras_core_get_utc_second_timestamp(ras_core); + if (!utc_timestamp) + return utc_timestamp; + + ras_core_convert_timestamp_to_time(ras_core, utc_timestamp, &tm); + + /* the year range is 2000 ~ 2031, set the year if not in the range */ + if (tm.tm_year < 2000) + tm.tm_year = 2000; + if (tm.tm_year > 2031) + tm.tm_year = 2031; + + tm.tm_year -= 2000; + + eeprom_timestamp = tm.tm_sec + (tm.tm_min << EEPROM_TIMESTAMP_MINUTE) + + (tm.tm_hour << EEPROM_TIMESTAMP_HOUR) + + (tm.tm_mday << EEPROM_TIMESTAMP_DAY) + + (tm.tm_mon << EEPROM_TIMESTAMP_MONTH) + + (tm.tm_year << EEPROM_TIMESTAMP_YEAR); + eeprom_timestamp &= 0xffffffff; + + return eeprom_timestamp; +} + +static const struct ras_umc_ip_func *ras_umc_get_ip_func( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 5, 0): + return &ras_umc_func_v12_0; + default: + RAS_DEV_ERR(ras_core->dev, + "UMC ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *in, struct umc_phy_addr *out, + uint32_t nps) +{ + struct ras_ta_query_address_input addr_in; + struct ras_ta_query_address_output addr_out; + int ret; + + if (!in) + return -EINVAL; + + memset(&addr_in, 0, sizeof(addr_in)); + memset(&addr_out, 0, sizeof(addr_out)); + + addr_in.ma.err_addr = in->err_addr; + addr_in.ma.ch_inst = in->ch_inst; + addr_in.ma.umc_inst = in->umc_inst; + addr_in.ma.node_inst = in->node_inst; + addr_in.ma.socket_id = in->socket_id; + + addr_in.addr_type = RAS_TA_MCA_TO_PA; + + ret = ras_psp_query_address(ras_core, &addr_in, &addr_out); + if (ret) { + RAS_DEV_WARN(ras_core->dev, + "Failed to query RAS physical address for 0x%llx, ret:%d", + in->err_addr, ret); + return -EREMOTEIO; + } + + if (out) { + out->pa = addr_out.pa.pa; + out->bank = addr_out.pa.bank; + out->channel_idx = addr_out.pa.channel_idx; + } + + return 0; +} + +static int ras_umc_log_ecc(struct ras_core_context *ras_core, + unsigned long idx, void *data) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + int ret; + + mutex_lock(&ras_umc->tree_lock); + ret = radix_tree_insert(&ras_umc->root, idx, data); + if (!ret) + radix_tree_tag_set(&ras_umc->root, idx, UMC_ECC_NEW_DETECTED_TAG); + mutex_unlock(&ras_umc->tree_lock); + + return ret; +} + +int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + uint64_t buf[8] = {0}; + void **slot; + void *data; + void *iter = buf; + + mutex_lock(&ras_umc->tree_lock); + radix_tree_for_each_slot(slot, &ras_umc->root, iter, 0) { + data = ras_radix_tree_delete_iter(&ras_umc->root, iter); + kfree(data); + } + mutex_unlock(&ras_umc->tree_lock); + + return 0; +} + +static void ras_umc_reserve_eeprom_record(struct ras_core_context *ras_core, + struct eeprom_umc_record *record) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + uint64_t 
page_pfn[16]; + int count = 0, i; + + memset(page_pfn, 0, sizeof(page_pfn)); + if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages) { + count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core, + record, record->cur_nps, page_pfn, ARRAY_SIZE(page_pfn)); + if (count <= 0) { + RAS_DEV_ERR(ras_core->dev, + "Fail to convert error address! count:%d\n", count); + return; + } + } + + /* Reserve memory */ + for (i = 0; i < count; i++) + ras_core_event_notify(ras_core, + RAS_EVENT_ID__RESERVE_BAD_PAGE, &page_pfn[i]); +} + +/* When gpu reset is ongoing, ecc logging operations will be pended. + */ +int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_bank_ecc_node *ecc_node; + + ecc_node = kzalloc(sizeof(*ecc_node), GFP_KERNEL); + if (!ecc_node) + return -ENOMEM; + + memcpy(&ecc_node->ecc, bank, sizeof(ecc_node->ecc)); + + mutex_lock(&ras_umc->pending_ecc_lock); + list_add_tail(&ecc_node->node, &ras_umc->pending_ecc_list); + mutex_unlock(&ras_umc->pending_ecc_lock); + + return 0; +} + +/* After gpu reset is complete, re-log the pending error banks. + */ +int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_bank_ecc_node *ecc_node, *tmp; + + mutex_lock(&ras_umc->pending_ecc_lock); + list_for_each_entry_safe(ecc_node, + tmp, &ras_umc->pending_ecc_list, node){ + if (ecc_node && !ras_umc_log_bad_bank(ras_core, &ecc_node->ecc)) { + list_del(&ecc_node->node); + kfree(ecc_node); + } + } + mutex_unlock(&ras_umc->pending_ecc_lock); + + return 0; +} + +int ras_umc_log_bad_bank(struct ras_core_context *ras_core, struct ras_bank_ecc *bank) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_umc_record umc_rec; + struct eeprom_umc_record *err_rec; + int ret; + + memset(&umc_rec, 0, sizeof(umc_rec)); + + mutex_lock(&ras_umc->bank_log_lock); + ret = ras_umc->ip_func->bank_to_eeprom_record(ras_core, bank, &umc_rec); + if (ret) + goto out; + + err_rec = kzalloc(sizeof(*err_rec), GFP_KERNEL); + if (!err_rec) { + ret = -ENOMEM; + goto out; + } + + memcpy(err_rec, &umc_rec, sizeof(umc_rec)); + ret = ras_umc_log_ecc(ras_core, err_rec->cur_nps_retired_row_pfn, err_rec); + if (ret) { + if (ret == -EEXIST) { + RAS_DEV_INFO(ras_core->dev, "The bad pages have been logged before.\n"); + ret = 0; + } + + kfree(err_rec); + goto out; + } + + ras_umc_reserve_eeprom_record(ras_core, err_rec); + + ret = ras_core_event_notify(ras_core, + RAS_EVENT_ID__BAD_PAGE_DETECTED, NULL); + +out: + mutex_unlock(&ras_umc->bank_log_lock); + return ret; +} + +static int ras_umc_get_new_records(struct ras_core_context *ras_core, + struct eeprom_umc_record *records, u32 num) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_umc_record *entries[MAX_ECC_NUM_PER_RETIREMENT]; + u32 entry_num = num < MAX_ECC_NUM_PER_RETIREMENT ? 
num : MAX_ECC_NUM_PER_RETIREMENT; + int count = 0; + int new_detected, i; + + mutex_lock(&ras_umc->tree_lock); + new_detected = radix_tree_gang_lookup_tag(&ras_umc->root, (void **)entries, + 0, entry_num, UMC_ECC_NEW_DETECTED_TAG); + for (i = 0; i < new_detected; i++) { + if (!entries[i]) + continue; + + memcpy(&records[i], entries[i], sizeof(struct eeprom_umc_record)); + count++; + radix_tree_tag_clear(&ras_umc->root, + entries[i]->cur_nps_retired_row_pfn, UMC_ECC_NEW_DETECTED_TAG); + } + mutex_unlock(&ras_umc->tree_lock); + + return count; +} + +static bool ras_umc_check_retired_record(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, bool from_eeprom) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data; + uint32_t nps = 0; + int i, ret; + + if (from_eeprom) { + nps = ras_umc->umc_err_data.umc_nps_mode; + if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_record) { + ret = ras_umc->ip_func->eeprom_record_to_nps_record(ras_core, record, nps); + if (ret) + RAS_DEV_WARN(ras_core->dev, + "Failed to adjust eeprom record, ret:%d", ret); + } + return false; + } + + for (i = 0; i < data->count; i++) { + if ((data->bps[i].retired_row_pfn == record->retired_row_pfn) && + (data->bps[i].cur_nps_retired_row_pfn == record->cur_nps_retired_row_pfn)) + return true; + } + + return false; +} + +/* alloc/realloc bps array */ +static int ras_umc_realloc_err_data_space(struct ras_core_context *ras_core, + struct eeprom_store_record *data, int pages) +{ + unsigned int old_space = data->count + data->space_left; + unsigned int new_space = old_space + pages; + unsigned int align_space = ALIGN(new_space, 512); + void *bps = kzalloc(align_space * sizeof(*data->bps), GFP_KERNEL); + + if (!bps) + return -ENOMEM; + + if (data->bps) { + memcpy(bps, data->bps, + data->count * sizeof(*data->bps)); + kfree(data->bps); + } + + data->bps = bps; + data->space_left += align_space - old_space; + return 0; +} + +static int ras_umc_update_eeprom_rom_data(struct ras_core_context *ras_core, + struct eeprom_umc_record *bps) +{ + struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.rom_data; + + if (!data->space_left && + ras_umc_realloc_err_data_space(ras_core, data, 256)) { + return -ENOMEM; + } + + memcpy(&data->bps[data->count], bps, sizeof(*data->bps)); + data->count++; + data->space_left--; + return 0; +} + +static int ras_umc_update_eeprom_ram_data(struct ras_core_context *ras_core, + struct eeprom_umc_record *bps) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data; + uint64_t page_pfn[16]; + int count = 0, j; + + if (!data->space_left && + ras_umc_realloc_err_data_space(ras_core, data, 256)) { + return -ENOMEM; + } + + memset(page_pfn, 0, sizeof(page_pfn)); + if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages) + count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core, + bps, bps->cur_nps, page_pfn, ARRAY_SIZE(page_pfn)); + + if (count > 0) { + for (j = 0; j < count; j++) { + bps->cur_nps_retired_row_pfn = page_pfn[j]; + memcpy(&data->bps[data->count], bps, sizeof(*data->bps)); + data->count++; + data->space_left--; + } + } else { + memcpy(&data->bps[data->count], bps, sizeof(*data->bps)); + data->count++; + data->space_left--; + } + + return 0; +} + +/* it deal with vram only. 
*/ +static int ras_umc_add_bad_pages(struct ras_core_context *ras_core, + struct eeprom_umc_record *bps, + int pages, bool from_eeprom) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_umc_err_data *data = &ras_umc->umc_err_data; + int i, ret = 0; + + if (!bps || pages <= 0) + return 0; + + mutex_lock(&ras_umc->umc_lock); + for (i = 0; i < pages; i++) { + if (ras_umc_check_retired_record(ras_core, &bps[i], from_eeprom)) + continue; + + ret = ras_umc_update_eeprom_rom_data(ras_core, &bps[i]); + if (ret) + goto out; + + if (data->last_retired_pfn == bps[i].cur_nps_retired_row_pfn) + continue; + + data->last_retired_pfn = bps[i].cur_nps_retired_row_pfn; + + if (from_eeprom) + ras_umc_reserve_eeprom_record(ras_core, &bps[i]); + + ret = ras_umc_update_eeprom_ram_data(ras_core, &bps[i]); + if (ret) + goto out; + } +out: + mutex_unlock(&ras_umc->umc_lock); + + return ret; +} + +/* + * read error record array in eeprom and reserve enough space for + * storing new bad pages + */ +int ras_umc_load_bad_pages(struct ras_core_context *ras_core) +{ + struct eeprom_umc_record *bps; + uint32_t ras_num_recs; + int ret; + + ras_num_recs = ras_eeprom_get_record_count(ras_core); + /* no bad page record, skip eeprom access */ + if (!ras_num_recs || + ras_core->ras_eeprom.record_threshold_config == DISABLE_RETIRE_PAGE) + return 0; + + bps = kcalloc(ras_num_recs, sizeof(*bps), GFP_KERNEL); + if (!bps) + return -ENOMEM; + + ret = ras_eeprom_read(ras_core, bps, ras_num_recs); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "Failed to load EEPROM table records!"); + } else { + ras_core->ras_umc.umc_err_data.last_retired_pfn = UMC_INV_MEM_PFN; + ret = ras_umc_add_bad_pages(ras_core, bps, ras_num_recs, true); + } + + kfree(bps); + return ret; +} + +/* + * write error record array to eeprom, the function should be + * protected by recovery_lock + * new_cnt: new added UE count, excluding reserved bad pages, can be NULL + */ +static int ras_umc_save_bad_pages(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data; + uint32_t eeprom_record_num; + int save_count; + int ret = 0; + + if (!data->bps) + return 0; + + eeprom_record_num = ras_eeprom_get_record_count(ras_core); + mutex_lock(&ras_umc->umc_lock); + save_count = data->count - eeprom_record_num; + /* only new entries are saved */ + if (save_count > 0) { + if (ras_eeprom_append(ras_core, + &data->bps[eeprom_record_num], + save_count)) { + RAS_DEV_ERR(ras_core->dev, "Failed to save EEPROM table data!"); + ret = -EIO; + goto exit; + } + + RAS_DEV_INFO(ras_core->dev, "Saved %d pages to EEPROM table.\n", save_count); + } + +exit: + mutex_unlock(&ras_umc->umc_lock); + return ret; +} + +int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data) +{ + struct eeprom_umc_record records[MAX_ECC_NUM_PER_RETIREMENT]; + int count, ret; + + memset(records, 0, sizeof(records)); + count = ras_umc_get_new_records(ras_core, records, ARRAY_SIZE(records)); + if (count <= 0) + return -ENODATA; + + ret = ras_umc_add_bad_pages(ras_core, records, count, false); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "Failed to add ras bad page!\n"); + return -EINVAL; + } + + ret = ras_umc_save_bad_pages(ras_core); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "Failed to save ras bad page\n"); + return -EINVAL; + } + + return 0; +} + +int ras_umc_sw_init(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + + memset(ras_umc, 0, sizeof(*ras_umc)); + + 
INIT_LIST_HEAD(&ras_umc->pending_ecc_list); + + INIT_RADIX_TREE(&ras_umc->root, GFP_KERNEL); + + mutex_init(&ras_umc->tree_lock); + mutex_init(&ras_umc->pending_ecc_lock); + mutex_init(&ras_umc->umc_lock); + mutex_init(&ras_umc->bank_log_lock); + + return 0; +} + +int ras_umc_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_umc_err_data *umc_err_data = &ras_umc->umc_err_data; + struct ras_bank_ecc_node *ecc_node, *tmp; + + mutex_destroy(&ras_umc->umc_lock); + mutex_destroy(&ras_umc->bank_log_lock); + + if (umc_err_data->rom_data.bps) { + umc_err_data->rom_data.count = 0; + kfree(umc_err_data->rom_data.bps); + umc_err_data->rom_data.bps = NULL; + umc_err_data->rom_data.space_left = 0; + } + + if (umc_err_data->ram_data.bps) { + umc_err_data->ram_data.count = 0; + kfree(umc_err_data->ram_data.bps); + umc_err_data->ram_data.bps = NULL; + umc_err_data->ram_data.space_left = 0; + } + + ras_umc_clear_logged_ecc(ras_core); + + mutex_lock(&ras_umc->pending_ecc_lock); + list_for_each_entry_safe(ecc_node, + tmp, &ras_umc->pending_ecc_list, node){ + list_del(&ecc_node->node); + kfree(ecc_node); + } + mutex_unlock(&ras_umc->pending_ecc_lock); + + mutex_destroy(&ras_umc->tree_lock); + mutex_destroy(&ras_umc->pending_ecc_lock); + + return 0; +} + +int ras_umc_hw_init(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + uint32_t nps; + + nps = ras_core_get_curr_nps_mode(ras_core); + + if (!nps || (nps >= UMC_MEMORY_PARTITION_MODE_UNKNOWN)) { + RAS_DEV_ERR(ras_core->dev, "Invalid memory NPS mode: %u!\n", nps); + return -ENODATA; + } + + ras_umc->umc_err_data.umc_nps_mode = nps; + + ras_umc->umc_vram_type = ras_core->config->umc_cfg.umc_vram_type; + if (!ras_umc->umc_vram_type) { + RAS_DEV_ERR(ras_core->dev, "Invalid UMC VRAM Type: %u!\n", + ras_umc->umc_vram_type); + return -ENODATA; + } + + ras_umc->umc_ip_version = ras_core->config->umc_ip_version; + ras_umc->ip_func = ras_umc_get_ip_func(ras_core, ras_umc->umc_ip_version); + if (!ras_umc->ip_func) + return -EINVAL; + + return 0; +} + +int ras_umc_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} + +int ras_umc_clean_badpage_data(struct ras_core_context *ras_core) +{ + struct ras_umc_err_data *data = &ras_core->ras_umc.umc_err_data; + + mutex_lock(&ras_core->ras_umc.umc_lock); + + kfree(data->rom_data.bps); + kfree(data->ram_data.bps); + + memset(data, 0, sizeof(*data)); + mutex_unlock(&ras_core->ras_umc.umc_lock); + + return 0; +} + +int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core, + uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr, + enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record) +{ + struct eeprom_umc_record *err_rec = record; + + /* Set bad page pfn and nps mode */ + EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(err_rec, + RAS_ADDR_TO_PFN(cur_nps_addr->pa), cur_nps); + + err_rec->address = err_addr; + err_rec->ts = ras_umc_get_eeprom_timestamp(ras_core); + err_rec->err_type = RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec->cu = 0; + err_rec->mem_channel = cur_nps_addr->channel_idx; + err_rec->mcumc_id = umc_inst; + err_rec->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(cur_nps_addr->pa); + err_rec->cur_nps_bank = cur_nps_addr->bank; + err_rec->cur_nps = cur_nps; + return 0; +} + +int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core) +{ + struct ras_umc_err_data *err_data = &ras_core->ras_umc.umc_err_data; + + return err_data->rom_data.count; +} + +int ras_umc_get_badpage_count(struct 
ras_core_context *ras_core) +{ + struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data; + + return data->count; +} + +int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record) +{ + struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data; + + if (index >= data->count) + return -EINVAL; + + memcpy(record, &data->bps[index], sizeof(struct eeprom_umc_record)); + return 0; +} + +bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data; + uint64_t page_pfn = RAS_ADDR_TO_PFN(addr); + int i, ret = false; + + mutex_lock(&ras_umc->umc_lock); + for (i = 0; i < data->count; i++) { + if (data->bps[i].cur_nps_retired_row_pfn == page_pfn) { + ret = true; + break; + } + } + mutex_unlock(&ras_umc->umc_lock); + + return ret; +} + +int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core, + uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + int ret = 0; + + if (bank_to_pa) + ret = ras_umc->ip_func->bank_to_soc_pa(ras_core, *bank_addr, soc_pa); + else + ret = ras_umc->ip_func->soc_pa_to_bank(ras_core, *soc_pa, bank_addr); + + return ret; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h new file mode 100644 index 000000000000..7d9e779d8c4c --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_UMC_H__ +#define __RAS_UMC_H__ +#include "ras.h" +#include "ras_eeprom.h" +#include "ras_cmd.h" + +#define UMC_VRAM_TYPE_UNKNOWN 0 +#define UMC_VRAM_TYPE_GDDR1 1 +#define UMC_VRAM_TYPE_DDR2 2 +#define UMC_VRAM_TYPE_GDDR3 3 +#define UMC_VRAM_TYPE_GDDR4 4 +#define UMC_VRAM_TYPE_GDDR5 5 +#define UMC_VRAM_TYPE_HBM 6 +#define UMC_VRAM_TYPE_DDR3 7 +#define UMC_VRAM_TYPE_DDR4 8 +#define UMC_VRAM_TYPE_GDDR6 9 +#define UMC_VRAM_TYPE_DDR5 10 +#define UMC_VRAM_TYPE_LPDDR4 11 +#define UMC_VRAM_TYPE_LPDDR5 12 +#define UMC_VRAM_TYPE_HBM3E 13 + +#define UMC_ECC_NEW_DETECTED_TAG 0x1 +#define UMC_INV_MEM_PFN (0xFFFFFFFFFFFFFFFF) + +/* three column bits and one row bit in MCA address flip + * in bad page retirement + */ +#define UMC_PA_FLIP_BITS_NUM 4 + +enum umc_memory_partition_mode { + UMC_MEMORY_PARTITION_MODE_NONE = 0, + UMC_MEMORY_PARTITION_MODE_NPS1 = 1, + UMC_MEMORY_PARTITION_MODE_NPS2 = 2, + UMC_MEMORY_PARTITION_MODE_NPS3 = 3, + UMC_MEMORY_PARTITION_MODE_NPS4 = 4, + UMC_MEMORY_PARTITION_MODE_NPS6 = 6, + UMC_MEMORY_PARTITION_MODE_NPS8 = 8, + UMC_MEMORY_PARTITION_MODE_UNKNOWN +}; + +struct ras_core_context; +struct ras_bank_ecc; + +struct umc_flip_bits { + uint32_t flip_bits_in_pa[UMC_PA_FLIP_BITS_NUM]; + uint32_t flip_row_bit; + uint32_t r13_in_pa; + uint32_t bit_num; +}; + +struct umc_mca_addr { + uint64_t err_addr; + uint32_t ch_inst; + uint32_t umc_inst; + uint32_t node_inst; + uint32_t socket_id; +}; + +struct umc_phy_addr { + uint64_t pa; + uint32_t bank; + uint32_t channel_idx; +}; + +struct umc_bank_addr { + uint32_t stack_id; /* SID */ + uint32_t bank_group; + uint32_t bank; + uint32_t row; + uint32_t column; + uint32_t channel; + uint32_t subchannel; /* Also called Pseudochannel (PC) */ +}; + +struct ras_umc_ip_func { + int (*bank_to_eeprom_record)(struct ras_core_context *ras_core, + struct ras_bank_ecc *bank, struct eeprom_umc_record *record); + int (*eeprom_record_to_nps_record)(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps); + int (*eeprom_record_to_nps_pages)(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps, + uint64_t *pfns, uint32_t num); + int (*bank_to_soc_pa)(struct ras_core_context *ras_core, + struct umc_bank_addr bank_addr, uint64_t *soc_pa); + int (*soc_pa_to_bank)(struct ras_core_context *ras_core, + uint64_t soc_pa, struct umc_bank_addr *bank_addr); +}; + +struct eeprom_store_record { + /* point to data records array */ + struct eeprom_umc_record *bps; + /* the count of entries */ + int count; + /* the space can place new entries */ + int space_left; +}; + +struct ras_umc_err_data { + struct eeprom_store_record rom_data; + struct eeprom_store_record ram_data; + enum umc_memory_partition_mode umc_nps_mode; + uint64_t last_retired_pfn; +}; + +struct ras_umc { + u32 umc_ip_version; + u32 umc_vram_type; + const struct ras_umc_ip_func *ip_func; + struct radix_tree_root root; + struct mutex tree_lock; + struct mutex umc_lock; + struct mutex bank_log_lock; + struct mutex pending_ecc_lock; + struct ras_umc_err_data umc_err_data; + struct list_head pending_ecc_list; +}; + +int ras_umc_sw_init(struct ras_core_context *ras); +int ras_umc_sw_fini(struct ras_core_context *ras); +int ras_umc_hw_init(struct ras_core_context *ras); +int ras_umc_hw_fini(struct ras_core_context *ras); +int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *in, struct umc_phy_addr *out, + uint32_t nps); +int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void 
*data); +int ras_umc_log_bad_bank(struct ras_core_context *ras, struct ras_bank_ecc *bank); +int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank); +int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core); +int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core); +int ras_umc_load_bad_pages(struct ras_core_context *ras_core); +int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core); +int ras_umc_clean_badpage_data(struct ras_core_context *ras_core); +int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core, + uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr, + enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record); + +int ras_umc_get_badpage_count(struct ras_core_context *ras_core); +int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record); +bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr); +int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core, + uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c new file mode 100644 index 000000000000..5d9a11c17a86 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c @@ -0,0 +1,511 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_umc.h" +#include "ras_core_status.h" +#include "ras_umc_v12_0.h" + +#define NumDieInterleaved 4 + +static const uint32_t umc_v12_0_channel_idx_tbl[] + [UMC_V12_0_UMC_INSTANCE_NUM][UMC_V12_0_CHANNEL_INSTANCE_NUM] = { + {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12}, + {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}}, + {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32}, + {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}}, + {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64}, + {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}}, + {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108}, + {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}} +}; + +/* mapping of MCA error address to normalized address */ +static const uint32_t umc_v12_0_ma2na_mapping[] = { + 0, 5, 6, 8, 9, 14, 12, 13, + 10, 11, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 24, 7, 29, 30, +}; + +static bool umc_v12_0_bit_wise_xor(uint32_t val) +{ + bool result = 0; + int i; + + for (i = 0; i < 32; i++) + result = result ^ ((val >> i) & 0x1); + + return result; +} + +static void __get_nps_pa_flip_bits(struct ras_core_context *ras_core, + enum umc_memory_partition_mode nps, + struct umc_flip_bits *flip_bits) +{ + uint32_t vram_type = ras_core->ras_umc.umc_vram_type; + + /* default setting */ + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT; + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT; + flip_bits->flip_row_bit = 13; + flip_bits->bit_num = 4; + flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT; + + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) { + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT; + flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT; + } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) { + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT; + flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT; + } + + switch (vram_type) { + case UMC_VRAM_TYPE_HBM: + /* other nps modes are taken as nps1 */ + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT; + + break; + case UMC_VRAM_TYPE_HBM3E: + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT; + flip_bits->flip_row_bit = 12; + + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT; + + break; + default: + RAS_DEV_WARN(ras_core->dev, + "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n"); + break; + } +} + +static uint64_t convert_nps_pa_to_row_pa(struct ras_core_context *ras_core, + uint64_t pa, enum umc_memory_partition_mode nps, bool zero_pfn_ok) +{ + struct umc_flip_bits flip_bits = {0}; + uint64_t row_pa; + int i; + + __get_nps_pa_flip_bits(ras_core, nps, &flip_bits); + + row_pa = pa; + /* clear loop bits in soc physical address */ + for (i = 0; i < flip_bits.bit_num; i++) + row_pa &= 
~BIT_ULL(flip_bits.flip_bits_in_pa[i]); + + if (!zero_pfn_ok && !RAS_ADDR_TO_PFN(row_pa)) + row_pa |= BIT_ULL(flip_bits.flip_bits_in_pa[2]); + + return row_pa; +} + +static int lookup_bad_pages_in_a_row(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps, + uint64_t *pfns, uint32_t num, + uint64_t seq_no, bool dump) +{ + uint32_t col, col_lower, row, row_lower, idx, row_high; + uint64_t soc_pa, row_pa, column, err_addr; + uint64_t retired_addr = RAS_PFN_TO_ADDR(record->cur_nps_retired_row_pfn); + struct umc_flip_bits flip_bits = {0}; + uint32_t retire_unit; + uint32_t i; + + __get_nps_pa_flip_bits(ras_core, nps, &flip_bits); + + row_pa = convert_nps_pa_to_row_pa(ras_core, retired_addr, nps, true); + + err_addr = record->address; + /* get column bit 0 and 1 in mca address */ + col_lower = (err_addr >> 1) & 0x3ULL; + /* MA_R13_BIT will be handled later */ + row_lower = (err_addr >> UMC_V12_0_MCA_R0_BIT) & 0x1fffULL; + row_lower &= ~BIT_ULL(flip_bits.flip_row_bit); + + if (ras_core->ras_gfx.gfx_ip_version >= IP_VERSION(9, 5, 0)) { + row_high = (row_pa >> flip_bits.r13_in_pa) & 0x3ULL; + /* it's 2.25GB in each channel, from MCA address to PA + * [R14 R13] is converted if the two bits value are 0x3, + * get them from PA instead of MCA address. + */ + row_lower |= (row_high << 13); + } + + idx = 0; + row = 0; + retire_unit = 0x1 << flip_bits.bit_num; + /* loop for all possibilities of retire bits */ + for (column = 0; column < retire_unit; column++) { + soc_pa = row_pa; + for (i = 0; i < flip_bits.bit_num; i++) + soc_pa |= (((column >> i) & 0x1ULL) << flip_bits.flip_bits_in_pa[i]); + + col = ((column & 0x7) << 2) | col_lower; + + /* add row bit 13 */ + if (flip_bits.bit_num == UMC_PA_FLIP_BITS_NUM) + row = ((column >> 3) << flip_bits.flip_row_bit) | row_lower; + + if (dump) + RAS_DEV_INFO(ras_core->dev, + "{%llu} Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", + seq_no, soc_pa, row, col, + record->cur_nps_bank, record->mem_channel); + + + if (pfns && (idx < num)) + pfns[idx++] = RAS_ADDR_TO_PFN(soc_pa); + } + + return idx; +} + +static int umc_v12_convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out, + uint32_t nps) +{ + uint32_t i, na_shift; + uint64_t soc_pa, na, na_nps; + uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row; + uint32_t bank0, bank1, bank2, bank3, bank; + uint32_t ch_inst = addr_in->ch_inst; + uint32_t umc_inst = addr_in->umc_inst; + uint32_t node_inst = addr_in->node_inst; + uint32_t socket_id = addr_in->socket_id; + uint32_t channel_index; + uint64_t err_addr = addr_in->err_addr; + + if (node_inst != UMC_INV_AID_NODE) { + if (ch_inst >= UMC_V12_0_CHANNEL_INSTANCE_NUM || + umc_inst >= UMC_V12_0_UMC_INSTANCE_NUM || + node_inst >= UMC_V12_0_AID_NUM_MAX || + socket_id >= UMC_V12_0_SOCKET_NUM_MAX) + return -EINVAL; + } else { + if (socket_id >= UMC_V12_0_SOCKET_NUM_MAX || + ch_inst >= UMC_V12_0_TOTAL_CHANNEL_NUM) + return -EINVAL; + } + + bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL; + bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL; + bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL; + bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL; + col = (err_addr >> 1) & 0x1fULL; + row = (err_addr >> 10) & 0x3fffULL; + + /* apply bank hash algorithm */ + bank0 = + bank_hash0 ^ (UMC_V12_0_XOR_EN0 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0)))); + bank1 = + 
bank_hash1 ^ (UMC_V12_0_XOR_EN1 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1)))); + bank2 = + bank_hash2 ^ (UMC_V12_0_XOR_EN2 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2)))); + bank3 = + bank_hash3 ^ (UMC_V12_0_XOR_EN3 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3)))); + + bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3); + err_addr &= ~0x3c0ULL; + err_addr |= (bank << UMC_V12_0_MCA_B0_BIT); + + na_nps = 0x0; + /* convert mca error address to normalized address */ + for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++) + na_nps |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i]; + + if (nps == UMC_MEMORY_PARTITION_MODE_NPS1) + na_shift = 8; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) + na_shift = 9; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) + na_shift = 10; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8) + na_shift = 11; + else + return -EINVAL; + + na = ((na_nps >> na_shift) << 8) | (na_nps & 0xff); + + if (node_inst != UMC_INV_AID_NODE) + channel_index = + umc_v12_0_channel_idx_tbl[node_inst][umc_inst][ch_inst]; + else { + channel_index = ch_inst; + node_inst = channel_index / + (UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM); + } + + /* translate umc channel address to soc pa, 3 parts are included */ + soc_pa = ADDR_OF_32KB_BLOCK(na) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(na); + + /* calc channel hash based on absolute address */ + soc_pa += socket_id * SOCKET_LFB_SIZE; + /* the umc channel bits are not original values, they are hashed */ + UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa); + /* restore pa */ + soc_pa -= socket_id * SOCKET_LFB_SIZE; + + /* get some channel bits from na_nps directly and + * add nps section offset + */ + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) { + soc_pa &= ~(0x1ULL << UMC_V12_0_PA_CH5_BIT); + soc_pa |= ((na_nps & 0x100) << 5); + soc_pa += (node_inst >> 1) * (SOCKET_LFB_SIZE >> 1); + } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) { + soc_pa &= ~(0x3ULL << UMC_V12_0_PA_CH4_BIT); + soc_pa |= ((na_nps & 0x300) << 4); + soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2); + } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8) { + soc_pa &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); + soc_pa |= ((na_nps & 0x700) << 4); + soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2) + + (channel_index >> 4) * (SOCKET_LFB_SIZE >> 3); + } + + addr_out->pa = soc_pa; + addr_out->bank = bank; + addr_out->channel_idx = channel_index; + + return 0; +} + +static int convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out, + uint32_t nps) +{ + int ret; + + if (ras_psp_check_supported_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS)) + ret = ras_umc_psp_convert_ma_to_pa(ras_core, + addr_in, addr_out, nps); + else + ret = umc_v12_convert_ma_to_pa(ras_core, + addr_in, addr_out, nps); + + return ret; +} + +static int convert_bank_to_nps_addr(struct ras_core_context *ras_core, + struct ras_bank_ecc *bank, struct umc_phy_addr *pa_addr, uint32_t nps) +{ + struct umc_mca_addr addr_in; + struct umc_phy_addr addr_out; + int ret; + + memset(&addr_in, 0, sizeof(addr_in)); + memset(&addr_out, 0, sizeof(addr_out)); + + addr_in.err_addr = ACA_ADDR_2_ERR_ADDR(bank->addr); + addr_in.ch_inst = ACA_IPID_2_UMC_CH(bank->ipid); + addr_in.umc_inst = ACA_IPID_2_UMC_INST(bank->ipid); + addr_in.node_inst = 
ACA_IPID_2_DIE_ID(bank->ipid); + addr_in.socket_id = ACA_IPID_2_SOCKET_ID(bank->ipid); + + ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps); + if (!ret) { + pa_addr->pa = + convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false); + pa_addr->channel_idx = addr_out.channel_idx; + pa_addr->bank = addr_out.bank; + } + + return ret; +} + +static int umc_v12_0_bank_to_eeprom_record(struct ras_core_context *ras_core, + struct ras_bank_ecc *bank, struct eeprom_umc_record *record) +{ + struct umc_phy_addr nps_addr; + int ret; + + memset(&nps_addr, 0, sizeof(nps_addr)); + + ret = convert_bank_to_nps_addr(ras_core, bank, + &nps_addr, bank->nps); + if (ret) + return ret; + + ras_umc_fill_eeprom_record(ras_core, + ACA_ADDR_2_ERR_ADDR(bank->addr), ACA_IPID_2_UMC_INST(bank->ipid), + &nps_addr, bank->nps, record); + + lookup_bad_pages_in_a_row(ras_core, record, + bank->nps, NULL, 0, bank->seq_no, true); + + return 0; +} + +static int convert_eeprom_record_to_nps_addr(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint64_t *pa, uint32_t nps) +{ + struct device_system_info dev_info = {0}; + struct umc_mca_addr addr_in; + struct umc_phy_addr addr_out; + int ret; + + memset(&addr_in, 0, sizeof(addr_in)); + memset(&addr_out, 0, sizeof(addr_out)); + + ras_core_get_device_system_info(ras_core, &dev_info); + + addr_in.err_addr = record->address; + addr_in.ch_inst = record->mem_channel; + addr_in.umc_inst = record->mcumc_id; + addr_in.node_inst = UMC_INV_AID_NODE; + addr_in.socket_id = dev_info.socket_id; + + ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps); + if (ret) + return ret; + + *pa = convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false); + + return 0; +} + +static int umc_v12_0_eeprom_record_to_nps_record(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps) +{ + uint64_t pa = 0; + int ret = 0; + + if (nps == EEPROM_RECORD_UMC_NPS_MODE(record)) { + record->cur_nps_retired_row_pfn = EEPROM_RECORD_UMC_ADDR_PFN(record); + } else { + ret = convert_eeprom_record_to_nps_addr(ras_core, + record, &pa, nps); + if (!ret) + record->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(pa); + } + + record->cur_nps = nps; + + return ret; +} + +static int umc_v12_0_eeprom_record_to_nps_pages(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps, + uint64_t *pfns, uint32_t num) +{ + return lookup_bad_pages_in_a_row(ras_core, + record, nps, pfns, num, 0, false); +} + +static int umc_12_0_soc_pa_to_bank(struct ras_core_context *ras_core, + uint64_t soc_pa, + struct umc_bank_addr *bank_addr) +{ + + int channel_hashed = 0; + int channel_real = 0; + int channel_reversed = 0; + int i = 0; + + bank_addr->stack_id = UMC_V12_0_SOC_PA_TO_SID(soc_pa); + bank_addr->bank_group = 0; /* This is a combination of SID & Bank. Needed?? */ + bank_addr->bank = UMC_V12_0_SOC_PA_TO_BANK(soc_pa); + bank_addr->row = UMC_V12_0_SOC_PA_TO_ROW(soc_pa); + bank_addr->column = UMC_V12_0_SOC_PA_TO_COL(soc_pa); + + /* Channel bits 4-6 are hashed. 
Bruteforce reverse the hash */ + channel_hashed = (soc_pa >> UMC_V12_0_PA_CH4_BIT) & 0x7; + + for (i = 0; i < 8; i++) { + channel_reversed = 0; + channel_reversed |= UMC_V12_0_CHANNEL_HASH_CH4((i << 4), soc_pa); + channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH5((i << 4), soc_pa) << 1); + channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH6((i << 4), soc_pa) << 2); + if (channel_reversed == channel_hashed) + channel_real = ((i << 4)) | ((soc_pa >> UMC_V12_0_PA_CH0_BIT) & 0xf); + } + + bank_addr->channel = channel_real; + bank_addr->subchannel = UMC_V12_0_SOC_PA_TO_PC(soc_pa); + + return 0; +} + +static int umc_12_0_bank_to_soc_pa(struct ras_core_context *ras_core, + struct umc_bank_addr bank_addr, + uint64_t *soc_pa) +{ + uint64_t na = 0; + uint64_t tmp_pa = 0; + *soc_pa = 0; + + tmp_pa |= UMC_V12_0_SOC_SID_TO_PA(bank_addr.stack_id); + tmp_pa |= UMC_V12_0_SOC_BANK_TO_PA(bank_addr.bank); + tmp_pa |= UMC_V12_0_SOC_ROW_TO_PA(bank_addr.row); + tmp_pa |= UMC_V12_0_SOC_COL_TO_PA(bank_addr.column); + tmp_pa |= UMC_V12_0_SOC_CH_TO_PA(bank_addr.channel); + tmp_pa |= UMC_V12_0_SOC_PC_TO_PA(bank_addr.subchannel); + + /* Get the NA */ + na = ((tmp_pa >> UMC_V12_0_PA_C2_BIT) << UMC_V12_0_NA_C2_BIT); + na |= tmp_pa & 0xff; + + /* translate umc channel address to soc pa, 3 parts are included */ + tmp_pa = ADDR_OF_32KB_BLOCK(na) | + ADDR_OF_256B_BLOCK(bank_addr.channel) | + OFFSET_IN_256B_BLOCK(na); + + /* the umc channel bits are not original values, they are hashed */ + UMC_V12_0_SET_CHANNEL_HASH(bank_addr.channel, tmp_pa); + + *soc_pa = tmp_pa; + + return 0; +} + +const struct ras_umc_ip_func ras_umc_func_v12_0 = { + .bank_to_eeprom_record = umc_v12_0_bank_to_eeprom_record, + .eeprom_record_to_nps_record = umc_v12_0_eeprom_record_to_nps_record, + .eeprom_record_to_nps_pages = umc_v12_0_eeprom_record_to_nps_pages, + .bank_to_soc_pa = umc_12_0_bank_to_soc_pa, + .soc_pa_to_bank = umc_12_0_soc_pa_to_bank, +}; + diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h new file mode 100644 index 000000000000..8a35ad856165 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __RAS_UMC_V12_0_H__ +#define __RAS_UMC_V12_0_H__ +#include "ras.h" + +/* MCA_UMC_UMC0_MCUMC_ADDRT0 */ +#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL +#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved_MASK 0xFF00000000000000L + +/* MCMP1_IPIDT0 */ +#define MCMP1_IPIDT0__InstanceIdLo__SHIFT 0x0 +#define MCMP1_IPIDT0__HardwareID__SHIFT 0x20 +#define MCMP1_IPIDT0__InstanceIdHi__SHIFT 0x2c +#define MCMP1_IPIDT0__McaType__SHIFT 0x30 + +#define MCMP1_IPIDT0__InstanceIdLo_MASK 0x00000000FFFFFFFFL +#define MCMP1_IPIDT0__HardwareID_MASK 0x00000FFF00000000L +#define MCMP1_IPIDT0__InstanceIdHi_MASK 0x0000F00000000000L +#define MCMP1_IPIDT0__McaType_MASK 0xFFFF000000000000L + +/* number of umc channel instance with memory map register access */ +#define UMC_V12_0_CHANNEL_INSTANCE_NUM 8 +/* number of umc instance with memory map register access */ +#define UMC_V12_0_UMC_INSTANCE_NUM 4 + +/* one piece of normalized address is mapped to 8 pieces of physical address */ +#define UMC_V12_0_NA_MAP_PA_NUM 8 + +/* bank bits in MCA error address */ +#define UMC_V12_0_MCA_B0_BIT 6 +#define UMC_V12_0_MCA_B1_BIT 7 +#define UMC_V12_0_MCA_B2_BIT 8 +#define UMC_V12_0_MCA_B3_BIT 9 + +/* row bits in MCA address */ +#define UMC_V12_0_MCA_R0_BIT 10 + +/* Stack ID bits in SOC physical address */ +#define UMC_V12_0_PA_SID1_BIT 37 +#define UMC_V12_0_PA_SID0_BIT 36 + +/* bank bits in SOC physical address */ +#define UMC_V12_0_PA_B3_BIT 18 +#define UMC_V12_0_PA_B2_BIT 17 +#define UMC_V12_0_PA_B1_BIT 20 +#define UMC_V12_0_PA_B0_BIT 19 + +/* row bits in SOC physical address */ +#define UMC_V12_0_PA_R13_BIT 35 +#define UMC_V12_0_PA_R12_BIT 34 +#define UMC_V12_0_PA_R11_BIT 33 +#define UMC_V12_0_PA_R10_BIT 32 +#define UMC_V12_0_PA_R9_BIT 31 +#define UMC_V12_0_PA_R8_BIT 30 +#define UMC_V12_0_PA_R7_BIT 29 +#define UMC_V12_0_PA_R6_BIT 28 +#define UMC_V12_0_PA_R5_BIT 27 +#define UMC_V12_0_PA_R4_BIT 26 +#define UMC_V12_0_PA_R3_BIT 25 +#define UMC_V12_0_PA_R2_BIT 24 +#define UMC_V12_0_PA_R1_BIT 23 +#define UMC_V12_0_PA_R0_BIT 22 + +/* column bits in SOC physical address */ +#define UMC_V12_0_PA_C4_BIT 21 +#define UMC_V12_0_PA_C3_BIT 16 +#define UMC_V12_0_PA_C2_BIT 15 +#define UMC_V12_0_PA_C1_BIT 6 +#define UMC_V12_0_PA_C0_BIT 5 + +/* channel index bits in SOC physical address */ +#define UMC_V12_0_PA_CH6_BIT 14 +#define UMC_V12_0_PA_CH5_BIT 13 +#define UMC_V12_0_PA_CH4_BIT 12 +#define UMC_V12_0_PA_CH3_BIT 11 +#define UMC_V12_0_PA_CH2_BIT 10 +#define UMC_V12_0_PA_CH1_BIT 9 +#define UMC_V12_0_PA_CH0_BIT 8 + +/* Pseudochannel index bits in SOC physical address */ +#define UMC_V12_0_PA_PC0_BIT 7 + +#define UMC_V12_0_NA_C2_BIT 8 + +#define UMC_V12_0_SOC_PA_TO_SID(pa) \ + ((((pa >> UMC_V12_0_PA_SID0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_SID1_BIT) & 0x1ULL) << 1ULL)) + +#define UMC_V12_0_SOC_PA_TO_BANK(pa) \ + ((((pa >> UMC_V12_0_PA_B0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_B1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_B2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_B3_BIT) & 0x1ULL) << 3ULL)) + +#define UMC_V12_0_SOC_PA_TO_ROW(pa) \ + ((((pa >> UMC_V12_0_PA_R0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_R1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_R2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_R3_BIT) & 0x1ULL) << 3ULL) | \ + (((pa >> UMC_V12_0_PA_R4_BIT) & 0x1ULL) << 4ULL) | \ + (((pa >> UMC_V12_0_PA_R5_BIT) & 0x1ULL) << 5ULL) 
| \ + (((pa >> UMC_V12_0_PA_R6_BIT) & 0x1ULL) << 6ULL) | \ + (((pa >> UMC_V12_0_PA_R7_BIT) & 0x1ULL) << 7ULL) | \ + (((pa >> UMC_V12_0_PA_R8_BIT) & 0x1ULL) << 8ULL) | \ + (((pa >> UMC_V12_0_PA_R9_BIT) & 0x1ULL) << 9ULL) | \ + (((pa >> UMC_V12_0_PA_R10_BIT) & 0x1ULL) << 10ULL) | \ + (((pa >> UMC_V12_0_PA_R11_BIT) & 0x1ULL) << 11ULL) | \ + (((pa >> UMC_V12_0_PA_R12_BIT) & 0x1ULL) << 12ULL) | \ + (((pa >> UMC_V12_0_PA_R13_BIT) & 0x1ULL) << 13ULL)) + +#define UMC_V12_0_SOC_PA_TO_COL(pa) \ + ((((pa >> UMC_V12_0_PA_C0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_C1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_C2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_C3_BIT) & 0x1ULL) << 3ULL) | \ + (((pa >> UMC_V12_0_PA_C4_BIT) & 0x1ULL) << 4ULL)) + +#define UMC_V12_0_SOC_PA_TO_CH(pa) \ + ((((pa >> UMC_V12_0_PA_CH0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_CH1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_CH2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_CH3_BIT) & 0x1ULL) << 3ULL) | \ + (((pa >> UMC_V12_0_PA_CH4_BIT) & 0x1ULL) << 4ULL) | \ + (((pa >> UMC_V12_0_PA_CH5_BIT) & 0x1ULL) << 5ULL) | \ + (((pa >> UMC_V12_0_PA_CH6_BIT) & 0x1ULL) << 6ULL)) + +#define UMC_V12_0_SOC_PA_TO_PC(pa) (((pa >> UMC_V12_0_PA_PC0_BIT) & 0x1ULL) << 0ULL) + +#define UMC_V12_0_SOC_SID_TO_PA(sid) \ + ((((sid >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_SID0_BIT) | \ + (((sid >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_SID1_BIT)) + +#define UMC_V12_0_SOC_BANK_TO_PA(bank) \ + ((((bank >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_B0_BIT) | \ + (((bank >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_B1_BIT) | \ + (((bank >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_B2_BIT) | \ + (((bank >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_B3_BIT)) + +#define UMC_V12_0_SOC_ROW_TO_PA(row) \ + ((((row >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_R0_BIT) | \ + (((row >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_R1_BIT) | \ + (((row >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_R2_BIT) | \ + (((row >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_R3_BIT) | \ + (((row >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_R4_BIT) | \ + (((row >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_R5_BIT) | \ + (((row >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_R6_BIT) | \ + (((row >> 7ULL) & 0x1ULL) << UMC_V12_0_PA_R7_BIT) | \ + (((row >> 8ULL) & 0x1ULL) << UMC_V12_0_PA_R8_BIT) | \ + (((row >> 9ULL) & 0x1ULL) << UMC_V12_0_PA_R9_BIT) | \ + (((row >> 10ULL) & 0x1ULL) << UMC_V12_0_PA_R10_BIT) | \ + (((row >> 11ULL) & 0x1ULL) << UMC_V12_0_PA_R11_BIT) | \ + (((row >> 12ULL) & 0x1ULL) << UMC_V12_0_PA_R12_BIT) | \ + (((row >> 13ULL) & 0x1ULL) << UMC_V12_0_PA_R13_BIT)) + +#define UMC_V12_0_SOC_COL_TO_PA(col) \ + ((((col >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_C0_BIT) | \ + (((col >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_C1_BIT) | \ + (((col >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_C2_BIT) | \ + (((col >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_C3_BIT) | \ + (((col >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_C4_BIT)) + +#define UMC_V12_0_SOC_CH_TO_PA(ch) \ + ((((ch >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_CH0_BIT) | \ + (((ch >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_CH1_BIT) | \ + (((ch >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_CH2_BIT) | \ + (((ch >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_CH3_BIT) | \ + (((ch >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_CH4_BIT) | \ + (((ch >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_CH5_BIT) | \ + (((ch >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_CH6_BIT)) + +#define UMC_V12_0_SOC_PC_TO_PA(pc) (((pc >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_PC0_BIT) + +/* bank hash settings */ +#define UMC_V12_0_XOR_EN0 1 +#define UMC_V12_0_XOR_EN1 1 +#define UMC_V12_0_XOR_EN2 1 +#define UMC_V12_0_XOR_EN3 1 +#define UMC_V12_0_COL_XOR0 0x0 
+#define UMC_V12_0_COL_XOR1 0x0 +#define UMC_V12_0_COL_XOR2 0x800 +#define UMC_V12_0_COL_XOR3 0x1000 +#define UMC_V12_0_ROW_XOR0 0x11111 +#define UMC_V12_0_ROW_XOR1 0x22222 +#define UMC_V12_0_ROW_XOR2 0x4444 +#define UMC_V12_0_ROW_XOR3 0x8888 + +/* channel hash settings */ +#define UMC_V12_0_HASH_4K 0 +#define UMC_V12_0_HASH_64K 1 +#define UMC_V12_0_HASH_2M 1 +#define UMC_V12_0_HASH_1G 1 +#define UMC_V12_0_HASH_1T 1 + +/* XOR some bits of PA into CH4~CH6 bits (bits 12~14 of PA), + * hash bit is only effective when related setting is enabled + */ +#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \ + (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ + (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ + (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ + (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T)) +#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \ + (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ + (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ + (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ + (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T)) +#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \ + (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ + (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ + (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ + (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \ + (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_1T)) +#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \ + (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \ + (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \ + (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \ + (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \ + } while (0) + + +/* + * (addr / 256) * 4096, the higher 26 bits in ErrorAddr + * is the index of 4KB block + */ +#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4) +/* + * (addr / 256) * 8192, the higher 26 bits in ErrorAddr + * is the index of 8KB block + */ +#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) +/* + * (addr / 256) * 32768, the higher 26 bits in ErrorAddr + * is the index of 32KB block + */ +#define ADDR_OF_32KB_BLOCK(addr) (((addr) & ~0xffULL) << 7) +/* channel index is the index of 256B block */ +#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8) +/* offset in 256B block */ +#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL) + + +#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \ + ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \ + (0x1ULL << UMC_V12_0_PA_C4_BIT) | \ + (0x1ULL << UMC_V12_0_PA_R13_BIT))) + +#define ACA_IPID_HI_2_UMC_AID(_ipid_hi) (((_ipid_hi) >> 2) & 0x3) +#define ACA_IPID_LO_2_UMC_CH(_ipid_lo) \ + (((((_ipid_lo) >> 20) & 0x1) * 4) + (((_ipid_lo) >> 12) & 0xF)) +#define ACA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7) + +#define ACA_IPID_2_DIE_ID(ipid) ((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) >> 2) & 0x03) +#define ACA_IPID_2_UMC_CH(ipid) \ + (ACA_IPID_LO_2_UMC_CH(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo))) + +#define ACA_IPID_2_UMC_INST(ipid) \ + (ACA_IPID_LO_2_UMC_INST(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo))) + +#define ACA_IPID_2_SOCKET_ID(ipid) \ + (((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \ + (REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03)) + +#define ACA_ADDR_2_ERR_ADDR(addr) \ + REG_GET_FIELD(addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr) + +/* R13 bit shift should be considered,
double the number */ +#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2) + + +/* C2, C3, C4, R13, four MCA bits are looped in page retirement */ +#define UMC_V12_0_RETIRE_LOOP_BITS 4 + +/* invalid node instance value */ +#define UMC_INV_AID_NODE 0xffff + +#define UMC_V12_0_AID_NUM_MAX 4 +#define UMC_V12_0_SOCKET_NUM_MAX 8 + +#define UMC_V12_0_TOTAL_CHANNEL_NUM \ + (UMC_V12_0_AID_NUM_MAX * UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM) + +/* one device has 192GB HBM */ +#define SOCKET_LFB_SIZE 0x3000000000ULL + +extern const struct ras_umc_ip_func ras_umc_func_v12_0; + +int ras_umc_get_badpage_count(struct ras_core_context *ras_core); +int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record); +#endif + diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index 2ad33559a33a..5a66948ffd24 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -111,6 +111,7 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc, static int komeda_crtc_prepare(struct komeda_crtc *kcrtc) { + struct drm_device *drm = kcrtc->base.dev; struct komeda_dev *mdev = kcrtc->base.dev->dev_private; struct komeda_pipeline *master = kcrtc->master; struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state); @@ -128,8 +129,8 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc) err = mdev->funcs->change_opmode(mdev, new_mode); if (err) { - DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,", - mdev->dpmode, new_mode); + drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,", + mdev->dpmode, new_mode); goto unlock; } @@ -142,18 +143,18 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc) if (new_mode != KOMEDA_MODE_DUAL_DISP) { err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st)); if (err) - DRM_ERROR("failed to set aclk.\n"); + drm_err(drm, "failed to set aclk.\n"); err = clk_prepare_enable(mdev->aclk); if (err) - DRM_ERROR("failed to enable aclk.\n"); + drm_err(drm, "failed to enable aclk.\n"); } err = clk_set_rate(master->pxlclk, mode->crtc_clock * 1000); if (err) - DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id); + drm_err(drm, "failed to set pxlclk for pipe%d\n", master->id); err = clk_prepare_enable(master->pxlclk); if (err) - DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id); + drm_err(drm, "failed to enable pxl clk for pipe%d.\n", master->id); unlock: mutex_unlock(&mdev->lock); @@ -164,6 +165,7 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc) static int komeda_crtc_unprepare(struct komeda_crtc *kcrtc) { + struct drm_device *drm = kcrtc->base.dev; struct komeda_dev *mdev = kcrtc->base.dev->dev_private; struct komeda_pipeline *master = kcrtc->master; u32 new_mode; @@ -180,8 +182,8 @@ komeda_crtc_unprepare(struct komeda_crtc *kcrtc) err = mdev->funcs->change_opmode(mdev, new_mode); if (err) { - DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,", - mdev->dpmode, new_mode); + drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,", + mdev->dpmode, new_mode); goto unlock; } @@ -200,6 +202,7 @@ komeda_crtc_unprepare(struct komeda_crtc *kcrtc) void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, struct komeda_events *evts) { + struct drm_device *drm = kcrtc->base.dev; struct drm_crtc *crtc = &kcrtc->base; u32 events = evts->pipes[kcrtc->master->id]; @@ -212,7 +215,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, if (wb_conn) drm_writeback_signal_completion(&wb_conn->base, 
0); else - DRM_WARN("CRTC[%d]: EOW happen but no wb_connector.\n", + drm_warn(drm, "CRTC[%d]: EOW happen but no wb_connector.\n", drm_crtc_index(&kcrtc->base)); } /* will handle it together with the write back support */ @@ -236,7 +239,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, crtc->state->event = NULL; drm_crtc_send_vblank_event(crtc, event); } else { - DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n", + drm_warn(drm, "CRTC[%d]: FLIP happened but no pending commit.\n", drm_crtc_index(&kcrtc->base)); } spin_unlock_irqrestore(&crtc->dev->event_lock, flags); @@ -309,7 +312,7 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc, /* wait the flip take affect.*/ if (wait_for_completion_timeout(flip_done, HZ) == 0) { - DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id); + drm_err(drm, "wait pipe%d flip done timeout\n", kcrtc->master->id); if (!input_flip_done) { unsigned long flags; @@ -562,6 +565,7 @@ static const struct drm_crtc_funcs komeda_crtc_funcs = { int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev) { + struct drm_device *drm = &kms->base; struct komeda_crtc *crtc; struct komeda_pipeline *master; char str[16]; @@ -581,7 +585,7 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, else sprintf(str, "None"); - DRM_INFO("CRTC-%d: master(pipe-%d) slave(%s).\n", + drm_info(drm, "CRTC-%d: master(pipe-%d) slave(%s).\n", kms->n_crtcs, master->id, str); kms->n_crtcs++; @@ -613,6 +617,7 @@ static int komeda_attach_bridge(struct device *dev, struct komeda_pipeline *pipe, struct drm_encoder *encoder) { + struct drm_device *drm = encoder->dev; struct drm_bridge *bridge; int err; @@ -624,7 +629,7 @@ static int komeda_attach_bridge(struct device *dev, err = drm_bridge_attach(encoder, bridge, NULL, 0); if (err) - dev_err(dev, "bridge_attach() failed for pipe: %s\n", + drm_err(drm, "bridge_attach() failed for pipe: %s\n", of_node_full_name(pipe->of_node)); return err; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c index 901f938aefe0..3ca461eb0a24 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "komeda_framebuffer.h" #include "komeda_dev.h" diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 806da0aaedf7..4b4a08cb396d 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index c3179d74f3f5..81d45f2dd6a7 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index bc5f5e9798c3..b765f6c9eea4 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 600af5ad81b1..47733c85d271 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 
87f2e5ee8790..f1a5014bcfa1 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -263,7 +263,7 @@ static int malidp_se_check_scaling(struct malidp_plane *mp, struct drm_plane_state *state) { struct drm_crtc_state *crtc_state = - drm_atomic_get_existing_crtc_state(state->state, state->crtc); + drm_atomic_get_new_crtc_state(state->state, state->crtc); struct malidp_crtc_state *mc; u32 src_w, src_h; int ret; diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 0900e4466ffb..033b19b31f63 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -13,6 +13,7 @@ #include #include +#include #include #include diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c index a763349dd89f..2445365c823f 100644 --- a/drivers/gpu/drm/armada/armada_debugfs.c +++ b/drivers/gpu/drm/armada/armada_debugfs.c @@ -12,6 +12,7 @@ #include #include +#include #include "armada_crtc.h" #include "armada_drm.h" diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c index aa4289127086..77098928f821 100644 --- a/drivers/gpu/drm/armada/armada_fb.c +++ b/drivers/gpu/drm/armada/armada_fb.c @@ -6,6 +6,7 @@ #include #include #include +#include #include "armada_drm.h" #include "armada_fb.h" diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index cb53cc91bafb..8bbae94804f8 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "armada_crtc.h" #include "armada_drm.h" @@ -28,8 +29,6 @@ static void armada_fbdev_fb_destroy(struct fb_info *info) fbh->fb->funcs->destroy(fbh->fb); drm_client_release(&fbh->client); - drm_fb_helper_unprepare(fbh); - kfree(fbh); } static const struct fb_ops armada_fb_ops = { @@ -45,10 +44,10 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = fbh->dev; + struct fb_info *info = fbh->info; struct drm_mode_fb_cmd2 mode; struct armada_framebuffer *dfb; struct armada_gem_object *obj; - struct fb_info *info; int size, ret; void *ptr; @@ -92,12 +91,6 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, if (IS_ERR(dfb)) return PTR_ERR(dfb); - info = drm_fb_helper_alloc_info(fbh); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_fballoc; - } - info->fbops = &armada_fb_ops; info->fix.smem_start = obj->phys_addr; info->fix.smem_len = obj->obj.size; @@ -113,8 +106,4 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, (unsigned long long)obj->phys_addr); return 0; - - err_fballoc: - dfb->fb.funcs->destroy(&dfb->fb); - return ret; } diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 1a1680d71486..35fcfa0d85ff 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -10,6 +10,7 @@ #include #include +#include #include "armada_drm.h" #include "armada_gem.h" diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 3b9bd8ecda13..21fd3b4ba10f 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "armada_crtc.h" #include "armada_drm.h" diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c index cc47c032dbc1..a0326b4f568e 
100644 --- a/drivers/gpu/drm/armada/armada_plane.c +++ b/drivers/gpu/drm/armada/armada_plane.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "armada_crtc.h" #include "armada_drm.h" @@ -94,12 +95,7 @@ int armada_drm_plane_atomic_check(struct drm_plane *plane, return 0; } - if (state) - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); - else - crtc_state = crtc->state; - + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, 0, INT_MAX, true, false); diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile index 2547613155da..cdbcba3b43ad 100644 --- a/drivers/gpu/drm/ast/Makefile +++ b/drivers/gpu/drm/ast/Makefile @@ -6,7 +6,9 @@ ast-y := \ ast_2000.o \ ast_2100.o \ + ast_2200.o \ ast_2300.o \ + ast_2400.o \ ast_2500.o \ ast_2600.o \ ast_cursor.o \ @@ -14,7 +16,6 @@ ast-y := \ ast_dp501.o \ ast_dp.o \ ast_drv.o \ - ast_main.o \ ast_mm.o \ ast_mode.o \ ast_post.o \ diff --git a/drivers/gpu/drm/ast/ast_2000.c b/drivers/gpu/drm/ast/ast_2000.c index 41c2aa1e425a..fa3bc23ce098 100644 --- a/drivers/gpu/drm/ast/ast_2000.c +++ b/drivers/gpu/drm/ast/ast_2000.c @@ -27,6 +27,9 @@ */ #include +#include + +#include #include "ast_drv.h" #include "ast_post.h" @@ -147,3 +150,108 @@ int ast_2000_post(struct ast_device *ast) return 0; } + +/* + * Mode setting + */ + +const struct ast_vbios_dclk_info ast_2000_dclk_table[] = { + {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */ + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ + {0xee, 0x67, 0x01}, /* 04: VCLK40 */ + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ + {0xc6, 0x64, 0x01}, /* 06: VCLK50 */ + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ + {0x7b, 0x63, 0x00}, /* 09: VCLK75 */ + {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */ + {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */ + {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */ + {0x85, 0x24, 0x00}, /* 0d: VCLK135 */ + {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */ + {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ + {0x77, 0x58, 0x80}, /* 17: VCLK119 */ + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ + {0x3b, 0x2c, 0x81}, /* 1a: VCLK118_25 */ +}; + +/* + * Device initialization + */ + +void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post) +{ + enum ast_tx_chip tx_chip = AST_TX_NONE; + u8 vgacra3; + + /* + * VGACRA3 Enhanced Color Mode Register, check if DVO is already + * enabled, in that case, assume we have a SIL164 TMDS transmitter + * + * Don't make that assumption if the chip wasn't enabled and + * is at power-on reset, otherwise we'll incorrectly "detect" a + * SIL164 when there is none.
+ */ + if (!need_post) { + vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff); + if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED) + tx_chip = AST_TX_SIL164; + } + + __ast_device_set_tx_chip(ast, tx_chip); +} + +static const struct ast_device_quirks ast_2000_device_quirks = { + .crtc_mem_req_threshold_low = 31, + .crtc_mem_req_threshold_high = 47, +}; + +struct drm_device *ast_2000_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2000_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2000_detect_tx_chip(ast, need_post); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c index 829e3b8b0d19..05aeb0624d41 100644 --- a/drivers/gpu/drm/ast/ast_2100.c +++ b/drivers/gpu/drm/ast/ast_2100.c @@ -27,6 +27,9 @@ */ #include +#include + +#include #include "ast_drv.h" #include "ast_post.h" @@ -386,3 +389,92 @@ int ast_2100_post(struct ast_device *ast) return 0; } + +/* + * Widescreen detection + */ + +/* Try to detect WSXGA+ on Gen2+ */ +bool __ast_2100_detect_wsxga_p(struct ast_device *ast) +{ + u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0); + + if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC)) + return true; + if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN) + return true; + + return false; +} + +/* Try to detect WUXGA on Gen2+ */ +bool __ast_2100_detect_wuxga(struct ast_device *ast) +{ + u8 vgacrd1; + + if (ast->support_fullhd) { + vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1); + if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA)) + return true; + } + + return false; +} + +static void ast_2100_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast)) { + ast->support_wsxga_p = true; + if (ast->chip == AST2100) + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2100_device_quirks = { + .crtc_mem_req_threshold_low = 47, + .crtc_mem_req_threshold_high = 63, +}; + +struct drm_device *ast_2100_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2100_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2000_detect_tx_chip(ast, need_post); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ast_2100_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2200.c b/drivers/gpu/drm/ast/ast_2200.c new 
file mode 100644 index 000000000000..b64345d11ffa --- /dev/null +++ b/drivers/gpu/drm/ast/ast_2200.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + */ +/* + * Authors: Dave Airlie + */ + +#include + +#include + +#include "ast_drv.h" + +static void ast_2200_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast)) { + ast->support_wsxga_p = true; + if (ast->chip == AST2200) + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2200_device_quirks = { + .crtc_mem_req_threshold_low = 47, + .crtc_mem_req_threshold_high = 63, +}; + +struct drm_device *ast_2200_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2200_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2000_detect_tx_chip(ast, need_post); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ast_2200_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} + diff --git a/drivers/gpu/drm/ast/ast_2300.c b/drivers/gpu/drm/ast/ast_2300.c index dc2a32244689..5f50d9f91ffd 100644 --- a/drivers/gpu/drm/ast/ast_2300.c +++ b/drivers/gpu/drm/ast/ast_2300.c @@ -27,6 +27,12 @@ */ #include +#include +#include + +#include +#include +#include #include "ast_drv.h" #include "ast_post.h" @@ -1326,3 +1332,132 @@ int ast_2300_post(struct ast_device *ast) return 0; } + +/* + * Device initialization + */ + +void ast_2300_detect_tx_chip(struct ast_device *ast) +{ + enum ast_tx_chip tx_chip = AST_TX_NONE; + struct drm_device *dev = &ast->base; + u8 vgacrd1; + + /* + * On AST GEN4+, look at the configuration set by the SoC in + * the SOC scratch register #1 bits 11:8 (interestingly marked + * as "reserved" in the spec) + */ + vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, + 
AST_IO_VGACRD1_TX_TYPE_MASK); + switch (vgacrd1) { + /* + * GEN4 to GEN6 + */ + case AST_IO_VGACRD1_TX_SIL164_VBIOS: + tx_chip = AST_TX_SIL164; + break; + case AST_IO_VGACRD1_TX_DP501_VBIOS: + ast->dp501_fw_addr = drmm_kzalloc(dev, SZ_32K, GFP_KERNEL); + if (ast->dp501_fw_addr) { + /* backup firmware */ + if (ast_backup_fw(ast, ast->dp501_fw_addr, SZ_32K)) { + drmm_kfree(dev, ast->dp501_fw_addr); + ast->dp501_fw_addr = NULL; + } + } + fallthrough; + case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: + tx_chip = AST_TX_DP501; + break; + /* + * GEN7+ + */ + case AST_IO_VGACRD1_TX_ASTDP: + tx_chip = AST_TX_ASTDP; + break; + /* + * Several of the listed TX chips are not explicitly supported + * by the ast driver. If these exist in real-world devices, they + * are most likely reported as VGA or SIL164 outputs. We warn here + * to get bug reports for these devices. If none come in for some + * time, we can begin to fail device probing on these values. + */ + case AST_IO_VGACRD1_TX_ITE66121_VBIOS: + drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + break; + case AST_IO_VGACRD1_TX_CH7003_VBIOS: + drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + break; + case AST_IO_VGACRD1_TX_ANX9807_VBIOS: + drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + break; + } + + __ast_device_set_tx_chip(ast, tx_chip); +} + +static void ast_2300_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1300) { + ast->support_wsxga_p = true; + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2300_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, +}; + +struct drm_device *ast_2300_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2300_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2300_detect_tx_chip(ast); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + /* map reserved buffer */ + ast->dp501_fw_buf = NULL; + if (ast->vram_size < pci_resource_len(pdev, 0)) { + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); + if (!ast->dp501_fw_buf) + drm_info(dev, "failed to map reserved buffer!\n"); + } + + ast_2300_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2400.c b/drivers/gpu/drm/ast/ast_2400.c new file mode 100644 index 000000000000..2e6befd24f91 --- /dev/null +++ b/drivers/gpu/drm/ast/ast_2400.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: Dave Airlie + */ + +#include + +#include +#include + +#include "ast_drv.h" + +static void ast_2400_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1400) { + ast->support_wsxga_p = true; + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2400_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, +}; + +struct drm_device *ast_2400_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2400_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2300_detect_tx_chip(ast); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + /* map reserved buffer */ + ast->dp501_fw_buf = NULL; + if (ast->vram_size < pci_resource_len(pdev, 0)) { + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); + if (!ast->dp501_fw_buf) + drm_info(dev, "failed to map reserved buffer!\n"); + } + + ast_2400_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2500.c b/drivers/gpu/drm/ast/ast_2500.c index 1e541498ea67..2a52af0ded56 100644 --- a/drivers/gpu/drm/ast/ast_2500.c +++ b/drivers/gpu/drm/ast/ast_2500.c @@ -27,7 +27,9 @@ */ #include +#include +#include #include #include "ast_drv.h" @@ -567,3 +569,107 @@ int ast_2500_post(struct ast_device *ast) return 0; } + +/* + * Mode setting + */ + +const struct ast_vbios_dclk_info ast_2500_dclk_table[] = { + {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */ + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ + {0xee, 0x67, 0x01}, /* 04: VCLK40 */ + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ + {0xc6, 0x64, 0x01}, /* 06: VCLK50 */ + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ + 
{0x80, 0x64, 0x00}, /* 08: VCLK65 */ + {0x7b, 0x63, 0x00}, /* 09: VCLK75 */ + {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */ + {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */ + {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */ + {0x85, 0x24, 0x00}, /* 0d: VCLK135 */ + {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */ + {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ + {0x58, 0x01, 0x42}, /* 17: VCLK119 */ + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ + {0x44, 0x20, 0x43}, /* 1a: VCLK118_25 */ +}; + +/* + * Device initialization + */ + +static void ast_2500_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST2510) { + ast->support_wsxga_p = true; + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2500_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, + .crtc_hsync_precatch_needed = true, +}; + +struct drm_device *ast_2500_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2500_device_quirks); + + ast->dclk_table = ast_2500_dclk_table; + + ast_2300_detect_tx_chip(ast); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + /* map reserved buffer */ + ast->dp501_fw_buf = NULL; + if (ast->vram_size < pci_resource_len(pdev, 0)) { + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); + if (!ast->dp501_fw_buf) + drm_info(dev, "failed to map reserved buffer!\n"); + } + + ast_2500_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2600.c b/drivers/gpu/drm/ast/ast_2600.c index 8d75a47444f5..dee78fd5b022 100644 --- a/drivers/gpu/drm/ast/ast_2600.c +++ b/drivers/gpu/drm/ast/ast_2600.c @@ -26,6 +26,10 @@ * Authors: Dave Airlie */ +#include + +#include + #include "ast_drv.h" #include "ast_post.h" @@ -42,3 +46,71 @@ int ast_2600_post(struct ast_device *ast) return 0; } + +/* + * Device initialization + */ + +static void ast_2600_detect_widescreen(struct ast_device *ast) +{ + ast->support_wsxga_p = true; + ast->support_fullhd = true; + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2600_device_quirks = { + .crtc_mem_req_threshold_low = 160, + .crtc_mem_req_threshold_high = 224, + .crtc_hsync_precatch_needed = true, + .crtc_hsync_add4_needed = true, +}; + +struct drm_device *ast_2600_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct 
ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2600_device_quirks); + + ast->dclk_table = ast_2500_dclk_table; + + ast_2300_detect_tx_chip(ast); + + switch (ast->tx_chip) { + case AST_TX_ASTDP: + ret = ast_post_gpu(ast); + break; + default: + ret = 0; + if (need_post) + ret = ast_post_gpu(ast); + break; + } + if (ret) + return ERR_PTR(ret); + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ast_2600_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 473faa92d08c..b9a9b050b546 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include "ast_drv.h" @@ -46,6 +47,34 @@ static int ast_modeset = -1; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, ast_modeset, int, 0400); +void ast_device_init(struct ast_device *ast, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + const struct ast_device_quirks *quirks) +{ + ast->quirks = quirks; + ast->chip = chip; + ast->config_mode = config_mode; + ast->regs = regs; + ast->ioregs = ioregs; +} + +void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip) +{ + static const char * const info_str[] = { + "analog VGA", + "Sil164 TMDS transmitter", + "DP501 DisplayPort transmitter", + "ASPEED DisplayPort transmitter", + }; + + drm_info(&ast->base, "Using %s\n", info_str[tx_chip]); + + ast->tx_chip = tx_chip; +} + /* * DRM driver */ @@ -266,7 +295,7 @@ static int ast_detect_chip(struct pci_dev *pdev, *chip_out = chip; *config_mode_out = config_mode; - return 0; + return __AST_CHIP_GEN(chip); } static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -277,6 +306,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *ioregs; enum ast_config_mode config_mode; enum ast_chip chip; + unsigned int chip_gen; struct drm_device *drm; bool need_post = false; @@ -349,10 +379,43 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode); - if (ret) + if (ret < 0) return ret; + chip_gen = ret; - drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post); + switch (chip_gen) { + case 1: + drm = ast_2000_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 2: + drm = ast_2100_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 3: + drm = ast_2200_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 4: + drm = ast_2300_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 5: + drm = ast_2400_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 6: + drm = ast_2500_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 7: + drm = ast_2600_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + default: + dev_err(&pdev->dev, "Gen%d not supported\n", chip_gen); + return -ENODEV; + } if (IS_ERR(drm)) return PTR_ERR(drm); pci_set_drvdata(pdev, drm); diff 
--git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index d41bd876167c..787e38c6c17d 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -164,9 +164,31 @@ to_ast_connector(struct drm_connector *connector) * Device */ +struct ast_device_quirks { + /* + * CRTC memory request threshold + */ + unsigned char crtc_mem_req_threshold_low; + unsigned char crtc_mem_req_threshold_high; + + /* + * Adjust hsync values to load next scanline early. Signalled + * by AST2500PreCatchCRT in VBIOS mode flags. + */ + bool crtc_hsync_precatch_needed; + + /* + * Workaround for modes with HSync Time that is not a multiple + * of 8 (e.g., 1920x1080@60Hz, HSync +44 pixels). + */ + bool crtc_hsync_add4_needed; +}; + struct ast_device { struct drm_device base; + const struct ast_device_quirks *quirks; + void __iomem *regs; void __iomem *ioregs; void __iomem *dp501_fw_buf; @@ -174,6 +196,8 @@ struct ast_device { enum ast_config_mode config_mode; enum ast_chip chip; + const struct ast_vbios_dclk_info *dclk_table; + void __iomem *vram; unsigned long vram_base; unsigned long vram_size; @@ -217,14 +241,6 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev) return container_of(dev, struct ast_device, base); } -struct drm_device *ast_device_create(struct pci_dev *pdev, - const struct drm_driver *drv, - enum ast_chip chip, - enum ast_config_mode config_mode, - void __iomem *regs, - void __iomem *ioregs, - bool need_post); - static inline unsigned long __ast_gen(struct ast_device *ast) { return __AST_CHIP_GEN(ast->chip); @@ -415,21 +431,89 @@ struct ast_crtc_state { int ast_mm_init(struct ast_device *ast); +/* ast_drv.c */ +void ast_device_init(struct ast_device *ast, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + const struct ast_device_quirks *quirks); +void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip); + /* ast_2000.c */ int ast_2000_post(struct ast_device *ast); +extern const struct ast_vbios_dclk_info ast_2000_dclk_table[]; +void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post); +struct drm_device *ast_2000_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast_2100.c */ int ast_2100_post(struct ast_device *ast); +bool __ast_2100_detect_wsxga_p(struct ast_device *ast); +bool __ast_2100_detect_wuxga(struct ast_device *ast); +struct drm_device *ast_2100_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); + +/* ast_2200.c */ +struct drm_device *ast_2200_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast_2300.c */ int ast_2300_post(struct ast_device *ast); +void ast_2300_detect_tx_chip(struct ast_device *ast); +struct drm_device *ast_2300_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); + +/* ast_2400.c */ +struct drm_device *ast_2400_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem 
*ioregs, + bool need_post); /* ast_2500.c */ void ast_2500_patch_ahb(void __iomem *regs); int ast_2500_post(struct ast_device *ast); +extern const struct ast_vbios_dclk_info ast_2500_dclk_table[]; +struct drm_device *ast_2500_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast_2600.c */ int ast_2600_post(struct ast_device *ast); +struct drm_device *ast_2600_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast post */ int ast_post_gpu(struct ast_device *ast); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c deleted file mode 100644 index 3eea6a6cdacd..000000000000 --- a/drivers/gpu/drm/ast/ast_main.c +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
- * - */ -/* - * Authors: Dave Airlie - */ - -#include -#include - -#include -#include -#include -#include - -#include "ast_drv.h" - -/* Try to detect WSXGA+ on Gen2+ */ -static bool __ast_2100_detect_wsxga_p(struct ast_device *ast) -{ - u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0); - - if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC)) - return true; - if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN) - return true; - - return false; -} - -/* Try to detect WUXGA on Gen2+ */ -static bool __ast_2100_detect_wuxga(struct ast_device *ast) -{ - u8 vgacrd1; - - if (ast->support_fullhd) { - vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1); - if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA)) - return true; - } - - return false; -} - -static void ast_detect_widescreen(struct ast_device *ast) -{ - ast->support_wsxga_p = false; - ast->support_fullhd = false; - ast->support_wuxga = false; - - if (AST_GEN(ast) >= 7) { - ast->support_wsxga_p = true; - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 6) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - else if (ast->chip == AST2510) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 5) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - else if (ast->chip == AST1400) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 4) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - else if (ast->chip == AST1300) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 3) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) { - if (ast->chip == AST2200) - ast->support_fullhd = true; - } - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 2) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) { - if (ast->chip == AST2100) - ast->support_fullhd = true; - } - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } -} - -static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) -{ - static const char * const info_str[] = { - "analog VGA", - "Sil164 TMDS transmitter", - "DP501 DisplayPort transmitter", - "ASPEED DisplayPort transmitter", - }; - - struct drm_device *dev = &ast->base; - u8 vgacra3, vgacrd1; - - /* Check 3rd Tx option (digital output afaik) */ - ast->tx_chip = AST_TX_NONE; - - if (AST_GEN(ast) <= 3) { - /* - * VGACRA3 Enhanced Color Mode Register, check if DVO is already - * enabled, in that case, assume we have a SIL164 TMDS transmitter - * - * Don't make that assumption if we the chip wasn't enabled and - * is at power-on reset, otherwise we'll incorrectly "detect" a - * SIL164 when there is none. 
- */ - if (!need_post) { - vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff); - if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED) - ast->tx_chip = AST_TX_SIL164; - } - } else { - /* - * On AST GEN4+, look at the configuration set by the SoC in - * the SOC scratch register #1 bits 11:8 (interestingly marked - * as "reserved" in the spec) - */ - vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, - AST_IO_VGACRD1_TX_TYPE_MASK); - switch (vgacrd1) { - /* - * GEN4 to GEN6 - */ - case AST_IO_VGACRD1_TX_SIL164_VBIOS: - ast->tx_chip = AST_TX_SIL164; - break; - case AST_IO_VGACRD1_TX_DP501_VBIOS: - ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL); - if (ast->dp501_fw_addr) { - /* backup firmware */ - if (ast_backup_fw(ast, ast->dp501_fw_addr, 32*1024)) { - drmm_kfree(dev, ast->dp501_fw_addr); - ast->dp501_fw_addr = NULL; - } - } - fallthrough; - case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: - ast->tx_chip = AST_TX_DP501; - break; - /* - * GEN7+ - */ - case AST_IO_VGACRD1_TX_ASTDP: - ast->tx_chip = AST_TX_ASTDP; - break; - /* - * Several of the listed TX chips are not explicitly supported - * by the ast driver. If these exist in real-world devices, they - * are most likely reported as VGA or SIL164 outputs. We warn here - * to get bug reports for these devices. If none come in for some - * time, we can begin to fail device probing on these values. - */ - case AST_IO_VGACRD1_TX_ITE66121_VBIOS: - drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", - vgacrd1, AST_GEN(ast)); - break; - case AST_IO_VGACRD1_TX_CH7003_VBIOS: - drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", - vgacrd1, AST_GEN(ast)); - break; - case AST_IO_VGACRD1_TX_ANX9807_VBIOS: - drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", - vgacrd1, AST_GEN(ast)); - break; - } - } - - drm_info(dev, "Using %s\n", info_str[ast->tx_chip]); -} - -struct drm_device *ast_device_create(struct pci_dev *pdev, - const struct drm_driver *drv, - enum ast_chip chip, - enum ast_config_mode config_mode, - void __iomem *regs, - void __iomem *ioregs, - bool need_post) -{ - struct drm_device *dev; - struct ast_device *ast; - int ret; - - ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); - if (IS_ERR(ast)) - return ERR_CAST(ast); - dev = &ast->base; - - ast->chip = chip; - ast->config_mode = config_mode; - ast->regs = regs; - ast->ioregs = ioregs; - - ast_detect_tx_chip(ast, need_post); - switch (ast->tx_chip) { - case AST_TX_ASTDP: - ret = ast_post_gpu(ast); - break; - default: - ret = 0; - if (need_post) - ret = ast_post_gpu(ast); - break; - } - if (ret) - return ERR_PTR(ret); - - ret = ast_mm_init(ast); - if (ret) - return ERR_PTR(ret); - - /* map reserved buffer */ - ast->dp501_fw_buf = NULL; - if (ast->vram_size < pci_resource_len(pdev, 0)) { - ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); - if (!ast->dp501_fw_buf) - drm_info(dev, "failed to map reserved buffer!\n"); - } - - ast_detect_widescreen(ast); - - ret = ast_mode_config_init(ast); - if (ret) - return ERR_PTR(ret); - - return dev; -} diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 30b011ed0a05..cd08990a10f9 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include "ast_drv.h" @@ -241,16 +242,15 @@ static void ast_set_std_reg(struct ast_device *ast, ast_set_index_reg(ast, AST_IO_VGAGRI, i, stdtable->gr[i]); } -static void ast_set_crtc_reg(struct ast_device *ast, - struct drm_display_mode *mode, 
+static void ast_set_crtc_reg(struct ast_device *ast, struct drm_display_mode *mode, const struct ast_vbios_enhtable *vmode) { u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0; - u16 temp, precache = 0; + u16 temp; + unsigned char crtc_hsync_precatch = 0; - if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) && - (vmode->flags & AST2500PreCatchCRT)) - precache = 40; + if (ast->quirks->crtc_hsync_precatch_needed && (vmode->flags & AST2500PreCatchCRT)) + crtc_hsync_precatch = 40; ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x00); @@ -276,12 +276,12 @@ static void ast_set_crtc_reg(struct ast_device *ast, jregAD |= 0x01; /* HBE D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x03, 0xE0, (temp & 0x1f)); - temp = ((mode->crtc_hsync_start-precache) >> 3) - 1; + temp = ((mode->crtc_hsync_start - crtc_hsync_precatch) >> 3) - 1; if (temp & 0x100) jregAC |= 0x40; /* HRS D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x04, 0x00, temp); - temp = (((mode->crtc_hsync_end-precache) >> 3) - 1) & 0x3f; + temp = (((mode->crtc_hsync_end - crtc_hsync_precatch) >> 3) - 1) & 0x3f; if (temp & 0x20) jregAD |= 0x04; /* HRE D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05)); @@ -289,8 +289,7 @@ static void ast_set_crtc_reg(struct ast_device *ast, ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAC, 0x00, jregAC); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAD, 0x00, jregAD); - // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels); - if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080)) + if (ast->quirks->crtc_hsync_add4_needed && mode->crtc_vdisplay == 1080) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x02); else ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x00); @@ -348,7 +347,7 @@ static void ast_set_crtc_reg(struct ast_device *ast, ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x09, 0xdf, jreg09); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAE, 0x00, (jregAE | 0x80)); - if (precache) + if (crtc_hsync_precatch) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x80); else ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x00); @@ -370,12 +369,7 @@ static void ast_set_dclk_reg(struct ast_device *ast, struct drm_display_mode *mode, const struct ast_vbios_enhtable *vmode) { - const struct ast_vbios_dclk_info *clk_info; - - if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) - clk_info = &dclk_table_ast2500[vmode->dclk_index]; - else - clk_info = &dclk_table[vmode->dclk_index]; + const struct ast_vbios_dclk_info *clk_info = &ast->dclk_table[vmode->dclk_index]; ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2); @@ -415,20 +409,11 @@ static void ast_set_color_reg(struct ast_device *ast, static void ast_set_crtthd_reg(struct ast_device *ast) { - /* Set Threshold */ - if (IS_AST_GEN7(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0xe0); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0xa0); - } else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x78); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x60); - } else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x3f); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x2f); - } else { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x2f); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x1f); - } + u8 vgacra6 = ast->quirks->crtc_mem_req_threshold_low; + u8 
vgacra7 = ast->quirks->crtc_mem_req_threshold_high; + + ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, vgacra7); + ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, vgacra6); } static void ast_set_sync_reg(struct ast_device *ast, @@ -572,9 +557,14 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane, ast_set_vbios_color_reg(ast, fb->format, ast_crtc_state->vmode); } - drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); - drm_atomic_for_each_plane_damage(&iter, &damage) { - ast_handle_damage(ast_plane, shadow_plane_state->data, fb, &damage); + /* if the buffer comes from another device */ + if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE) == 0) { + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); + drm_atomic_for_each_plane_damage(&iter, &damage) { + ast_handle_damage(ast_plane, shadow_plane_state->data, fb, &damage); + } + + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); } /* diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index f1c9f7e1f1fc..7da5b5c60f41 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -33,66 +33,6 @@ #define HiCModeIndex 3 #define TrueCModeIndex 4 -static const struct ast_vbios_dclk_info dclk_table[] = { - {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ - {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ - {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ - {0x76, 0x63, 0x01}, /* 03: VCLK36 */ - {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ - {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ - {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ - {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ - {0x80, 0x64, 0x00}, /* 08: VCLK65 */ - {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ - {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ - {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ - {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ - {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ - {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ - {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ - {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ - {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ - {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ - {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ - {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ - {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ - {0x77, 0x58, 0x80}, /* 17: VCLK119 */ - {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ - {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ - {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */ -}; - -static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { - {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ - {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ - {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ - {0x76, 0x63, 0x01}, /* 03: VCLK36 */ - {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ - {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ - {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ - {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ - {0x80, 0x64, 0x00}, /* 08: VCLK65 */ - {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ - {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ - {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ - {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ - {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ - {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ - {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ - {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ - {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ - {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ - {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ - {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ - {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ - {0x58, 0x01, 0x42}, /* 17: VCLK119 */ - {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ - {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ - {0x44, 0x20, 0x43}, /* 1A: 
VCLK118_25 */ -}; - static const struct ast_vbios_stdtable vbios_stdtable[] = { /* MD_2_3_400 */ { diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 0f7ffb3ced20..e0efc7309b1b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -215,32 +216,32 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c, if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_XLCDC_CM), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_XLCDC_SD); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_XLCDC_SD, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n"); } regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_HLCDC_DISP), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_HLCDC_SYNC), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_HLCDC_PIXEL_CLK), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n"); clk_disable_unprepare(crtc->dc->hlcdc->sys_clk); pinctrl_pm_select_sleep_state(dev->dev); @@ -269,32 +270,32 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c, if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_PIXEL_CLK, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_SYNC, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_DISP, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n"); if (crtc->dc->desc->is_xlcdc) { regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_CM); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_XLCDC_CM, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_SD); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_XLCDC_SD), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n"); } pm_runtime_put_sync(dev->dev); diff --git 
a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index fa8ad94e431a..dd70894c8f38 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -724,19 +725,19 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev) ret = atmel_hlcdc_create_outputs(dev); if (ret) { - dev_err(dev->dev, "failed to create HLCDC outputs: %d\n", ret); + drm_err(dev, "failed to create HLCDC outputs: %d\n", ret); return ret; } ret = atmel_hlcdc_create_planes(dev); if (ret) { - dev_err(dev->dev, "failed to create planes: %d\n", ret); + drm_err(dev, "failed to create planes: %d\n", ret); return ret; } ret = atmel_hlcdc_crtc_create(dev); if (ret) { - dev_err(dev->dev, "failed to create crtc\n"); + drm_err(dev, "failed to create crtc\n"); return ret; } @@ -778,7 +779,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = clk_prepare_enable(dc->hlcdc->periph_clk); if (ret) { - dev_err(dev->dev, "failed to enable periph_clk\n"); + drm_err(dev, "failed to enable periph_clk\n"); return ret; } @@ -786,13 +787,13 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = drm_vblank_init(dev, 1); if (ret < 0) { - dev_err(dev->dev, "failed to initialize vblank\n"); + drm_err(dev, "failed to initialize vblank\n"); goto err_periph_clk_disable; } ret = atmel_hlcdc_dc_modeset_init(dev); if (ret < 0) { - dev_err(dev->dev, "failed to initialize mode setting\n"); + drm_err(dev, "failed to initialize mode setting\n"); goto err_periph_clk_disable; } @@ -802,7 +803,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = atmel_hlcdc_dc_irq_install(dev, dc->hlcdc->irq); pm_runtime_put_sync(dev->dev); if (ret < 0) { - dev_err(dev->dev, "failed to install IRQ handler\n"); + drm_err(dev, "failed to install IRQ handler\n"); goto err_periph_clk_disable; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h index e1a0bb24b511..53d47f01db0b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h @@ -378,7 +378,8 @@ struct atmel_lcdc_dc_ops { void (*lcdc_update_buffers)(struct atmel_hlcdc_plane *plane, struct atmel_hlcdc_plane_state *state, u32 sr, int i); - void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane); + void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc); void (*lcdc_update_general_settings)(struct atmel_hlcdc_plane *plane, struct atmel_hlcdc_plane_state *state); void (*lcdc_atomic_update)(struct atmel_hlcdc_plane *plane, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 50fee6a93964..0b8a86afb096 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "atmel_hlcdc_dc.h" @@ -92,7 +93,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint) output->bus_fmt = atmel_hlcdc_of_bus_fmt(ep); of_node_put(ep); if (output->bus_fmt < 0) { - dev_err(dev->dev, "endpoint %d: invalid bus width\n", endpoint); + drm_err(dev, "endpoint %d: invalid bus width\n", endpoint); return -EINVAL; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 4a7ba0918eca..92132be9823f 100644 --- 
a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "atmel_hlcdc_dc.h" @@ -365,13 +366,34 @@ void atmel_xlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane, xfactor); /* - * With YCbCr 4:2:2 and YCbYcr 4:2:0 window resampling, configuration - * register LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT is half + * With YCbCr 4:2:0 window resampling, configuration register + * LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT values are half * the value of yfactor and xfactor. + * + * On the other hand, with YCbCr 4:2:2 window resampling, only the + * configuration register LCDC_HEOCFG27.HXSCFACT value is half the value + * of the xfactor; the value of LCDC_HEOCFG25.VXSCFACT is yfactor (no + * division by 2). */ - if (state->base.fb->format->format == DRM_FORMAT_YUV420) { + switch (state->base.fb->format->format) { + /* YCbCr 4:2:2 */ + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_NV61: + xfactor /= 2; + break; + + /* YCbCr 4:2:0 */ + case DRM_FORMAT_YUV420: + case DRM_FORMAT_NV21: yfactor /= 2; xfactor /= 2; + break; + default: + break; } atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config + 2, @@ -714,7 +736,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, if (!hstate->base.crtc || WARN_ON(!fb)) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, s->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, s->crtc); mode = &crtc_state->adjusted_mode; ret = drm_atomic_helper_check_plane_state(s, crtc_state, @@ -816,7 +838,8 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, return 0; } -static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) +static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc) { /* Disable interrupts */ atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IDR, @@ -832,7 +855,8 @@ static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR); } -static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) +static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc) { /* Disable interrupts */ atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_IDR, @@ -842,6 +866,15 @@ static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_ENR, 0); + /* + * Updating XLCDC_xxxCFGx, XLCDC_xxxFBA and XLCDC_xxxEN, + * (where xxx indicates each layer) requires writing one to the + * Update Attribute field for each layer in LCDC_ATTRE register for SAM9X7. 
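+	 *
+	 * Derived note (taken from the statement above, not from the SAM9X7
+	 * reference manual): the XLCDC_xxxEN = 0 write issued just above is
+	 * such an update, so the layer disable only takes effect once the
+	 * matching update bit is set here.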
+ */ + regmap_write(dc->hlcdc->regmap, ATMEL_XLCDC_ATTRE, ATMEL_XLCDC_BASE_UPDATE | + ATMEL_XLCDC_OVR1_UPDATE | ATMEL_XLCDC_OVR3_UPDATE | + ATMEL_XLCDC_HEO_UPDATE); + /* Clear all pending interrupts */ atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_XLCDC_LAYER_ISR); } @@ -852,7 +885,7 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p, struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private; - dc->desc->ops->lcdc_atomic_disable(plane); + dc->desc->ops->lcdc_atomic_disable(plane, dc); } static void atmel_hlcdc_atomic_update(struct atmel_hlcdc_plane *plane, @@ -1034,7 +1067,7 @@ static void atmel_hlcdc_irq_dbg(struct atmel_hlcdc_plane *plane, if (isr & (ATMEL_HLCDC_LAYER_OVR_IRQ(0) | ATMEL_HLCDC_LAYER_OVR_IRQ(1) | ATMEL_HLCDC_LAYER_OVR_IRQ(2))) - dev_dbg(plane->base.dev->dev, "overrun on plane %s\n", + drm_dbg(plane->base.dev, "overrun on plane %s\n", desc->name); } @@ -1051,7 +1084,7 @@ static void atmel_xlcdc_irq_dbg(struct atmel_hlcdc_plane *plane, if (isr & (ATMEL_XLCDC_LAYER_OVR_IRQ(0) | ATMEL_XLCDC_LAYER_OVR_IRQ(1) | ATMEL_XLCDC_LAYER_OVR_IRQ(2))) - dev_dbg(plane->base.dev->dev, "overrun on plane %s\n", + drm_dbg(plane->base.dev, "overrun on plane %s\n", desc->name); } @@ -1140,7 +1173,7 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p) if (state) { if (atmel_hlcdc_plane_alloc_dscrs(p, state)) { kfree(state); - dev_err(p->dev->dev, + drm_err(p->dev, "Failed to allocate initial plane state\n"); return; } diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig index 9a480c6abb85..b9028a5e5a06 100644 --- a/drivers/gpu/drm/bridge/imx/Kconfig +++ b/drivers/gpu/drm/bridge/imx/Kconfig @@ -18,12 +18,23 @@ config DRM_IMX8MP_DW_HDMI_BRIDGE depends on OF depends on COMMON_CLK select DRM_DW_HDMI + imply DRM_IMX8MP_HDMI_PAI imply DRM_IMX8MP_HDMI_PVI imply PHY_FSL_SAMSUNG_HDMI_PHY help Choose this to enable support for the internal HDMI encoder found on the i.MX8MP SoC. +config DRM_IMX8MP_HDMI_PAI + tristate "Freescale i.MX8MP HDMI PAI bridge support" + depends on OF + select DRM_DW_HDMI + select REGMAP + select REGMAP_MMIO + help + Choose this to enable support for the internal HDMI TX Parallel + Audio Interface found on the Freescale i.MX8MP SoC. 
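As a quick sketch of how this option is consumed (illustrative only, not part of the patch): once DRM_IMX8MP_HDMI_PAI is enabled, imx8mp_hdmi_pai_bind() below fills the enable_audio/disable_audio hooks in dw_hdmi_plat_data, and the DW HDMI audio path calls them around stream start/stop with the parameters shown here. The wrapper name is made up for the example; the hook signature follows this patch.

#include <drm/bridge/dw_hdmi.h>

/* Illustrative caller of the hooks filled in by imx8mp_hdmi_pai_bind(). */
static void example_start_pai_audio(struct dw_hdmi *hdmi,
				    const struct dw_hdmi_plat_data *pdata,
				    int channels, int width, int rate,
				    int non_pcm, int iec958)
{
	if (pdata->enable_audio)
		pdata->enable_audio(hdmi, channels, width, rate,
				    non_pcm, iec958);
}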
+ config DRM_IMX8MP_HDMI_PVI tristate "Freescale i.MX8MP HDMI PVI bridge support" depends on OF diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile index dd5d48584806..8d01fda25451 100644 --- a/drivers/gpu/drm/bridge/imx/Makefile +++ b/drivers/gpu/drm/bridge/imx/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o +obj-$(CONFIG_DRM_IMX8MP_HDMI_PAI) += imx8mp-hdmi-pai.o obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c new file mode 100644 index 000000000000..8d13a35b206a --- /dev/null +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2025 NXP + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define HTX_PAI_CTRL 0x00 +#define ENABLE BIT(0) + +#define HTX_PAI_CTRL_EXT 0x04 +#define WTMK_HIGH_MASK GENMASK(31, 24) +#define WTMK_LOW_MASK GENMASK(23, 16) +#define NUM_CH_MASK GENMASK(10, 8) +#define WTMK_HIGH(n) FIELD_PREP(WTMK_HIGH_MASK, (n)) +#define WTMK_LOW(n) FIELD_PREP(WTMK_LOW_MASK, (n)) +#define NUM_CH(n) FIELD_PREP(NUM_CH_MASK, (n) - 1) + +#define HTX_PAI_FIELD_CTRL 0x08 +#define PRE_SEL GENMASK(28, 24) +#define D_SEL GENMASK(23, 20) +#define V_SEL GENMASK(19, 15) +#define U_SEL GENMASK(14, 10) +#define C_SEL GENMASK(9, 5) +#define P_SEL GENMASK(4, 0) + +struct imx8mp_hdmi_pai { + struct regmap *regmap; +}; + +static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel, + int width, int rate, int non_pcm, + int iec958) +{ + const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi); + struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio; + int val; + + /* PAI set control extended */ + val = WTMK_HIGH(3) | WTMK_LOW(3); + val |= NUM_CH(channel); + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL_EXT, val); + + /* IEC60958 format */ + if (iec958) { + val = FIELD_PREP_CONST(P_SEL, + __bf_shf(IEC958_SUBFRAME_PARITY)); + val |= FIELD_PREP_CONST(C_SEL, + __bf_shf(IEC958_SUBFRAME_CHANNEL_STATUS)); + val |= FIELD_PREP_CONST(U_SEL, + __bf_shf(IEC958_SUBFRAME_USER_DATA)); + val |= FIELD_PREP_CONST(V_SEL, + __bf_shf(IEC958_SUBFRAME_VALIDITY)); + val |= FIELD_PREP_CONST(D_SEL, + __bf_shf(IEC958_SUBFRAME_SAMPLE_24_MASK)); + val |= FIELD_PREP_CONST(PRE_SEL, + __bf_shf(IEC958_SUBFRAME_PREAMBLE_MASK)); + } else { + /* + * The allowed PCM widths are 24bit and 32bit, as they are supported + * by aud2htx module. + * for 24bit, D_SEL = 0, select all the bits. + * for 32bit, D_SEL = 8, select 24bit in MSB. 
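+		 *
+		 * Worked example (derived from the two cases above, not from
+		 * the reference manual):
+		 *   width == 24  ->  D_SEL = 24 - 24 = 0, all 24 data bits used
+		 *   width == 32  ->  D_SEL = 32 - 24 = 8, the 24 MSBs selected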
+ */ + val = FIELD_PREP(D_SEL, width - 24); + } + + regmap_write(hdmi_pai->regmap, HTX_PAI_FIELD_CTRL, val); + + /* PAI start running */ + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, ENABLE); +} + +static void imx8mp_hdmi_pai_disable(struct dw_hdmi *dw_hdmi) +{ + const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi); + struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio; + + /* Stop PAI */ + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0); +} + +static const struct regmap_config imx8mp_hdmi_pai_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = HTX_PAI_FIELD_CTRL, +}; + +static int imx8mp_hdmi_pai_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dw_hdmi_plat_data *plat_data = data; + struct imx8mp_hdmi_pai *hdmi_pai; + struct resource *res; + void __iomem *base; + + hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL); + if (!hdmi_pai) + return -ENOMEM; + + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(base)) + return PTR_ERR(base); + + hdmi_pai->regmap = devm_regmap_init_mmio_clk(dev, "apb", base, + &imx8mp_hdmi_pai_regmap_config); + if (IS_ERR(hdmi_pai->regmap)) { + dev_err(dev, "regmap init failed\n"); + return PTR_ERR(hdmi_pai->regmap); + } + + plat_data->enable_audio = imx8mp_hdmi_pai_enable; + plat_data->disable_audio = imx8mp_hdmi_pai_disable; + plat_data->priv_audio = hdmi_pai; + + return 0; +} + +static const struct component_ops imx8mp_hdmi_pai_ops = { + .bind = imx8mp_hdmi_pai_bind, +}; + +static int imx8mp_hdmi_pai_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &imx8mp_hdmi_pai_ops); +} + +static void imx8mp_hdmi_pai_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &imx8mp_hdmi_pai_ops); +} + +static const struct of_device_id imx8mp_hdmi_pai_of_table[] = { + { .compatible = "fsl,imx8mp-hdmi-pai" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pai_of_table); + +static struct platform_driver imx8mp_hdmi_pai_platform_driver = { + .probe = imx8mp_hdmi_pai_probe, + .remove = imx8mp_hdmi_pai_remove, + .driver = { + .name = "imx8mp-hdmi-pai", + .of_match_table = imx8mp_hdmi_pai_of_table, + }, +}; +module_platform_driver(imx8mp_hdmi_pai_platform_driver); + +MODULE_DESCRIPTION("i.MX8MP HDMI PAI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c index 1e7a789ec289..32fd3554e267 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c @@ -5,11 +5,13 @@ */ #include +#include #include #include #include #include #include +#include struct imx8mp_hdmi { struct dw_hdmi_plat_data plat_data; @@ -79,10 +81,45 @@ static const struct dw_hdmi_phy_ops imx8mp_hdmi_phy_ops = { .update_hpd = dw_hdmi_phy_update_hpd, }; +static int imx8mp_dw_hdmi_bind(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev); + int ret; + + ret = component_bind_all(dev, &hdmi->plat_data); + if (ret) + return dev_err_probe(dev, ret, "component_bind_all failed!\n"); + + hdmi->dw_hdmi = dw_hdmi_probe(pdev, &hdmi->plat_data); + if (IS_ERR(hdmi->dw_hdmi)) { + component_unbind_all(dev, &hdmi->plat_data); + return PTR_ERR(hdmi->dw_hdmi); + } + + return 0; +} + +static void imx8mp_dw_hdmi_unbind(struct device *dev) +{ + struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev); + + 
dw_hdmi_remove(hdmi->dw_hdmi); + + component_unbind_all(dev, &hdmi->plat_data); +} + +static const struct component_master_ops imx8mp_dw_hdmi_ops = { + .bind = imx8mp_dw_hdmi_bind, + .unbind = imx8mp_dw_hdmi_unbind, +}; + static int imx8mp_dw_hdmi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_hdmi_plat_data *plat_data; + struct component_match *match = NULL; + struct device_node *remote; struct imx8mp_hdmi *hdmi; hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); @@ -102,20 +139,38 @@ static int imx8mp_dw_hdmi_probe(struct platform_device *pdev) plat_data->priv_data = hdmi; plat_data->phy_force_vendor = true; - hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data); - if (IS_ERR(hdmi->dw_hdmi)) - return PTR_ERR(hdmi->dw_hdmi); - platform_set_drvdata(pdev, hdmi); + /* port@2 is for hdmi_pai device */ + remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0); + if (!remote) { + hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data); + if (IS_ERR(hdmi->dw_hdmi)) + return PTR_ERR(hdmi->dw_hdmi); + } else { + drm_of_component_match_add(dev, &match, component_compare_of, remote); + + of_node_put(remote); + + return component_master_add_with_match(dev, &imx8mp_dw_hdmi_ops, match); + } + return 0; } static void imx8mp_dw_hdmi_remove(struct platform_device *pdev) { struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev); + struct device_node *remote; - dw_hdmi_remove(hdmi->dw_hdmi); + remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0); + if (remote) { + of_node_put(remote); + + component_master_del(&pdev->dev, &imx8mp_dw_hdmi_ops); + } else { + dw_hdmi_remove(hdmi->dw_hdmi); + } } static int imx8mp_dw_hdmi_pm_suspend(struct device *dev) diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c index 5d272916e200..122502968927 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c @@ -683,11 +683,6 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int imx8qxp_ldb_runtime_suspend(struct device *dev) -{ - return 0; -} - static int imx8qxp_ldb_runtime_resume(struct device *dev) { struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev); @@ -700,7 +695,7 @@ static int imx8qxp_ldb_runtime_resume(struct device *dev) } static const struct dev_pm_ops imx8qxp_ldb_pm_ops = { - RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL) + RUNTIME_PM_OPS(NULL, imx8qxp_ldb_runtime_resume, NULL) }; static const struct of_device_id imx8qxp_ldb_dt_ids[] = { diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index aa7b1dcc5d70..0185f61e6e59 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -287,6 +287,7 @@ enum chip_id { ID_IT6610, ID_IT66121, + ID_IT66122, }; struct it66121_chip_info { @@ -312,7 +313,7 @@ struct it66121_ctx { u8 swl; bool auto_cts; } audio; - const struct it66121_chip_info *info; + enum chip_id id; }; static const struct regmap_range_cfg it66121_regmap_banks[] = { @@ -402,7 +403,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx, if (ret) return ret; - if (ctx->info->id == ID_IT66121) { + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_EC1, 0); if (ret) @@ -428,7 +429,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx, if (ret) return ret; - if (ctx->info->id == ID_IT66121) { + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { ret 
= regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_EC1, IT66121_AFE_IP_EC1); @@ -449,7 +450,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx, if (ret) return ret; - if (ctx->info->id == ID_IT6610) { + if (ctx->id == ID_IT6610) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG, IT6610_AFE_XP_BYPASS, IT6610_AFE_XP_BYPASS); @@ -599,7 +600,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge, if (ret) return ret; - if (ctx->info->id == ID_IT66121) { + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_RCLK, 0); if (ret) @@ -748,7 +749,7 @@ static int it66121_bridge_check(struct drm_bridge *bridge, { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); - if (ctx->info->id == ID_IT6610) { + if (ctx->id == ID_IT6610) { /* The IT6610 only supports these settings */ bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; @@ -802,7 +803,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge, if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI)) goto unlock; - if (ctx->info->id == ID_IT66121 && + if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) && regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, IT66121_CLK_BANK_PWROFF_TXCLK)) { @@ -815,7 +816,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge, if (it66121_configure_afe(ctx, adjusted_mode)) goto unlock; - if (ctx->info->id == ID_IT66121 && + if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) && regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, 0)) { goto unlock; @@ -1384,8 +1385,6 @@ static int it66121_audio_startup(struct device *dev, void *data) int ret; struct it66121_ctx *ctx = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - mutex_lock(&ctx->lock); ret = it661221_audio_output_enable(ctx, true); if (ret) @@ -1401,8 +1400,6 @@ static void it66121_audio_shutdown(struct device *dev, void *data) int ret; struct it66121_ctx *ctx = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - mutex_lock(&ctx->lock); ret = it661221_audio_output_enable(ctx, false); if (ret) @@ -1479,8 +1476,6 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev) .no_capture_mute = 1, }; - dev_dbg(dev, "%s\n", __func__); - if (!of_property_present(dev->of_node, "#sound-dai-cells")) { dev_info(dev, "No \"#sound-dai-cells\", no audio\n"); return 0; @@ -1504,13 +1499,20 @@ static const char * const it66121_supplies[] = { "vcn33", "vcn18", "vrf12" }; +static const struct it66121_chip_info it66xx_chip_info[] = { + {.id = ID_IT6610, .vid = 0xca00, .pid = 0x0611 }, + {.id = ID_IT66121, .vid = 0x4954, .pid = 0x0612 }, + {.id = ID_IT66122, .vid = 0x4954, .pid = 0x0622 }, +}; + static int it66121_probe(struct i2c_client *client) { u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 }; struct device_node *ep; - int ret; + int ret, i; struct it66121_ctx *ctx; struct device *dev = &client->dev; + const struct it66121_chip_info *chip_info; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(dev, "I2C check functionality failed.\n"); @@ -1528,7 +1530,6 @@ static int it66121_probe(struct i2c_client *client) ctx->dev = dev; ctx->client = client; - ctx->info = i2c_get_match_data(client); of_property_read_u32(ep, "bus-width", &ctx->bus_width); of_node_put(ep); @@ -1574,11 +1575,18 @@ static int it66121_probe(struct 
i2c_client *client) revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]); device_ids[1] &= IT66121_DEVICE_ID1_MASK; - if ((vendor_ids[1] << 8 | vendor_ids[0]) != ctx->info->vid || - (device_ids[1] << 8 | device_ids[0]) != ctx->info->pid) { - return -ENODEV; + for (i = 0; i < ARRAY_SIZE(it66xx_chip_info); i++) { + chip_info = &it66xx_chip_info[i]; + if ((vendor_ids[1] << 8 | vendor_ids[0]) == chip_info->vid && + (device_ids[1] << 8 | device_ids[0]) == chip_info->pid) { + ctx->id = chip_info->id; + break; + } } + if (i == ARRAY_SIZE(it66xx_chip_info)) + return -ENODEV; + ctx->bridge.of_node = dev->of_node; ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA; ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; @@ -1612,28 +1620,18 @@ static void it66121_remove(struct i2c_client *client) mutex_destroy(&ctx->lock); } -static const struct it66121_chip_info it66121_chip_info = { - .id = ID_IT66121, - .vid = 0x4954, - .pid = 0x0612, -}; - -static const struct it66121_chip_info it6610_chip_info = { - .id = ID_IT6610, - .vid = 0xca00, - .pid = 0x0611, -}; - static const struct of_device_id it66121_dt_match[] = { - { .compatible = "ite,it66121", &it66121_chip_info }, - { .compatible = "ite,it6610", &it6610_chip_info }, + { .compatible = "ite,it6610" }, + { .compatible = "ite,it66121" }, + { .compatible = "ite,it66122" }, { } }; MODULE_DEVICE_TABLE(of, it66121_dt_match); static const struct i2c_device_id it66121_id[] = { - { "it66121", (kernel_ulong_t) &it66121_chip_info }, - { "it6610", (kernel_ulong_t) &it6610_chip_info }, + { .name = "it6610" }, + { .name = "it66121" }, + { .name = "it66122" }, { } }; MODULE_DEVICE_TABLE(i2c, it66121_id); diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c index e4d0bc2200f8..2cd1847ba776 100644 --- a/drivers/gpu/drm/bridge/simple-bridge.c +++ b/drivers/gpu/drm/bridge/simple-bridge.c @@ -261,6 +261,16 @@ static const struct of_device_id simple_bridge_match[] = { .timings = &default_bridge_timings, .connector_type = DRM_MODE_CONNECTOR_VGA, }, + }, { + .compatible = "asl-tek,cs5263", + .data = &(const struct simple_bridge_info) { + .connector_type = DRM_MODE_CONNECTOR_HDMIA, + }, + }, { + .compatible = "parade,ps185hdm", + .data = &(const struct simple_bridge_info) { + .connector_type = DRM_MODE_CONNECTOR_HDMIA, + }, }, { .compatible = "radxa,ra620", .data = &(const struct simple_bridge_info) { diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 2c5e532410de..a46df7583bcf 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -61,6 +61,14 @@ config DRM_DW_HDMI_QP select DRM_KMS_HELPER select REGMAP_MMIO +config DRM_DW_HDMI_QP_CEC + bool "Synopsis Designware QP CEC interface" + depends on DRM_DW_HDMI_QP + select DRM_DISPLAY_HDMI_CEC_HELPER + help + Support the CEC interface which is part of the Synopsys + Designware HDMI QP block. 
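A sketch of the platform-glue side that this option serves (not part of this patch): CEC is opted into by handing a valid interrupt to the QP core through the platform data; when cec_irq is left at 0 the core warns and keeps CEC disabled, as the dw-hdmi-qp.c hunk further below shows. The helper name and the "cec" interrupt name are assumptions made for illustration.

#include <linux/platform_device.h>
#include <drm/bridge/dw_hdmi_qp.h>

/* Illustrative only: fill the CEC interrupt before calling dw_hdmi_qp_bind(). */
static void example_fill_qp_cec_irq(struct platform_device *pdev,
				    struct dw_hdmi_qp_plat_data *plat_data)
{
	int irq = platform_get_irq_byname_optional(pdev, "cec");

	/* 0 means "no CEC"; the QP core then skips the CEC adapter setup. */
	plat_data->cec_irq = irq > 0 ? irq : 0;
}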
+ config DRM_DW_MIPI_DSI tristate select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/bridge/synopsys/dw-dp.c b/drivers/gpu/drm/bridge/synopsys/dw-dp.c index 9bbfe8da3de0..82aaf74e1bc0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-dp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-dp.c @@ -2049,6 +2049,8 @@ struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder, bridge->type = DRM_MODE_CONNECTOR_DisplayPort; bridge->ycbcr_420_allowed = true; + devm_drm_bridge_add(dev, bridge); + dp->aux.dev = dev; dp->aux.drm_dev = encoder->dev; dp->aux.name = dev_name(dev); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c index ab18f9a3bf23..df7a37eb47f4 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c @@ -90,6 +90,11 @@ static int audio_hw_params(struct device *dev, void *data, params->iec.status[0] & IEC958_AES0_NONAUDIO); dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width); + if (daifmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE) + dw_hdmi_set_sample_iec958(dw->data.hdmi, 1); + else + dw_hdmi_set_sample_iec958(dw->data.hdmi, 0); + return 0; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index 39332c57f2c5..fe4c026280f0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -26,6 +27,8 @@ #include #include +#include + #include #include "dw-hdmi-qp.h" @@ -131,17 +134,34 @@ struct dw_hdmi_qp_i2c { bool is_segment; }; +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC +struct dw_hdmi_qp_cec { + struct drm_connector *connector; + int irq; + u32 addresses; + struct cec_msg rx_msg; + u8 tx_status; + bool tx_done; + bool rx_done; +}; +#endif + struct dw_hdmi_qp { struct drm_bridge bridge; struct device *dev; struct dw_hdmi_qp_i2c *i2c; +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC + struct dw_hdmi_qp_cec *cec; +#endif + struct { const struct dw_hdmi_qp_phy_ops *ops; void *data; } phy; + unsigned long ref_clk_rate; struct regmap *regm; unsigned long tmds_char_rate; @@ -848,8 +868,9 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge, return; if (connector->display_info.is_hdmi) { - dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n", - __func__, conn_state->hdmi.tmds_char_rate); + dev_dbg(hdmi->dev, "%s mode=HDMI %s rate=%llu bpc=%u\n", __func__, + drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format), + conn_state->hdmi.tmds_char_rate, conn_state->hdmi.output_bpc); op_mode = 0; hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate; } else { @@ -965,6 +986,179 @@ static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge, } } +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC +static irqreturn_t dw_hdmi_qp_cec_hardirq(int irq, void *dev_id) +{ + struct dw_hdmi_qp *hdmi = dev_id; + struct dw_hdmi_qp_cec *cec = hdmi->cec; + irqreturn_t ret = IRQ_HANDLED; + u32 stat; + + stat = dw_hdmi_qp_read(hdmi, CEC_INT_STATUS); + if (stat == 0) + return IRQ_NONE; + + dw_hdmi_qp_write(hdmi, stat, CEC_INT_CLEAR); + + if (stat & CEC_STAT_LINE_ERR) { + cec->tx_status = CEC_TX_STATUS_ERROR; + cec->tx_done = true; + ret = IRQ_WAKE_THREAD; + } else if (stat & CEC_STAT_DONE) { + cec->tx_status = CEC_TX_STATUS_OK; + cec->tx_done = true; + ret = IRQ_WAKE_THREAD; + } else if (stat & CEC_STAT_NACK) { + cec->tx_status = CEC_TX_STATUS_NACK; + cec->tx_done = true; + ret = 
IRQ_WAKE_THREAD; + } + + if (stat & CEC_STAT_EOM) { + unsigned int len, i, val; + + val = dw_hdmi_qp_read(hdmi, CEC_RX_COUNT_STATUS); + len = (val & 0xf) + 1; + + if (len > sizeof(cec->rx_msg.msg)) + len = sizeof(cec->rx_msg.msg); + + for (i = 0; i < 4; i++) { + val = dw_hdmi_qp_read(hdmi, CEC_RX_DATA3_0 + i * 4); + cec->rx_msg.msg[i * 4] = val & 0xff; + cec->rx_msg.msg[i * 4 + 1] = (val >> 8) & 0xff; + cec->rx_msg.msg[i * 4 + 2] = (val >> 16) & 0xff; + cec->rx_msg.msg[i * 4 + 3] = (val >> 24) & 0xff; + } + + dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL); + + cec->rx_msg.len = len; + cec->rx_done = true; + + ret = IRQ_WAKE_THREAD; + } + + return ret; +} + +static irqreturn_t dw_hdmi_qp_cec_thread(int irq, void *dev_id) +{ + struct dw_hdmi_qp *hdmi = dev_id; + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + if (cec->tx_done) { + cec->tx_done = false; + drm_connector_hdmi_cec_transmit_attempt_done(cec->connector, + cec->tx_status); + } + + if (cec->rx_done) { + cec->rx_done = false; + drm_connector_hdmi_cec_received_msg(cec->connector, &cec->rx_msg); + } + + return IRQ_HANDLED; +} + +static int dw_hdmi_qp_cec_init(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + cec->connector = connector; + + dw_hdmi_qp_write(hdmi, 0, CEC_TX_COUNT); + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N); + + return devm_request_threaded_irq(hdmi->dev, cec->irq, + dw_hdmi_qp_cec_hardirq, + dw_hdmi_qp_cec_thread, IRQF_SHARED, + dev_name(hdmi->dev), hdmi); +} + +static int dw_hdmi_qp_cec_log_addr(struct drm_bridge *bridge, u8 logical_addr) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + if (logical_addr == CEC_LOG_ADDR_INVALID) + cec->addresses = 0; + else + cec->addresses |= BIT(logical_addr) | CEC_ADDR_BROADCAST; + + dw_hdmi_qp_write(hdmi, cec->addresses, CEC_ADDR); + + return 0; +} + +static int dw_hdmi_qp_cec_enable(struct drm_bridge *bridge, bool enable) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + unsigned int irqs; + u32 swdisable; + + if (!enable) { + dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N); + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + + swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE); + swdisable = swdisable | CEC_SWDISABLE; + dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE); + } else { + swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE); + swdisable = swdisable & ~CEC_SWDISABLE; + dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE); + + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL); + + dw_hdmi_qp_cec_log_addr(bridge, CEC_LOG_ADDR_INVALID); + + irqs = CEC_STAT_LINE_ERR | CEC_STAT_NACK | CEC_STAT_EOM | + CEC_STAT_DONE; + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, irqs, CEC_INT_MASK_N); + } + + return 0; +} + +static int dw_hdmi_qp_cec_transmit(struct drm_bridge *bridge, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + unsigned int i; + u32 val; + + for (i = 0; i < msg->len; i++) { + if (!(i % 4)) + val = msg->msg[i]; + if ((i % 4) == 1) + val |= msg->msg[i] << 8; + if ((i % 4) == 2) + val |= msg->msg[i] << 16; + if ((i % 4) == 3) + val |= msg->msg[i] << 24; + + if (i == (msg->len - 1) || (i % 4) == 3) + dw_hdmi_qp_write(hdmi, val, CEC_TX_DATA3_0 + (i / 4) * 4); + } + + dw_hdmi_qp_write(hdmi, msg->len - 1, CEC_TX_COUNT); + 
dw_hdmi_qp_write(hdmi, CEC_CTRL_START, CEC_TX_CONTROL); + + return 0; +} +#else +#define dw_hdmi_qp_cec_init NULL +#define dw_hdmi_qp_cec_enable NULL +#define dw_hdmi_qp_cec_log_addr NULL +#define dw_hdmi_qp_cec_transmit NULL +#endif /* CONFIG_DRM_DW_HDMI_QP_CEC */ + static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -979,6 +1173,10 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { .hdmi_audio_startup = dw_hdmi_qp_audio_enable, .hdmi_audio_shutdown = dw_hdmi_qp_audio_disable, .hdmi_audio_prepare = dw_hdmi_qp_audio_prepare, + .hdmi_cec_init = dw_hdmi_qp_cec_init, + .hdmi_cec_enable = dw_hdmi_qp_cec_enable, + .hdmi_cec_log_addr = dw_hdmi_qp_cec_log_addr, + .hdmi_cec_transmit = dw_hdmi_qp_cec_transmit, }; static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id) @@ -1014,13 +1212,11 @@ static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi) { dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N); dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N); - dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0); + dw_hdmi_qp_write(hdmi, hdmi->ref_clk_rate, TIMER_BASE_CONFIG0); /* Software reset */ dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); - dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0); - dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, I2CM_INTERFACE_CONTROL0); /* Clear DONE and ERROR interrupts */ @@ -1066,6 +1262,13 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->phy.ops = plat_data->phy_ops; hdmi->phy.data = plat_data->phy_data; + if (plat_data->ref_clk_rate) { + hdmi->ref_clk_rate = plat_data->ref_clk_rate; + } else { + hdmi->ref_clk_rate = 428571429; + dev_warn(dev, "Set ref_clk_rate to vendor default\n"); + } + dw_hdmi_qp_init_hw(hdmi); ret = devm_request_threaded_irq(dev, plat_data->main_irq, @@ -1085,6 +1288,12 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->bridge.vendor = "Synopsys"; hdmi->bridge.product = "DW HDMI QP TX"; + if (plat_data->supported_formats) + hdmi->bridge.supported_formats = plat_data->supported_formats; + + if (plat_data->max_bpc) + hdmi->bridge.max_bpc = plat_data->max_bpc; + hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi); if (IS_ERR(hdmi->bridge.ddc)) return ERR_CAST(hdmi->bridge.ddc); @@ -1093,6 +1302,22 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->bridge.hdmi_audio_dev = dev; hdmi->bridge.hdmi_audio_dai_port = 1; +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC + if (plat_data->cec_irq) { + hdmi->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER; + hdmi->bridge.hdmi_cec_dev = dev; + hdmi->bridge.hdmi_cec_adapter_name = dev_name(dev); + + hdmi->cec = devm_kzalloc(hdmi->dev, sizeof(*hdmi->cec), GFP_KERNEL); + if (!hdmi->cec) + return ERR_PTR(-ENOMEM); + + hdmi->cec->irq = plat_data->cec_irq; + } else { + dev_warn(dev, "Disabled CEC support due to missing IRQ\n"); + } +#endif + ret = devm_drm_bridge_add(dev, &hdmi->bridge); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h index 72987e6c4689..91a15f82e32a 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h @@ -488,9 +488,23 @@ #define AUDPKT_VBIT_OVR0 0xf24 /* CEC Registers */ #define CEC_TX_CONTROL 0x1000 +#define CEC_CTRL_CLEAR BIT(0) +#define CEC_CTRL_START BIT(0) #define CEC_STATUS 0x1004 +#define CEC_STAT_DONE BIT(0) +#define CEC_STAT_NACK BIT(1) 
+#define CEC_STAT_ARBLOST BIT(2) +#define CEC_STAT_LINE_ERR BIT(3) +#define CEC_STAT_RETRANS_FAIL BIT(4) +#define CEC_STAT_DISCARD BIT(5) +#define CEC_STAT_TX_BUSY BIT(8) +#define CEC_STAT_RX_BUSY BIT(9) +#define CEC_STAT_DRIVE_ERR BIT(10) +#define CEC_STAT_EOM BIT(11) +#define CEC_STAT_NOTIFY_ERR BIT(12) #define CEC_CONFIG 0x1008 #define CEC_ADDR 0x100c +#define CEC_ADDR_BROADCAST BIT(15) #define CEC_TX_COUNT 0x1020 #define CEC_TX_DATA3_0 0x1024 #define CEC_TX_DATA7_4 0x1028 diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 206b099a35e9..3b77e73ac0ea 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -177,6 +177,7 @@ struct dw_hdmi { spinlock_t audio_lock; struct mutex audio_mutex; + unsigned int sample_iec958; unsigned int sample_non_pcm; unsigned int sample_width; unsigned int sample_rate; @@ -198,6 +199,12 @@ struct dw_hdmi { enum drm_connector_status last_connector_result; }; +const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi) +{ + return hdmi->plat_data; +} +EXPORT_SYMBOL_GPL(dw_hdmi_to_plat_data); + #define HDMI_IH_PHY_STAT0_RX_SENSE \ (HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \ HDMI_IH_PHY_STAT0_RX_SENSE2 | HDMI_IH_PHY_STAT0_RX_SENSE3) @@ -712,6 +719,14 @@ void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm) } EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm); +void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958) +{ + mutex_lock(&hdmi->audio_mutex); + hdmi->sample_iec958 = iec958; + mutex_unlock(&hdmi->audio_mutex); +} +EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_iec958); + void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate) { mutex_lock(&hdmi->audio_mutex); @@ -843,7 +858,8 @@ static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi) hdmi->channels, hdmi->sample_width, hdmi->sample_rate, - hdmi->sample_non_pcm); + hdmi->sample_non_pcm, + hdmi->sample_iec958); } static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index ae0d08e5e960..276d05d25ad8 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -106,10 +106,21 @@ #define SN_PWM_EN_INV_REG 0xA5 #define SN_PWM_INV_MASK BIT(0) #define SN_PWM_EN_MASK BIT(1) + +#define SN_IRQ_EN_REG 0xE0 +#define IRQ_EN BIT(0) + +#define SN_IRQ_EVENTS_EN_REG 0xE6 +#define HPD_INSERTION_EN BIT(1) +#define HPD_REMOVAL_EN BIT(2) + #define SN_AUX_CMD_STATUS_REG 0xF4 #define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3) #define AUX_IRQ_STATUS_AUX_SHORT BIT(5) #define AUX_IRQ_STATUS_NAT_I2C_FAIL BIT(6) +#define SN_IRQ_STATUS_REG 0xF5 +#define HPD_REMOVAL_STATUS BIT(2) +#define HPD_INSERTION_STATUS BIT(1) #define MIN_DSI_CLK_FREQ_MHZ 40 @@ -152,7 +163,9 @@ * @ln_assign: Value to program to the LN_ASSIGN register. * @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG. * @comms_enabled: If true then communication over the aux channel is enabled. + * @hpd_enabled: If true then HPD events are enabled. * @comms_mutex: Protects modification of comms_enabled. + * @hpd_mutex: Protects modification of hpd_enabled. * * @gchip: If we expose our GPIOs, this is used. * @gchip_output: A cache of whether we've set GPIOs to output. 
This @@ -190,7 +203,9 @@ struct ti_sn65dsi86 { u8 ln_assign; u8 ln_polrs; bool comms_enabled; + bool hpd_enabled; struct mutex comms_mutex; + struct mutex hpd_mutex; #if defined(CONFIG_OF_GPIO) struct gpio_chip gchip; @@ -221,6 +236,23 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = { .max_register = 0xFF, }; +static int ti_sn65dsi86_read_u8(struct ti_sn65dsi86 *pdata, unsigned int reg, + u8 *val) +{ + int ret; + unsigned int reg_val; + + ret = regmap_read(pdata->regmap, reg, ®_val); + if (ret) { + dev_err(pdata->dev, "fail to read raw reg %#x: %d\n", + reg, ret); + return ret; + } + *val = (u8)reg_val; + + return 0; +} + static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata, unsigned int reg, u16 *val) { @@ -379,6 +411,7 @@ static void ti_sn65dsi86_disable_comms(struct ti_sn65dsi86 *pdata) static int __maybe_unused ti_sn65dsi86_resume(struct device *dev) { struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev); + const struct i2c_client *client = to_i2c_client(pdata->dev); int ret; ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies); @@ -413,6 +446,13 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev) if (pdata->refclk) ti_sn65dsi86_enable_comms(pdata, NULL); + if (client->irq) { + ret = regmap_update_bits(pdata->regmap, SN_IRQ_EN_REG, IRQ_EN, + IRQ_EN); + if (ret) + dev_err(pdata->dev, "Failed to enable IRQ events: %d\n", ret); + } + return ret; } @@ -1211,6 +1251,8 @@ static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry * static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + const struct i2c_client *client = to_i2c_client(pdata->dev); + int ret; /* * Device needs to be powered on before reading the HPD state @@ -1219,11 +1261,35 @@ static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge) */ pm_runtime_get_sync(pdata->dev); + + mutex_lock(&pdata->hpd_mutex); + pdata->hpd_enabled = true; + mutex_unlock(&pdata->hpd_mutex); + + if (client->irq) { + ret = regmap_set_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG, + HPD_REMOVAL_EN | HPD_INSERTION_EN); + if (ret) + dev_err(pdata->dev, "Failed to enable HPD events: %d\n", ret); + } } static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + const struct i2c_client *client = to_i2c_client(pdata->dev); + int ret; + + if (client->irq) { + ret = regmap_clear_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG, + HPD_REMOVAL_EN | HPD_INSERTION_EN); + if (ret) + dev_err(pdata->dev, "Failed to disable HPD events: %d\n", ret); + } + + mutex_lock(&pdata->hpd_mutex); + pdata->hpd_enabled = false; + mutex_unlock(&pdata->hpd_mutex); pm_runtime_put_autosuspend(pdata->dev); } @@ -1309,6 +1375,41 @@ static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata) return 0; } +static irqreturn_t ti_sn_bridge_interrupt(int irq, void *private) +{ + struct ti_sn65dsi86 *pdata = private; + struct drm_device *dev = pdata->bridge.dev; + u8 status; + int ret; + bool hpd_event; + + ret = ti_sn65dsi86_read_u8(pdata, SN_IRQ_STATUS_REG, &status); + if (ret) { + dev_err(pdata->dev, "Failed to read IRQ status: %d\n", ret); + return IRQ_NONE; + } + + hpd_event = status & (HPD_REMOVAL_STATUS | HPD_INSERTION_STATUS); + + dev_dbg(pdata->dev, "(SN_IRQ_STATUS_REG = %#x)\n", status); + if (!status) + return IRQ_NONE; + + ret = regmap_write(pdata->regmap, SN_IRQ_STATUS_REG, status); + if (ret) { + dev_err(pdata->dev, "Failed to clear IRQ status: %d\n", 
ret); + return IRQ_NONE; + } + + /* Only send the HPD event if we are bound with a device. */ + mutex_lock(&pdata->hpd_mutex); + if (pdata->hpd_enabled && hpd_event) + drm_kms_helper_hotplug_event(dev); + mutex_unlock(&pdata->hpd_mutex); + + return IRQ_HANDLED; +} + static int ti_sn_bridge_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { @@ -1931,6 +2032,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) dev_set_drvdata(dev, pdata); pdata->dev = dev; + mutex_init(&pdata->hpd_mutex); mutex_init(&pdata->comms_mutex); pdata->regmap = devm_regmap_init_i2c(client, @@ -1971,6 +2073,16 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf))) return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n"); + if (client->irq) { + ret = devm_request_threaded_irq(pdata->dev, client->irq, NULL, + ti_sn_bridge_interrupt, + IRQF_ONESHOT, + dev_name(pdata->dev), pdata); + + if (ret) + return dev_err_probe(dev, ret, "failed to request interrupt\n"); + } + /* * Break ourselves up into a collection of aux devices. The only real * motiviation here is to solve the chicken-and-egg problem of probe diff --git a/drivers/gpu/drm/clients/drm_fbdev_client.c b/drivers/gpu/drm/clients/drm_fbdev_client.c index f894ba52bdb5..28951e392482 100644 --- a/drivers/gpu/drm/clients/drm_fbdev_client.c +++ b/drivers/gpu/drm/clients/drm_fbdev_client.c @@ -13,22 +13,36 @@ * struct drm_client_funcs */ +static void drm_fbdev_client_free(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); +} + static void drm_fbdev_client_unregister(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); if (fb_helper->info) { + /* + * Fully probed framebuffer device + */ drm_fb_helper_unregister_info(fb_helper); } else { + /* + * Partially initialized client, no framebuffer device yet + */ drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } } -static int drm_fbdev_client_restore(struct drm_client_dev *client) +static int drm_fbdev_client_restore(struct drm_client_dev *client, bool force) { - drm_fb_helper_lastclose(client->dev); + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); return 0; } @@ -62,32 +76,27 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client) return ret; } -static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +static int drm_fbdev_client_suspend(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - if (holds_console_lock) - drm_fb_helper_set_suspend(fb_helper, true); - else - drm_fb_helper_set_suspend_unlocked(fb_helper, true); + drm_fb_helper_set_suspend_unlocked(fb_helper, true); return 0; } -static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock) +static int drm_fbdev_client_resume(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - if (holds_console_lock) - drm_fb_helper_set_suspend(fb_helper, false); - else - drm_fb_helper_set_suspend_unlocked(fb_helper, false); + drm_fb_helper_set_suspend_unlocked(fb_helper, false); return 0; } static const struct drm_client_funcs drm_fbdev_client_funcs = { .owner = THIS_MODULE, + .free = drm_fbdev_client_free, .unregister = 
drm_fbdev_client_unregister, .restore = drm_fbdev_client_restore, .hotplug = drm_fbdev_client_hotplug, diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c index d239f1e3c456..4d3005273b27 100644 --- a/drivers/gpu/drm/clients/drm_log.c +++ b/drivers/gpu/drm/clients/drm_log.c @@ -100,7 +100,7 @@ static void drm_log_clear_line(struct drm_log_scanout *scanout, u32 line) return; iosys_map_memset(&map, r.y1 * fb->pitches[0], 0, height * fb->pitches[0]); drm_client_buffer_vunmap_local(scanout->buffer); - drm_client_framebuffer_flush(scanout->buffer, &r); + drm_client_buffer_flush(scanout->buffer, &r); } static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s, @@ -133,7 +133,7 @@ static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s, if (scanout->line >= scanout->rows) scanout->line = 0; drm_client_buffer_vunmap_local(scanout->buffer); - drm_client_framebuffer_flush(scanout->buffer, &r); + drm_client_buffer_flush(scanout->buffer, &r); } static void drm_log_draw_new_line(struct drm_log_scanout *scanout, @@ -204,7 +204,7 @@ static int drm_log_setup_modeset(struct drm_client_dev *client, if (format == DRM_FORMAT_INVALID) return -EINVAL; - scanout->buffer = drm_client_framebuffer_create(client, width, height, format); + scanout->buffer = drm_client_buffer_create_dumb(client, width, height, format); if (IS_ERR(scanout->buffer)) { drm_warn(client->dev, "drm_log can't create framebuffer %d %d %p4cc\n", width, height, &format); @@ -272,7 +272,7 @@ static void drm_log_init_client(struct drm_log *dlog) err_failed_commit: for (i = 0; i < n_modeset; i++) - drm_client_framebuffer_delete(dlog->scanout[i].buffer); + drm_client_buffer_delete(dlog->scanout[i].buffer); err_nomodeset: kfree(dlog->scanout); @@ -286,26 +286,45 @@ static void drm_log_free_scanout(struct drm_client_dev *client) if (dlog->n_scanout) { for (i = 0; i < dlog->n_scanout; i++) - drm_client_framebuffer_delete(dlog->scanout[i].buffer); + drm_client_buffer_delete(dlog->scanout[i].buffer); dlog->n_scanout = 0; kfree(dlog->scanout); dlog->scanout = NULL; } } -static void drm_log_client_unregister(struct drm_client_dev *client) +static void drm_log_client_free(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); struct drm_device *dev = client->dev; + kfree(dlog); + + drm_dbg(dev, "Unregistered with drm log\n"); +} + +static void drm_log_client_unregister(struct drm_client_dev *client) +{ + struct drm_log *dlog = client_to_drm_log(client); + unregister_console(&dlog->con); mutex_lock(&dlog->lock); drm_log_free_scanout(client); - drm_client_release(client); mutex_unlock(&dlog->lock); - kfree(dlog); - drm_dbg(dev, "Unregistered with drm log\n"); + drm_client_release(client); +} + +static int drm_log_client_restore(struct drm_client_dev *client, bool force) +{ + int ret; + + if (force) + ret = drm_client_modeset_commit_locked(client); + else + ret = drm_client_modeset_commit(client); + + return ret; } static int drm_log_client_hotplug(struct drm_client_dev *client) @@ -319,7 +338,7 @@ static int drm_log_client_hotplug(struct drm_client_dev *client) return 0; } -static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_lock) +static int drm_log_client_suspend(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); @@ -328,7 +347,7 @@ static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_l return 0; } -static int drm_log_client_resume(struct drm_client_dev *client, bool 
_console_lock) +static int drm_log_client_resume(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); @@ -339,7 +358,9 @@ static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lo static const struct drm_client_funcs drm_log_client_funcs = { .owner = THIS_MODULE, + .free = drm_log_client_free, .unregister = drm_log_client_unregister, + .restore = drm_log_client_restore, .hotplug = drm_log_client_hotplug, .suspend = drm_log_client_suspend, .resume = drm_log_client_resume, diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index baacd21e7341..a2d30cf9e06d 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -137,10 +137,9 @@ static void drm_bridge_connector_hpd_notify(struct drm_connector *connector, { struct drm_bridge_connector *bridge_connector = to_drm_bridge_connector(connector); - struct drm_bridge *bridge; /* Notify all bridges in the pipeline of hotplug events. */ - drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(bridge_connector->encoder, bridge) { if (bridge->funcs->hpd_notify) bridge->funcs->hpd_notify(bridge, status); } @@ -619,6 +618,20 @@ static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_f * Bridge Connector Initialisation */ +static void drm_bridge_connector_put_bridges(struct drm_device *dev, void *data) +{ + struct drm_bridge_connector *bridge_connector = (struct drm_bridge_connector *)data; + + drm_bridge_put(bridge_connector->bridge_edid); + drm_bridge_put(bridge_connector->bridge_hpd); + drm_bridge_put(bridge_connector->bridge_detect); + drm_bridge_put(bridge_connector->bridge_modes); + drm_bridge_put(bridge_connector->bridge_hdmi); + drm_bridge_put(bridge_connector->bridge_hdmi_audio); + drm_bridge_put(bridge_connector->bridge_dp_audio); + drm_bridge_put(bridge_connector->bridge_hdmi_cec); +} + /** * drm_bridge_connector_init - Initialise a connector for a chain of bridges * @drm: the DRM device @@ -639,7 +652,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, struct drm_bridge_connector *bridge_connector; struct drm_connector *connector; struct i2c_adapter *ddc = NULL; - struct drm_bridge *bridge, *panel_bridge = NULL; + struct drm_bridge *panel_bridge __free(drm_bridge_put) = NULL; unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB); unsigned int max_bpc = 8; bool support_hdcp = false; @@ -650,6 +663,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (!bridge_connector) return ERR_PTR(-ENOMEM); + ret = drmm_add_action(drm, drm_bridge_connector_put_bridges, bridge_connector); + if (ret) + return ERR_PTR(ret); + bridge_connector->encoder = encoder; /* @@ -667,20 +684,28 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, * detection are available, we don't support hotplug detection at all. 
*/ connector_type = DRM_MODE_CONNECTOR_Unknown; - drm_for_each_bridge_in_chain(encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(encoder, bridge) { if (!bridge->interlace_allowed) connector->interlace_allowed = false; if (!bridge->ycbcr_420_allowed) connector->ycbcr_420_allowed = false; - if (bridge->ops & DRM_BRIDGE_OP_EDID) - bridge_connector->bridge_edid = bridge; - if (bridge->ops & DRM_BRIDGE_OP_HPD) - bridge_connector->bridge_hpd = bridge; - if (bridge->ops & DRM_BRIDGE_OP_DETECT) - bridge_connector->bridge_detect = bridge; - if (bridge->ops & DRM_BRIDGE_OP_MODES) - bridge_connector->bridge_modes = bridge; + if (bridge->ops & DRM_BRIDGE_OP_EDID) { + drm_bridge_put(bridge_connector->bridge_edid); + bridge_connector->bridge_edid = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_HPD) { + drm_bridge_put(bridge_connector->bridge_hpd); + bridge_connector->bridge_hpd = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_DETECT) { + drm_bridge_put(bridge_connector->bridge_detect); + bridge_connector->bridge_detect = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_MODES) { + drm_bridge_put(bridge_connector->bridge_modes); + bridge_connector->bridge_modes = drm_bridge_get(bridge); + } if (bridge->ops & DRM_BRIDGE_OP_HDMI) { if (bridge_connector->bridge_hdmi) return ERR_PTR(-EBUSY); @@ -688,7 +713,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, !bridge->funcs->hdmi_clear_infoframe) return ERR_PTR(-EINVAL); - bridge_connector->bridge_hdmi = bridge; + bridge_connector->bridge_hdmi = drm_bridge_get(bridge); if (bridge->supported_formats) supported_formats = bridge->supported_formats; @@ -711,7 +736,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, !bridge->funcs->hdmi_audio_shutdown) return ERR_PTR(-EINVAL); - bridge_connector->bridge_hdmi_audio = bridge; + bridge_connector->bridge_hdmi_audio = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) { @@ -729,21 +754,21 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, !bridge->funcs->dp_audio_shutdown) return ERR_PTR(-EINVAL); - bridge_connector->bridge_dp_audio = bridge; + bridge_connector->bridge_dp_audio = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { if (bridge_connector->bridge_hdmi_cec) return ERR_PTR(-EBUSY); - bridge_connector->bridge_hdmi_cec = bridge; + bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { if (bridge_connector->bridge_hdmi_cec) return ERR_PTR(-EBUSY); - bridge_connector->bridge_hdmi_cec = bridge; + bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge); if (!bridge->funcs->hdmi_cec_enable || !bridge->funcs->hdmi_cec_log_addr || @@ -762,8 +787,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge->ddc) ddc = bridge->ddc; - if (drm_bridge_is_panel(bridge)) - panel_bridge = bridge; + if (drm_bridge_is_panel(bridge)) { + drm_bridge_put(panel_bridge); + panel_bridge = drm_bridge_get(bridge); + } if (bridge->support_hdcp) support_hdcp = true; @@ -818,7 +845,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge_connector->bridge_hdmi_cec && bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { - bridge = bridge_connector->bridge_hdmi_cec; + struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec; ret = drmm_connector_hdmi_cec_notifier_register(connector, NULL, @@ -829,7 +856,7 @@ struct drm_connector 
*drm_bridge_connector_init(struct drm_device *drm, if (bridge_connector->bridge_hdmi_cec && bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { - bridge = bridge_connector->bridge_hdmi_cec; + struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec; ret = drmm_connector_hdmi_cec_register(connector, &drm_bridge_connector_hdmi_cec_funcs, diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 4aaeae4fa03c..f9fdf19de74a 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -123,6 +124,14 @@ bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], } EXPORT_SYMBOL(drm_dp_clock_recovery_ok); +bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); + + return lane_align & DP_POST_LT_ADJ_REQ_IN_PROGRESS; +} +EXPORT_SYMBOL(drm_dp_post_lt_adj_req_in_progress); + u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], int lane) { @@ -2543,6 +2552,10 @@ static const struct dpcd_quirk dpcd_quirk_list[] = { { OUI(0x00, 0x0C, 0xE7), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) }, /* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */ { OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) }, + /* Synaptics Panamera supports only a compressed bpp of 12 above 50% of its max DSC pixel throughput */ + { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x22), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) }, + { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x31), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) }, + { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x33), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) }, }; #undef OUI @@ -2832,6 +2845,158 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S } EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs); +/* + * See DP Standard v2.1a 2.8.4 Minimum Slices/Display, Table 2-159 and + * Appendix L.1 Derivation of Slice Count Requirements. + */ +static int dsc_sink_min_slice_throughput(int peak_pixel_rate) +{ + if (peak_pixel_rate >= 4800000) + return 600000; + else if (peak_pixel_rate >= 2700000) + return 400000; + else + return 340000; +} + +/** + * drm_dp_dsc_sink_max_slice_throughput() - Get a DSC sink's maximum pixel throughput per slice + * @dsc_dpcd: DSC sink's capabilities from DPCD + * @peak_pixel_rate: Cumulative peak pixel rate in kHz + * @is_rgb_yuv444: The mode is either RGB or YUV444 + * + * Return the DSC sink device's maximum pixel throughput per slice, based on + * the device's @dsc_dpcd capabilities, the @peak_pixel_rate of the transferred + * stream(s) and whether the output format @is_rgb_yuv444 or yuv422/yuv420. + * + * Note that @peak_pixel_rate is the total pixel rate transferred to the same + * DSC/display sink. 
For instance to calculate a tile's slice count of an MST + * multi-tiled display sink (not considering here the required + * rounding/alignment of slice count):: + * + * @peak_pixel_rate = tile_pixel_rate * tile_count + * total_slice_count = @peak_pixel_rate / drm_dp_dsc_sink_max_slice_throughput(@peak_pixel_rate) + * tile_slice_count = total_slice_count / tile_count + * + * Returns: + * The maximum pixel throughput per slice supported by the DSC sink device + * in kPixels/sec. + */ +int drm_dp_dsc_sink_max_slice_throughput(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], + int peak_pixel_rate, bool is_rgb_yuv444) +{ + int throughput; + int delta = 0; + int base; + + throughput = dsc_dpcd[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT]; + + if (is_rgb_yuv444) { + throughput = (throughput & DP_DSC_THROUGHPUT_MODE_0_MASK) >> + DP_DSC_THROUGHPUT_MODE_0_SHIFT; + + delta = ((dsc_dpcd[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT]) & + DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK) >> + DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT; /* in units of 2 MPixels/sec */ + delta *= 2000; + } else { + throughput = (throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >> + DP_DSC_THROUGHPUT_MODE_1_SHIFT; + } + + switch (throughput) { + case 0: + return dsc_sink_min_slice_throughput(peak_pixel_rate); + case 1: + base = 340000; + break; + case 2 ... 14: + base = 400000 + 50000 * (throughput - 2); + break; + case 15: + base = 170000; + break; + } + + return base + delta; +} +EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_throughput); + +static u8 dsc_branch_dpcd_cap(const u8 dpcd[DP_DSC_BRANCH_CAP_SIZE], int reg) +{ + return dpcd[reg - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0]; +} + +/** + * drm_dp_dsc_branch_max_overall_throughput() - Branch device's max overall DSC pixel throughput + * @dsc_branch_dpcd: DSC branch capabilities from DPCD + * @is_rgb_yuv444: The mode is either RGB or YUV444 + * + * Return the branch device's maximum overall DSC pixel throughput, based on + * the device's DPCD DSC branch capabilities, and whether the output + * format @is_rgb_yuv444 or yuv422/yuv420. + * + * Returns: + * - 0: The maximum overall throughput capability is not indicated by + * the device separately and it must be determined from the per-slice + * max throughput (see @drm_dp_dsc_branch_slice_max_throughput()) + * and the maximum slice count supported by the device. + * - > 0: The maximum overall DSC pixel throughput supported by the branch + * device in kPixels/sec. + */ +int drm_dp_dsc_branch_max_overall_throughput(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE], + bool is_rgb_yuv444) +{ + int throughput; + + if (is_rgb_yuv444) + throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd, + DP_DSC_BRANCH_OVERALL_THROUGHPUT_0); + else + throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd, + DP_DSC_BRANCH_OVERALL_THROUGHPUT_1); + + switch (throughput) { + case 0: + return 0; + case 1: + return 680000; + default: + return 600000 + 50000 * throughput; + } +} +EXPORT_SYMBOL(drm_dp_dsc_branch_max_overall_throughput); + +/** + * drm_dp_dsc_branch_max_line_width() - Branch device's max DSC line width + * @dsc_branch_dpcd: DSC branch capabilities from DPCD + * + * Return the branch device's maximum overall DSC line width, based on + * the device's @dsc_branch_dpcd capabilities. + * + * Returns: + * - 0: The maximum line width is not indicated by the device + * separately and it must be determined from the maximum + * slice count and slice-width supported by the device. + * - %-EINVAL: The device indicates an invalid maximum line width + * (< 5120 pixels). 
+ * - >= 5120: The maximum line width in pixels. + */ +int drm_dp_dsc_branch_max_line_width(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE]) +{ + int line_width = dsc_branch_dpcd_cap(dsc_branch_dpcd, DP_DSC_BRANCH_MAX_LINE_WIDTH); + + switch (line_width) { + case 0: + return 0; + case 1 ... 15: + return -EINVAL; + default: + return line_width * 320; + } +} +EXPORT_SYMBOL(drm_dp_dsc_branch_max_line_width); + static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], int address, u8 *buf, int buf_size) @@ -4128,22 +4293,61 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf { int fxp, fxp_min, fxp_max, fxp_actual, f = 1; int ret; - u8 pn, pn_min, pn_max; + u8 pn, pn_min, pn_max, bit_count; if (!bl->aux_set) return 0; - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn); + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &bit_count); if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n", aux->name, ret); return -ENODEV; } - pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + bit_count &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); + if (ret < 0) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n", + aux->name, ret); + return -ENODEV; + } + pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); + if (ret < 0) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n", + aux->name, ret); + return -ENODEV; + } + pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + if (unlikely(pn_min > pn_max)) { + drm_dbg_kms(aux->drm_dev, "%s: Invalid pwmgen bit count cap min/max returned: %d %d\n", + aux->name, pn_min, pn_max); + return -EINVAL; + } + + /* + * Per VESA eDP Spec v1.4b, section 3.3.10.2: + * If DP_EDP_PWMGEN_BIT_COUNT is less than DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, + * the sink must use the MIN value as the effective PWM bit count. + * Clamp the reported value to the [MIN, MAX] capability range to ensure + * correct brightness scaling on compliant eDP panels. + * Only enable this logic if the [MIN, MAX] range is valid in regard to Spec. + */ + pn = bit_count; + if (bit_count < pn_min) + pn = clamp(bit_count, pn_min, pn_max); + bl->max = (1 << pn) - 1; - if (!driver_pwm_freq_hz) + if (!driver_pwm_freq_hz) { + if (pn != bit_count) + goto bit_count_write_back; + return 0; + } /* * Set PWM Frequency divider to match desired frequency provided by the driver. @@ -4167,21 +4371,6 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf * - FxP is within 25% of desired value. * Note: 25% is arbitrary value and may need some tweak. 
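A minimal standalone sketch of the PWMGEN bit-count clamping introduced above, for reference; the helper name is hypothetical and only clamp() from <linux/minmax.h> is assumed beyond what the patch itself uses::

    /*
     * If the panel reports a PWMGEN bit count below its own advertised
     * CAP_MIN, the effective bit count is clamped into [CAP_MIN, CAP_MAX]
     * so that bl->max = (1 << pn) - 1 matches what the sink implements.
     * The invalid MIN > MAX case is rejected with -EINVAL by the patch;
     * this sketch simply keeps the reported value in that case.
     */
    static u8 foo_effective_pwmgen_bits(u8 reported, u8 cap_min, u8 cap_max)
    {
            if (cap_min > cap_max)
                    return reported;

            if (reported < cap_min)
                    return clamp(reported, cap_min, cap_max);

            return reported;
    }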
*/ - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); - if (ret < 0) { - drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n", - aux->name, ret); - return 0; - } - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); - if (ret < 0) { - drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n", - aux->name, ret); - return 0; - } - pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; - pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; - /* Ensure frequency is within 25% of desired value */ fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); @@ -4199,12 +4388,17 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf break; } +bit_count_write_back: ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn); if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", aux->name, ret); return 0; } + + if (!driver_pwm_freq_hz) + return 0; + bl->pwmgen_bit_count = pn; bl->max = (1 << pn) - 1; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index cd15cf52f0c9..67e095e398a3 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -42,6 +42,7 @@ #include #include #include +#include #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -107,6 +108,7 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state) kfree(state->connectors); kfree(state->crtcs); kfree(state->planes); + kfree(state->colorops); kfree(state->private_objs); } EXPORT_SYMBOL(drm_atomic_state_default_release); @@ -138,6 +140,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) sizeof(*state->planes), GFP_KERNEL); if (!state->planes) goto fail; + state->colorops = kcalloc(dev->mode_config.num_colorop, + sizeof(*state->colorops), GFP_KERNEL); + if (!state->colorops) + goto fail; /* * Because drm_atomic_state can be committed asynchronously we need our @@ -200,6 +206,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) drm_dbg_atomic(dev, "Clearing atomic state %p\n", state); + state->checked = false; + for (i = 0; i < state->num_connector; i++) { struct drm_connector *connector = state->connectors[i].ptr; @@ -207,9 +215,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; connector->funcs->atomic_destroy_state(connector, - state->connectors[i].state); + state->connectors[i].state_to_destroy); state->connectors[i].ptr = NULL; - state->connectors[i].state = NULL; + state->connectors[i].state_to_destroy = NULL; state->connectors[i].old_state = NULL; state->connectors[i].new_state = NULL; drm_connector_put(connector); @@ -222,10 +230,10 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; crtc->funcs->atomic_destroy_state(crtc, - state->crtcs[i].state); + state->crtcs[i].state_to_destroy); state->crtcs[i].ptr = NULL; - state->crtcs[i].state = NULL; + state->crtcs[i].state_to_destroy = NULL; state->crtcs[i].old_state = NULL; state->crtcs[i].new_state = NULL; @@ -242,20 +250,34 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; plane->funcs->atomic_destroy_state(plane, - state->planes[i].state); + state->planes[i].state_to_destroy); state->planes[i].ptr = NULL; - state->planes[i].state = NULL; + state->planes[i].state_to_destroy = NULL; state->planes[i].old_state = NULL; state->planes[i].new_state = NULL; } + for (i = 0; i < config->num_colorop; i++) { + struct drm_colorop 
*colorop = state->colorops[i].ptr; + + if (!colorop) + continue; + + drm_colorop_atomic_destroy_state(colorop, + state->colorops[i].state); + state->colorops[i].ptr = NULL; + state->colorops[i].state = NULL; + state->colorops[i].old_state = NULL; + state->colorops[i].new_state = NULL; + } + for (i = 0; i < state->num_private_objs; i++) { struct drm_private_obj *obj = state->private_objs[i].ptr; obj->funcs->atomic_destroy_state(obj, - state->private_objs[i].state); + state->private_objs[i].state_to_destroy); state->private_objs[i].ptr = NULL; - state->private_objs[i].state = NULL; + state->private_objs[i].state_to_destroy = NULL; state->private_objs[i].old_state = NULL; state->private_objs[i].new_state = NULL; } @@ -348,8 +370,9 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, struct drm_crtc_state *crtc_state; WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); - crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (crtc_state) return crtc_state; @@ -361,7 +384,7 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, if (!crtc_state) return ERR_PTR(-ENOMEM); - state->crtcs[index].state = crtc_state; + state->crtcs[index].state_to_destroy = crtc_state; state->crtcs[index].old_state = crtc->state; state->crtcs[index].new_state = crtc_state; state->crtcs[index].ptr = crtc; @@ -480,8 +503,8 @@ static int drm_atomic_connector_check(struct drm_connector *connector, } if (state->crtc) - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state->state, + state->crtc); if (writeback_job->fb && !crtc_state->active) { drm_dbg_atomic(connector->dev, @@ -528,13 +551,14 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, struct drm_plane_state *plane_state; WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); /* the legacy pointers should never be set */ WARN_ON(plane->fb); WARN_ON(plane->old_fb); WARN_ON(plane->crtc); - plane_state = drm_atomic_get_existing_plane_state(state, plane); + plane_state = drm_atomic_get_new_plane_state(state, plane); if (plane_state) return plane_state; @@ -546,7 +570,7 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, if (!plane_state) return ERR_PTR(-ENOMEM); - state->planes[index].state = plane_state; + state->planes[index].state_to_destroy = plane_state; state->planes[index].ptr = plane; state->planes[index].old_state = plane->state; state->planes[index].new_state = plane_state; @@ -568,6 +592,55 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, } EXPORT_SYMBOL(drm_atomic_get_plane_state); +/** + * drm_atomic_get_colorop_state - get colorop state + * @state: global atomic state object + * @colorop: colorop to get state object for + * + * This function returns the colorop state for the given colorop, allocating it + * if needed. It will also grab the relevant plane lock to make sure that the + * state is consistent. + * + * Returns: + * + * Either the allocated state or the error code encoded into the pointer. When + * the error is EDEADLK then the w/w mutex code has detected a deadlock and the + * entire atomic sequence must be restarted. All other errors are fatal. 
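A hedged usage sketch of the new accessor; the foo_* names are hypothetical and the relevant DRM atomic/colorop headers are assumed to be included::

    /*
     * Fetch the new state of the plane's first colorop during a driver's
     * atomic check. A NULL color_pipeline means the pipeline is bypassed;
     * -EDEADLK from drm_atomic_get_colorop_state() must be propagated so
     * the whole atomic sequence is restarted.
     */
    static int foo_check_first_colorop(struct drm_atomic_state *state,
                                       struct drm_plane_state *plane_state)
    {
            struct drm_colorop *colorop = plane_state->color_pipeline;
            struct drm_colorop_state *colorop_state;

            if (!colorop)
                    return 0;

            colorop_state = drm_atomic_get_colorop_state(state, colorop);
            if (IS_ERR(colorop_state))
                    return PTR_ERR(colorop_state);

            drm_dbg_atomic(colorop->dev, "[COLOROP:%u] bypass=%d\n",
                           colorop->base.id, colorop_state->bypass);

            return 0;
    }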
+ */ +struct drm_colorop_state * +drm_atomic_get_colorop_state(struct drm_atomic_state *state, + struct drm_colorop *colorop) +{ + int ret, index = drm_colorop_index(colorop); + struct drm_colorop_state *colorop_state; + + WARN_ON(!state->acquire_ctx); + + colorop_state = drm_atomic_get_new_colorop_state(state, colorop); + if (colorop_state) + return colorop_state; + + ret = drm_modeset_lock(&colorop->plane->mutex, state->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + colorop_state = drm_atomic_helper_colorop_duplicate_state(colorop); + if (!colorop_state) + return ERR_PTR(-ENOMEM); + + state->colorops[index].state = colorop_state; + state->colorops[index].ptr = colorop; + state->colorops[index].old_state = colorop->state; + state->colorops[index].new_state = colorop_state; + colorop_state->state = state; + + drm_dbg_atomic(colorop->dev, "Added [COLOROP:%d:%d] %p state to %p\n", + colorop->base.id, colorop->type, colorop_state, state); + + return colorop_state; +} +EXPORT_SYMBOL(drm_atomic_get_colorop_state); + static bool plane_switching_crtc(const struct drm_plane_state *old_plane_state, const struct drm_plane_state *new_plane_state) @@ -707,6 +780,46 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, return 0; } +static void drm_atomic_colorop_print_state(struct drm_printer *p, + const struct drm_colorop_state *state) +{ + struct drm_colorop *colorop = state->colorop; + + drm_printf(p, "colorop[%u]:\n", colorop->base.id); + drm_printf(p, "\ttype=%s\n", drm_get_colorop_type_name(colorop->type)); + if (colorop->bypass_property) + drm_printf(p, "\tbypass=%u\n", state->bypass); + + switch (colorop->type) { + case DRM_COLOROP_1D_CURVE: + drm_printf(p, "\tcurve_1d_type=%s\n", + drm_get_colorop_curve_1d_type_name(state->curve_1d_type)); + break; + case DRM_COLOROP_1D_LUT: + drm_printf(p, "\tsize=%d\n", colorop->size); + drm_printf(p, "\tinterpolation=%s\n", + drm_get_colorop_lut1d_interpolation_name(colorop->lut1d_interpolation)); + drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0); + break; + case DRM_COLOROP_CTM_3X4: + drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0); + break; + case DRM_COLOROP_MULTIPLIER: + drm_printf(p, "\tmultiplier=%llu\n", state->multiplier); + break; + case DRM_COLOROP_3D_LUT: + drm_printf(p, "\tsize=%d\n", colorop->size); + drm_printf(p, "\tinterpolation=%s\n", + drm_get_colorop_lut3d_interpolation_name(colorop->lut3d_interpolation)); + drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0); + break; + default: + break; + } + + drm_printf(p, "\tnext=%d\n", colorop->next ? colorop->next->base.id : 0); +} + static void drm_atomic_plane_print_state(struct drm_printer *p, const struct drm_plane_state *state) { @@ -728,7 +841,8 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, drm_printf(p, "\tcolor-range=%s\n", drm_get_color_range_name(state->color_range)); drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); - + drm_printf(p, "\tcolor-pipeline=%d\n", + state->color_pipeline ? 
state->color_pipeline->base.id : 0); if (plane->funcs->atomic_print_state) plane->funcs->atomic_print_state(p, state); } @@ -831,14 +945,17 @@ struct drm_private_state * drm_atomic_get_private_obj_state(struct drm_atomic_state *state, struct drm_private_obj *obj) { - int index, num_objs, i, ret; + int index, num_objs, ret; size_t size; struct __drm_private_objs_state *arr; struct drm_private_state *obj_state; - for (i = 0; i < state->num_private_objs; i++) - if (obj == state->private_objs[i].ptr) - return state->private_objs[i].state; + WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); + + obj_state = drm_atomic_get_new_private_obj_state(state, obj); + if (obj_state) + return obj_state; ret = drm_modeset_lock(&obj->lock, state->acquire_ctx); if (ret) @@ -858,7 +975,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state, if (!obj_state) return ERR_PTR(-ENOMEM); - state->private_objs[index].state = obj_state; + state->private_objs[index].state_to_destroy = obj_state; state->private_objs[index].old_state = obj->state; state->private_objs[index].new_state = obj_state; state->private_objs[index].ptr = obj; @@ -1129,6 +1246,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, struct drm_connector_state *connector_state; WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); if (ret) @@ -1152,15 +1270,16 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, state->num_connector = alloc; } - if (state->connectors[index].state) - return state->connectors[index].state; + connector_state = drm_atomic_get_new_connector_state(state, connector); + if (connector_state) + return connector_state; connector_state = connector->funcs->atomic_duplicate_state(connector); if (!connector_state) return ERR_PTR(-ENOMEM); drm_connector_get(connector); - state->connectors[index].state = connector_state; + state->connectors[index].state_to_destroy = connector_state; state->connectors[index].old_state = connector->state; state->connectors[index].new_state = connector_state; state->connectors[index].ptr = connector; @@ -1308,7 +1427,6 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, struct drm_encoder *encoder) { struct drm_bridge_state *bridge_state; - struct drm_bridge *bridge; if (!encoder) return 0; @@ -1317,7 +1435,7 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, "Adding all bridges for [encoder:%d:%s] to %p\n", encoder->base.id, encoder->name, state); - drm_for_each_bridge_in_chain(encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(encoder, bridge) { /* Skip bridges that don't implement the atomic state hooks. */ if (!bridge->funcs->atomic_duplicate_state) continue; @@ -1437,6 +1555,52 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state, } EXPORT_SYMBOL(drm_atomic_add_affected_planes); +/** + * drm_atomic_add_affected_colorops - add colorops for plane + * @state: atomic state + * @plane: DRM plane + * + * This function walks the current configuration and adds all colorops + * currently used by @plane to the atomic configuration @state. This is useful + * when an atomic commit also needs to check all currently enabled colorop on + * @plane, e.g. when changing the mode. It's also useful when re-enabling a plane + * to avoid special code to force-enable all colorops. 
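A short, hypothetical caller illustrating when the helper documented here is useful; foo_plane_reenable_check() is not part of the patch::

    /*
     * When a plane is re-enabled (or its CRTC mode changes), pull every
     * colorop currently bound to the plane into the atomic state so the
     * driver's atomic_check sees the complete pipeline, not just the ops
     * that userspace happened to touch in this commit.
     */
    static int foo_plane_reenable_check(struct drm_atomic_state *state,
                                        struct drm_plane *plane)
    {
            struct drm_plane_state *plane_state;

            plane_state = drm_atomic_get_plane_state(state, plane);
            if (IS_ERR(plane_state))
                    return PTR_ERR(plane_state);

            return drm_atomic_add_affected_colorops(state, plane);
    }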
+ * + * Since acquiring a colorop state will always also acquire the w/w mutex of the + * current plane for that colorop (if there is any) adding all the colorop states for + * a plane will not reduce parallelism of atomic updates. + * + * Returns: + * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK + * then the w/w mutex code has detected a deadlock and the entire atomic + * sequence must be restarted. All other errors are fatal. + */ +int +drm_atomic_add_affected_colorops(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + struct drm_colorop *colorop; + struct drm_colorop_state *colorop_state; + + WARN_ON(!drm_atomic_get_new_plane_state(state, plane)); + + drm_dbg_atomic(plane->dev, + "Adding all current colorops for [PLANE:%d:%s] to %p\n", + plane->base.id, plane->name, state); + + drm_for_each_colorop(colorop, plane->dev) { + if (colorop->plane != plane) + continue; + + colorop_state = drm_atomic_get_colorop_state(state, colorop); + if (IS_ERR(colorop_state)) + return PTR_ERR(colorop_state); + } + + return 0; +} +EXPORT_SYMBOL(drm_atomic_add_affected_colorops); + /** * drm_atomic_check_only - check whether a given config would work * @state: atomic configuration to check @@ -1541,6 +1705,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) requested_crtc, affected_crtc); } + state->checked = true; + return 0; } EXPORT_SYMBOL(drm_atomic_check_only); @@ -1833,6 +1999,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, bool take_locks) { struct drm_mode_config *config = &dev->mode_config; + struct drm_colorop *colorop; struct drm_plane *plane; struct drm_crtc *crtc; struct drm_connector *connector; @@ -1842,6 +2009,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, if (!drm_drv_uses_atomic_modeset(dev)) return; + list_for_each_entry(colorop, &config->colorop_list, head) { + if (take_locks) + drm_modeset_lock(&colorop->plane->mutex, NULL); + drm_atomic_colorop_print_state(p, colorop->state); + if (take_locks) + drm_modeset_unlock(&colorop->plane->mutex); + } + list_for_each_entry(plane, &config->plane_list, head) { if (take_locks) drm_modeset_lock(&plane->mutex, NULL); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index d5ebe6ea0acb..10adac9397cf 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1831,10 +1831,12 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, } for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { + wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc); + if (!(crtc_mask & drm_crtc_mask(crtc))) continue; - ret = wait_event_timeout(dev->vblank[i].queue, + ret = wait_event_timeout(*queue, state->crtcs[i].last_vblank_count != drm_crtc_vblank_count(crtc), msecs_to_jiffies(100)); @@ -3182,6 +3184,8 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_colorop *colorop; + struct drm_colorop_state *old_colorop_state, *new_colorop_state; struct drm_crtc_commit *commit; struct drm_private_obj *obj; struct drm_private_state *old_obj_state, *new_obj_state; @@ -3236,7 +3240,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_conn_state->state = state; new_conn_state->state = NULL; - state->connectors[i].state = old_conn_state; + state->connectors[i].state_to_destroy = 
old_conn_state; connector->state = new_conn_state; } @@ -3246,7 +3250,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_crtc_state->state = state; new_crtc_state->state = NULL; - state->crtcs[i].state = old_crtc_state; + state->crtcs[i].state_to_destroy = old_crtc_state; crtc->state = new_crtc_state; if (new_crtc_state->commit) { @@ -3259,6 +3263,16 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, } } + for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) { + WARN_ON(colorop->state != old_colorop_state); + + old_colorop_state->state = state; + new_colorop_state->state = NULL; + + state->colorops[i].state = old_colorop_state; + colorop->state = new_colorop_state; + } + drm_panic_lock(state->dev, flags); for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { WARN_ON(plane->state != old_plane_state); @@ -3266,7 +3280,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_plane_state->state = state; new_plane_state->state = NULL; - state->planes[i].state = old_plane_state; + state->planes[i].state_to_destroy = old_plane_state; plane->state = new_plane_state; } drm_panic_unlock(state->dev, flags); @@ -3277,7 +3291,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_obj_state->state = state; new_obj_state->state = NULL; - state->private_objs[i].state = old_obj_state; + state->private_objs[i].state_to_destroy = old_obj_state; obj->state = new_obj_state; } diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 7142e163e618..cee6d8fc44ad 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -268,6 +268,11 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state, plane_state->color_range = val; } + if (plane->color_pipeline_property) { + /* default is always NULL, i.e., bypass */ + plane_state->color_pipeline = NULL; + } + if (plane->zpos_property) { if (!drm_object_property_get_default_value(&plane->base, plane->zpos_property, diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 85dbdaa4a2e2..7320db4b8489 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -257,6 +258,34 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, } EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); +/** + * drm_atomic_set_colorop_for_plane - set colorop for plane + * @plane_state: atomic state object for the plane + * @colorop: colorop to use for the plane + * + * Helper function to select the color pipeline on a plane by setting + * it to the first drm_colorop element of the pipeline. 
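A hedged sketch of how a driver might consume the pointer set by this helper at commit time; the foo_* names and the hardware hook are hypothetical::

    static void foo_program_colorop(struct drm_colorop *colorop,
                                    const struct drm_colorop_state *state); /* hw-specific */

    /*
     * Walk the colorop pipeline selected on a plane, starting at
     * plane_state->color_pipeline and following colorop->next, and
     * program each op that is not bypassed. A NULL color_pipeline
     * means the whole pipeline is bypassed.
     */
    static void foo_program_color_pipeline(const struct drm_plane_state *plane_state)
    {
            struct drm_colorop *colorop = plane_state->color_pipeline;

            for (; colorop; colorop = colorop->next) {
                    const struct drm_colorop_state *colorop_state = colorop->state;

                    if (colorop_state->bypass)
                            continue;

                    foo_program_colorop(colorop, colorop_state);
            }
    }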
+ */ +void +drm_atomic_set_colorop_for_plane(struct drm_plane_state *plane_state, + struct drm_colorop *colorop) +{ + struct drm_plane *plane = plane_state->plane; + + if (colorop) + drm_dbg_atomic(plane->dev, + "Set [COLOROP:%d] for [PLANE:%d:%s] state %p\n", + colorop->base.id, plane->base.id, plane->name, + plane_state); + else + drm_dbg_atomic(plane->dev, + "Set [NOCOLOROP] for [PLANE:%d:%s] state %p\n", + plane->base.id, plane->name, plane_state); + + plane_state->color_pipeline = colorop; +} +EXPORT_SYMBOL(drm_atomic_set_colorop_for_plane); + /** * drm_atomic_set_crtc_for_connector - set CRTC for connector * @conn_state: atomic state object for the connector @@ -419,6 +448,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, set_out_fence_for_crtc(state->state, crtc, fence_ptr); } else if (property == crtc->scaling_filter_property) { state->scaling_filter = val; + } else if (property == crtc->sharpness_strength_property) { + state->sharpness_strength = val; } else if (crtc->funcs->atomic_set_property) { return crtc->funcs->atomic_set_property(crtc, state, property, val); } else { @@ -456,6 +487,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc, *val = 0; else if (property == crtc->scaling_filter_property) *val = state->scaling_filter; + else if (property == crtc->sharpness_strength_property) + *val = state->sharpness_strength; else if (crtc->funcs->atomic_get_property) return crtc->funcs->atomic_get_property(crtc, state, property, val); else { @@ -540,6 +573,16 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, state->color_encoding = val; } else if (property == plane->color_range_property) { state->color_range = val; + } else if (property == plane->color_pipeline_property) { + /* find DRM colorop object */ + struct drm_colorop *colorop = NULL; + + colorop = drm_colorop_find(dev, file_priv, val); + + if (val && !colorop) + return -EACCES; + + drm_atomic_set_colorop_for_plane(state, colorop); } else if (property == config->prop_fb_damage_clips) { ret = drm_property_replace_blob_from_id(dev, &state->fb_damage_clips, @@ -622,6 +665,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->color_encoding; } else if (property == plane->color_range_property) { *val = state->color_range; + } else if (property == plane->color_pipeline_property) { + *val = (state->color_pipeline) ? state->color_pipeline->base.id : 0; } else if (property == config->prop_fb_damage_clips) { *val = (state->fb_damage_clips) ? 
state->fb_damage_clips->base.id : 0; @@ -644,6 +689,96 @@ drm_atomic_plane_get_property(struct drm_plane *plane, return 0; } +static int drm_atomic_color_set_data_property(struct drm_colorop *colorop, + struct drm_colorop_state *state, + struct drm_property *property, + uint64_t val) +{ + ssize_t elem_size = -1; + ssize_t size = -1; + bool replaced = false; + + switch (colorop->type) { + case DRM_COLOROP_1D_LUT: + size = colorop->size * sizeof(struct drm_color_lut32); + break; + case DRM_COLOROP_CTM_3X4: + size = sizeof(struct drm_color_ctm_3x4); + break; + case DRM_COLOROP_3D_LUT: + size = colorop->size * colorop->size * colorop->size * + sizeof(struct drm_color_lut32); + break; + default: + /* should never get here */ + return -EINVAL; + } + + return drm_property_replace_blob_from_id(colorop->dev, + &state->data, + val, + size, + elem_size, + &replaced); +} + +static int drm_atomic_colorop_set_property(struct drm_colorop *colorop, + struct drm_colorop_state *state, + struct drm_file *file_priv, + struct drm_property *property, + uint64_t val) +{ + if (property == colorop->bypass_property) { + state->bypass = val; + } else if (property == colorop->lut1d_interpolation_property) { + colorop->lut1d_interpolation = val; + } else if (property == colorop->curve_1d_type_property) { + state->curve_1d_type = val; + } else if (property == colorop->multiplier_property) { + state->multiplier = val; + } else if (property == colorop->lut3d_interpolation_property) { + colorop->lut3d_interpolation = val; + } else if (property == colorop->data_property) { + return drm_atomic_color_set_data_property(colorop, state, + property, val); + } else { + drm_dbg_atomic(colorop->dev, + "[COLOROP:%d:%d] unknown property [PROP:%d:%s]\n", + colorop->base.id, colorop->type, + property->base.id, property->name); + return -EINVAL; + } + + return 0; +} + +static int +drm_atomic_colorop_get_property(struct drm_colorop *colorop, + const struct drm_colorop_state *state, + struct drm_property *property, uint64_t *val) +{ + if (property == colorop->type_property) + *val = colorop->type; + else if (property == colorop->bypass_property) + *val = state->bypass; + else if (property == colorop->lut1d_interpolation_property) + *val = colorop->lut1d_interpolation; + else if (property == colorop->curve_1d_type_property) + *val = state->curve_1d_type; + else if (property == colorop->multiplier_property) + *val = state->multiplier; + else if (property == colorop->size_property) + *val = colorop->size; + else if (property == colorop->lut3d_interpolation_property) + *val = colorop->lut3d_interpolation; + else if (property == colorop->data_property) + *val = (state->data) ? 
state->data->base.id : 0; + else + return -EINVAL; + + return 0; +} + static int drm_atomic_set_writeback_fb_for_connector( struct drm_connector_state *conn_state, struct drm_framebuffer *fb) @@ -910,6 +1045,15 @@ int drm_atomic_get_property(struct drm_mode_object *obj, plane->state, property, val); break; } + case DRM_MODE_OBJECT_COLOROP: { + struct drm_colorop *colorop = obj_to_colorop(obj); + + if (colorop->plane) + WARN_ON(!drm_modeset_is_locked(&colorop->plane->mutex)); + + ret = drm_atomic_colorop_get_property(colorop, colorop->state, property, val); + break; + } default: drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj->id); ret = -EINVAL; @@ -1107,6 +1251,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state, ret = drm_atomic_plane_set_property(plane, plane_state, file_priv, prop, prop_value); + + break; + } + case DRM_MODE_OBJECT_COLOROP: { + struct drm_colorop *colorop = obj_to_colorop(obj); + struct drm_colorop_state *colorop_state; + + colorop_state = drm_atomic_get_colorop_state(state, colorop); + if (IS_ERR(colorop_state)) { + ret = PTR_ERR(colorop_state); + break; + } + + ret = drm_atomic_colorop_set_property(colorop, colorop_state, + file_priv, prop, prop_value); break; } default: @@ -1446,6 +1605,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); state->acquire_ctx = &ctx; state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); + state->plane_color_pipeline = file_priv->plane_color_pipeline; retry: copied_objs = 0; diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index d031447eebc9..8f355df883d8 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -197,15 +197,22 @@ * driver. */ +/* Protect bridge_list and bridge_lingering_list */ static DEFINE_MUTEX(bridge_lock); static LIST_HEAD(bridge_list); +static LIST_HEAD(bridge_lingering_list); static void __drm_bridge_free(struct kref *kref) { struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount); + mutex_lock(&bridge_lock); + list_del(&bridge->list); + mutex_unlock(&bridge_lock); + if (bridge->funcs->destroy) bridge->funcs->destroy(bridge); + kfree(bridge->container); } @@ -273,6 +280,7 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, return ERR_PTR(-ENOMEM); bridge = container + offset; + INIT_LIST_HEAD(&bridge->list); bridge->container = container; bridge->funcs = funcs; kref_init(&bridge->refcount); @@ -286,10 +294,13 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, EXPORT_SYMBOL(__devm_drm_bridge_alloc); /** - * drm_bridge_add - add the given bridge to the global bridge list + * drm_bridge_add - register a bridge * * @bridge: bridge control structure * + * Add the given bridge to the global list of bridges, where they can be + * found by users via of_drm_find_bridge(). + * * The bridge to be added must have been allocated by * devm_drm_bridge_alloc(). */ @@ -300,6 +311,14 @@ void drm_bridge_add(struct drm_bridge *bridge) drm_bridge_get(bridge); + /* + * If the bridge was previously added and then removed, it is now + * in bridge_lingering_list. Remove it or bridge_lingering_list will be + * corrupted when adding this bridge to bridge_list below. 
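A skeleton of the expected registration flow under the new rules (add before attach, memory freed only on the last put); everything prefixed foo_ is hypothetical and the usual platform-device and drm_bridge headers are assumed::

    struct foo_bridge {
            struct drm_bridge bridge;
    };

    /* attach/enable hooks elided for brevity */
    static const struct drm_bridge_funcs foo_bridge_funcs;

    static int foo_bridge_probe(struct platform_device *pdev)
    {
            struct foo_bridge *priv;

            priv = devm_drm_bridge_alloc(&pdev->dev, struct foo_bridge, bridge,
                                         &foo_bridge_funcs);
            if (IS_ERR(priv))
                    return PTR_ERR(priv);

            priv->bridge.of_node = pdev->dev.of_node;
            priv->bridge.type = DRM_MODE_CONNECTOR_DPI;

            /* Register before any consumer can call drm_bridge_attach(). */
            drm_bridge_add(&priv->bridge);
            platform_set_drvdata(pdev, priv);

            return 0;
    }

    static void foo_bridge_remove(struct platform_device *pdev)
    {
            struct foo_bridge *priv = platform_get_drvdata(pdev);

            /* Moves the bridge to the lingering list; freed on last put. */
            drm_bridge_remove(&priv->bridge);
    }

With this series, drm_bridge_attach() also warns if the bridge was not previously added with drm_bridge_add() or was not allocated via devm_drm_bridge_alloc().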
+ */ + if (!list_empty(&bridge->list)) + list_del_init(&bridge->list); + mutex_init(&bridge->hpd_mutex); if (bridge->ops & DRM_BRIDGE_OP_HDMI) @@ -336,14 +355,19 @@ int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge) EXPORT_SYMBOL(devm_drm_bridge_add); /** - * drm_bridge_remove - remove the given bridge from the global bridge list + * drm_bridge_remove - unregister a bridge * * @bridge: bridge control structure + * + * Remove the given bridge from the global list of registered bridges, so + * it won't be found by users via of_drm_find_bridge(), and add it to the + * lingering bridge list, to keep track of it until its allocated memory is + * eventually freed. */ void drm_bridge_remove(struct drm_bridge *bridge) { mutex_lock(&bridge_lock); - list_del_init(&bridge->list); + list_move_tail(&bridge->list, &bridge_lingering_list); mutex_unlock(&bridge_lock); mutex_destroy(&bridge->hpd_mutex); @@ -398,6 +422,9 @@ static bool drm_bridge_is_atomic(struct drm_bridge *bridge) * If non-NULL the previous bridge must be already attached by a call to this * function. * + * The bridge to be attached must have been previously added by + * drm_bridge_add(). + * * Note that bridges attached to encoders are auto-detached during encoder * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally * *not* be balanced with a drm_bridge_detach() in driver code. @@ -414,6 +441,12 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, if (!encoder || !bridge) return -EINVAL; + if (!bridge->container) + DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n"); + + if (list_empty(&bridge->list)) + DRM_WARN("Missing drm_bridge_add() before attach\n"); + drm_bridge_get(bridge); if (previous && (!previous->dev || previous->encoder != encoder)) { @@ -1062,12 +1095,12 @@ drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge, struct drm_encoder *encoder = bridge->encoder; struct drm_bridge_state *last_bridge_state; unsigned int i, num_out_bus_fmts = 0; - struct drm_bridge *last_bridge; u32 *out_bus_fmts; int ret = 0; - last_bridge = list_last_entry(&encoder->bridge_chain, - struct drm_bridge, chain_node); + struct drm_bridge *last_bridge __free(drm_bridge_put) = + drm_bridge_get(list_last_entry(&encoder->bridge_chain, + struct drm_bridge, chain_node)); last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, last_bridge); @@ -1121,7 +1154,6 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_bridge_state *bridge_state, *next_bridge_state; - struct drm_bridge *next_bridge; u32 output_flags = 0; bridge_state = drm_atomic_get_new_bridge_state(state, bridge); @@ -1130,7 +1162,7 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, if (!bridge_state) return; - next_bridge = drm_bridge_get_next_bridge(bridge); + struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge); /* * Let's try to apply the most common case here, that is, propagate @@ -1432,17 +1464,20 @@ EXPORT_SYMBOL(devm_drm_put_bridge); static void drm_bridge_debugfs_show_bridge(struct drm_printer *p, struct drm_bridge *bridge, - unsigned int idx) + unsigned int idx, + bool lingering) { drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs); - drm_printf(p, "\trefcount: %u\n", kref_read(&bridge->refcount)); + drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount), + lingering ? 
" [lingering]" : ""); drm_printf(p, "\ttype: [%d] %s\n", bridge->type, drm_get_connector_type_name(bridge->type)); - if (bridge->of_node) + /* The OF node could be freed after drm_bridge_remove() */ + if (bridge->of_node && !lingering) drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node); drm_printf(p, "\tops: [0x%x]", bridge->ops); @@ -1468,7 +1503,10 @@ static int allbridges_show(struct seq_file *m, void *data) mutex_lock(&bridge_lock); list_for_each_entry(bridge, &bridge_list, list) - drm_bridge_debugfs_show_bridge(&p, bridge, idx++); + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false); + + list_for_each_entry(bridge, &bridge_lingering_list, list) + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true); mutex_unlock(&bridge_lock); @@ -1480,11 +1518,10 @@ static int encoder_bridges_show(struct seq_file *m, void *data) { struct drm_encoder *encoder = m->private; struct drm_printer p = drm_seq_file_printer(m); - struct drm_bridge *bridge; unsigned int idx = 0; - drm_for_each_bridge_in_chain(encoder, bridge) - drm_bridge_debugfs_show_bridge(&p, bridge, idx++); + drm_for_each_bridge_in_chain_scoped(encoder, bridge) + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false); return 0; } diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index a94061f373de..2f279b46bd2c 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -11,9 +11,19 @@ #include #include +#include + +enum drm_buddy_free_tree { + DRM_BUDDY_CLEAR_TREE = 0, + DRM_BUDDY_DIRTY_TREE, + DRM_BUDDY_MAX_FREE_TREES, +}; static struct kmem_cache *slab_blocks; +#define for_each_free_tree(tree) \ + for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++) + static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, struct drm_buddy_block *parent, unsigned int order, @@ -31,6 +41,8 @@ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, block->header |= order; block->parent = parent; + RB_CLEAR_NODE(&block->rb); + BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED); return block; } @@ -41,23 +53,64 @@ static void drm_block_free(struct drm_buddy *mm, kmem_cache_free(slab_blocks, block); } -static void list_insert_sorted(struct drm_buddy *mm, - struct drm_buddy_block *block) +static enum drm_buddy_free_tree +get_block_tree(struct drm_buddy_block *block) { - struct drm_buddy_block *node; - struct list_head *head; + return drm_buddy_block_is_clear(block) ? + DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; +} - head = &mm->free_list[drm_buddy_block_order(block)]; - if (list_empty(head)) { - list_add(&block->link, head); - return; - } +static struct drm_buddy_block * +rbtree_get_free_block(const struct rb_node *node) +{ + return node ? 
rb_entry(node, struct drm_buddy_block, rb) : NULL; +} - list_for_each_entry(node, head, link) - if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node)) - break; +static struct drm_buddy_block * +rbtree_last_free_block(struct rb_root *root) +{ + return rbtree_get_free_block(rb_last(root)); +} - __list_add(&block->link, node->link.prev, &node->link); +static bool rbtree_is_empty(struct rb_root *root) +{ + return RB_EMPTY_ROOT(root); +} + +static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block, + const struct drm_buddy_block *node) +{ + return drm_buddy_block_offset(block) < drm_buddy_block_offset(node); +} + +static bool rbtree_block_offset_less(struct rb_node *block, + const struct rb_node *node) +{ + return drm_buddy_block_offset_less(rbtree_get_free_block(block), + rbtree_get_free_block(node)); +} + +static void rbtree_insert(struct drm_buddy *mm, + struct drm_buddy_block *block, + enum drm_buddy_free_tree tree) +{ + rb_add(&block->rb, + &mm->free_trees[tree][drm_buddy_block_order(block)], + rbtree_block_offset_less); +} + +static void rbtree_remove(struct drm_buddy *mm, + struct drm_buddy_block *block) +{ + unsigned int order = drm_buddy_block_order(block); + enum drm_buddy_free_tree tree; + struct rb_root *root; + + tree = get_block_tree(block); + root = &mm->free_trees[tree][order]; + + rb_erase(&block->rb, root); + RB_CLEAR_NODE(&block->rb); } static void clear_reset(struct drm_buddy_block *block) @@ -70,29 +123,34 @@ static void mark_cleared(struct drm_buddy_block *block) block->header |= DRM_BUDDY_HEADER_CLEAR; } -static void mark_allocated(struct drm_buddy_block *block) +static void mark_allocated(struct drm_buddy *mm, + struct drm_buddy_block *block) { block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_ALLOCATED; - list_del(&block->link); + rbtree_remove(mm, block); } static void mark_free(struct drm_buddy *mm, struct drm_buddy_block *block) { + enum drm_buddy_free_tree tree; + block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_FREE; - list_insert_sorted(mm, block); + tree = get_block_tree(block); + rbtree_insert(mm, block, tree); } -static void mark_split(struct drm_buddy_block *block) +static void mark_split(struct drm_buddy *mm, + struct drm_buddy_block *block) { block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_SPLIT; - list_del(&block->link); + rbtree_remove(mm, block); } static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2) @@ -148,7 +206,7 @@ static unsigned int __drm_buddy_free(struct drm_buddy *mm, mark_cleared(parent); } - list_del(&buddy->link); + rbtree_remove(mm, buddy); if (force_merge && drm_buddy_block_is_clear(buddy)) mm->clear_avail -= drm_buddy_block_size(mm, buddy); @@ -169,7 +227,7 @@ static int __force_merge(struct drm_buddy *mm, u64 end, unsigned int min_order) { - unsigned int order; + unsigned int tree, order; int i; if (!min_order) @@ -178,44 +236,48 @@ static int __force_merge(struct drm_buddy *mm, if (min_order > mm->max_order) return -EINVAL; - for (i = min_order - 1; i >= 0; i--) { - struct drm_buddy_block *block, *prev; + for_each_free_tree(tree) { + for (i = min_order - 1; i >= 0; i--) { + struct rb_node *iter = rb_last(&mm->free_trees[tree][i]); - list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) { - struct drm_buddy_block *buddy; - u64 block_start, block_end; + while (iter) { + struct drm_buddy_block *block, *buddy; + u64 block_start, block_end; - if (!block->parent) - continue; + block = rbtree_get_free_block(iter); + iter = 
rb_prev(iter); - block_start = drm_buddy_block_offset(block); - block_end = block_start + drm_buddy_block_size(mm, block) - 1; + if (!block || !block->parent) + continue; - if (!contains(start, end, block_start, block_end)) - continue; + block_start = drm_buddy_block_offset(block); + block_end = block_start + drm_buddy_block_size(mm, block) - 1; - buddy = __get_buddy(block); - if (!drm_buddy_block_is_free(buddy)) - continue; + if (!contains(start, end, block_start, block_end)) + continue; - WARN_ON(drm_buddy_block_is_clear(block) == - drm_buddy_block_is_clear(buddy)); + buddy = __get_buddy(block); + if (!drm_buddy_block_is_free(buddy)) + continue; - /* - * If the prev block is same as buddy, don't access the - * block in the next iteration as we would free the - * buddy block as part of the free function. - */ - if (prev == buddy) - prev = list_prev_entry(prev, link); + WARN_ON(drm_buddy_block_is_clear(block) == + drm_buddy_block_is_clear(buddy)); - list_del(&block->link); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -= drm_buddy_block_size(mm, block); + /* + * Advance to the next node when the current node is the buddy, + * as freeing the block will also remove its buddy from the tree. + */ + if (iter == &buddy->rb) + iter = rb_prev(iter); - order = __drm_buddy_free(mm, block, true); - if (order >= min_order) - return 0; + rbtree_remove(mm, block); + if (drm_buddy_block_is_clear(block)) + mm->clear_avail -= drm_buddy_block_size(mm, block); + + order = __drm_buddy_free(mm, block, true); + if (order >= min_order) + return 0; + } } } @@ -236,8 +298,8 @@ static int __force_merge(struct drm_buddy *mm, */ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) { - unsigned int i; - u64 offset; + unsigned int i, j, root_count = 0; + u64 offset = 0; if (size < chunk_size) return -EINVAL; @@ -258,14 +320,22 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER); - mm->free_list = kmalloc_array(mm->max_order + 1, - sizeof(struct list_head), - GFP_KERNEL); - if (!mm->free_list) + mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES, + sizeof(*mm->free_trees), + GFP_KERNEL); + if (!mm->free_trees) return -ENOMEM; - for (i = 0; i <= mm->max_order; ++i) - INIT_LIST_HEAD(&mm->free_list[i]); + for_each_free_tree(i) { + mm->free_trees[i] = kmalloc_array(mm->max_order + 1, + sizeof(struct rb_root), + GFP_KERNEL); + if (!mm->free_trees[i]) + goto out_free_tree; + + for (j = 0; j <= mm->max_order; ++j) + mm->free_trees[i][j] = RB_ROOT; + } mm->n_roots = hweight64(size); @@ -273,10 +343,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) sizeof(struct drm_buddy_block *), GFP_KERNEL); if (!mm->roots) - goto out_free_list; - - offset = 0; - i = 0; + goto out_free_tree; /* * Split into power-of-two blocks, in case we are given a size that is @@ -296,24 +363,26 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) mark_free(mm, root); - BUG_ON(i > mm->max_order); + BUG_ON(root_count > mm->max_order); BUG_ON(drm_buddy_block_size(mm, root) < chunk_size); - mm->roots[i] = root; + mm->roots[root_count] = root; offset += root_size; size -= root_size; - i++; + root_count++; } while (size); return 0; out_free_roots: - while (i--) - drm_block_free(mm, mm->roots[i]); + while (root_count--) + drm_block_free(mm, mm->roots[root_count]); kfree(mm->roots); -out_free_list: - kfree(mm->free_list); +out_free_tree: + while (i--) + kfree(mm->free_trees[i]); + kfree(mm->free_trees); return -ENOMEM; } 
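A hedged usage sketch showing that the allocator's external API is unchanged by the list-to-rbtree conversion; sizes are arbitrary and the drm_buddy and sizes headers are assumed. The clear/dirty split lets DRM_BUDDY_CLEAR_ALLOCATION be served from the tree of already-cleared blocks first::

    static int foo_buddy_demo(void)
    {
            struct drm_buddy mm;
            LIST_HEAD(blocks);
            int err;

            err = drm_buddy_init(&mm, SZ_4G, SZ_4K);
            if (err)
                    return err;

            /* Prefer already-cleared blocks, allocated top-down. */
            err = drm_buddy_alloc_blocks(&mm, 0, SZ_4G, SZ_8M, SZ_4K, &blocks,
                                         DRM_BUDDY_CLEAR_ALLOCATION |
                                         DRM_BUDDY_TOPDOWN_ALLOCATION);
            if (!err)
                    drm_buddy_free_list(&mm, &blocks, DRM_BUDDY_CLEARED);

            drm_buddy_fini(&mm);

            return err;
    }

Internally, rb_last() on the per-order tree takes over the role of the tail of the old per-order list, so the highest-offset free block of a given order is still picked first.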
EXPORT_SYMBOL(drm_buddy_init); @@ -323,7 +392,7 @@ EXPORT_SYMBOL(drm_buddy_init); * * @mm: DRM buddy manager to free * - * Cleanup memory manager resources and the freelist + * Cleanup memory manager resources and the freetree */ void drm_buddy_fini(struct drm_buddy *mm) { @@ -349,8 +418,9 @@ void drm_buddy_fini(struct drm_buddy *mm) WARN_ON(mm->avail != mm->size); + for_each_free_tree(i) + kfree(mm->free_trees[i]); kfree(mm->roots); - kfree(mm->free_list); } EXPORT_SYMBOL(drm_buddy_fini); @@ -374,8 +444,7 @@ static int split_block(struct drm_buddy *mm, return -ENOMEM; } - mark_free(mm, block->left); - mark_free(mm, block->right); + mark_split(mm, block); if (drm_buddy_block_is_clear(block)) { mark_cleared(block->left); @@ -383,7 +452,8 @@ static int split_block(struct drm_buddy *mm, clear_reset(block); } - mark_split(block); + mark_free(mm, block->left); + mark_free(mm, block->right); return 0; } @@ -412,10 +482,11 @@ EXPORT_SYMBOL(drm_get_buddy); * @is_clear: blocks clear state * * Reset the clear state based on @is_clear value for each block - * in the freelist. + * in the freetree. */ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) { + enum drm_buddy_free_tree src_tree, dst_tree; u64 root_size, size, start; unsigned int order; int i; @@ -430,19 +501,24 @@ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) size -= root_size; } - for (i = 0; i <= mm->max_order; ++i) { - struct drm_buddy_block *block; + src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; + dst_tree = is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; - list_for_each_entry_reverse(block, &mm->free_list[i], link) { - if (is_clear != drm_buddy_block_is_clear(block)) { - if (is_clear) { - mark_cleared(block); - mm->clear_avail += drm_buddy_block_size(mm, block); - } else { - clear_reset(block); - mm->clear_avail -= drm_buddy_block_size(mm, block); - } + for (i = 0; i <= mm->max_order; ++i) { + struct rb_root *root = &mm->free_trees[src_tree][i]; + struct drm_buddy_block *block, *tmp; + + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { + rbtree_remove(mm, block); + if (is_clear) { + mark_cleared(block); + mm->clear_avail += drm_buddy_block_size(mm, block); + } else { + clear_reset(block); + mm->clear_avail -= drm_buddy_block_size(mm, block); } + + rbtree_insert(mm, block, dst_tree); } } } @@ -632,23 +708,17 @@ __drm_buddy_alloc_range_bias(struct drm_buddy *mm, } static struct drm_buddy_block * -get_maxblock(struct drm_buddy *mm, unsigned int order, - unsigned long flags) +get_maxblock(struct drm_buddy *mm, + unsigned int order, + enum drm_buddy_free_tree tree) { struct drm_buddy_block *max_block = NULL, *block = NULL; + struct rb_root *root; unsigned int i; for (i = order; i <= mm->max_order; ++i) { - struct drm_buddy_block *tmp_block; - - list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) { - if (block_incompatible(tmp_block, flags)) - continue; - - block = tmp_block; - break; - } - + root = &mm->free_trees[tree][i]; + block = rbtree_last_free_block(root); if (!block) continue; @@ -667,46 +737,44 @@ get_maxblock(struct drm_buddy *mm, unsigned int order, } static struct drm_buddy_block * -alloc_from_freelist(struct drm_buddy *mm, +alloc_from_freetree(struct drm_buddy *mm, unsigned int order, unsigned long flags) { struct drm_buddy_block *block = NULL; + struct rb_root *root; + enum drm_buddy_free_tree tree; unsigned int tmp; int err; + tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ? 
+ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; + if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { - block = get_maxblock(mm, order, flags); + block = get_maxblock(mm, order, tree); if (block) /* Store the obtained block order */ tmp = drm_buddy_block_order(block); } else { for (tmp = order; tmp <= mm->max_order; ++tmp) { - struct drm_buddy_block *tmp_block; - - list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) { - if (block_incompatible(tmp_block, flags)) - continue; - - block = tmp_block; - break; - } - + /* Get RB tree root for this order and tree */ + root = &mm->free_trees[tree][tmp]; + block = rbtree_last_free_block(root); if (block) break; } } if (!block) { - /* Fallback method */ + /* Try allocating from the other tree */ + tree = (tree == DRM_BUDDY_CLEAR_TREE) ? + DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; + for (tmp = order; tmp <= mm->max_order; ++tmp) { - if (!list_empty(&mm->free_list[tmp])) { - block = list_last_entry(&mm->free_list[tmp], - struct drm_buddy_block, - link); - if (block) - break; - } + root = &mm->free_trees[tree][tmp]; + block = rbtree_last_free_block(root); + if (block) + break; } if (!block) @@ -771,7 +839,7 @@ static int __alloc_range(struct drm_buddy *mm, if (contains(start, end, block_start, block_end)) { if (drm_buddy_block_is_free(block)) { - mark_allocated(block); + mark_allocated(mm, block); total_allocated += drm_buddy_block_size(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) @@ -849,10 +917,9 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, { u64 rhs_offset, lhs_offset, lhs_size, filled; struct drm_buddy_block *block; - struct list_head *list; + unsigned int tree, order; LIST_HEAD(blocks_lhs); unsigned long pages; - unsigned int order; u64 modify_size; int err; @@ -862,35 +929,45 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, if (order == 0) return -ENOSPC; - list = &mm->free_list[order]; - if (list_empty(list)) - return -ENOSPC; + for_each_free_tree(tree) { + struct rb_root *root; + struct rb_node *iter; - list_for_each_entry_reverse(block, list, link) { - /* Allocate blocks traversing RHS */ - rhs_offset = drm_buddy_block_offset(block); - err = __drm_buddy_alloc_range(mm, rhs_offset, size, - &filled, blocks); - if (!err || err != -ENOSPC) - return err; + root = &mm->free_trees[tree][order]; + if (rbtree_is_empty(root)) + continue; - lhs_size = max((size - filled), min_block_size); - if (!IS_ALIGNED(lhs_size, min_block_size)) - lhs_size = round_up(lhs_size, min_block_size); + iter = rb_last(root); + while (iter) { + block = rbtree_get_free_block(iter); - /* Allocate blocks traversing LHS */ - lhs_offset = drm_buddy_block_offset(block) - lhs_size; - err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, - NULL, &blocks_lhs); - if (!err) { - list_splice(&blocks_lhs, blocks); - return 0; - } else if (err != -ENOSPC) { + /* Allocate blocks traversing RHS */ + rhs_offset = drm_buddy_block_offset(block); + err = __drm_buddy_alloc_range(mm, rhs_offset, size, + &filled, blocks); + if (!err || err != -ENOSPC) + return err; + + lhs_size = max((size - filled), min_block_size); + if (!IS_ALIGNED(lhs_size, min_block_size)) + lhs_size = round_up(lhs_size, min_block_size); + + /* Allocate blocks traversing LHS */ + lhs_offset = drm_buddy_block_offset(block) - lhs_size; + err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, + NULL, &blocks_lhs); + if (!err) { + list_splice(&blocks_lhs, blocks); + return 0; + } else if (err != -ENOSPC) { + drm_buddy_free_list_internal(mm, 
blocks); + return err; + } + /* Free blocks for the next iteration */ drm_buddy_free_list_internal(mm, blocks); - return err; + + iter = rb_prev(iter); } - /* Free blocks for the next iteration */ - drm_buddy_free_list_internal(mm, blocks); } return -ENOSPC; @@ -976,7 +1053,7 @@ int drm_buddy_block_trim(struct drm_buddy *mm, list_add(&block->tmp_link, &dfs); err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL); if (err) { - mark_allocated(block); + mark_allocated(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) mm->clear_avail -= drm_buddy_block_size(mm, block); @@ -999,8 +1076,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, return __drm_buddy_alloc_range_bias(mm, start, end, order, flags); else - /* Allocate from freelist */ - return alloc_from_freelist(mm, order, flags); + /* Allocate from freetree */ + return alloc_from_freetree(mm, order, flags); } /** @@ -1017,8 +1094,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, * alloc_range_bias() called on range limitations, which traverses * the tree and returns the desired block. * - * alloc_from_freelist() called when *no* range restrictions - * are enforced, which picks the block from the freelist. + * alloc_from_freetree() called when *no* range restrictions + * are enforced, which picks the block from the freetree. * * Returns: * 0 on success, error code on failure. @@ -1120,7 +1197,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, } } while (1); - mark_allocated(block); + mark_allocated(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) mm->clear_avail -= drm_buddy_block_size(mm, block); @@ -1201,12 +1278,18 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p) mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20); for (order = mm->max_order; order >= 0; order--) { - struct drm_buddy_block *block; + struct drm_buddy_block *block, *tmp; + struct rb_root *root; u64 count = 0, free; + unsigned int tree; - list_for_each_entry(block, &mm->free_list[order], link) { - BUG_ON(!drm_buddy_block_is_free(block)); - count++; + for_each_free_tree(tree) { + root = &mm->free_trees[tree][order]; + + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { + BUG_ON(!drm_buddy_block_is_free(block)); + count++; + } } drm_printf(p, "order-%2d ", order); diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 3fa38d4ac70b..a82d741e6630 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -11,12 +11,14 @@ #include #include +#include #include #include #include #include #include #include +#include #include #include @@ -168,29 +170,59 @@ void drm_client_release(struct drm_client_dev *client) drm_client_modeset_free(client); drm_client_close(client); + + if (client->funcs && client->funcs->free) + client->funcs->free(client); + drm_dev_put(dev); } EXPORT_SYMBOL(drm_client_release); -static void drm_client_buffer_delete(struct drm_client_buffer *buffer) +/** + * drm_client_buffer_delete - Delete a client buffer + * @buffer: DRM client buffer + */ +void drm_client_buffer_delete(struct drm_client_buffer *buffer) { - if (buffer->gem) { - drm_gem_vunmap(buffer->gem, &buffer->map); - drm_gem_object_put(buffer->gem); - } + struct drm_gem_object *gem; + int ret; + + if (!buffer) + return; + + gem = buffer->fb->obj[0]; + drm_gem_vunmap(gem, &buffer->map); + + ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file); + if (ret) + 
drm_err(buffer->client->dev, + "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret); + + drm_gem_object_put(buffer->gem); kfree(buffer); } +EXPORT_SYMBOL(drm_client_buffer_delete); static struct drm_client_buffer * drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, - u32 format, u32 *handle) + u32 format, u32 handle, u32 pitch) { - const struct drm_format_info *info = drm_format_info(format); - struct drm_mode_create_dumb dumb_args = { }; + struct drm_mode_fb_cmd2 fb_req = { + .width = width, + .height = height, + .pixel_format = format, + .handles = { + handle, + }, + .pitches = { + pitch, + }, + }; struct drm_device *dev = client->dev; struct drm_client_buffer *buffer; struct drm_gem_object *obj; + struct drm_framebuffer *fb; int ret; buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); @@ -199,28 +231,38 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, buffer->client = client; - dumb_args.width = width; - dumb_args.height = height; - dumb_args.bpp = drm_format_info_bpp(info, 0); - ret = drm_mode_create_dumb(dev, &dumb_args, client->file); - if (ret) - goto err_delete; - - obj = drm_gem_object_lookup(client->file, dumb_args.handle); + obj = drm_gem_object_lookup(client->file, handle); if (!obj) { ret = -ENOENT; goto err_delete; } - buffer->pitch = dumb_args.pitch; + ret = drm_mode_addfb2(dev, &fb_req, client->file); + if (ret) + goto err_drm_gem_object_put; + + fb = drm_framebuffer_lookup(dev, client->file, fb_req.fb_id); + if (drm_WARN_ON(dev, !fb)) { + ret = -ENOENT; + goto err_drm_mode_rmfb; + } + + /* drop the reference we picked up in framebuffer lookup */ + drm_framebuffer_put(fb); + + strscpy(fb->comm, client->name, TASK_COMM_LEN); + buffer->gem = obj; - *handle = dumb_args.handle; + buffer->fb = fb; return buffer; +err_drm_mode_rmfb: + drm_mode_rmfb(dev, fb_req.fb_id, client->file); +err_drm_gem_object_put: + drm_gem_object_put(obj); err_delete: - drm_client_buffer_delete(buffer); - + kfree(buffer); return ERR_PTR(ret); } @@ -247,7 +289,7 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer, struct iosys_map *map_copy) { - struct drm_gem_object *gem = buffer->gem; + struct drm_gem_object *gem = buffer->fb->obj[0]; struct iosys_map *map = &buffer->map; int ret; @@ -276,7 +318,7 @@ EXPORT_SYMBOL(drm_client_buffer_vmap_local); */ void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer) { - struct drm_gem_object *gem = buffer->gem; + struct drm_gem_object *gem = buffer->fb->obj[0]; struct iosys_map *map = &buffer->map; drm_gem_vunmap_locked(gem, map); @@ -307,9 +349,10 @@ EXPORT_SYMBOL(drm_client_buffer_vunmap_local); int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct iosys_map *map_copy) { + struct drm_gem_object *gem = buffer->fb->obj[0]; int ret; - ret = drm_gem_vmap(buffer->gem, &buffer->map); + ret = drm_gem_vmap(gem, &buffer->map); if (ret) return ret; *map_copy = buffer->map; @@ -328,57 +371,14 @@ EXPORT_SYMBOL(drm_client_buffer_vmap); */ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) { - drm_gem_vunmap(buffer->gem, &buffer->map); + struct drm_gem_object *gem = buffer->fb->obj[0]; + + drm_gem_vunmap(gem, &buffer->map); } EXPORT_SYMBOL(drm_client_buffer_vunmap); -static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer) -{ - int ret; - - if (!buffer->fb) - return; - - ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file); - if (ret) - 
drm_err(buffer->client->dev, - "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret); - - buffer->fb = NULL; -} - -static int drm_client_buffer_addfb(struct drm_client_buffer *buffer, - u32 width, u32 height, u32 format, - u32 handle) -{ - struct drm_client_dev *client = buffer->client; - struct drm_mode_fb_cmd2 fb_req = { }; - int ret; - - fb_req.width = width; - fb_req.height = height; - fb_req.pixel_format = format; - fb_req.handles[0] = handle; - fb_req.pitches[0] = buffer->pitch; - - ret = drm_mode_addfb2(client->dev, &fb_req, client->file); - if (ret) - return ret; - - buffer->fb = drm_framebuffer_lookup(client->dev, buffer->client->file, fb_req.fb_id); - if (WARN_ON(!buffer->fb)) - return -ENOENT; - - /* drop the reference we picked up in framebuffer lookup */ - drm_framebuffer_put(buffer->fb); - - strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN); - - return 0; -} - /** - * drm_client_framebuffer_create - Create a client framebuffer + * drm_client_buffer_create_dumb - Create a client buffer backed by a dumb buffer * @client: DRM client * @width: Framebuffer width * @height: Framebuffer height @@ -386,24 +386,33 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer, * * This function creates a &drm_client_buffer which consists of a * &drm_framebuffer backed by a dumb buffer. - * Call drm_client_framebuffer_delete() to free the buffer. + * Call drm_client_buffer_delete() to free the buffer. * * Returns: * Pointer to a client buffer or an error pointer on failure. */ struct drm_client_buffer * -drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format) +drm_client_buffer_create_dumb(struct drm_client_dev *client, u32 width, u32 height, u32 format) { + const struct drm_format_info *info = drm_format_info(format); + struct drm_device *dev = client->dev; + struct drm_mode_create_dumb dumb_args = { }; struct drm_client_buffer *buffer; - u32 handle; int ret; - buffer = drm_client_buffer_create(client, width, height, format, - &handle); - if (IS_ERR(buffer)) - return buffer; + dumb_args.width = width; + dumb_args.height = height; + dumb_args.bpp = drm_format_info_bpp(info, 0); + ret = drm_mode_create_dumb(dev, &dumb_args, client->file); + if (ret) + return ERR_PTR(ret); - ret = drm_client_buffer_addfb(buffer, width, height, format, handle); + buffer = drm_client_buffer_create(client, width, height, format, + dumb_args.handle, dumb_args.pitch); + if (IS_ERR(buffer)) { + ret = PTR_ERR(buffer); + goto err_drm_mode_destroy_dumb; + } /* * The handle is only needed for creating the framebuffer, destroy it @@ -411,34 +420,19 @@ drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 heig * object as DMA-buf. The framebuffer and our buffer structure are still * holding references to the GEM object to prevent its destruction. 
*/ - drm_mode_destroy_dumb(client->dev, handle, client->file); - - if (ret) { - drm_client_buffer_delete(buffer); - return ERR_PTR(ret); - } + drm_mode_destroy_dumb(client->dev, dumb_args.handle, client->file); return buffer; + +err_drm_mode_destroy_dumb: + drm_mode_destroy_dumb(client->dev, dumb_args.handle, client->file); + return ERR_PTR(ret); } -EXPORT_SYMBOL(drm_client_framebuffer_create); +EXPORT_SYMBOL(drm_client_buffer_create_dumb); /** - * drm_client_framebuffer_delete - Delete a client framebuffer - * @buffer: DRM client buffer (can be NULL) - */ -void drm_client_framebuffer_delete(struct drm_client_buffer *buffer) -{ - if (!buffer) - return; - - drm_client_buffer_rmfb(buffer); - drm_client_buffer_delete(buffer); -} -EXPORT_SYMBOL(drm_client_framebuffer_delete); - -/** - * drm_client_framebuffer_flush - Manually flush client framebuffer - * @buffer: DRM client buffer (can be NULL) + * drm_client_buffer_flush - Manually flush client buffer + * @buffer: DRM client buffer * @rect: Damage rectangle (if NULL flushes all) * * This calls &drm_framebuffer_funcs->dirty (if present) to flush buffer changes @@ -447,7 +441,7 @@ EXPORT_SYMBOL(drm_client_framebuffer_delete); * Returns: * Zero on success or negative error code on failure. */ -int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect) +int drm_client_buffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect) { if (!buffer || !buffer->fb || !buffer->fb->funcs->dirty) return 0; @@ -467,4 +461,4 @@ int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_re return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file, 0, 0, NULL, 0); } -EXPORT_SYMBOL(drm_client_framebuffer_flush); +EXPORT_SYMBOL(drm_client_buffer_flush); diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c index c83196ad8b59..7b3e362f7926 100644 --- a/drivers/gpu/drm/drm_client_event.c +++ b/drivers/gpu/drm/drm_client_event.c @@ -39,12 +39,13 @@ void drm_client_dev_unregister(struct drm_device *dev) mutex_lock(&dev->clientlist_mutex); list_for_each_entry_safe(client, tmp, &dev->clientlist, list) { list_del(&client->list); - if (client->funcs && client->funcs->unregister) { + /* + * Unregistering consumes and frees the client. 
+ */ + if (client->funcs && client->funcs->unregister) client->funcs->unregister(client); - } else { + else drm_client_release(client); - kfree(client); - } } mutex_unlock(&dev->clientlist_mutex); } @@ -101,7 +102,7 @@ void drm_client_dev_hotplug(struct drm_device *dev) } EXPORT_SYMBOL(drm_client_dev_hotplug); -void drm_client_dev_restore(struct drm_device *dev) +void drm_client_dev_restore(struct drm_device *dev, bool force) { struct drm_client_dev *client; int ret; @@ -114,7 +115,7 @@ void drm_client_dev_restore(struct drm_device *dev) if (!client->funcs || !client->funcs->restore) continue; - ret = client->funcs->restore(client); + ret = client->funcs->restore(client, force); drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); if (!ret) /* The first one to return zero gets the privilege to restore */ break; @@ -122,7 +123,7 @@ void drm_client_dev_restore(struct drm_device *dev) mutex_unlock(&dev->clientlist_mutex); } -static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +static int drm_client_suspend(struct drm_client_dev *client) { struct drm_device *dev = client->dev; int ret = 0; @@ -131,7 +132,7 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_ return 0; if (client->funcs && client->funcs->suspend) - ret = client->funcs->suspend(client, holds_console_lock); + ret = client->funcs->suspend(client); drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); client->suspended = true; @@ -139,20 +140,20 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_ return ret; } -void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock) +void drm_client_dev_suspend(struct drm_device *dev) { struct drm_client_dev *client; mutex_lock(&dev->clientlist_mutex); list_for_each_entry(client, &dev->clientlist, list) { if (!client->suspended) - drm_client_suspend(client, holds_console_lock); + drm_client_suspend(client); } mutex_unlock(&dev->clientlist_mutex); } EXPORT_SYMBOL(drm_client_dev_suspend); -static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock) +static int drm_client_resume(struct drm_client_dev *client) { struct drm_device *dev = client->dev; int ret = 0; @@ -161,7 +162,7 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l return 0; if (client->funcs && client->funcs->resume) - ret = client->funcs->resume(client, holds_console_lock); + ret = client->funcs->resume(client); drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); client->suspended = false; @@ -172,14 +173,14 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l return ret; } -void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock) +void drm_client_dev_resume(struct drm_device *dev) { struct drm_client_dev *client; mutex_lock(&dev->clientlist_mutex); list_for_each_entry(client, &dev->clientlist, list) { if (client->suspended) - drm_client_resume(client, holds_console_lock); + drm_client_resume(client); } mutex_unlock(&dev->clientlist_mutex); } diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 9c2c3b0c8c47..fc4caf7da5fc 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -1293,6 +1293,50 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode) } EXPORT_SYMBOL(drm_client_modeset_dpms); +/** + * drm_client_modeset_wait_for_vblank() - Wait for the next VBLANK to occur + * @client: DRM client + * @crtc_index: The index 
of the CRTC to wait on + * + * Block the caller until the given CRTC has seen a VBLANK. Do nothing + * if the CRTC is disabled. If there's another DRM master present, fail + * with -EBUSY. + * + * Returns: + * 0 on success, or negative error code otherwise. + */ +int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index) +{ + struct drm_device *dev = client->dev; + struct drm_crtc *crtc; + int ret; + + /* + * Rate-limit update frequency to vblank. If there's a DRM master + * present, it could interfere while we're waiting for the vblank + * event. Don't wait in this case. + */ + if (!drm_master_internal_acquire(dev)) + return -EBUSY; + + crtc = client->modesets[crtc_index].crtc; + + /* + * Only wait for a vblank event if the CRTC is enabled, otherwise + * just don't do anything, not even report an error. + */ + ret = drm_crtc_vblank_get(crtc); + if (!ret) { + drm_crtc_wait_one_vblank(crtc); + drm_crtc_vblank_put(crtc); + } + + drm_master_internal_release(dev); + + return 0; +} +EXPORT_SYMBOL(drm_client_modeset_wait_for_vblank); + #ifdef CONFIG_DRM_KUNIT_TEST #include "tests/drm_client_modeset_test.c" #endif diff --git a/drivers/gpu/drm/drm_client_sysrq.c b/drivers/gpu/drm/drm_client_sysrq.c new file mode 100644 index 000000000000..eea660096f1b --- /dev/null +++ b/drivers/gpu/drm/drm_client_sysrq.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT + +#include + +#include +#include +#include + +#include "drm_internal.h" + +#ifdef CONFIG_MAGIC_SYSRQ +static LIST_HEAD(drm_client_sysrq_dev_list); +static DEFINE_MUTEX(drm_client_sysrq_dev_lock); + +/* emergency restore, don't bother with error reporting */ +static void drm_client_sysrq_restore_work_fn(struct work_struct *ignored) +{ + struct drm_device *dev; + + guard(mutex)(&drm_client_sysrq_dev_lock); + + list_for_each_entry(dev, &drm_client_sysrq_dev_list, client_sysrq_list) { + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + continue; + + drm_client_dev_restore(dev, true); + } +} + +static DECLARE_WORK(drm_client_sysrq_restore_work, drm_client_sysrq_restore_work_fn); + +static void drm_client_sysrq_restore_handler(u8 ignored) +{ + schedule_work(&drm_client_sysrq_restore_work); +} + +static const struct sysrq_key_op drm_client_sysrq_restore_op = { + .handler = drm_client_sysrq_restore_handler, + .help_msg = "force-fb(v)", + .action_msg = "Restore framebuffer console", +}; + +void drm_client_sysrq_register(struct drm_device *dev) +{ + guard(mutex)(&drm_client_sysrq_dev_lock); + + if (list_empty(&drm_client_sysrq_dev_list)) + register_sysrq_key('v', &drm_client_sysrq_restore_op); + + list_add(&dev->client_sysrq_list, &drm_client_sysrq_dev_list); +} + +void drm_client_sysrq_unregister(struct drm_device *dev) +{ + guard(mutex)(&drm_client_sysrq_dev_lock); + + /* remove device from global restore list */ + if (!drm_WARN_ON(dev, list_empty(&dev->client_sysrq_list))) + list_del(&dev->client_sysrq_list); + + /* no devices left; unregister key */ + if (list_empty(&drm_client_sysrq_dev_list)) + unregister_sysrq_key('v', &drm_client_sysrq_restore_op); +} +#endif diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index 131c1c9ae92f..c598b99673fc 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -874,3 +874,46 @@ void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_pa fill_palette_8(crtc, i, set_palette); } EXPORT_SYMBOL(drm_crtc_fill_palette_8); + +/** + * drm_color_lut32_check - check validity of extended 
lookup table + * @lut: property blob containing extended LUT to check + * @tests: bitmask of tests to run + * + * Helper to check whether a userspace-provided extended lookup table is valid and + * satisfies hardware requirements. Drivers pass a bitmask indicating which of + * the tests in &drm_color_lut_tests should be performed. + * + * Returns 0 on success, -EINVAL on failure. + */ +int drm_color_lut32_check(const struct drm_property_blob *lut, u32 tests) +{ + const struct drm_color_lut32 *entry; + int i; + + if (!lut || !tests) + return 0; + + entry = lut->data; + for (i = 0; i < drm_color_lut32_size(lut); i++) { + if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) { + if (entry[i].red != entry[i].blue || + entry[i].red != entry[i].green) { + DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n"); + return -EINVAL; + } + } + + if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) { + if (entry[i].red < entry[i - 1].red || + entry[i].green < entry[i - 1].green || + entry[i].blue < entry[i - 1].blue) { + DRM_DEBUG_KMS("LUT entries must never decrease.\n"); + return -EINVAL; + } + } + } + + return 0; +} +EXPORT_SYMBOL(drm_color_lut32_check); diff --git a/drivers/gpu/drm/drm_colorop.c b/drivers/gpu/drm/drm_colorop.c new file mode 100644 index 000000000000..44eb823585d2 --- /dev/null +++ b/drivers/gpu/drm/drm_colorop.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include +#include +#include +#include + +#include "drm_crtc_internal.h" + +/** + * DOC: overview + * + * When userspace signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE it + * should use the COLOR_PIPELINE plane property and associated colorops + * for any color operation on the &drm_plane. Setting of all old color + * properties, such as COLOR_ENCODING and COLOR_RANGE, will be rejected + * and the values of the properties will be ignored. + * + * Colorops are only advertised and valid for atomic drivers and atomic + * userspace that signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE + * client cap. + * + * A colorop represents a single color operation. Colorops are chained + * via the NEXT property and make up color pipelines. Color pipelines + * are advertised and selected via the COLOR_PIPELINE &drm_plane + * property. + * + * A colorop will be of a certain type, advertised by the read-only TYPE + * property. 
Each type of colorop will advertise a different set of + * properties and is programmed in a different manner. Types can be + * enumerated 1D curves, 1D LUTs, 3D LUTs, matrices, etc. See the + * &drm_colorop_type documentation for information on each type. + * + * If a colorop advertises the BYPASS property it can be bypassed. + * + * Information about colorop and color pipeline design decisions can be + * found at rfc/color_pipeline.rst, but note that this document will + * grow stale over time. + */ + +static const struct drm_prop_enum_list drm_colorop_type_enum_list[] = { + { DRM_COLOROP_1D_CURVE, "1D Curve" }, + { DRM_COLOROP_1D_LUT, "1D LUT" }, + { DRM_COLOROP_CTM_3X4, "3x4 Matrix"}, + { DRM_COLOROP_MULTIPLIER, "Multiplier"}, + { DRM_COLOROP_3D_LUT, "3D LUT"}, +}; + +static const char * const colorop_curve_1d_type_names[] = { + [DRM_COLOROP_1D_CURVE_SRGB_EOTF] = "sRGB EOTF", + [DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF] = "sRGB Inverse EOTF", + [DRM_COLOROP_1D_CURVE_PQ_125_EOTF] = "PQ 125 EOTF", + [DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF] = "PQ 125 Inverse EOTF", + [DRM_COLOROP_1D_CURVE_BT2020_INV_OETF] = "BT.2020 Inverse OETF", + [DRM_COLOROP_1D_CURVE_BT2020_OETF] = "BT.2020 OETF", + [DRM_COLOROP_1D_CURVE_GAMMA22] = "Gamma 2.2", + [DRM_COLOROP_1D_CURVE_GAMMA22_INV] = "Gamma 2.2 Inverse", +}; + +static const struct drm_prop_enum_list drm_colorop_lut1d_interpolation_list[] = { + { DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, "Linear" }, +}; + + +static const struct drm_prop_enum_list drm_colorop_lut3d_interpolation_list[] = { + { DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, "Tetrahedral" }, +}; + +/* Init Helpers */ + +static int drm_plane_colorop_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, enum drm_colorop_type type, + uint32_t flags) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_property *prop; + int ret = 0; + + ret = drm_mode_object_add(dev, &colorop->base, DRM_MODE_OBJECT_COLOROP); + if (ret) + return ret; + + colorop->base.properties = &colorop->properties; + colorop->dev = dev; + colorop->type = type; + colorop->plane = plane; + colorop->next = NULL; + + list_add_tail(&colorop->head, &config->colorop_list); + colorop->index = config->num_colorop++; + + /* add properties */ + + /* type */ + prop = drm_property_create_enum(dev, + DRM_MODE_PROP_IMMUTABLE, + "TYPE", drm_colorop_type_enum_list, + ARRAY_SIZE(drm_colorop_type_enum_list)); + + if (!prop) + return -ENOMEM; + + colorop->type_property = prop; + + drm_object_attach_property(&colorop->base, + colorop->type_property, + colorop->type); + + if (flags & DRM_COLOROP_FLAG_ALLOW_BYPASS) { + /* bypass */ + prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC, + "BYPASS"); + if (!prop) + return -ENOMEM; + + colorop->bypass_property = prop; + drm_object_attach_property(&colorop->base, + colorop->bypass_property, + 1); + } + + /* next */ + prop = drm_property_create_object(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, + "NEXT", DRM_MODE_OBJECT_COLOROP); + if (!prop) + return -ENOMEM; + colorop->next_property = prop; + drm_object_attach_property(&colorop->base, + colorop->next_property, + 0); + + return ret; +} + +/** + * drm_colorop_cleanup - Cleanup a drm_colorop object in color_pipeline + * + * @colorop: The drm_colorop object to be cleaned + */ +void drm_colorop_cleanup(struct drm_colorop *colorop) +{ + struct drm_device *dev = colorop->dev; + struct drm_mode_config *config = &dev->mode_config; + + list_del(&colorop->head); + config->num_colorop--; + + if (colorop->state 
&& colorop->state->data) { + drm_property_blob_put(colorop->state->data); + colorop->state->data = NULL; + } + + kfree(colorop->state); +} +EXPORT_SYMBOL(drm_colorop_cleanup); + +/** + * drm_colorop_pipeline_destroy - Helper for color pipeline destruction + * + * @dev: - The drm_device containing the drm_planes with the color_pipelines + * + * Provides a default color pipeline destroy handler for drm_device. + */ +void drm_colorop_pipeline_destroy(struct drm_device *dev) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_colorop *colorop, *next; + + list_for_each_entry_safe(colorop, next, &config->colorop_list, head) { + drm_colorop_cleanup(colorop); + kfree(colorop); + } +} +EXPORT_SYMBOL(drm_colorop_pipeline_destroy); + +/** + * drm_plane_colorop_curve_1d_init - Initialize a DRM_COLOROP_1D_CURVE + * + * @dev: DRM device + * @colorop: The drm_colorop object to initialize + * @plane: The associated drm_plane + * @supported_tfs: A bitfield of supported drm_plane_colorop_curve_1d_init enum values, + * created using BIT(curve_type) and combined with the OR '|' + * operator. + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. + * @return zero on success, -E value on failure + */ +int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, u64 supported_tfs, uint32_t flags) +{ + struct drm_prop_enum_list enum_list[DRM_COLOROP_1D_CURVE_COUNT]; + int i, len; + + struct drm_property *prop; + int ret; + + if (!supported_tfs) { + drm_err(dev, + "No supported TFs for new 1D curve colorop on [PLANE:%d:%s]\n", + plane->base.id, plane->name); + return -EINVAL; + } + + if ((supported_tfs & -BIT(DRM_COLOROP_1D_CURVE_COUNT)) != 0) { + drm_err(dev, "Unknown TF provided on [PLANE:%d:%s]\n", + plane->base.id, plane->name); + return -EINVAL; + } + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_CURVE, flags); + if (ret) + return ret; + + len = 0; + for (i = 0; i < DRM_COLOROP_1D_CURVE_COUNT; i++) { + if ((supported_tfs & BIT(i)) == 0) + continue; + + enum_list[len].type = i; + enum_list[len].name = colorop_curve_1d_type_names[i]; + len++; + } + + if (WARN_ON(len <= 0)) + return -EINVAL; + + /* initialize 1D curve only attribute */ + prop = drm_property_create_enum(dev, DRM_MODE_PROP_ATOMIC, "CURVE_1D_TYPE", + enum_list, len); + + if (!prop) + return -ENOMEM; + + colorop->curve_1d_type_property = prop; + drm_object_attach_property(&colorop->base, colorop->curve_1d_type_property, + enum_list[0].type); + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_curve_1d_init); + +static int drm_colorop_create_data_prop(struct drm_device *dev, struct drm_colorop *colorop) +{ + struct drm_property *prop; + + /* data */ + prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, + "DATA", 0); + if (!prop) + return -ENOMEM; + + colorop->data_property = prop; + drm_object_attach_property(&colorop->base, + colorop->data_property, + 0); + + return 0; +} + +/** + * drm_plane_colorop_curve_1d_lut_init - Initialize a DRM_COLOROP_1D_LUT + * + * @dev: DRM device + * @colorop: The drm_colorop object to initialize + * @plane: The associated drm_plane + * @lut_size: LUT size supported by driver + * @interpolation: 1D LUT interpolation type + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. 
+ * @return zero on success, -E value on failure + */ +int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t lut_size, + enum drm_colorop_lut1d_interpolation_type interpolation, + uint32_t flags) +{ + struct drm_property *prop; + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_LUT, flags); + if (ret) + return ret; + + /* initialize 1D LUT only attribute */ + /* LUT size */ + prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, + "SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + + colorop->size_property = prop; + drm_object_attach_property(&colorop->base, colorop->size_property, lut_size); + colorop->size = lut_size; + + /* interpolation */ + prop = drm_property_create_enum(dev, 0, "LUT1D_INTERPOLATION", + drm_colorop_lut1d_interpolation_list, + ARRAY_SIZE(drm_colorop_lut1d_interpolation_list)); + if (!prop) + return -ENOMEM; + + colorop->lut1d_interpolation_property = prop; + drm_object_attach_property(&colorop->base, prop, interpolation); + colorop->lut1d_interpolation = interpolation; + + /* data */ + ret = drm_colorop_create_data_prop(dev, colorop); + if (ret) + return ret; + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_curve_1d_lut_init); + +int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t flags) +{ + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_CTM_3X4, flags); + if (ret) + return ret; + + ret = drm_colorop_create_data_prop(dev, colorop); + if (ret) + return ret; + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_ctm_3x4_init); + +/** + * drm_plane_colorop_mult_init - Initialize a DRM_COLOROP_MULTIPLIER + * + * @dev: DRM device + * @colorop: The drm_colorop object to initialize + * @plane: The associated drm_plane + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. 
+ * @return zero on success, -E value on failure + */ +int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t flags) +{ + struct drm_property *prop; + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_MULTIPLIER, flags); + if (ret) + return ret; + + prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "MULTIPLIER", 0, U64_MAX); + if (!prop) + return -ENOMEM; + + colorop->multiplier_property = prop; + drm_object_attach_property(&colorop->base, colorop->multiplier_property, 0); + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_mult_init); + +int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, + uint32_t lut_size, + enum drm_colorop_lut3d_interpolation_type interpolation, + uint32_t flags) +{ + struct drm_property *prop; + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_3D_LUT, flags); + if (ret) + return ret; + + /* LUT size */ + prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, + "SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + + colorop->size_property = prop; + drm_object_attach_property(&colorop->base, colorop->size_property, lut_size); + colorop->size = lut_size; + + /* interpolation */ + prop = drm_property_create_enum(dev, 0, "LUT3D_INTERPOLATION", + drm_colorop_lut3d_interpolation_list, + ARRAY_SIZE(drm_colorop_lut3d_interpolation_list)); + if (!prop) + return -ENOMEM; + + colorop->lut3d_interpolation_property = prop; + drm_object_attach_property(&colorop->base, prop, interpolation); + colorop->lut3d_interpolation = interpolation; + + /* data */ + ret = drm_colorop_create_data_prop(dev, colorop); + if (ret) + return ret; + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_3dlut_init); + +static void __drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop, + struct drm_colorop_state *state) +{ + memcpy(state, colorop->state, sizeof(*state)); + + if (state->data) + drm_property_blob_get(state->data); + + state->bypass = true; +} + +struct drm_colorop_state * +drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop) +{ + struct drm_colorop_state *state; + + if (WARN_ON(!colorop->state)) + return NULL; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (state) + __drm_atomic_helper_colorop_duplicate_state(colorop, state); + + return state; +} + +void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop, + struct drm_colorop_state *state) +{ + kfree(state); +} + +/** + * __drm_colorop_state_reset - resets colorop state to default values + * @colorop_state: atomic colorop state, must not be NULL + * @colorop: colorop object, must not be NULL + * + * Initializes the newly allocated @colorop_state with default + * values. This is useful for drivers that subclass the CRTC state. 
+ */ +static void __drm_colorop_state_reset(struct drm_colorop_state *colorop_state, + struct drm_colorop *colorop) +{ + u64 val; + + colorop_state->colorop = colorop; + colorop_state->bypass = true; + + if (colorop->curve_1d_type_property) { + drm_object_property_get_default_value(&colorop->base, + colorop->curve_1d_type_property, + &val); + colorop_state->curve_1d_type = val; + } +} + +/** + * __drm_colorop_reset - reset state on colorop + * @colorop: drm colorop + * @colorop_state: colorop state to assign + * + * Initializes the newly allocated @colorop_state and assigns it to + * the &drm_crtc->state pointer of @colorop, usually required when + * initializing the drivers or when called from the &drm_colorop_funcs.reset + * hook. + * + * This is useful for drivers that subclass the colorop state. + */ +static void __drm_colorop_reset(struct drm_colorop *colorop, + struct drm_colorop_state *colorop_state) +{ + if (colorop_state) + __drm_colorop_state_reset(colorop_state, colorop); + + colorop->state = colorop_state; +} + +void drm_colorop_reset(struct drm_colorop *colorop) +{ + kfree(colorop->state); + colorop->state = kzalloc(sizeof(*colorop->state), GFP_KERNEL); + + if (colorop->state) + __drm_colorop_reset(colorop, colorop->state); +} + +static const char * const colorop_type_name[] = { + [DRM_COLOROP_1D_CURVE] = "1D Curve", + [DRM_COLOROP_1D_LUT] = "1D LUT", + [DRM_COLOROP_CTM_3X4] = "3x4 Matrix", + [DRM_COLOROP_MULTIPLIER] = "Multiplier", + [DRM_COLOROP_3D_LUT] = "3D LUT", +}; + +static const char * const colorop_lu3d_interpolation_name[] = { + [DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL] = "Tetrahedral", +}; + +static const char * const colorop_lut1d_interpolation_name[] = { + [DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR] = "Linear", +}; + +const char *drm_get_colorop_type_name(enum drm_colorop_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_type_name))) + return "unknown"; + + return colorop_type_name[type]; +} + +const char *drm_get_colorop_curve_1d_type_name(enum drm_colorop_curve_1d_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_curve_1d_type_names))) + return "unknown"; + + return colorop_curve_1d_type_names[type]; +} + +/** + * drm_get_colorop_lut1d_interpolation_name: return a string for interpolation type + * @type: interpolation type to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. + */ +const char *drm_get_colorop_lut1d_interpolation_name(enum drm_colorop_lut1d_interpolation_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_lut1d_interpolation_name))) + return "unknown"; + + return colorop_lut1d_interpolation_name[type]; +} + +/** + * drm_get_colorop_lut3d_interpolation_name - return a string for interpolation type + * @type: interpolation type to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. 
+ */ +const char *drm_get_colorop_lut3d_interpolation_name(enum drm_colorop_lut3d_interpolation_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_lu3d_interpolation_name))) + return "unknown"; + + return colorop_lu3d_interpolation_name[type]; +} + +/** + * drm_colorop_set_next_property - sets the next pointer + * @colorop: drm colorop + * @next: next colorop + * + * Should be used when constructing the color pipeline + */ +void drm_colorop_set_next_property(struct drm_colorop *colorop, struct drm_colorop *next) +{ + drm_object_property_set_value(&colorop->base, + colorop->next_property, + next ? next->base.id : 0); + colorop->next = next; +} +EXPORT_SYMBOL(drm_colorop_set_next_property); diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 272d6254ea47..4d6dc9ebfdb5 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -3439,6 +3439,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, * properties reflect the latest status. */ ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, + file_priv->plane_color_pipeline, (uint32_t __user *)(unsigned long)(out_resp->props_ptr), (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), &out_resp->count_props); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 46655339003d..a7797d260f1e 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -229,6 +229,25 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc) * Driver's default scaling filter * Nearest Neighbor: * Nearest Neighbor scaling filter + * SHARPNESS_STRENGTH: + * Atomic property for setting the sharpness strength/intensity by userspace. + * + * The value of this property is set as an integer value ranging + * from 0 to 255, where: + * + * 0: Sharpness feature is disabled (default value). + * + * 1: Minimum sharpness. + * + * 255: Maximum sharpness. + * + * Userspace can gradually increase or decrease the sharpness level and + * set the optimum value depending on the content. + * This value is passed to the kernel through the UAPI. + * Setting this property does not require a modeset. + * The sharpness effect takes place post blending on the final composed output. + * If the feature is disabled, the content remains the same without any sharpening effect; + * when the feature is applied, it enhances the clarity of the content. 
*/ __printf(6, 0) @@ -940,6 +959,22 @@ int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc, } EXPORT_SYMBOL(drm_crtc_create_scaling_filter_property); +int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_property *prop = + drm_property_create_range(dev, 0, "SHARPNESS_STRENGTH", 0, 255); + + if (!prop) + return -ENOMEM; + + crtc->sharpness_strength_property = prop; + drm_object_attach_property(&crtc->base, prop, 0); + + return 0; +} +EXPORT_SYMBOL(drm_crtc_create_sharpness_strength_property); + /** * drm_crtc_in_clone_mode - check if the given CRTC state is in clone mode * diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 89706aa8232f..c09409229644 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -163,6 +163,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev, void drm_mode_object_unregister(struct drm_device *dev, struct drm_mode_object *object); int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, + bool plane_color_pipeline, uint32_t __user *prop_ptr, uint64_t __user *prop_values, uint32_t *arg_count_props); diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c index b4fd43783c50..58d0bb6d2676 100644 --- a/drivers/gpu/drm/drm_displayid.c +++ b/drivers/gpu/drm/drm_displayid.c @@ -9,6 +9,34 @@ #include "drm_crtc_internal.h" #include "drm_displayid_internal.h" +enum { + QUIRK_IGNORE_CHECKSUM, +}; + +struct displayid_quirk { + const struct drm_edid_ident ident; + u8 quirks; +}; + +static const struct displayid_quirk quirks[] = { + { + .ident = DRM_EDID_IDENT_INIT('C', 'S', 'O', 5142, "MNE007ZA1-5"), + .quirks = BIT(QUIRK_IGNORE_CHECKSUM), + }, +}; + +static u8 get_quirks(const struct drm_edid *drm_edid) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(quirks); i++) { + if (drm_edid_match(drm_edid, &quirks[i].ident)) + return quirks[i].quirks; + } + + return 0; +} + static const struct displayid_header * displayid_get_header(const u8 *displayid, int length, int index) { @@ -23,7 +51,7 @@ displayid_get_header(const u8 *displayid, int length, int index) } static const struct displayid_header * -validate_displayid(const u8 *displayid, int length, int idx) +validate_displayid(const u8 *displayid, int length, int idx, bool ignore_checksum) { int i, dispid_length; u8 csum = 0; @@ -41,33 +69,35 @@ validate_displayid(const u8 *displayid, int length, int idx) for (i = 0; i < dispid_length; i++) csum += displayid[idx + i]; if (csum) { - DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum); - return ERR_PTR(-EINVAL); + DRM_NOTE("DisplayID checksum invalid, remainder is %d%s\n", csum, + ignore_checksum ? 
" (ignoring)" : ""); + + if (!ignore_checksum) + return ERR_PTR(-EINVAL); } return base; } -static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid, - int *length, int *idx, - int *ext_index) +static const u8 *find_next_displayid_extension(struct displayid_iter *iter) { const struct displayid_header *base; const u8 *displayid; + bool ignore_checksum = iter->quirks & BIT(QUIRK_IGNORE_CHECKSUM); - displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, ext_index); + displayid = drm_edid_find_extension(iter->drm_edid, DISPLAYID_EXT, &iter->ext_index); if (!displayid) return NULL; /* EDID extensions block checksum isn't for us */ - *length = EDID_LENGTH - 1; - *idx = 1; + iter->length = EDID_LENGTH - 1; + iter->idx = 1; - base = validate_displayid(displayid, *length, *idx); + base = validate_displayid(displayid, iter->length, iter->idx, ignore_checksum); if (IS_ERR(base)) return NULL; - *length = *idx + sizeof(*base) + base->bytes; + iter->length = iter->idx + sizeof(*base) + base->bytes; return displayid; } @@ -78,6 +108,7 @@ void displayid_iter_edid_begin(const struct drm_edid *drm_edid, memset(iter, 0, sizeof(*iter)); iter->drm_edid = drm_edid; + iter->quirks = get_quirks(drm_edid); } static const struct displayid_block * @@ -126,10 +157,7 @@ __displayid_iter_next(struct displayid_iter *iter) /* The first section we encounter is the base section */ bool base_section = !iter->section; - iter->section = drm_find_displayid_extension(iter->drm_edid, - &iter->length, - &iter->idx, - &iter->ext_index); + iter->section = find_next_displayid_extension(iter); if (!iter->section) { iter->drm_edid = NULL; return NULL; diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h index 957dd0619f5c..5b1b32f73516 100644 --- a/drivers/gpu/drm/drm_displayid_internal.h +++ b/drivers/gpu/drm/drm_displayid_internal.h @@ -167,6 +167,8 @@ struct displayid_iter { u8 version; u8 primary_use; + + u8 quirks; }; void displayid_iter_edid_begin(const struct drm_edid *drm_edid, diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8e3cb08241c8..2915118436ce 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -733,6 +733,7 @@ static int drm_dev_init(struct drm_device *dev, INIT_LIST_HEAD(&dev->filelist); INIT_LIST_HEAD(&dev->filelist_internal); INIT_LIST_HEAD(&dev->clientlist); + INIT_LIST_HEAD(&dev->client_sysrq_list); INIT_LIST_HEAD(&dev->vblank_event_list); spin_lock_init(&dev->event_lock); @@ -1100,6 +1101,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto err_unload; } drm_panic_register(dev); + drm_client_sysrq_register(dev); DRM_INFO("Initialized %s %d.%d.%d for %s on minor %d\n", driver->name, driver->major, driver->minor, @@ -1144,6 +1146,7 @@ void drm_dev_unregister(struct drm_device *dev) { dev->registered = false; + drm_client_sysrq_unregister(dev); drm_panic_unregister(dev); drm_client_dev_unregister(dev); diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c index 70032bba1c97..e2b62e5fb891 100644 --- a/drivers/gpu/drm/drm_dumb_buffers.c +++ b/drivers/gpu/drm/drm_dumb_buffers.c @@ -25,8 +25,11 @@ #include #include +#include +#include #include #include +#include #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -57,6 +60,134 @@ * a hardware-specific ioctl to allocate suitable buffer objects. 
*/ +static int drm_mode_align_dumb(struct drm_mode_create_dumb *args, + unsigned long hw_pitch_align, + unsigned long hw_size_align) +{ + u32 pitch = args->pitch; + u32 size; + + if (!pitch) + return -EINVAL; + + if (hw_pitch_align) + pitch = roundup(pitch, hw_pitch_align); + + if (!hw_size_align) + hw_size_align = PAGE_SIZE; + else if (!IS_ALIGNED(hw_size_align, PAGE_SIZE)) + return -EINVAL; /* TODO: handle this if necessary */ + + if (check_mul_overflow(args->height, pitch, &size)) + return -EINVAL; + size = ALIGN(size, hw_size_align); + if (!size) + return -EINVAL; + + args->pitch = pitch; + args->size = size; + + return 0; +} + +/** + * drm_mode_size_dumb - Calculates the scanline and buffer sizes for dumb buffers + * @dev: DRM device + * @args: Parameters for the dumb buffer + * @hw_pitch_align: Hardware scanline alignment in bytes + * @hw_size_align: Hardware buffer-size alignment in bytes + * + * The helper drm_mode_size_dumb() calculates the size of the buffer + * allocation and the scanline size for a dumb buffer. Callers have to + * set the buffer's width, height and color mode in the argument @args. + * The helper validates the correctness of the input and tests for + * possible overflows. If successful, it returns the dumb buffer's + * required scanline pitch and size in &args. + * + * The parameter @hw_pitch_align allows the driver to specify an + * alignment for the scanline pitch, if the hardware requires any. The + * calculated pitch will be a multiple of the alignment. The parameter + * @hw_size_align allows the driver to specify an alignment for buffer sizes. The + * provided alignment should represent requirements of the graphics + * hardware. drm_mode_size_dumb() handles GEM-related constraints + * automatically across all drivers and hardware. For example, the + * returned buffer size is always a multiple of PAGE_SIZE, which is + * required by mmap(). + * + * Returns: + * Zero on success, or a negative error code otherwise. + */ +int drm_mode_size_dumb(struct drm_device *dev, + struct drm_mode_create_dumb *args, + unsigned long hw_pitch_align, + unsigned long hw_size_align) +{ + u64 pitch = 0; + u32 fourcc; + + /* + * The scanline pitch depends on the buffer width and the color + * format. The latter is specified as a color-mode constant for + * which we first have to find the corresponding color format. + * + * Different color formats can have the same color-mode constant. + * For example XRGB8888 and BGRX8888 both have a color mode of 32. + * It is possible to use different formats for dumb-buffer allocation + * and rendering as long as all involved formats share the same + * color-mode constant. + */ + fourcc = drm_driver_color_mode_format(dev, args->bpp); + if (fourcc != DRM_FORMAT_INVALID) { + const struct drm_format_info *info = drm_format_info(fourcc); + + if (!info) + return -EINVAL; + pitch = drm_format_info_min_pitch(info, 0, args->width); + } else if (args->bpp) { + /* + * Some userspace throws in arbitrary values for bpp and + * relies on the kernel to figure it out. In this case we + * fall back to the old method of using bpp directly. The + * over-commitment of memory from the rounding is acceptable + * for compatibility with legacy userspace. We have a number + * of deprecated legacy values that are explicitly supported. + */ + switch (args->bpp) { + default: + drm_warn_once(dev, + "Unknown color mode %u; guessing buffer size.\n", + args->bpp); + fallthrough; + /* + * These constants represent various YUV formats supported by + * drm_gem_afbc_get_bpp(). 
+ */ + case 12: // DRM_FORMAT_YUV420_8BIT + case 15: // DRM_FORMAT_YUV420_10BIT + case 30: // DRM_FORMAT_VUY101010 + fallthrough; + /* + * Used by Mesa and Gstreamer to allocate NV formats and others + * as RGB buffers. Technically, XRGB16161616F formats are RGB, + * but the dumb buffers are not supposed to be used for anything + * beyond 32 bits per pixel. + */ + case 10: // DRM_FORMAT_NV{15,20,30}, DRM_FORMAT_P010 + case 64: // DRM_FORMAT_{XRGB,XBGR,ARGB,ABGR}16161616F + pitch = args->width * DIV_ROUND_UP(args->bpp, SZ_8); + break; + } + } + + if (!pitch || pitch > U32_MAX) + return -EINVAL; + + args->pitch = pitch; + + return drm_mode_align_dumb(args, hw_pitch_align, hw_size_align); +} +EXPORT_SYMBOL(drm_mode_size_dumb); + int drm_mode_create_dumb(struct drm_device *dev, struct drm_mode_create_dumb *args, struct drm_file *file_priv) @@ -99,7 +230,30 @@ int drm_mode_create_dumb(struct drm_device *dev, int drm_mode_create_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - return drm_mode_create_dumb(dev, data, file_priv); + struct drm_mode_create_dumb *args = data; + int err; + + err = drm_mode_create_dumb(dev, args, file_priv); + if (err) { + args->handle = 0; + args->pitch = 0; + args->size = 0; + } + return err; +} + +static int drm_mode_mmap_dumb(struct drm_device *dev, struct drm_mode_map_dumb *args, + struct drm_file *file_priv) +{ + if (!dev->driver->dumb_create) + return -ENOSYS; + + if (dev->driver->dumb_map_offset) + return dev->driver->dumb_map_offset(file_priv, dev, args->handle, + &args->offset); + else + return drm_gem_dumb_map_offset(file_priv, dev, args->handle, + &args->offset); } /** @@ -120,17 +274,12 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_map_dumb *args = data; + int err; - if (!dev->driver->dumb_create) - return -ENOSYS; - - if (dev->driver->dumb_map_offset) - return dev->driver->dumb_map_offset(file_priv, dev, - args->handle, - &args->offset); - else - return drm_gem_dumb_map_offset(file_priv, dev, args->handle, - &args->offset); + err = drm_mode_mmap_dumb(dev, args, file_priv); + if (err) + args->offset = 0; + return err; } int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle, diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index e2e85345aa9a..26bb7710a462 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -250,6 +250,9 @@ static const struct edid_quirk { EDID_QUIRK('S', 'V', 'R', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)), EDID_QUIRK('A', 'U', 'O', 0x1111, BIT(EDID_QUIRK_NON_DESKTOP)), + /* LQ116M1JW10 displays noise when 8 bpc, but displays fine as 6 bpc */ + EDID_QUIRK('S', 'H', 'P', 0x154c, BIT(EDID_QUIRK_FORCE_6BPC)), + /* * @drm_edid_internal_quirk entries end here, following with the * @drm_edid_quirk entries. diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0b3ee008523d..4a7f72044ab8 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -31,7 +31,6 @@ #include #include -#include #include #include @@ -253,6 +252,7 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, /** * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration * @fb_helper: driver-allocated fbdev helper, can be NULL + * @force: ignore present DRM master + * * This helper should be called from fbdev emulation's &drm_client_funcs.restore * callback. 
It ensures that the user isn't greeted with a black screen when the @@ -261,48 +261,12 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, * Returns: * 0 on success, or a negative errno code otherwise. */ -int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) +int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, bool force) { - return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false); + return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); } EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); -#ifdef CONFIG_MAGIC_SYSRQ -/* emergency restore, don't bother with error reporting */ -static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) -{ - struct drm_fb_helper *helper; - - mutex_lock(&kernel_fb_helper_lock); - list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { - struct drm_device *dev = helper->dev; - - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) - continue; - - mutex_lock(&helper->lock); - drm_client_modeset_commit_locked(&helper->client); - mutex_unlock(&helper->lock); - } - mutex_unlock(&kernel_fb_helper_lock); -} - -static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); - -static void drm_fb_helper_sysrq(u8 dummy1) -{ - schedule_work(&drm_fb_helper_restore_work); -} - -static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { - .handler = drm_fb_helper_sysrq, - .help_msg = "force-fb(v)", - .action_msg = "Restore framebuffer console", -}; -#else -static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; -#endif - static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) { struct drm_fb_helper *fb_helper = info->par; @@ -366,6 +330,10 @@ static void drm_fb_helper_fb_dirty(struct drm_fb_helper *helper) unsigned long flags; int ret; + mutex_lock(&helper->lock); + drm_client_modeset_wait_for_vblank(&helper->client, 0); + mutex_unlock(&helper->lock); + if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty)) return; @@ -489,20 +457,7 @@ int drm_fb_helper_init(struct drm_device *dev, } EXPORT_SYMBOL(drm_fb_helper_init); -/** - * drm_fb_helper_alloc_info - allocate fb_info and some of its members - * @fb_helper: driver-allocated fbdev helper - * - * A helper to alloc fb_info and the member cmap. Called by the driver - * within the struct &drm_driver.fbdev_probe callback function. Drivers do - * not need to release the allocated fb_info structure themselves, this is - * automatically done when calling drm_fb_helper_fini(). - * - * RETURNS: - * fb_info pointer if things went okay, pointer containing error code - * otherwise - */ -struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) +static struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) { struct device *dev = fb_helper->dev->dev; struct fb_info *info; @@ -529,17 +484,8 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) framebuffer_release(info); return ERR_PTR(ret); } -EXPORT_SYMBOL(drm_fb_helper_alloc_info); -/** - * drm_fb_helper_release_info - release fb_info and its members - * @fb_helper: driver-allocated fbdev helper - * - * A helper to release fb_info and the member cmap. Drivers do not - * need to release the allocated fb_info structure themselves, this is - * automatically done when calling drm_fb_helper_fini(). 
- */ -void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) +static void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) { struct fb_info *info = fb_helper->info; @@ -552,7 +498,6 @@ void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } -EXPORT_SYMBOL(drm_fb_helper_release_info); /** * drm_fb_helper_unregister_info - unregister fb_info framebuffer device @@ -590,11 +535,8 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) drm_fb_helper_release_info(fb_helper); mutex_lock(&kernel_fb_helper_lock); - if (!list_empty(&fb_helper->kernel_fb_list)) { + if (!list_empty(&fb_helper->kernel_fb_list)) list_del(&fb_helper->kernel_fb_list); - if (list_empty(&kernel_fb_helper_list)) - unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); - } mutex_unlock(&kernel_fb_helper_lock); if (!fb_helper->client.funcs) @@ -1061,15 +1003,9 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; - struct drm_crtc *crtc; int ret = 0; - mutex_lock(&fb_helper->lock); - if (!drm_master_internal_acquire(dev)) { - ret = -EBUSY; - goto unlock; - } + guard(mutex)(&fb_helper->lock); switch (cmd) { case FBIO_WAITFORVSYNC: @@ -1089,28 +1025,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, * make. If we're not smart enough here, one should * just consider switch the userspace to KMS. */ - crtc = fb_helper->client.modesets[0].crtc; - - /* - * Only wait for a vblank event if the CRTC is - * enabled, otherwise just don't do anythintg, - * not even report an error. - */ - ret = drm_crtc_vblank_get(crtc); - if (!ret) { - drm_crtc_wait_one_vblank(crtc); - drm_crtc_vblank_put(crtc); - } - - ret = 0; + ret = drm_client_modeset_wait_for_vblank(&fb_helper->client, 0); break; default: ret = -ENOTTY; } - drm_master_internal_release(dev); -unlock: - mutex_unlock(&fb_helper->lock); return ret; } EXPORT_SYMBOL(drm_fb_helper_ioctl); @@ -1339,9 +1259,9 @@ int drm_fb_helper_set_par(struct fb_info *info) * the KDSET IOCTL with KD_TEXT, and only after that drops the master * status when exiting. * - * In the past this was caught by drm_fb_helper_lastclose(), but on - * modern systems where logind always keeps a drm fd open to orchestrate - * the vt switching, this doesn't work. + * In the past this was caught by drm_fb_helper_restore_fbdev_mode_unlocked(), + * but on modern systems where logind always keeps a drm fd open to + * orchestrate the vt switching, this doesn't work. * * To not break the userspace ABI we have this special case here, which * is only used for the above case. 
Everything else uses the normal @@ -1813,6 +1733,11 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) height = dev->mode_config.max_height; drm_client_modeset_probe(&fb_helper->client, width, height); + + info = drm_fb_helper_alloc_info(fb_helper); + if (IS_ERR(info)) + return PTR_ERR(info); + ret = drm_fb_helper_single_fb_probe(fb_helper); if (ret < 0) { if (ret == -EAGAIN) { @@ -1821,13 +1746,12 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) } mutex_unlock(&fb_helper->lock); - return ret; + goto err_drm_fb_helper_release_info; } drm_setup_crtcs_fb(fb_helper); fb_helper->deferred_setup = false; - info = fb_helper->info; info->var.pixclock = 0; /* Need to drop locks to avoid recursive deadlock in @@ -1843,13 +1767,14 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) info->node, info->fix.id); mutex_lock(&kernel_fb_helper_lock); - if (list_empty(&kernel_fb_helper_list)) - register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); - list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); mutex_unlock(&kernel_fb_helper_lock); return 0; + +err_drm_fb_helper_release_info: + drm_fb_helper_release_info(fb_helper); + return ret; } /** @@ -1959,16 +1884,3 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) return 0; } EXPORT_SYMBOL(drm_fb_helper_hotplug_event); - -/** - * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation - * @dev: DRM device - * - * This function is obsolete. Call drm_fb_helper_restore_fbdev_mode_unlocked() - * instead. - */ -void drm_fb_helper_lastclose(struct drm_device *dev) -{ - drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper); -} -EXPORT_SYMBOL(drm_fb_helper_lastclose); diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index 8bd626ef16c7..9412d9fdd74b 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -10,6 +10,7 @@ #include #include #include +#include /* * struct fb_ops @@ -55,10 +56,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info) drm_fb_helper_fini(fb_helper); drm_client_buffer_vunmap(fb_helper->buffer); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_dma_fb_ops = { @@ -90,10 +89,8 @@ static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info) vfree(shadow); drm_client_buffer_vunmap(fb_helper->buffer); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = { @@ -272,9 +269,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; + struct fb_info *info = fb_helper->info; struct drm_client_buffer *buffer; struct drm_framebuffer *fb; - struct fb_info *info; u32 format; struct iosys_map map; int ret; @@ -285,7 +282,7 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, + buffer = drm_client_buffer_create_dumb(client, sizes->surface_width, sizes->surface_height, format); if 
(IS_ERR(buffer)) return PTR_ERR(buffer); @@ -304,12 +301,6 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, fb_helper->buffer = buffer; fb_helper->fb = fb; - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_drm_client_buffer_vunmap; - } - drm_fb_helper_fill_info(info, fb_helper, sizes); if (fb->funcs->dirty) @@ -317,18 +308,16 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, else ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes); if (ret) - goto err_drm_fb_helper_release_info; + goto err_drm_client_buffer_vunmap; return 0; -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); err_drm_client_buffer_vunmap: fb_helper->fb = NULL; fb_helper->buffer = NULL; drm_client_buffer_vunmap(buffer); err_drm_client_buffer_delete: - drm_client_framebuffer_delete(buffer); + drm_client_buffer_delete(buffer); return ret; } EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c index 1e827bf8b815..458c899b5d4f 100644 --- a/drivers/gpu/drm/drm_fbdev_shmem.c +++ b/drivers/gpu/drm/drm_fbdev_shmem.c @@ -9,6 +9,7 @@ #include #include #include +#include /* * struct fb_ops @@ -63,10 +64,8 @@ static void drm_fbdev_shmem_fb_destroy(struct fb_info *info) drm_fb_helper_fini(fb_helper); drm_client_buffer_vunmap(fb_helper->buffer); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_shmem_fb_ops = { @@ -136,10 +135,10 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; + struct fb_info *info = fb_helper->info; struct drm_client_buffer *buffer; struct drm_gem_shmem_object *shmem; struct drm_framebuffer *fb; - struct fb_info *info; u32 format; struct iosys_map map; int ret; @@ -149,7 +148,7 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, sizes->surface_bpp); format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, + buffer = drm_client_buffer_create_dumb(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) return PTR_ERR(buffer); @@ -169,12 +168,6 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, fb_helper->buffer = buffer; fb_helper->fb = fb; - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_drm_client_buffer_vunmap; - } - drm_fb_helper_fill_info(info, fb_helper, sizes); info->fbops = &drm_fbdev_shmem_fb_ops; @@ -195,18 +188,16 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, info->fbdefio = &fb_helper->fbdefio; ret = fb_deferred_io_init(info); if (ret) - goto err_drm_fb_helper_release_info; + goto err_drm_client_buffer_vunmap; return 0; -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); err_drm_client_buffer_vunmap: fb_helper->fb = NULL; fb_helper->buffer = NULL; drm_client_buffer_vunmap(buffer); err_drm_client_buffer_delete: - drm_client_framebuffer_delete(buffer); + drm_client_buffer_delete(buffer); return ret; } EXPORT_SYMBOL(drm_fbdev_shmem_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c 
index 85feb55bba11..160bc35d8738 100644 --- a/drivers/gpu/drm/drm_fbdev_ttm.c +++ b/drivers/gpu/drm/drm_fbdev_ttm.c @@ -50,11 +50,9 @@ static void drm_fbdev_ttm_fb_destroy(struct fb_info *info) fb_deferred_io_cleanup(info); drm_fb_helper_fini(fb_helper); vfree(shadow); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_ttm_fb_ops = { @@ -176,8 +174,8 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; + struct fb_info *info = fb_helper->info; struct drm_client_buffer *buffer; - struct fb_info *info; size_t screen_size; void *screen_buffer; u32 format; @@ -189,7 +187,7 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, + buffer = drm_client_buffer_create_dumb(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) return PTR_ERR(buffer); @@ -202,13 +200,7 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, screen_buffer = vzalloc(screen_size); if (!screen_buffer) { ret = -ENOMEM; - goto err_drm_client_framebuffer_delete; - } - - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_vfree; + goto err_drm_client_buffer_delete; } drm_fb_helper_fill_info(info, fb_helper, sizes); @@ -227,18 +219,16 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, info->fbdefio = &fb_helper->fbdefio; ret = fb_deferred_io_init(info); if (ret) - goto err_drm_fb_helper_release_info; + goto err_vfree; return 0; -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); err_vfree: vfree(screen_buffer); -err_drm_client_framebuffer_delete: +err_drm_client_buffer_delete: fb_helper->fb = NULL; fb_helper->buffer = NULL; - drm_client_framebuffer_delete(buffer); + drm_client_buffer_delete(buffer); return ret; } EXPORT_SYMBOL(drm_fbdev_ttm_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index eebd1a05ee97..be5e617ceb9f 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -405,7 +405,7 @@ EXPORT_SYMBOL(drm_open); static void drm_lastclose(struct drm_device *dev) { - drm_client_dev_restore(dev); + drm_client_dev_restore(dev, false); if (dev_is_pci(dev->dev)) vga_switcheroo_process_delayed_switch(); diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 006836554cc2..6cddf05c493b 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -1165,97 +1165,6 @@ void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_ } EXPORT_SYMBOL(drm_fb_argb8888_to_argb4444); -/** - * drm_fb_blit - Copy parts of a framebuffer to display memory - * @dst: Array of display-memory addresses to copy to - * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines - * within @dst; can be NULL if scanlines are stored next to each other. 
- * @dst_format: FOURCC code of the display's color format - * @src: The framebuffer memory to copy from - * @fb: The framebuffer to copy from - * @clip: Clip rectangle area to copy - * @state: Transform and conversion state - * - * This function copies parts of a framebuffer to display memory. If the - * formats of the display and the framebuffer mismatch, the blit function - * will attempt to convert between them during the process. The parameters @dst, - * @dst_pitch and @src refer to arrays. Each array must have at least as many - * entries as there are planes in @dst_format's format. Each entry stores the - * value for the format's respective color plane at the same index. - * - * This function does not apply clipping on @dst (i.e. the destination is at the - * top-left corner). - * - * Returns: - * 0 on success, or - * -EINVAL if the color-format conversion failed, or - * a negative error code otherwise. - */ -int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, - const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, struct drm_format_conv_state *state) -{ - uint32_t fb_format = fb->format->format; - - if (fb_format == dst_format) { - drm_fb_memcpy(dst, dst_pitch, src, fb, clip); - return 0; - } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (fb_format == DRM_FORMAT_XRGB8888) { - if (dst_format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XRGB1555) { - drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB1555) { - drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_RGBA5551) { - drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_RGB888) { - drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_BGR888) { - drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB8888) { - drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XBGR8888) { - drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ABGR8888) { - drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XRGB2101010) { - drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB2101010) { - drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_BGRX8888) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (dst_format == DRM_FORMAT_RGB332) { - drm_fb_xrgb8888_to_rgb332(dst, dst_pitch, src, fb, clip, state); - return 0; - } - } - - drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n", - &fb_format, &dst_format); - - return -EINVAL; -} -EXPORT_SYMBOL(drm_fb_blit); - static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned 
int pixels) { u8 *dbuf8 = dbuf; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index adbb73f00d68..18e753ade001 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -1048,7 +1048,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb) plane_state->crtc->base.id, plane_state->crtc->name, fb->base.id); - crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); ret = drm_atomic_add_affected_connectors(state, plane_state->crtc); if (ret) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index f884d155a832..efc79bbf3c73 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -101,10 +101,8 @@ drm_gem_init(struct drm_device *dev) vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager), GFP_KERNEL); - if (!vma_offset_manager) { - DRM_ERROR("out of memory\n"); + if (!vma_offset_manager) return -ENOMEM; - } dev->vma_offset_manager = vma_offset_manager; drm_vma_offset_manager_init(vma_offset_manager, @@ -785,9 +783,9 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count, int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out) { - int ret; - u32 *handles; struct drm_gem_object **objs; + u32 *handles; + int ret; if (!count) return 0; @@ -799,20 +797,11 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, *objs_out = objs; - handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL); - if (!handles) { - ret = -ENOMEM; - goto out; - } - - if (copy_from_user(handles, bo_handles, count * sizeof(u32))) { - ret = -EFAULT; - DRM_DEBUG("Failed to copy in GEM handles\n"); - goto out; - } + handles = vmemdup_array_user(bo_handles, count, sizeof(u32)); + if (IS_ERR(handles)) + return PTR_ERR(handles); ret = objects_lookup(filp, handles, count, objs); -out: kvfree(handles); return ret; @@ -855,12 +844,13 @@ EXPORT_SYMBOL(drm_gem_object_lookup); long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, bool wait_all, unsigned long timeout) { - long ret; + struct drm_device *dev = filep->minor->dev; struct drm_gem_object *obj; + long ret; obj = drm_gem_object_lookup(filep, handle); if (!obj) { - DRM_DEBUG("Failed to look up GEM BO %d\n", handle); + drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index 6fb55601252f..569d41a65a0b 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -338,8 +338,6 @@ void drm_gem_reset_shadow_plane(struct drm_plane *plane) } shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL); - if (!shadow_plane_state) - return; __drm_gem_reset_shadow_plane(plane, shadow_plane_state); } EXPORT_SYMBOL(drm_gem_reset_shadow_plane); diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c index 4f0320df858f..12d8307997a0 100644 --- a/drivers/gpu/drm/drm_gem_dma_helper.c +++ b/drivers/gpu/drm/drm_gem_dma_helper.c @@ -20,7 +20,9 @@ #include #include #include +#include #include +#include #include /** @@ -304,9 +306,11 @@ int drm_gem_dma_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct drm_gem_dma_object *dma_obj; + int ret; - args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); - args->size = args->pitch * args->height; + ret = 
drm_mode_size_dumb(drm, args, SZ_8, 0); + if (ret) + return ret; dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size, &args->handle); @@ -582,7 +586,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev, ret = dma_buf_vmap_unlocked(attach->dmabuf, &map); if (ret) { - DRM_ERROR("Failed to vmap PRIME buffer\n"); + drm_err(dev, "Failed to vmap PRIME buffer\n"); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 4bc89d33df59..9fd4eb02a20f 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "drm_internal.h" diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 5d1349c34afd..dc94a27710e5 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -48,6 +49,64 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { .vm_ops = &drm_gem_shmem_vm_ops, }; +static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, + size_t size, bool private, struct vfsmount *gemfs) +{ + struct drm_gem_object *obj = &shmem->base; + int ret = 0; + + if (!obj->funcs) + obj->funcs = &drm_gem_shmem_funcs; + + if (private) { + drm_gem_private_object_init(dev, obj, size); + shmem->map_wc = false; /* dma-buf mappings use always writecombine */ + } else { + ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs); + } + if (ret) { + drm_gem_private_object_fini(obj); + return ret; + } + + ret = drm_gem_create_mmap_offset(obj); + if (ret) + goto err_release; + + INIT_LIST_HEAD(&shmem->madv_list); + + if (!private) { + /* + * Our buffers are kept pinned, so allocating them + * from the MOVABLE zone is a really bad idea, and + * conflicts with CMA. See comments above new_inode() + * why this is required _and_ expected if you're + * going to pin these pages. + */ + mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER | + __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + } + + return 0; +err_release: + drm_gem_object_release(obj); + return ret; +} + +/** + * drm_gem_shmem_init - Initialize an allocated object. + * @dev: DRM device + * @shmem: The allocated shmem GEM object. + * @size: Size of the shmem GEM object to initialize. + * + * Returns: + * 0 on success, or a negative error code on failure.
+ */ +int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size) +{ + return __drm_gem_shmem_init(dev, shmem, size, false, NULL); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_init); + static struct drm_gem_shmem_object * __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, struct vfsmount *gemfs) @@ -70,46 +129,13 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, obj = &shmem->base; } - if (!obj->funcs) - obj->funcs = &drm_gem_shmem_funcs; - - if (private) { - drm_gem_private_object_init(dev, obj, size); - shmem->map_wc = false; /* dma-buf mappings use always writecombine */ - } else { - ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs); - } + ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs); if (ret) { - drm_gem_private_object_fini(obj); - goto err_free; - } - - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto err_release; - - INIT_LIST_HEAD(&shmem->madv_list); - - if (!private) { - /* - * Our buffers are kept pinned, so allocating them - * from the MOVABLE zone is a really bad idea, and - * conflicts with CMA. See comments above new_inode() - * why this is required _and_ expected if you're - * going to pin these pages. - */ - mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER | - __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + kfree(obj); + return ERR_PTR(ret); } return shmem; - -err_release: - drm_gem_object_release(obj); -err_free: - kfree(obj); - - return ERR_PTR(ret); } /** * drm_gem_shmem_create - Allocate an object with the given size @@ -150,13 +176,13 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt); /** - * drm_gem_shmem_free - Free resources associated with a shmem GEM object - * @shmem: shmem GEM object to free + * drm_gem_shmem_release - Release resources associated with a shmem GEM object. + * @shmem: shmem GEM object * - * This function cleans up the GEM object state and frees the memory used to - * store the object itself. + * This function cleans up the GEM object state, but does not free the memory used to store the + * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings. */ -void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; @@ -183,6 +209,19 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) } drm_gem_object_release(obj); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_release); + +/** + * drm_gem_shmem_free - Free resources associated with a shmem GEM object + * @shmem: shmem GEM object to free + * + * This function cleans up the GEM object state and frees the memory used to + * store the object itself. 
+ */ +void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) +{ + drm_gem_shmem_release(shmem); kfree(shmem); } EXPORT_SYMBOL_GPL(drm_gem_shmem_free); @@ -518,18 +557,11 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked); int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { - u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + int ret; - if (!args->pitch || !args->size) { - args->pitch = min_pitch; - args->size = PAGE_ALIGN(args->pitch * args->height); - } else { - /* ensure sane minimum values */ - if (args->pitch < min_pitch) - args->pitch = min_pitch; - if (args->size < args->pitch * args->height) - args->size = PAGE_ALIGN(args->pitch * args->height); - } + ret = drm_mode_size_dumb(dev, args, SZ_8, 0); + if (ret) + return ret; return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle); } diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index 257cca4cb97a..08ff0fadd0b2 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -4,6 +4,7 @@ #include #include +#include #include #include diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index b04cde4a60e7..5e5b70518dbe 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -107,7 +108,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) { - /* We got here via ttm_bo_put(), which means that the + /* We got here via ttm_bo_fini(), which means that the * TTM buffer object in 'bo' has already been cleaned * up; only release the GEM object. */ @@ -234,11 +235,11 @@ EXPORT_SYMBOL(drm_gem_vram_create); * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object * @gbo: the GEM VRAM object * - * See ttm_bo_put() for more information. + * See ttm_bo_fini() for more information. 
*/ void drm_gem_vram_put(struct drm_gem_vram_object *gbo) { - ttm_bo_put(&gbo->bo); + ttm_bo_fini(&gbo->bo); } EXPORT_SYMBOL(drm_gem_vram_put); @@ -859,7 +860,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev, dev->anon_inode->i_mapping, dev->vma_offset_manager, - false, true); + TTM_ALLOCATION_POOL_USE_DMA32); if (ret) return ret; @@ -967,7 +968,7 @@ drm_vram_helper_mode_valid_internal(struct drm_device *dev, max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT; - fbsize = mode->hdisplay * mode->vdisplay * max_bpp; + fbsize = (u32)mode->hdisplay * mode->vdisplay * max_bpp; fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE); if (fbpages > max_fbpages) diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index cb906765897e..73e550c8ff8c 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -1363,7 +1363,8 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm, order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages); if (is_device_private_page(page) || is_device_coherent_page(page)) { - if (zdd != page->zone_device_data && i > 0) { + if (!ctx->allow_mixed && + zdd != page->zone_device_data && i > 0) { err = -EOPNOTSUPP; goto err_unmap; } @@ -1399,7 +1400,8 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm, } else { dma_addr_t addr; - if (is_zone_device_page(page) || pagemap) { + if (is_zone_device_page(page) || + (pagemap && !ctx->allow_mixed)) { err = -EOPNOTSUPP; goto err_unmap; } diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index af63f4d00315..8a06d296561d 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -26,6 +26,7 @@ */ #include +#include #include #include @@ -876,6 +877,31 @@ __drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock, cond_spin_unlock(lock, !!lock); } +/** + * drm_gpuvm_bo_is_zombie() - check whether this vm_bo is scheduled for cleanup + * @vm_bo: the &drm_gpuvm_bo + * + * When a vm_bo is scheduled for cleanup using the bo_defer list, it is not + * immediately removed from the evict and extobj lists. Therefore, anyone + * iterating these lists should skip entries that are being destroyed. + * + * Checking the refcount without incrementing it is okay as long as the lock + * protecting the evict/extobj list is held for as long as you are using the + * vm_bo, because even if the refcount hits zero while you are using it, freeing + * the vm_bo requires taking the list's lock. + * + * Zombie entries can be observed on the evict and extobj lists regardless of + * whether DRM_GPUVM_RESV_PROTECTED is used, but they remain on the lists for a + * longer time when the resv lock is used because we can't take the resv lock + * during run_job() in immediate mode, meaning that they need to remain on the + * lists until drm_gpuvm_bo_deferred_cleanup() is called. + */ +static bool +drm_gpuvm_bo_is_zombie(struct drm_gpuvm_bo *vm_bo) +{ + return !kref_read(&vm_bo->kref); +} + /** * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list * @__vm_bo: the &drm_gpuvm_bo @@ -1081,6 +1107,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, INIT_LIST_HEAD(&gpuvm->evict.list); spin_lock_init(&gpuvm->evict.lock); + init_llist_head(&gpuvm->bo_defer); + kref_init(&gpuvm->kref); gpuvm->name = name ? 
name : "unknown"; @@ -1122,6 +1150,8 @@ drm_gpuvm_fini(struct drm_gpuvm *gpuvm) "Extobj list should be empty.\n"); drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list), "Evict list should be empty.\n"); + drm_WARN(gpuvm->drm, !llist_empty(&gpuvm->bo_defer), + "VM BO cleanup list should be empty.\n"); drm_gem_object_put(gpuvm->r_obj); } @@ -1217,6 +1247,9 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm, drm_gpuvm_resv_assert_held(gpuvm); list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) { + if (drm_gpuvm_bo_is_zombie(vm_bo)) + continue; + ret = exec_prepare_obj(exec, vm_bo->obj, num_fences); if (ret) break; @@ -1460,6 +1493,9 @@ drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec) list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list, list.entry.evict) { + if (drm_gpuvm_bo_is_zombie(vm_bo)) + continue; + ret = ops->vm_bo_validate(vm_bo, exec); if (ret) break; @@ -1560,6 +1596,7 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm, INIT_LIST_HEAD(&vm_bo->list.entry.extobj); INIT_LIST_HEAD(&vm_bo->list.entry.evict); + init_llist_node(&vm_bo->list.entry.bo_defer); return vm_bo; } @@ -1621,6 +1658,126 @@ drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo) } EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put); +/* + * drm_gpuvm_bo_into_zombie() - called when the vm_bo becomes a zombie due to + * deferred cleanup + * + * If deferred cleanup is used, then this must be called right after the vm_bo + * refcount drops to zero. Must be called with GEM mutex held. After releasing + * the GEM mutex, drm_gpuvm_bo_defer_zombie_cleanup() must be called. + */ +static void +drm_gpuvm_bo_into_zombie(struct kref *kref) +{ + struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo, + kref); + + if (!drm_gpuvm_resv_protected(vm_bo->vm)) { + drm_gpuvm_bo_list_del(vm_bo, extobj, true); + drm_gpuvm_bo_list_del(vm_bo, evict, true); + } + + list_del(&vm_bo->list.entry.gem); +} + +/* + * drm_gpuvm_bo_defer_zombie_cleanup() - adds a new zombie vm_bo to the + * bo_defer list + * + * Called after drm_gpuvm_bo_into_zombie(). GEM mutex must not be held. + * + * It's important that the GEM stays alive for the duration in which we hold + * the mutex, but the instant we add the vm_bo to bo_defer, another thread + * might call drm_gpuvm_bo_deferred_cleanup() and put the GEM. Therefore, to + * avoid kfreeing a mutex we are holding, the GEM mutex must be released + * *before* calling this function. + */ +static void +drm_gpuvm_bo_defer_zombie_cleanup(struct drm_gpuvm_bo *vm_bo) +{ + llist_add(&vm_bo->list.entry.bo_defer, &vm_bo->vm->bo_defer); +} + +static void +drm_gpuvm_bo_defer_free(struct kref *kref) +{ + struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo, + kref); + + drm_gpuvm_bo_into_zombie(kref); + mutex_unlock(&vm_bo->obj->gpuva.lock); + drm_gpuvm_bo_defer_zombie_cleanup(vm_bo); +} + +/** + * drm_gpuvm_bo_put_deferred() - drop a struct drm_gpuvm_bo reference with + * deferred cleanup + * @vm_bo: the &drm_gpuvm_bo to release the reference of + * + * This releases a reference to @vm_bo. + * + * This might take and release the GEMs GPUVA lock. You should call + * drm_gpuvm_bo_deferred_cleanup() later to complete the cleanup process. + * + * Returns: true if vm_bo is being destroyed, false otherwise. 
+ */ +bool +drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo) +{ + if (!vm_bo) + return false; + + drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm)); + + return !!kref_put_mutex(&vm_bo->kref, + drm_gpuvm_bo_defer_free, + &vm_bo->obj->gpuva.lock); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred); + +/** + * drm_gpuvm_bo_deferred_cleanup() - clean up BOs in the deferred list + * deferred cleanup + * @gpuvm: the VM to clean up + * + * Cleans up &drm_gpuvm_bo instances in the deferred cleanup list. + */ +void +drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm) +{ + const struct drm_gpuvm_ops *ops = gpuvm->ops; + struct drm_gpuvm_bo *vm_bo; + struct drm_gem_object *obj; + struct llist_node *bo_defer; + + bo_defer = llist_del_all(&gpuvm->bo_defer); + if (!bo_defer) + return; + + if (drm_gpuvm_resv_protected(gpuvm)) { + dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL); + llist_for_each_entry(vm_bo, bo_defer, list.entry.bo_defer) { + drm_gpuvm_bo_list_del(vm_bo, extobj, false); + drm_gpuvm_bo_list_del(vm_bo, evict, false); + } + dma_resv_unlock(drm_gpuvm_resv(gpuvm)); + } + + while (bo_defer) { + vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer); + bo_defer = bo_defer->next; + obj = vm_bo->obj; + if (ops && ops->vm_bo_free) + ops->vm_bo_free(vm_bo); + else + kfree(vm_bo); + + drm_gpuvm_put(gpuvm); + drm_gem_object_put(obj); + } +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup); + static struct drm_gpuvm_bo * __drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj) @@ -1948,6 +2105,40 @@ drm_gpuva_unlink(struct drm_gpuva *va) } EXPORT_SYMBOL_GPL(drm_gpuva_unlink); +/** + * drm_gpuva_unlink_defer() - unlink a &drm_gpuva with deferred vm_bo cleanup + * @va: the &drm_gpuva to unlink + * + * Similar to drm_gpuva_unlink(), but uses drm_gpuvm_bo_put_deferred() and takes + * the lock for the caller. + */ +void +drm_gpuva_unlink_defer(struct drm_gpuva *va) +{ + struct drm_gem_object *obj = va->gem.obj; + struct drm_gpuvm_bo *vm_bo = va->vm_bo; + bool should_defer_bo; + + if (unlikely(!obj)) + return; + + drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm)); + + mutex_lock(&obj->gpuva.lock); + list_del_init(&va->gem.entry); + + /* + * This is drm_gpuvm_bo_put_deferred() except we already hold the mutex. 
+ */ + should_defer_bo = kref_put(&vm_bo->kref, drm_gpuvm_bo_into_zombie); + mutex_unlock(&obj->gpuva.lock); + if (should_defer_bo) + drm_gpuvm_bo_defer_zombie_cleanup(vm_bo); + + va->vm_bo = NULL; +} +EXPORT_SYMBOL_GPL(drm_gpuva_unlink_defer); + /** * drm_gpuva_find_first() - find the first &drm_gpuva in the given range * @gpuvm: the &drm_gpuvm to search in diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 5a3bed48ab1f..f893b1e3a596 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -56,6 +56,17 @@ static inline void drm_client_debugfs_init(struct drm_device *dev) { } #endif +/* drm_client_sysrq.c */ +#if defined(CONFIG_DRM_CLIENT) && defined(CONFIG_MAGIC_SYSRQ) +void drm_client_sysrq_register(struct drm_device *dev); +void drm_client_sysrq_unregister(struct drm_device *dev); +#else +static inline void drm_client_sysrq_register(struct drm_device *dev) +{ } +static inline void drm_client_sysrq_unregister(struct drm_device *dev) +{ } +#endif + /* drm_file.c */ extern struct mutex drm_global_mutex; bool drm_dev_needs_global_mutex(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index d8a24875a7ba..ff193155129e 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -373,6 +373,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) return -EINVAL; file_priv->supports_virtualized_cursor_plane = req->value; break; + case DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE: + if (!file_priv->atomic) + return -EINVAL; + if (req->value > 1) + return -EINVAL; + file_priv->plane_color_pipeline = req->value; + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index e33c78fc8fbd..00482227a9cd 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include
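The drm_gem_shmem_helper.c hunks above split drm_gem_shmem_free() into drm_gem_shmem_release(), which only tears down the GEM object state, plus the existing kfree(), so that objects whose storage is owned elsewhere (the stated target is the Rust GEM bindings) can pair drm_gem_shmem_init() with drm_gem_shmem_release(). A minimal C sketch of that pairing follows; the my_bo type, its create/free helpers and the kzalloc-based ownership are illustrative assumptions, not part of the patch.

#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drm_gem_shmem_helper.h>

/* Hypothetical driver object that embeds the shmem GEM object and
 * therefore owns the allocation itself. */
struct my_bo {
	struct drm_gem_shmem_object shmem;
	/* driver-private fields would follow */
};

static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
{
	struct my_bo *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* Initialize the embedded object; drm_gem_shmem_create() is not
	 * used because it would allocate and free the object internally. */
	ret = drm_gem_shmem_init(dev, &bo->shmem, size);
	if (ret) {
		kfree(bo);
		return ERR_PTR(ret);
	}

	return bo;
}

/* GEM free callback: release the shmem state, then free the container. */
static void my_bo_free(struct drm_gem_object *obj)
{
	struct my_bo *bo = container_of(to_drm_gem_shmem_obj(obj), struct my_bo, shmem);

	drm_gem_shmem_release(&bo->shmem);
	kfree(bo);
}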
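The drm_gpuvm.c changes add a deferred vm_bo cleanup path (drm_gpuva_unlink_defer(), drm_gpuvm_bo_put_deferred() and drm_gpuvm_bo_deferred_cleanup()) for VMs in immediate mode, where the unmap path may neither take the resv lock nor free memory. The sketch below shows the calling pattern that the new kernel-doc describes, under invented driver names (my_vm, my_vm_unmap_range, my_vm_cleanup_work); freeing the struct drm_gpuva entries themselves is left out.

#include <linux/container_of.h>
#include <linux/workqueue.h>
#include <drm/drm_gpuvm.h>

struct my_vm {
	struct drm_gpuvm gpuvm;
	struct work_struct cleanup_work;
};

/* Unmap path in immediate mode (e.g. called from run_job()): vm_bos are
 * only queued on the VM's bo_defer list instead of being freed here. */
static void my_vm_unmap_range(struct my_vm *vm, u64 addr, u64 range)
{
	struct drm_gpuva *va, *next;

	drm_gpuvm_for_each_va_range_safe(va, next, &vm->gpuvm, addr, addr + range) {
		drm_gpuva_remove(va);
		drm_gpuva_unlink_defer(va);	/* takes the GEM gpuva lock itself */
	}
}

/* Later, from process context, where locking and freeing are allowed. */
static void my_vm_cleanup_work(struct work_struct *work)
{
	struct my_vm *vm = container_of(work, struct my_vm, cleanup_work);

	drm_gpuvm_bo_deferred_cleanup(&vm->gpuvm);
}

Per the new warning in drm_gpuvm_fini(), the bo_defer list must be drained with drm_gpuvm_bo_deferred_cleanup() before the VM is torn down.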
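The drm_setclientcap() hunk accepts DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE only from atomic clients and only with a value of 0 or 1. A short userspace sketch of how a compositor might opt in, assuming drm headers that already carry the new cap definition:

#include <xf86drm.h>

static int enable_plane_color_pipeline(int fd)
{
	int ret;

	/* The kernel rejects the new cap unless the client is atomic. */
	ret = drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);
	if (ret)
		return ret;

	/* Fails on kernels or headers without the color pipeline uAPI. */
	return drmSetClientCap(fd, DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE, 1);
}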