Merge tag 'drm-misc-next-2025-12-01-1' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

Extra drm-misc-next for v6.19-rc1:

UAPI Changes:
- Add support for drm colorop pipeline.
- Add COLOR_PIPELINE plane property.
- Add DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE.

Cross-subsystem Changes:
- Attempt to use higher-order mappings in the system heap allocator.
- Always taint the kernel when sw-sync is used.

Core Changes:
- Small fixes to drm/gem.
- Add emergency restore support to drm-client.
- Allocate and release fb_info in a single place.
- Rework ttm pipelined eviction fence handling.

Driver Changes:
- Support the drm color pipeline in vkms, amdgpu.
- Add NVJPG driver for tegra.
- Assorted small fixes and updates to rockchip, bridge/dw-hdmi-qp,
  panthor.
- Add ASL CS5263 DP-to-HDMI simple bridge.
- Add and improve support for the LG LD070WX3-SL01 MIPI DSI, Samsung
  LTL106AL01, Raystar RFF500F-AWH-DNN, Winstar WF70A8SYJHLNGA,
  Wanchanglong w552946aaa, Samsung SOFEF00, and Lenovo X13s panels.
- Add support for the it66122 to the it66121 driver.
- Support the Mali-G1 GPU in panthor.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patch.msgid.link/aa5cbd50-7676-4a59-bbed-e8428af86804@linux.intel.com
Dave Airlie 2025-12-02 18:09:01 +10:00
commit b3239df349
139 changed files with 7928 additions and 818 deletions

View File

@@ -19,6 +19,7 @@ properties:
compatible:
enum:
- ite,it66121
- ite,it66122
- ite,it6610
reg:

View File

@@ -27,7 +27,9 @@ properties:
- const: adi,adv7123
- enum:
- adi,adv7123
- asl-tek,cs5263
- dumb-vga-dac
- parade,ps185hdm
- radxa,ra620
- realtek,rtd2171
- ti,opa362

View File

@@ -24,6 +24,7 @@ properties:
- raspberrypi,dsi-7inch
- startek,kd050hdfia020
- tdo,tl050hdv35
- wanchanglong,w552946aaa
- wanchanglong,w552946aba
- const: ilitek,ili9881c

View File

@@ -0,0 +1,60 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/lg,ld070wx3-sl01.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: LG Corporation 7" WXGA TFT LCD panel
maintainers:
- Svyatoslav Ryhel <clamor95@gmail.com>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
items:
- const: lg,ld070wx3-sl01
reg:
maxItems: 1
vdd-supply: true
vcc-supply: true
backlight: true
port: true
required:
- compatible
- vdd-supply
- vcc-supply
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
dsi {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "lg,ld070wx3-sl01";
reg = <0>;
vdd-supply = <&vdd_3v3_lcd>;
vcc-supply = <&vcc_1v8_lcd>;
backlight = <&backlight>;
port {
endpoint {
remote-endpoint = <&dsi0_out>;
};
};
};
};
...

View File

@@ -59,6 +59,8 @@ properties:
# Jenson Display BL-JT60050-01A 7" WSVGA (1024x600) color TFT LCD LVDS panel
- jenson,bl-jt60050-01a
- tbs,a711-panel
# Winstar WF70A8SYJHLNGA 7" WSVGA (1024x600) color TFT LCD LVDS panel
- winstar,wf70a8syjhlnga
- const: panel-lvds

View File

@@ -19,6 +19,9 @@ description: |
If the panel is more advanced a dedicated binding file is required.
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
@@ -42,8 +45,6 @@ properties:
- kingdisplay,kd097d04
# LG ACX467AKM-7 4.95" 1080×1920 LCD Panel
- lg,acx467akm-7
# LG Corporation 7" WXGA TFT LCD panel
- lg,ld070wx3-sl01
# LG Corporation 5" HD TFT LCD panel
- lg,lh500wx1-sd03
# Lincoln LCD197 5" 1080x1920 LCD panel
@@ -56,8 +57,6 @@ properties:
- panasonic,vvx10f034n00
# Samsung s6e3fa7 1080x2220 based AMS559NK06 AMOLED panel
- samsung,s6e3fa7-ams559nk06
# Samsung sofef00 1080x2280 AMOLED panel
- samsung,sofef00
# Shanghai Top Display Optoelectronics 7" TL070WSH30 1024x600 TFT LCD panel
- tdo,tl070wsh30
@@ -70,30 +69,12 @@ properties:
reset-gpios: true
port: true
power-supply: true
vddio-supply: true
allOf:
- $ref: panel-common.yaml#
- if:
properties:
compatible:
enum:
- samsung,sofef00
then:
properties:
power-supply: false
required:
- vddio-supply
else:
properties:
vddio-supply: false
required:
- power-supply
additionalProperties: false
required:
- compatible
- power-supply
- reg
examples:

View File

@@ -270,6 +270,8 @@ properties:
- qiaodian,qd43003c0-40
# Shenzhen QiShenglong Industrialist Co., Ltd. Gopher 2b 4.3" 480(RGB)x272 TFT LCD panel
- qishenglong,gopher2b-lcd
# Raystar Optronics, Inc. RFF500F-AWH-DNN 5.0" TFT 840x480
- raystar,rff500f-awh-dnn
# Rocktech Displays Ltd. RK101II01D-CT 10.1" TFT 1280x800
- rocktech,rk101ii01d-ct
# Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
@@ -278,6 +280,8 @@ properties:
- rocktech,rk043fn48h
# Samsung Electronics 10.1" WXGA (1280x800) TFT LCD panel
- samsung,ltl101al01
# Samsung Electronics 10.6" FWXGA (1366x768) TFT LCD panel
- samsung,ltl106al01
# Samsung Electronics 10.1" WSVGA TFT LCD panel
- samsung,ltn101nt05
# Satoz SAT050AT40H12R2 5.0" WVGA TFT LCD panel

View File

@@ -9,6 +9,9 @@ title: Ronbo RB070D30 DSI Display Panel
maintainers:
- Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: ronbo,rb070d30
@@ -20,10 +23,6 @@ properties:
description: GPIO used for the power pin
maxItems: 1
reset-gpios:
description: GPIO used for the reset pin
maxItems: 1
shlr-gpios:
description: GPIO used for the shlr pin (horizontal flip)
maxItems: 1
@@ -35,10 +34,6 @@ properties:
vcc-lcd-supply:
description: Power regulator
backlight:
description: Backlight used by the panel
$ref: /schemas/types.yaml#/definitions/phandle
required:
- compatible
- power-gpios
@@ -47,5 +42,6 @@ required:
- shlr-gpios
- updn-gpios
- vcc-lcd-supply
- port
additionalProperties: false
unevaluatedProperties: false

View File

@@ -0,0 +1,79 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/samsung,sofef00.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung SOFEF00 AMOLED DDIC
description: The SOFEF00 is a display driver IC with a connected panel.
maintainers:
- David Heidelberg <david@ixit.cz>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
items:
- enum:
# Samsung 6.01 inch, 1080x2160 pixels, 18:9 ratio
- samsung,sofef00-ams601nt22
# Samsung 6.28 inch, 1080x2280 pixels, 19:9 ratio
- samsung,sofef00-ams628nw01
- const: samsung,sofef00
reg:
maxItems: 1
poc-supply:
description: POC regulator
vci-supply:
description: VCI regulator
vddio-supply:
description: VDD regulator
required:
- compatible
- reset-gpios
- poc-supply
- vci-supply
- vddio-supply
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
dsi {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "samsung,sofef00-ams628nw01", "samsung,sofef00";
reg = <0>;
vddio-supply = <&vreg_l14a_1p88>;
vci-supply = <&s2dos05_buck1>;
poc-supply = <&s2dos05_ldo1>;
te-gpios = <&tlmm 10 GPIO_ACTIVE_HIGH>;
reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
pinctrl-0 = <&panel_active>;
pinctrl-1 = <&panel_suspend>;
pinctrl-names = "default", "sleep";
port {
panel_in: endpoint {
remote-endpoint = <&mdss_dsi0_out>;
};
};
};
};
...

View File

@@ -113,6 +113,14 @@ properties:
description:
Additional HDMI QP related data is accessed through VO GRF regs.
frl-enable-gpios:
description:
Optional GPIO line to be asserted when operating in HDMI 2.1 FRL mode and
deasserted for HDMI 1.4/2.0 TMDS. It can be used to control external
voltage bias for HDMI data lines. When not present the HDMI encoder will
operate in TMDS mode only.
maxItems: 1
required:
- compatible
- reg
@@ -132,8 +140,10 @@ unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/rockchip,rk3588-cru.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/rockchip.h>
#include <dt-bindings/power/rk3588-power.h>
#include <dt-bindings/reset/rockchip,rk3588-cru.h>
@@ -164,6 +174,7 @@ examples:
rockchip,grf = <&sys_grf>;
rockchip,vo-grf = <&vo1_grf>;
#sound-dai-cells = <0>;
frl-enable-gpios = <&gpio4 RK_PB1 GPIO_ACTIVE_LOW>;
ports {
#address-cells = <1>;

View File

@@ -176,6 +176,8 @@ patternProperties:
description: All Sensors Corporation
"^asix,.*":
description: ASIX Electronics Corporation
"^asl-tek,.*":
description: ASL Xiamen Technology Co., Ltd.
"^aspeed,.*":
description: ASPEED Technology Inc.
"^asrock,.*":
@@ -1325,6 +1327,8 @@ patternProperties:
description: Raumfeld GmbH
"^raydium,.*":
description: Raydium Semiconductor Corp.
"^raystar,.*":
description: Raystar Optronics, Inc.
"^rda,.*":
description: Unisoc Communications, Inc.
"^realtek,.*":

View File

@@ -413,6 +413,21 @@ Plane Panic Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_panic.c
:export:
Colorop Abstraction
===================
.. kernel-doc:: drivers/gpu/drm/drm_colorop.c
:doc: overview
Colorop Functions Reference
---------------------------
.. kernel-doc:: include/drm/drm_colorop.h
:internal:
.. kernel-doc:: drivers/gpu/drm/drm_colorop.c
:export:
Display Modes Function Reference
================================

View File

@@ -0,0 +1,378 @@
.. SPDX-License-Identifier: GPL-2.0
========================
Linux Color Pipeline API
========================
What problem are we solving?
============================
We would like to support pre- and post-blending complex color
transformations in display controller hardware in order to allow for
HW-supported HDR use-cases, as well as to provide support to
color-managed applications, such as video or image editors.
It is possible to support an HDR output on HW supporting the Colorspace
and HDR Metadata drm_connector properties, but that requires the
compositor or application to render and compose the content into one
final buffer intended for display. Doing so is costly.
Most modern display HW offers various 1D LUTs, 3D LUTs, matrices, and other
operations to support color transformations. These operations are often
implemented in fixed-function HW and therefore much more power efficient than
performing similar operations via shaders or CPU.
We would like to make use of this HW functionality to support complex color
transformations with no, or minimal, CPU or shader load. The switch between HW
fixed-function blocks and shaders/CPU must be seamless, with no visible
difference when fallback to shaders/CPU is necessary at any time.
How are other OSes solving this problem?
========================================
The most widely supported use-cases regard HDR content, whether video or
gaming.
Most OSes will specify the source content format (color gamut, encoding transfer
function, and other metadata, such as max and average light levels) to a driver.
Drivers will then program their fixed-function HW accordingly to map from a
source content buffer's space to a display's space.
When fixed-function HW is not available the compositor will assemble a shader to
ask the GPU to perform the transformation from the source content format to the
display's format.
A compositor's mapping function and a driver's mapping function are usually
entirely separate concepts. On OSes where a HW vendor has no insight into
closed-source compositor code, such a vendor will tune their color management
code to visually match the compositor's. On other OSes, where both mapping
functions are open to an implementer, they will ensure both mappings match.
This results in mapping algorithm lock-in, meaning that no-one alone can
experiment with or introduce new mapping algorithms and achieve
consistent results regardless of which implementation path is taken.
Why is Linux different?
=======================
Unlike other OSes, where there is one compositor for one or more drivers, on
Linux we have a many-to-many relationship. Many compositors; many drivers.
In addition each compositor vendor or community has their own view of how
color management should be done. This is what makes Linux so beautiful.
This means that a HW vendor can no longer tune their driver to one
compositor, as tuning it to one could make it look fairly different from
another compositor's color mapping.
We need a better solution.
Descriptive API
===============
An API that describes the source and destination colorspaces is a descriptive
API. It describes the input and output color spaces but does not describe
how precisely they should be mapped. Such a mapping includes many minute
design decisions that can greatly affect the look of the final result.
It is not feasible to describe such a mapping with enough detail to ensure the
same result from each implementation. In fact, these mappings are a very active
research area.
Prescriptive API
================
A prescriptive API does not describe the source and destination colorspaces.
Instead, it prescribes a recipe for how to manipulate pixel values to arrive at
the desired outcome.
This recipe is generally an ordered list of straightforward operations,
with clear mathematical definitions, such as 1D LUTs, 3D LUTs, matrices,
or other operations that can be described in a precise manner.
The Color Pipeline API
======================
HW color management pipelines can significantly differ between HW
vendors in terms of availability, ordering, and capabilities of HW
blocks. This makes a common definition of color management blocks and
their ordering nigh impossible. Instead we are defining an API that
allows user space to discover the HW capabilities in a generic manner,
agnostic of specific drivers and hardware.
drm_colorop Object
==================
To support the definition of color pipelines we define the DRM core
object type drm_colorop. Individual drm_colorop objects will be chained
via the NEXT property of a drm_colorop to constitute a color pipeline.
Each drm_colorop object is unique, i.e., even if multiple color
pipelines have the same operation they won't share the same drm_colorop
object to describe that operation.
Note that drivers are not expected to map drm_colorop objects statically
to specific HW blocks. The mapping of drm_colorop objects is entirely a
driver-internal detail and can be as dynamic or static as a driver needs
it to be. See more in the Driver Implementation Guide section below.
Each drm_colorop has three core properties:
TYPE: An enumeration property, defining the type of transformation, such as
* enumerated curve
* custom (uniform) 1D LUT
* 3x3 matrix
* 3x4 matrix
* 3D LUT
* etc.
Depending on the type of transformation other properties will describe
more details.
BYPASS: A boolean property that can be used to easily put a block into
bypass mode. The BYPASS property is not mandatory for a colorop, as long
as the entire pipeline can get bypassed by setting the COLOR_PIPELINE on
a plane to '0'.
NEXT: The ID of the next drm_colorop in a color pipeline, or 0 if this
drm_colorop is the last in the chain.
An example of a drm_colorop object might look like one of these::
/* 1D enumerated curve */
Color operation 42
├─ "TYPE": immutable enum {1D enumerated curve, 1D LUT, 3x3 matrix, 3x4 matrix, 3D LUT, etc.} = 1D enumerated curve
├─ "BYPASS": bool {true, false}
├─ "CURVE_1D_TYPE": enum {sRGB EOTF, sRGB inverse EOTF, PQ EOTF, PQ inverse EOTF, …}
└─ "NEXT": immutable color operation ID = 43
/* custom 4k entry 1D LUT */
Color operation 52
├─ "TYPE": immutable enum {1D enumerated curve, 1D LUT, 3x3 matrix, 3x4 matrix, 3D LUT, etc.} = 1D LUT
├─ "BYPASS": bool {true, false}
├─ "SIZE": immutable range = 4096
├─ "DATA": blob
└─ "NEXT": immutable color operation ID = 0
/* 17^3 3D LUT */
Color operation 72
├─ "TYPE": immutable enum {1D enumerated curve, 1D LUT, 3x3 matrix, 3x4 matrix, 3D LUT, etc.} = 3D LUT
├─ "BYPASS": bool {true, false}
├─ "SIZE": immutable range = 17
├─ "DATA": blob
└─ "NEXT": immutable color operation ID = 73
drm_colorop extensibility
-------------------------
Unlike existing DRM core objects, like &drm_plane, drm_colorop is not
extensible. This simplifies implementations and keeps all functionality
for managing &drm_colorop objects in the DRM core.
If there is a need one may introduce a simple &drm_colorop_funcs
function table in the future, for example to support an IN_FORMATS
property on a &drm_colorop.
If a driver requires the ability to create a driver-specific colorop
object they will need to add &drm_colorop func table support with
support for the usual functions, like destroy, atomic_duplicate_state,
and atomic_destroy_state.
COLOR_PIPELINE Plane Property
=============================
Color Pipelines are created by a driver and advertised via a new
COLOR_PIPELINE enum property on each plane. Values of the property
always include object id 0, which is the default and means all color
processing is disabled. Additional values will be the object IDs of the
first drm_colorop in a pipeline. A driver can create and advertise none,
one, or more possible color pipelines. A DRM client will select a color
pipeline by setting the COLOR_PIPELINE property to the respective value.
NOTE: Many DRM clients will set enumeration properties via the string
value, often hard-coding it. Since this enumeration is generated based
on the colorop object IDs it is important to perform the Color Pipeline
Discovery, described below, instead of hard-coding color pipeline
assignment. Drivers might generate the enum strings dynamically.
Hard-coded strings might only work for specific drivers on specific
pieces of HW. Color Pipeline Discovery can work universally, as long as
drivers implement the required color operations.
The COLOR_PIPELINE property is only exposed when the
DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set. Drivers shall ignore any
existing pre-blend color operations when this cap is set, such as
COLOR_RANGE and COLOR_ENCODING. If drivers want to support COLOR_RANGE
or COLOR_ENCODING functionality when the color pipeline client cap is
set, they are expected to expose colorops in the pipeline to allow for
the appropriate color transformation.
Setting of the COLOR_PIPELINE plane property or drm_colorop properties
is only allowed for userspace that sets this client cap.
An example of a COLOR_PIPELINE property on a plane might look like this::
Plane 10
├─ "TYPE": immutable enum {Overlay, Primary, Cursor} = Primary
├─ …
└─ "COLOR_PIPELINE": enum {0, 42, 52} = 0
Color Pipeline Discovery
========================
A DRM client wanting color management on a drm_plane will:
1. Get the COLOR_PIPELINE property of the plane
2. iterate all COLOR_PIPELINE enum values
3. for each enum value walk the color pipeline (via the NEXT pointers)
and see if the available color operations are suitable for the
desired color management operations
If userspace encounters an unknown or unsuitable color operation during
discovery it does not need to reject the entire color pipeline outright,
as long as the unknown or unsuitable colorop has a "BYPASS" property.
Drivers will ensure that a bypassed block does not have any effect.
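
A sketch of such a discovery walk, following NEXT until it reaches 0, might
look like this (building on the includes from the sketch above plus
<stdbool.h>; colorop_is_suitable() is a hypothetical stand-in for the client's
own policy, and fetching colorop properties via DRM_MODE_OBJECT_ANY is an
assumption of this sketch)::

    /* Hypothetical client policy: inspect TYPE etc. of colorop 'id'. */
    static bool colorop_is_suitable(int fd, uint32_t id);

    /* Return true if every colorop in the pipeline starting at 'id' is
     * either suitable for the client or can be bypassed. */
    static bool pipeline_is_usable(int fd, uint32_t id)
    {
        while (id != 0) {
            drmModeObjectProperties *props;
            uint32_t next = 0, i;
            bool bypassable = false;

            props = drmModeObjectGetProperties(fd, id, DRM_MODE_OBJECT_ANY);
            if (!props)
                return false;

            for (i = 0; i < props->count_props; i++) {
                drmModePropertyRes *prop =
                    drmModeGetProperty(fd, props->props[i]);

                if (!prop)
                    continue;
                if (!strcmp(prop->name, "NEXT"))
                    next = (uint32_t)props->prop_values[i];
                else if (!strcmp(prop->name, "BYPASS"))
                    bypassable = true;
                drmModeFreeProperty(prop);
            }
            drmModeFreeObjectProperties(props);

            if (!colorop_is_suitable(fd, id) && !bypassable)
                return false;

            id = next;
        }
        return true;
    }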
An example of chained properties to define an AMD pre-blending color
pipeline might look like this::
Plane 10
├─ "TYPE" (immutable) = Primary
└─ "COLOR_PIPELINE": enum {0, 44} = 0
Color operation 44
├─ "TYPE" (immutable) = 1D enumerated curve
├─ "BYPASS": bool
├─ "CURVE_1D_TYPE": enum {sRGB EOTF, PQ EOTF} = sRGB EOTF
└─ "NEXT" (immutable) = 45
Color operation 45
├─ "TYPE" (immutable) = 3x4 Matrix
├─ "BYPASS": bool
├─ "DATA": blob
└─ "NEXT" (immutable) = 46
Color operation 46
├─ "TYPE" (immutable) = 1D enumerated curve
├─ "BYPASS": bool
├─ "CURVE_1D_TYPE": enum {sRGB Inverse EOTF, PQ Inverse EOTF} = sRGB EOTF
└─ "NEXT" (immutable) = 47
Color operation 47
├─ "TYPE" (immutable) = 1D LUT
├─ "SIZE": immutable range = 4096
├─ "DATA": blob
└─ "NEXT" (immutable) = 48
Color operation 48
├─ "TYPE" (immutable) = 3D LUT
├─ "DATA": blob
└─ "NEXT" (immutable) = 49
Color operation 49
├─ "TYPE" (immutable) = 1D enumerated curve
├─ "BYPASS": bool
├─ "CURVE_1D_TYPE": enum {sRGB EOTF, PQ EOTF} = sRGB EOTF
└─ "NEXT" (immutable) = 0
Color Pipeline Programming
==========================
Once a DRM client has found a suitable pipeline it will:
1. Set the COLOR_PIPELINE enum value to the one pointing at the first
drm_colorop object of the desired pipeline
2. Set the properties for all drm_colorop objects in the pipeline to the
desired values, setting BYPASS to true for unused drm_colorop blocks,
and false for enabled drm_colorop blocks
3. Perform (TEST_ONLY or not) atomic commit with all the other KMS
states it wishes to change
To configure the pipeline for an HDR10 PQ plane and blending in linear
space, a compositor might perform an atomic commit with the following
property values::
Plane 10
└─ "COLOR_PIPELINE" = 42
Color operation 42
└─ "BYPASS" = true
Color operation 44
└─ "BYPASS" = true
Color operation 45
└─ "BYPASS" = true
Color operation 46
└─ "BYPASS" = true
Color operation 47
├─ "DATA" = Gamut mapping + tone mapping + night mode
└─ "BYPASS" = false
Color operation 48
├─ "CURVE_1D_TYPE" = PQ EOTF
└─ "BYPASS" = false
Driver Implementer's Guide
==========================
What does this all mean for driver implementations? As noted above the
colorops can map to HW directly but don't need to do so. Here are some
suggestions on how to think about creating your color pipelines:
- Try to expose pipelines that use already defined colorops, even if
your hardware pipeline is split differently. This allows existing
userspace to immediately take advantage of the hardware.
- Additionally, try to expose your actual hardware blocks as colorops.
Define new colorop types where you believe it can offer significant
benefits if userspace learns to program them.
- Avoid defining new colorops for compound operations with very narrow
scope. If you have a hardware block for a special operation that
cannot be split further, you can expose that as a new colorop type.
However, try to not define colorops for "use cases", especially if
they require you to combine multiple hardware blocks.
- Design new colorops as prescriptive, not descriptive; by the
mathematical formula, not by the assumed input and output.
A defined colorop type must be deterministic. The exact behavior of the
colorop must be documented entirely, whether via a mathematical formula
or some other description. Its operation can depend only on its
properties and input and nothing else, allowed error tolerance
notwithstanding.
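
For instance, a 1D curve colorop advertising the sRGB EOTF is pinned down
exactly by the standard transfer function, applied independently to each
color channel::

    EOTF_sRGB(x) = x / 12.92                     for 0 <= x <= 0.04045
    EOTF_sRGB(x) = ((x + 0.055) / 1.055) ^ 2.4   for 0.04045 < x <= 1

Any two drivers exposing this colorop type must then produce the same result,
within the allowed error tolerance.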
Driver Forward/Backward Compatibility
=====================================
As this is uAPI, drivers can't regress color pipelines that have been
introduced for a given HW generation. New HW generations are free to
abandon color pipelines advertised for previous generations.
Nevertheless, it can be beneficial to carry support for existing color
pipelines forward as those will likely already have support in DRM
clients.
Introducing new colorops to a pipeline is fine, as long as they can be
bypassed or are purely informational. DRM clients implementing support
for the pipeline can always skip unknown properties as long as they can
be confident that doing so will not cause unexpected results.
If a new colorop doesn't fall into one of the above categories
(bypassable or informational) the modified pipeline would be unusable
for user space. In this case a new pipeline should be defined.
References
==========
1. https://lore.kernel.org/dri-devel/QMers3awXvNCQlyhWdTtsPwkp5ie9bze_hD5nAccFW7a_RXlWjYB7MoUW_8CKLT2bSQwIXVi5H6VULYIxCdgvryZoAoJnC5lZgyK1QWn488=@emersion.fr/

View File

@@ -35,3 +35,6 @@ host such documentation:
.. toctree::
i915_vm_bind.rst
.. toctree::
color_pipeline.rst

View File

@@ -8088,6 +8088,13 @@ S: Maintained
F: Documentation/devicetree/bindings/display/panel/samsung,s6e3ha8.yaml
F: drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
DRM DRIVER FOR SAMSUNG SOFEF00 DDIC
M: David Heidelberg <david@ixit.cz>
M: Casey Connolly <casey.connolly@linaro.org>
S: Maintained
F: Documentation/devicetree/bindings/display/panel/samsung,sofef00.yaml
F: drivers/gpu/drm/panel/panel-samsung-sofef00.c
DRM DRIVER FOR SHARP MEMORY LCD
M: Alex Lanzano <lanzano.alex@gmail.com>
S: Maintained

View File

@@ -997,19 +997,21 @@ EXPORT_SYMBOL(dma_fence_set_deadline);
*/
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
const char __rcu *timeline;
const char __rcu *driver;
const char __rcu *timeline = "";
const char __rcu *driver = "";
const char *signaled = "";
rcu_read_lock();
timeline = dma_fence_timeline_name(fence);
driver = dma_fence_driver_name(fence);
if (!dma_fence_is_signaled(fence)) {
timeline = dma_fence_timeline_name(fence);
driver = dma_fence_driver_name(fence);
signaled = "un";
}
seq_printf(seq, "%s %s seq %llu %ssignalled\n",
rcu_dereference(driver),
rcu_dereference(timeline),
fence->seqno,
dma_fence_is_signaled(fence) ? "" : "un");
seq_printf(seq, "%llu:%llu %s %s %ssignalled\n",
fence->context, fence->seqno, timeline, driver,
signaled);
rcu_read_unlock();
}

View File

@@ -186,20 +186,35 @@ static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
struct system_heap_buffer *buffer = dmabuf->priv;
struct sg_table *table = &buffer->sg_table;
unsigned long addr = vma->vm_start;
struct sg_page_iter piter;
int ret;
unsigned long pgoff = vma->vm_pgoff;
struct scatterlist *sg;
int i, ret;
for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
struct page *page = sg_page_iter_page(&piter);
for_each_sgtable_sg(table, sg, i) {
unsigned long n = sg->length >> PAGE_SHIFT;
ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
vma->vm_page_prot);
if (pgoff < n)
break;
pgoff -= n;
}
for (; sg && addr < vma->vm_end; sg = sg_next(sg)) {
unsigned long n = (sg->length >> PAGE_SHIFT) - pgoff;
struct page *page = sg_page(sg) + pgoff;
unsigned long size = n << PAGE_SHIFT;
if (addr + size > vma->vm_end)
size = vma->vm_end - addr;
ret = remap_pfn_range(vma, addr, page_to_pfn(page),
size, vma->vm_page_prot);
if (ret)
return ret;
addr += PAGE_SIZE;
if (addr >= vma->vm_end)
return 0;
addr += size;
pgoff = 0;
}
return 0;
}

View File

@@ -8,6 +8,7 @@
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/panic.h>
#include <linux/slab.h>
#include <linux/sync_file.h>
@@ -349,6 +350,9 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
struct sync_file *sync_file;
struct sw_sync_create_fence_data data;
/* SW sync fences are inherently unsafe and can deadlock the kernel */
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
if (fd < 0)
return fd;

View File

@@ -41,6 +41,7 @@ drm-y := \
drm_bridge.o \
drm_cache.o \
drm_color_mgmt.o \
drm_colorop.o \
drm_connector.o \
drm_crtc.o \
drm_displayid.o \
@@ -76,7 +77,8 @@ drm-y := \
drm-$(CONFIG_DRM_CLIENT) += \
drm_client.o \
drm_client_event.o \
drm_client_modeset.o
drm_client_modeset.o \
drm_client_sysrq.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o

View File

@@ -2185,8 +2185,10 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
} else {
drm_sched_entity_destroy(&adev->mman.high_pr);
drm_sched_entity_destroy(&adev->mman.low_pr);
dma_fence_put(man->move);
man->move = NULL;
/* Drop all the old fences since re-creating the scheduler entities
* will allocate new contexts.
*/
ttm_resource_manager_cleanup(man);
}
/* this just adjusts TTM size idea, which sets lpfn to the correct value */

View File

@@ -39,7 +39,8 @@ AMDGPUDM = \
amdgpu_dm_psr.o \
amdgpu_dm_replay.o \
amdgpu_dm_quirks.o \
amdgpu_dm_wb.o
amdgpu_dm_wb.o \
amdgpu_dm_colorop.o
ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o

View File

@@ -5888,6 +5888,10 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
*color_space = COLOR_SPACE_SRGB;
/* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */
if (plane_state->state && plane_state->state->plane_color_pipeline)
return 0;
/* DRM color properties only affect non-RGB formats. */
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
return 0;

View File

@@ -26,12 +26,39 @@
#include "amdgpu.h"
#include "amdgpu_mode.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_colorop.h"
#include "dc.h"
#include "modules/color/color_gamma.h"
/**
* DOC: overview
*
* We have three types of color management in the AMD display driver.
* 1. the legacy &drm_crtc DEGAMMA, CTM, and GAMMA properties
* 2. AMD driver private color management on &drm_plane and &drm_crtc
* 3. AMD plane color pipeline
*
* The CRTC properties are the original color management. When they were
* implemented, per-plane color management was not a thing yet. Because
* of that we could get away with plumbing the DEGAMMA and CTM
* properties to pre-blending HW functions. This is incompatible with
* per-plane color management, such as via the AMD private properties or
* the new drm_plane color pipeline. The only compatible CRTC property
* with per-plane color management is the GAMMA property as it is
* applied post-blending.
*
* The AMD driver private color management properties are only exposed
* when the kernel is built explicitly with -DAMD_PRIVATE_COLOR. They
* are temporary building blocks on the path to full-fledged &drm_plane
* and &drm_crtc color pipelines and lay the driver's groundwork for the
* color pipelines.
*
* The AMD plane color pipeline describes AMD's &drm_colorops via the
* &drm_plane's COLOR_PIPELINE property.
*
* drm_crtc Properties
* -------------------
*
* The DC interface to HW gives us the following color management blocks
* per pipe (surface):
*
@@ -42,36 +69,93 @@
* - Surface regamma LUT (normalized)
* - Output CSC (normalized)
*
* But these aren't a direct mapping to DRM color properties. The current DRM
* interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware
* is essentially giving:
* But these aren't a direct mapping to DRM color properties. The
* current DRM interface exposes CRTC degamma, CRTC CTM and CRTC regamma
* while our hardware is essentially giving:
*
* Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM
*
* The input gamma LUT block isn't really applicable here since it operates
* on the actual input data itself rather than the HW fp representation. The
* input and output CSC blocks are technically available to use as part of
* the DC interface but are typically used internally by DC for conversions
* between color spaces. These could be blended together with user
* adjustments in the future but for now these should remain untouched.
* The input gamma LUT block isn't really applicable here since it
* operates on the actual input data itself rather than the HW fp
* representation. The input and output CSC blocks are technically
* available to use as part of the DC interface but are typically used
* internally by DC for conversions between color spaces. These could be
* blended together with user adjustments in the future but for now
* these should remain untouched.
*
* The pipe blending also happens after these blocks so we don't actually
* support any CRTC props with correct blending with multiple planes - but we
* can still support CRTC color management properties in DM in most single
* plane cases correctly with clever management of the DC interface in DM.
* The pipe blending also happens after these blocks so we don't
* actually support any CRTC props with correct blending with multiple
* planes - but we can still support CRTC color management properties in
* DM in most single plane cases correctly with clever management of the
* DC interface in DM.
*
* As per DRM documentation, blocks should be in hardware bypass when their
* respective property is set to NULL. A linear DGM/RGM LUT should also
* considered as putting the respective block into bypass mode.
* As per DRM documentation, blocks should be in hardware bypass when
* their respective property is set to NULL. A linear DGM/RGM LUT should
* also be considered as putting the respective block into bypass mode.
*
* This means that the following
* configuration is assumed to be the default:
* This means that the following configuration is assumed to be the
* default:
*
* Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... CRTC
* DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
*
* AMD Private Color Management on drm_plane
* -----------------------------------------
*
* The AMD private color management properties on a &drm_plane are:
*
* - AMD_PLANE_DEGAMMA_LUT
* - AMD_PLANE_DEGAMMA_LUT_SIZE
* - AMD_PLANE_DEGAMMA_TF
* - AMD_PLANE_HDR_MULT
* - AMD_PLANE_CTM
* - AMD_PLANE_SHAPER_LUT
* - AMD_PLANE_SHAPER_LUT_SIZE
* - AMD_PLANE_SHAPER_TF
* - AMD_PLANE_LUT3D
* - AMD_PLANE_LUT3D_SIZE
* - AMD_PLANE_BLEND_LUT
* - AMD_PLANE_BLEND_LUT_SIZE
* - AMD_PLANE_BLEND_TF
*
* The AMD private color management property on a &drm_crtc is:
*
* - AMD_CRTC_REGAMMA_TF
*
* Use of these properties is discouraged.
*
* AMD plane color pipeline
* ------------------------
*
* The AMD &drm_plane color pipeline is advertised for DCN generations
* 3.0 and newer. It exposes these elements in this order:
*
* 1. 1D curve colorop
* 2. Multiplier
* 3. 3x4 CTM
* 4. 1D curve colorop
* 5. 1D LUT
* 6. 3D LUT
* 7. 1D curve colorop
* 8. 1D LUT
*
* The multiplier (#2) is a simple multiplier that is applied to all
* channels.
*
* The 3x4 CTM (#3) is a simple 3x4 matrix.
*
* #1 and #7 are non-linear to linear curves. #4 is a linear to
* non-linear curve. They support sRGB, PQ, and BT.709/BT.2020 EOTFs or
* their inverse.
*
* The 1D LUTs (#5 and #8) are plain 4096 entry LUTs.
*
* The 3DLUT (#6) is a tetrahedrally interpolated 17x17x17 LUT.
*
* Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ...
* CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
*/
#define MAX_DRM_LUT_VALUE 0xFFFF
#define MAX_DRM_LUT32_VALUE 0xFFFFFFFF
#define SDR_WHITE_LEVEL_INIT_VALUE 80
/**
@@ -341,6 +425,21 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
return blob ? (struct drm_color_lut *)blob->data : NULL;
}
/**
* __extract_blob_lut32 - Extracts the DRM lut and lut size from a blob.
* @blob: DRM color mgmt property blob
* @size: lut size
*
* Returns:
* DRM LUT or NULL
*/
static const struct drm_color_lut32 *
__extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size)
{
*size = blob ? drm_color_lut32_size(blob) : 0;
return blob ? (struct drm_color_lut32 *)blob->data : NULL;
}
/**
* __is_lut_linear - check if the given lut is a linear mapping of values
* @lut: given lut to check values
@@ -414,6 +513,24 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
}
}
/**
* __drm_lut32_to_dc_gamma - convert the drm_color_lut to dc_gamma.
* @lut: DRM lookup table for color conversion
* @gamma: DC gamma to set entries
*
* The conversion depends on the size of the lut - whether or not it's legacy.
*/
static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma)
{
int i;
for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) {
gamma->entries.red[i] = dc_fixpt_from_fraction(lut[i].red, MAX_DRM_LUT32_VALUE);
gamma->entries.green[i] = dc_fixpt_from_fraction(lut[i].green, MAX_DRM_LUT32_VALUE);
gamma->entries.blue[i] = dc_fixpt_from_fraction(lut[i].blue, MAX_DRM_LUT32_VALUE);
}
}
/**
* __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix
* @ctm: DRM color transformation matrix
@@ -566,6 +683,63 @@ static int __set_output_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
/**
* __set_output_tf_32 - calculates the output transfer function based on expected input space.
* @func: transfer function
* @lut: lookup table that defines the color space
* @lut_size: size of respective lut
* @has_rom: if ROM can be used for hardcoded curve
*
* Returns:
* 0 in case of success. -ENOMEM if fails.
*/
static int __set_output_tf_32(struct dc_transfer_func *func,
const struct drm_color_lut32 *lut, uint32_t lut_size,
bool has_rom)
{
struct dc_gamma *gamma = NULL;
struct calculate_buffer cal_buffer = {0};
bool res;
cal_buffer.buffer_index = -1;
if (lut_size) {
gamma = dc_create_gamma();
if (!gamma)
return -ENOMEM;
gamma->num_entries = lut_size;
__drm_lut32_to_dc_gamma(lut, gamma);
}
if (func->tf == TRANSFER_FUNCTION_LINEAR) {
/*
* Color module doesn't like calculating regamma params
* on top of a linear input. But degamma params can be used
* instead to simulate this.
*/
if (gamma)
gamma->type = GAMMA_CUSTOM;
res = mod_color_calculate_degamma_params(NULL, func,
gamma, gamma != NULL);
} else {
/*
* Assume sRGB. The actual mapping will depend on whether the
* input was legacy or not.
*/
if (gamma)
gamma->type = GAMMA_CS_TFM_1D;
res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL,
has_rom, NULL, &cal_buffer);
}
if (gamma)
dc_gamma_release(&gamma);
return res ? 0 : -ENOMEM;
}
static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf,
const struct drm_color_lut *regamma_lut,
uint32_t regamma_size, bool has_rom,
@@ -638,6 +812,42 @@ static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *f
return res ? 0 : -ENOMEM;
}
/**
* __set_input_tf_32 - calculates the input transfer function based on expected
* input space.
* @caps: dc color capabilities
* @func: transfer function
* @lut: lookup table that defines the color space
* @lut_size: size of respective lut.
*
* Returns:
* 0 in case of success. -ENOMEM if fails.
*/
static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func,
const struct drm_color_lut32 *lut, uint32_t lut_size)
{
struct dc_gamma *gamma = NULL;
bool res;
if (lut_size) {
gamma = dc_create_gamma();
if (!gamma)
return -ENOMEM;
gamma->type = GAMMA_CUSTOM;
gamma->num_entries = lut_size;
__drm_lut32_to_dc_gamma(lut, gamma);
}
res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL);
if (gamma)
dc_gamma_release(&gamma);
return res ? 0 : -ENOMEM;
}
static enum dc_transfer_func_predefined
amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
{
@@ -667,6 +877,27 @@ amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
}
}
static enum dc_transfer_func_predefined
amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf)
{
switch (tf) {
case DRM_COLOROP_1D_CURVE_SRGB_EOTF:
case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF:
return TRANSFER_FUNCTION_SRGB;
case DRM_COLOROP_1D_CURVE_PQ_125_EOTF:
case DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF:
return TRANSFER_FUNCTION_PQ;
case DRM_COLOROP_1D_CURVE_BT2020_INV_OETF:
case DRM_COLOROP_1D_CURVE_BT2020_OETF:
return TRANSFER_FUNCTION_BT709;
case DRM_COLOROP_1D_CURVE_GAMMA22:
case DRM_COLOROP_1D_CURVE_GAMMA22_INV:
return TRANSFER_FUNCTION_GAMMA22;
default:
return TRANSFER_FUNCTION_LINEAR;
}
}
static void __to_dc_lut3d_color(struct dc_rgb *rgb,
const struct drm_color_lut lut,
int bit_precision)
@@ -720,6 +951,59 @@ static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut,
__to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
}
static void __to_dc_lut3d_32_color(struct dc_rgb *rgb,
const struct drm_color_lut32 lut,
int bit_precision)
{
rgb->red = drm_color_lut32_extract(lut.red, bit_precision);
rgb->green = drm_color_lut32_extract(lut.green, bit_precision);
rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision);
}
static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut,
uint32_t lut3d_size,
struct tetrahedral_params *params,
bool use_tetrahedral_9,
int bit_depth)
{
struct dc_rgb *lut0;
struct dc_rgb *lut1;
struct dc_rgb *lut2;
struct dc_rgb *lut3;
int lut_i, i;
if (use_tetrahedral_9) {
lut0 = params->tetrahedral_9.lut0;
lut1 = params->tetrahedral_9.lut1;
lut2 = params->tetrahedral_9.lut2;
lut3 = params->tetrahedral_9.lut3;
} else {
lut0 = params->tetrahedral_17.lut0;
lut1 = params->tetrahedral_17.lut1;
lut2 = params->tetrahedral_17.lut2;
lut3 = params->tetrahedral_17.lut3;
}
for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
/*
* The 3D LUT RGB values are distributed across four arrays lut0-3,
* where the first holds 1229 entries and the others 1228. The bit
* depth supported for the 3D LUT channel is 12-bit, but DC also
* supports 10-bit.
*
* TODO: improve the color pipeline API to let userspace set the
* bit depth and 3D LUT size/stride, as specified by VA-API.
*/
__to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
__to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth);
__to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth);
__to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth);
}
/* lut0 has 1229 points (lut_size/4 + 1) */
__to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
}
/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream
* @drm_lut3d: user 3D LUT
* @drm_lut3d_size: size of 3D LUT
@@ -1177,6 +1461,360 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state,
return 0;
}
static int
__set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state,
struct drm_colorop_state *colorop_state)
{
struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func;
struct drm_colorop *colorop = colorop_state->colorop;
struct drm_device *drm = colorop->dev;
if (colorop->type != DRM_COLOROP_1D_CURVE)
return -EINVAL;
if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs))
return -EINVAL;
if (colorop_state->bypass) {
tf->type = TF_TYPE_BYPASS;
tf->tf = TRANSFER_FUNCTION_LINEAR;
return 0;
}
drm_dbg(drm, "Degamma colorop with ID: %d\n", colorop->base.id);
tf->type = TF_TYPE_PREDEFINED;
tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
return 0;
}
static int
__set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state,
struct drm_colorop *colorop)
{
struct drm_colorop *old_colorop;
struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
struct drm_atomic_state *state = plane_state->state;
int i = 0;
old_colorop = colorop;
/* 1st op: 1d curve - degamma */
for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
if (new_colorop_state->colorop == old_colorop &&
(BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) {
colorop_state = new_colorop_state;
break;
}
}
if (!colorop_state)
return -EINVAL;
return __set_colorop_in_tf_1d_curve(dc_plane_state, colorop_state);
}
static int
__set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state,
struct drm_colorop *colorop)
{
struct drm_colorop *old_colorop;
struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
struct drm_atomic_state *state = plane_state->state;
const struct drm_device *dev = colorop->dev;
const struct drm_property_blob *blob;
struct drm_color_ctm_3x4 *ctm = NULL;
int i = 0;
/* 3x4 matrix */
old_colorop = colorop;
for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
if (new_colorop_state->colorop == old_colorop &&
new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) {
colorop_state = new_colorop_state;
break;
}
}
if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) {
drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id);
blob = colorop_state->data;
if (blob->length == sizeof(struct drm_color_ctm_3x4)) {
ctm = (struct drm_color_ctm_3x4 *) blob->data;
__drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix);
dc_plane_state->gamut_remap_matrix.enable_remap = true;
dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
} else {
drm_warn(dev, "blob->length (%zu) isn't equal to drm_color_ctm_3x4 (%zu)\n",
blob->length, sizeof(struct drm_color_ctm_3x4));
return -EINVAL;
}
}
return 0;
}
static int
__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state,
struct drm_colorop *colorop)
{
struct drm_colorop *old_colorop;
struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
struct drm_atomic_state *state = plane_state->state;
const struct drm_device *dev = colorop->dev;
int i = 0;
/* Multiplier */
old_colorop = colorop;
for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
if (new_colorop_state->colorop == old_colorop &&
new_colorop_state->colorop->type == DRM_COLOROP_MULTIPLIER) {
colorop_state = new_colorop_state;
break;
}
}
if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) {
drm_dbg(dev, "Multiplier colorop with ID: %d\n", colorop->base.id);
dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(colorop_state->multiplier);
}
return 0;
}
static int
__set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state,
struct drm_colorop *colorop)
{
struct drm_colorop *old_colorop;
struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
struct drm_atomic_state *state = plane_state->state;
enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
const struct drm_color_lut32 *shaper_lut;
struct drm_device *dev = colorop->dev;
bool enabled = false;
u32 shaper_size;
int i = 0, ret = 0;
/* 1D Curve - SHAPER TF */
old_colorop = colorop;
for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
if (new_colorop_state->colorop == old_colorop &&
(BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_shaper_tfs)) {
colorop_state = new_colorop_state;
break;
}
}
if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) {
drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id);
tf->type = TF_TYPE_DISTRIBUTED_POINTS;
tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
ret = __set_output_tf(tf, 0, 0, false);
if (ret)
return ret;
enabled = true;
}
/* 1D LUT - SHAPER LUT */
colorop = old_colorop->next;
if (!colorop) {
drm_dbg(dev, "no Shaper LUT colorop found\n");
return -EINVAL;
}
old_colorop = colorop;
for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
if (new_colorop_state->colorop == old_colorop &&
new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
colorop_state = new_colorop_state;
break;
}
}
if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) {
drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id);
tf->type = TF_TYPE_DISTRIBUTED_POINTS;
tf->tf = default_tf;
tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size);
shaper_size = shaper_lut != NULL ? shaper_size : 0;
/* Custom LUT size must be the same as supported size */
if (shaper_size == colorop->size) {
ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false);
if (ret)
return ret;
enabled = true;
}
}
if (!enabled)
tf->type = TF_TYPE_BYPASS;
return 0;
}
/* __set_colorop_3dlut - set DRM 3D LUT to DC stream
* @drm_lut3d: user 3D LUT
* @drm_lut3d_size: size of 3D LUT
* @lut3d: DC 3D LUT
*
* Map user 3D LUT data to DC 3D LUT and all necessary bits to program it
* on DCN accordingly.
*
* Returns:
* 0 on success. -EINVAL if drm_lut3d_size is zero.
*/
static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d,
uint32_t drm_lut3d_size,
struct dc_3dlut *lut)
{
if (!drm_lut3d_size) {
lut->state.bits.initialized = 0;
return -EINVAL;
}
/* Only supports 17x17x17 3D LUT (12-bit) now */
lut->lut_3d.use_12bits = true;
lut->lut_3d.use_tetrahedral_9 = false;
lut->state.bits.initialized = 1;
__drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d,
lut->lut_3d.use_tetrahedral_9, 12);
return 0;
}
static int
__set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state,
struct drm_colorop *colorop)
{
struct drm_colorop *old_colorop;
struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
struct drm_atomic_state *state = plane_state->state;
const struct amdgpu_device *adev = drm_to_adev(colorop->dev);
const struct drm_device *dev = colorop->dev;
const struct drm_color_lut32 *lut3d;
uint32_t lut3d_size;
int i = 0, ret = 0;
/* 3D LUT */
old_colorop = colorop;
for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
if (new_colorop_state->colorop == old_colorop &&
new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) {
colorop_state = new_colorop_state;
break;
}
}
if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) {
if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) {
drm_dbg(dev, "3D LUT is not supported by hardware\n");
return -EINVAL;
}
drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id);
lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size);
lut3d_size = lut3d != NULL ? lut3d_size : 0;
ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func);
if (ret) {
drm_dbg(dev, "3D LUT colorop with ID: %d has LUT size = %d\n",
colorop->base.id, lut3d_size);
return ret;
}
/* A 3D LUT requires a shaper. If the shaper colorop is bypassed, enable
* the shaper curve with TRANSFER_FUNCTION_LINEAR
*/
if (tf->type == TF_TYPE_BYPASS) {
tf->type = TF_TYPE_DISTRIBUTED_POINTS;
tf->tf = TRANSFER_FUNCTION_LINEAR;
tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
ret = __set_output_tf_32(tf, NULL, 0, false);
}
}
return ret;
}
static int
__set_dm_plane_colorop_blend(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state,
struct drm_colorop *colorop)
{
struct drm_colorop *old_colorop;
struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
struct drm_atomic_state *state = plane_state->state;
enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
struct dc_transfer_func *tf = &dc_plane_state->blend_tf;
const struct drm_color_lut32 *blend_lut = NULL;
struct drm_device *dev = colorop->dev;
uint32_t blend_size = 0;
int i = 0;
/* 1D Curve - BLND TF */
old_colorop = colorop;
for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
if (new_colorop_state->colorop == old_colorop &&
(BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
colorop_state = new_colorop_state;
break;
}
}
if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE &&
(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id);
tf->type = TF_TYPE_DISTRIBUTED_POINTS;
tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
__set_input_tf_32(NULL, tf, blend_lut, blend_size);
}
/* 1D Curve - BLND LUT */
colorop = old_colorop->next;
if (!colorop) {
drm_dbg(dev, "no Blend LUT colorop found\n");
return -EINVAL;
}
old_colorop = colorop;
for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
if (new_colorop_state->colorop == old_colorop &&
new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
colorop_state = new_colorop_state;
break;
}
}
if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT &&
(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id);
tf->type = TF_TYPE_DISTRIBUTED_POINTS;
tf->tf = default_tf;
tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size);
blend_size = blend_lut != NULL ? blend_size : 0;
/* Custom LUT size must be the same as supported size */
if (blend_size == colorop->size)
__set_input_tf_32(NULL, tf, blend_lut, blend_size);
}
return 0;
}
static int
amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state)
@@ -1227,6 +1865,93 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
return 0;
}
static int
amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state)
{
struct drm_colorop *colorop = plane_state->color_pipeline;
struct drm_device *dev = plane_state->plane->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
int ret;
/* 1D Curve - DEGAM TF */
if (!colorop)
return -EINVAL;
ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop);
if (ret)
return ret;
/* Multiplier */
colorop = colorop->next;
if (!colorop) {
drm_dbg(dev, "no multiplier colorop found\n");
return -EINVAL;
}
ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop);
if (ret)
return ret;
/* 3x4 matrix */
colorop = colorop->next;
if (!colorop) {
drm_dbg(dev, "no 3x4 matrix colorop found\n");
return -EINVAL;
}
ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop);
if (ret)
return ret;
if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
/* 1D Curve & LUT - SHAPER TF & LUT */
colorop = colorop->next;
if (!colorop) {
drm_dbg(dev, "no Shaper TF colorop found\n");
return -EINVAL;
}
ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop);
if (ret)
return ret;
/* Shaper LUT colorop is already handled, just skip here */
colorop = colorop->next;
if (!colorop)
return -EINVAL;
/* 3D LUT */
colorop = colorop->next;
if (!colorop) {
drm_dbg(dev, "no 3D LUT colorop found\n");
return -EINVAL;
}
ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop);
if (ret)
return ret;
}
/* 1D Curve & LUT - BLND TF & LUT */
colorop = colorop->next;
if (!colorop) {
drm_dbg(dev, "no Blend TF colorop found\n");
return -EINVAL;
}
ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop);
if (ret)
return ret;
/* BLND LUT colorop is already handled, just skip here */
colorop = colorop->next;
if (!colorop)
return -EINVAL;
return 0;
}
/**
* amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
* @crtc: amdgpu_dm crtc state
@@ -1323,5 +2048,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
}
if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state))
return 0;
return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state);
}

View File

@@ -0,0 +1,209 @@
// SPDX-License-Identifier: MIT
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <drm/drm_print.h>
#include <drm/drm_plane.h>
#include <drm/drm_property.h>
#include <drm/drm_colorop.h>
#include "amdgpu.h"
#include "amdgpu_dm_colorop.h"
#include "dc.h"
const u64 amdgpu_dm_supported_degam_tfs =
BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
const u64 amdgpu_dm_supported_shaper_tfs =
BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) |
BIT(DRM_COLOROP_1D_CURVE_GAMMA22);
const u64 amdgpu_dm_supported_blnd_tfs =
BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
#define MAX_COLOR_PIPELINE_OPS 10
#define LUT3D_SIZE 17
int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list)
{
struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS];
struct drm_device *dev = plane->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
int ret;
int i = 0;
memset(ops, 0, sizeof(ops));
/* 1D curve - DEGAM TF */
ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
if (!ops[i]) {
ret = -ENOMEM;
goto cleanup;
}
ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
amdgpu_dm_supported_degam_tfs,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
goto cleanup;
list->type = ops[i]->base.id;
list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id);
i++;
/* Multiplier */
ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
if (!ops[i]) {
ret = -ENOMEM;
goto cleanup;
}
ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
goto cleanup;
drm_colorop_set_next_property(ops[i-1], ops[i]);
i++;
/* 3x4 matrix */
ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
if (!ops[i]) {
ret = -ENOMEM;
goto cleanup;
}
ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
goto cleanup;
drm_colorop_set_next_property(ops[i-1], ops[i]);
i++;
if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
/* 1D curve - SHAPER TF */
ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
if (!ops[i]) {
ret = -ENOMEM;
goto cleanup;
}
ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
amdgpu_dm_supported_shaper_tfs,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
goto cleanup;
drm_colorop_set_next_property(ops[i-1], ops[i]);
i++;
/* 1D LUT - SHAPER LUT */
ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
if (!ops[i]) {
ret = -ENOMEM;
goto cleanup;
}
ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
goto cleanup;
drm_colorop_set_next_property(ops[i-1], ops[i]);
i++;
/* 3D LUT */
ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
if (!ops[i]) {
ret = -ENOMEM;
goto cleanup;
}
ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE,
DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
goto cleanup;
drm_colorop_set_next_property(ops[i-1], ops[i]);
i++;
}
/* 1D curve - BLND TF */
ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
if (!ops[i]) {
ret = -ENOMEM;
goto cleanup;
}
ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
amdgpu_dm_supported_blnd_tfs,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
goto cleanup;
drm_colorop_set_next_property(ops[i - 1], ops[i]);
i++;
/* 1D LUT - BLND LUT */
ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
if (!ops[i]) {
ret = -ENOMEM;
goto cleanup;
}
ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
goto cleanup;
drm_colorop_set_next_property(ops[i-1], ops[i]);
return 0;
cleanup:
if (ret == -ENOMEM)
drm_err(plane->dev, "KMS: Failed to allocate colorop\n");
drm_colorop_pipeline_destroy(dev);
return ret;
}

View File

@ -0,0 +1,36 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __AMDGPU_DM_COLOROP_H__
#define __AMDGPU_DM_COLOROP_H__
extern const u64 amdgpu_dm_supported_degam_tfs;
extern const u64 amdgpu_dm_supported_shaper_tfs;
extern const u64 amdgpu_dm_supported_blnd_tfs;
int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list);
#endif /* __AMDGPU_DM_COLOROP_H__*/

View File

@ -736,7 +736,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
{
struct amdgpu_crtc *acrtc = NULL;
struct drm_plane *cursor_plane;
bool is_dcn;
bool has_degamma;
int res = -ENOMEM;
cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
@ -775,20 +775,18 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
/* Don't enable DRM CRTC degamma property for DCE since it doesn't
* support programmable degamma anywhere.
/* Don't enable the DRM CRTC degamma property when:
* 1. degamma is replaced by the color pipeline,
* 2. running on DCE, which doesn't support programmable degamma anywhere,
* 3. running on DCN401, where the pre-blending degamma LUT doesn't apply to the cursor.
*/
is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
/* Don't enable DRM CRTC degamma property for DCN401 since the
* pre-blending degamma LUT doesn't apply to cursor, and therefore
* can't work similar to a post-blending degamma LUT as in other hw
* versions.
* TODO: revisit it once KMS plane color API is merged.
*/
drm_crtc_enable_color_mgmt(&acrtc->base,
(is_dcn &&
dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ?
MAX_COLOR_LUT_ENTRIES : 0,
if (plane->color_pipeline_property)
has_degamma = false;
else
has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch &&
dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01;
drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

View File

@ -37,6 +37,7 @@
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_colorop.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@ -1782,6 +1783,39 @@ dm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
#else
#define MAX_COLOR_PIPELINES 5
static int
dm_plane_init_colorops(struct drm_plane *plane)
{
struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
struct drm_device *dev = plane->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct dc *dc = adev->dm.dc;
int len = 0;
int ret;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return 0;
/* initialize pipeline */
if (dc->ctx->dce_version >= DCN_VERSION_3_0) {
ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]);
if (ret) {
drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n",
plane->base.id, ret);
return ret;
}
len++;
/* Create COLOR_PIPELINE property and attach */
drm_plane_create_color_pipeline_property(plane, pipelines, len);
}
return 0;
}
#endif
static const struct drm_plane_funcs dm_plane_funcs = {
@ -1890,7 +1924,12 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
#ifdef AMD_PRIVATE_COLOR
dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
#else
res = dm_plane_init_colorops(plane);
if (res)
return res;
#endif
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);

View File

@ -44,10 +44,10 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fbh->dev;
struct fb_info *info = fbh->info;
struct drm_mode_fb_cmd2 mode;
struct armada_framebuffer *dfb;
struct armada_gem_object *obj;
struct fb_info *info;
int size, ret;
void *ptr;
@ -91,12 +91,6 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
if (IS_ERR(dfb))
return PTR_ERR(dfb);
info = drm_fb_helper_alloc_info(fbh);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_fballoc;
}
info->fbops = &armada_fb_ops;
info->fix.smem_start = obj->phys_addr;
info->fix.smem_len = obj->obj.size;
@ -112,8 +106,4 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
(unsigned long long)obj->phys_addr);
return 0;
err_fballoc:
dfb->fb.funcs->destroy(&dfb->fb);
return ret;
}

View File

@ -287,6 +287,7 @@
enum chip_id {
ID_IT6610,
ID_IT66121,
ID_IT66122,
};
struct it66121_chip_info {
@ -312,7 +313,7 @@ struct it66121_ctx {
u8 swl;
bool auto_cts;
} audio;
const struct it66121_chip_info *info;
enum chip_id id;
};
static const struct regmap_range_cfg it66121_regmap_banks[] = {
@ -402,7 +403,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
if (ctx->info->id == ID_IT66121) {
if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_EC1, 0);
if (ret)
@ -428,7 +429,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
if (ctx->info->id == ID_IT66121) {
if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_EC1,
IT66121_AFE_IP_EC1);
@ -449,7 +450,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
if (ctx->info->id == ID_IT6610) {
if (ctx->id == ID_IT6610) {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
IT6610_AFE_XP_BYPASS,
IT6610_AFE_XP_BYPASS);
@ -599,7 +600,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
if (ret)
return ret;
if (ctx->info->id == ID_IT66121) {
if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) {
ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
IT66121_CLK_BANK_PWROFF_RCLK, 0);
if (ret)
@ -748,7 +749,7 @@ static int it66121_bridge_check(struct drm_bridge *bridge,
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
if (ctx->info->id == ID_IT6610) {
if (ctx->id == ID_IT6610) {
/* The IT6610 only supports these settings */
bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
@ -802,7 +803,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI))
goto unlock;
if (ctx->info->id == ID_IT66121 &&
if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) &&
regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
IT66121_CLK_BANK_PWROFF_TXCLK,
IT66121_CLK_BANK_PWROFF_TXCLK)) {
@ -815,7 +816,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (it66121_configure_afe(ctx, adjusted_mode))
goto unlock;
if (ctx->info->id == ID_IT66121 &&
if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) &&
regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
IT66121_CLK_BANK_PWROFF_TXCLK, 0)) {
goto unlock;
@ -1384,8 +1385,6 @@ static int it66121_audio_startup(struct device *dev, void *data)
int ret;
struct it66121_ctx *ctx = dev_get_drvdata(dev);
dev_dbg(dev, "%s\n", __func__);
mutex_lock(&ctx->lock);
ret = it661221_audio_output_enable(ctx, true);
if (ret)
@ -1401,8 +1400,6 @@ static void it66121_audio_shutdown(struct device *dev, void *data)
int ret;
struct it66121_ctx *ctx = dev_get_drvdata(dev);
dev_dbg(dev, "%s\n", __func__);
mutex_lock(&ctx->lock);
ret = it661221_audio_output_enable(ctx, false);
if (ret)
@ -1479,8 +1476,6 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
.no_capture_mute = 1,
};
dev_dbg(dev, "%s\n", __func__);
if (!of_property_present(dev->of_node, "#sound-dai-cells")) {
dev_info(dev, "No \"#sound-dai-cells\", no audio\n");
return 0;
@ -1504,13 +1499,20 @@ static const char * const it66121_supplies[] = {
"vcn33", "vcn18", "vrf12"
};
static const struct it66121_chip_info it66xx_chip_info[] = {
{.id = ID_IT6610, .vid = 0xca00, .pid = 0x0611 },
{.id = ID_IT66121, .vid = 0x4954, .pid = 0x0612 },
{.id = ID_IT66122, .vid = 0x4954, .pid = 0x0622 },
};
static int it66121_probe(struct i2c_client *client)
{
u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 };
struct device_node *ep;
int ret;
int ret, i;
struct it66121_ctx *ctx;
struct device *dev = &client->dev;
const struct it66121_chip_info *chip_info;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(dev, "I2C check functionality failed.\n");
@ -1528,7 +1530,6 @@ static int it66121_probe(struct i2c_client *client)
ctx->dev = dev;
ctx->client = client;
ctx->info = i2c_get_match_data(client);
of_property_read_u32(ep, "bus-width", &ctx->bus_width);
of_node_put(ep);
@ -1574,11 +1575,18 @@ static int it66121_probe(struct i2c_client *client)
revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]);
device_ids[1] &= IT66121_DEVICE_ID1_MASK;
if ((vendor_ids[1] << 8 | vendor_ids[0]) != ctx->info->vid ||
(device_ids[1] << 8 | device_ids[0]) != ctx->info->pid) {
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(it66xx_chip_info); i++) {
chip_info = &it66xx_chip_info[i];
if ((vendor_ids[1] << 8 | vendor_ids[0]) == chip_info->vid &&
(device_ids[1] << 8 | device_ids[0]) == chip_info->pid) {
ctx->id = chip_info->id;
break;
}
}
if (i == ARRAY_SIZE(it66xx_chip_info))
return -ENODEV;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
@ -1612,28 +1620,18 @@ static void it66121_remove(struct i2c_client *client)
mutex_destroy(&ctx->lock);
}
static const struct it66121_chip_info it66121_chip_info = {
.id = ID_IT66121,
.vid = 0x4954,
.pid = 0x0612,
};
static const struct it66121_chip_info it6610_chip_info = {
.id = ID_IT6610,
.vid = 0xca00,
.pid = 0x0611,
};
static const struct of_device_id it66121_dt_match[] = {
{ .compatible = "ite,it66121", &it66121_chip_info },
{ .compatible = "ite,it6610", &it6610_chip_info },
{ .compatible = "ite,it6610" },
{ .compatible = "ite,it66121" },
{ .compatible = "ite,it66122" },
{ }
};
MODULE_DEVICE_TABLE(of, it66121_dt_match);
static const struct i2c_device_id it66121_id[] = {
{ "it66121", (kernel_ulong_t) &it66121_chip_info },
{ "it6610", (kernel_ulong_t) &it6610_chip_info },
{ .name = "it6610" },
{ .name = "it66121" },
{ .name = "it66122" },
{ }
};
MODULE_DEVICE_TABLE(i2c, it66121_id);

View File

@ -261,6 +261,16 @@ static const struct of_device_id simple_bridge_match[] = {
.timings = &default_bridge_timings,
.connector_type = DRM_MODE_CONNECTOR_VGA,
},
}, {
.compatible = "asl-tek,cs5263",
.data = &(const struct simple_bridge_info) {
.connector_type = DRM_MODE_CONNECTOR_HDMIA,
},
}, {
.compatible = "parade,ps185hdm",
.data = &(const struct simple_bridge_info) {
.connector_type = DRM_MODE_CONNECTOR_HDMIA,
},
}, {
.compatible = "radxa,ra620",
.data = &(const struct simple_bridge_info) {

View File

@ -868,8 +868,9 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
return;
if (connector->display_info.is_hdmi) {
dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n",
__func__, conn_state->hdmi.tmds_char_rate);
dev_dbg(hdmi->dev, "%s mode=HDMI %s rate=%llu bpc=%u\n", __func__,
drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
conn_state->hdmi.tmds_char_rate, conn_state->hdmi.output_bpc);
op_mode = 0;
hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate;
} else {
@ -1287,6 +1288,12 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
hdmi->bridge.vendor = "Synopsys";
hdmi->bridge.product = "DW HDMI QP TX";
if (plat_data->supported_formats)
hdmi->bridge.supported_formats = plat_data->supported_formats;
if (plat_data->max_bpc)
hdmi->bridge.max_bpc = plat_data->max_bpc;
hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi);
if (IS_ERR(hdmi->bridge.ddc))
return ERR_CAST(hdmi->bridge.ddc);

View File

@ -38,9 +38,11 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client)
}
}
static int drm_fbdev_client_restore(struct drm_client_dev *client)
static int drm_fbdev_client_restore(struct drm_client_dev *client, bool force)
{
drm_fb_helper_lastclose(client->dev);
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
return 0;
}

View File

@ -315,6 +315,18 @@ static void drm_log_client_unregister(struct drm_client_dev *client)
drm_client_release(client);
}
static int drm_log_client_restore(struct drm_client_dev *client, bool force)
{
int ret;
if (force)
ret = drm_client_modeset_commit_locked(client);
else
ret = drm_client_modeset_commit(client);
return ret;
}
static int drm_log_client_hotplug(struct drm_client_dev *client)
{
struct drm_log *dlog = client_to_drm_log(client);
@ -348,6 +360,7 @@ static const struct drm_client_funcs drm_log_client_funcs = {
.owner = THIS_MODULE,
.free = drm_log_client_free,
.unregister = drm_log_client_unregister,
.restore = drm_log_client_restore,
.hotplug = drm_log_client_hotplug,
.suspend = drm_log_client_suspend,
.resume = drm_log_client_resume,

View File

@ -42,6 +42,7 @@
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
#include <drm/drm_colorop.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@ -107,6 +108,7 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state)
kfree(state->connectors);
kfree(state->crtcs);
kfree(state->planes);
kfree(state->colorops);
kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);
@ -138,6 +140,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
sizeof(*state->planes), GFP_KERNEL);
if (!state->planes)
goto fail;
state->colorops = kcalloc(dev->mode_config.num_colorop,
sizeof(*state->colorops), GFP_KERNEL);
if (!state->colorops)
goto fail;
/*
* Because drm_atomic_state can be committed asynchronously we need our
@ -251,6 +257,20 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
state->planes[i].new_state = NULL;
}
for (i = 0; i < config->num_colorop; i++) {
struct drm_colorop *colorop = state->colorops[i].ptr;
if (!colorop)
continue;
drm_colorop_atomic_destroy_state(colorop,
state->colorops[i].state);
state->colorops[i].ptr = NULL;
state->colorops[i].state = NULL;
state->colorops[i].old_state = NULL;
state->colorops[i].new_state = NULL;
}
for (i = 0; i < state->num_private_objs; i++) {
struct drm_private_obj *obj = state->private_objs[i].ptr;
@ -572,6 +592,55 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
/**
* drm_atomic_get_colorop_state - get colorop state
* @state: global atomic state object
* @colorop: colorop to get state object for
*
* This function returns the colorop state for the given colorop, allocating it
* if needed. It will also grab the relevant plane lock to make sure that the
* state is consistent.
*
* Returns:
*
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted. All other errors are fatal.
*/
struct drm_colorop_state *
drm_atomic_get_colorop_state(struct drm_atomic_state *state,
struct drm_colorop *colorop)
{
int ret, index = drm_colorop_index(colorop);
struct drm_colorop_state *colorop_state;
WARN_ON(!state->acquire_ctx);
colorop_state = drm_atomic_get_new_colorop_state(state, colorop);
if (colorop_state)
return colorop_state;
ret = drm_modeset_lock(&colorop->plane->mutex, state->acquire_ctx);
if (ret)
return ERR_PTR(ret);
colorop_state = drm_atomic_helper_colorop_duplicate_state(colorop);
if (!colorop_state)
return ERR_PTR(-ENOMEM);
state->colorops[index].state = colorop_state;
state->colorops[index].ptr = colorop;
state->colorops[index].old_state = colorop->state;
state->colorops[index].new_state = colorop_state;
colorop_state->state = state;
drm_dbg_atomic(colorop->dev, "Added [COLOROP:%d:%d] %p state to %p\n",
colorop->base.id, colorop->type, colorop_state, state);
return colorop_state;
}
EXPORT_SYMBOL(drm_atomic_get_colorop_state);
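A typical caller follows the pattern sketched below; an -EDEADLK return is propagated unchanged so the caller's backoff logic can drop all locks and restart the sequence (drm_atomic_add_affected_colorops() further down does exactly this):

colorop_state = drm_atomic_get_colorop_state(state, colorop);
if (IS_ERR(colorop_state))
	return PTR_ERR(colorop_state);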
static bool
plane_switching_crtc(const struct drm_plane_state *old_plane_state,
const struct drm_plane_state *new_plane_state)
@ -711,6 +780,46 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
return 0;
}
static void drm_atomic_colorop_print_state(struct drm_printer *p,
const struct drm_colorop_state *state)
{
struct drm_colorop *colorop = state->colorop;
drm_printf(p, "colorop[%u]:\n", colorop->base.id);
drm_printf(p, "\ttype=%s\n", drm_get_colorop_type_name(colorop->type));
if (colorop->bypass_property)
drm_printf(p, "\tbypass=%u\n", state->bypass);
switch (colorop->type) {
case DRM_COLOROP_1D_CURVE:
drm_printf(p, "\tcurve_1d_type=%s\n",
drm_get_colorop_curve_1d_type_name(state->curve_1d_type));
break;
case DRM_COLOROP_1D_LUT:
drm_printf(p, "\tsize=%d\n", colorop->size);
drm_printf(p, "\tinterpolation=%s\n",
drm_get_colorop_lut1d_interpolation_name(colorop->lut1d_interpolation));
drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0);
break;
case DRM_COLOROP_CTM_3X4:
drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0);
break;
case DRM_COLOROP_MULTIPLIER:
drm_printf(p, "\tmultiplier=%llu\n", state->multiplier);
break;
case DRM_COLOROP_3D_LUT:
drm_printf(p, "\tsize=%d\n", colorop->size);
drm_printf(p, "\tinterpolation=%s\n",
drm_get_colorop_lut3d_interpolation_name(colorop->lut3d_interpolation));
drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0);
break;
default:
break;
}
drm_printf(p, "\tnext=%d\n", colorop->next ? colorop->next->base.id : 0);
}
static void drm_atomic_plane_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
@ -732,7 +841,8 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
drm_printf(p, "\tcolor-range=%s\n",
drm_get_color_range_name(state->color_range));
drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
drm_printf(p, "\tcolor-pipeline=%d\n",
state->color_pipeline ? state->color_pipeline->base.id : 0);
if (plane->funcs->atomic_print_state)
plane->funcs->atomic_print_state(p, state);
}
@ -1445,6 +1555,52 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
/**
* drm_atomic_add_affected_colorops - add colorops for plane
* @state: atomic state
* @plane: DRM plane
*
* This function walks the current configuration and adds all colorops
* currently used by @plane to the atomic configuration @state. This is useful
* when an atomic commit also needs to check all currently enabled colorops on
* @plane, e.g. when changing the mode. It's also useful when re-enabling a plane
* to avoid special code to force-enable all colorops.
*
* Since acquiring a colorop state will always also acquire the w/w mutex of that
* colorop's plane (if there is any), adding all the colorop states for a plane
* will not reduce the parallelism of atomic updates.
*
* Returns:
* 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
* then the w/w mutex code has detected a deadlock and the entire atomic
* sequence must be restarted. All other errors are fatal.
*/
int
drm_atomic_add_affected_colorops(struct drm_atomic_state *state,
struct drm_plane *plane)
{
struct drm_colorop *colorop;
struct drm_colorop_state *colorop_state;
WARN_ON(!drm_atomic_get_new_plane_state(state, plane));
drm_dbg_atomic(plane->dev,
"Adding all current colorops for [PLANE:%d:%s] to %p\n",
plane->base.id, plane->name, state);
drm_for_each_colorop(colorop, plane->dev) {
if (colorop->plane != plane)
continue;
colorop_state = drm_atomic_get_colorop_state(state, colorop);
if (IS_ERR(colorop_state))
return PTR_ERR(colorop_state);
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_colorops);
/**
* drm_atomic_check_only - check whether a given config would work
* @state: atomic configuration to check
@ -1843,6 +1999,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
bool take_locks)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_colorop *colorop;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_connector *connector;
@ -1852,6 +2009,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
if (!drm_drv_uses_atomic_modeset(dev))
return;
list_for_each_entry(colorop, &config->colorop_list, head) {
if (take_locks)
drm_modeset_lock(&colorop->plane->mutex, NULL);
drm_atomic_colorop_print_state(p, colorop->state);
if (take_locks)
drm_modeset_unlock(&colorop->plane->mutex);
}
list_for_each_entry(plane, &config->plane_list, head) {
if (take_locks)
drm_modeset_lock(&plane->mutex, NULL);

View File

@ -3184,6 +3184,8 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_colorop *colorop;
struct drm_colorop_state *old_colorop_state, *new_colorop_state;
struct drm_crtc_commit *commit;
struct drm_private_obj *obj;
struct drm_private_state *old_obj_state, *new_obj_state;
@ -3261,6 +3263,16 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
}
}
for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) {
WARN_ON(colorop->state != old_colorop_state);
old_colorop_state->state = state;
new_colorop_state->state = NULL;
state->colorops[i].state = old_colorop_state;
colorop->state = new_colorop_state;
}
drm_panic_lock(state->dev, flags);
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
WARN_ON(plane->state != old_plane_state);

View File

@ -268,6 +268,11 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
plane_state->color_range = val;
}
if (plane->color_pipeline_property) {
/* default is always NULL, i.e., bypass */
plane_state->color_pipeline = NULL;
}
if (plane->zpos_property) {
if (!drm_object_property_get_default_value(&plane->base,
plane->zpos_property,

View File

@ -35,6 +35,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_writeback.h>
#include <drm/drm_vblank.h>
#include <drm/drm_colorop.h>
#include <linux/export.h>
#include <linux/dma-fence.h>
@ -257,6 +258,34 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
/**
* drm_atomic_set_colorop_for_plane - set colorop for plane
* @plane_state: atomic state object for the plane
* @colorop: colorop to use for the plane
*
* Helper function to select the color pipeline on a plane by pointing the
* plane state at the first drm_colorop element of the pipeline.
*/
void
drm_atomic_set_colorop_for_plane(struct drm_plane_state *plane_state,
struct drm_colorop *colorop)
{
struct drm_plane *plane = plane_state->plane;
if (colorop)
drm_dbg_atomic(plane->dev,
"Set [COLOROP:%d] for [PLANE:%d:%s] state %p\n",
colorop->base.id, plane->base.id, plane->name,
plane_state);
else
drm_dbg_atomic(plane->dev,
"Set [NOCOLOROP] for [PLANE:%d:%s] state %p\n",
plane->base.id, plane->name, plane_state);
plane_state->color_pipeline = colorop;
}
EXPORT_SYMBOL(drm_atomic_set_colorop_for_plane);
/**
* drm_atomic_set_crtc_for_connector - set CRTC for connector
* @conn_state: atomic state object for the connector
@ -544,6 +573,16 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
state->color_encoding = val;
} else if (property == plane->color_range_property) {
state->color_range = val;
} else if (property == plane->color_pipeline_property) {
/* find DRM colorop object */
struct drm_colorop *colorop = NULL;
colorop = drm_colorop_find(dev, file_priv, val);
if (val && !colorop)
return -EACCES;
drm_atomic_set_colorop_for_plane(state, colorop);
} else if (property == config->prop_fb_damage_clips) {
ret = drm_property_replace_blob_from_id(dev,
&state->fb_damage_clips,
@ -626,6 +665,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->color_encoding;
} else if (property == plane->color_range_property) {
*val = state->color_range;
} else if (property == plane->color_pipeline_property) {
*val = (state->color_pipeline) ? state->color_pipeline->base.id : 0;
} else if (property == config->prop_fb_damage_clips) {
*val = (state->fb_damage_clips) ?
state->fb_damage_clips->base.id : 0;
@ -648,6 +689,96 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
static int drm_atomic_color_set_data_property(struct drm_colorop *colorop,
struct drm_colorop_state *state,
struct drm_property *property,
uint64_t val)
{
ssize_t elem_size = -1;
ssize_t size = -1;
bool replaced = false;
switch (colorop->type) {
case DRM_COLOROP_1D_LUT:
size = colorop->size * sizeof(struct drm_color_lut32);
break;
case DRM_COLOROP_CTM_3X4:
size = sizeof(struct drm_color_ctm_3x4);
break;
case DRM_COLOROP_3D_LUT:
size = colorop->size * colorop->size * colorop->size *
sizeof(struct drm_color_lut32);
break;
default:
/* should never get here */
return -EINVAL;
}
return drm_property_replace_blob_from_id(colorop->dev,
&state->data,
val,
size,
elem_size,
&replaced);
}
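As a worked example: for the 17-entries-per-dimension 3D LUT that amdgpu registers earlier in this series, and assuming struct drm_color_lut32 carries four 32-bit channels (16 bytes per entry), the only blob size accepted here is 17 * 17 * 17 * 16 = 78608 bytes; drm_property_replace_blob_from_id() rejects blobs of any other size.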
static int drm_atomic_colorop_set_property(struct drm_colorop *colorop,
struct drm_colorop_state *state,
struct drm_file *file_priv,
struct drm_property *property,
uint64_t val)
{
if (property == colorop->bypass_property) {
state->bypass = val;
} else if (property == colorop->lut1d_interpolation_property) {
colorop->lut1d_interpolation = val;
} else if (property == colorop->curve_1d_type_property) {
state->curve_1d_type = val;
} else if (property == colorop->multiplier_property) {
state->multiplier = val;
} else if (property == colorop->lut3d_interpolation_property) {
colorop->lut3d_interpolation = val;
} else if (property == colorop->data_property) {
return drm_atomic_color_set_data_property(colorop, state,
property, val);
} else {
drm_dbg_atomic(colorop->dev,
"[COLOROP:%d:%d] unknown property [PROP:%d:%s]\n",
colorop->base.id, colorop->type,
property->base.id, property->name);
return -EINVAL;
}
return 0;
}
static int
drm_atomic_colorop_get_property(struct drm_colorop *colorop,
const struct drm_colorop_state *state,
struct drm_property *property, uint64_t *val)
{
if (property == colorop->type_property)
*val = colorop->type;
else if (property == colorop->bypass_property)
*val = state->bypass;
else if (property == colorop->lut1d_interpolation_property)
*val = colorop->lut1d_interpolation;
else if (property == colorop->curve_1d_type_property)
*val = state->curve_1d_type;
else if (property == colorop->multiplier_property)
*val = state->multiplier;
else if (property == colorop->size_property)
*val = colorop->size;
else if (property == colorop->lut3d_interpolation_property)
*val = colorop->lut3d_interpolation;
else if (property == colorop->data_property)
*val = (state->data) ? state->data->base.id : 0;
else
return -EINVAL;
return 0;
}
static int drm_atomic_set_writeback_fb_for_connector(
struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
@ -914,6 +1045,15 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
plane->state, property, val);
break;
}
case DRM_MODE_OBJECT_COLOROP: {
struct drm_colorop *colorop = obj_to_colorop(obj);
if (colorop->plane)
WARN_ON(!drm_modeset_is_locked(&colorop->plane->mutex));
ret = drm_atomic_colorop_get_property(colorop, colorop->state, property, val);
break;
}
default:
drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj->id);
ret = -EINVAL;
@ -1111,6 +1251,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
ret = drm_atomic_plane_set_property(plane,
plane_state, file_priv,
prop, prop_value);
break;
}
case DRM_MODE_OBJECT_COLOROP: {
struct drm_colorop *colorop = obj_to_colorop(obj);
struct drm_colorop_state *colorop_state;
colorop_state = drm_atomic_get_colorop_state(state, colorop);
if (IS_ERR(colorop_state)) {
ret = PTR_ERR(colorop_state);
break;
}
ret = drm_atomic_colorop_set_property(colorop, colorop_state,
file_priv, prop, prop_value);
break;
}
default:
@ -1450,6 +1605,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
state->acquire_ctx = &ctx;
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
state->plane_color_pipeline = file_priv->plane_color_pipeline;
retry:
copied_objs = 0;

View File

@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <drm/drm_client.h>
#include <drm/drm_client_event.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>

View File

@ -102,7 +102,7 @@ void drm_client_dev_hotplug(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_client_dev_hotplug);
void drm_client_dev_restore(struct drm_device *dev)
void drm_client_dev_restore(struct drm_device *dev, bool force)
{
struct drm_client_dev *client;
int ret;
@ -115,7 +115,7 @@ void drm_client_dev_restore(struct drm_device *dev)
if (!client->funcs || !client->funcs->restore)
continue;
ret = client->funcs->restore(client);
ret = client->funcs->restore(client, force);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
if (!ret) /* The first one to return zero gets the privilege to restore */
break;

View File

@ -0,0 +1,65 @@
// SPDX-License-Identifier: GPL-2.0 or MIT
#include <linux/sysrq.h>
#include <drm/drm_client_event.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#ifdef CONFIG_MAGIC_SYSRQ
static LIST_HEAD(drm_client_sysrq_dev_list);
static DEFINE_MUTEX(drm_client_sysrq_dev_lock);
/* emergency restore, don't bother with error reporting */
static void drm_client_sysrq_restore_work_fn(struct work_struct *ignored)
{
struct drm_device *dev;
guard(mutex)(&drm_client_sysrq_dev_lock);
list_for_each_entry(dev, &drm_client_sysrq_dev_list, client_sysrq_list) {
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
continue;
drm_client_dev_restore(dev, true);
}
}
static DECLARE_WORK(drm_client_sysrq_restore_work, drm_client_sysrq_restore_work_fn);
static void drm_client_sysrq_restore_handler(u8 ignored)
{
schedule_work(&drm_client_sysrq_restore_work);
}
static const struct sysrq_key_op drm_client_sysrq_restore_op = {
.handler = drm_client_sysrq_restore_handler,
.help_msg = "force-fb(v)",
.action_msg = "Restore framebuffer console",
};
void drm_client_sysrq_register(struct drm_device *dev)
{
guard(mutex)(&drm_client_sysrq_dev_lock);
if (list_empty(&drm_client_sysrq_dev_list))
register_sysrq_key('v', &drm_client_sysrq_restore_op);
list_add(&dev->client_sysrq_list, &drm_client_sysrq_dev_list);
}
void drm_client_sysrq_unregister(struct drm_device *dev)
{
guard(mutex)(&drm_client_sysrq_dev_lock);
/* remove device from global restore list */
if (!drm_WARN_ON(dev, list_empty(&dev->client_sysrq_list)))
list_del(&dev->client_sysrq_list);
/* no devices left; unregister key */
if (list_empty(&drm_client_sysrq_dev_list))
unregister_sysrq_key('v', &drm_client_sysrq_restore_op);
}
#endif
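With this in place, the SysRq 'v' key (for instance via echo v > /proc/sysrq-trigger) now walks every registered device and restores all in-kernel clients through drm_client_dev_restore(dev, true), rather than being wired up only for the fbdev emulation as before.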

View File

@ -874,3 +874,46 @@ void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_pa
fill_palette_8(crtc, i, set_palette);
}
EXPORT_SYMBOL(drm_crtc_fill_palette_8);
/**
* drm_color_lut32_check - check validity of extended lookup table
* @lut: property blob containing extended LUT to check
* @tests: bitmask of tests to run
*
* Helper to check whether a userspace-provided extended lookup table is valid and
* satisfies hardware requirements. Drivers pass a bitmask indicating which of
* the tests in &drm_color_lut_tests should be performed.
*
* Returns 0 on success, -EINVAL on failure.
*/
int drm_color_lut32_check(const struct drm_property_blob *lut, u32 tests)
{
const struct drm_color_lut32 *entry;
int i;
if (!lut || !tests)
return 0;
entry = lut->data;
for (i = 0; i < drm_color_lut32_size(lut); i++) {
if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) {
if (entry[i].red != entry[i].blue ||
entry[i].red != entry[i].green) {
DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n");
return -EINVAL;
}
}
if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) {
if (entry[i].red < entry[i - 1].red ||
entry[i].green < entry[i - 1].green ||
entry[i].blue < entry[i - 1].blue) {
DRM_DEBUG_KMS("LUT entries must never decrease.\n");
return -EINVAL;
}
}
}
return 0;
}
EXPORT_SYMBOL(drm_color_lut32_check);
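A usage sketch (the helper name and flag choice are illustrative, not taken from any driver in this series) for validating a colorop's DATA blob during atomic check:

static int check_lut32_blob(const struct drm_colorop_state *state)
{
	/* require monotonically non-decreasing channels */
	return drm_color_lut32_check(state->data,
				     DRM_COLOR_LUT_NON_DECREASING);
}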

View File

@ -0,0 +1,599 @@
// SPDX-License-Identifier: MIT
/*
* Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include <drm/drm_colorop.h>
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
#include <drm/drm_plane.h>
#include "drm_crtc_internal.h"
/**
* DOC: overview
*
* When userspace signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE it
* should use the COLOR_PIPELINE plane property and associated colorops
* for any color operation on the &drm_plane. Setting of all old color
* properties, such as COLOR_ENCODING and COLOR_RANGE, will be rejected
* and the values of the properties will be ignored.
*
* Colorops are only advertised and valid for atomic drivers and atomic
* userspace that signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE
* client cap.
*
* A colorop represents a single color operation. Colorops are chained
* via the NEXT property and make up color pipelines. Color pipelines
* are advertised and selected via the COLOR_PIPELINE &drm_plane
* property.
*
* A colorop will be of a certain type, advertised by the read-only TYPE
* property. Each type of colorop will advertise a different set of
* properties and is programmed in a different manner. Types can be
* enumerated 1D curves, 1D LUTs, 3D LUTs, matrices, etc. See the
* &drm_colorop_type documentation for information on each type.
*
* If a colorop advertises the BYPASS property it can be bypassed.
*
* Information about colorop and color pipeline design decisions can be
* found at rfc/color_pipeline.rst, but note that this document will
* grow stale over time.
*/
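To make the flow concrete, here is a hypothetical userspace sequence; plane_id, the property IDs and the enum value are assumed to have been discovered through the usual property enumeration and are not defined by this patch:

drmSetClientCap(fd, DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE, 1);

drmModeAtomicReq *req = drmModeAtomicAlloc();
/* select a pipeline by the object ID of its first colorop */
drmModeAtomicAddProperty(req, plane_id, color_pipeline_prop_id, first_colorop_id);
/* un-bypass the first colorop and pick an enumerated curve */
drmModeAtomicAddProperty(req, first_colorop_id, bypass_prop_id, 0);
drmModeAtomicAddProperty(req, first_colorop_id, curve_1d_type_prop_id, srgb_eotf_value);
drmModeAtomicCommit(fd, req, 0, NULL);
drmModeAtomicFree(req);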
static const struct drm_prop_enum_list drm_colorop_type_enum_list[] = {
{ DRM_COLOROP_1D_CURVE, "1D Curve" },
{ DRM_COLOROP_1D_LUT, "1D LUT" },
{ DRM_COLOROP_CTM_3X4, "3x4 Matrix"},
{ DRM_COLOROP_MULTIPLIER, "Multiplier"},
{ DRM_COLOROP_3D_LUT, "3D LUT"},
};
static const char * const colorop_curve_1d_type_names[] = {
[DRM_COLOROP_1D_CURVE_SRGB_EOTF] = "sRGB EOTF",
[DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF] = "sRGB Inverse EOTF",
[DRM_COLOROP_1D_CURVE_PQ_125_EOTF] = "PQ 125 EOTF",
[DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF] = "PQ 125 Inverse EOTF",
[DRM_COLOROP_1D_CURVE_BT2020_INV_OETF] = "BT.2020 Inverse OETF",
[DRM_COLOROP_1D_CURVE_BT2020_OETF] = "BT.2020 OETF",
[DRM_COLOROP_1D_CURVE_GAMMA22] = "Gamma 2.2",
[DRM_COLOROP_1D_CURVE_GAMMA22_INV] = "Gamma 2.2 Inverse",
};
static const struct drm_prop_enum_list drm_colorop_lut1d_interpolation_list[] = {
{ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, "Linear" },
};
static const struct drm_prop_enum_list drm_colorop_lut3d_interpolation_list[] = {
{ DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, "Tetrahedral" },
};
/* Init Helpers */
static int drm_plane_colorop_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane, enum drm_colorop_type type,
uint32_t flags)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_property *prop;
int ret = 0;
ret = drm_mode_object_add(dev, &colorop->base, DRM_MODE_OBJECT_COLOROP);
if (ret)
return ret;
colorop->base.properties = &colorop->properties;
colorop->dev = dev;
colorop->type = type;
colorop->plane = plane;
colorop->next = NULL;
list_add_tail(&colorop->head, &config->colorop_list);
colorop->index = config->num_colorop++;
/* add properties */
/* type */
prop = drm_property_create_enum(dev,
DRM_MODE_PROP_IMMUTABLE,
"TYPE", drm_colorop_type_enum_list,
ARRAY_SIZE(drm_colorop_type_enum_list));
if (!prop)
return -ENOMEM;
colorop->type_property = prop;
drm_object_attach_property(&colorop->base,
colorop->type_property,
colorop->type);
if (flags & DRM_COLOROP_FLAG_ALLOW_BYPASS) {
/* bypass */
prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
"BYPASS");
if (!prop)
return -ENOMEM;
colorop->bypass_property = prop;
drm_object_attach_property(&colorop->base,
colorop->bypass_property,
1);
}
/* next */
prop = drm_property_create_object(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC,
"NEXT", DRM_MODE_OBJECT_COLOROP);
if (!prop)
return -ENOMEM;
colorop->next_property = prop;
drm_object_attach_property(&colorop->base,
colorop->next_property,
0);
return ret;
}
/**
* drm_colorop_cleanup - Clean up a drm_colorop object in a color pipeline
*
* @colorop: The drm_colorop object to be cleaned
*/
void drm_colorop_cleanup(struct drm_colorop *colorop)
{
struct drm_device *dev = colorop->dev;
struct drm_mode_config *config = &dev->mode_config;
list_del(&colorop->head);
config->num_colorop--;
if (colorop->state && colorop->state->data) {
drm_property_blob_put(colorop->state->data);
colorop->state->data = NULL;
}
kfree(colorop->state);
}
EXPORT_SYMBOL(drm_colorop_cleanup);
/**
* drm_colorop_pipeline_destroy - Helper for color pipeline destruction
*
* @dev: The drm_device containing the drm_planes with the color pipelines
*
* Provides a default color pipeline destroy handler for drm_device.
*/
void drm_colorop_pipeline_destroy(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_colorop *colorop, *next;
list_for_each_entry_safe(colorop, next, &config->colorop_list, head) {
drm_colorop_cleanup(colorop);
kfree(colorop);
}
}
EXPORT_SYMBOL(drm_colorop_pipeline_destroy);
/**
* drm_plane_colorop_curve_1d_init - Initialize a DRM_COLOROP_1D_CURVE
*
* @dev: DRM device
* @colorop: The drm_colorop object to initialize
* @plane: The associated drm_plane
* @supported_tfs: A bitfield of supported enum drm_colorop_curve_1d_type values,
* created using BIT(curve_type) and combined with the OR '|'
* operator.
* @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines.
* @return zero on success, -E value on failure
*/
int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane, u64 supported_tfs, uint32_t flags)
{
struct drm_prop_enum_list enum_list[DRM_COLOROP_1D_CURVE_COUNT];
int i, len;
struct drm_property *prop;
int ret;
if (!supported_tfs) {
drm_err(dev,
"No supported TFs for new 1D curve colorop on [PLANE:%d:%s]\n",
plane->base.id, plane->name);
return -EINVAL;
}
if ((supported_tfs & -BIT(DRM_COLOROP_1D_CURVE_COUNT)) != 0) {
drm_err(dev, "Unknown TF provided on [PLANE:%d:%s]\n",
plane->base.id, plane->name);
return -EINVAL;
}
ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_CURVE, flags);
if (ret)
return ret;
len = 0;
for (i = 0; i < DRM_COLOROP_1D_CURVE_COUNT; i++) {
if ((supported_tfs & BIT(i)) == 0)
continue;
enum_list[len].type = i;
enum_list[len].name = colorop_curve_1d_type_names[i];
len++;
}
if (WARN_ON(len <= 0))
return -EINVAL;
/* initialize 1D curve only attribute */
prop = drm_property_create_enum(dev, DRM_MODE_PROP_ATOMIC, "CURVE_1D_TYPE",
enum_list, len);
if (!prop)
return -ENOMEM;
colorop->curve_1d_type_property = prop;
drm_object_attach_property(&colorop->base, colorop->curve_1d_type_property,
enum_list[0].type);
drm_colorop_reset(colorop);
return 0;
}
EXPORT_SYMBOL(drm_plane_colorop_curve_1d_init);
static int drm_colorop_create_data_prop(struct drm_device *dev, struct drm_colorop *colorop)
{
struct drm_property *prop;
/* data */
prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
"DATA", 0);
if (!prop)
return -ENOMEM;
colorop->data_property = prop;
drm_object_attach_property(&colorop->base,
colorop->data_property,
0);
return 0;
}
/**
* drm_plane_colorop_curve_1d_lut_init - Initialize a DRM_COLOROP_1D_LUT
*
* @dev: DRM device
* @colorop: The drm_colorop object to initialize
* @plane: The associated drm_plane
* @lut_size: LUT size supported by driver
* @interpolation: 1D LUT interpolation type
* @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines.
* @return zero on success, -E value on failure
*/
int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane, uint32_t lut_size,
enum drm_colorop_lut1d_interpolation_type interpolation,
uint32_t flags)
{
struct drm_property *prop;
int ret;
ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_LUT, flags);
if (ret)
return ret;
/* initialize 1D LUT only attribute */
/* LUT size */
prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC,
"SIZE", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
colorop->size_property = prop;
drm_object_attach_property(&colorop->base, colorop->size_property, lut_size);
colorop->size = lut_size;
/* interpolation */
prop = drm_property_create_enum(dev, 0, "LUT1D_INTERPOLATION",
drm_colorop_lut1d_interpolation_list,
ARRAY_SIZE(drm_colorop_lut1d_interpolation_list));
if (!prop)
return -ENOMEM;
colorop->lut1d_interpolation_property = prop;
drm_object_attach_property(&colorop->base, prop, interpolation);
colorop->lut1d_interpolation = interpolation;
/* data */
ret = drm_colorop_create_data_prop(dev, colorop);
if (ret)
return ret;
drm_colorop_reset(colorop);
return 0;
}
EXPORT_SYMBOL(drm_plane_colorop_curve_1d_lut_init);
int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane, uint32_t flags)
{
int ret;
ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_CTM_3X4, flags);
if (ret)
return ret;
ret = drm_colorop_create_data_prop(dev, colorop);
if (ret)
return ret;
drm_colorop_reset(colorop);
return 0;
}
EXPORT_SYMBOL(drm_plane_colorop_ctm_3x4_init);
/**
* drm_plane_colorop_mult_init - Initialize a DRM_COLOROP_MULTIPLIER
*
* @dev: DRM device
* @colorop: The drm_colorop object to initialize
* @plane: The associated drm_plane
* @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines.
* @return zero on success, -E value on failure
*/
int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane, uint32_t flags)
{
struct drm_property *prop;
int ret;
ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_MULTIPLIER, flags);
if (ret)
return ret;
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "MULTIPLIER", 0, U64_MAX);
if (!prop)
return -ENOMEM;
colorop->multiplier_property = prop;
drm_object_attach_property(&colorop->base, colorop->multiplier_property, 0);
drm_colorop_reset(colorop);
return 0;
}
EXPORT_SYMBOL(drm_plane_colorop_mult_init);
int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop,
struct drm_plane *plane,
uint32_t lut_size,
enum drm_colorop_lut3d_interpolation_type interpolation,
uint32_t flags)
{
struct drm_property *prop;
int ret;
ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_3D_LUT, flags);
if (ret)
return ret;
/* LUT size */
prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC,
"SIZE", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
colorop->size_property = prop;
drm_object_attach_property(&colorop->base, colorop->size_property, lut_size);
colorop->size = lut_size;
/* interpolation */
prop = drm_property_create_enum(dev, 0, "LUT3D_INTERPOLATION",
drm_colorop_lut3d_interpolation_list,
ARRAY_SIZE(drm_colorop_lut3d_interpolation_list));
if (!prop)
return -ENOMEM;
colorop->lut3d_interpolation_property = prop;
drm_object_attach_property(&colorop->base, prop, interpolation);
colorop->lut3d_interpolation = interpolation;
/* data */
ret = drm_colorop_create_data_prop(dev, colorop);
if (ret)
return ret;
drm_colorop_reset(colorop);
return 0;
}
EXPORT_SYMBOL(drm_plane_colorop_3dlut_init);
static void __drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop,
struct drm_colorop_state *state)
{
memcpy(state, colorop->state, sizeof(*state));
if (state->data)
drm_property_blob_get(state->data);
state->bypass = true;
}
struct drm_colorop_state *
drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop)
{
struct drm_colorop_state *state;
if (WARN_ON(!colorop->state))
return NULL;
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_colorop_duplicate_state(colorop, state);
return state;
}
void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop,
struct drm_colorop_state *state)
{
kfree(state);
}
/**
* __drm_colorop_state_reset - resets colorop state to default values
* @colorop_state: atomic colorop state, must not be NULL
* @colorop: colorop object, must not be NULL
*
* Initializes the newly allocated @colorop_state with default
* values. This is useful for drivers that subclass the colorop state.
*/
static void __drm_colorop_state_reset(struct drm_colorop_state *colorop_state,
struct drm_colorop *colorop)
{
u64 val;
colorop_state->colorop = colorop;
colorop_state->bypass = true;
if (colorop->curve_1d_type_property) {
drm_object_property_get_default_value(&colorop->base,
colorop->curve_1d_type_property,
&val);
colorop_state->curve_1d_type = val;
}
}
/**
* __drm_colorop_reset - reset state on colorop
* @colorop: drm colorop
* @colorop_state: colorop state to assign
*
* Initializes the newly allocated @colorop_state and assigns it to
* the &drm_colorop->state pointer of @colorop, usually required when
* initializing the drivers or when called from the &drm_colorop_funcs.reset
* hook.
*
* This is useful for drivers that subclass the colorop state.
*/
static void __drm_colorop_reset(struct drm_colorop *colorop,
struct drm_colorop_state *colorop_state)
{
if (colorop_state)
__drm_colorop_state_reset(colorop_state, colorop);
colorop->state = colorop_state;
}
void drm_colorop_reset(struct drm_colorop *colorop)
{
kfree(colorop->state);
colorop->state = kzalloc(sizeof(*colorop->state), GFP_KERNEL);
if (colorop->state)
__drm_colorop_reset(colorop, colorop->state);
}
static const char * const colorop_type_name[] = {
[DRM_COLOROP_1D_CURVE] = "1D Curve",
[DRM_COLOROP_1D_LUT] = "1D LUT",
[DRM_COLOROP_CTM_3X4] = "3x4 Matrix",
[DRM_COLOROP_MULTIPLIER] = "Multiplier",
[DRM_COLOROP_3D_LUT] = "3D LUT",
};
static const char * const colorop_lut3d_interpolation_name[] = {
[DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL] = "Tetrahedral",
};
static const char * const colorop_lut1d_interpolation_name[] = {
[DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR] = "Linear",
};
const char *drm_get_colorop_type_name(enum drm_colorop_type type)
{
if (WARN_ON(type >= ARRAY_SIZE(colorop_type_name)))
return "unknown";
return colorop_type_name[type];
}
const char *drm_get_colorop_curve_1d_type_name(enum drm_colorop_curve_1d_type type)
{
if (WARN_ON(type >= ARRAY_SIZE(colorop_curve_1d_type_names)))
return "unknown";
return colorop_curve_1d_type_names[type];
}
/**
* drm_get_colorop_lut1d_interpolation_name - return a string for interpolation type
* @type: interpolation type to compute name of
*
* In contrast to the other drm_get_*_name functions this one here returns a
* const pointer and hence is threadsafe.
*/
const char *drm_get_colorop_lut1d_interpolation_name(enum drm_colorop_lut1d_interpolation_type type)
{
if (WARN_ON(type >= ARRAY_SIZE(colorop_lut1d_interpolation_name)))
return "unknown";
return colorop_lut1d_interpolation_name[type];
}
/**
* drm_get_colorop_lut3d_interpolation_name - return a string for interpolation type
* @type: interpolation type to compute name of
*
* In contrast to the other drm_get_*_name functions this one here returns a
* const pointer and hence is threadsafe.
*/
const char *drm_get_colorop_lut3d_interpolation_name(enum drm_colorop_lut3d_interpolation_type type)
{
if (WARN_ON(type >= ARRAY_SIZE(colorop_lut3d_interpolation_name)))
return "unknown";
return colorop_lut3d_interpolation_name[type];
}
/**
* drm_colorop_set_next_property - sets the next pointer
* @colorop: drm colorop
* @next: next colorop
*
* Should be used when constructing the color pipeline
*/
void drm_colorop_set_next_property(struct drm_colorop *colorop, struct drm_colorop *next)
{
drm_object_property_set_value(&colorop->base,
colorop->next_property,
next ? next->base.id : 0);
colorop->next = next;
}
EXPORT_SYMBOL(drm_colorop_set_next_property);
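A minimal sketch of the intended construction-time use, mirroring the amdgpu code earlier in this series (ops[] holds n colorops that have already been initialized):

for (i = 1; i < n; i++)
	drm_colorop_set_next_property(ops[i - 1], ops[i]);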

View File

@ -3439,6 +3439,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
* properties reflect the latest status.
*/
ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
file_priv->plane_color_pipeline,
(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
&out_resp->count_props);

View File

@ -163,6 +163,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object);
int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
bool plane_color_pipeline,
uint32_t __user *prop_ptr,
uint64_t __user *prop_values,
uint32_t *arg_count_props);

View File

@ -733,6 +733,7 @@ static int drm_dev_init(struct drm_device *dev,
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
INIT_LIST_HEAD(&dev->client_sysrq_list);
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->event_lock);
@ -1100,6 +1101,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_unload;
}
drm_panic_register(dev);
drm_client_sysrq_register(dev);
DRM_INFO("Initialized %s %d.%d.%d for %s on minor %d\n",
driver->name, driver->major, driver->minor,
@ -1144,6 +1146,7 @@ void drm_dev_unregister(struct drm_device *dev)
{
dev->registered = false;
drm_client_sysrq_unregister(dev);
drm_panic_unregister(dev);
drm_client_dev_unregister(dev);

View File

@ -32,7 +32,6 @@
#include <linux/console.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/sysrq.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_atomic.h>
@ -255,6 +254,7 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: driver-allocated fbdev helper, can be NULL
* @force: ignore present DRM master
*
* This helper should be called from fbdev emulation's &drm_client_funcs.restore
* callback. It ensures that the user isn't greeted with a black screen when the
@ -263,48 +263,12 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, bool force)
{
return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
#ifdef CONFIG_MAGIC_SYSRQ
/* emergency restore, don't bother with error reporting */
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
struct drm_fb_helper *helper;
mutex_lock(&kernel_fb_helper_lock);
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
struct drm_device *dev = helper->dev;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
continue;
mutex_lock(&helper->lock);
drm_client_modeset_commit_locked(&helper->client);
mutex_unlock(&helper->lock);
}
mutex_unlock(&kernel_fb_helper_lock);
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
static void drm_fb_helper_sysrq(u8 dummy1)
{
schedule_work(&drm_fb_helper_restore_work);
}
static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.handler = drm_fb_helper_sysrq,
.help_msg = "force-fb(v)",
.action_msg = "Restore framebuffer console",
};
#else
static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
@ -495,20 +459,7 @@ int drm_fb_helper_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_fb_helper_init);
/**
* drm_fb_helper_alloc_info - allocate fb_info and some of its members
* @fb_helper: driver-allocated fbdev helper
*
* A helper to alloc fb_info and the member cmap. Called by the driver
* within the struct &drm_driver.fbdev_probe callback function. Drivers do
* not need to release the allocated fb_info structure themselves, this is
* automatically done when calling drm_fb_helper_fini().
*
* RETURNS:
* fb_info pointer if things went okay, pointer containing error code
* otherwise
*/
struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
static struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
{
struct device *dev = fb_helper->dev->dev;
struct fb_info *info;
@ -535,17 +486,8 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
framebuffer_release(info);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_fb_helper_alloc_info);
/**
* drm_fb_helper_release_info - release fb_info and its members
* @fb_helper: driver-allocated fbdev helper
*
* A helper to release fb_info and the member cmap. Drivers do not
* need to release the allocated fb_info structure themselves, this is
* automatically done when calling drm_fb_helper_fini().
*/
void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
static void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
{
struct fb_info *info = fb_helper->info;
@ -558,7 +500,6 @@ void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
EXPORT_SYMBOL(drm_fb_helper_release_info);
/**
* drm_fb_helper_unregister_info - unregister fb_info framebuffer device
@ -601,11 +542,8 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
drm_fb_helper_release_info(fb_helper);
mutex_lock(&kernel_fb_helper_lock);
if (!list_empty(&fb_helper->kernel_fb_list)) {
if (!list_empty(&fb_helper->kernel_fb_list))
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list))
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
mutex_unlock(&kernel_fb_helper_lock);
if (!fb_helper->client.funcs)
@ -1328,9 +1266,9 @@ int drm_fb_helper_set_par(struct fb_info *info)
* the KDSET IOCTL with KD_TEXT, and only after that drops the master
* status when exiting.
*
* In the past this was caught by drm_fb_helper_lastclose(), but on
* modern systems where logind always keeps a drm fd open to orchestrate
* the vt switching, this doesn't work.
* In the past this was caught by drm_fb_helper_restore_fbdev_mode_unlocked(),
* but on modern systems where logind always keeps a drm fd open to
* orchestrate the vt switching, this doesn't work.
*
* To not break the userspace ABI we have this special case here, which
* is only used for the above case. Everything else uses the normal
@ -1809,6 +1747,11 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
height = dev->mode_config.max_height;
drm_client_modeset_probe(&fb_helper->client, width, height);
info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info))
return PTR_ERR(info);
ret = drm_fb_helper_single_fb_probe(fb_helper);
if (ret < 0) {
if (ret == -EAGAIN) {
@ -1817,13 +1760,12 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
}
mutex_unlock(&fb_helper->lock);
return ret;
goto err_drm_fb_helper_release_info;
}
drm_setup_crtcs_fb(fb_helper);
fb_helper->deferred_setup = false;
info = fb_helper->info;
info->var.pixclock = 0;
/* Need to drop locks to avoid recursive deadlock in
@ -1839,13 +1781,14 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
info->node, info->fix.id);
mutex_lock(&kernel_fb_helper_lock);
if (list_empty(&kernel_fb_helper_list))
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
mutex_unlock(&kernel_fb_helper_lock);
return 0;
err_drm_fb_helper_release_info:
drm_fb_helper_release_info(fb_helper);
return ret;
}
/**
@ -1955,16 +1898,3 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
/**
* drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation
* @dev: DRM device
*
* This function is obsolete. Call drm_fb_helper_restore_fbdev_mode_unlocked()
* instead.
*/
void drm_fb_helper_lastclose(struct drm_device *dev)
{
drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
}
EXPORT_SYMBOL(drm_fb_helper_lastclose);


@ -269,9 +269,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct fb_info *info = fb_helper->info;
struct drm_client_buffer *buffer;
struct drm_framebuffer *fb;
struct fb_info *info;
u32 format;
struct iosys_map map;
int ret;
@ -301,12 +301,6 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
fb_helper->buffer = buffer;
fb_helper->fb = fb;
info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_drm_client_buffer_vunmap;
}
drm_fb_helper_fill_info(info, fb_helper, sizes);
if (fb->funcs->dirty)
@ -314,12 +308,10 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
else
ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
if (ret)
goto err_drm_fb_helper_release_info;
goto err_drm_client_buffer_vunmap;
return 0;
err_drm_fb_helper_release_info:
drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;


@ -135,10 +135,10 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct fb_info *info = fb_helper->info;
struct drm_client_buffer *buffer;
struct drm_gem_shmem_object *shmem;
struct drm_framebuffer *fb;
struct fb_info *info;
u32 format;
struct iosys_map map;
int ret;
@ -168,12 +168,6 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
fb_helper->buffer = buffer;
fb_helper->fb = fb;
info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_drm_client_buffer_vunmap;
}
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fbops = &drm_fbdev_shmem_fb_ops;
@ -194,12 +188,10 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
goto err_drm_fb_helper_release_info;
goto err_drm_client_buffer_vunmap;
return 0;
err_drm_fb_helper_release_info:
drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;


@ -174,8 +174,8 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct fb_info *info = fb_helper->info;
struct drm_client_buffer *buffer;
struct fb_info *info;
size_t screen_size;
void *screen_buffer;
u32 format;
@ -203,12 +203,6 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
goto err_drm_client_buffer_delete;
}
info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_vfree;
}
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fbops = &drm_fbdev_ttm_fb_ops;
@ -225,12 +219,10 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
goto err_drm_fb_helper_release_info;
goto err_vfree;
return 0;
err_drm_fb_helper_release_info:
drm_fb_helper_release_info(fb_helper);
err_vfree:
vfree(screen_buffer);
err_drm_client_buffer_delete:


@ -405,7 +405,7 @@ EXPORT_SYMBOL(drm_open);
static void drm_lastclose(struct drm_device *dev)
{
drm_client_dev_restore(dev);
drm_client_dev_restore(dev, false);
if (dev_is_pci(dev->dev))
vga_switcheroo_process_delayed_switch();


@ -783,7 +783,6 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out)
{
struct drm_device *dev = filp->minor->dev;
struct drm_gem_object **objs;
u32 *handles;
int ret;
@ -798,20 +797,11 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
*objs_out = objs;
handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
if (!handles) {
ret = -ENOMEM;
goto out;
}
if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
ret = -EFAULT;
drm_dbg_core(dev, "Failed to copy in GEM handles\n");
goto out;
}
handles = vmemdup_array_user(bo_handles, count, sizeof(u32));
if (IS_ERR(handles))
return PTR_ERR(handles);
ret = objects_lookup(filp, handles, count, objs);
out:
kvfree(handles);
return ret;
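vmemdup_array_user() collapses the removed allocate-then-copy pattern into one overflow-checked call. A sketch of its semantics, not the actual implementation:

/* Roughly what vmemdup_array_user(bo_handles, count, sizeof(u32)) does: */
u32 *handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
if (!handles)
	return ERR_PTR(-ENOMEM);
if (copy_from_user(handles, bo_handles, size_mul(sizeof(u32), count))) {
	kvfree(handles);
	return ERR_PTR(-EFAULT);
}
return handles; /* caller releases with kvfree() */

Because the helper returns ERR_PTR() rather than NULL, the early error paths can return directly instead of jumping to a cleanup label.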


@ -56,6 +56,17 @@ static inline void drm_client_debugfs_init(struct drm_device *dev)
{ }
#endif
/* drm_client_sysrq.c */
#if defined(CONFIG_DRM_CLIENT) && defined(CONFIG_MAGIC_SYSRQ)
void drm_client_sysrq_register(struct drm_device *dev);
void drm_client_sysrq_unregister(struct drm_device *dev);
#else
static inline void drm_client_sysrq_register(struct drm_device *dev)
{ }
static inline void drm_client_sysrq_unregister(struct drm_device *dev)
{ }
#endif
/* drm_file.c */
extern struct mutex drm_global_mutex;
bool drm_dev_needs_global_mutex(struct drm_device *dev);


@ -373,6 +373,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->supports_virtualized_cursor_plane = req->value;
break;
case DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE:
if (!file_priv->atomic)
return -EINVAL;
if (req->value > 1)
return -EINVAL;
file_priv->plane_color_pipeline = req->value;
break;
default:
return -EINVAL;
}
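From userspace the new capability is enabled with DRM_IOCTL_SET_CLIENT_CAP; per the checks above it is only accepted for atomic clients and only takes the values 0 and 1. A minimal libdrm sketch (the DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE constant comes from this series' updated uapi headers):

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;

	/* ATOMIC must be enabled first, or the new cap is rejected. */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1) ||
	    drmSetClientCap(fd, DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE, 1))
		fprintf(stderr, "plane color pipeline cap not supported\n");

	return 0;
}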


@ -30,6 +30,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_print.h>
#include <drm/drm_colorop.h>
#include <linux/dma-resv.h>
#include "drm_crtc_internal.h"
@ -192,11 +193,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_colorop *colorop;
struct drm_plane *plane;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
drm_for_each_colorop(colorop, dev)
drm_colorop_reset(colorop);
drm_for_each_plane(plane, dev)
if (plane->funcs->reset)
plane->funcs->reset(plane);
@ -437,6 +442,7 @@ int drmm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
INIT_LIST_HEAD(&dev->mode_config.colorop_list);
INIT_LIST_HEAD(&dev->mode_config.privobj_list);
idr_init_base(&dev->mode_config.object_idr, 1);
idr_init_base(&dev->mode_config.tile_idr, 1);
@ -458,6 +464,7 @@ int drmm_mode_config_init(struct drm_device *dev)
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
dev->mode_config.num_total_plane = 0;
dev->mode_config.num_colorop = 0;
if (IS_ENABLED(CONFIG_LOCKDEP)) {
struct drm_modeset_acquire_ctx modeset_ctx;


@ -28,6 +28,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
@ -386,6 +387,7 @@ EXPORT_SYMBOL(drm_object_property_get_default_value);
/* helper for getconnector and getproperties ioctls */
int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
bool plane_color_pipeline,
uint32_t __user *prop_ptr,
uint64_t __user *prop_values,
uint32_t *arg_count_props)
@ -399,6 +401,21 @@ int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
continue;
if (plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) {
struct drm_plane *plane = obj_to_plane(obj);
if (prop == plane->color_encoding_property ||
prop == plane->color_range_property)
continue;
}
if (!plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) {
struct drm_plane *plane = obj_to_plane(obj);
if (prop == plane->color_pipeline_property)
continue;
}
if (*arg_count_props > count) {
ret = __drm_object_property_get_value(obj, prop, &val);
if (ret)
@ -457,6 +474,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
}
ret = drm_mode_object_get_properties(obj, file_priv->atomic,
file_priv->plane_color_pipeline,
(uint32_t __user *)(unsigned long)(arg->props_ptr),
(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
&arg->count_props);


@ -1820,3 +1820,62 @@ int drm_plane_add_size_hints_property(struct drm_plane *plane,
return 0;
}
EXPORT_SYMBOL(drm_plane_add_size_hints_property);
/**
* drm_plane_create_color_pipeline_property - create a new color pipeline
* property
*
* @plane: drm plane
* @pipelines: list of pipelines
* @num_pipelines: number of pipelines
*
* Create the COLOR_PIPELINE plane property to specify color pipelines on
* the plane.
*
* RETURNS:
* Zero for success or -errno
*/
int drm_plane_create_color_pipeline_property(struct drm_plane *plane,
const struct drm_prop_enum_list *pipelines,
int num_pipelines)
{
struct drm_prop_enum_list *all_pipelines;
struct drm_property *prop;
int len = 0;
int i;
all_pipelines = kcalloc(num_pipelines + 1,
sizeof(*all_pipelines),
GFP_KERNEL);
if (!all_pipelines) {
drm_err(plane->dev, "failed to allocate color pipeline\n");
return -ENOMEM;
}
/* Create default Bypass color pipeline */
all_pipelines[len].type = 0;
all_pipelines[len].name = "Bypass";
len++;
/* Add all other color pipelines */
for (i = 0; i < num_pipelines; i++, len++) {
all_pipelines[len].type = pipelines[i].type;
all_pipelines[len].name = pipelines[i].name;
}
prop = drm_property_create_enum(plane->dev, DRM_MODE_PROP_ATOMIC,
"COLOR_PIPELINE",
all_pipelines, len);
if (IS_ERR(prop)) {
kfree(all_pipelines);
return PTR_ERR(prop);
}
drm_object_attach_property(&plane->base, prop, 0);
plane->color_pipeline_property = prop;
kfree(all_pipelines);
return 0;
}
EXPORT_SYMBOL(drm_plane_create_color_pipeline_property);
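How a driver is expected to wire this up: one drm_prop_enum_list entry per hardware pipeline, with the helper prepending the Bypass entry itself. A hedged sketch, assuming (as in this series' driver implementations) that each enum value carries the object ID of the pipeline's first colorop; first_colorop is a hypothetical, previously created colorop:

struct drm_prop_enum_list pipelines[] = {
	{
		.type = first_colorop->base.id, /* assumed convention */
		.name = "Color Pipeline",
	},
};
int ret;

ret = drm_plane_create_color_pipeline_property(plane, pipelines,
					       ARRAY_SIZE(pipelines));
if (ret)
	return ret;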


@ -58,18 +58,11 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes,
struct exynos_drm_gem *exynos_gem)
{
struct fb_info *fbi;
struct fb_info *fbi = helper->info;
struct drm_framebuffer *fb = helper->fb;
unsigned int size = fb->width * fb->height * fb->format->cpp[0];
unsigned long offset;
fbi = drm_fb_helper_alloc_info(helper);
if (IS_ERR(fbi)) {
DRM_DEV_ERROR(to_dma_dev(helper->dev),
"failed to allocate fb info.\n");
return PTR_ERR(fbi);
}
fbi->fbops = &exynos_drm_fb_ops;
drm_fb_helper_fill_info(fbi, helper, sizes);


@ -108,7 +108,7 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_device *dev = fb_helper->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct fb_info *info;
struct fb_info *info = fb_helper->info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd = { };
int size;
@ -167,12 +167,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
fb_helper->funcs = &psb_fbdev_fb_helper_funcs;
fb_helper->fb = fb;
info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_drm_framebuffer_unregister_private;
}
info->fbops = &psb_fbdev_fb_ops;
/* Accessed stolen memory directly */
@ -196,10 +190,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
return 0;
err_drm_framebuffer_unregister_private:
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
err_drm_gem_object_put:
drm_gem_object_put(obj);
return ret;


@ -267,8 +267,8 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct intel_display *display = to_intel_display(helper->dev);
struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
struct intel_framebuffer *fb = ifbdev->fb;
struct fb_info *info = helper->info;
struct ref_tracker *wakeref;
struct fb_info *info;
struct i915_vma *vma;
unsigned long flags = 0;
bool prealloc = false;
@ -318,13 +318,6 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
goto out_unlock;
}
info = drm_fb_helper_alloc_info(helper);
if (IS_ERR(info)) {
drm_err(display->drm, "Failed to allocate fb_info (%pe)\n", info);
ret = PTR_ERR(info);
goto out_unpin;
}
helper->funcs = &intel_fb_helper_funcs;
helper->fb = &fb->base;


@ -91,9 +91,9 @@ int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
{
struct drm_device *dev = helper->dev;
struct msm_drm_private *priv = dev->dev_private;
struct fb_info *fbi = helper->info;
struct drm_framebuffer *fb = NULL;
struct drm_gem_object *bo;
struct fb_info *fbi = NULL;
uint64_t paddr;
uint32_t format;
int ret, pitch;
@ -126,13 +126,6 @@ int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
goto fail;
}
fbi = drm_fb_helper_alloc_info(helper);
if (IS_ERR(fbi)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
goto fail;
}
DBG("fbi=%p, dev=%p", fbi, dev);
helper->funcs = &msm_fbdev_helper_funcs;


@ -154,9 +154,9 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct omap_drm_private *priv = dev->dev_private;
struct omap_fbdev *fbdev = priv->fbdev;
struct fb_info *fbi = helper->info;
struct drm_framebuffer *fb = NULL;
union omap_gem_size gsize;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
struct drm_gem_object *bo;
dma_addr_t dma_addr;
@ -225,13 +225,6 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
goto fail;
}
fbi = drm_fb_helper_alloc_info(helper);
if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
goto fail;
}
DBG("fbi=%p, dev=%p", fbi, dev);
helper->funcs = &omap_fbdev_helper_funcs;


@ -408,6 +408,19 @@ config DRM_PANEL_LG_LB035Q02
(found on the Gumstix Overo Palo35 board). To compile this driver as
a module, choose M here.
config DRM_PANEL_LG_LD070WX3
tristate "LG LD070WX3 MIPI DSI panel"
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
help
Say Y here if you want to enable support for the LD070WX3 MIPI DSI
panel found in the NVIDIA Tegra Note 7 tablet.
To compile this driver as a module, choose M here: the module will
be called panel-lg-ld070wx3.
config DRM_PANEL_LG_LG4573
tristate "LG4573 RGB/SPI panel"
depends on OF && SPI
@ -881,16 +894,17 @@ config DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01
DSI protocol with 4 lanes.
config DRM_PANEL_SAMSUNG_SOFEF00
tristate "Samsung sofef00/s6e3fc2x01 OnePlus 6/6T DSI cmd mode panels"
tristate "Samsung SOFEF00 DSI panel controller"
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
help
Say Y or M here if you want to enable support for the Samsung AMOLED
command mode panels found in the OnePlus 6/6T smartphones.
panel SOFEF00 DDIC and connected panel.
Currently supported panels:
The panels are 2280x1080@60Hz and 2340x1080@60Hz respectively
Samsung AMS628NW01 (found in OnePlus 6, 1080x2280@60Hz)
config DRM_PANEL_SEIKO_43WVF1G
tristate "Seiko 43WVF1G panel"


@ -41,6 +41,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LINCOLNTECH_LCD197) += panel-lincolntech-lcd197.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LD070WX3) += panel-lg-ld070wx3.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_LG_SW43408) += panel-lg-sw43408.o
obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o


@ -1965,6 +1965,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80_d50, "NV116WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a6a, &delay_200_500_e80, "NV140WUM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a84, &delay_200_500_e50, "NV133WUM-T01"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"),


@ -820,6 +820,204 @@ static const struct ili9881c_instr tl050hdv35_init[] = {
ILI9881C_COMMAND_INSTR(0xd3, 0x39),
};
static const struct ili9881c_instr w552946aaa_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(3),
ILI9881C_COMMAND_INSTR(0x01, 0x00),
ILI9881C_COMMAND_INSTR(0x02, 0x00),
ILI9881C_COMMAND_INSTR(0x03, 0x53),
ILI9881C_COMMAND_INSTR(0x04, 0x53),
ILI9881C_COMMAND_INSTR(0x05, 0x13),
ILI9881C_COMMAND_INSTR(0x06, 0x04),
ILI9881C_COMMAND_INSTR(0x07, 0x02),
ILI9881C_COMMAND_INSTR(0x08, 0x02),
ILI9881C_COMMAND_INSTR(0x09, 0x00),
ILI9881C_COMMAND_INSTR(0x0a, 0x00),
ILI9881C_COMMAND_INSTR(0x0b, 0x00),
ILI9881C_COMMAND_INSTR(0x0c, 0x00),
ILI9881C_COMMAND_INSTR(0x0d, 0x00),
ILI9881C_COMMAND_INSTR(0x0e, 0x00),
ILI9881C_COMMAND_INSTR(0x0f, 0x00),
ILI9881C_COMMAND_INSTR(0x10, 0x00),
ILI9881C_COMMAND_INSTR(0x11, 0x00),
ILI9881C_COMMAND_INSTR(0x12, 0x00),
ILI9881C_COMMAND_INSTR(0x13, 0x00),
ILI9881C_COMMAND_INSTR(0x14, 0x00),
ILI9881C_COMMAND_INSTR(0x15, 0x08),
ILI9881C_COMMAND_INSTR(0x16, 0x10),
ILI9881C_COMMAND_INSTR(0x17, 0x00),
ILI9881C_COMMAND_INSTR(0x18, 0x08),
ILI9881C_COMMAND_INSTR(0x19, 0x00),
ILI9881C_COMMAND_INSTR(0x1a, 0x00),
ILI9881C_COMMAND_INSTR(0x1b, 0x00),
ILI9881C_COMMAND_INSTR(0x1c, 0x00),
ILI9881C_COMMAND_INSTR(0x1d, 0x00),
ILI9881C_COMMAND_INSTR(0x1e, 0xc0),
ILI9881C_COMMAND_INSTR(0x1f, 0x80),
ILI9881C_COMMAND_INSTR(0x20, 0x02),
ILI9881C_COMMAND_INSTR(0x21, 0x09),
ILI9881C_COMMAND_INSTR(0x22, 0x00),
ILI9881C_COMMAND_INSTR(0x23, 0x00),
ILI9881C_COMMAND_INSTR(0x24, 0x00),
ILI9881C_COMMAND_INSTR(0x25, 0x00),
ILI9881C_COMMAND_INSTR(0x26, 0x00),
ILI9881C_COMMAND_INSTR(0x27, 0x00),
ILI9881C_COMMAND_INSTR(0x28, 0x55),
ILI9881C_COMMAND_INSTR(0x29, 0x03),
ILI9881C_COMMAND_INSTR(0x2a, 0x00),
ILI9881C_COMMAND_INSTR(0x2b, 0x00),
ILI9881C_COMMAND_INSTR(0x2c, 0x00),
ILI9881C_COMMAND_INSTR(0x2d, 0x00),
ILI9881C_COMMAND_INSTR(0x2e, 0x00),
ILI9881C_COMMAND_INSTR(0x2f, 0x00),
ILI9881C_COMMAND_INSTR(0x30, 0x00),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
ILI9881C_COMMAND_INSTR(0x32, 0x00),
ILI9881C_COMMAND_INSTR(0x33, 0x00),
ILI9881C_COMMAND_INSTR(0x34, 0x04),
ILI9881C_COMMAND_INSTR(0x35, 0x05),
ILI9881C_COMMAND_INSTR(0x36, 0x05),
ILI9881C_COMMAND_INSTR(0x37, 0x00),
ILI9881C_COMMAND_INSTR(0x38, 0x3c),
ILI9881C_COMMAND_INSTR(0x39, 0x35),
ILI9881C_COMMAND_INSTR(0x3a, 0x00),
ILI9881C_COMMAND_INSTR(0x3b, 0x40),
ILI9881C_COMMAND_INSTR(0x3c, 0x00),
ILI9881C_COMMAND_INSTR(0x3d, 0x00),
ILI9881C_COMMAND_INSTR(0x3e, 0x00),
ILI9881C_COMMAND_INSTR(0x3f, 0x00),
ILI9881C_COMMAND_INSTR(0x40, 0x00),
ILI9881C_COMMAND_INSTR(0x41, 0x88),
ILI9881C_COMMAND_INSTR(0x42, 0x00),
ILI9881C_COMMAND_INSTR(0x43, 0x00),
ILI9881C_COMMAND_INSTR(0x44, 0x1f),
ILI9881C_COMMAND_INSTR(0x50, 0x01),
ILI9881C_COMMAND_INSTR(0x51, 0x23),
ILI9881C_COMMAND_INSTR(0x52, 0x45),
ILI9881C_COMMAND_INSTR(0x53, 0x67),
ILI9881C_COMMAND_INSTR(0x54, 0x89),
ILI9881C_COMMAND_INSTR(0x55, 0xab),
ILI9881C_COMMAND_INSTR(0x56, 0x01),
ILI9881C_COMMAND_INSTR(0x57, 0x23),
ILI9881C_COMMAND_INSTR(0x58, 0x45),
ILI9881C_COMMAND_INSTR(0x59, 0x67),
ILI9881C_COMMAND_INSTR(0x5a, 0x89),
ILI9881C_COMMAND_INSTR(0x5b, 0xab),
ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
ILI9881C_COMMAND_INSTR(0x5d, 0xef),
ILI9881C_COMMAND_INSTR(0x5e, 0x03),
ILI9881C_COMMAND_INSTR(0x5f, 0x14),
ILI9881C_COMMAND_INSTR(0x60, 0x15),
ILI9881C_COMMAND_INSTR(0x61, 0x0c),
ILI9881C_COMMAND_INSTR(0x62, 0x0d),
ILI9881C_COMMAND_INSTR(0x63, 0x0e),
ILI9881C_COMMAND_INSTR(0x64, 0x0f),
ILI9881C_COMMAND_INSTR(0x65, 0x10),
ILI9881C_COMMAND_INSTR(0x66, 0x11),
ILI9881C_COMMAND_INSTR(0x67, 0x08),
ILI9881C_COMMAND_INSTR(0x68, 0x02),
ILI9881C_COMMAND_INSTR(0x69, 0x0a),
ILI9881C_COMMAND_INSTR(0x6a, 0x02),
ILI9881C_COMMAND_INSTR(0x6b, 0x02),
ILI9881C_COMMAND_INSTR(0x6c, 0x02),
ILI9881C_COMMAND_INSTR(0x6d, 0x02),
ILI9881C_COMMAND_INSTR(0x6e, 0x02),
ILI9881C_COMMAND_INSTR(0x6f, 0x02),
ILI9881C_COMMAND_INSTR(0x70, 0x02),
ILI9881C_COMMAND_INSTR(0x71, 0x02),
ILI9881C_COMMAND_INSTR(0x72, 0x06),
ILI9881C_COMMAND_INSTR(0x73, 0x02),
ILI9881C_COMMAND_INSTR(0x74, 0x02),
ILI9881C_COMMAND_INSTR(0x75, 0x14),
ILI9881C_COMMAND_INSTR(0x76, 0x15),
ILI9881C_COMMAND_INSTR(0x77, 0x0f),
ILI9881C_COMMAND_INSTR(0x78, 0x0e),
ILI9881C_COMMAND_INSTR(0x79, 0x0d),
ILI9881C_COMMAND_INSTR(0x7a, 0x0c),
ILI9881C_COMMAND_INSTR(0x7b, 0x11),
ILI9881C_COMMAND_INSTR(0x7c, 0x10),
ILI9881C_COMMAND_INSTR(0x7d, 0x06),
ILI9881C_COMMAND_INSTR(0x7e, 0x02),
ILI9881C_COMMAND_INSTR(0x7f, 0x0a),
ILI9881C_COMMAND_INSTR(0x80, 0x02),
ILI9881C_COMMAND_INSTR(0x81, 0x02),
ILI9881C_COMMAND_INSTR(0x82, 0x02),
ILI9881C_COMMAND_INSTR(0x83, 0x02),
ILI9881C_COMMAND_INSTR(0x84, 0x02),
ILI9881C_COMMAND_INSTR(0x85, 0x02),
ILI9881C_COMMAND_INSTR(0x86, 0x02),
ILI9881C_COMMAND_INSTR(0x87, 0x02),
ILI9881C_COMMAND_INSTR(0x88, 0x08),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
ILI9881C_COMMAND_INSTR(0x00, 0x80),
ILI9881C_COMMAND_INSTR(0x70, 0x00),
ILI9881C_COMMAND_INSTR(0x71, 0x00),
ILI9881C_COMMAND_INSTR(0x66, 0xfe),
ILI9881C_COMMAND_INSTR(0x82, 0x15),
ILI9881C_COMMAND_INSTR(0x84, 0x15),
ILI9881C_COMMAND_INSTR(0x85, 0x15),
ILI9881C_COMMAND_INSTR(0x3a, 0x24),
ILI9881C_COMMAND_INSTR(0x32, 0xac),
ILI9881C_COMMAND_INSTR(0x8c, 0x80),
ILI9881C_COMMAND_INSTR(0x3c, 0xf5),
ILI9881C_COMMAND_INSTR(0x88, 0x33),
ILI9881C_SWITCH_PAGE_INSTR(1),
ILI9881C_COMMAND_INSTR(0x22, 0x0a),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
ILI9881C_COMMAND_INSTR(0x53, 0x78),
ILI9881C_COMMAND_INSTR(0x55, 0x7b),
ILI9881C_COMMAND_INSTR(0x60, 0x20),
ILI9881C_COMMAND_INSTR(0x61, 0x00),
ILI9881C_COMMAND_INSTR(0x62, 0x0d),
ILI9881C_COMMAND_INSTR(0x63, 0x00),
ILI9881C_COMMAND_INSTR(0xa0, 0x00),
ILI9881C_COMMAND_INSTR(0xa1, 0x10),
ILI9881C_COMMAND_INSTR(0xa2, 0x1c),
ILI9881C_COMMAND_INSTR(0xa3, 0x13),
ILI9881C_COMMAND_INSTR(0xa4, 0x15),
ILI9881C_COMMAND_INSTR(0xa5, 0x26),
ILI9881C_COMMAND_INSTR(0xa6, 0x1a),
ILI9881C_COMMAND_INSTR(0xa7, 0x1d),
ILI9881C_COMMAND_INSTR(0xa8, 0x67),
ILI9881C_COMMAND_INSTR(0xa9, 0x1c),
ILI9881C_COMMAND_INSTR(0xaa, 0x29),
ILI9881C_COMMAND_INSTR(0xab, 0x5b),
ILI9881C_COMMAND_INSTR(0xac, 0x26),
ILI9881C_COMMAND_INSTR(0xad, 0x28),
ILI9881C_COMMAND_INSTR(0xae, 0x5c),
ILI9881C_COMMAND_INSTR(0xaf, 0x30),
ILI9881C_COMMAND_INSTR(0xb0, 0x31),
ILI9881C_COMMAND_INSTR(0xb1, 0x32),
ILI9881C_COMMAND_INSTR(0xb2, 0x00),
ILI9881C_COMMAND_INSTR(0xb1, 0x2e),
ILI9881C_COMMAND_INSTR(0xb2, 0x32),
ILI9881C_COMMAND_INSTR(0xb3, 0x00),
ILI9881C_COMMAND_INSTR(0xb6, 0x02),
ILI9881C_COMMAND_INSTR(0xb7, 0x03),
ILI9881C_COMMAND_INSTR(0xc0, 0x00),
ILI9881C_COMMAND_INSTR(0xc1, 0x10),
ILI9881C_COMMAND_INSTR(0xc2, 0x1c),
ILI9881C_COMMAND_INSTR(0xc3, 0x13),
ILI9881C_COMMAND_INSTR(0xc4, 0x15),
ILI9881C_COMMAND_INSTR(0xc5, 0x26),
ILI9881C_COMMAND_INSTR(0xc6, 0x1a),
ILI9881C_COMMAND_INSTR(0xc7, 0x1d),
ILI9881C_COMMAND_INSTR(0xc8, 0x67),
ILI9881C_COMMAND_INSTR(0xc9, 0x1c),
ILI9881C_COMMAND_INSTR(0xca, 0x29),
ILI9881C_COMMAND_INSTR(0xcb, 0x5b),
ILI9881C_COMMAND_INSTR(0xcc, 0x26),
ILI9881C_COMMAND_INSTR(0xcd, 0x28),
ILI9881C_COMMAND_INSTR(0xce, 0x5c),
ILI9881C_COMMAND_INSTR(0xcf, 0x30),
ILI9881C_COMMAND_INSTR(0xd0, 0x31),
ILI9881C_COMMAND_INSTR(0xd1, 0x2e),
ILI9881C_COMMAND_INSTR(0xd2, 0x32),
ILI9881C_COMMAND_INSTR(0xd3, 0x00),
ILI9881C_SWITCH_PAGE_INSTR(0),
};
static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(3),
ILI9881C_COMMAND_INSTR(0x01, 0x00),
@ -1960,6 +2158,23 @@ static const struct drm_display_mode tl050hdv35_default_mode = {
.height_mm = 110,
};
static const struct drm_display_mode w552946aaa_default_mode = {
.clock = 65000,
.hdisplay = 720,
.hsync_start = 720 + 52,
.hsync_end = 720 + 52 + 8,
.htotal = 720 + 52 + 8 + 48,
.vdisplay = 1280,
.vsync_start = 1280 + 16,
.vsync_end = 1280 + 16 + 6,
.vtotal = 1280 + 16 + 6 + 15,
.width_mm = 68,
.height_mm = 121,
};
static const struct drm_display_mode w552946aba_default_mode = {
.clock = 64000,
@ -2188,6 +2403,15 @@ static const struct ili9881c_desc tl050hdv35_desc = {
.default_address_mode = 0x03,
};
static const struct ili9881c_desc w552946aaa_desc = {
.init = w552946aaa_init,
.init_length = ARRAY_SIZE(w552946aaa_init),
.mode = &w552946aaa_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
.lanes = 2,
};
static const struct ili9881c_desc w552946aba_desc = {
.init = w552946ab_init,
.init_length = ARRAY_SIZE(w552946ab_init),
@ -2236,6 +2460,7 @@ static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
{ .compatible = "startek,kd050hdfia020", .data = &kd050hdfia020_desc },
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aaa", .data = &w552946aaa_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
{ .compatible = "raspberrypi,dsi-5inch", &rpi_5inch_desc },


@ -1132,22 +1132,19 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
dsi->lanes = desc->lanes;
jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(jadard->reset)) {
DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n");
return PTR_ERR(jadard->reset);
}
if (IS_ERR(jadard->reset))
return dev_err_probe(&dsi->dev, PTR_ERR(jadard->reset),
"failed to get our reset GPIO\n");
jadard->vdd = devm_regulator_get(dev, "vdd");
if (IS_ERR(jadard->vdd)) {
DRM_DEV_ERROR(&dsi->dev, "failed to get vdd regulator\n");
return PTR_ERR(jadard->vdd);
}
if (IS_ERR(jadard->vdd))
return dev_err_probe(&dsi->dev, PTR_ERR(jadard->vdd),
"failed to get vdd regulator\n");
jadard->vccio = devm_regulator_get(dev, "vccio");
if (IS_ERR(jadard->vccio)) {
DRM_DEV_ERROR(&dsi->dev, "failed to get vccio regulator\n");
return PTR_ERR(jadard->vccio);
}
if (IS_ERR(jadard->vccio))
return dev_err_probe(&dsi->dev, PTR_ERR(jadard->vccio),
"failed to get vccio regulator\n");
ret = of_drm_get_panel_orientation(dev->of_node, &jadard->orientation);
if (ret < 0)


@ -0,0 +1,184 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/array_size.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
static const struct regulator_bulk_data lg_ld070wx3_supplies[] = {
{ .supply = "vdd" }, { .supply = "vcc" },
};
struct lg_ld070wx3 {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
struct regulator_bulk_data *supplies;
};
static inline struct lg_ld070wx3 *to_lg_ld070wx3(struct drm_panel *panel)
{
return container_of(panel, struct lg_ld070wx3, panel);
}
static int lg_ld070wx3_prepare(struct drm_panel *panel)
{
struct lg_ld070wx3 *priv = to_lg_ld070wx3(panel);
struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
struct device *dev = panel->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(lg_ld070wx3_supplies), priv->supplies);
if (ret < 0) {
dev_err(dev, "failed to enable power supplies: %d\n", ret);
return ret;
}
/*
* According to the spec no delay is required between enabling the
* supplies, but the regulators need ~5ms to reach the required
* voltage and MIPI signal setup requires an additional 110ms,
* 115ms in total.
*/
mdelay(115);
mipi_dsi_dcs_soft_reset_multi(&ctx);
mipi_dsi_msleep(&ctx, 20);
/* Differential input impedance selection */
mipi_dsi_dcs_write_seq_multi(&ctx, 0xae, 0x0b);
/* Enter test mode 1 and 2 */
mipi_dsi_dcs_write_seq_multi(&ctx, 0xee, 0xea);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xef, 0x5f);
/* Increased MIPI CLK driving ability */
mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x68);
/* Exit test mode 1 and 2 */
mipi_dsi_dcs_write_seq_multi(&ctx, 0xee, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xef, 0x00);
return ctx.accum_err;
}
static int lg_ld070wx3_unprepare(struct drm_panel *panel)
{
struct lg_ld070wx3 *priv = to_lg_ld070wx3(panel);
struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
msleep(50);
regulator_bulk_disable(ARRAY_SIZE(lg_ld070wx3_supplies), priv->supplies);
/* power supply must be off for at least 1s after panel disable */
msleep(1000);
return 0;
}
static const struct drm_display_mode lg_ld070wx3_mode = {
.clock = (800 + 32 + 48 + 8) * (1280 + 5 + 3 + 1) * 60 / 1000,
.hdisplay = 800,
.hsync_start = 800 + 32,
.hsync_end = 800 + 32 + 48,
.htotal = 800 + 32 + 48 + 8,
.vdisplay = 1280,
.vsync_start = 1280 + 5,
.vsync_end = 1280 + 5 + 3,
.vtotal = 1280 + 5 + 3 + 1,
.width_mm = 94,
.height_mm = 151,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static int lg_ld070wx3_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
return drm_connector_helper_get_modes_fixed(connector, &lg_ld070wx3_mode);
}
static const struct drm_panel_funcs lg_ld070wx3_panel_funcs = {
.prepare = lg_ld070wx3_prepare,
.unprepare = lg_ld070wx3_unprepare,
.get_modes = lg_ld070wx3_get_modes,
};
static int lg_ld070wx3_probe(struct mipi_dsi_device *dsi)
{
struct device *dev = &dsi->dev;
struct lg_ld070wx3 *priv;
int ret;
priv = devm_drm_panel_alloc(dev, struct lg_ld070wx3, panel,
&lg_ld070wx3_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(priv))
return PTR_ERR(priv);
ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(lg_ld070wx3_supplies),
lg_ld070wx3_supplies, &priv->supplies);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to get supplies\n");
priv->dsi = dsi;
mipi_dsi_set_drvdata(dsi, priv);
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM;
ret = drm_panel_of_backlight(&priv->panel);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to get backlight\n");
drm_panel_add(&priv->panel);
ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
drm_panel_remove(&priv->panel);
return dev_err_probe(dev, ret, "failed to attach to DSI host\n");
}
return 0;
}
static void lg_ld070wx3_remove(struct mipi_dsi_device *dsi)
{
struct lg_ld070wx3 *priv = mipi_dsi_get_drvdata(dsi);
drm_panel_remove(&priv->panel);
}
static const struct of_device_id lg_ld070wx3_of_match[] = {
{ .compatible = "lg,ld070wx3-sl01" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, lg_ld070wx3_of_match);
static struct mipi_dsi_driver lg_ld070wx3_driver = {
.driver = {
.name = "panel-lg-ld070wx3",
.of_match_table = lg_ld070wx3_of_match,
},
.probe = lg_ld070wx3_probe,
.remove = lg_ld070wx3_remove,
};
module_mipi_dsi_driver(lg_ld070wx3_driver);
MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
MODULE_DESCRIPTION("LG LD070WX3-SL01 DSI panel driver");
MODULE_LICENSE("GPL");


@ -54,9 +54,9 @@ static int rb070d30_panel_prepare(struct drm_panel *panel)
}
msleep(20);
gpiod_set_value(ctx->gpios.power, 1);
gpiod_set_value_cansleep(ctx->gpios.power, 1);
msleep(20);
gpiod_set_value(ctx->gpios.reset, 1);
gpiod_set_value_cansleep(ctx->gpios.reset, 1);
msleep(20);
return 0;
}
@ -65,8 +65,8 @@ static int rb070d30_panel_unprepare(struct drm_panel *panel)
{
struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
gpiod_set_value(ctx->gpios.reset, 0);
gpiod_set_value(ctx->gpios.power, 0);
gpiod_set_value_cansleep(ctx->gpios.reset, 0);
gpiod_set_value_cansleep(ctx->gpios.power, 0);
regulator_disable(ctx->supply);
return 0;


@ -16,20 +16,32 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
struct sofef00_panel {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
struct regulator *supply;
struct regulator_bulk_data *supplies;
struct gpio_desc *reset_gpio;
};
static const struct regulator_bulk_data sofef00_supplies[] = {
{ .supply = "vddio" },
{ .supply = "vci" },
{ .supply = "poc" },
};
static inline
struct sofef00_panel *to_sofef00_panel(struct drm_panel *panel)
{
return container_of(panel, struct sofef00_panel, panel);
}
#define sofef00_test_key_on_lvl2(ctx) \
mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x5a, 0x5a)
#define sofef00_test_key_off_lvl2(ctx) \
mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0xa5, 0xa5)
static void sofef00_panel_reset(struct sofef00_panel *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
@ -50,18 +62,26 @@ static int sofef00_panel_on(struct sofef00_panel *ctx)
mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
sofef00_test_key_on_lvl2(&dsi_ctx);
mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
sofef00_test_key_off_lvl2(&dsi_ctx);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
sofef00_test_key_on_lvl2(&dsi_ctx);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x12);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
sofef00_test_key_off_lvl2(&dsi_ctx);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
return dsi_ctx.accum_err;
}
static int sofef00_enable(struct drm_panel *panel)
{
struct sofef00_panel *ctx = to_sofef00_panel(panel);
struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
return dsi_ctx.accum_err;
@ -72,8 +92,6 @@ static int sofef00_panel_off(struct sofef00_panel *ctx)
struct mipi_dsi_device *dsi = ctx->dsi;
struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
mipi_dsi_msleep(&dsi_ctx, 40);
@ -86,70 +104,70 @@ static int sofef00_panel_off(struct sofef00_panel *ctx)
static int sofef00_panel_prepare(struct drm_panel *panel)
{
struct sofef00_panel *ctx = to_sofef00_panel(panel);
struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_enable(ctx->supply);
if (ret < 0) {
dev_err(dev, "Failed to enable regulator: %d\n", ret);
ret = regulator_bulk_enable(ARRAY_SIZE(sofef00_supplies), ctx->supplies);
if (ret < 0)
return ret;
}
sofef00_panel_reset(ctx);
ret = sofef00_panel_on(ctx);
if (ret < 0) {
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(sofef00_supplies), ctx->supplies);
return ret;
}
return 0;
}
static int sofef00_disable(struct drm_panel *panel)
{
struct sofef00_panel *ctx = to_sofef00_panel(panel);
sofef00_panel_off(ctx);
return 0;
}
static int sofef00_panel_unprepare(struct drm_panel *panel)
{
struct sofef00_panel *ctx = to_sofef00_panel(panel);
sofef00_panel_off(ctx);
regulator_disable(ctx->supply);
regulator_bulk_disable(ARRAY_SIZE(sofef00_supplies), ctx->supplies);
return 0;
}
static const struct drm_display_mode enchilada_panel_mode = {
static const struct drm_display_mode ams628nw01_panel_mode = {
.clock = (1080 + 112 + 16 + 36) * (2280 + 36 + 8 + 12) * 60 / 1000,
.hdisplay = 1080,
.hsync_start = 1080 + 112,
.hsync_end = 1080 + 112 + 16,
.htotal = 1080 + 112 + 16 + 36,
.vdisplay = 2280,
.vsync_start = 2280 + 36,
.vsync_end = 2280 + 36 + 8,
.vtotal = 2280 + 36 + 8 + 12,
.width_mm = 68,
.height_mm = 145,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector)
{
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode);
if (!mode)
return -ENOMEM;
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
drm_mode_probed_add(connector, mode);
return 1;
return drm_connector_helper_get_modes_fixed(connector, &ams628nw01_panel_mode);
}
static const struct drm_panel_funcs sofef00_panel_panel_funcs = {
.prepare = sofef00_panel_prepare,
.enable = sofef00_enable,
.disable = sofef00_disable,
.unprepare = sofef00_panel_unprepare,
.get_modes = sofef00_panel_get_modes,
};
@ -160,10 +178,14 @@ static int sofef00_panel_bl_update_status(struct backlight_device *bl)
int err;
u16 brightness = (u16)backlight_get_brightness(bl);
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
err = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
if (err < 0)
return err;
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
return 0;
}
@ -177,7 +199,7 @@ sofef00_create_backlight(struct mipi_dsi_device *dsi)
struct device *dev = &dsi->dev;
const struct backlight_properties props = {
.type = BACKLIGHT_PLATFORM,
.brightness = 1023,
.brightness = 512,
.max_brightness = 1023,
};
@ -197,10 +219,12 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ctx->supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(ctx->supply))
return dev_err_probe(dev, PTR_ERR(ctx->supply),
"Failed to get vddio regulator\n");
ret = devm_regulator_bulk_get_const(dev,
ARRAY_SIZE(sofef00_supplies),
sofef00_supplies,
&ctx->supplies);
if (ret)
return dev_err_probe(dev, ret, "Failed to get regulators\n");
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@ -212,6 +236,10 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = sofef00_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
@ -243,7 +271,8 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id sofef00_panel_of_match[] = {
{ .compatible = "samsung,sofef00" },
{ .compatible = "samsung,sofef00" }, /* legacy */
{ .compatible = "samsung,sofef00-ams628nw01" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sofef00_panel_of_match);
@ -252,7 +281,7 @@ static struct mipi_dsi_driver sofef00_panel_driver = {
.probe = sofef00_panel_probe,
.remove = sofef00_panel_remove,
.driver = {
.name = "panel-oneplus6",
.name = "panel-samsung-sofef00",
.of_match_table = sofef00_panel_of_match,
},
};
@ -260,5 +289,5 @@ static struct mipi_dsi_driver sofef00_panel_driver = {
module_mipi_dsi_driver(sofef00_panel_driver);
MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones");
MODULE_DESCRIPTION("DRM driver for Samsung SOFEF00 DDIC");
MODULE_LICENSE("GPL v2");


@ -4106,6 +4106,30 @@ static const struct panel_desc qishenglong_gopher2b_lcd = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct display_timing raystar_rff500f_awh_dnn_timing = {
.pixelclock = { 23000000, 25000000, 27000000 },
.hactive = { 800, 800, 800 },
.hback_porch = { 4, 8, 48 },
.hfront_porch = { 4, 8, 48 },
.hsync_len = { 2, 4, 8 },
.vactive = { 480, 480, 480 },
.vback_porch = { 4, 8, 12 },
.vfront_porch = { 4, 8, 12 },
.vsync_len = { 2, 4, 8 },
};
static const struct panel_desc raystar_rff500f_awh_dnn = {
.timings = &raystar_rff500f_awh_dnn_timing,
.num_timings = 1,
.bpc = 8,
.size = {
.width = 108,
.height = 65,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing rocktech_rk043fn48h_timing = {
.pixelclock = { 6000000, 9000000, 12000000 },
.hactive = { 480, 480, 480 },
@ -4223,6 +4247,37 @@ static const struct panel_desc samsung_ltl101al01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing samsung_ltl106al01_timing = {
.pixelclock = { 71980000, 71980000, 71980000 },
.hactive = { 1366, 1366, 1366 },
.hfront_porch = { 56, 56, 56 },
.hback_porch = { 106, 106, 106 },
.hsync_len = { 14, 14, 14 },
.vactive = { 768, 768, 768 },
.vfront_porch = { 3, 3, 3 },
.vback_porch = { 6, 6, 6 },
.vsync_len = { 1, 1, 1 },
.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
static const struct panel_desc samsung_ltl106al01 = {
.timings = &samsung_ltl106al01_timing,
.num_timings = 1,
.bpc = 8,
.size = {
.width = 235,
.height = 132,
},
.delay = {
.prepare = 5,
.enable = 10,
.disable = 10,
.unprepare = 5,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@ -5378,6 +5433,9 @@ static const struct of_device_id platform_of_match[] = {
}, {
.compatible = "qishenglong,gopher2b-lcd",
.data = &qishenglong_gopher2b_lcd,
}, {
.compatible = "raystar,rff500f-awh-dnn",
.data = &raystar_rff500f_awh_dnn,
}, {
.compatible = "rocktech,rk043fn48h",
.data = &rocktech_rk043fn48h,
@ -5390,6 +5448,9 @@ static const struct of_device_id platform_of_match[] = {
}, {
.compatible = "samsung,ltl101al01",
.data = &samsung_ltl101al01,
}, {
.compatible = "samsung,ltl106al01",
.data = &samsung_ltl106al01,
}, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
@ -5600,34 +5661,6 @@ static const struct panel_desc_dsi boe_tv080wum_nl0 = {
.lanes = 4,
};
static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
.clock = 71000,
.hdisplay = 800,
.hsync_start = 800 + 32,
.hsync_end = 800 + 32 + 1,
.htotal = 800 + 32 + 1 + 57,
.vdisplay = 1280,
.vsync_start = 1280 + 28,
.vsync_end = 1280 + 28 + 1,
.vtotal = 1280 + 28 + 1 + 14,
};
static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
.desc = {
.modes = &lg_ld070wx3_sl01_mode,
.num_modes = 1,
.bpc = 8,
.size = {
.width = 94,
.height = 151,
},
.connector_type = DRM_MODE_CONNECTOR_DSI,
},
.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
.format = MIPI_DSI_FMT_RGB888,
.lanes = 4,
};
static const struct drm_display_mode lg_lh500wx1_sd03_mode = {
.clock = 67000,
.hdisplay = 720,
@ -5751,9 +5784,6 @@ static const struct of_device_id dsi_of_match[] = {
}, {
.compatible = "boe,tv080wum-nl0",
.data = &boe_tv080wum_nl0
}, {
.compatible = "lg,ld070wx3-sl01",
.data = &lg_ld070wx3_sl01
}, {
.compatible = "lg,lh500wx1-sd03",
.data = &lg_lh500wx1_sd03


@ -10,6 +10,7 @@ panthor-y := \
panthor_heap.o \
panthor_hw.o \
panthor_mmu.o \
panthor_pwr.o \
panthor_sched.o
obj-$(CONFIG_DRM_PANTHOR) += panthor.o


@ -21,6 +21,7 @@
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_mmu.h"
#include "panthor_pwr.h"
#include "panthor_regs.h"
#include "panthor_sched.h"
@ -113,6 +114,7 @@ void panthor_device_unplug(struct panthor_device *ptdev)
panthor_fw_unplug(ptdev);
panthor_mmu_unplug(ptdev);
panthor_gpu_unplug(ptdev);
panthor_pwr_unplug(ptdev);
pm_runtime_dont_use_autosuspend(ptdev->base.dev);
pm_runtime_put_sync_suspend(ptdev->base.dev);
@ -152,8 +154,8 @@ static void panthor_device_reset_work(struct work_struct *work)
panthor_sched_pre_reset(ptdev);
panthor_fw_pre_reset(ptdev, true);
panthor_mmu_pre_reset(ptdev);
panthor_gpu_soft_reset(ptdev);
panthor_gpu_l2_power_on(ptdev);
panthor_hw_soft_reset(ptdev);
panthor_hw_l2_power_on(ptdev);
panthor_mmu_post_reset(ptdev);
ret = panthor_fw_post_reset(ptdev);
atomic_set(&ptdev->reset.pending, 0);
@ -268,10 +270,14 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
goto err_rpm_put;
ret = panthor_gpu_init(ptdev);
ret = panthor_pwr_init(ptdev);
if (ret)
goto err_rpm_put;
ret = panthor_gpu_init(ptdev);
if (ret)
goto err_unplug_pwr;
ret = panthor_gpu_coherency_init(ptdev);
if (ret)
goto err_unplug_gpu;
@ -312,6 +318,9 @@ int panthor_device_init(struct panthor_device *ptdev)
err_unplug_gpu:
panthor_gpu_unplug(ptdev);
err_unplug_pwr:
panthor_pwr_unplug(ptdev);
err_rpm_put:
pm_runtime_put_sync_suspend(ptdev->base.dev);
return ret;
@ -465,6 +474,7 @@ static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
{
int ret;
panthor_pwr_resume(ptdev);
panthor_gpu_resume(ptdev);
panthor_mmu_resume(ptdev);
@ -474,6 +484,7 @@ static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
panthor_mmu_suspend(ptdev);
panthor_gpu_suspend(ptdev);
panthor_pwr_suspend(ptdev);
return ret;
}
@ -587,6 +598,7 @@ int panthor_device_suspend(struct device *dev)
panthor_fw_suspend(ptdev);
panthor_mmu_suspend(ptdev);
panthor_gpu_suspend(ptdev);
panthor_pwr_suspend(ptdev);
drm_dev_exit(cookie);
}


@ -24,10 +24,12 @@ struct panthor_device;
struct panthor_gpu;
struct panthor_group_pool;
struct panthor_heap_pool;
struct panthor_hw;
struct panthor_job;
struct panthor_mmu;
struct panthor_fw;
struct panthor_perfcnt;
struct panthor_pwr;
struct panthor_vm;
struct panthor_vm_pool;
@ -134,6 +136,12 @@ struct panthor_device {
/** @csif_info: Command stream interface information. */
struct drm_panthor_csif_info csif_info;
/** @hw: GPU-specific data. */
struct panthor_hw *hw;
/** @pwr: Power control management data. */
struct panthor_pwr *pwr;
/** @gpu: GPU management data. */
struct panthor_gpu *gpu;


@ -22,6 +22,7 @@
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"
@ -33,6 +34,7 @@
#define PROGRESS_TIMEOUT_SCALE_SHIFT 10
#define IDLE_HYSTERESIS_US 800
#define PWROFF_HYSTERESIS_US 10000
#define MCU_HALT_TIMEOUT_US (1ULL * USEC_PER_SEC)
/**
* struct panthor_fw_binary_hdr - Firmware binary header.
@ -317,6 +319,49 @@ panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot)
return &ptdev->fw->iface.streams[csg_slot][cs_slot];
}
static bool panthor_fw_has_glb_state(struct panthor_device *ptdev)
{
struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
return glb_iface->control->version >= CSF_IFACE_VERSION(4, 1, 0);
}
static bool panthor_fw_has_64bit_ep_req(struct panthor_device *ptdev)
{
struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
return glb_iface->control->version >= CSF_IFACE_VERSION(4, 0, 0);
}
u64 panthor_fw_csg_endpoint_req_get(struct panthor_device *ptdev,
struct panthor_fw_csg_iface *csg_iface)
{
if (panthor_fw_has_64bit_ep_req(ptdev))
return csg_iface->input->endpoint_req2;
else
return csg_iface->input->endpoint_req;
}
void panthor_fw_csg_endpoint_req_set(struct panthor_device *ptdev,
struct panthor_fw_csg_iface *csg_iface, u64 value)
{
if (panthor_fw_has_64bit_ep_req(ptdev))
csg_iface->input->endpoint_req2 = value;
else
csg_iface->input->endpoint_req = lower_32_bits(value);
}
void panthor_fw_csg_endpoint_req_update(struct panthor_device *ptdev,
struct panthor_fw_csg_iface *csg_iface, u64 value,
u64 mask)
{
if (panthor_fw_has_64bit_ep_req(ptdev))
panthor_fw_update_reqs64(csg_iface, endpoint_req2, value, mask);
else
panthor_fw_update_reqs(csg_iface, endpoint_req, lower_32_bits(value),
lower_32_bits(mask));
}
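These accessors let callers treat the CSG endpoint request as one opaque 64-bit value: on CSF interface >= 4.0.0 it lives in endpoint_req2, otherwise only the low 32 bits exist in endpoint_req. A hedged sketch of updating the priority field through the locked helper (new_prio is a hypothetical input; the CSG_EP_REQ_PRIORITY* macros are defined in the header further below):

panthor_fw_csg_endpoint_req_update(ptdev, csg_iface,
				   CSG_EP_REQ_PRIORITY(new_prio),
				   CSG_EP_REQ_PRIORITY_MASK);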
/**
* panthor_fw_conv_timeout() - Convert a timeout into a cycle-count
* @ptdev: Device.
@ -996,6 +1041,9 @@ static void panthor_fw_init_global_iface(struct panthor_device *ptdev)
GLB_IDLE_EN |
GLB_IDLE;
if (panthor_fw_has_glb_state(ptdev))
glb_iface->input->ack_irq_mask |= GLB_STATE_MASK;
panthor_fw_update_reqs(glb_iface, req, GLB_IDLE_EN, GLB_IDLE_EN);
panthor_fw_toggle_reqs(glb_iface, req, ack,
GLB_CFG_ALLOC_EN |
@ -1069,6 +1117,54 @@ static void panthor_fw_stop(struct panthor_device *ptdev)
drm_err(&ptdev->base, "Failed to stop MCU");
}
static bool panthor_fw_mcu_halted(struct panthor_device *ptdev)
{
struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
bool halted;
halted = gpu_read(ptdev, MCU_STATUS) == MCU_STATUS_HALT;
if (panthor_fw_has_glb_state(ptdev))
halted &= (GLB_STATE_GET(glb_iface->output->ack) == GLB_STATE_HALT);
return halted;
}
static void panthor_fw_halt_mcu(struct panthor_device *ptdev)
{
struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
if (panthor_fw_has_glb_state(ptdev))
panthor_fw_update_reqs(glb_iface, req, GLB_STATE(GLB_STATE_HALT), GLB_STATE_MASK);
else
panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
}
static bool panthor_fw_wait_mcu_halted(struct panthor_device *ptdev)
{
bool halted = false;
if (read_poll_timeout_atomic(panthor_fw_mcu_halted, halted, halted, 10,
MCU_HALT_TIMEOUT_US, 0, ptdev)) {
drm_warn(&ptdev->base, "Timed out waiting for MCU to halt");
return false;
}
return true;
}
static void panthor_fw_mcu_set_active(struct panthor_device *ptdev)
{
struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
if (panthor_fw_has_glb_state(ptdev))
panthor_fw_update_reqs(glb_iface, req, GLB_STATE(GLB_STATE_ACTIVE), GLB_STATE_MASK);
else
panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
}
/**
* panthor_fw_pre_reset() - Call before a reset.
* @ptdev: Device.
@ -1085,19 +1181,13 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
ptdev->reset.fast = false;
if (!on_hang) {
struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
u32 status;
panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
if (!gpu_read_poll_timeout(ptdev, MCU_STATUS, status,
status == MCU_STATUS_HALT, 10,
100000)) {
ptdev->reset.fast = true;
} else {
panthor_fw_halt_mcu(ptdev);
if (!panthor_fw_wait_mcu_halted(ptdev))
drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
}
else
ptdev->reset.fast = true;
}
panthor_fw_stop(ptdev);
panthor_job_irq_suspend(&ptdev->fw->irq);
panthor_fw_stop(ptdev);
@ -1126,14 +1216,14 @@ int panthor_fw_post_reset(struct panthor_device *ptdev)
*/
panthor_reload_fw_sections(ptdev, true);
} else {
/* The FW detects 0 -> 1 transitions. Make sure we reset
* the HALT bit before the FW is rebooted.
/*
* If the FW was previously successfully halted in the pre-reset
* operation, we need to transition it to active again before
* the FW is rebooted.
* This is not needed on a slow reset because FW sections are
* re-initialized.
*/
struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
panthor_fw_mcu_set_active(ptdev);
}
ret = panthor_fw_start(ptdev);
@ -1171,6 +1261,10 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
if (ptdev->fw->irq.irq)
panthor_job_irq_suspend(&ptdev->fw->irq);
panthor_fw_halt_mcu(ptdev);
if (!panthor_fw_wait_mcu_halted(ptdev))
drm_warn(&ptdev->base, "Failed to halt MCU on unplug");
panthor_fw_stop(ptdev);
}
@ -1186,7 +1280,7 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
ptdev->fw->vm = NULL;
if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
panthor_hw_l2_power_off(ptdev);
}
/**
@ -1365,7 +1459,7 @@ int panthor_fw_init(struct panthor_device *ptdev)
return ret;
}
ret = panthor_gpu_l2_power_on(ptdev);
ret = panthor_hw_l2_power_on(ptdev);
if (ret)
return ret;
@ -1409,3 +1503,4 @@ MODULE_FIRMWARE("arm/mali/arch10.12/mali_csffw.bin");
MODULE_FIRMWARE("arm/mali/arch11.8/mali_csffw.bin");
MODULE_FIRMWARE("arm/mali/arch12.8/mali_csffw.bin");
MODULE_FIRMWARE("arm/mali/arch13.8/mali_csffw.bin");
MODULE_FIRMWARE("arm/mali/arch14.8/mali_csffw.bin");


@ -167,10 +167,11 @@ struct panthor_fw_csg_input_iface {
#define CSG_EP_REQ_TILER(x) (((x) << 16) & GENMASK(19, 16))
#define CSG_EP_REQ_EXCL_COMPUTE BIT(20)
#define CSG_EP_REQ_EXCL_FRAGMENT BIT(21)
#define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & GENMASK(31, 28))
#define CSG_EP_REQ_PRIORITY_MASK GENMASK(31, 28)
#define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & CSG_EP_REQ_PRIORITY_MASK)
#define CSG_EP_REQ_PRIORITY_GET(x) (((x) & CSG_EP_REQ_PRIORITY_MASK) >> 28)
u32 endpoint_req;
u32 reserved2[2];
u64 endpoint_req2;
u64 suspend_buf;
u64 protm_suspend_buf;
u32 config;
@ -214,6 +215,13 @@ struct panthor_fw_global_input_iface {
#define GLB_FWCFG_UPDATE BIT(9)
#define GLB_IDLE_EN BIT(10)
#define GLB_SLEEP BIT(12)
#define GLB_STATE_MASK GENMASK(14, 12)
#define GLB_STATE_ACTIVE 0
#define GLB_STATE_HALT 1
#define GLB_STATE_SLEEP 2
#define GLB_STATE_SUSPEND 3
#define GLB_STATE(x) (((x) << 12) & GLB_STATE_MASK)
#define GLB_STATE_GET(x) (((x) & GLB_STATE_MASK) >> 12)
#define GLB_INACTIVE_COMPUTE BIT(20)
#define GLB_INACTIVE_FRAGMENT BIT(21)
#define GLB_INACTIVE_TILER BIT(22)
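/*
 * A quick worked example of the encoding above: GLB_STATE() packs the
 * requested MCU state into bits 14:12 of the global request word, so
 * GLB_STATE(GLB_STATE_SLEEP) == 2 << 12 == 0x2000, and GLB_STATE_GET()
 * recovers the value. GLB_STATE(GLB_STATE_ACTIVE) is 0.
 */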
@ -457,6 +465,16 @@ struct panthor_fw_global_iface {
spin_unlock(&(__iface)->lock); \
} while (0)
#define panthor_fw_update_reqs64(__iface, __in_reg, __val, __mask) \
do { \
u64 __cur_val, __new_val; \
spin_lock(&(__iface)->lock); \
__cur_val = READ_ONCE((__iface)->input->__in_reg); \
__new_val = (__cur_val & ~(__mask)) | ((__val) & (__mask)); \
WRITE_ONCE((__iface)->input->__in_reg, __new_val); \
spin_unlock(&(__iface)->lock); \
} while (0)
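/*
 * Minimal usage sketch for the 64-bit variant above (field and mask are
 * illustrative, not taken from a specific call site): update only the
 * priority bits of the 64-bit endpoint_req2 input while leaving the
 * other bits untouched.
 *
 *	panthor_fw_update_reqs64(csg_iface, endpoint_req2,
 *				 CSG_EP_REQ_PRIORITY(prio),
 *				 CSG_EP_REQ_PRIORITY_MASK);
 *
 * The spinlock makes the read-modify-write atomic with respect to other
 * request updates on the same interface.
 */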
struct panthor_fw_global_iface *
panthor_fw_get_glb_iface(struct panthor_device *ptdev);
@ -466,6 +484,16 @@ panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot);
struct panthor_fw_cs_iface *
panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot);
u64 panthor_fw_csg_endpoint_req_get(struct panthor_device *ptdev,
struct panthor_fw_csg_iface *csg_iface);
void panthor_fw_csg_endpoint_req_set(struct panthor_device *ptdev,
struct panthor_fw_csg_iface *csg_iface, u64 value);
void panthor_fw_csg_endpoint_req_update(struct panthor_device *ptdev,
struct panthor_fw_csg_iface *csg_iface, u64 value,
u64 mask);
int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_id, u32 req_mask,
u32 *acked, u32 timeout_ms);


@ -145,6 +145,9 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
bo = to_panthor_bo(&obj->base);
kbo->obj = &obj->base;
bo->flags = bo_flags;
bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
drm_gem_object_get(bo->exclusive_vm_root_gem);
bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
if (vm == panthor_fw_vm(ptdev))
debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED;
@ -168,9 +171,6 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
goto err_free_va;
kbo->vm = panthor_vm_get(vm);
bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
drm_gem_object_get(bo->exclusive_vm_root_gem);
bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
return kbo;
err_free_va:


@ -19,6 +19,7 @@
#include "panthor_device.h"
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_regs.h"
/**
@ -241,6 +242,11 @@ int panthor_gpu_block_power_on(struct panthor_device *ptdev,
return 0;
}
void panthor_gpu_l2_power_off(struct panthor_device *ptdev)
{
panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
}
/**
* panthor_gpu_l2_power_on() - Power-on the L2-cache
* @ptdev: Device.
@ -368,9 +374,9 @@ void panthor_gpu_suspend(struct panthor_device *ptdev)
{
/* On a fast reset, simply power down the L2. */
if (!ptdev->reset.fast)
panthor_gpu_soft_reset(ptdev);
panthor_hw_soft_reset(ptdev);
else
panthor_gpu_power_off(ptdev, L2, 1, 20000);
panthor_hw_l2_power_off(ptdev);
panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}
@ -385,6 +391,6 @@ void panthor_gpu_suspend(struct panthor_device *ptdev)
void panthor_gpu_resume(struct panthor_device *ptdev)
{
panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
panthor_gpu_l2_power_on(ptdev);
panthor_hw_l2_power_on(ptdev);
}


@ -46,6 +46,7 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
type ## _PWRTRANS, \
mask, timeout_us)
void panthor_gpu_l2_power_off(struct panthor_device *ptdev);
int panthor_gpu_l2_power_on(struct panthor_device *ptdev);
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
u32 l2, u32 lsc, u32 other);


@ -4,12 +4,55 @@
#include <drm/drm_print.h>
#include "panthor_device.h"
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_pwr.h"
#include "panthor_regs.h"
#define GPU_PROD_ID_MAKE(arch_major, prod_major) \
(((arch_major) << 24) | (prod_major))
/** struct panthor_hw_entry - HW arch major to panthor_hw binding entry */
struct panthor_hw_entry {
/** @arch_min: Minimum supported architecture major value (inclusive) */
u8 arch_min;
/** @arch_max: Maximum supported architecture major value (inclusive) */
u8 arch_max;
/** @hwdev: Pointer to panthor_hw structure */
struct panthor_hw *hwdev;
};
static struct panthor_hw panthor_hw_arch_v10 = {
.ops = {
.soft_reset = panthor_gpu_soft_reset,
.l2_power_off = panthor_gpu_l2_power_off,
.l2_power_on = panthor_gpu_l2_power_on,
},
};
static struct panthor_hw panthor_hw_arch_v14 = {
.ops = {
.soft_reset = panthor_pwr_reset_soft,
.l2_power_off = panthor_pwr_l2_power_off,
.l2_power_on = panthor_pwr_l2_power_on,
},
};
static struct panthor_hw_entry panthor_hw_match[] = {
{
.arch_min = 10,
.arch_max = 13,
.hwdev = &panthor_hw_arch_v10,
},
{
.arch_min = 14,
.arch_max = 14,
.hwdev = &panthor_hw_arch_v14,
},
};
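/*
 * Illustration of how the table resolves (model names per
 * get_gpu_model_name() below): a GPU reporting arch_major 12 or 13
 * (e.g. Mali-G625) lands in the [10, 13] entry and gets the legacy
 * GPU-block ops, arch_major 14 (the Mali-G1 family) gets the
 * PWR_CONTROL-based ops, and anything else makes
 * panthor_hw_bind_device() fail with -EOPNOTSUPP.
 */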
static char *get_gpu_model_name(struct panthor_device *ptdev)
{
const u32 gpu_id = ptdev->gpu_info.gpu_id;
@ -55,6 +98,12 @@ static char *get_gpu_model_name(struct panthor_device *ptdev)
fallthrough;
case GPU_PROD_ID_MAKE(13, 1):
return "Mali-G625";
case GPU_PROD_ID_MAKE(14, 0):
return "Mali-G1-Ultra";
case GPU_PROD_ID_MAKE(14, 1):
return "Mali-G1-Premium";
case GPU_PROD_ID_MAKE(14, 3):
return "Mali-G1-Pro";
}
return "(Unknown Mali GPU)";
@ -64,7 +113,6 @@ static void panthor_gpu_info_init(struct panthor_device *ptdev)
{
unsigned int i;
ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
@ -82,12 +130,19 @@ static void panthor_gpu_info_init(struct panthor_device *ptdev)
ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
/* Introduced in arch 11.x */
ptdev->gpu_info.gpu_features = gpu_read64(ptdev, GPU_FEATURES);
if (panthor_hw_has_pwr_ctrl(ptdev)) {
/* Introduced in arch 14.x */
ptdev->gpu_info.l2_present = gpu_read64(ptdev, PWR_L2_PRESENT);
ptdev->gpu_info.tiler_present = gpu_read64(ptdev, PWR_TILER_PRESENT);
ptdev->gpu_info.shader_present = gpu_read64(ptdev, PWR_SHADER_PRESENT);
} else {
ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
}
}
static void panthor_hw_info_init(struct panthor_device *ptdev)
@ -119,8 +174,50 @@ static void panthor_hw_info_init(struct panthor_device *ptdev)
ptdev->gpu_info.tiler_present);
}
static int panthor_hw_bind_device(struct panthor_device *ptdev)
{
struct panthor_hw *hdev = NULL;
const u32 arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
int i;
for (i = 0; i < ARRAY_SIZE(panthor_hw_match); i++) {
struct panthor_hw_entry *entry = &panthor_hw_match[i];
if (arch_major >= entry->arch_min && arch_major <= entry->arch_max) {
hdev = entry->hwdev;
break;
}
}
if (!hdev)
return -EOPNOTSUPP;
ptdev->hw = hdev;
return 0;
}
static int panthor_hw_gpu_id_init(struct panthor_device *ptdev)
{
ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
if (!ptdev->gpu_info.gpu_id)
return -ENXIO;
return 0;
}
int panthor_hw_init(struct panthor_device *ptdev)
{
int ret = 0;
ret = panthor_hw_gpu_id_init(ptdev);
if (ret)
return ret;
ret = panthor_hw_bind_device(ptdev);
if (ret)
return ret;
panthor_hw_info_init(ptdev);
return 0;


@ -4,8 +4,53 @@
#ifndef __PANTHOR_HW_H__
#define __PANTHOR_HW_H__
struct panthor_device;
#include "panthor_device.h"
#include "panthor_regs.h"
/**
* struct panthor_hw_ops - HW operations that are specific to a GPU
*/
struct panthor_hw_ops {
/** @soft_reset: Soft reset function pointer */
int (*soft_reset)(struct panthor_device *ptdev);
/** @l2_power_off: L2 power off function pointer */
void (*l2_power_off)(struct panthor_device *ptdev);
/** @l2_power_on: L2 power on function pointer */
int (*l2_power_on)(struct panthor_device *ptdev);
};
/**
* struct panthor_hw - GPU specific register mapping and functions
*/
struct panthor_hw {
/** @ops: Panthor HW specific operations */
struct panthor_hw_ops ops;
};
int panthor_hw_init(struct panthor_device *ptdev);
static inline int panthor_hw_soft_reset(struct panthor_device *ptdev)
{
return ptdev->hw->ops.soft_reset(ptdev);
}
static inline int panthor_hw_l2_power_on(struct panthor_device *ptdev)
{
return ptdev->hw->ops.l2_power_on(ptdev);
}
static inline void panthor_hw_l2_power_off(struct panthor_device *ptdev)
{
ptdev->hw->ops.l2_power_off(ptdev);
}
static inline bool panthor_hw_has_pwr_ctrl(struct panthor_device *ptdev)
{
return GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) >= 14;
}
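/*
 * Callers are expected to go through the wrappers above rather than
 * dereferencing ptdev->hw->ops directly, e.g.:
 *
 *	ret = panthor_hw_l2_power_on(ptdev);
 *	if (ret)
 *		return ret;
 *
 * which dispatches to panthor_gpu_l2_power_on() on arch 10-13 and to
 * panthor_pwr_l2_power_on() on arch 14+.
 */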
#endif /* __PANTHOR_HW_H__ */


@ -904,10 +904,9 @@ static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
{
struct panthor_device *ptdev = vm->ptdev;
struct io_pgtable_ops *ops = vm->pgtbl_ops;
u64 start_iova = iova;
u64 offset = 0;
drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size);
while (offset < size) {
size_t unmapped_sz = 0, pgcount;
size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);
@ -922,6 +921,12 @@ static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
panthor_vm_flush_range(vm, iova, offset + unmapped_sz);
return -EINVAL;
}
drm_dbg(&ptdev->base,
"unmap: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pgcnt=%zu, pgsz=%zu",
vm->as.id, start_iova, size, iova + offset,
unmapped_sz / pgsize, pgsize);
offset += unmapped_sz;
}
@ -937,6 +942,7 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
struct scatterlist *sgl;
struct io_pgtable_ops *ops = vm->pgtbl_ops;
u64 start_iova = iova;
u64 start_size = size;
int ret;
if (!size)
@ -956,15 +962,18 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
len = min_t(size_t, len, size);
size -= len;
drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx",
vm->as.id, iova, &paddr, len);
while (len) {
size_t pgcount, mapped = 0;
size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
GFP_KERNEL, &mapped);
drm_dbg(&ptdev->base,
"map: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pa=%pad, pgcnt=%zu, pgsz=%zu",
vm->as.id, start_iova, start_size, iova, &paddr,
mapped / pgsize, pgsize);
iova += mapped;
paddr += mapped;
len -= mapped;


@ -0,0 +1,549 @@
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2025 ARM Limited. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/cleanup.h>
#include <linux/iopoll.h>
#include <linux/wait.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include "panthor_device.h"
#include "panthor_hw.h"
#include "panthor_pwr.h"
#include "panthor_regs.h"
#define PWR_INTERRUPTS_MASK \
(PWR_IRQ_POWER_CHANGED_SINGLE | \
PWR_IRQ_POWER_CHANGED_ALL | \
PWR_IRQ_DELEGATION_CHANGED | \
PWR_IRQ_RESET_COMPLETED | \
PWR_IRQ_RETRACT_COMPLETED | \
PWR_IRQ_INSPECT_COMPLETED | \
PWR_IRQ_COMMAND_NOT_ALLOWED | \
PWR_IRQ_COMMAND_INVALID)
#define PWR_ALL_CORES_MASK GENMASK_U64(63, 0)
#define PWR_DOMAIN_MAX_BITS 16
#define PWR_TRANSITION_TIMEOUT_US (2ULL * USEC_PER_SEC)
#define PWR_RETRACT_TIMEOUT_US (2ULL * USEC_PER_MSEC)
#define PWR_RESET_TIMEOUT_MS 500
/**
* struct panthor_pwr - PWR_CONTROL block management data.
*/
struct panthor_pwr {
/** @irq: PWR irq. */
struct panthor_irq irq;
/** @reqs_lock: Lock protecting access to pending_reqs. */
spinlock_t reqs_lock;
/** @pending_reqs: Pending PWR requests. */
u32 pending_reqs;
/** @reqs_acked: PWR request wait queue. */
wait_queue_head_t reqs_acked;
};
static void panthor_pwr_irq_handler(struct panthor_device *ptdev, u32 status)
{
spin_lock(&ptdev->pwr->reqs_lock);
gpu_write(ptdev, PWR_INT_CLEAR, status);
if (unlikely(status & PWR_IRQ_COMMAND_NOT_ALLOWED))
drm_err(&ptdev->base, "PWR_IRQ: COMMAND_NOT_ALLOWED");
if (unlikely(status & PWR_IRQ_COMMAND_INVALID))
drm_err(&ptdev->base, "PWR_IRQ: COMMAND_INVALID");
if (status & ptdev->pwr->pending_reqs) {
ptdev->pwr->pending_reqs &= ~status;
wake_up_all(&ptdev->pwr->reqs_acked);
}
spin_unlock(&ptdev->pwr->reqs_lock);
}
PANTHOR_IRQ_HANDLER(pwr, PWR, panthor_pwr_irq_handler);
static void panthor_pwr_write_command(struct panthor_device *ptdev, u32 command, u64 args)
{
if (args)
gpu_write64(ptdev, PWR_CMDARG, args);
gpu_write(ptdev, PWR_COMMAND, command);
}
static bool reset_irq_raised(struct panthor_device *ptdev)
{
return gpu_read(ptdev, PWR_INT_RAWSTAT) & PWR_IRQ_RESET_COMPLETED;
}
static bool reset_pending(struct panthor_device *ptdev)
{
return (ptdev->pwr->pending_reqs & PWR_IRQ_RESET_COMPLETED);
}
static int panthor_pwr_reset(struct panthor_device *ptdev, u32 reset_cmd)
{
scoped_guard(spinlock_irqsave, &ptdev->pwr->reqs_lock) {
if (reset_pending(ptdev)) {
drm_WARN(&ptdev->base, 1, "Reset already pending");
} else {
ptdev->pwr->pending_reqs |= PWR_IRQ_RESET_COMPLETED;
gpu_write(ptdev, PWR_INT_CLEAR, PWR_IRQ_RESET_COMPLETED);
panthor_pwr_write_command(ptdev, reset_cmd, 0);
}
}
if (!wait_event_timeout(ptdev->pwr->reqs_acked, !reset_pending(ptdev),
msecs_to_jiffies(PWR_RESET_TIMEOUT_MS))) {
guard(spinlock_irqsave)(&ptdev->pwr->reqs_lock);
if (reset_pending(ptdev) && !reset_irq_raised(ptdev)) {
drm_err(&ptdev->base, "RESET timed out (0x%x)", reset_cmd);
return -ETIMEDOUT;
}
ptdev->pwr->pending_reqs &= ~PWR_IRQ_RESET_COMPLETED;
}
return 0;
}
static const char *get_domain_name(u8 domain)
{
switch (domain) {
case PWR_COMMAND_DOMAIN_L2:
return "L2";
case PWR_COMMAND_DOMAIN_TILER:
return "Tiler";
case PWR_COMMAND_DOMAIN_SHADER:
return "Shader";
case PWR_COMMAND_DOMAIN_BASE:
return "Base";
case PWR_COMMAND_DOMAIN_STACK:
return "Stack";
}
return "Unknown";
}
static u32 get_domain_base(u8 domain)
{
switch (domain) {
case PWR_COMMAND_DOMAIN_L2:
return PWR_L2_PRESENT;
case PWR_COMMAND_DOMAIN_TILER:
return PWR_TILER_PRESENT;
case PWR_COMMAND_DOMAIN_SHADER:
return PWR_SHADER_PRESENT;
case PWR_COMMAND_DOMAIN_BASE:
return PWR_BASE_PRESENT;
case PWR_COMMAND_DOMAIN_STACK:
return PWR_STACK_PRESENT;
}
return 0;
}
static u32 get_domain_ready_reg(u32 domain)
{
return get_domain_base(domain) + (PWR_L2_READY - PWR_L2_PRESENT);
}
static u32 get_domain_pwrtrans_reg(u32 domain)
{
return get_domain_base(domain) + (PWR_L2_PWRTRANS - PWR_L2_PRESENT);
}
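/*
 * All power domains share one register block layout, which is what makes
 * the offset arithmetic above valid. Worked example for the shader
 * domain: PWR_SHADER_PRESENT is 0xa00 and PWR_L2_READY - PWR_L2_PRESENT
 * is 0x8, so get_domain_ready_reg() yields 0xa08 == PWR_SHADER_READY.
 */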
static bool is_valid_domain(u32 domain)
{
return get_domain_base(domain) != 0;
}
static bool has_rtu(struct panthor_device *ptdev)
{
return ptdev->gpu_info.gpu_features & GPU_FEATURES_RAY_TRAVERSAL;
}
static u8 get_domain_subdomain(struct panthor_device *ptdev, u32 domain)
{
if (domain == PWR_COMMAND_DOMAIN_SHADER && has_rtu(ptdev))
return PWR_COMMAND_SUBDOMAIN_RTU;
return 0;
}
static int panthor_pwr_domain_wait_transition(struct panthor_device *ptdev, u32 domain,
u32 timeout_us)
{
u32 pwrtrans_reg = get_domain_pwrtrans_reg(domain);
u64 val;
int ret = 0;
ret = gpu_read64_poll_timeout(ptdev, pwrtrans_reg, val, !(PWR_ALL_CORES_MASK & val), 100,
timeout_us);
if (ret) {
drm_err(&ptdev->base, "%s domain power in transition, pwrtrans(0x%llx)",
get_domain_name(domain), val);
return ret;
}
return 0;
}
static void panthor_pwr_debug_info_show(struct panthor_device *ptdev)
{
drm_info(&ptdev->base, "GPU_FEATURES: 0x%016llx", gpu_read64(ptdev, GPU_FEATURES));
drm_info(&ptdev->base, "PWR_STATUS: 0x%016llx", gpu_read64(ptdev, PWR_STATUS));
drm_info(&ptdev->base, "L2_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_L2_PRESENT));
drm_info(&ptdev->base, "L2_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_L2_PWRTRANS));
drm_info(&ptdev->base, "L2_READY: 0x%016llx", gpu_read64(ptdev, PWR_L2_READY));
drm_info(&ptdev->base, "TILER_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_TILER_PRESENT));
drm_info(&ptdev->base, "TILER_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_TILER_PWRTRANS));
drm_info(&ptdev->base, "TILER_READY: 0x%016llx", gpu_read64(ptdev, PWR_TILER_READY));
drm_info(&ptdev->base, "SHADER_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_PRESENT));
drm_info(&ptdev->base, "SHADER_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_PWRTRANS));
drm_info(&ptdev->base, "SHADER_READY: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_READY));
}
static int panthor_pwr_domain_transition(struct panthor_device *ptdev, u32 cmd, u32 domain,
u64 mask, u32 timeout_us)
{
u32 ready_reg = get_domain_ready_reg(domain);
u32 pwr_cmd = PWR_COMMAND_DEF(cmd, domain, get_domain_subdomain(ptdev, domain));
u64 expected_val = 0;
u64 val;
int ret = 0;
if (drm_WARN_ON(&ptdev->base, !is_valid_domain(domain)))
return -EINVAL;
switch (cmd) {
case PWR_COMMAND_POWER_DOWN:
expected_val = 0;
break;
case PWR_COMMAND_POWER_UP:
expected_val = mask;
break;
default:
drm_err(&ptdev->base, "Invalid power domain transition command (0x%x)", cmd);
return -EINVAL;
}
ret = panthor_pwr_domain_wait_transition(ptdev, domain, timeout_us);
if (ret)
return ret;
/* domain already in target state, return early */
if ((gpu_read64(ptdev, ready_reg) & mask) == expected_val)
return 0;
panthor_pwr_write_command(ptdev, pwr_cmd, mask);
ret = gpu_read64_poll_timeout(ptdev, ready_reg, val, (mask & val) == expected_val, 100,
timeout_us);
if (ret) {
drm_err(&ptdev->base,
"timeout waiting on %s power domain transition, cmd(0x%x), arg(0x%llx)",
get_domain_name(domain), pwr_cmd, mask);
panthor_pwr_debug_info_show(ptdev);
return ret;
}
return 0;
}
#define panthor_pwr_domain_power_off(__ptdev, __domain, __mask, __timeout_us) \
panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_DOWN, __domain, __mask, \
__timeout_us)
#define panthor_pwr_domain_power_on(__ptdev, __domain, __mask, __timeout_us) \
panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_UP, __domain, __mask, __timeout_us)
/**
* retract_domain() - Retract control of a domain from the MCU
* @ptdev: Device.
* @domain: Domain to retract control of
*
* Retracting the L2 domain is not expected, since it is never delegated.
*
* Return:
* * 0 on success or if already retracted.
* * -EPERM if domain is L2.
* * A negative error code otherwise.
*/
static int retract_domain(struct panthor_device *ptdev, u32 domain)
{
const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_RETRACT, domain, 0);
const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);
const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain);
const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain);
u64 val;
int ret;
if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2))
return -EPERM;
ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, !(PWR_STATUS_RETRACT_PENDING & val),
0, PWR_RETRACT_TIMEOUT_US);
if (ret) {
drm_err(&ptdev->base, "%s domain retract pending", get_domain_name(domain));
return ret;
}
if (!(pwr_status & delegated_mask)) {
drm_dbg(&ptdev->base, "%s domain already retracted", get_domain_name(domain));
return 0;
}
panthor_pwr_write_command(ptdev, pwr_cmd, 0);
/*
* On successful retraction, the allow flag is set and the delegated
* flag is cleared.
*/
ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val,
((delegated_mask | allow_mask) & val) == allow_mask, 10,
PWR_TRANSITION_TIMEOUT_US);
if (ret) {
drm_err(&ptdev->base, "Retracting %s domain timeout, cmd(0x%x)",
get_domain_name(domain), pwr_cmd);
return ret;
}
return 0;
}
/**
* delegate_domain() - Delegate control of a domain to the MCU
* @ptdev: Device.
* @domain: Domain to delegate control of
*
* Delegating the L2 domain is prohibited.
*
* Return:
* * 0 on success or if already delegated.
* * -EPERM if domain is L2.
* * A negative error code otherwise.
*/
static int delegate_domain(struct panthor_device *ptdev, u32 domain)
{
const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_DELEGATE, domain, 0);
const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);
const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain);
const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain);
u64 val;
int ret;
if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2))
return -EPERM;
/* Already delegated, exit early */
if (pwr_status & delegated_mask)
return 0;
/* Check if the command is allowed before delegating. */
if (!(pwr_status & allow_mask)) {
drm_warn(&ptdev->base, "Delegating %s domain not allowed", get_domain_name(domain));
return -EPERM;
}
ret = panthor_pwr_domain_wait_transition(ptdev, domain, PWR_TRANSITION_TIMEOUT_US);
if (ret)
return ret;
panthor_pwr_write_command(ptdev, pwr_cmd, 0);
/*
* On successful delegation, the allow flag is cleared and the delegated
* flag is set.
*/
ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val,
((delegated_mask | allow_mask) & val) == delegated_mask,
10, PWR_TRANSITION_TIMEOUT_US);
if (ret) {
drm_err(&ptdev->base, "Delegating %s domain timeout, cmd(0x%x)",
get_domain_name(domain), pwr_cmd);
return ret;
}
return 0;
}
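/*
 * Ownership states of a delegatable domain, as reflected in PWR_STATUS
 * and driven by the two helpers above:
 *
 *	owner	ALLOWED flag	DELEGATED flag
 *	host	1		0
 *	MCU	0		1
 *
 * delegate_domain() waits for the first row to turn into the second,
 * retract_domain() for the opposite transition.
 */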
static int panthor_pwr_delegate_domains(struct panthor_device *ptdev)
{
int ret;
if (!ptdev->pwr)
return 0;
ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER);
if (ret)
return ret;
ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_TILER);
if (ret)
goto err_retract_shader;
return 0;
err_retract_shader:
retract_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER);
return ret;
}
/**
* panthor_pwr_domain_force_off() - Forcefully power down a domain.
* @ptdev: Device.
* @domain: Domain to forcefully power down.
*
* This function attempts to retract and power off the requested power
* domain. If retraction fails, the operation is aborted. If power-off
* fails, the domain remains retracted and under host control.
*
* Return: 0 on success or a negative error code on failure.
*/
static int panthor_pwr_domain_force_off(struct panthor_device *ptdev, u32 domain)
{
const u64 domain_ready = gpu_read64(ptdev, get_domain_ready_reg(domain));
int ret;
/* Domain already powered down, early exit. */
if (!domain_ready)
return 0;
/* Domain has to be in host control to issue power off command. */
ret = retract_domain(ptdev, domain);
if (ret)
return ret;
return panthor_pwr_domain_power_off(ptdev, domain, domain_ready, PWR_TRANSITION_TIMEOUT_US);
}
void panthor_pwr_unplug(struct panthor_device *ptdev)
{
unsigned long flags;
if (!ptdev->pwr)
return;
/* Make sure the IRQ handler is not running after that point. */
panthor_pwr_irq_suspend(&ptdev->pwr->irq);
/* Wake-up all waiters. */
spin_lock_irqsave(&ptdev->pwr->reqs_lock, flags);
ptdev->pwr->pending_reqs = 0;
wake_up_all(&ptdev->pwr->reqs_acked);
spin_unlock_irqrestore(&ptdev->pwr->reqs_lock, flags);
}
int panthor_pwr_init(struct panthor_device *ptdev)
{
struct panthor_pwr *pwr;
int err, irq;
if (!panthor_hw_has_pwr_ctrl(ptdev))
return 0;
pwr = drmm_kzalloc(&ptdev->base, sizeof(*pwr), GFP_KERNEL);
if (!pwr)
return -ENOMEM;
spin_lock_init(&pwr->reqs_lock);
init_waitqueue_head(&pwr->reqs_acked);
ptdev->pwr = pwr;
irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
if (irq < 0)
return irq;
err = panthor_request_pwr_irq(ptdev, &pwr->irq, irq, PWR_INTERRUPTS_MASK);
if (err)
return err;
return 0;
}
int panthor_pwr_reset_soft(struct panthor_device *ptdev)
{
if (!(gpu_read64(ptdev, PWR_STATUS) & PWR_STATUS_ALLOW_SOFT_RESET)) {
drm_err(&ptdev->base, "RESET_SOFT not allowed");
return -EOPNOTSUPP;
}
return panthor_pwr_reset(ptdev, PWR_COMMAND_RESET_SOFT);
}
void panthor_pwr_l2_power_off(struct panthor_device *ptdev)
{
const u64 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2);
const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);
/* Abort if L2 power off constraints are not satisfied */
if (!(pwr_status & l2_allow_mask)) {
drm_warn(&ptdev->base, "Power off L2 domain not allowed");
return;
}
/* It is expected that when halting the MCU, it would power down its
* delegated domains. However, an unresponsive or hung MCU may not do
* so, which is why we need to check and retract the domains back into
* host control to be powered down in the right order before powering
* down the L2.
*/
if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_TILER))
return;
if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_SHADER))
return;
panthor_pwr_domain_power_off(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present,
PWR_TRANSITION_TIMEOUT_US);
}
int panthor_pwr_l2_power_on(struct panthor_device *ptdev)
{
const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);
const u64 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2);
int ret;
if ((pwr_status & l2_allow_mask) == 0) {
drm_warn(&ptdev->base, "Power on L2 domain not allowed");
return -EPERM;
}
ret = panthor_pwr_domain_power_on(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present,
PWR_TRANSITION_TIMEOUT_US);
if (ret)
return ret;
/* Delegate control of the shader and tiler power domains to the MCU as
* it can better manage which shader/tiler cores need to be powered up
* or can be powered down based on currently running jobs.
*
* If the shader and tiler domains are already delegated to the MCU,
* this call would just return early.
*/
return panthor_pwr_delegate_domains(ptdev);
}
void panthor_pwr_suspend(struct panthor_device *ptdev)
{
if (!ptdev->pwr)
return;
panthor_pwr_irq_suspend(&ptdev->pwr->irq);
}
void panthor_pwr_resume(struct panthor_device *ptdev)
{
if (!ptdev->pwr)
return;
panthor_pwr_irq_resume(&ptdev->pwr->irq, PWR_INTERRUPTS_MASK);
}


@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2025 ARM Limited. All rights reserved. */
#ifndef __PANTHOR_PWR_H__
#define __PANTHOR_PWR_H__
struct panthor_device;
void panthor_pwr_unplug(struct panthor_device *ptdev);
int panthor_pwr_init(struct panthor_device *ptdev);
int panthor_pwr_reset_soft(struct panthor_device *ptdev);
void panthor_pwr_l2_power_off(struct panthor_device *ptdev);
int panthor_pwr_l2_power_on(struct panthor_device *ptdev);
void panthor_pwr_suspend(struct panthor_device *ptdev);
void panthor_pwr_resume(struct panthor_device *ptdev);
#endif /* __PANTHOR_PWR_H__ */


@ -74,6 +74,7 @@
#define GPU_FEATURES 0x60
#define GPU_FEATURES_RAY_INTERSECTION BIT(2)
#define GPU_FEATURES_RAY_TRAVERSAL BIT(5)
#define GPU_TIMESTAMP_OFFSET 0x88
#define GPU_CYCLE_COUNT 0x90
@ -209,4 +210,82 @@
#define CSF_DOORBELL(i) (0x80000 + ((i) * 0x10000))
#define CSF_GLB_DOORBELL_ID 0
/* PWR Control registers */
#define PWR_CONTROL_BASE 0x800
#define PWR_CTRL_REG(x) (PWR_CONTROL_BASE + (x))
#define PWR_INT_RAWSTAT PWR_CTRL_REG(0x0)
#define PWR_INT_CLEAR PWR_CTRL_REG(0x4)
#define PWR_INT_MASK PWR_CTRL_REG(0x8)
#define PWR_INT_STAT PWR_CTRL_REG(0xc)
#define PWR_IRQ_POWER_CHANGED_SINGLE BIT(0)
#define PWR_IRQ_POWER_CHANGED_ALL BIT(1)
#define PWR_IRQ_DELEGATION_CHANGED BIT(2)
#define PWR_IRQ_RESET_COMPLETED BIT(3)
#define PWR_IRQ_RETRACT_COMPLETED BIT(4)
#define PWR_IRQ_INSPECT_COMPLETED BIT(5)
#define PWR_IRQ_COMMAND_NOT_ALLOWED BIT(30)
#define PWR_IRQ_COMMAND_INVALID BIT(31)
#define PWR_STATUS PWR_CTRL_REG(0x20)
#define PWR_STATUS_ALLOW_L2 BIT_U64(0)
#define PWR_STATUS_ALLOW_TILER BIT_U64(1)
#define PWR_STATUS_ALLOW_SHADER BIT_U64(8)
#define PWR_STATUS_ALLOW_BASE BIT_U64(14)
#define PWR_STATUS_ALLOW_STACK BIT_U64(15)
#define PWR_STATUS_DOMAIN_ALLOWED(x) BIT_U64(x)
#define PWR_STATUS_DELEGATED_L2 BIT_U64(16)
#define PWR_STATUS_DELEGATED_TILER BIT_U64(17)
#define PWR_STATUS_DELEGATED_SHADER BIT_U64(24)
#define PWR_STATUS_DELEGATED_BASE BIT_U64(30)
#define PWR_STATUS_DELEGATED_STACK BIT_U64(31)
#define PWR_STATUS_DELEGATED_SHIFT 16
#define PWR_STATUS_DOMAIN_DELEGATED(x) BIT_U64((x) + PWR_STATUS_DELEGATED_SHIFT)
#define PWR_STATUS_ALLOW_SOFT_RESET BIT_U64(33)
#define PWR_STATUS_ALLOW_FAST_RESET BIT_U64(34)
#define PWR_STATUS_POWER_PENDING BIT_U64(41)
#define PWR_STATUS_RESET_PENDING BIT_U64(42)
#define PWR_STATUS_RETRACT_PENDING BIT_U64(43)
#define PWR_STATUS_INSPECT_PENDING BIT_U64(44)
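/*
 * The ALLOWED and DELEGATED flags are indexed by the PWR_COMMAND_DOMAIN_*
 * values, with DELEGATED shifted up by 16. Worked example for the shader
 * domain (index 8):
 *
 *	PWR_STATUS_DOMAIN_ALLOWED(8)   == BIT_U64(8)  == PWR_STATUS_ALLOW_SHADER
 *	PWR_STATUS_DOMAIN_DELEGATED(8) == BIT_U64(24) == PWR_STATUS_DELEGATED_SHADER
 */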
#define PWR_COMMAND PWR_CTRL_REG(0x28)
#define PWR_COMMAND_POWER_UP 0x10
#define PWR_COMMAND_POWER_DOWN 0x11
#define PWR_COMMAND_DELEGATE 0x20
#define PWR_COMMAND_RETRACT 0x21
#define PWR_COMMAND_RESET_SOFT 0x31
#define PWR_COMMAND_RESET_FAST 0x32
#define PWR_COMMAND_INSPECT 0xF0
#define PWR_COMMAND_DOMAIN_L2 0
#define PWR_COMMAND_DOMAIN_TILER 1
#define PWR_COMMAND_DOMAIN_SHADER 8
#define PWR_COMMAND_DOMAIN_BASE 14
#define PWR_COMMAND_DOMAIN_STACK 15
#define PWR_COMMAND_SUBDOMAIN_RTU BIT(0)
#define PWR_COMMAND_DEF(cmd, domain, subdomain) \
(((subdomain) << 16) | ((domain) << 8) | (cmd))
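/*
 * Worked example of the command encoding: powering up the shader domain
 * together with its RTU subdomain packs to
 *
 *	PWR_COMMAND_DEF(PWR_COMMAND_POWER_UP, PWR_COMMAND_DOMAIN_SHADER,
 *			PWR_COMMAND_SUBDOMAIN_RTU)
 *		== (1 << 16) | (8 << 8) | 0x10 == 0x10810
 */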
#define PWR_CMDARG PWR_CTRL_REG(0x30)
#define PWR_L2_PRESENT PWR_CTRL_REG(0x100)
#define PWR_L2_READY PWR_CTRL_REG(0x108)
#define PWR_L2_PWRTRANS PWR_CTRL_REG(0x110)
#define PWR_L2_PWRACTIVE PWR_CTRL_REG(0x118)
#define PWR_TILER_PRESENT PWR_CTRL_REG(0x140)
#define PWR_TILER_READY PWR_CTRL_REG(0x148)
#define PWR_TILER_PWRTRANS PWR_CTRL_REG(0x150)
#define PWR_TILER_PWRACTIVE PWR_CTRL_REG(0x158)
#define PWR_SHADER_PRESENT PWR_CTRL_REG(0x200)
#define PWR_SHADER_READY PWR_CTRL_REG(0x208)
#define PWR_SHADER_PWRTRANS PWR_CTRL_REG(0x210)
#define PWR_SHADER_PWRACTIVE PWR_CTRL_REG(0x218)
#define PWR_BASE_PRESENT PWR_CTRL_REG(0x380)
#define PWR_BASE_READY PWR_CTRL_REG(0x388)
#define PWR_BASE_PWRTRANS PWR_CTRL_REG(0x390)
#define PWR_BASE_PWRACTIVE PWR_CTRL_REG(0x398)
#define PWR_STACK_PRESENT PWR_CTRL_REG(0x3c0)
#define PWR_STACK_READY PWR_CTRL_REG(0x3c8)
#define PWR_STACK_PWRTRANS PWR_CTRL_REG(0x3d0)
#endif


@ -364,17 +364,20 @@ struct panthor_queue {
/** @name: DRM scheduler name for this queue. */
char *name;
/**
* @remaining_time: Time remaining before the job timeout expires.
*
* The job timeout is suspended when the queue is not scheduled by the
* FW. Every time we suspend the timer, we need to save the remaining
* time so we can restore it later on.
*/
unsigned long remaining_time;
/** @timeout: Queue timeout related fields. */
struct {
/** @timeout.work: Work executed when a queue timeout occurs. */
struct delayed_work work;
/** @timeout_suspended: True if the job timeout was suspended. */
bool timeout_suspended;
/**
* @timeout.remaining: Time remaining before a queue timeout.
*
* When the timer is running, this value is set to MAX_SCHEDULE_TIMEOUT.
* When the timer is suspended, it's set to the time remaining when the
* timer was suspended.
*/
unsigned long remaining;
} timeout;
/**
* @doorbell_id: Doorbell assigned to this queue.
@ -899,6 +902,10 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
if (IS_ERR_OR_NULL(queue))
return;
/* This should have been disabled before that point. */
drm_WARN_ON(&group->ptdev->base,
disable_delayed_work_sync(&queue->timeout.work));
if (queue->entity.fence_context)
drm_sched_entity_destroy(&queue->entity);
@ -1046,6 +1053,115 @@ group_unbind_locked(struct panthor_group *group)
return 0;
}
static bool
group_is_idle(struct panthor_group *group)
{
struct panthor_device *ptdev = group->ptdev;
u32 inactive_queues;
if (group->csg_id >= 0)
return ptdev->scheduler->csg_slots[group->csg_id].idle;
inactive_queues = group->idle_queues | group->blocked_queues;
return hweight32(inactive_queues) == group->queue_count;
}
static void
queue_reset_timeout_locked(struct panthor_queue *queue)
{
lockdep_assert_held(&queue->fence_ctx.lock);
/* Only push the deadline out while the timer is running; a suspended
 * timer is re-armed with its saved remaining time by
 * queue_resume_timeout().
 */
if (queue->timeout.remaining == MAX_SCHEDULE_TIMEOUT) {
mod_delayed_work(queue->scheduler.timeout_wq,
&queue->timeout.work,
msecs_to_jiffies(JOB_TIMEOUT_MS));
}
}
static bool
group_can_run(struct panthor_group *group)
{
return group->state != PANTHOR_CS_GROUP_TERMINATED &&
group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
!group->destroyed && group->fatal_queues == 0 &&
!group->timedout;
}
static bool
queue_timeout_is_suspended(struct panthor_queue *queue)
{
/* When running, the remaining time is set to MAX_SCHEDULE_TIMEOUT. */
return queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT;
}
static void
queue_suspend_timeout_locked(struct panthor_queue *queue)
{
unsigned long qtimeout, now;
struct panthor_group *group;
struct panthor_job *job;
bool timer_was_active;
lockdep_assert_held(&queue->fence_ctx.lock);
/* Already suspended, nothing to do. */
if (queue_timeout_is_suspended(queue))
return;
job = list_first_entry_or_null(&queue->fence_ctx.in_flight_jobs,
struct panthor_job, node);
group = job ? job->group : NULL;
/* If the queue is blocked and the group is idle, we want the timer to
* keep running: the group can't be unblocked by its other queues, so
* the unblock has to come from an external source, and we want to
* timebox that external signalling.
*/
if (group && group_can_run(group) &&
(group->blocked_queues & BIT(job->queue_idx)) &&
group_is_idle(group))
return;
now = jiffies;
qtimeout = queue->timeout.work.timer.expires;
/* Cancel the timer. */
timer_was_active = cancel_delayed_work(&queue->timeout.work);
if (!timer_was_active || !job)
queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
else if (time_after(qtimeout, now))
queue->timeout.remaining = qtimeout - now;
else
queue->timeout.remaining = 0;
if (WARN_ON_ONCE(queue->timeout.remaining > msecs_to_jiffies(JOB_TIMEOUT_MS)))
queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
}
static void
queue_suspend_timeout(struct panthor_queue *queue)
{
spin_lock(&queue->fence_ctx.lock);
queue_suspend_timeout_locked(queue);
spin_unlock(&queue->fence_ctx.lock);
}
static void
queue_resume_timeout(struct panthor_queue *queue)
{
spin_lock(&queue->fence_ctx.lock);
if (queue_timeout_is_suspended(queue)) {
mod_delayed_work(queue->scheduler.timeout_wq,
&queue->timeout.work,
queue->timeout.remaining);
queue->timeout.remaining = MAX_SCHEDULE_TIMEOUT;
}
spin_unlock(&queue->fence_ctx.lock);
}
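/*
 * A minimal view of the timer life cycle for a single job, based on the
 * call sites below:
 *
 *	queue_run_job()        -> queue_resume_timeout()  (arm, remaining = MAX)
 *	cs_slot_reset_locked() -> queue_suspend_timeout() (park, save remaining)
 *	cs_slot_prog_locked()  -> queue_resume_timeout()  (re-arm with remaining)
 *	last job completes     -> queue_suspend_timeout_locked() (reset to default)
 */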
/**
* cs_slot_prog_locked() - Program a queue slot
* @ptdev: Device.
@ -1084,10 +1200,8 @@ cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
CS_IDLE_EMPTY |
CS_STATE_MASK |
CS_EXTRACT_EVENT);
if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
queue->timeout_suspended = false;
}
if (queue->iface.input->insert != queue->iface.input->extract)
queue_resume_timeout(queue);
}
/**
@ -1114,14 +1228,7 @@ cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
CS_STATE_STOP,
CS_STATE_MASK);
/* If the queue is blocked, we want to keep the timeout running, so
* we can detect unbounded waits and kill the group when that happens.
*/
if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
queue->timeout_suspended = true;
WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
}
queue_suspend_timeout(queue);
return 0;
}
@ -1140,11 +1247,13 @@ csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
{
struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
struct panthor_fw_csg_iface *csg_iface;
u64 endpoint_req;
lockdep_assert_held(&ptdev->scheduler->lock);
csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
endpoint_req = panthor_fw_csg_endpoint_req_get(ptdev, csg_iface);
csg_slot->priority = CSG_EP_REQ_PRIORITY_GET(endpoint_req);
}
/**
@ -1304,6 +1413,7 @@ csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
struct panthor_csg_slot *csg_slot;
struct panthor_group *group;
u32 queue_mask = 0, i;
u64 endpoint_req;
lockdep_assert_held(&ptdev->scheduler->lock);
@ -1330,10 +1440,12 @@ csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
csg_iface->input->allow_compute = group->compute_core_mask;
csg_iface->input->allow_fragment = group->fragment_core_mask;
csg_iface->input->allow_other = group->tiler_core_mask;
csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
CSG_EP_REQ_TILER(group->max_tiler_cores) |
CSG_EP_REQ_PRIORITY(priority);
endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
CSG_EP_REQ_TILER(group->max_tiler_cores) |
CSG_EP_REQ_PRIORITY(priority);
panthor_fw_csg_endpoint_req_set(ptdev, csg_iface, endpoint_req);
csg_iface->input->config = panthor_vm_as(group->vm);
if (group->suspend_buf)
@ -1916,28 +2028,6 @@ tick_ctx_is_full(const struct panthor_scheduler *sched,
return ctx->group_count == sched->csg_slot_count;
}
static bool
group_is_idle(struct panthor_group *group)
{
struct panthor_device *ptdev = group->ptdev;
u32 inactive_queues;
if (group->csg_id >= 0)
return ptdev->scheduler->csg_slots[group->csg_id].idle;
inactive_queues = group->idle_queues | group->blocked_queues;
return hweight32(inactive_queues) == group->queue_count;
}
static bool
group_can_run(struct panthor_group *group)
{
return group->state != PANTHOR_CS_GROUP_TERMINATED &&
group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
!group->destroyed && group->fatal_queues == 0 &&
!group->timedout;
}
static void
tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
struct panthor_sched_tick_ctx *ctx,
@ -2231,9 +2321,9 @@ tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *c
continue;
}
panthor_fw_update_reqs(csg_iface, endpoint_req,
CSG_EP_REQ_PRIORITY(new_csg_prio),
CSG_EP_REQ_PRIORITY_MASK);
panthor_fw_csg_endpoint_req_update(ptdev, csg_iface,
CSG_EP_REQ_PRIORITY(new_csg_prio),
CSG_EP_REQ_PRIORITY_MASK);
csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
CSG_ENDPOINT_CONFIG);
@ -2619,6 +2709,7 @@ static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
static void queue_stop(struct panthor_queue *queue,
struct panthor_job *bad_job)
{
disable_delayed_work_sync(&queue->timeout.work);
drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
}
@ -2630,6 +2721,7 @@ static void queue_start(struct panthor_queue *queue)
list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
job->base.s_fence->parent = dma_fence_get(job->done_fence);
enable_delayed_work(&queue->timeout.work);
drm_sched_start(&queue->scheduler, 0);
}
@ -2696,7 +2788,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
{
struct panthor_scheduler *sched = ptdev->scheduler;
struct panthor_csg_slots_upd_ctx upd_ctx;
struct panthor_group *group;
u32 suspended_slots;
u32 i;
@ -2750,13 +2841,23 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
while (slot_mask) {
u32 csg_id = ffs(slot_mask) - 1;
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
struct panthor_group *group = csg_slot->group;
/* The terminate command timed out, but the soft-reset will
* terminate all active groups anyway, so force the state to
* terminated here.
*/
if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
if (group->state != PANTHOR_CS_GROUP_TERMINATED) {
group->state = PANTHOR_CS_GROUP_TERMINATED;
/* Reset the queue slots manually if the termination
* request failed.
*/
for (i = 0; i < group->queue_count; i++) {
if (group->queues[i])
cs_slot_reset_locked(ptdev, csg_id, i);
}
}
slot_mask &= ~BIT(csg_id);
}
}
@ -2786,8 +2887,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
for (i = 0; i < sched->csg_slot_count; i++) {
struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
struct panthor_group *group = csg_slot->group;
group = csg_slot->group;
if (!group)
continue;
@ -2916,35 +3017,47 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
xa_unlock(&gpool->xa);
}
static void group_sync_upd_work(struct work_struct *work)
static bool queue_check_job_completion(struct panthor_queue *queue)
{
struct panthor_group *group =
container_of(work, struct panthor_group, sync_upd_work);
struct panthor_syncobj_64b *syncobj = NULL;
struct panthor_job *job, *job_tmp;
bool cookie, progress = false;
LIST_HEAD(done_jobs);
u32 queue_idx;
bool cookie;
cookie = dma_fence_begin_signalling();
for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
struct panthor_queue *queue = group->queues[queue_idx];
struct panthor_syncobj_64b *syncobj;
spin_lock(&queue->fence_ctx.lock);
list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
if (!syncobj) {
struct panthor_group *group = job->group;
if (!queue)
continue;
syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));
spin_lock(&queue->fence_ctx.lock);
list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
if (syncobj->seqno < job->done_fence->seqno)
break;
list_move_tail(&job->node, &done_jobs);
dma_fence_signal_locked(job->done_fence);
syncobj = group->syncobjs->kmap +
(job->queue_idx * sizeof(*syncobj));
}
spin_unlock(&queue->fence_ctx.lock);
if (syncobj->seqno < job->done_fence->seqno)
break;
list_move_tail(&job->node, &done_jobs);
dma_fence_signal_locked(job->done_fence);
}
if (list_empty(&queue->fence_ctx.in_flight_jobs)) {
/* If we have no job left, we cancel the timer, and reset remaining
* time to its default so it can be restarted next time
* queue_resume_timeout() is called.
*/
queue_suspend_timeout_locked(queue);
/* If there's no job pending, we consider it progress to avoid a
* spurious timeout if the timeout handler and the sync update
* handler raced.
*/
progress = true;
} else if (!list_empty(&done_jobs)) {
queue_reset_timeout_locked(queue);
progress = true;
}
spin_unlock(&queue->fence_ctx.lock);
dma_fence_end_signalling(cookie);
list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
@ -2954,6 +3067,27 @@ static void group_sync_upd_work(struct work_struct *work)
panthor_job_put(&job->base);
}
return progress;
}
static void group_sync_upd_work(struct work_struct *work)
{
struct panthor_group *group =
container_of(work, struct panthor_group, sync_upd_work);
u32 queue_idx;
bool cookie;
cookie = dma_fence_begin_signalling();
for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
struct panthor_queue *queue = group->queues[queue_idx];
if (!queue)
continue;
queue_check_job_completion(queue);
}
dma_fence_end_signalling(cookie);
group_put(group);
}
@ -3201,17 +3335,6 @@ queue_run_job(struct drm_sched_job *sched_job)
queue->iface.input->insert = job->ringbuf.end;
if (group->csg_id < 0) {
/* If the queue is blocked, we want to keep the timeout running, so we
* can detect unbounded waits and kill the group when that happens.
* Otherwise, we suspend the timeout so the time we spend waiting for
* a CSG slot is not counted.
*/
if (!(group->blocked_queues & BIT(job->queue_idx)) &&
!queue->timeout_suspended) {
queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
queue->timeout_suspended = true;
}
group_schedule_locked(group, BIT(job->queue_idx));
} else {
gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
@ -3220,6 +3343,7 @@ queue_run_job(struct drm_sched_job *sched_job)
pm_runtime_get(ptdev->base.dev);
sched->pm.has_ref = true;
}
queue_resume_timeout(queue);
panthor_devfreq_record_busy(sched->ptdev);
}
@ -3269,7 +3393,6 @@ queue_timedout_job(struct drm_sched_job *sched_job)
mutex_unlock(&sched->lock);
queue_start(queue);
return DRM_GPU_SCHED_STAT_RESET;
}
@ -3312,6 +3435,17 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64));
}
static void queue_timeout_work(struct work_struct *work)
{
struct panthor_queue *queue = container_of(work, struct panthor_queue,
timeout.work.work);
bool progress;
progress = queue_check_job_completion(queue);
if (!progress)
drm_sched_fault(&queue->scheduler);
}
static struct panthor_queue *
group_create_queue(struct panthor_group *group,
const struct drm_panthor_queue_create *args,
@ -3328,7 +3462,7 @@ group_create_queue(struct panthor_group *group,
* their profiling status.
*/
.credit_limit = args->ringbuf_size / sizeof(u64),
.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
.timeout = MAX_SCHEDULE_TIMEOUT,
.timeout_wq = group->ptdev->reset.wq,
.dev = group->ptdev->base.dev,
};
@ -3350,6 +3484,8 @@ group_create_queue(struct panthor_group *group,
if (!queue)
return ERR_PTR(-ENOMEM);
queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
INIT_DELAYED_WORK(&queue->timeout.work, queue_timeout_work);
queue->fence_ctx.id = dma_fence_context_alloc(1);
spin_lock_init(&queue->fence_ctx.lock);
INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);


@ -202,7 +202,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct radeon_device *rdev = fb_helper->dev->dev_private;
const struct drm_format_info *format_info;
struct drm_mode_fb_cmd2 mode_cmd = { };
struct fb_info *info;
struct fb_info *info = fb_helper->info;
struct drm_gem_object *gobj;
struct radeon_bo *rbo;
struct drm_framebuffer *fb;
@ -243,13 +243,6 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
fb_helper->funcs = &radeon_fbdev_fb_helper_funcs;
fb_helper->fb = fb;
/* okay we have an object now allocate the framebuffer */
info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_drm_framebuffer_unregister_private;
}
info->fbops = &radeon_fbdev_fb_ops;
/* radeon resume is fragile and needs a vt switch to help it along */
@ -275,10 +268,6 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
return 0;
err_drm_framebuffer_unregister_private:
fb_helper->fb = NULL;
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
err_kfree:
kfree(fb);
err_radeon_fbdev_destroy_pinned_object:


@ -331,32 +331,29 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
struct device_node *np = dev->of_node;
dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dp->grf)) {
DRM_DEV_ERROR(dev, "failed to get rockchip,grf property\n");
return PTR_ERR(dp->grf);
}
if (IS_ERR(dp->grf))
return dev_err_probe(dev, PTR_ERR(dp->grf),
"failed to get rockchip,grf property\n");
dp->grfclk = devm_clk_get_optional(dev, "grf");
if (IS_ERR(dp->grfclk))
return dev_err_probe(dev, PTR_ERR(dp->grfclk), "failed to get grf clock\n");
return dev_err_probe(dev, PTR_ERR(dp->grfclk),
"failed to get grf clock\n");
dp->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dp->pclk)) {
DRM_DEV_ERROR(dev, "failed to get pclk property\n");
return PTR_ERR(dp->pclk);
}
if (IS_ERR(dp->pclk))
return dev_err_probe(dev, PTR_ERR(dp->pclk),
"failed to get pclk property\n");
dp->rst = devm_reset_control_get(dev, "dp");
if (IS_ERR(dp->rst)) {
DRM_DEV_ERROR(dev, "failed to get dp reset control\n");
return PTR_ERR(dp->rst);
}
if (IS_ERR(dp->rst))
return dev_err_probe(dev, PTR_ERR(dp->rst),
"failed to get dp reset control\n");
dp->apbrst = devm_reset_control_get_optional(dev, "apb");
if (IS_ERR(dp->apbrst)) {
DRM_DEV_ERROR(dev, "failed to get apb reset control\n");
return PTR_ERR(dp->apbrst);
}
if (IS_ERR(dp->apbrst))
return dev_err_probe(dev, PTR_ERR(dp->apbrst),
"failed to get apb reset control\n");
return 0;
}
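/*
 * The conversion above leans on dev_err_probe() combining logging and
 * error propagation in one statement, and staying silent on
 * -EPROBE_DEFER (it records the deferral reason instead), so deferred
 * probes no longer spam the log:
 *
 *	return dev_err_probe(dev, PTR_ERR(dp->pclk),
 *			     "failed to get pclk property\n");
 */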


@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-hdmi.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
@ -38,21 +39,16 @@
#define RK3576_HDMI_HDCP14_MEM_EN BIT(15)
#define RK3576_VO0_GRF_SOC_CON8 0x0020
#define RK3576_COLOR_FORMAT_MASK (0xf << 4)
#define RK3576_COLOR_DEPTH_MASK (0xf << 8)
#define RK3576_RGB (0 << 4)
#define RK3576_YUV422 (0x1 << 4)
#define RK3576_YUV444 (0x2 << 4)
#define RK3576_YUV420 (0x3 << 4)
#define RK3576_8BPC (0x0 << 8)
#define RK3576_10BPC (0x6 << 8)
#define RK3576_COLOR_DEPTH_MASK GENMASK(11, 8)
#define RK3576_8BPC 0x0
#define RK3576_10BPC 0x6
#define RK3576_COLOR_FORMAT_MASK GENMASK(7, 4)
#define RK3576_RGB 0x9
#define RK3576_YUV422 0x1
#define RK3576_YUV444 0x2
#define RK3576_YUV420 0x3
#define RK3576_CECIN_MASK BIT(3)
#define RK3576_VO0_GRF_SOC_CON12 0x0030
#define RK3576_GRF_OSDA_DLYN (0xf << 12)
#define RK3576_GRF_OSDA_DIV (0x7f << 1)
#define RK3576_GRF_OSDA_DLY_EN BIT(0)
#define RK3576_VO0_GRF_SOC_CON14 0x0038
#define RK3576_I2S_SEL_MASK BIT(0)
#define RK3576_SPDIF_SEL_MASK BIT(1)
@ -74,6 +70,12 @@
#define RK3588_HDMI1_LEVEL_INT BIT(24)
#define RK3588_GRF_VO1_CON3 0x000c
#define RK3588_GRF_VO1_CON6 0x0018
#define RK3588_COLOR_DEPTH_MASK GENMASK(7, 4)
#define RK3588_8BPC 0x0
#define RK3588_10BPC 0x6
#define RK3588_COLOR_FORMAT_MASK GENMASK(3, 0)
#define RK3588_RGB 0x0
#define RK3588_YUV420 0x3
#define RK3588_SCLIN_MASK BIT(9)
#define RK3588_SDAIN_MASK BIT(10)
#define RK3588_MODE_MASK BIT(11)
@ -92,14 +94,16 @@ struct rockchip_hdmi_qp {
struct rockchip_encoder encoder;
struct dw_hdmi_qp *hdmi;
struct phy *phy;
struct gpio_desc *enable_gpio;
struct gpio_desc *frl_enable_gpio;
struct delayed_work hpd_work;
int port_id;
const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops;
unsigned long long tmds_char_rate;
};
struct rockchip_hdmi_qp_ctrl_ops {
void (*io_init)(struct rockchip_hdmi_qp *hdmi);
void (*enc_init)(struct rockchip_hdmi_qp *hdmi, struct rockchip_crtc_state *state);
irqreturn_t (*irq_callback)(int irq, void *dev_id);
irqreturn_t (*hardirq_callback)(int irq, void *dev_id);
};
@ -115,23 +119,15 @@ static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder)
{
struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder);
struct drm_crtc *crtc = encoder->crtc;
unsigned long long rate;
/* Unconditionally switch to TMDS as FRL is not yet supported */
gpiod_set_value(hdmi->enable_gpio, 1);
gpiod_set_value(hdmi->frl_enable_gpio, 0);
if (crtc && crtc->state) {
rate = drm_hdmi_compute_mode_clock(&crtc->state->adjusted_mode,
8, HDMI_COLORSPACE_RGB);
/*
* FIXME: Temporary workaround to pass pixel clock rate
* to the PHY driver until phy_configure_opts_hdmi
* becomes available in the PHY API. See also the related
* comment in rk_hdptx_phy_power_on() from
* drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
*/
phy_set_bus_width(hdmi->phy, div_u64(rate, 100));
}
if (!crtc || !crtc->state)
return;
if (hdmi->ctrl_ops->enc_init)
hdmi->ctrl_ops->enc_init(hdmi, to_rockchip_crtc_state(crtc->state));
}
static int
@ -139,12 +135,29 @@ dw_hdmi_qp_rockchip_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder);
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
union phy_configure_opts phy_cfg = {};
int ret;
s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
s->output_type = DRM_MODE_CONNECTOR_HDMIA;
if (hdmi->tmds_char_rate == conn_state->hdmi.tmds_char_rate &&
s->output_bpc == conn_state->hdmi.output_bpc)
return 0;
return 0;
phy_cfg.hdmi.tmds_char_rate = conn_state->hdmi.tmds_char_rate;
phy_cfg.hdmi.bpc = conn_state->hdmi.output_bpc;
ret = phy_configure(hdmi->phy, &phy_cfg);
if (!ret) {
hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate;
s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
s->output_type = DRM_MODE_CONNECTOR_HDMIA;
s->output_bpc = conn_state->hdmi.output_bpc;
} else {
dev_err(hdmi->dev, "Failed to configure phy: %d\n", ret);
}
return ret;
}
static const struct
@ -375,15 +388,45 @@ static void dw_hdmi_qp_rk3588_io_init(struct rockchip_hdmi_qp *hdmi)
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
}
static void dw_hdmi_qp_rk3576_enc_init(struct rockchip_hdmi_qp *hdmi,
struct rockchip_crtc_state *state)
{
u32 val;
if (state->output_bpc == 10)
val = FIELD_PREP_WM16(RK3576_COLOR_DEPTH_MASK, RK3576_10BPC);
else
val = FIELD_PREP_WM16(RK3576_COLOR_DEPTH_MASK, RK3576_8BPC);
regmap_write(hdmi->vo_regmap, RK3576_VO0_GRF_SOC_CON8, val);
}
static void dw_hdmi_qp_rk3588_enc_init(struct rockchip_hdmi_qp *hdmi,
struct rockchip_crtc_state *state)
{
u32 val;
if (state->output_bpc == 10)
val = FIELD_PREP_WM16(RK3588_COLOR_DEPTH_MASK, RK3588_10BPC);
else
val = FIELD_PREP_WM16(RK3588_COLOR_DEPTH_MASK, RK3588_8BPC);
regmap_write(hdmi->vo_regmap,
hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
val);
}
static const struct rockchip_hdmi_qp_ctrl_ops rk3576_hdmi_ctrl_ops = {
.io_init = dw_hdmi_qp_rk3576_io_init,
.irq_callback = dw_hdmi_qp_rk3576_irq,
.enc_init = dw_hdmi_qp_rk3576_enc_init,
.irq_callback = dw_hdmi_qp_rk3576_irq,
.hardirq_callback = dw_hdmi_qp_rk3576_hardirq,
};
static const struct rockchip_hdmi_qp_ctrl_ops rk3588_hdmi_ctrl_ops = {
.io_init = dw_hdmi_qp_rk3588_io_init,
.irq_callback = dw_hdmi_qp_rk3588_irq,
.enc_init = dw_hdmi_qp_rk3588_enc_init,
.irq_callback = dw_hdmi_qp_rk3588_irq,
.hardirq_callback = dw_hdmi_qp_rk3588_hardirq,
};
@ -476,6 +519,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
plat_data.phy_ops = cfg->phy_ops;
plat_data.phy_data = hdmi;
plat_data.max_bpc = 10;
encoder = &hdmi->encoder.encoder;
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@ -515,11 +559,11 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
plat_data.ref_clk_rate = clk_get_rate(ref_clk);
clk_put(ref_clk);
hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable",
GPIOD_OUT_HIGH);
if (IS_ERR(hdmi->enable_gpio))
return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->enable_gpio),
"Failed to request enable GPIO\n");
hdmi->frl_enable_gpio = devm_gpiod_get_optional(hdmi->dev, "frl-enable",
GPIOD_OUT_LOW);
if (IS_ERR(hdmi->frl_enable_gpio))
return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->frl_enable_gpio),
"Failed to request FRL enable GPIO\n");
hdmi->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0);
if (IS_ERR(hdmi->phy))


@ -97,6 +97,9 @@ void rockchip_drm_dma_init_device(struct drm_device *drm_dev,
private->iommu_dev = ERR_PTR(-ENODEV);
else if (!private->iommu_dev)
private->iommu_dev = dev;
if (!IS_ERR(private->iommu_dev))
drm_dev_set_dma_dev(drm_dev, private->iommu_dev);
}
static int rockchip_drm_init_iommu(struct drm_device *drm_dev)


@ -102,7 +102,7 @@ enum vop2_afbc_format {
VOP2_AFBC_FMT_INVALID = -1,
};
#define VOP2_MAX_DCLK_RATE 600000000
#define VOP2_MAX_DCLK_RATE 600000000UL
/*
* bus-format types.
@ -1743,36 +1743,42 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
* Switch to HDMI PHY PLL as DCLK source for display modes up
* to 4K@60Hz, if available, otherwise keep using the system CRU.
*/
if ((vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) && clock <= VOP2_MAX_DCLK_RATE) {
drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
if (vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) {
unsigned long max_dclk = DIV_ROUND_CLOSEST_ULL(VOP2_MAX_DCLK_RATE * 8,
vcstate->output_bpc);
if (clock <= max_dclk) {
drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) {
if (!vop2->pll_hdmiphy0)
if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) {
if (!vop2->pll_hdmiphy0)
break;
if (!vp->dclk_src)
vp->dclk_src = clk_get_parent(vp->dclk);
ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0);
if (ret < 0)
drm_warn(vop2->drm,
"Could not switch to HDMI0 PHY PLL: %d\n",
ret);
break;
}
if (!vp->dclk_src)
vp->dclk_src = clk_get_parent(vp->dclk);
if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) {
if (!vop2->pll_hdmiphy1)
break;
ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0);
if (ret < 0)
drm_warn(vop2->drm,
"Could not switch to HDMI0 PHY PLL: %d\n", ret);
break;
}
if (!vp->dclk_src)
vp->dclk_src = clk_get_parent(vp->dclk);
if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) {
if (!vop2->pll_hdmiphy1)
ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1);
if (ret < 0)
drm_warn(vop2->drm,
"Could not switch to HDMI1 PHY PLL: %d\n",
ret);
break;
if (!vp->dclk_src)
vp->dclk_src = clk_get_parent(vp->dclk);
ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1);
if (ret < 0)
drm_warn(vop2->drm,
"Could not switch to HDMI1 PHY PLL: %d\n", ret);
break;
}
}
}
}


@ -1369,6 +1369,25 @@ static const struct vop2_regs_dump rk3588_regs_dump[] = {
},
};
/*
* phys_id identifies a main window (Cluster win or Smart win, not the
* sub-window of a cluster or a multi-area window) that can take part in
* overlay at the main overlay stage.
*/
static struct vop2_win *vop2_find_win_by_phys_id(struct vop2 *vop2, uint8_t phys_id)
{
struct vop2_win *win;
int i;
for (i = 0; i < vop2->data->win_size; i++) {
win = &vop2->win[i];
if (win->data->phys_id == phys_id)
return win;
}
return NULL;
}
static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
{
struct vop2 *vop2 = vp->vop2;
@ -1842,15 +1861,31 @@ static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config,
alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
}
static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
static int vop2_find_start_mixer_id_for_vp(struct vop2_video_port *vp)
{
struct vop2_video_port *vp;
int used_layer = 0;
struct vop2 *vop2 = vp->vop2;
struct vop2_win *win;
u32 layer_sel = vop2->old_layer_sel;
u32 used_layer = 0;
unsigned long win_mask = vp->win_mask;
unsigned long phys_id;
bool match;
int i;
for (i = 0; i < port_id; i++) {
vp = &vop2->vps[i];
used_layer += hweight32(vp->win_mask);
for (i = 0; i < 31; i += 4) {
match = false;
for_each_set_bit(phys_id, &win_mask, ROCKCHIP_VOP2_ESMART3) {
win = vop2_find_win_by_phys_id(vop2, phys_id);
if (win->data->layer_sel_id[vp->id] == ((layer_sel >> i) & 0xf)) {
match = true;
break;
}
}
if (!match)
used_layer += 1;
else
break;
}
return used_layer;
@@ -1935,7 +1970,7 @@ static void vop2_setup_alpha(struct vop2_video_port *vp)
u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE;
if (vop2->version <= VOP_VERSION_RK3588)
mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id);
mixer_id = vop2_find_start_mixer_id_for_vp(vp);
else
mixer_id = 0;
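
The reworked lookup no longer assumes ports occupy consecutive mixers in port-id order; it walks the previously programmed layer_sel register nibble by nibble (each 4-bit field selects one window) and counts how many mixers precede the first window owned by this port. A standalone sketch of that walk, with the hypothetical own_ids array standing in for the win_mask/layer_sel_id lookup in the real driver:

#include <stdbool.h>
#include <stdio.h>

/* Count mixers in front of the first window owned by this port; layer_sel
 * packs eight 4-bit window ids, lowest nibble first. */
static int find_start_mixer(unsigned int layer_sel,
			    const unsigned char *own_ids, int n_own)
{
	int used = 0;

	for (int i = 0; i < 31; i += 4) {
		unsigned int id = (layer_sel >> i) & 0xf;
		bool match = false;

		for (int j = 0; j < n_own; j++) {
			if (own_ids[j] == id) {
				match = true;
				break;
			}
		}

		if (!match)
			used++;		/* nibble belongs to another port */
		else
			break;		/* first own window reached */
	}

	return used;
}

int main(void)
{
	const unsigned char own[] = { 0x4, 0x5 };

	/* Nibbles 0x1 and 0x2 belong to another port, so this port's
	 * mixers start at index 2. */
	printf("%d\n", find_start_mixer(0x00005421u, own, 2));
	return 0;
}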

View File

@@ -25,6 +25,7 @@ tegra-drm-y := \
falcon.o \
vic.o \
nvdec.o \
nvjpg.o \
riscv.o
tegra-drm-y += trace.o

View File

@@ -1384,6 +1384,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra210-sor1", },
{ .compatible = "nvidia,tegra210-vic", },
{ .compatible = "nvidia,tegra210-nvdec", },
{ .compatible = "nvidia,tegra210-nvjpg", },
{ .compatible = "nvidia,tegra186-display", },
{ .compatible = "nvidia,tegra186-dc", },
{ .compatible = "nvidia,tegra186-sor", },
@@ -1422,6 +1423,7 @@ static struct platform_driver * const drivers[] = {
&tegra_gr3d_driver,
&tegra_vic_driver,
&tegra_nvdec_driver,
&tegra_nvjpg_driver,
};
static int __init host1x_drm_init(void)

View File

@@ -214,5 +214,6 @@ extern struct platform_driver tegra_gr2d_driver;
extern struct platform_driver tegra_gr3d_driver;
extern struct platform_driver tegra_vic_driver;
extern struct platform_driver tegra_nvdec_driver;
extern struct platform_driver tegra_nvjpg_driver;
#endif /* HOST1X_DRM_H */

View File

@@ -546,12 +546,19 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
/* horizontal back porch */
hbp = (mode->htotal - mode->hsync_end) * mul / div;
if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
hbp += hsw;
/* horizontal front porch */
hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
if (dsi->master || dsi->slave) {
hact /= 2;
hsw /= 2;
hbp /= 2;
hfp /= 2;
}
if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
hbp += hsw;
/* subtract packet overhead */
hsw -= 10;
hbp -= 14;
@@ -561,11 +568,6 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
/* set SOL delay (for non-burst mode only) */
tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
/* TODO: implement ganged mode */
} else {
u16 bytes;
@@ -587,29 +589,28 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
value = MIPI_DCS_WRITE_MEMORY_START << 8 |
MIPI_DCS_WRITE_MEMORY_CONTINUE;
tegra_dsi_writel(dsi, value, DSI_DCS_CMDS);
/* set SOL delay */
if (dsi->master || dsi->slave) {
unsigned long delay, bclk, bclk_ganged;
unsigned int lanes = state->lanes;
/* SOL to valid, valid to FIFO and FIFO write delay */
delay = 4 + 4 + 2;
delay = DIV_ROUND_UP(delay * mul, div * lanes);
/* FIFO read delay */
delay = delay + 6;
bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes);
bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);
value = bclk - bclk_ganged + delay + 20;
} else {
/* TODO: revisit for non-ganged mode */
value = 8 * mul / div;
}
tegra_dsi_writel(dsi, value, DSI_SOL_DELAY);
}
/* set SOL delay */
if (dsi->master || dsi->slave) {
unsigned long delay, bclk, bclk_ganged;
unsigned int lanes = state->lanes;
/* SOL to valid, valid to FIFO and FIFO write delay */
delay = 4 + 4 + 2;
delay = DIV_ROUND_UP(delay * mul, div * lanes);
/* FIFO read delay */
delay = delay + 6;
bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes);
bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);
value = bclk - bclk_ganged + delay + 20;
} else {
value = 8 * mul / div;
}
tegra_dsi_writel(dsi, value, DSI_SOL_DELAY);
if (dsi->slave) {
tegra_dsi_configure(dsi->slave, pipe, mode);
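
With ganged mode the line payload is split across two controllers, so the start-of-line delay has to absorb the difference between the full-line byte clock and the per-controller half, plus the fixed pipeline latencies named in the comments above. A standalone sketch of the computation being moved here (mul/div convert pixels to bytes for the pixel format, e.g. 3/1 for 24-bit RGB; the panel values below are illustrative, not from real hardware):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned long sol_delay_ganged(unsigned long htotal, unsigned long lanes,
				      unsigned long mul, unsigned long div)
{
	/* SOL-to-valid, valid-to-FIFO and FIFO write delay ... */
	unsigned long delay = DIV_ROUND_UP((4 + 4 + 2) * mul, div * lanes);
	unsigned long bclk, bclk_ganged;

	/* ... plus FIFO read delay */
	delay += 6;

	bclk = DIV_ROUND_UP(htotal * mul, div * lanes);
	bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);

	return bclk - bclk_ganged + delay + 20;
}

int main(void)
{
	/* Illustrative 24-bit RGB panel: htotal = 2048, 8 lanes total. */
	printf("SOL delay: %lu\n", sol_delay_ganged(2048, 8, 3, 1));
	return 0;
}

Moving this block out of the command-mode branch means the same ganged-aware delay is now programmed for video mode as well, instead of the fixed 8 * mul / div value removed earlier in the hunk.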

View File

@@ -73,10 +73,10 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct tegra_drm *tegra = helper->dev->dev_private;
struct drm_device *drm = helper->dev;
struct drm_mode_fb_cmd2 cmd = { 0 };
struct fb_info *info = helper->info;
unsigned int bytes_per_pixel;
struct drm_framebuffer *fb;
unsigned long offset;
struct fb_info *info;
struct tegra_bo *bo;
size_t size;
int err;
@@ -97,13 +97,6 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
if (IS_ERR(bo))
return PTR_ERR(bo);
info = drm_fb_helper_alloc_info(helper);
if (IS_ERR(info)) {
dev_err(drm->dev, "failed to allocate framebuffer info\n");
drm_gem_object_put(&bo->gem);
return PTR_ERR(info);
}
fb = tegra_fb_alloc(drm,
drm_get_format_info(drm, cmd.pixel_format, cmd.modifier[0]),
&cmd, &bo, 1);

View File

@@ -0,0 +1,330 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "drm.h"
#include "falcon.h"
struct nvjpg_config {
const char *firmware;
unsigned int version;
};
struct nvjpg {
struct falcon falcon;
void __iomem *regs;
struct tegra_drm_client client;
struct device *dev;
struct clk *clk;
/* Platform configuration */
const struct nvjpg_config *config;
};
static inline struct nvjpg *to_nvjpg(struct tegra_drm_client *client)
{
return container_of(client, struct nvjpg, client);
}
static int nvjpg_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct nvjpg *nvjpg = to_nvjpg(drm);
int err;
err = host1x_client_iommu_attach(client);
if (err < 0 && err != -ENODEV) {
dev_err(nvjpg->dev, "failed to attach to domain: %d\n", err);
return err;
}
err = tegra_drm_register_client(tegra, drm);
if (err < 0)
goto detach;
/*
* Inherit the DMA parameters (such as maximum segment size) from the
* parent host1x device.
*/
client->dev->dma_parms = client->host->dma_parms;
return 0;
detach:
host1x_client_iommu_detach(client);
return err;
}
static int nvjpg_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct nvjpg *nvjpg = to_nvjpg(drm);
int err;
/* avoid a dangling pointer just in case this disappears */
client->dev->dma_parms = NULL;
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
pm_runtime_dont_use_autosuspend(client->dev);
pm_runtime_force_suspend(client->dev);
host1x_client_iommu_detach(client);
if (client->group) {
dma_unmap_single(nvjpg->dev, nvjpg->falcon.firmware.phys,
nvjpg->falcon.firmware.size, DMA_TO_DEVICE);
tegra_drm_free(tegra, nvjpg->falcon.firmware.size,
nvjpg->falcon.firmware.virt,
nvjpg->falcon.firmware.iova);
} else {
dma_free_coherent(nvjpg->dev, nvjpg->falcon.firmware.size,
nvjpg->falcon.firmware.virt,
nvjpg->falcon.firmware.iova);
}
return 0;
}
static const struct host1x_client_ops nvjpg_client_ops = {
.init = nvjpg_init,
.exit = nvjpg_exit,
};
static int nvjpg_load_falcon_firmware(struct nvjpg *nvjpg)
{
struct host1x_client *client = &nvjpg->client.base;
struct tegra_drm *tegra = nvjpg->client.drm;
dma_addr_t iova;
size_t size;
void *virt;
int err;
if (nvjpg->falcon.firmware.virt)
return 0;
err = falcon_read_firmware(&nvjpg->falcon, nvjpg->config->firmware);
if (err < 0)
return err;
size = nvjpg->falcon.firmware.size;
if (!client->group) {
virt = dma_alloc_coherent(nvjpg->dev, size, &iova, GFP_KERNEL);
if (!virt)
return -ENOMEM;
} else {
virt = tegra_drm_alloc(tegra, size, &iova);
if (IS_ERR(virt))
return PTR_ERR(virt);
}
nvjpg->falcon.firmware.virt = virt;
nvjpg->falcon.firmware.iova = iova;
err = falcon_load_firmware(&nvjpg->falcon);
if (err < 0)
goto cleanup;
/*
* In this case we have received an IOVA from the shared domain, so we
* need to make sure to get the physical address so that the DMA API
* knows what memory pages to flush the cache for.
*/
if (client->group) {
dma_addr_t phys;
phys = dma_map_single(nvjpg->dev, virt, size, DMA_TO_DEVICE);
err = dma_mapping_error(nvjpg->dev, phys);
if (err < 0)
goto cleanup;
nvjpg->falcon.firmware.phys = phys;
}
return 0;
cleanup:
if (!client->group)
dma_free_coherent(nvjpg->dev, size, virt, iova);
else
tegra_drm_free(tegra, size, virt, iova);
return err;
}
static __maybe_unused int nvjpg_runtime_resume(struct device *dev)
{
struct nvjpg *nvjpg = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(nvjpg->clk);
if (err < 0)
return err;
usleep_range(20, 30);
err = nvjpg_load_falcon_firmware(nvjpg);
if (err < 0)
goto disable_clk;
err = falcon_boot(&nvjpg->falcon);
if (err < 0)
goto disable_clk;
return 0;
disable_clk:
clk_disable_unprepare(nvjpg->clk);
return err;
}
static __maybe_unused int nvjpg_runtime_suspend(struct device *dev)
{
struct nvjpg *nvjpg = dev_get_drvdata(dev);
clk_disable_unprepare(nvjpg->clk);
return 0;
}
static int nvjpg_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
*supported = false;
return 0;
}
static const struct tegra_drm_client_ops nvjpg_ops = {
.get_streamid_offset = NULL,
.can_use_memory_ctx = nvjpg_can_use_memory_ctx,
};
#define NVIDIA_TEGRA_210_NVJPG_FIRMWARE "nvidia/tegra210/nvjpg.bin"
static const struct nvjpg_config tegra210_nvjpg_config = {
.firmware = NVIDIA_TEGRA_210_NVJPG_FIRMWARE,
.version = 0x21,
};
static const struct of_device_id tegra_nvjpg_of_match[] = {
{ .compatible = "nvidia,tegra210-nvjpg", .data = &tegra210_nvjpg_config },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_nvjpg_of_match);
static int nvjpg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct nvjpg *nvjpg;
int err;
/* inherit DMA mask from host1x parent */
err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
if (err < 0) {
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
return err;
}
nvjpg = devm_kzalloc(dev, sizeof(*nvjpg), GFP_KERNEL);
if (!nvjpg)
return -ENOMEM;
nvjpg->config = of_device_get_match_data(dev);
nvjpg->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nvjpg->regs))
return PTR_ERR(nvjpg->regs);
nvjpg->clk = devm_clk_get(dev, "nvjpg");
if (IS_ERR(nvjpg->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(nvjpg->clk);
}
err = clk_set_rate(nvjpg->clk, ULONG_MAX);
if (err < 0) {
dev_err(&pdev->dev, "failed to set clock rate\n");
return err;
}
nvjpg->falcon.dev = dev;
nvjpg->falcon.regs = nvjpg->regs;
err = falcon_init(&nvjpg->falcon);
if (err < 0)
return err;
platform_set_drvdata(pdev, nvjpg);
INIT_LIST_HEAD(&nvjpg->client.base.list);
nvjpg->client.base.ops = &nvjpg_client_ops;
nvjpg->client.base.dev = dev;
nvjpg->client.base.class = HOST1X_CLASS_NVJPG;
nvjpg->dev = dev;
INIT_LIST_HEAD(&nvjpg->client.list);
nvjpg->client.version = nvjpg->config->version;
nvjpg->client.ops = &nvjpg_ops;
err = host1x_client_register(&nvjpg->client.base);
if (err < 0) {
dev_err(dev, "failed to register host1x client: %d\n", err);
goto exit_falcon;
}
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 500);
devm_pm_runtime_enable(dev);
return 0;
exit_falcon:
falcon_exit(&nvjpg->falcon);
return err;
}
static void nvjpg_remove(struct platform_device *pdev)
{
struct nvjpg *nvjpg = platform_get_drvdata(pdev);
host1x_client_unregister(&nvjpg->client.base);
falcon_exit(&nvjpg->falcon);
}
static const struct dev_pm_ops nvjpg_pm_ops = {
RUNTIME_PM_OPS(nvjpg_runtime_suspend, nvjpg_runtime_resume, NULL)
SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
struct platform_driver tegra_nvjpg_driver = {
.driver = {
.name = "tegra-nvjpg",
.of_match_table = tegra_nvjpg_of_match,
.pm = &nvjpg_pm_ops
},
.probe = nvjpg_probe,
.remove = nvjpg_remove,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVJPG_FIRMWARE);
#endif

View File

@@ -24,6 +24,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_plane_helper_test.o \
drm_probe_helper_test.o \
drm_rect_test.o \
drm_sysfb_modeset_test.o
drm_sysfb_modeset_test.o \
drm_fixp_test.o
CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)

View File

@@ -0,0 +1,71 @@
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*/
#include <kunit/test.h>
#include <drm/drm_fixed.h>
static void drm_test_sm2fixp(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, 0x7fffffffffffffffll, ((1ull << 63) - 1));
/* 1 */
KUNIT_EXPECT_EQ(test, drm_int2fixp(1), drm_sm2fixp(1ull << DRM_FIXED_POINT));
/* -1 */
KUNIT_EXPECT_EQ(test, drm_int2fixp(-1),
drm_sm2fixp((1ull << 63) | (1ull << DRM_FIXED_POINT)));
/* 0.5 */
KUNIT_EXPECT_EQ(test, drm_fixp_from_fraction(1, 2),
drm_sm2fixp(1ull << (DRM_FIXED_POINT - 1)));
/* -0.5 */
KUNIT_EXPECT_EQ(test, drm_fixp_from_fraction(-1, 2),
drm_sm2fixp((1ull << 63) | (1ull << (DRM_FIXED_POINT - 1))));
}
static void drm_test_int2fixp(struct kunit *test)
{
/* 1 */
KUNIT_EXPECT_EQ(test, 1ll << 32, drm_int2fixp(1));
/* -1 */
KUNIT_EXPECT_EQ(test, -(1ll << 32), drm_int2fixp(-1));
/* 1 + (-1) = 0 */
KUNIT_EXPECT_EQ(test, 0, drm_int2fixp(1) + drm_int2fixp(-1));
/* 1 / 2 */
KUNIT_EXPECT_EQ(test, 1ll << 31, drm_fixp_from_fraction(1, 2));
/* -0.5 */
KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(-1, 2));
/* (-1 / 2) + 1 = 0.5 */
KUNIT_EXPECT_EQ(test, 1ll << 31, drm_fixp_from_fraction(-1, 2) + drm_int2fixp(1));
/* (1 / 2) + (-1) = -0.5 */
KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(1, 2) + drm_int2fixp(-1));
/* (1 / 2) - 1 = -0.5 */
KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(1, 2) - drm_int2fixp(1));
}
static struct kunit_case drm_fixp_tests[] = {
KUNIT_CASE(drm_test_int2fixp),
KUNIT_CASE(drm_test_sm2fixp),
{ }
};
static struct kunit_suite drm_fixp_test_suite = {
.name = "drm_fixp",
.test_cases = drm_fixp_tests,
};
kunit_test_suite(drm_fixp_test_suite);
MODULE_AUTHOR("AMD");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_DESCRIPTION("Unit tests for drm_fixed.h");
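
drm_fixed.h stores values as two's-complement S31.32 (DRM_FIXED_POINT is 32), while drm_sm2fixp() accepts a sign-magnitude input whose bit 63 is the sign, which is what the expectations above encode. A standalone sketch of the conversion and of the constants being compared (sm2fixp here mirrors the kernel helper for illustration; it is not the kernel implementation itself):

#include <stdint.h>
#include <stdio.h>

#define DRM_FIXED_POINT 32

/* Sign-magnitude (bit 63 = sign) to two's-complement fixed point. */
static int64_t sm2fixp(uint64_t sm)
{
	int64_t mag = (int64_t)(sm & ~(1ull << 63));

	return (sm & (1ull << 63)) ? -mag : mag;
}

int main(void)
{
	/* 1.0 is 1 << 32; 0.5 is 1 << 31. */
	uint64_t sm = (1ull << 63) | (1ull << (DRM_FIXED_POINT - 1));

	printf("1.0  = %lld\n", 1ll << DRM_FIXED_POINT);
	printf("-0.5 = %lld (expected %lld)\n",
	       (long long)sm2fixp(sm), -(1ll << 31));	/* both are -0.5 */
	return 0;
}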

View File

@@ -652,7 +652,7 @@ static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
int err;
man = ttm_manager_type(priv->ttm_dev, mem_type);
man->move = dma_fence_get_stub();
man->eviction_fences[0] = dma_fence_get_stub();
bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
bo->type = bo_type;
@@ -669,7 +669,7 @@ static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
ttm_bo_fini(bo);
dma_fence_put(man->move);
dma_fence_put(man->eviction_fences[0]);
}
static const struct ttm_bo_validate_test_case ttm_bo_validate_wait_cases[] = {
@@ -733,9 +733,9 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
spin_lock_init(&fence_lock);
man = ttm_manager_type(priv->ttm_dev, fst_mem);
man->move = alloc_mock_fence(test);
man->eviction_fences[0] = alloc_mock_fence(test);
task = kthread_create(threaded_fence_signal, man->move, "move-fence-signal");
task = kthread_create(threaded_fence_signal, man->eviction_fences[0], "move-fence-signal");
if (IS_ERR(task))
KUNIT_FAIL(test, "Couldn't create move fence signal task\n");
@@ -743,7 +743,8 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
err = ttm_bo_validate(bo, placement_val, &ctx_val);
dma_resv_unlock(bo->base.resv);
dma_fence_wait_timeout(man->move, false, MAX_SCHEDULE_TIMEOUT);
dma_fence_wait_timeout(man->eviction_fences[0], false, MAX_SCHEDULE_TIMEOUT);
man->eviction_fences[0] = NULL;
KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size);

Some files were not shown because too many files have changed in this diff.