mirror of https://github.com/torvalds/linux.git
Compare commits
49 Commits
Comparing bc69ed9752...2061f18ad7
| SHA1 |
|---|
| 2061f18ad7 |
| deb879faa9 |
| 028bd4a146 |
| 16460bf96c |
| c7685d1110 |
| 86fafc584c |
| e73c226204 |
| 55a271a0f7 |
| 3f1c07fc21 |
| 860daa4b0d |
| 65db7a1f9c |
| 55b0f3cd09 |
| bf0fd73754 |
| 82caa1c881 |
| 3b7476e786 |
| 05df71544c |
| ed0ebbc89f |
| f00d02707d |
| 6f1e094fb6 |
| a78f1b6baf |
| ef10531681 |
| 730df5065e |
| 3e9b06559a |
| 4cd8a64b15 |
| e45b5df47b |
| 09b71a58ee |
| 7fcf459ac8 |
| faf07e611d |
| 020a0d8fea |
| e68407b6b0 |
| 6187221487 |
| 76b1a8aebe |
| cffc934c0d |
| d72312d730 |
| 3d98a7164d |
| 14a8d83cbe |
| bf213ac637 |
| 1f5556ec8b |
| bd45d46ffc |
| 5be29ebe9f |
| 73834d03a5 |
| 50a59230fa |
| 6ce0dd9f54 |
| 9891d2f79a |
| 1d779fa996 |
| 13f4d99582 |
| 095d495cb8 |
| 4ea303d9e9 |
| c9d869fb29 |
Documentation/devicetree/bindings/ata/eswin,eic7700-ahci.yaml (new file)

@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/eswin,eic7700-ahci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Eswin EIC7700 SoC SATA Controller
+
+maintainers:
+  - Yulin Lu <luyulin@eswincomputing.com>
+  - Huan He <hehuan1@eswincomputing.com>
+
+description:
+  AHCI SATA controller embedded into the EIC7700 SoC is based on the DWC AHCI
+  SATA v5.00a IP core.
+
+select:
+  properties:
+    compatible:
+      const: eswin,eic7700-ahci
+  required:
+    - compatible
+
+allOf:
+  - $ref: snps,dwc-ahci-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - const: eswin,eic7700-ahci
+      - const: snps,dwc-ahci
+
+  clocks:
+    minItems: 2
+    maxItems: 2
+
+  clock-names:
+    items:
+      - const: pclk
+      - const: aclk
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    const: arst
+
+  ports-implemented:
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+  - phys
+  - phy-names
+  - ports-implemented
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    sata@50420000 {
+        compatible = "eswin,eic7700-ahci", "snps,dwc-ahci";
+        reg = <0x50420000 0x10000>;
+        interrupt-parent = <&plic>;
+        interrupts = <58>;
+        clocks = <&clock 171>, <&clock 186>;
+        clock-names = "pclk", "aclk";
+        phys = <&sata_phy>;
+        phy-names = "sata-phy";
+        ports-implemented = <0x1>;
+        resets = <&reset 96>;
+        reset-names = "arst";
+    };
Documentation/devicetree/bindings/ata/snps,dwc-ahci.yaml

@@ -33,6 +33,10 @@ properties:
       - description: SPEAr1340 AHCI SATA device
         const: snps,spear-ahci

+  iommus:
+    minItems: 1
+    maxItems: 3
+
 patternProperties:
   "^sata-port@[0-9a-e]$":
     $ref: /schemas/ata/snps,dwc-ahci-common.yaml#/$defs/dwc-ahci-port
MAINTAINERS

@@ -27221,6 +27221,13 @@ L: virtualization@lists.linux.dev
 S:	Maintained
 F:	drivers/vfio/pci/virtio

+VFIO XE PCI DRIVER
+M:	Michał Winiarski <michal.winiarski@intel.com>
+L:	kvm@vger.kernel.org
+L:	intel-xe@lists.freedesktop.org
+S:	Supported
+F:	drivers/vfio/pci/xe
+
 VGA_SWITCHEROO
 R:	Lukas Wunner <lukas@wunner.de>
 S:	Maintained
drivers/ata/libata-core.c

@@ -4216,6 +4216,10 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
	/* Apacer models with LPM issues */
	{ "Apacer AS340*",		NULL,		ATA_QUIRK_NOLPM },

+	/* Silicon Motion models with LPM issues */
+	{ "MD619HXCLDE3TC",		"TCVAID",	ATA_QUIRK_NOLPM },
+	{ "MD619GXCLDE3TC",		"TCV35D",	ATA_QUIRK_NOLPM },
+
	/* These specific Samsung models/firmware-revs do not handle LPM well */
	{ "SAMSUNG MZMPC128HBFU-000MV",	"CXM14M1Q",	ATA_QUIRK_NOLPM },
	{ "SAMSUNG SSD PM830 mSATA *",	"CXM13D1Q",	ATA_QUIRK_NOLPM },
drivers/ata/libata-sff.c

@@ -3191,7 +3191,8 @@ void ata_sff_port_init(struct ata_port *ap)

 int __init ata_sff_init(void)
 {
-	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
+	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM | WQ_PERCPU,
+				     WQ_MAX_ACTIVE);
	if (!ata_sff_wq)
		return -ENOMEM;
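Both workqueue hunks in this range (ata_sff_wq here, tpm_dev_wq further down) make the same mechanical change: WQ_PERCPU is OR'ed into the flags while the queue otherwise behaves as before. This appears to be part of the tree-wide effort to move alloc_workqueue() toward unbound-by-default semantics, in which queues that rely on per-CPU execution must say so explicitly — a rationale inferred from the flag, not stated anywhere in this compare.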
drivers/ata/pata_it821x.c

@@ -75,6 +75,7 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <scsi/scsi_host.h>
 #include <linux/libata.h>

@@ -632,9 +633,9 @@ static void it821x_display_disk(struct ata_port *ap, int n, u8 *buf)
		cbl = "";

	if (mode)
-		snprintf(mbuf, 8, "%5s%d", mtype, mode - 1);
+		snprintf(mbuf, sizeof(mbuf), "%5s%d", mtype, mode - 1);
	else
-		strcpy(mbuf, "PIO");
+		strscpy(mbuf, "PIO");
	if (buf[52] == 4)
		ata_port_info(ap, "%d: %-6s %-8s %s %s\n",
			      n, mbuf, types[buf[52]], id, cbl);
drivers/ata/pata_pcmcia.c

@@ -344,6 +344,7 @@ static const struct pcmcia_device_id pcmcia_devices[] = {
	PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
+	PCMCIA_DEVICE_PROD_ID2("PCMCIA ATA/ATAPI Adapter", 0x888d7b73),
	PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
	PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
	PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
drivers/char/tpm/tpm-chip.c

@@ -230,42 +230,6 @@ struct tpm_chip *tpm_default_chip(void)
 }
 EXPORT_SYMBOL_GPL(tpm_default_chip);

-/**
- * tpm_find_get_ops() - find and reserve a TPM chip
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- *
- * Finds a TPM chip and reserves its class device and operations. The chip must
- * be released with tpm_put_ops() after use.
- * This function is for internal use only. It supports existing TPM callers
- * by accepting NULL, but those callers should be converted to pass in a chip
- * directly.
- *
- * Return:
- * A reserved &struct tpm_chip instance.
- * %NULL if a chip is not found.
- * %NULL if the chip is not available.
- */
-struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip)
-{
-	int rc;
-
-	if (chip) {
-		if (!tpm_try_get_ops(chip))
-			return chip;
-		return NULL;
-	}
-
-	chip = tpm_default_chip();
-	if (!chip)
-		return NULL;
-	rc = tpm_try_get_ops(chip);
-	/* release additional reference we got from tpm_default_chip() */
-	put_device(&chip->dev);
-	if (rc)
-		return NULL;
-	return chip;
-}
-
 /**
  * tpm_dev_release() - free chip memory and the device number
  * @dev: the character device for the TPM chip

@@ -282,7 +246,6 @@ static void tpm_dev_release(struct device *dev)
	kfree(chip->work_space.context_buf);
	kfree(chip->work_space.session_buf);
-	kfree(chip->allocated_banks);
 #ifdef CONFIG_TCG_TPM2_HMAC
	kfree(chip->auth);
 #endif
drivers/char/tpm/tpm-dev-common.c

@@ -275,7 +275,8 @@ void tpm_common_release(struct file *file, struct file_priv *priv)

 int __init tpm_dev_common_init(void)
 {
-	tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+	tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM | WQ_PERCPU,
+				     0);

	return !tpm_dev_wq ? -ENOMEM : 0;
 }
drivers/char/tpm/tpm-interface.c

@@ -313,10 +313,13 @@ int tpm_is_tpm2(struct tpm_chip *chip)
 {
	int rc;

-	chip = tpm_find_get_ops(chip);
	if (!chip)
		return -ENODEV;

+	rc = tpm_try_get_ops(chip);
+	if (rc)
+		return rc;
+
	rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;

	tpm_put_ops(chip);

@@ -338,10 +341,13 @@ int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
 {
	int rc;

-	chip = tpm_find_get_ops(chip);
	if (!chip)
		return -ENODEV;

+	rc = tpm_try_get_ops(chip);
+	if (rc)
+		return rc;
+
	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		rc = tpm2_pcr_read(chip, pcr_idx, digest, NULL);
	else

@@ -369,10 +375,13 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
	int rc;
	int i;

-	chip = tpm_find_get_ops(chip);
	if (!chip)
		return -ENODEV;

+	rc = tpm_try_get_ops(chip);
+	if (rc)
+		return rc;
+
	for (i = 0; i < chip->nr_allocated_banks; i++) {
		if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
			rc = -EINVAL;

@@ -492,10 +501,13 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
	if (!out || max > TPM_MAX_RNG_DATA)
		return -EINVAL;

-	chip = tpm_find_get_ops(chip);
	if (!chip)
		return -ENODEV;

+	rc = tpm_try_get_ops(chip);
+	if (rc)
+		return rc;
+
	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		rc = tpm2_get_random(chip, out, max);
	else
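Taken together, the four tpm-interface.c hunks apply one mechanical conversion: the NULL-tolerant tpm_find_get_ops() lookup is dropped and each exported helper reserves the chip itself. A minimal sketch of the resulting caller shape, assembled from the hunk context above (the trailing return falls outside the quoted context and is assumed):

	int tpm_is_tpm2(struct tpm_chip *chip)
	{
		int rc;

		if (!chip)			/* callers must now pass a chip */
			return -ENODEV;

		rc = tpm_try_get_ops(chip);	/* reserve class device and ops */
		if (rc)
			return rc;

		rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;

		tpm_put_ops(chip);		/* release the reservation */

		return rc;			/* assumed; outside the hunk context */
	}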
drivers/char/tpm/tpm.h

@@ -267,7 +267,6 @@ static inline void tpm_msleep(unsigned int delay_msec)
 int tpm_chip_bootstrap(struct tpm_chip *chip);
 int tpm_chip_start(struct tpm_chip *chip);
 void tpm_chip_stop(struct tpm_chip *chip);
-struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);

 struct tpm_chip *tpm_chip_alloc(struct device *dev,
				const struct tpm_class_ops *ops);
drivers/char/tpm/tpm1-cmd.c

@@ -799,11 +799,6 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
  */
 int tpm1_get_pcr_allocation(struct tpm_chip *chip)
 {
-	chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
-					GFP_KERNEL);
-	if (!chip->allocated_banks)
-		return -ENOMEM;
-
	chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
	chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
	chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
drivers/char/tpm/tpm2-cmd.c

@@ -550,11 +550,9 @@ ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
	nr_possible_banks = be32_to_cpup(
		(__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);

-	chip->allocated_banks = kcalloc(nr_possible_banks,
-					sizeof(*chip->allocated_banks),
-					GFP_KERNEL);
-	if (!chip->allocated_banks) {
+	if (nr_possible_banks > TPM2_MAX_PCR_BANKS) {
+		pr_err("tpm: out of bank capacity: %u > %u\n",
+		       nr_possible_banks, TPM2_MAX_PCR_BANKS);
		rc = -ENOMEM;
		goto out;
	}
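Read with the tpm-chip.c and tpm1-cmd.c hunks above, the allocation change is consistent: kfree(chip->allocated_banks) and both kcalloc() calls disappear, and tpm2_get_pcr_allocation() instead rejects firmware reporting more than TPM2_MAX_PCR_BANKS banks. That only works if chip->allocated_banks has become a fixed-capacity array embedded in struct tpm_chip; the struct change itself is not among the hunks shown here, so treat that reading as inferred.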
drivers/char/tpm/tpm_crb.c

@@ -179,6 +179,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete)
  *
  * @dev: crb device
  * @priv: crb private data
+ * @loc: locality
  *
  * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
  * The device should respond within TIMEOUT_C by clearing the bit.

@@ -233,6 +234,7 @@ static int crb_go_idle(struct tpm_chip *chip)
  *
  * @dev: crb device
  * @priv: crb private data
+ * @loc: locality
  *
  * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
  * and poll till the device acknowledge it by clearing the bit.

@@ -412,7 +414,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip)
 #ifdef CONFIG_ARM64
 /*
  * This is a TPM Command Response Buffer start method that invokes a
- * Secure Monitor Call to requrest the firmware to execute or cancel
+ * Secure Monitor Call to request the firmware to execute or cancel
  * a TPM 2.0 command.
  */
 static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
drivers/char/tpm/tpm_tis_core.c

@@ -265,8 +265,7 @@ static u8 tpm_tis_status(struct tpm_chip *chip)

		/*
		 * Dump stack for forensics, as invalid TPM_STS.x could be
-		 * potentially triggered by impaired tpm_try_get_ops() or
-		 * tpm_find_get_ops().
+		 * potentially triggered by impaired tpm_try_get_ops().
		 */
		dump_stack();
	}
drivers/gpu/drm/i915/Makefile

@@ -239,6 +239,8 @@ i915-y += \
	display/intel_cdclk.o \
	display/intel_cmtg.o \
	display/intel_color.o \
+	display/intel_colorop.o \
+	display/intel_color_pipeline.o \
	display/intel_combo_phy.o \
	display/intel_connector.o \
	display/intel_crtc.o \
drivers/gpu/drm/i915/display/intel_color.c

@@ -32,6 +32,8 @@
 #include "intel_display_utils.h"
 #include "intel_dsb.h"
 #include "intel_vrr.h"
+#include "skl_universal_plane.h"
+#include "skl_universal_plane_regs.h"

 struct intel_color_funcs {
	int (*color_check)(struct intel_atomic_state *state,

@@ -87,6 +89,14 @@ struct intel_color_funcs {
	 * Read config other than LUTs and CSCs, before them. Optional.
	 */
	void (*get_config)(struct intel_crtc_state *crtc_state);
+
+	/* Plane CSC*/
+	void (*load_plane_csc_matrix)(struct intel_dsb *dsb,
+				      const struct intel_plane_state *plane_state);
+
+	/* Plane Pre/Post CSC */
+	void (*load_plane_luts)(struct intel_dsb *dsb,
+				const struct intel_plane_state *plane_state);
 };

 #define CTM_COEFF_SIGN	(1ULL << 63)

@@ -609,6 +619,8 @@ static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits)
	if (CTM_COEFF_NEGATIVE(coeff))
		c = -c;

+	int_bits = max(int_bits, 1);
+
	c = clamp(c, -(s64)BIT(int_bits + frac_bits - 1),
		  (s64)(BIT(int_bits + frac_bits - 1) - 1));

@@ -3836,6 +3848,266 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
	}
 }

+static void
+xelpd_load_plane_csc_matrix(struct intel_dsb *dsb,
+			    const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	const struct drm_plane_state *state = &plane_state->uapi;
+	enum pipe pipe = to_intel_plane(state->plane)->pipe;
+	enum plane_id plane = to_intel_plane(state->plane)->id;
+	const struct drm_property_blob *blob = plane_state->hw.ctm;
+	struct drm_color_ctm_3x4 *ctm;
+	const u64 *input;
+	u16 coeffs[9] = {};
+	int i, j;
+
+	if (!icl_is_hdr_plane(display, plane) || !blob)
+		return;
+
+	ctm = blob->data;
+	input = ctm->matrix;
+
+	/*
+	 * Convert fixed point S31.32 input to format supported by the
+	 * hardware.
+	 */
+	for (i = 0, j = 0; i < ARRAY_SIZE(coeffs); i++) {
+		u64 abs_coeff = ((1ULL << 63) - 1) & input[j];
+
+		/*
+		 * Clamp input value to min/max supported by
+		 * hardware.
+		 */
+		abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+		/* sign bit */
+		if (CTM_COEFF_NEGATIVE(input[j]))
+			coeffs[i] |= 1 << 15;
+
+		if (abs_coeff < CTM_COEFF_0_125)
+			coeffs[i] |= (3 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 12);
+		else if (abs_coeff < CTM_COEFF_0_25)
+			coeffs[i] |= (2 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 11);
+		else if (abs_coeff < CTM_COEFF_0_5)
+			coeffs[i] |= (1 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 10);
+		else if (abs_coeff < CTM_COEFF_1_0)
+			coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+		else if (abs_coeff < CTM_COEFF_2_0)
+			coeffs[i] |= (7 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 8);
+		else
+			coeffs[i] |= (6 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 7);
+
+		/* Skip postoffs */
+		if (!((j + 2) % 4))
+			j += 2;
+		else
+			j++;
+	}
+
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 0),
+			   coeffs[0] << 16 | coeffs[1]);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 1),
+			   coeffs[2] << 16);
+
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 2),
+			   coeffs[3] << 16 | coeffs[4]);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 3),
+			   coeffs[5] << 16);
+
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 4),
+			   coeffs[6] << 16 | coeffs[7]);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 5),
+			   coeffs[8] << 16);
+
+	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 0), 0);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 1), 0);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 2), 0);
+
+	/*
+	 * Conversion from S31.32 to S0.12. BIT[12] is the signed bit
+	 */
+	intel_de_write_dsb(display, dsb,
+			   PLANE_CSC_POSTOFF(pipe, plane, 0),
+			   ctm_to_twos_complement(input[3], 0, 12));
+	intel_de_write_dsb(display, dsb,
+			   PLANE_CSC_POSTOFF(pipe, plane, 1),
+			   ctm_to_twos_complement(input[7], 0, 12));
+	intel_de_write_dsb(display, dsb,
+			   PLANE_CSC_POSTOFF(pipe, plane, 2),
+			   ctm_to_twos_complement(input[11], 0, 12));
+}
+
+static void
+xelpd_program_plane_pre_csc_lut(struct intel_dsb *dsb,
+				const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	const struct drm_plane_state *state = &plane_state->uapi;
+	enum pipe pipe = to_intel_plane(state->plane)->pipe;
+	enum plane_id plane = to_intel_plane(state->plane)->id;
+	const struct drm_color_lut32 *pre_csc_lut = plane_state->hw.degamma_lut->data;
+	u32 i, lut_size;
+
+	if (icl_is_hdr_plane(display, plane)) {
+		lut_size = 128;
+
+		intel_de_write_dsb(display, dsb,
+				   PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+				   PLANE_PAL_PREC_AUTO_INCREMENT);
+
+		if (pre_csc_lut) {
+			for (i = 0; i < lut_size; i++) {
+				u32 lut_val = drm_color_lut32_extract(pre_csc_lut[i].green, 24);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   lut_val);
+			}
+
+			/* Program the max register to clamp values > 1.0. */
+			/* TODO: Restrict to 0x7ffffff */
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   (1 << 24));
+			} while (i++ > 130);
+		} else {
+			for (i = 0; i < lut_size; i++) {
+				u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+			}
+
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   1 << 24);
+			} while (i++ < 130);
+		}
+
+		intel_de_write_dsb(display, dsb, PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+	}
+}
+
+static void
+xelpd_program_plane_post_csc_lut(struct intel_dsb *dsb,
+				 const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	const struct drm_plane_state *state = &plane_state->uapi;
+	enum pipe pipe = to_intel_plane(state->plane)->pipe;
+	enum plane_id plane = to_intel_plane(state->plane)->id;
+	const struct drm_color_lut32 *post_csc_lut = plane_state->hw.gamma_lut->data;
+	u32 i, lut_size, lut_val;
+
+	if (icl_is_hdr_plane(display, plane)) {
+		intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+				   PLANE_PAL_PREC_AUTO_INCREMENT);
+		/* TODO: Add macro */
+		intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0),
+				   PLANE_PAL_PREC_AUTO_INCREMENT);
+		if (post_csc_lut) {
+			lut_size = 32;
+			for (i = 0; i < lut_size; i++) {
+				lut_val = drm_color_lut32_extract(post_csc_lut[i].green, 24);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   lut_val);
+			}
+
+			/* Segment 2 */
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   (1 << 24));
+			} while (i++ < 34);
+		} else {
+			/*TODO: Add for segment 0 */
+			lut_size = 32;
+			for (i = 0; i < lut_size; i++) {
+				u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+			}
+
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   1 << 24);
+			} while (i++ < 34);
+		}
+
+		intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+		intel_de_write_dsb(display, dsb,
+				   PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0), 0);
+	}
+}
+
+static void
+xelpd_plane_load_luts(struct intel_dsb *dsb, const struct intel_plane_state *plane_state)
+{
+	if (plane_state->hw.degamma_lut)
+		xelpd_program_plane_pre_csc_lut(dsb, plane_state);
+
+	if (plane_state->hw.gamma_lut)
+		xelpd_program_plane_post_csc_lut(dsb, plane_state);
+}
+
+static u32 glk_3dlut_10(const struct drm_color_lut32 *color)
+{
+	return REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, drm_color_lut32_extract(color->red, 10)) |
+		REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, drm_color_lut32_extract(color->green, 10)) |
+		REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, drm_color_lut32_extract(color->blue, 10));
+}
+
+static void glk_load_lut_3d(struct intel_dsb *dsb,
+			    struct intel_crtc *crtc,
+			    const struct drm_property_blob *blob)
+{
+	struct intel_display *display = to_intel_display(crtc->base.dev);
+	const struct drm_color_lut32 *lut = blob->data;
+	int i, lut_size = drm_color_lut32_size(blob);
+	enum pipe pipe = crtc->pipe;
+
+	if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+		drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not loading LUTs\n",
+			crtc->base.base.id, crtc->base.name);
+		return;
+	}
+
+	intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), LUT_3D_AUTO_INCREMENT);
+	for (i = 0; i < lut_size; i++)
+		intel_de_write_dsb(display, dsb, LUT_3D_DATA(pipe), glk_3dlut_10(&lut[i]));
+	intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), 0);
+}
+
+static void glk_lut_3d_commit(struct intel_dsb *dsb, struct intel_crtc *crtc, bool enable)
+{
+	struct intel_display *display = to_intel_display(crtc);
+	enum pipe pipe = crtc->pipe;
+	u32 val = 0;
+
+	if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+		drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not committing change\n",
+			crtc->base.base.id, crtc->base.name);
+		return;
+	}
+
+	if (enable)
+		val = LUT_3D_ENABLE | LUT_3D_READY | LUT_3D_BIND_PLANE_1;
+
+	intel_de_write_dsb(display, dsb, LUT_3D_CTL(pipe), val);
+}
+
 static const struct intel_color_funcs chv_color_funcs = {
	.color_check = chv_color_check,
	.color_commit_arm = i9xx_color_commit_arm,

@@ -3883,6 +4155,8 @@ static const struct intel_color_funcs tgl_color_funcs = {
	.lut_equal = icl_lut_equal,
	.read_csc = icl_read_csc,
	.get_config = skl_get_config,
+	.load_plane_csc_matrix = xelpd_load_plane_csc_matrix,
+	.load_plane_luts = xelpd_plane_load_luts,
 };

 static const struct intel_color_funcs icl_color_funcs = {

@@ -3963,6 +4237,67 @@ static const struct intel_color_funcs ilk_color_funcs = {
	.get_config = ilk_get_config,
 };

+void intel_color_plane_commit_arm(struct intel_dsb *dsb,
+				  const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+
+	if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
+		glk_lut_3d_commit(dsb, crtc, !!plane_state->hw.lut_3d);
+}
+
+static void
+intel_color_load_plane_csc_matrix(struct intel_dsb *dsb,
+				  const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+
+	if (display->funcs.color->load_plane_csc_matrix)
+		display->funcs.color->load_plane_csc_matrix(dsb, plane_state);
+}
+
+static void
+intel_color_load_plane_luts(struct intel_dsb *dsb,
+			    const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+
+	if (display->funcs.color->load_plane_luts)
+		display->funcs.color->load_plane_luts(dsb, plane_state);
+}
+
+bool
+intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe)
+{
+	if (DISPLAY_VER(display) >= 12)
+		return pipe == PIPE_A || pipe == PIPE_B;
+	else
+		return false;
+}
+
+static void
+intel_color_load_3dlut(struct intel_dsb *dsb,
+		       const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+
+	if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
+		glk_load_lut_3d(dsb, crtc, plane_state->hw.lut_3d);
+}
+
+void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
+					const struct intel_plane_state *plane_state)
+{
+	if (plane_state->hw.ctm)
+		intel_color_load_plane_csc_matrix(dsb, plane_state);
+	if (plane_state->hw.degamma_lut || plane_state->hw.gamma_lut)
+		intel_color_load_plane_luts(dsb, plane_state);
+	if (plane_state->hw.lut_3d)
+		intel_color_load_3dlut(dsb, plane_state);
+}
+
 void intel_color_crtc_init(struct intel_crtc *crtc)
 {
	struct intel_display *display = to_intel_display(crtc);
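For reference, the coefficient packing in xelpd_load_plane_csc_matrix() above appears to reuse the sign/exponent/mantissa float format of the ILK pipe CSC: bit 15 is the sign, bits 14:12 select an exponent bucket, and the bucket chosen per comparison (CTM_COEFF_0_125 through CTM_COEFF_4_0, i.e. 0.125 to 4.0 in S31.32) determines how many fraction bits of the S31.32 input survive in the mantissa. This is an inference from the CTM_COEFF_* thresholds and the ILK_CSC_COEFF_FP() calls, not something the diff itself states.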
drivers/gpu/drm/i915/display/intel_color.h

@@ -13,7 +13,9 @@ struct intel_crtc_state;
 struct intel_crtc;
 struct intel_display;
 struct intel_dsb;
+struct intel_plane_state;
 struct drm_property_blob;
+enum pipe;

 void intel_color_init_hooks(struct intel_display *display);
 int intel_color_init(struct intel_display *display);

@@ -40,5 +42,9 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
			   const struct drm_property_blob *blob2,
			   bool is_pre_csc_lut);
 void intel_color_assert_luts(const struct intel_crtc_state *crtc_state);
+void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
+					const struct intel_plane_state *plane_state);
+void intel_color_plane_commit_arm(struct intel_dsb *dsb,
+				  const struct intel_plane_state *plane_state);
+bool intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe);

 #endif /* __INTEL_COLOR_H__ */
drivers/gpu/drm/i915/display/intel_color_pipeline.c (new file)

@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#include "intel_color.h"
+#include "intel_colorop.h"
+#include "intel_color_pipeline.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "skl_universal_plane.h"
+
+#define MAX_COLOR_PIPELINES 1
+#define PLANE_DEGAMMA_SIZE 128
+#define PLANE_GAMMA_SIZE 32
+
+static
+int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_enum_list *list,
+				     enum pipe pipe)
+{
+	struct drm_device *dev = plane->dev;
+	struct intel_display *display = to_intel_display(dev);
+	struct drm_colorop *prev_op;
+	struct intel_colorop *colorop;
+	int ret;
+
+	colorop = intel_colorop_create(INTEL_PLANE_CB_PRE_CSC_LUT);
+
+	ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
+						  PLANE_DEGAMMA_SIZE,
+						  DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+						  DRM_COLOROP_FLAG_ALLOW_BYPASS);
+
+	if (ret)
+		return ret;
+
+	list->type = colorop->base.base.id;
+	list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", colorop->base.base.id);
+
+	/* TODO: handle failures and clean up */
+	prev_op = &colorop->base;
+
+	if (DISPLAY_VER(display) >= 35 &&
+	    intel_color_crtc_has_3dlut(display, pipe) &&
+	    plane->type == DRM_PLANE_TYPE_PRIMARY) {
+		colorop = intel_colorop_create(INTEL_PLANE_CB_3DLUT);
+
+		ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, 17,
+						   DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
+						   true);
+		if (ret)
+			return ret;
+
+		drm_colorop_set_next_property(prev_op, &colorop->base);
+
+		prev_op = &colorop->base;
+	}
+
+	colorop = intel_colorop_create(INTEL_PLANE_CB_CSC);
+	ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane,
+					     DRM_COLOROP_FLAG_ALLOW_BYPASS);
+	if (ret)
+		return ret;
+
+	drm_colorop_set_next_property(prev_op, &colorop->base);
+	prev_op = &colorop->base;
+
+	colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT);
+	ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
+						  PLANE_GAMMA_SIZE,
+						  DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+						  DRM_COLOROP_FLAG_ALLOW_BYPASS);
+	if (ret)
+		return ret;
+
+	drm_colorop_set_next_property(prev_op, &colorop->base);
+
+	return 0;
+}
+
+int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe)
+{
+	struct drm_device *dev = plane->dev;
+	struct intel_display *display = to_intel_display(dev);
+	struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+	int len = 0;
+	int ret;
+
+	/* Currently expose pipeline only for HDR planes */
+	if (!icl_is_hdr_plane(display, to_intel_plane(plane)->id))
+		return 0;
+
+	/* Add pipeline consisting of transfer functions */
+	ret = _intel_color_pipeline_plane_init(plane, &pipelines[len], pipe);
+	if (ret)
+		return ret;
+	len++;
+
+	return drm_plane_create_color_pipeline_property(plane, pipelines, len);
+}
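As constructed above, the exposed pipeline chains the colorops in a fixed order: pre-CSC 1D LUT (128 entries, degamma) → optional 17-point (17³-entry) tetrahedral 3D LUT, created only for DISPLAY_VER >= 35 primary planes on pipes that have a 3D LUT → 3x4 CTM → post-CSC 1D LUT (32 entries, gamma), and the whole chain is currently offered only on HDR-capable planes.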
drivers/gpu/drm/i915/display/intel_color_pipeline.h (new file)

@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOR_PIPELINE_H__
+#define __INTEL_COLOR_PIPELINE_H__
+
+struct drm_plane;
+enum pipe;
+
+int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe);
+
+#endif /* __INTEL_COLOR_PIPELINE_H__ */
drivers/gpu/drm/i915/display/intel_color_regs.h

@@ -316,4 +316,33 @@
 #define   SKL_BOTTOM_COLOR_CSC_ENABLE	REG_BIT(30)
 #define SKL_BOTTOM_COLOR(pipe)		_MMIO_PIPE(pipe, _SKL_BOTTOM_COLOR_A, _SKL_BOTTOM_COLOR_B)

+/* 3D LUT */
+#define _LUT_3D_CTL_A			0x490A4
+#define _LUT_3D_CTL_B			0x491A4
+#define LUT_3D_CTL(pipe)		_MMIO_PIPE(pipe, _LUT_3D_CTL_A, _LUT_3D_CTL_B)
+#define   LUT_3D_ENABLE			REG_BIT(31)
+#define   LUT_3D_READY			REG_BIT(30)
+#define   LUT_3D_BINDING_MASK		REG_GENMASK(23, 22)
+#define   LUT_3D_BIND_PIPE		REG_FIELD_PREP(LUT_3D_BINDING_MASK, 0)
+#define   LUT_3D_BIND_PLANE_1		REG_FIELD_PREP(LUT_3D_BINDING_MASK, 1)
+#define   LUT_3D_BIND_PLANE_2		REG_FIELD_PREP(LUT_3D_BINDING_MASK, 2)
+#define   LUT_3D_BIND_PLANE_3		REG_FIELD_PREP(LUT_3D_BINDING_MASK, 3)
+
+#define _LUT_3D_INDEX_A			0x490A8
+#define _LUT_3D_INDEX_B			0x491A8
+#define LUT_3D_INDEX(pipe)		_MMIO_PIPE(pipe, _LUT_3D_INDEX_A, _LUT_3D_INDEX_B)
+#define   LUT_3D_AUTO_INCREMENT		REG_BIT(13)
+#define   LUT_3D_INDEX_VALUE_MASK	REG_GENMASK(12, 0)
+#define   LUT_3D_INDEX_VALUE(x)		REG_FIELD_PREP(LUT_3D_INDEX_VALUE_MASK, (x))
+
+#define _LUT_3D_DATA_A			0x490AC
+#define _LUT_3D_DATA_B			0x491AC
+#define LUT_3D_DATA(pipe)		_MMIO_PIPE(pipe, _LUT_3D_DATA_A, _LUT_3D_DATA_B)
+#define   LUT_3D_DATA_RED_MASK		REG_GENMASK(29, 20)
+#define   LUT_3D_DATA_GREEN_MASK	REG_GENMASK(19, 10)
+#define   LUT_3D_DATA_BLUE_MASK		REG_GENMASK(9, 0)
+#define   LUT_3D_DATA_RED(x)		REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, (x))
+#define   LUT_3D_DATA_GREEN(x)		REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, (x))
+#define   LUT_3D_DATA_BLUE(x)		REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, (x))
+
 #endif /* __INTEL_COLOR_REGS_H__ */
drivers/gpu/drm/i915/display/intel_colorop.c (new file)

@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#include "intel_colorop.h"
+
+struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop)
+{
+	return container_of(colorop, struct intel_colorop, base);
+}
+
+struct intel_colorop *intel_colorop_alloc(void)
+{
+	struct intel_colorop *colorop;
+
+	colorop = kzalloc(sizeof(*colorop), GFP_KERNEL);
+	if (!colorop)
+		return ERR_PTR(-ENOMEM);
+
+	return colorop;
+}
+
+struct intel_colorop *intel_colorop_create(enum intel_color_block id)
+{
+	struct intel_colorop *colorop;
+
+	colorop = intel_colorop_alloc();
+
+	if (IS_ERR(colorop))
+		return colorop;
+
+	colorop->id = id;
+
+	return colorop;
+}
drivers/gpu/drm/i915/display/intel_colorop.h (new file)

@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOROP_H__
+#define __INTEL_COLOROP_H__
+
+#include "intel_display_types.h"
+
+struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop);
+struct intel_colorop *intel_colorop_alloc(void);
+struct intel_colorop *intel_colorop_create(enum intel_color_block id);
+
+#endif /* __INTEL_COLOROP_H__ */
drivers/gpu/drm/i915/display/intel_display.c

@@ -7304,6 +7304,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
+	unsigned int size = new_crtc_state->plane_color_changed ? 8192 : 1024;

	if (!new_crtc_state->use_flipq &&
	    !new_crtc_state->use_dsb &&

@@ -7314,10 +7315,12 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
	 * Rough estimate:
	 * ~64 registers per each plane * 8 planes = 512
	 * Double that for pipe stuff and other overhead.
+	 * ~4913 registers for 3DLUT
+	 * ~200 color registers * 3 HDR planes
	 */
	new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
						       new_crtc_state->use_dsb ||
-						       new_crtc_state->use_flipq ? 1024 : 16);
+						       new_crtc_state->use_flipq ? size : 16);
	if (!new_crtc_state->dsb_commit) {
		new_crtc_state->use_flipq = false;
		new_crtc_state->use_dsb = false;
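A quick budget check on the figures in that comment: the doubled base estimate is 2 × 512 = 1024 writes, the 3D LUT adds 17³ = 4913, and three HDR planes at ~200 color registers add ~600, for roughly 6,500 DSB writes in the worst case — comfortably inside the 8192-entry buffer selected when plane_color_changed is set, while commits without plane color changes keep the old 1024.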
drivers/gpu/drm/i915/display/intel_display_limits.h

@@ -138,4 +138,13 @@ enum hpd_pin {
	HPD_NUM_PINS
 };

+enum intel_color_block {
+	INTEL_PLANE_CB_PRE_CSC_LUT,
+	INTEL_PLANE_CB_CSC,
+	INTEL_PLANE_CB_POST_CSC_LUT,
+	INTEL_PLANE_CB_3DLUT,
+
+	INTEL_CB_MAX
+};
+
 #endif /* __INTEL_DISPLAY_LIMITS_H__ */
drivers/gpu/drm/i915/display/intel_display_types.h

@@ -646,6 +646,7 @@ struct intel_plane_state {
		enum drm_color_encoding color_encoding;
		enum drm_color_range color_range;
		enum drm_scaling_filter scaling_filter;
+		struct drm_property_blob *ctm, *degamma_lut, *gamma_lut, *lut_3d;
	} hw;

	struct i915_vma *ggtt_vma;

@@ -1391,6 +1392,9 @@ struct intel_crtc_state {
		u8 silence_period_sym_clocks;
		u8 lfps_half_cycle_num_of_syms;
	} alpm_state;
+
+	/* to track changes in plane color blocks */
+	bool plane_color_changed;
 };

 enum intel_pipe_crc_source {

@@ -1985,6 +1989,11 @@ struct intel_dp_mst_encoder {
	struct intel_connector *connector;
 };

+struct intel_colorop {
+	struct drm_colorop base;
+	enum intel_color_block id;
+};
+
 static inline struct intel_encoder *
 intel_attached_encoder(struct intel_connector *connector)
 {
drivers/gpu/drm/i915/display/intel_plane.c

@@ -49,6 +49,7 @@
 #include "i9xx_plane_regs.h"
 #include "intel_cdclk.h"
 #include "intel_cursor.h"
+#include "intel_colorop.h"
 #include "intel_display_rps.h"
 #include "intel_display_trace.h"
 #include "intel_display_types.h"

@@ -336,6 +337,58 @@ intel_plane_copy_uapi_plane_damage(struct intel_plane_state *new_plane_state,
	*damage = drm_plane_state_src(&new_uapi_plane_state->uapi);
 }

+static bool
+intel_plane_colorop_replace_blob(struct intel_plane_state *plane_state,
+				 struct intel_colorop *intel_colorop,
+				 struct drm_property_blob *blob)
+{
+	if (intel_colorop->id == INTEL_PLANE_CB_CSC)
+		return drm_property_replace_blob(&plane_state->hw.ctm, blob);
+	else if (intel_colorop->id == INTEL_PLANE_CB_PRE_CSC_LUT)
+		return drm_property_replace_blob(&plane_state->hw.degamma_lut, blob);
+	else if (intel_colorop->id == INTEL_PLANE_CB_POST_CSC_LUT)
+		return drm_property_replace_blob(&plane_state->hw.gamma_lut, blob);
+	else if (intel_colorop->id == INTEL_PLANE_CB_3DLUT)
+		return drm_property_replace_blob(&plane_state->hw.lut_3d, blob);
+
+	return false;
+}
+
+static void
+intel_plane_color_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+					const struct intel_plane_state *from_plane_state,
+					struct intel_crtc *crtc)
+{
+	struct drm_colorop *iter_colorop, *colorop;
+	struct drm_colorop_state *new_colorop_state;
+	struct drm_atomic_state *state = plane_state->uapi.state;
+	struct intel_colorop *intel_colorop;
+	struct drm_property_blob *blob;
+	struct intel_atomic_state *intel_atomic_state = to_intel_atomic_state(state);
+	struct intel_crtc_state *new_crtc_state = intel_atomic_state ?
+		intel_atomic_get_new_crtc_state(intel_atomic_state, crtc) : NULL;
+	bool changed = false;
+	int i = 0;
+
+	iter_colorop = plane_state->uapi.color_pipeline;
+
+	while (iter_colorop) {
+		for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+			if (new_colorop_state->colorop == iter_colorop) {
+				blob = new_colorop_state->bypass ? NULL : new_colorop_state->data;
+				intel_colorop = to_intel_colorop(colorop);
+				changed |= intel_plane_colorop_replace_blob(plane_state,
+									    intel_colorop,
+									    blob);
+			}
+		}
+		iter_colorop = iter_colorop->next;
+	}
+
+	if (new_crtc_state && changed)
+		new_crtc_state->plane_color_changed = true;
+}
+
 void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
				       const struct intel_plane_state *from_plane_state,
				       struct intel_crtc *crtc)

@@ -364,6 +417,8 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,

	plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi);
	plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi);
+
+	intel_plane_color_copy_uapi_to_hw_state(plane_state, from_plane_state, crtc);
 }

 void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
drivers/gpu/drm/i915/display/skl_universal_plane.c

@@ -11,6 +11,8 @@

 #include "pxp/intel_pxp.h"
 #include "intel_bo.h"
+#include "intel_color.h"
+#include "intel_color_pipeline.h"
 #include "intel_de.h"
 #include "intel_display_irq.h"
 #include "intel_display_regs.h"

@@ -1275,6 +1277,18 @@ static u32 glk_plane_color_ctl(const struct intel_plane_state *plane_state)
	if (plane_state->force_black)
		plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;

+	if (plane_state->hw.degamma_lut)
+		plane_color_ctl |= PLANE_COLOR_PRE_CSC_GAMMA_ENABLE;
+
+	if (plane_state->hw.ctm)
+		plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
+
+	if (plane_state->hw.gamma_lut) {
+		plane_color_ctl &= ~PLANE_COLOR_PLANE_GAMMA_DISABLE;
+		if (drm_color_lut32_size(plane_state->hw.gamma_lut) != 32)
+			plane_color_ctl |= PLANE_COLOR_POST_CSC_GAMMA_MULTSEG_ENABLE;
+	}
+
	return plane_color_ctl;
 }

@@ -1556,6 +1570,8 @@ icl_plane_update_noarm(struct intel_dsb *dsb,
	plane_color_ctl = plane_state->color_ctl |
		glk_plane_color_ctl_crtc(crtc_state);

+	intel_color_plane_program_pipeline(dsb, plane_state);
+
	/* The scaler will handle the output position */
	if (plane_state->scaler_id >= 0) {
		crtc_x = 0;

@@ -1657,6 +1673,8 @@ icl_plane_update_arm(struct intel_dsb *dsb,

	icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);

+	intel_color_plane_commit_arm(dsb, plane_state);
+
	/*
	 * In order to have FBC for fp16 formats pixel normalizer block must be
	 * active. Check if pixel normalizer block need to be enabled for FBC.

@@ -3001,6 +3019,9 @@ skl_universal_plane_create(struct intel_display *display,
					  DRM_COLOR_YCBCR_BT709,
					  DRM_COLOR_YCBCR_LIMITED_RANGE);

+	if (DISPLAY_VER(display) >= 12)
+		intel_color_pipeline_plane_init(&plane->base, pipe);
+
	drm_plane_create_alpha_property(&plane->base);
	drm_plane_create_blend_mode_property(&plane->base,
					     BIT(DRM_MODE_BLEND_PIXEL_NONE) |
drivers/gpu/drm/i915/display/skl_universal_plane_regs.h

@@ -254,6 +254,8 @@
 #define   PLANE_COLOR_PIPE_CSC_ENABLE		REG_BIT(23) /* Pre-ICL */
 #define   PLANE_COLOR_PLANE_CSC_ENABLE		REG_BIT(21) /* ICL+ */
 #define   PLANE_COLOR_INPUT_CSC_ENABLE		REG_BIT(20) /* ICL+ */
+#define   PLANE_COLOR_POST_CSC_GAMMA_MULTSEG_ENABLE	REG_BIT(15) /* TGL+ */
+#define   PLANE_COLOR_PRE_CSC_GAMMA_ENABLE	REG_BIT(14)
 #define   PLANE_COLOR_CSC_MODE_MASK		REG_GENMASK(19, 17)
 #define   PLANE_COLOR_CSC_MODE_BYPASS			REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 0)
 #define   PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601		REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 1)

@@ -290,6 +292,119 @@
					    _PLANE_INPUT_CSC_POSTOFF_HI_1_A, _PLANE_INPUT_CSC_POSTOFF_HI_1_B, \
					    _PLANE_INPUT_CSC_POSTOFF_HI_2_A, _PLANE_INPUT_CSC_POSTOFF_HI_2_B)

+#define _MMIO_PLANE_GAMC(plane, i, a, b)	_MMIO(_PIPE(plane, a, b) + (i) * 4)
+
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_A	0x70160
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_B	0x71160
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_A	0x70260
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_B	0x71260
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_A, \
+							      _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_A, \
+							      _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1(pipe), \
+									 _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_A	0x70164
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_B	0x71164
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_A	0x70264
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_B	0x71264
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_A, \
+							      _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_A, \
+							      _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_SEG0_DATA_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1(pipe), \
+									 _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1_A	0x701d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1_B	0x711d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2_A	0x702d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2_B	0x712d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_ENH_1_A, \
+						      _PLANE_POST_CSC_GAMC_INDEX_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_ENH_2_A, \
+						      _PLANE_POST_CSC_GAMC_INDEX_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_INDEX_ENH_1(pipe), \
+								 _PLANE_POST_CSC_GAMC_INDEX_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1_A	0x701dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1_B	0x711dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2_A	0x702dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2_B	0x712dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_ENH_1_A, \
+						      _PLANE_POST_CSC_GAMC_DATA_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_ENH_2_A, \
+						      _PLANE_POST_CSC_GAMC_DATA_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_DATA_ENH_1(pipe), \
+								_PLANE_POST_CSC_GAMC_DATA_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_INDEX_1_A	0x704d8
+#define _PLANE_POST_CSC_GAMC_INDEX_1_B	0x714d8
+#define _PLANE_POST_CSC_GAMC_INDEX_2_A	0x705d8
+#define _PLANE_POST_CSC_GAMC_INDEX_2_B	0x715d8
+#define _PLANE_POST_CSC_GAMC_INDEX_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_1_A, \
+						      _PLANE_POST_CSC_GAMC_INDEX_1_B)
+#define _PLANE_POST_CSC_GAMC_INDEX_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_2_A, \
+						      _PLANE_POST_CSC_GAMC_INDEX_2_B)
+#define PLANE_POST_CSC_GAMC_INDEX(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_INDEX_1(pipe), \
+								_PLANE_POST_CSC_GAMC_INDEX_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_DATA_1_A	0x704dc
+#define _PLANE_POST_CSC_GAMC_DATA_1_B	0x714dc
+#define _PLANE_POST_CSC_GAMC_DATA_2_A	0x705dc
+#define _PLANE_POST_CSC_GAMC_DATA_2_B	0x715dc
+#define _PLANE_POST_CSC_GAMC_DATA_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_1_A, \
+						      _PLANE_POST_CSC_GAMC_DATA_1_B)
+#define _PLANE_POST_CSC_GAMC_DATA_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_2_A, \
+						      _PLANE_POST_CSC_GAMC_DATA_2_B)
+#define PLANE_POST_CSC_GAMC_DATA(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_DATA_1(pipe), \
+								_PLANE_POST_CSC_GAMC_DATA_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_A	0x701d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_B	0x711d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_A	0x702d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_B	0x712d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_A, \
+						      _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_B)
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_A, \
+						      _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_B)
+#define PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_INDEX_ENH_1(pipe), \
+								_PLANE_PRE_CSC_GAMC_INDEX_ENH_2(pipe))
+#define   PLANE_PAL_PREC_AUTO_INCREMENT		REG_BIT(10)
+
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1_A	0x701d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1_B	0x711d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2_A	0x702d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2_B	0x712d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_ENH_1_A, \
+						      _PLANE_PRE_CSC_GAMC_DATA_ENH_1_B)
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_ENH_2_A, \
+						      _PLANE_PRE_CSC_GAMC_DATA_ENH_2_B)
+#define PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_DATA_ENH_1(pipe), \
+								_PLANE_PRE_CSC_GAMC_DATA_ENH_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_INDEX_1_A	0x704d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_1_B	0x714d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_2_A	0x705d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_2_B	0x715d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_1(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_1_A, \
+						      _PLANE_PRE_CSC_GAMC_INDEX_1_B)
+#define _PLANE_PRE_CSC_GAMC_INDEX_2(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_2_A, \
+						      _PLANE_PRE_CSC_GAMC_INDEX_2_B)
+#define PLANE_PRE_CSC_GAMC_INDEX(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_INDEX_1(pipe), \
+								_PLANE_PRE_CSC_GAMC_INDEX_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_DATA_1_A	0x704d4
+#define _PLANE_PRE_CSC_GAMC_DATA_1_B	0x714d4
+#define _PLANE_PRE_CSC_GAMC_DATA_2_A	0x705d4
+#define _PLANE_PRE_CSC_GAMC_DATA_2_B	0x715d4
+#define _PLANE_PRE_CSC_GAMC_DATA_1(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_1_A, \
+						      _PLANE_PRE_CSC_GAMC_DATA_1_B)
+#define _PLANE_PRE_CSC_GAMC_DATA_2(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_2_A, \
+						      _PLANE_PRE_CSC_GAMC_DATA_2_B)
+#define PLANE_PRE_CSC_GAMC_DATA(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_DATA_1(pipe), \
+								_PLANE_PRE_CSC_GAMC_DATA_2(pipe))
+
 #define _PLANE_CSC_RY_GY_1_A	0x70210
 #define _PLANE_CSC_RY_GY_2_A	0x70310
 #define _PLANE_CSC_RY_GY_1_B	0x71210
drivers/gpu/drm/xe/Makefile

@@ -184,6 +184,10 @@ xe-$(CONFIG_PCI_IOV) += \
	xe_sriov_pf_sysfs.o \
	xe_tile_sriov_pf_debugfs.o

+ifdef CONFIG_XE_VFIO_PCI
+xe-$(CONFIG_PCI_IOV) += xe_sriov_vfio.o
+endif
+
 # include helpers for tests even when XE is built-in
 ifdef CONFIG_DRM_XE_KUNIT_TEST
 xe-y += tests/xe_kunit_helpers.o

@@ -242,6 +246,8 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
	i915-display/intel_cdclk.o \
	i915-display/intel_cmtg.o \
	i915-display/intel_color.o \
+	i915-display/intel_colorop.o \
+	i915-display/intel_color_pipeline.o \
	i915-display/intel_combo_phy.o \
	i915-display/intel_connector.o \
	i915-display/intel_crtc.o \
drivers/gpu/drm/xe/xe_gpu_scheduler.h

@@ -54,13 +54,14 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
 static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
 {
	struct drm_sched_job *s_job;
+	bool restore_replay = false;

	list_for_each_entry(s_job, &sched->base.pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *hw_fence = s_fence->parent;

-		if (to_xe_sched_job(s_job)->skip_emit ||
-		    (hw_fence && !dma_fence_is_signaled(hw_fence)))
+		restore_replay |= to_xe_sched_job(s_job)->restore_replay;
+		if (restore_replay || (hw_fence && !dma_fence_is_signaled(hw_fence)))
			sched->base.ops->run_job(s_job);
	}
 }
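The sticky restore_replay local changes the resubmit policy: previously each pending job was tested in isolation (its own skip_emit flag or an unsignaled fence); now, once the first job flagged restore_replay is reached, that job and every job after it in the pending list is re-run, signaled or not. Presumably this is because replayed ring contents are re-emitted in order during VF restore, so later jobs cannot be skipped once one replay begins — a reading inferred from the xe_guc_submit.c hunks below.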
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c

@@ -711,7 +711,7 @@ static u64 pf_profile_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
	if (num_vfs > 56)
		return SZ_64M - SZ_8M;

-	return rounddown_pow_of_two(shareable / num_vfs);
+	return rounddown_pow_of_two(div_u64(shareable, num_vfs));
 }

 /**
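The div_u64() substitution is a portability fix rather than a behavior change: with a u64 dividend, a plain `/` emits a libgcc division helper (e.g. __udivdi3) on 32-bit targets, which the kernel does not provide; div_u64() is the kernel's helper for exactly this 64-by-32 case.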
drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c

@@ -17,6 +17,7 @@
 #include "xe_gt_sriov_pf_helpers.h"
 #include "xe_gt_sriov_pf_migration.h"
 #include "xe_gt_sriov_printk.h"
+#include "xe_guc.h"
 #include "xe_guc_buf.h"
 #include "xe_guc_ct.h"
 #include "xe_migrate.h"

@@ -1023,6 +1024,12 @@ static void action_ring_cleanup(void *arg)
	ptr_ring_cleanup(r, destroy_pf_packet);
 }

+static void pf_gt_migration_check_support(struct xe_gt *gt)
+{
+	if (GUC_FIRMWARE_VER(&gt->uc.guc) < MAKE_GUC_VER(70, 54, 0))
+		xe_sriov_pf_migration_disable(gt_to_xe(gt), "requires GuC version >= 70.54.0");
+}
+
 /**
  * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
  * @gt: the &xe_gt

@@ -1039,6 +1046,8 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

+	pf_gt_migration_check_support(gt);
+
	if (!pf_migration_supported(gt))
		return 0;
drivers/gpu/drm/xe/xe_guc_submit.c

@@ -822,7 +822,7 @@ static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)

	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));

-	if (!job->skip_emit || job->last_replay) {
+	if (!job->restore_replay || job->last_replay) {
		if (xe_exec_queue_is_parallel(q))
			wq_item_append(q);
		else

@@ -881,10 +881,10 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
	if (!killed_or_banned_or_wedged && !xe_sched_job_is_error(job)) {
		if (!exec_queue_registered(q))
			register_exec_queue(q, GUC_CONTEXT_NORMAL);
-		if (!job->skip_emit)
+		if (!job->restore_replay)
			q->ring_ops->emit_job(job);
		submit_exec_queue(q, job);
-		job->skip_emit = false;
+		job->restore_replay = false;
	}

	/*

@@ -2112,6 +2112,18 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc,
	q->guc->resume_time = 0;
 }

+static void lrc_parallel_clear(struct xe_lrc *lrc)
+{
+	struct xe_device *xe = gt_to_xe(lrc->gt);
+	struct iosys_map map = xe_lrc_parallel_map(lrc);
+	int i;
+
+	for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
+		parallel_write(xe, map, wq[i],
+			       FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
+			       FIELD_PREP(WQ_LEN_MASK, 0));
+}
+
 /*
  * This function is quite complex but only real way to ensure no state is lost
  * during VF resume flows. The function scans the queue state, make adjustments

@@ -2135,8 +2147,8 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
	guc_exec_queue_revert_pending_state_change(guc, q);

	if (xe_exec_queue_is_parallel(q)) {
-		struct xe_device *xe = guc_to_xe(guc);
-		struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
+		/* Pairs with WRITE_ONCE in __xe_exec_queue_init */
+		struct xe_lrc *lrc = READ_ONCE(q->lrc[0]);

		/*
		 * NOP existing WQ commands that may contain stale GGTT

@@ -2144,14 +2156,14 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
		 * seems to get confused if the WQ head/tail pointers are
		 * adjusted.
		 */
-		for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
-			parallel_write(xe, map, wq[i],
-				       FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
-				       FIELD_PREP(WQ_LEN_MASK, 0));
+		if (lrc)
+			lrc_parallel_clear(lrc);
	}

	job = xe_sched_first_pending_job(sched);
	if (job) {
+		job->restore_replay = true;
+
		/*
		 * Adjust software tail so jobs submitted overwrite previous
		 * position in ring buffer with new GGTT addresses.

@@ -2241,17 +2253,18 @@ static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
					   struct xe_exec_queue *q)
 {
	struct xe_gpu_scheduler *sched = &q->guc->sched;
-	struct drm_sched_job *s_job;
	struct xe_sched_job *job = NULL;
+	bool restore_replay = false;

-	list_for_each_entry(s_job, &sched->base.pending_list, list) {
-		job = to_xe_sched_job(s_job);
-
-		xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
-			  q->guc->id, xe_sched_job_seqno(job));
-
-		q->ring_ops->emit_job(job);
-		job->skip_emit = true;
+	list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+		restore_replay |= job->restore_replay;
+		if (restore_replay) {
+			xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
+				  q->guc->id, xe_sched_job_seqno(job));
+
+			q->ring_ops->emit_job(job);
+			job->restore_replay = true;
+		}
	}

	if (job)
@ -102,7 +102,6 @@ static int xe_pagefault_handle_vma(struct xe_gt *gt, struct xe_vma *vma,
|
|||
|
||||
/* Lock VM and BOs dma-resv */
|
||||
xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
|
||||
drm_exec_init(&exec, 0, 0);
|
||||
drm_exec_until_all_locked(&exec) {
|
||||
err = xe_pagefault_begin(&exec, vma, tile->mem.vram,
|
||||
needs_vram == 1);
|
||||
|
|
|
|||
|
|
@ -1223,6 +1223,23 @@ static struct pci_driver xe_pci_driver = {
|
|||
#endif
|
||||
};
|
||||
|
||||
/**
|
||||
* xe_pci_to_pf_device() - Get PF &xe_device.
|
||||
* @pdev: the VF &pci_dev device
|
||||
*
|
||||
* Return: pointer to PF &xe_device, NULL otherwise.
|
||||
*/
|
||||
struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev)
|
||||
{
|
||||
struct drm_device *drm;
|
||||
|
||||
drm = pci_iov_get_pf_drvdata(pdev, &xe_pci_driver);
|
||||
if (IS_ERR(drm))
|
||||
return NULL;
|
||||
|
||||
return to_xe_device(drm);
|
||||
}
|
||||
|
||||
int xe_register_pci_driver(void)
|
||||
{
|
||||
return pci_register_driver(&xe_pci_driver);
|
||||
|
|
|
|||
|
|
@ -6,7 +6,10 @@
|
|||
#ifndef _XE_PCI_H_
|
||||
#define _XE_PCI_H_
|
||||
|
||||
struct pci_dev;
|
||||
|
||||
int xe_register_pci_driver(void);
|
||||
void xe_unregister_pci_driver(void);
|
||||
struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev);
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -726,6 +726,13 @@ static void xe_pm_runtime_lockdep_prime(void)
|
|||
/**
|
||||
* xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
|
||||
* @xe: xe device instance
|
||||
*
|
||||
* When possible, scope-based runtime PM (through guard(xe_pm_runtime)) is
|
||||
* be preferred over direct usage of this function. Manual get/put handling
|
||||
* should only be used when the function contains goto-based logic which
|
||||
* can break scope-based handling, or when the lifetime of the runtime PM
|
||||
* reference does not match a specific scope (e.g., runtime PM obtained in one
|
||||
* function and released in a different one).
|
||||
*/
|
||||
void xe_pm_runtime_get(struct xe_device *xe)
|
||||
{
|
||||
|
|
@ -758,6 +765,13 @@ void xe_pm_runtime_put(struct xe_device *xe)
|
|||
* xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
|
||||
* @xe: xe device instance
|
||||
*
|
||||
* When possible, scope-based runtime PM (through
|
||||
* ACQUIRE(xe_pm_runtime_ioctl, ...)) is be preferred over direct usage of this
|
||||
* function. Manual get/put handling should only be used when the function
|
||||
* contains goto-based logic which can break scope-based handling, or when the
|
||||
* lifetime of the runtime PM reference does not match a specific scope (e.g.,
|
||||
* runtime PM obtained in one function and released in a different one).
|
||||
*
|
||||
* Returns: Any number greater than or equal to 0 for success, negative error
|
||||
* code otherwise.
|
||||
*/
|
||||
|
|
@ -827,6 +841,13 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
|
|||
* It will warn if not protected.
|
||||
* The reference should be put back after this function regardless, since it
|
||||
* will always bump the usage counter, regardless.
|
||||
*
|
||||
* When possible, scope-based runtime PM (through guard(xe_pm_runtime_noresume))
|
||||
* is be preferred over direct usage of this function. Manual get/put handling
|
||||
* should only be used when the function contains goto-based logic which can
|
||||
* break scope-based handling, or when the lifetime of the runtime PM reference
|
||||
* does not match a specific scope (e.g., runtime PM obtained in one function
|
||||
* and released in a different one).
|
||||
*/
|
||||
void xe_pm_runtime_get_noresume(struct xe_device *xe)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@
|
|||
#ifndef _XE_PM_H_
|
||||
#define _XE_PM_H_
|
||||
|
||||
#include <linux/cleanup.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#define DEFAULT_VRAM_THRESHOLD 300 /* in MB */
|
||||
|
|
@ -37,4 +38,20 @@ int xe_pm_block_on_suspend(struct xe_device *xe);
|
|||
void xe_pm_might_block_on_suspend(void);
|
||||
int xe_pm_module_init(void);
|
||||
|
||||
static inline void __xe_pm_runtime_noop(struct xe_device *xe) {}
|
||||
|
||||
DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
|
||||
xe_pm_runtime_get(_T), xe_pm_runtime_put(_T))
|
||||
DEFINE_GUARD(xe_pm_runtime_noresume, struct xe_device *,
|
||||
xe_pm_runtime_get_noresume(_T), xe_pm_runtime_put(_T))
|
||||
DEFINE_GUARD_COND(xe_pm_runtime, _ioctl, xe_pm_runtime_get_ioctl(_T), _RET >= 0)
|
||||
|
||||
/*
|
||||
* Used when a function needs to release runtime PM in all possible cases
|
||||
* and error paths, but the wakeref was already acquired by a different
|
||||
* function (i.e., get() has already happened so only a put() is needed).
|
||||
*/
|
||||
DEFINE_GUARD(xe_pm_runtime_release_only, struct xe_device *,
|
||||
__xe_pm_runtime_noop(_T), xe_pm_runtime_put(_T));
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
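The guard definitions above make the scope-based pattern that the new xe_pm.c kernel-doc recommends concrete. A minimal sketch of a caller (the function and the helpers it calls are hypothetical; only the guard names come from this header):

	/* Hypothetical caller: the wakeref is held from guard() to scope exit. */
	static int xe_example_with_wakeref(struct xe_device *xe)
	{
		guard(xe_pm_runtime)(xe);	/* calls xe_pm_runtime_get(xe) */

		if (xe_example_not_ready(xe))	/* hypothetical check */
			return -EAGAIN;		/* xe_pm_runtime_put(xe) runs automatically */

		return xe_example_do_work(xe);	/* ...and on this path too */
	}

The conditional variant works the same way: per the DEFINE_GUARD_COND() definition above, an ACQUIRE(xe_pm_runtime_ioctl, ...) scope is only entered when xe_pm_runtime_get_ioctl() returns a value >= 0.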
@@ -63,8 +63,8 @@ struct xe_sched_job {
 	bool ring_ops_flush_tlb;
 	/** @ggtt: mapped in ggtt. */
 	bool ggtt;
-	/** @skip_emit: skip emitting the job */
-	bool skip_emit;
+	/** @restore_replay: job being replayed for restore */
+	bool restore_replay;
 	/** @last_replay: last job being replayed */
 	bool last_replay;
 	/** @ptrs: per instance pointers. */

@@ -46,13 +46,37 @@ bool xe_sriov_pf_migration_supported(struct xe_device *xe)
 {
 	xe_assert(xe, IS_SRIOV_PF(xe));
 
-	return xe->sriov.pf.migration.supported;
+	return IS_ENABLED(CONFIG_DRM_XE_DEBUG) || !xe->sriov.pf.migration.disabled;
 }
 
-static bool pf_check_migration_support(struct xe_device *xe)
+/**
+ * xe_sriov_pf_migration_disable() - Turn off SR-IOV VF migration support on PF.
+ * @xe: the &xe_device instance.
+ * @fmt: format string for the log message, to be combined with following VAs.
+ */
+void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...)
 {
-	/* XXX: for now this is for feature enabling only */
-	return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+	struct va_format vaf;
+	va_list va_args;
+
+	xe_assert(xe, IS_SRIOV_PF(xe));
+
+	va_start(va_args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &va_args;
+	xe_sriov_notice(xe, "migration %s: %pV\n",
+			IS_ENABLED(CONFIG_DRM_XE_DEBUG) ?
			"missing prerequisite" : "disabled",
+			&vaf);
+	va_end(va_args);
+
+	xe->sriov.pf.migration.disabled = true;
+}
+
+static void pf_migration_check_support(struct xe_device *xe)
+{
+	if (!xe_device_has_memirq(xe))
+		xe_sriov_pf_migration_disable(xe, "requires memory-based IRQ support");
 }
 
 static void pf_migration_cleanup(void *arg)
@@ -77,7 +101,8 @@ int xe_sriov_pf_migration_init(struct xe_device *xe)
 
 	xe_assert(xe, IS_SRIOV_PF(xe));
 
-	xe->sriov.pf.migration.supported = pf_check_migration_support(xe);
+	pf_migration_check_support(xe);
 
 	if (!xe_sriov_pf_migration_supported(xe))
 		return 0;

@@ -14,6 +14,7 @@ struct xe_sriov_packet;
 
 int xe_sriov_pf_migration_init(struct xe_device *xe);
 bool xe_sriov_pf_migration_supported(struct xe_device *xe);
+void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...);
 int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid,
 					  struct xe_sriov_packet *data);
 struct xe_sriov_packet *

@@ -14,8 +14,8 @@
  * struct xe_sriov_pf_migration - Xe device level VF migration data
  */
 struct xe_sriov_pf_migration {
-	/** @supported: indicates whether VF migration feature is supported */
-	bool supported;
+	/** @disabled: indicates whether VF migration feature is disabled */
+	bool disabled;
 };
 
 /**

@@ -0,0 +1,80 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <drm/intel/xe_sriov_vfio.h>
#include <linux/cleanup.h>

#include "xe_pci.h"
#include "xe_pm.h"
#include "xe_sriov_pf_control.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_pf_migration.h"

struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev)
{
	return xe_pci_to_pf_device(pdev);
}
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_get_pf, "xe-vfio-pci");

bool xe_sriov_vfio_migration_supported(struct xe_device *xe)
{
	if (!IS_SRIOV_PF(xe))
		return -EPERM;

	return xe_sriov_pf_migration_supported(xe);
}
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_migration_supported, "xe-vfio-pci");

#define DEFINE_XE_SRIOV_VFIO_FUNCTION(_type, _func, _impl) \
_type xe_sriov_vfio_##_func(struct xe_device *xe, unsigned int vfid) \
{ \
	if (!IS_SRIOV_PF(xe)) \
		return -EPERM; \
	if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe)) \
		return -EINVAL; \
\
	guard(xe_pm_runtime_noresume)(xe); \
\
	return xe_sriov_pf_##_impl(xe, vfid); \
} \
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_##_func, "xe-vfio-pci")

DEFINE_XE_SRIOV_VFIO_FUNCTION(int, wait_flr_done, control_wait_flr);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, suspend_device, control_pause_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_device, control_resume_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_enter, control_trigger_save_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_exit, control_finish_save_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_enter, control_trigger_restore_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_exit, control_finish_restore_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, error, control_stop_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(ssize_t, stop_copy_size, migration_size);

ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
				char __user *buf, size_t len)
{
	if (!IS_SRIOV_PF(xe))
		return -EPERM;
	if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
		return -EINVAL;

	guard(xe_pm_runtime_noresume)(xe);

	return xe_sriov_pf_migration_read(xe, vfid, buf, len);
}
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_read, "xe-vfio-pci");

ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
				 const char __user *buf, size_t len)
{
	if (!IS_SRIOV_PF(xe))
		return -EPERM;
	if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
		return -EINVAL;

	guard(xe_pm_runtime_noresume)(xe);

	return xe_sriov_pf_migration_write(xe, vfid, buf, len);
}
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_write, "xe-vfio-pci");
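Since the DEFINE_XE_SRIOV_VFIO_FUNCTION() instances above make up the bulk of this file's API, it may help to see one written out. Expanding the first instance by hand (a mechanical expansion of the macro, not separately generated code):

	int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid)
	{
		if (!IS_SRIOV_PF(xe))
			return -EPERM;
		if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
			return -EINVAL;

		/* wakeref dropped automatically when the scope ends */
		guard(xe_pm_runtime_noresume)(xe);

		return xe_sriov_pf_control_wait_flr(xe, vfid);
	}
	EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_wait_flr_done, "xe-vfio-pci");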
@@ -70,4 +70,6 @@ source "drivers/vfio/pci/nvgrace-gpu/Kconfig"
 
 source "drivers/vfio/pci/qat/Kconfig"
 
+source "drivers/vfio/pci/xe/Kconfig"
+
 endmenu

@@ -20,3 +20,5 @@ obj-$(CONFIG_VIRTIO_VFIO_PCI) += virtio/
 obj-$(CONFIG_NVGRACE_GPU_VFIO_PCI) += nvgrace-gpu/
 
 obj-$(CONFIG_QAT_VFIO_PCI) += qat/
+
+obj-$(CONFIG_XE_VFIO_PCI) += xe/

@@ -0,0 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
config XE_VFIO_PCI
	tristate "VFIO support for Intel Graphics"
	depends on DRM_XE && PCI_IOV
	select VFIO_PCI_CORE
	help
	  This option enables a device-specific VFIO driver variant for Intel
	  Graphics. In addition to generic VFIO PCI functionality, it implements
	  the VFIO migration uAPI allowing userspace to enable migration for
	  Intel Graphics SR-IOV Virtual Functions supported by the Xe driver.

	  If you don't know what to do here, say N.

@@ -0,0 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_XE_VFIO_PCI) += xe-vfio-pci.o
xe-vfio-pci-y := main.o

@@ -0,0 +1,573 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2025 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>

#include <drm/intel/xe_sriov_vfio.h>
#include <drm/intel/pciids.h>

struct xe_vfio_pci_migration_file {
	struct file *filp;
	/* serializes accesses to migration data */
	struct mutex lock;
	struct xe_vfio_pci_core_device *xe_vdev;
	u8 disabled:1;
};

struct xe_vfio_pci_core_device {
	struct vfio_pci_core_device core_device;
	struct xe_device *xe;
	/* PF internal control uses vfid index starting from 1 */
	unsigned int vfid;
	u8 deferred_reset:1;
	/* protects migration state */
	struct mutex state_mutex;
	enum vfio_device_mig_state mig_state;
	/* protects the reset_done flow */
	spinlock_t reset_lock;
	struct xe_vfio_pci_migration_file *migf;
};

#define xe_vdev_to_dev(xe_vdev) (&(xe_vdev)->core_device.pdev->dev)

static void xe_vfio_pci_disable_file(struct xe_vfio_pci_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	mutex_unlock(&migf->lock);
}

static void xe_vfio_pci_put_file(struct xe_vfio_pci_core_device *xe_vdev)
{
	xe_vfio_pci_disable_file(xe_vdev->migf);
	fput(xe_vdev->migf->filp);
	xe_vdev->migf = NULL;
}

static void xe_vfio_pci_reset(struct xe_vfio_pci_core_device *xe_vdev)
{
	if (xe_vdev->migf)
		xe_vfio_pci_put_file(xe_vdev);

	xe_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
}

static void xe_vfio_pci_state_mutex_lock(struct xe_vfio_pci_core_device *xe_vdev)
{
	mutex_lock(&xe_vdev->state_mutex);
}

/*
 * This function is called in all state_mutex unlock cases to
 * handle a 'deferred_reset' if one exists.
 */
static void xe_vfio_pci_state_mutex_unlock(struct xe_vfio_pci_core_device *xe_vdev)
{
again:
	spin_lock(&xe_vdev->reset_lock);
	if (xe_vdev->deferred_reset) {
		xe_vdev->deferred_reset = false;
		spin_unlock(&xe_vdev->reset_lock);
		xe_vfio_pci_reset(xe_vdev);
		goto again;
	}
	mutex_unlock(&xe_vdev->state_mutex);
	spin_unlock(&xe_vdev->reset_lock);
}

static void xe_vfio_pci_reset_done(struct pci_dev *pdev)
{
	struct xe_vfio_pci_core_device *xe_vdev = pci_get_drvdata(pdev);
	int ret;

	if (!pdev->is_virtfn)
		return;

	/*
	 * VF FLR requires additional processing done by the PF driver.
	 * The processing is done after FLR is already finished from the PCIe
	 * perspective.
	 * In order to avoid a scenario where the VF is used while PF processing
	 * is still in progress, an additional synchronization point is needed.
	 */
	ret = xe_sriov_vfio_wait_flr_done(xe_vdev->xe, xe_vdev->vfid);
	if (ret)
		dev_err(&pdev->dev, "Failed to wait for FLR: %d\n", ret);

	if (!xe_vdev->vfid)
		return;

	/*
	 * As the higher VFIO layers are holding locks across reset and using
	 * those same locks with the mm_lock we need to prevent ABBA deadlock
	 * with the state_mutex and mm_lock.
	 * In case the state_mutex was taken already we defer the cleanup work
	 * to the unlock flow of the other running context.
	 */
	spin_lock(&xe_vdev->reset_lock);
	xe_vdev->deferred_reset = true;
	if (!mutex_trylock(&xe_vdev->state_mutex)) {
		spin_unlock(&xe_vdev->reset_lock);
		return;
	}
	spin_unlock(&xe_vdev->reset_lock);
	xe_vfio_pci_state_mutex_unlock(xe_vdev);

	xe_vfio_pci_reset(xe_vdev);
}

static const struct pci_error_handlers xe_vfio_pci_err_handlers = {
	.reset_done = xe_vfio_pci_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};

static int xe_vfio_pci_open_device(struct vfio_device *core_vdev)
{
	struct xe_vfio_pci_core_device *xe_vdev =
		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
	struct vfio_pci_core_device *vdev = &xe_vdev->core_device;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	xe_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;

	vfio_pci_core_finish_enable(vdev);

	return 0;
}

static void xe_vfio_pci_close_device(struct vfio_device *core_vdev)
{
	struct xe_vfio_pci_core_device *xe_vdev =
		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);

	xe_vfio_pci_state_mutex_lock(xe_vdev);
	xe_vfio_pci_reset(xe_vdev);
	xe_vfio_pci_state_mutex_unlock(xe_vdev);
	vfio_pci_core_close_device(core_vdev);
}

static int xe_vfio_pci_release_file(struct inode *inode, struct file *filp)
{
	struct xe_vfio_pci_migration_file *migf = filp->private_data;

	mutex_destroy(&migf->lock);
	kfree(migf);

	return 0;
}

static ssize_t xe_vfio_pci_save_read(struct file *filp, char __user *buf, size_t len, loff_t *pos)
{
	struct xe_vfio_pci_migration_file *migf = filp->private_data;
	ssize_t ret;

	if (pos)
		return -ESPIPE;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		mutex_unlock(&migf->lock);
		return -ENODEV;
	}

	ret = xe_sriov_vfio_data_read(migf->xe_vdev->xe, migf->xe_vdev->vfid, buf, len);
	mutex_unlock(&migf->lock);

	return ret;
}

static const struct file_operations xe_vfio_pci_save_fops = {
	.owner = THIS_MODULE,
	.read = xe_vfio_pci_save_read,
	.release = xe_vfio_pci_release_file,
	.llseek = noop_llseek,
};

static ssize_t xe_vfio_pci_resume_write(struct file *filp, const char __user *buf,
					size_t len, loff_t *pos)
{
	struct xe_vfio_pci_migration_file *migf = filp->private_data;
	ssize_t ret;

	if (pos)
		return -ESPIPE;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		mutex_unlock(&migf->lock);
		return -ENODEV;
	}

	ret = xe_sriov_vfio_data_write(migf->xe_vdev->xe, migf->xe_vdev->vfid, buf, len);
	mutex_unlock(&migf->lock);

	return ret;
}

static const struct file_operations xe_vfio_pci_resume_fops = {
	.owner = THIS_MODULE,
	.write = xe_vfio_pci_resume_write,
	.release = xe_vfio_pci_release_file,
	.llseek = noop_llseek,
};

static const char *vfio_dev_state_str(u32 state)
{
	switch (state) {
	case VFIO_DEVICE_STATE_RUNNING: return "running";
	case VFIO_DEVICE_STATE_RUNNING_P2P: return "running_p2p";
	case VFIO_DEVICE_STATE_STOP_COPY: return "stopcopy";
	case VFIO_DEVICE_STATE_STOP: return "stop";
	case VFIO_DEVICE_STATE_RESUMING: return "resuming";
	case VFIO_DEVICE_STATE_ERROR: return "error";
	default: return "";
	}
}

enum xe_vfio_pci_file_type {
	XE_VFIO_FILE_SAVE = 0,
	XE_VFIO_FILE_RESUME,
};

static struct xe_vfio_pci_migration_file *
xe_vfio_pci_alloc_file(struct xe_vfio_pci_core_device *xe_vdev,
		       enum xe_vfio_pci_file_type type)
{
	struct xe_vfio_pci_migration_file *migf;
	const struct file_operations *fops;
	int flags;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	fops = type == XE_VFIO_FILE_SAVE ? &xe_vfio_pci_save_fops : &xe_vfio_pci_resume_fops;
	flags = type == XE_VFIO_FILE_SAVE ? O_RDONLY : O_WRONLY;
	migf->filp = anon_inode_getfile("xe_vfio_mig", fops, migf, flags);
	if (IS_ERR(migf->filp)) {
		kfree(migf);
		return ERR_CAST(migf->filp);
	}

	mutex_init(&migf->lock);
	migf->xe_vdev = xe_vdev;
	xe_vdev->migf = migf;

	stream_open(migf->filp->f_inode, migf->filp);

	return migf;
}

static struct file *
xe_vfio_set_state(struct xe_vfio_pci_core_device *xe_vdev, u32 new)
{
	u32 cur = xe_vdev->mig_state;
	int ret;

	dev_dbg(xe_vdev_to_dev(xe_vdev),
		"state: %s->%s\n", vfio_dev_state_str(cur), vfio_dev_state_str(new));

	/*
	 * "STOP" handling is reused for "RUNNING_P2P", as the device doesn't
	 * have the capability to selectively block outgoing p2p DMA transfers.
	 * While the device is allowing BAR accesses when the VF is stopped, it
	 * is not processing any new workload requests, effectively stopping
	 * any outgoing DMA transfers (not just p2p).
	 * Any VRAM / MMIO accesses occurring during "RUNNING_P2P" are kept and
	 * will be migrated to target VF during stop-copy.
	 */
	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
		ret = xe_sriov_vfio_suspend_device(xe_vdev->xe, xe_vdev->vfid);
		if (ret)
			goto err;

		return NULL;
	}

	if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) ||
	    (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P))
		return NULL;

	if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) {
		ret = xe_sriov_vfio_resume_device(xe_vdev->xe, xe_vdev->vfid);
		if (ret)
			goto err;

		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct xe_vfio_pci_migration_file *migf;

		migf = xe_vfio_pci_alloc_file(xe_vdev, XE_VFIO_FILE_SAVE);
		if (IS_ERR(migf)) {
			ret = PTR_ERR(migf);
			goto err;
		}
		get_file(migf->filp);

		ret = xe_sriov_vfio_stop_copy_enter(xe_vdev->xe, xe_vdev->vfid);
		if (ret) {
			fput(migf->filp);
			goto err;
		}

		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
		if (xe_vdev->migf)
			xe_vfio_pci_put_file(xe_vdev);

		ret = xe_sriov_vfio_stop_copy_exit(xe_vdev->xe, xe_vdev->vfid);
		if (ret)
			goto err;

		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct xe_vfio_pci_migration_file *migf;

		migf = xe_vfio_pci_alloc_file(xe_vdev, XE_VFIO_FILE_RESUME);
		if (IS_ERR(migf)) {
			ret = PTR_ERR(migf);
			goto err;
		}
		get_file(migf->filp);

		ret = xe_sriov_vfio_resume_data_enter(xe_vdev->xe, xe_vdev->vfid);
		if (ret) {
			fput(migf->filp);
			goto err;
		}

		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		if (xe_vdev->migf)
			xe_vfio_pci_put_file(xe_vdev);

		ret = xe_sriov_vfio_resume_data_exit(xe_vdev->xe, xe_vdev->vfid);
		if (ret)
			goto err;

		return NULL;
	}

	WARN(true, "Unknown state transition %d->%d", cur, new);
	return ERR_PTR(-EINVAL);

err:
	dev_dbg(xe_vdev_to_dev(xe_vdev),
		"Failed to transition state: %s->%s err=%d\n",
		vfio_dev_state_str(cur), vfio_dev_state_str(new), ret);
	return ERR_PTR(ret);
}

static struct file *
xe_vfio_pci_set_device_state(struct vfio_device *core_vdev,
			     enum vfio_device_mig_state new_state)
{
	struct xe_vfio_pci_core_device *xe_vdev =
		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
	enum vfio_device_mig_state next_state;
	struct file *f = NULL;
	int ret;

	xe_vfio_pci_state_mutex_lock(xe_vdev);
	while (new_state != xe_vdev->mig_state) {
		ret = vfio_mig_get_next_state(core_vdev, xe_vdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			xe_sriov_vfio_error(xe_vdev->xe, xe_vdev->vfid);
			f = ERR_PTR(ret);
			break;
		}
		f = xe_vfio_set_state(xe_vdev, next_state);
		if (IS_ERR(f))
			break;

		xe_vdev->mig_state = next_state;

		/* Multiple state transitions with non-NULL file in the middle */
		if (f && new_state != xe_vdev->mig_state) {
			fput(f);
			f = ERR_PTR(-EINVAL);
			break;
		}
	}
	xe_vfio_pci_state_mutex_unlock(xe_vdev);

	return f;
}

static int xe_vfio_pci_get_device_state(struct vfio_device *core_vdev,
					enum vfio_device_mig_state *curr_state)
{
	struct xe_vfio_pci_core_device *xe_vdev =
		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);

	xe_vfio_pci_state_mutex_lock(xe_vdev);
	*curr_state = xe_vdev->mig_state;
	xe_vfio_pci_state_mutex_unlock(xe_vdev);

	return 0;
}

static int xe_vfio_pci_get_data_size(struct vfio_device *vdev,
				     unsigned long *stop_copy_length)
{
	struct xe_vfio_pci_core_device *xe_vdev =
		container_of(vdev, struct xe_vfio_pci_core_device, core_device.vdev);

	xe_vfio_pci_state_mutex_lock(xe_vdev);
	*stop_copy_length = xe_sriov_vfio_stop_copy_size(xe_vdev->xe, xe_vdev->vfid);
	xe_vfio_pci_state_mutex_unlock(xe_vdev);

	return 0;
}

static const struct vfio_migration_ops xe_vfio_pci_migration_ops = {
	.migration_set_state = xe_vfio_pci_set_device_state,
	.migration_get_state = xe_vfio_pci_get_device_state,
	.migration_get_data_size = xe_vfio_pci_get_data_size,
};

static void xe_vfio_pci_migration_init(struct xe_vfio_pci_core_device *xe_vdev)
{
	struct vfio_device *core_vdev = &xe_vdev->core_device.vdev;
	struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
	struct xe_device *xe = xe_sriov_vfio_get_pf(pdev);

	if (!xe)
		return;
	if (!xe_sriov_vfio_migration_supported(xe))
		return;

	mutex_init(&xe_vdev->state_mutex);
	spin_lock_init(&xe_vdev->reset_lock);

	/* PF internal control uses vfid index starting from 1 */
	xe_vdev->vfid = pci_iov_vf_id(pdev) + 1;
	xe_vdev->xe = xe;

	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
	core_vdev->mig_ops = &xe_vfio_pci_migration_ops;
}

static void xe_vfio_pci_migration_fini(struct xe_vfio_pci_core_device *xe_vdev)
{
	if (!xe_vdev->vfid)
		return;

	mutex_destroy(&xe_vdev->state_mutex);
}

static int xe_vfio_pci_init_dev(struct vfio_device *core_vdev)
{
	struct xe_vfio_pci_core_device *xe_vdev =
		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);

	xe_vfio_pci_migration_init(xe_vdev);

	return vfio_pci_core_init_dev(core_vdev);
}

static void xe_vfio_pci_release_dev(struct vfio_device *core_vdev)
{
	struct xe_vfio_pci_core_device *xe_vdev =
		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);

	xe_vfio_pci_migration_fini(xe_vdev);
}

static const struct vfio_device_ops xe_vfio_pci_ops = {
	.name = "xe-vfio-pci",
	.init = xe_vfio_pci_init_dev,
	.release = xe_vfio_pci_release_dev,
	.open_device = xe_vfio_pci_open_device,
	.close_device = xe_vfio_pci_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.match_token_uuid = vfio_pci_core_match_token_uuid,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static int xe_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct xe_vfio_pci_core_device *xe_vdev;
	int ret;

	xe_vdev = vfio_alloc_device(xe_vfio_pci_core_device, core_device.vdev, &pdev->dev,
				    &xe_vfio_pci_ops);
	if (IS_ERR(xe_vdev))
		return PTR_ERR(xe_vdev);

	dev_set_drvdata(&pdev->dev, &xe_vdev->core_device);

	ret = vfio_pci_core_register_device(&xe_vdev->core_device);
	if (ret) {
		vfio_put_device(&xe_vdev->core_device.vdev);
		return ret;
	}

	return 0;
}

static void xe_vfio_pci_remove(struct pci_dev *pdev)
{
	struct xe_vfio_pci_core_device *xe_vdev = pci_get_drvdata(pdev);

	vfio_pci_core_unregister_device(&xe_vdev->core_device);
	vfio_put_device(&xe_vdev->core_device.vdev);
}

#define INTEL_PCI_VFIO_DEVICE(_id) { \
	PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, (_id)) \
}

static const struct pci_device_id xe_vfio_pci_table[] = {
	INTEL_PTL_IDS(INTEL_PCI_VFIO_DEVICE),
	INTEL_WCL_IDS(INTEL_PCI_VFIO_DEVICE),
	INTEL_BMG_IDS(INTEL_PCI_VFIO_DEVICE),
	{}
};
MODULE_DEVICE_TABLE(pci, xe_vfio_pci_table);

static struct pci_driver xe_vfio_pci_driver = {
	.name = "xe-vfio-pci",
	.id_table = xe_vfio_pci_table,
	.probe = xe_vfio_pci_probe,
	.remove = xe_vfio_pci_remove,
	.err_handler = &xe_vfio_pci_err_handlers,
	.driver_managed_dma = true,
};
module_pci_driver(xe_vfio_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michał Winiarski <michal.winiarski@intel.com>");
MODULE_DESCRIPTION("VFIO PCI driver with migration support for Intel Graphics");
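For context, the migration arcs implemented above are driven from userspace through the standard VFIO migration feature ioctl; none of that interface is part of this series. A rough sketch of how a VMM might request the STOP_COPY transition on an opened VFIO device fd (standard linux/vfio.h uAPI, simplified, with error handling elided and the helper name hypothetical):

	#include <linux/vfio.h>
	#include <sys/ioctl.h>

	/*
	 * Hypothetical userspace helper: ask the driver to enter STOP_COPY
	 * and return the fd used to read the migration data stream.
	 */
	static int example_enter_stop_copy(int device_fd)
	{
		struct {
			struct vfio_device_feature feat;
			struct vfio_device_feature_mig_state state;
		} cmd = {
			.feat = {
				.argsz = sizeof(cmd),
				.flags = VFIO_DEVICE_FEATURE_SET |
					 VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE,
			},
			.state = { .device_state = VFIO_DEVICE_STATE_STOP_COPY },
		};

		if (ioctl(device_fd, VFIO_DEVICE_FEATURE, &cmd) < 0)
			return -1;

		/*
		 * For this arc the driver returns the save file created by
		 * xe_vfio_pci_alloc_file() through data_fd.
		 */
		return cmd.state.data_fd;
	}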
@@ -0,0 +1,143 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2025 Intel Corporation
 */

#ifndef _XE_SRIOV_VFIO_H_
#define _XE_SRIOV_VFIO_H_

#include <linux/types.h>

struct pci_dev;
struct xe_device;

/**
 * xe_sriov_vfio_get_pf() - Get PF &xe_device.
 * @pdev: the VF &pci_dev device
 *
 * Return: pointer to PF &xe_device, NULL otherwise.
 */
struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev);

/**
 * xe_sriov_vfio_migration_supported() - Check if migration is supported.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 *
 * Return: true if migration is supported, false otherwise.
 */
bool xe_sriov_vfio_migration_supported(struct xe_device *xe);

/**
 * xe_sriov_vfio_wait_flr_done() - Wait for VF FLR completion.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * This function will wait until VF FLR is processed by PF on all tiles (or
 * until timeout occurs).
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_suspend_device() - Suspend VF.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * This function will pause VF on all tiles/GTs.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_resume_device() - Resume VF.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * This function will resume VF on all tiles.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_resume_device(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_stop_copy_enter() - Initiate a VF device migration data save.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_stop_copy_enter(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_stop_copy_exit() - Finish a VF device migration data save.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_stop_copy_exit(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_resume_data_enter() - Initiate a VF device migration data restore.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_resume_data_enter(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_resume_data_exit() - Finish a VF device migration data restore.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_resume_data_exit(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_error() - Move VF device to error state.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * Reset is needed to move it out of error state.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_error(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_data_read() - Read migration data from the VF device.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 * @buf: start address of userspace buffer
 * @len: requested read size from userspace
 *
 * Return: number of bytes that has been successfully read,
 *	0 if no more migration data is available, -errno on failure.
 */
ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
				char __user *buf, size_t len);

/**
 * xe_sriov_vfio_data_write() - Write migration data to the VF device.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 * @buf: start address of userspace buffer
 * @len: requested write size from userspace
 *
 * Return: number of bytes that has been successfully written, -errno on failure.
 */
ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
				 const char __user *buf, size_t len);

/**
 * xe_sriov_vfio_stop_copy_size() - Get a size estimate of VF device migration data.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * Return: migration data size in bytes or a negative error code on failure.
 */
ssize_t xe_sriov_vfio_stop_copy_size(struct xe_device *xe, unsigned int vfid);

#endif

@@ -26,7 +26,9 @@
 #include <crypto/aes.h>
 
 #define TPM_DIGEST_SIZE 20	/* Max TPM v1.2 PCR size */
-#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+#define TPM2_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define TPM2_MAX_PCR_BANKS 8
 
 struct tpm_chip;
 struct trusted_key_payload;
@@ -68,7 +70,7 @@ enum tpm2_curves {
 
 struct tpm_digest {
 	u16 alg_id;
-	u8 digest[TPM_MAX_DIGEST_SIZE];
+	u8 digest[TPM2_MAX_DIGEST_SIZE];
 } __packed;
 
 struct tpm_bank_info {
@@ -189,7 +191,7 @@ struct tpm_chip {
 	unsigned int groups_cnt;
 
 	u32 nr_allocated_banks;
-	struct tpm_bank_info *allocated_banks;
+	struct tpm_bank_info allocated_banks[TPM2_MAX_PCR_BANKS];
 #ifdef CONFIG_ACPI
 	acpi_handle acpi_dev_handle;
 	char ppi_version[TPM_PPI_VERSION_LEN + 1];
@@ -454,8 +456,10 @@ static inline ssize_t tpm_ret_to_err(ssize_t ret)
 		return 0;
 	case TPM2_RC_SESSION_MEMORY:
 		return -ENOMEM;
+	case TPM2_RC_HASH:
+		return -EINVAL;
 	default:
-		return -EFAULT;
+		return -EPERM;
 	}
 }
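The trusted-keys hunks further down lean on this extended helper to replace the open-coded translation of positive TPM return codes at each call site. The resulting caller pattern, shown on a hypothetical command (tpm_transmit_cmd() is the real transmit helper used in those hunks):

	/* Hypothetical caller: rc is 0, a negative errno, or a positive TPM rc. */
	static int example_tpm_command(struct tpm_chip *chip, struct tpm_buf *buf)
	{
		ssize_t rc;

		rc = tpm_transmit_cmd(chip, buf, 4, "example command");
		/* TPM2_RC_HASH -> -EINVAL, any other positive rc -> -EPERM */
		return tpm_ret_to_err(rc);
	}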
@@ -358,17 +358,17 @@ int cap_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry)
 	return error;
 }
 
-static bool rootid_owns_currentns(vfsuid_t rootvfsuid)
+/**
+ * kuid_root_in_ns - check whether the given kuid is root in the given ns
+ * @kuid: the kuid to be tested
+ * @ns: the user namespace to test against
+ *
+ * Returns true if @kuid represents the root user in @ns, false otherwise.
+ */
+static bool kuid_root_in_ns(kuid_t kuid, struct user_namespace *ns)
 {
-	struct user_namespace *ns;
-	kuid_t kroot;
-
-	if (!vfsuid_valid(rootvfsuid))
-		return false;
-
-	kroot = vfsuid_into_kuid(rootvfsuid);
-	for (ns = current_user_ns();; ns = ns->parent) {
-		if (from_kuid(ns, kroot) == 0)
+	for (;; ns = ns->parent) {
+		if (from_kuid(ns, kuid) == 0)
 			return true;
 		if (ns == &init_user_ns)
 			break;
@@ -377,6 +377,16 @@ static bool rootid_owns_currentns(vfsuid_t rootvfsuid)
 	return false;
 }
 
+static bool vfsuid_root_in_currentns(vfsuid_t vfsuid)
+{
+	kuid_t kuid;
+
+	if (!vfsuid_valid(vfsuid))
+		return false;
+	kuid = vfsuid_into_kuid(vfsuid);
+	return kuid_root_in_ns(kuid, current_user_ns());
+}
+
 static __u32 sansflags(__u32 m)
 {
 	return m & ~VFS_CAP_FLAGS_EFFECTIVE;
@@ -481,7 +491,7 @@ int cap_inode_getsecurity(struct mnt_idmap *idmap,
 		goto out_free;
 	}
 
-	if (!rootid_owns_currentns(vfsroot)) {
+	if (!vfsuid_root_in_currentns(vfsroot)) {
 		size = -EOVERFLOW;
 		goto out_free;
 	}
@@ -722,7 +732,7 @@ int get_vfs_caps_from_disk(struct mnt_idmap *idmap,
 	/* Limit the caps to the mounter of the filesystem
 	 * or the more limited uid specified in the xattr.
 	 */
-	if (!rootid_owns_currentns(rootvfsuid))
+	if (!vfsuid_root_in_currentns(rootvfsuid))
 		return -ENODATA;
 
 	cpu_caps->permitted.val = le32_to_cpu(caps->data[0].permitted);

@@ -333,25 +333,19 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
 	}
 
 	blob_len = tpm2_key_encode(payload, options, &buf.data[offset], blob_len);
+	if (blob_len < 0)
+		rc = blob_len;
 
 out:
 	tpm_buf_destroy(&sized);
 	tpm_buf_destroy(&buf);
 
-	if (rc > 0) {
-		if (tpm2_rc_value(rc) == TPM2_RC_HASH)
-			rc = -EINVAL;
-		else
-			rc = -EPERM;
-	}
-	if (blob_len < 0)
-		rc = blob_len;
-	else
+	if (!rc)
 		payload->blob_len = blob_len;
 
 out_put:
 	tpm_put_ops(chip);
-	return rc;
+	return tpm_ret_to_err(rc);
 }
 
 /**
@@ -455,10 +449,7 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
 out:
 	tpm_buf_destroy(&buf);
 
-	if (rc > 0)
-		rc = -EPERM;
-
-	return rc;
+	return tpm_ret_to_err(rc);
 }
 
 /**
@@ -521,8 +512,6 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
 	tpm_buf_fill_hmac_session(chip, &buf);
 	rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
 	rc = tpm_buf_check_hmac_response(chip, &buf, rc);
-	if (rc > 0)
-		rc = -EPERM;
 
 	if (!rc) {
 		data_len = be16_to_cpup(
@@ -555,7 +544,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
 
 out:
 	tpm_buf_destroy(&buf);
-	return rc;
+	return tpm_ret_to_err(rc);
 }
 
 /**
@@ -587,6 +576,5 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
 
 out:
 	tpm_put_ops(chip);
-
-	return rc;
+	return tpm_ret_to_err(rc);
 }

@@ -437,7 +437,7 @@ class Client:
 
     def extend_pcr(self, i, dig, bank_alg = TPM2_ALG_SHA1):
         ds = get_digest_size(bank_alg)
-        assert(ds == len(dig))
+        assert ds == len(dig)
 
         auth_cmd = AuthCommand()
 
@@ -589,7 +589,7 @@ class Client:
     def seal(self, parent_key, data, auth_value, policy_dig,
              name_alg = TPM2_ALG_SHA1):
         ds = get_digest_size(name_alg)
-        assert(not policy_dig or ds == len(policy_dig))
+        assert not policy_dig or ds == len(policy_dig)
 
         attributes = 0
         if not policy_dig: