mirror of https://github.com/torvalds/linux.git
drm next part 2 for 6.19-rc1
vfio:
- add a vfio_pci variant driver for Intel xe/i915

display:
- add plane color management support

xe:
- Add scope-based cleanup helper for runtime PM
- vfio xe driver prerequisites and exports
- fix vfio link error
- Fix a memory leak
- Fix a 64-bit division
- vf migration fix
- LRC pause fix

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmkyMUgACgkQDHTzWXnE
hr69pg/9EWjh7qVGk9ZIpYc9AW42UzWwOVBX/HWkuQvmfxUUBqtA3IuP0dGGmPUn
QbtbetbRvlCaXwEoZpPh1nzrXA2AGFxgHErYMO5BfwquyBcfpwTWZ9T15ptceL/3
aw2l63aH1R2/yxCRfHFIdwAmq1bThqdh5IkjjbE3im0V0lHT2Uo/jhmf/EWCNWol
LlPgYxHpfBIzhtFYUcniaXxs9vOSk49AY+ObpPpuvks8OWoaaTcKYWlUCHr/X1ip
OnWB4NGraTzx4l44vqdRvRL5/KPY7N2IcAxU7rXFTacWp6UoESph5DCYLsPREONb
OsK1pVbAsKATobeoAC9J+utILhfDmKM8Z7eSAlNE+X+nk/BKu4h9Pp1TnKfo7bCz
0tER/OrsqnYMfxj1PawT3xpf/KUWkL0aqnRJpmA2cvJqTz8Qnb4h6kRQp1iAKp80
XaBL1v0uzVE/J4ffuA5bzkT71w3hjN5ytLyEe7h1Y43E/jxyQgyTIHM8cX/UrreJ
RboaakyoTv1u1xrd9Mzx4WCzwKryH+JFY2nekAC3YnSCcGYnSScSNM/ARTrYC2pf
wNbWBvkq7ZFy9eybaZQ/zaSYyVO7yQDjdCAqO+SA+xfRuwF41uiADJptyC+FgMPw
nIBaeid314tJQ9uGNPJH0f2BzLzSvH569trUp/7hbOYWC69XeQI=
=jyth
-----END PGP SIGNATURE-----

Merge tag 'drm-next-2025-12-05' of https://gitlab.freedesktop.org/drm/kernel

Pull more drm updates from Dave Airlie:
 "There was some additional intel code for color operations we wanted
  to land. However I discovered I missed a pull for the xe vfio driver,
  which I had sorted into 6.20 in my brain until Thomas mentioned it.

  This contains the xe vfio code, a bunch of xe fixes that were
  waiting, and the i915 color management support. I'd like to include
  the latter to keep the two main vendors on the same page and to give
  userspace a good cross-driver experience when it starts using it.

  vfio:
   - add a vfio_pci variant driver for Intel xe/i915

  display:
   - add plane color management support

  xe:
   - Add scope-based cleanup helper for runtime PM
   - vfio xe driver prerequisites and exports
   - fix vfio link error
   - Fix a memory leak
   - Fix a 64-bit division
   - vf migration fix
   - LRC pause fix"

* tag 'drm-next-2025-12-05' of https://gitlab.freedesktop.org/drm/kernel: (25 commits)
  drm/i915/color: Enable Plane Color Pipelines
  drm/i915/color: Add 3D LUT to color pipeline
  drm/i915/color: Add registers for 3D LUT
  drm/i915/color: Program Plane Post CSC Registers
  drm/i915/color: Program Pre-CSC registers
  drm/i915/color: Add framework to program PRE/POST CSC LUT
  drm/i915: Add register definitions for Plane Post CSC
  drm/i915: Add register definitions for Plane Degamma
  drm/i915/color: Add plane CTM callback for D12 and beyond
  drm/i915/color: Preserve sign bit when int_bits is Zero
  drm/i915/color: Add framework to program CSC
  drm/i915/color: Create a transfer function color pipeline
  drm/i915/color: Add helper to create intel colorop
  drm/i915: Add intel_color_op
  drm/i915/display: Add identifiers for driver specific blocks
  drm/xe/pf: fix VFIO link error
  drm/xe: Protect against unset LRC when pausing submissions
  drm/xe/vf: Start re-emission from first unsignaled job during VF migration
  drm/xe/pf: Use div_u64 when calculating GGTT profile
  drm/xe: Fix memory leak when handling pagefault vma
  ...
This commit is contained in:
commit
deb879faa9
@@ -27221,6 +27221,13 @@ L:	virtualization@lists.linux.dev
 S:	Maintained
 F:	drivers/vfio/pci/virtio
 
+VFIO XE PCI DRIVER
+M:	Michał Winiarski <michal.winiarski@intel.com>
+L:	kvm@vger.kernel.org
+L:	intel-xe@lists.freedesktop.org
+S:	Supported
+F:	drivers/vfio/pci/xe
+
 VGA_SWITCHEROO
 R:	Lukas Wunner <lukas@wunner.de>
 S:	Maintained

@@ -239,6 +239,8 @@ i915-y += \
 	display/intel_cdclk.o \
 	display/intel_cmtg.o \
 	display/intel_color.o \
+	display/intel_colorop.o \
+	display/intel_color_pipeline.o \
 	display/intel_combo_phy.o \
 	display/intel_connector.o \
 	display/intel_crtc.o \

@@ -32,6 +32,8 @@
 #include "intel_display_utils.h"
 #include "intel_dsb.h"
 #include "intel_vrr.h"
+#include "skl_universal_plane.h"
+#include "skl_universal_plane_regs.h"
 
 struct intel_color_funcs {
 	int (*color_check)(struct intel_atomic_state *state,

@@ -87,6 +89,14 @@ struct intel_color_funcs {
	 * Read config other than LUTs and CSCs, before them. Optional.
	 */
	void (*get_config)(struct intel_crtc_state *crtc_state);
+
+	/* Plane CSC */
+	void (*load_plane_csc_matrix)(struct intel_dsb *dsb,
+				      const struct intel_plane_state *plane_state);
+
+	/* Plane Pre/Post CSC */
+	void (*load_plane_luts)(struct intel_dsb *dsb,
+				const struct intel_plane_state *plane_state);
 };
 
 #define CTM_COEFF_SIGN (1ULL << 63)

@@ -609,6 +619,8 @@ static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits)
 	if (CTM_COEFF_NEGATIVE(coeff))
 		c = -c;
 
+	int_bits = max(int_bits, 1);
+
 	c = clamp(c, -(s64)BIT(int_bits + frac_bits - 1),
 		  (s64)(BIT(int_bits + frac_bits - 1) - 1));
 

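The added max() is the "Preserve sign bit when int_bits is Zero" fix from the shortlog: the plane CSC post-offsets further down are programmed as S0.12, and without at least one integer bit the sign has nowhere to live. A minimal standalone sketch of the conversion, assuming the untruncated helper follows the usual sign-magnitude-to-two's-complement pattern (the shift and masking details here are illustrative, not the kernel's exact code):

#include <stdint.h>

#define CTM_COEFF_SIGN (1ULL << 63)    /* DRM CTM values are sign-magnitude S31.32 */

static uint16_t s31_32_to_twos_complement(uint64_t coeff, int int_bits, int frac_bits)
{
    /* magnitude, rescaled from 32 fractional bits down to frac_bits */
    int64_t c = (int64_t)((coeff & ~CTM_COEFF_SIGN) >> (32 - frac_bits));
    int64_t lo, hi;

    if (coeff & CTM_COEFF_SIGN)
        c = -c;

    /* the fix: even "S0.12" needs one integer bit to hold the sign */
    if (int_bits < 1)
        int_bits = 1;

    /* clamp to the representable range of int_bits + frac_bits bits */
    lo = -((int64_t)1 << (int_bits + frac_bits - 1));
    hi = ((int64_t)1 << (int_bits + frac_bits - 1)) - 1;
    c = c < lo ? lo : (c > hi ? hi : c);

    /* two's complement, truncated to the field width */
    return (uint16_t)(c & (((int64_t)1 << (int_bits + frac_bits)) - 1));
}

For example, -0.5 (sign bit plus 0x80000000) with int_bits = 0 and frac_bits = 12 becomes -2048, i.e. 0x1800 in 13-bit two's complement, with the sign landing in bit 12 — exactly what the "BIT[12] is the sign bit" comment in the next hunk describes. Without the max(), the clamp window shrinks to ±2^11 and the top bit of the 13-bit field can never be set.
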
@@ -3836,6 +3848,266 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
 	}
 }
 
+static void
+xelpd_load_plane_csc_matrix(struct intel_dsb *dsb,
+			    const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	const struct drm_plane_state *state = &plane_state->uapi;
+	enum pipe pipe = to_intel_plane(state->plane)->pipe;
+	enum plane_id plane = to_intel_plane(state->plane)->id;
+	const struct drm_property_blob *blob = plane_state->hw.ctm;
+	struct drm_color_ctm_3x4 *ctm;
+	const u64 *input;
+	u16 coeffs[9] = {};
+	int i, j;
+
+	if (!icl_is_hdr_plane(display, plane) || !blob)
+		return;
+
+	ctm = blob->data;
+	input = ctm->matrix;
+
+	/*
+	 * Convert fixed point S31.32 input to format supported by the
+	 * hardware.
+	 */
+	for (i = 0, j = 0; i < ARRAY_SIZE(coeffs); i++) {
+		u64 abs_coeff = ((1ULL << 63) - 1) & input[j];
+
+		/*
+		 * Clamp input value to min/max supported by
+		 * hardware.
+		 */
+		abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+		/* sign bit */
+		if (CTM_COEFF_NEGATIVE(input[j]))
+			coeffs[i] |= 1 << 15;
+
+		if (abs_coeff < CTM_COEFF_0_125)
+			coeffs[i] |= (3 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 12);
+		else if (abs_coeff < CTM_COEFF_0_25)
+			coeffs[i] |= (2 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 11);
+		else if (abs_coeff < CTM_COEFF_0_5)
+			coeffs[i] |= (1 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 10);
+		else if (abs_coeff < CTM_COEFF_1_0)
+			coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+		else if (abs_coeff < CTM_COEFF_2_0)
+			coeffs[i] |= (7 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 8);
+		else
+			coeffs[i] |= (6 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 7);
+
+		/* Skip postoffs */
+		if (!((j + 2) % 4))
+			j += 2;
+		else
+			j++;
+	}
+
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 0),
+			   coeffs[0] << 16 | coeffs[1]);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 1),
+			   coeffs[2] << 16);
+
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 2),
+			   coeffs[3] << 16 | coeffs[4]);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 3),
+			   coeffs[5] << 16);
+
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 4),
+			   coeffs[6] << 16 | coeffs[7]);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 5),
+			   coeffs[8] << 16);
+
+	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 0), 0);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 1), 0);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 2), 0);
+
+	/*
+	 * Conversion from S31.32 to S0.12. BIT[12] is the sign bit.
+	 */
+	intel_de_write_dsb(display, dsb,
+			   PLANE_CSC_POSTOFF(pipe, plane, 0),
+			   ctm_to_twos_complement(input[3], 0, 12));
+	intel_de_write_dsb(display, dsb,
+			   PLANE_CSC_POSTOFF(pipe, plane, 1),
+			   ctm_to_twos_complement(input[7], 0, 12));
+	intel_de_write_dsb(display, dsb,
+			   PLANE_CSC_POSTOFF(pipe, plane, 2),
+			   ctm_to_twos_complement(input[11], 0, 12));
+}
+
+static void
+xelpd_program_plane_pre_csc_lut(struct intel_dsb *dsb,
+				const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	const struct drm_plane_state *state = &plane_state->uapi;
+	enum pipe pipe = to_intel_plane(state->plane)->pipe;
+	enum plane_id plane = to_intel_plane(state->plane)->id;
+	const struct drm_color_lut32 *pre_csc_lut = plane_state->hw.degamma_lut->data;
+	u32 i, lut_size;
+
+	if (icl_is_hdr_plane(display, plane)) {
+		lut_size = 128;
+
+		intel_de_write_dsb(display, dsb,
+				   PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+				   PLANE_PAL_PREC_AUTO_INCREMENT);
+
+		if (pre_csc_lut) {
+			for (i = 0; i < lut_size; i++) {
+				u32 lut_val = drm_color_lut32_extract(pre_csc_lut[i].green, 24);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   lut_val);
+			}
+
+			/* Program the max register to clamp values > 1.0. */
+			/* TODO: Restrict to 0x7ffffff */
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   (1 << 24));
+			} while (i++ < 130);
+		} else {
+			for (i = 0; i < lut_size; i++) {
+				u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+			}
+
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   1 << 24);
+			} while (i++ < 130);
+		}
+
+		intel_de_write_dsb(display, dsb, PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+	}
+}
+
+static void
+xelpd_program_plane_post_csc_lut(struct intel_dsb *dsb,
+				 const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	const struct drm_plane_state *state = &plane_state->uapi;
+	enum pipe pipe = to_intel_plane(state->plane)->pipe;
+	enum plane_id plane = to_intel_plane(state->plane)->id;
+	const struct drm_color_lut32 *post_csc_lut = plane_state->hw.gamma_lut->data;
+	u32 i, lut_size, lut_val;
+
+	if (icl_is_hdr_plane(display, plane)) {
+		intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+				   PLANE_PAL_PREC_AUTO_INCREMENT);
+		/* TODO: Add macro */
+		intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0),
+				   PLANE_PAL_PREC_AUTO_INCREMENT);
+		if (post_csc_lut) {
+			lut_size = 32;
+			for (i = 0; i < lut_size; i++) {
+				lut_val = drm_color_lut32_extract(post_csc_lut[i].green, 24);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   lut_val);
+			}
+
+			/* Segment 2 */
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   (1 << 24));
+			} while (i++ < 34);
+		} else {
+			/* TODO: Add for segment 0 */
+			lut_size = 32;
+			for (i = 0; i < lut_size; i++) {
+				u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+			}
+
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   1 << 24);
+			} while (i++ < 34);
+		}
+
+		intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+		intel_de_write_dsb(display, dsb,
+				   PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0), 0);
+	}
+}
+
+static void
+xelpd_plane_load_luts(struct intel_dsb *dsb, const struct intel_plane_state *plane_state)
+{
+	if (plane_state->hw.degamma_lut)
+		xelpd_program_plane_pre_csc_lut(dsb, plane_state);
+
+	if (plane_state->hw.gamma_lut)
+		xelpd_program_plane_post_csc_lut(dsb, plane_state);
+}
+
+static u32 glk_3dlut_10(const struct drm_color_lut32 *color)
+{
+	return REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, drm_color_lut32_extract(color->red, 10)) |
+		REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, drm_color_lut32_extract(color->green, 10)) |
+		REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, drm_color_lut32_extract(color->blue, 10));
+}
+
+static void glk_load_lut_3d(struct intel_dsb *dsb,
+			    struct intel_crtc *crtc,
+			    const struct drm_property_blob *blob)
+{
+	struct intel_display *display = to_intel_display(crtc->base.dev);
+	const struct drm_color_lut32 *lut = blob->data;
+	int i, lut_size = drm_color_lut32_size(blob);
+	enum pipe pipe = crtc->pipe;
+
+	if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+		drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not loading LUTs\n",
+			crtc->base.base.id, crtc->base.name);
+		return;
+	}
+
+	intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), LUT_3D_AUTO_INCREMENT);
+	for (i = 0; i < lut_size; i++)
+		intel_de_write_dsb(display, dsb, LUT_3D_DATA(pipe), glk_3dlut_10(&lut[i]));
+	intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), 0);
+}
+
+static void glk_lut_3d_commit(struct intel_dsb *dsb, struct intel_crtc *crtc, bool enable)
+{
+	struct intel_display *display = to_intel_display(crtc);
+	enum pipe pipe = crtc->pipe;
+	u32 val = 0;
+
+	if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+		drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not committing change\n",
+			crtc->base.base.id, crtc->base.name);
+		return;
+	}
+
+	if (enable)
+		val = LUT_3D_ENABLE | LUT_3D_READY | LUT_3D_BIND_PLANE_1;
+
+	intel_de_write_dsb(display, dsb, LUT_3D_CTL(pipe), val);
+}
+
 static const struct intel_color_funcs chv_color_funcs = {
 	.color_check = chv_color_check,
 	.color_commit_arm = i9xx_color_commit_arm,

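The branch ladder in xelpd_load_plane_csc_matrix() packs each coefficient into a float-like register format: bit 15 carries the sign, bits 14:12 select an exponent bucket, and ILK_CSC_COEFF_FP() places the mantissa with bucket-dependent precision. A hedged sketch of just the bucket selection — FP() and the out-parameters are illustrative helpers, not kernel API:

#include <stdint.h>

#define FP(x) ((uint64_t)((x) * 4294967296.0))    /* plain value -> S31.32 */

/* Mirror of the ladder above: exponent code and mantissa precision. */
static void csc_coeff_bucket(uint64_t abs_coeff, unsigned int *exp, unsigned int *frac_bits)
{
    if (abs_coeff < FP(0.125)) {
        *exp = 3; *frac_bits = 12;    /* [0, 0.125): finest step */
    } else if (abs_coeff < FP(0.25)) {
        *exp = 2; *frac_bits = 11;
    } else if (abs_coeff < FP(0.5)) {
        *exp = 1; *frac_bits = 10;
    } else if (abs_coeff < FP(1.0)) {
        *exp = 0; *frac_bits = 9;
    } else if (abs_coeff < FP(2.0)) {
        *exp = 7; *frac_bits = 8;
    } else {
        *exp = 6; *frac_bits = 7;     /* up to the CTM_COEFF_4_0 clamp */
    }
}

So 0.75 is stored with exponent code 0 and 9 fractional bits, while 1.5 drops to 8 fractional bits under code 7: precision halves each time the magnitude range doubles.
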
@@ -3883,6 +4155,8 @@ static const struct intel_color_funcs tgl_color_funcs = {
 	.lut_equal = icl_lut_equal,
 	.read_csc = icl_read_csc,
 	.get_config = skl_get_config,
+	.load_plane_csc_matrix = xelpd_load_plane_csc_matrix,
+	.load_plane_luts = xelpd_plane_load_luts,
 };
 
 static const struct intel_color_funcs icl_color_funcs = {

@@ -3963,6 +4237,67 @@ static const struct intel_color_funcs ilk_color_funcs = {
 	.get_config = ilk_get_config,
 };
 
+void intel_color_plane_commit_arm(struct intel_dsb *dsb,
+				  const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+
+	if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
+		glk_lut_3d_commit(dsb, crtc, !!plane_state->hw.lut_3d);
+}
+
+static void
+intel_color_load_plane_csc_matrix(struct intel_dsb *dsb,
+				  const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+
+	if (display->funcs.color->load_plane_csc_matrix)
+		display->funcs.color->load_plane_csc_matrix(dsb, plane_state);
+}
+
+static void
+intel_color_load_plane_luts(struct intel_dsb *dsb,
+			    const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+
+	if (display->funcs.color->load_plane_luts)
+		display->funcs.color->load_plane_luts(dsb, plane_state);
+}
+
+bool
+intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe)
+{
+	if (DISPLAY_VER(display) >= 12)
+		return pipe == PIPE_A || pipe == PIPE_B;
+	else
+		return false;
+}
+
+static void
+intel_color_load_3dlut(struct intel_dsb *dsb,
+		       const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+
+	if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
+		glk_load_lut_3d(dsb, crtc, plane_state->hw.lut_3d);
+}
+
+void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
+					const struct intel_plane_state *plane_state)
+{
+	if (plane_state->hw.ctm)
+		intel_color_load_plane_csc_matrix(dsb, plane_state);
+	if (plane_state->hw.degamma_lut || plane_state->hw.gamma_lut)
+		intel_color_load_plane_luts(dsb, plane_state);
+	if (plane_state->hw.lut_3d)
+		intel_color_load_3dlut(dsb, plane_state);
+}
+
 void intel_color_crtc_init(struct intel_crtc *crtc)
 {
 	struct intel_display *display = to_intel_display(crtc);

@@ -13,7 +13,9 @@ struct intel_crtc_state;
 struct intel_crtc;
 struct intel_display;
 struct intel_dsb;
+struct intel_plane_state;
 struct drm_property_blob;
+enum pipe;
 
 void intel_color_init_hooks(struct intel_display *display);
 int intel_color_init(struct intel_display *display);

@@ -40,5 +42,9 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
 			   const struct drm_property_blob *blob2,
 			   bool is_pre_csc_lut);
 void intel_color_assert_luts(const struct intel_crtc_state *crtc_state);
+void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
+					const struct intel_plane_state *plane_state);
+void intel_color_plane_commit_arm(struct intel_dsb *dsb,
+				  const struct intel_plane_state *plane_state);
+bool intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe);
 
 #endif /* __INTEL_COLOR_H__ */

@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "intel_color.h"
+#include "intel_colorop.h"
+#include "intel_color_pipeline.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "skl_universal_plane.h"
+
+#define MAX_COLOR_PIPELINES 1
+#define PLANE_DEGAMMA_SIZE 128
+#define PLANE_GAMMA_SIZE 32
+
+static
+int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_enum_list *list,
+				     enum pipe pipe)
+{
+	struct drm_device *dev = plane->dev;
+	struct intel_display *display = to_intel_display(dev);
+	struct drm_colorop *prev_op;
+	struct intel_colorop *colorop;
+	int ret;
+
+	colorop = intel_colorop_create(INTEL_PLANE_CB_PRE_CSC_LUT);
+
+	ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
+						  PLANE_DEGAMMA_SIZE,
+						  DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+						  DRM_COLOROP_FLAG_ALLOW_BYPASS);
+	if (ret)
+		return ret;
+
+	list->type = colorop->base.base.id;
+	list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", colorop->base.base.id);
+
+	/* TODO: handle failures and clean up */
+	prev_op = &colorop->base;
+
+	if (DISPLAY_VER(display) >= 35 &&
+	    intel_color_crtc_has_3dlut(display, pipe) &&
+	    plane->type == DRM_PLANE_TYPE_PRIMARY) {
+		colorop = intel_colorop_create(INTEL_PLANE_CB_3DLUT);
+
+		ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, 17,
+						   DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
+						   true);
+		if (ret)
+			return ret;
+
+		drm_colorop_set_next_property(prev_op, &colorop->base);
+
+		prev_op = &colorop->base;
+	}
+
+	colorop = intel_colorop_create(INTEL_PLANE_CB_CSC);
+	ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane,
+					     DRM_COLOROP_FLAG_ALLOW_BYPASS);
+	if (ret)
+		return ret;
+
+	drm_colorop_set_next_property(prev_op, &colorop->base);
+	prev_op = &colorop->base;
+
+	colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT);
+	ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
+						  PLANE_GAMMA_SIZE,
+						  DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+						  DRM_COLOROP_FLAG_ALLOW_BYPASS);
+	if (ret)
+		return ret;
+
+	drm_colorop_set_next_property(prev_op, &colorop->base);
+
+	return 0;
+}
+
+int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe)
+{
+	struct drm_device *dev = plane->dev;
+	struct intel_display *display = to_intel_display(dev);
+	struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+	int len = 0;
+	int ret;
+
+	/* Currently expose pipeline only for HDR planes */
+	if (!icl_is_hdr_plane(display, to_intel_plane(plane)->id))
+		return 0;
+
+	/* Add pipeline consisting of transfer functions */
+	ret = _intel_color_pipeline_plane_init(plane, &pipelines[len], pipe);
+	if (ret)
+		return ret;
+	len++;
+
+	return drm_plane_create_color_pipeline_property(plane, pipelines, len);
+}

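From userspace, the pipeline built above surfaces as a plane property whose enum entries name chains of colorop KMS objects. A hedged discovery sketch using libdrm follows; the "COLOR_PIPELINE" property name and the colorop property set mentioned below come from the DRM color pipeline series and are assumptions as far as this diff is concerned:

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Return the current value of a named plane property, or 0 if absent. */
static uint64_t plane_prop_value(int fd, uint32_t plane_id, const char *name)
{
    drmModeObjectProperties *props =
        drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
    uint64_t value = 0;

    for (uint32_t i = 0; props && i < props->count_props; i++) {
        drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

        if (prop && !strcmp(prop->name, name))
            value = props->prop_values[i];
        drmModeFreeProperty(prop);
    }
    drmModeFreeObjectProperties(props);
    return value;
}

/*
 * Usage sketch: plane_prop_value(fd, plane, "COLOR_PIPELINE") yields the
 * object id of the first drm_colorop in the chain (the "Color Pipeline %d"
 * enum created above); each colorop is itself a KMS object whose own
 * properties (TYPE, BYPASS, DATA, NEXT in the proposed uAPI) are walked
 * the same way.
 */
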
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOR_PIPELINE_H__
+#define __INTEL_COLOR_PIPELINE_H__
+
+struct drm_plane;
+enum pipe;
+
+int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe);
+
+#endif /* __INTEL_COLOR_PIPELINE_H__ */

@@ -316,4 +316,33 @@
 #define SKL_BOTTOM_COLOR_CSC_ENABLE	REG_BIT(30)
 #define SKL_BOTTOM_COLOR(pipe)		_MMIO_PIPE(pipe, _SKL_BOTTOM_COLOR_A, _SKL_BOTTOM_COLOR_B)
 
+/* 3D LUT */
+#define _LUT_3D_CTL_A			0x490A4
+#define _LUT_3D_CTL_B			0x491A4
+#define LUT_3D_CTL(pipe)		_MMIO_PIPE(pipe, _LUT_3D_CTL_A, _LUT_3D_CTL_B)
+#define LUT_3D_ENABLE			REG_BIT(31)
+#define LUT_3D_READY			REG_BIT(30)
+#define LUT_3D_BINDING_MASK		REG_GENMASK(23, 22)
+#define LUT_3D_BIND_PIPE		REG_FIELD_PREP(LUT_3D_BINDING_MASK, 0)
+#define LUT_3D_BIND_PLANE_1		REG_FIELD_PREP(LUT_3D_BINDING_MASK, 1)
+#define LUT_3D_BIND_PLANE_2		REG_FIELD_PREP(LUT_3D_BINDING_MASK, 2)
+#define LUT_3D_BIND_PLANE_3		REG_FIELD_PREP(LUT_3D_BINDING_MASK, 3)
+
+#define _LUT_3D_INDEX_A			0x490A8
+#define _LUT_3D_INDEX_B			0x491A8
+#define LUT_3D_INDEX(pipe)		_MMIO_PIPE(pipe, _LUT_3D_INDEX_A, _LUT_3D_INDEX_B)
+#define LUT_3D_AUTO_INCREMENT		REG_BIT(13)
+#define LUT_3D_INDEX_VALUE_MASK		REG_GENMASK(12, 0)
+#define LUT_3D_INDEX_VALUE(x)		REG_FIELD_PREP(LUT_3D_INDEX_VALUE_MASK, (x))
+
+#define _LUT_3D_DATA_A			0x490AC
+#define _LUT_3D_DATA_B			0x491AC
+#define LUT_3D_DATA(pipe)		_MMIO_PIPE(pipe, _LUT_3D_DATA_A, _LUT_3D_DATA_B)
+#define LUT_3D_DATA_RED_MASK		REG_GENMASK(29, 20)
+#define LUT_3D_DATA_GREEN_MASK		REG_GENMASK(19, 10)
+#define LUT_3D_DATA_BLUE_MASK		REG_GENMASK(9, 0)
+#define LUT_3D_DATA_RED(x)		REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, (x))
+#define LUT_3D_DATA_GREEN(x)		REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, (x))
+#define LUT_3D_DATA_BLUE(x)		REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, (x))
+
 #endif /* __INTEL_COLOR_REGS_H__ */

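The colorop pipeline exposes this LUT as 17 points per axis, and each entry holds 10 bits per channel, matching the LUT_3D_DATA masks above and the packing done by glk_3dlut_10(). A sketch of filling an identity cube; the axis nesting order is an assumption here, since it is not visible in this diff:

#include <stdint.h>

#define LUT3D_DIM 17    /* 17^3 = 4913 entries, one LUT_3D_DATA write each */

struct rgb10 { uint16_t r, g, b; };    /* 10 bits per channel */

static void fill_identity_3dlut(struct rgb10 lut[LUT3D_DIM * LUT3D_DIM * LUT3D_DIM])
{
    unsigned int i = 0;

    /* Assumed nesting: red outermost, blue fastest-varying. */
    for (unsigned int r = 0; r < LUT3D_DIM; r++)
        for (unsigned int g = 0; g < LUT3D_DIM; g++)
            for (unsigned int b = 0; b < LUT3D_DIM; b++, i++) {
                lut[i].r = r * 1023 / (LUT3D_DIM - 1);
                lut[i].g = g * 1023 / (LUT3D_DIM - 1);
                lut[i].b = b * 1023 / (LUT3D_DIM - 1);
            }
}
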
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "intel_colorop.h"
+
+struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop)
+{
+	return container_of(colorop, struct intel_colorop, base);
+}
+
+struct intel_colorop *intel_colorop_alloc(void)
+{
+	struct intel_colorop *colorop;
+
+	colorop = kzalloc(sizeof(*colorop), GFP_KERNEL);
+	if (!colorop)
+		return ERR_PTR(-ENOMEM);
+
+	return colorop;
+}
+
+struct intel_colorop *intel_colorop_create(enum intel_color_block id)
+{
+	struct intel_colorop *colorop;
+
+	colorop = intel_colorop_alloc();
+
+	if (IS_ERR(colorop))
+		return colorop;
+
+	colorop->id = id;
+
+	return colorop;
+}

@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOROP_H__
+#define __INTEL_COLOROP_H__
+
+#include "intel_display_types.h"
+
+struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop);
+struct intel_colorop *intel_colorop_alloc(void);
+struct intel_colorop *intel_colorop_create(enum intel_color_block id);
+
+#endif /* __INTEL_COLOROP_H__ */

@@ -7304,6 +7304,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
 	struct intel_display *display = to_intel_display(state);
 	struct intel_crtc_state *new_crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
+	unsigned int size = new_crtc_state->plane_color_changed ? 8192 : 1024;
 
 	if (!new_crtc_state->use_flipq &&
 	    !new_crtc_state->use_dsb &&

@@ -7314,10 +7315,12 @@
 	 * Rough estimate:
 	 * ~64 registers per each plane * 8 planes = 512
 	 * Double that for pipe stuff and other overhead.
+	 * ~4913 registers for 3DLUT
+	 * ~200 color registers * 3 HDR planes
 	 */
 	new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
 						       new_crtc_state->use_dsb ||
-						       new_crtc_state->use_flipq ? 1024 : 16);
+						       new_crtc_state->use_flipq ? size : 16);
 	if (!new_crtc_state->dsb_commit) {
 		new_crtc_state->use_flipq = false;
 		new_crtc_state->use_dsb = false;

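The new numbers line up with the pipeline exposed earlier: a 17-points-per-axis 3D LUT needs 17 × 17 × 17 = 4913 data-register writes, and 8192 comfortably covers that plus roughly 200 CSC/LUT registers for each of the three HDR planes, which is why the DSB buffer grows only when plane_color_changed is set.
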
@@ -138,4 +138,13 @@ enum hpd_pin {
 	HPD_NUM_PINS
 };
 
+enum intel_color_block {
+	INTEL_PLANE_CB_PRE_CSC_LUT,
+	INTEL_PLANE_CB_CSC,
+	INTEL_PLANE_CB_POST_CSC_LUT,
+	INTEL_PLANE_CB_3DLUT,
+
+	INTEL_CB_MAX
+};
+
 #endif /* __INTEL_DISPLAY_LIMITS_H__ */

@@ -646,6 +646,7 @@ struct intel_plane_state {
 		enum drm_color_encoding color_encoding;
 		enum drm_color_range color_range;
 		enum drm_scaling_filter scaling_filter;
+		struct drm_property_blob *ctm, *degamma_lut, *gamma_lut, *lut_3d;
 	} hw;
 
 	struct i915_vma *ggtt_vma;

@@ -1391,6 +1392,9 @@ struct intel_crtc_state {
 		u8 silence_period_sym_clocks;
 		u8 lfps_half_cycle_num_of_syms;
 	} alpm_state;
+
+	/* to track changes in plane color blocks */
+	bool plane_color_changed;
 };
 
 enum intel_pipe_crc_source {

@@ -1985,6 +1989,11 @@ struct intel_dp_mst_encoder {
 	struct intel_connector *connector;
 };
 
+struct intel_colorop {
+	struct drm_colorop base;
+	enum intel_color_block id;
+};
+
 static inline struct intel_encoder *
 intel_attached_encoder(struct intel_connector *connector)
 {

@@ -49,6 +49,7 @@
 #include "i9xx_plane_regs.h"
 #include "intel_cdclk.h"
 #include "intel_cursor.h"
+#include "intel_colorop.h"
 #include "intel_display_rps.h"
 #include "intel_display_trace.h"
 #include "intel_display_types.h"

@@ -336,6 +337,58 @@ intel_plane_copy_uapi_plane_damage(struct intel_plane_state *new_plane_state,
 	*damage = drm_plane_state_src(&new_uapi_plane_state->uapi);
 }
 
+static bool
+intel_plane_colorop_replace_blob(struct intel_plane_state *plane_state,
+				 struct intel_colorop *intel_colorop,
+				 struct drm_property_blob *blob)
+{
+	if (intel_colorop->id == INTEL_PLANE_CB_CSC)
+		return drm_property_replace_blob(&plane_state->hw.ctm, blob);
+	else if (intel_colorop->id == INTEL_PLANE_CB_PRE_CSC_LUT)
+		return drm_property_replace_blob(&plane_state->hw.degamma_lut, blob);
+	else if (intel_colorop->id == INTEL_PLANE_CB_POST_CSC_LUT)
+		return drm_property_replace_blob(&plane_state->hw.gamma_lut, blob);
+	else if (intel_colorop->id == INTEL_PLANE_CB_3DLUT)
+		return drm_property_replace_blob(&plane_state->hw.lut_3d, blob);
+
+	return false;
+}
+
+static void
+intel_plane_color_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+					const struct intel_plane_state *from_plane_state,
+					struct intel_crtc *crtc)
+{
+	struct drm_colorop *iter_colorop, *colorop;
+	struct drm_colorop_state *new_colorop_state;
+	struct drm_atomic_state *state = plane_state->uapi.state;
+	struct intel_colorop *intel_colorop;
+	struct drm_property_blob *blob;
+	struct intel_atomic_state *intel_atomic_state = to_intel_atomic_state(state);
+	struct intel_crtc_state *new_crtc_state = intel_atomic_state ?
+		intel_atomic_get_new_crtc_state(intel_atomic_state, crtc) : NULL;
+	bool changed = false;
+	int i = 0;
+
+	iter_colorop = plane_state->uapi.color_pipeline;
+
+	while (iter_colorop) {
+		for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+			if (new_colorop_state->colorop == iter_colorop) {
+				blob = new_colorop_state->bypass ? NULL : new_colorop_state->data;
+				intel_colorop = to_intel_colorop(colorop);
+				changed |= intel_plane_colorop_replace_blob(plane_state,
+									    intel_colorop,
+									    blob);
+			}
+		}
+		iter_colorop = iter_colorop->next;
+	}
+
+	if (new_crtc_state && changed)
+		new_crtc_state->plane_color_changed = true;
+}
+
 void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
 				       const struct intel_plane_state *from_plane_state,
 				       struct intel_crtc *crtc)

@@ -364,6 +417,8 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
 
 	plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi);
 	plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi);
+
+	intel_plane_color_copy_uapi_to_hw_state(plane_state, from_plane_state, crtc);
 }
 
 void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,

@@ -11,6 +11,8 @@
 
 #include "pxp/intel_pxp.h"
 #include "intel_bo.h"
+#include "intel_color.h"
+#include "intel_color_pipeline.h"
 #include "intel_de.h"
 #include "intel_display_irq.h"
 #include "intel_display_regs.h"

@@ -1275,6 +1277,18 @@ static u32 glk_plane_color_ctl(const struct intel_plane_state *plane_state)
 	if (plane_state->force_black)
 		plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
 
+	if (plane_state->hw.degamma_lut)
+		plane_color_ctl |= PLANE_COLOR_PRE_CSC_GAMMA_ENABLE;
+
+	if (plane_state->hw.ctm)
+		plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
+
+	if (plane_state->hw.gamma_lut) {
+		plane_color_ctl &= ~PLANE_COLOR_PLANE_GAMMA_DISABLE;
+		if (drm_color_lut32_size(plane_state->hw.gamma_lut) != 32)
+			plane_color_ctl |= PLANE_COLOR_POST_CSC_GAMMA_MULTSEG_ENABLE;
+	}
+
 	return plane_color_ctl;
 }

@@ -1556,6 +1570,8 @@ icl_plane_update_noarm(struct intel_dsb *dsb,
 	plane_color_ctl = plane_state->color_ctl |
 		glk_plane_color_ctl_crtc(crtc_state);
 
+	intel_color_plane_program_pipeline(dsb, plane_state);
+
 	/* The scaler will handle the output position */
 	if (plane_state->scaler_id >= 0) {
 		crtc_x = 0;

@@ -1657,6 +1673,8 @@ icl_plane_update_arm(struct intel_dsb *dsb,
 
 	icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
 
+	intel_color_plane_commit_arm(dsb, plane_state);
+
 	/*
 	 * In order to have FBC for fp16 formats pixel normalizer block must be
 	 * active. Check if pixel normalizer block need to be enabled for FBC.

@@ -3001,6 +3019,9 @@ skl_universal_plane_create(struct intel_display *display,
 					  DRM_COLOR_YCBCR_BT709,
 					  DRM_COLOR_YCBCR_LIMITED_RANGE);
 
+	if (DISPLAY_VER(display) >= 12)
+		intel_color_pipeline_plane_init(&plane->base, pipe);
+
 	drm_plane_create_alpha_property(&plane->base);
 	drm_plane_create_blend_mode_property(&plane->base,
 					     BIT(DRM_MODE_BLEND_PIXEL_NONE) |

@@ -254,6 +254,8 @@
 #define   PLANE_COLOR_PIPE_CSC_ENABLE		REG_BIT(23) /* Pre-ICL */
 #define   PLANE_COLOR_PLANE_CSC_ENABLE		REG_BIT(21) /* ICL+ */
 #define   PLANE_COLOR_INPUT_CSC_ENABLE		REG_BIT(20) /* ICL+ */
+#define   PLANE_COLOR_POST_CSC_GAMMA_MULTSEG_ENABLE	REG_BIT(15) /* TGL+ */
+#define   PLANE_COLOR_PRE_CSC_GAMMA_ENABLE	REG_BIT(14)
 #define   PLANE_COLOR_CSC_MODE_MASK		REG_GENMASK(19, 17)
 #define   PLANE_COLOR_CSC_MODE_BYPASS			REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 0)
 #define   PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601		REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 1)

@@ -290,6 +292,119 @@
 					     _PLANE_INPUT_CSC_POSTOFF_HI_1_A, _PLANE_INPUT_CSC_POSTOFF_HI_1_B, \
 					     _PLANE_INPUT_CSC_POSTOFF_HI_2_A, _PLANE_INPUT_CSC_POSTOFF_HI_2_B)
 
+#define _MMIO_PLANE_GAMC(plane, i, a, b)	_MMIO(_PIPE(plane, a, b) + (i) * 4)
+
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_A	0x70160
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_B	0x71160
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_A	0x70260
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_B	0x71260
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_A, \
+							_PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_A, \
+							_PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1(pipe), \
+								_PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_A	0x70164
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_B	0x71164
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_A	0x70264
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_B	0x71264
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_A, \
+							_PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_A, \
+							_PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_SEG0_DATA_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1(pipe), \
+								_PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1_A	0x701d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1_B	0x711d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2_A	0x702d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2_B	0x712d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_ENH_1_A, \
+						_PLANE_POST_CSC_GAMC_INDEX_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_ENH_2_A, \
+						_PLANE_POST_CSC_GAMC_INDEX_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_INDEX_ENH_1(pipe), \
+							_PLANE_POST_CSC_GAMC_INDEX_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1_A	0x701dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1_B	0x711dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2_A	0x702dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2_B	0x712dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_ENH_1_A, \
+						_PLANE_POST_CSC_GAMC_DATA_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_ENH_2_A, \
+						_PLANE_POST_CSC_GAMC_DATA_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_DATA_ENH_1(pipe), \
+							_PLANE_POST_CSC_GAMC_DATA_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_INDEX_1_A	0x704d8
+#define _PLANE_POST_CSC_GAMC_INDEX_1_B	0x714d8
+#define _PLANE_POST_CSC_GAMC_INDEX_2_A	0x705d8
+#define _PLANE_POST_CSC_GAMC_INDEX_2_B	0x715d8
+#define _PLANE_POST_CSC_GAMC_INDEX_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_1_A, \
+						_PLANE_POST_CSC_GAMC_INDEX_1_B)
+#define _PLANE_POST_CSC_GAMC_INDEX_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_2_A, \
+						_PLANE_POST_CSC_GAMC_INDEX_2_B)
+#define PLANE_POST_CSC_GAMC_INDEX(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_INDEX_1(pipe), \
+							_PLANE_POST_CSC_GAMC_INDEX_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_DATA_1_A	0x704dc
+#define _PLANE_POST_CSC_GAMC_DATA_1_B	0x714dc
+#define _PLANE_POST_CSC_GAMC_DATA_2_A	0x705dc
+#define _PLANE_POST_CSC_GAMC_DATA_2_B	0x715dc
+#define _PLANE_POST_CSC_GAMC_DATA_1(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_1_A, \
+						_PLANE_POST_CSC_GAMC_DATA_1_B)
+#define _PLANE_POST_CSC_GAMC_DATA_2(pipe)	_PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_2_A, \
+						_PLANE_POST_CSC_GAMC_DATA_2_B)
+#define PLANE_POST_CSC_GAMC_DATA(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_DATA_1(pipe), \
+							_PLANE_POST_CSC_GAMC_DATA_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_A	0x701d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_B	0x711d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_A	0x702d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_B	0x712d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_A, \
+						_PLANE_PRE_CSC_GAMC_INDEX_ENH_1_B)
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_A, \
+						_PLANE_PRE_CSC_GAMC_INDEX_ENH_2_B)
+#define PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_INDEX_ENH_1(pipe), \
+							_PLANE_PRE_CSC_GAMC_INDEX_ENH_2(pipe))
+#define   PLANE_PAL_PREC_AUTO_INCREMENT		REG_BIT(10)
+
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1_A	0x701d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1_B	0x711d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2_A	0x702d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2_B	0x712d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_ENH_1_A, \
+						_PLANE_PRE_CSC_GAMC_DATA_ENH_1_B)
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_ENH_2_A, \
+						_PLANE_PRE_CSC_GAMC_DATA_ENH_2_B)
+#define PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_DATA_ENH_1(pipe), \
+							_PLANE_PRE_CSC_GAMC_DATA_ENH_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_INDEX_1_A	0x704d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_1_B	0x714d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_2_A	0x705d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_2_B	0x715d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_1(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_1_A, \
+						_PLANE_PRE_CSC_GAMC_INDEX_1_B)
+#define _PLANE_PRE_CSC_GAMC_INDEX_2(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_2_A, \
+						_PLANE_PRE_CSC_GAMC_INDEX_2_B)
+#define PLANE_PRE_CSC_GAMC_INDEX(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_INDEX_1(pipe), \
+							_PLANE_PRE_CSC_GAMC_INDEX_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_DATA_1_A	0x704d4
+#define _PLANE_PRE_CSC_GAMC_DATA_1_B	0x714d4
+#define _PLANE_PRE_CSC_GAMC_DATA_2_A	0x705d4
+#define _PLANE_PRE_CSC_GAMC_DATA_2_B	0x715d4
+#define _PLANE_PRE_CSC_GAMC_DATA_1(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_1_A, \
+						_PLANE_PRE_CSC_GAMC_DATA_1_B)
+#define _PLANE_PRE_CSC_GAMC_DATA_2(pipe)	_PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_2_A, \
+						_PLANE_PRE_CSC_GAMC_DATA_2_B)
+#define PLANE_PRE_CSC_GAMC_DATA(pipe, plane, i)	_MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_DATA_1(pipe), \
+						_PLANE_PRE_CSC_GAMC_DATA_2(pipe))
+
 #define _PLANE_CSC_RY_GY_1_A	0x70210
 #define _PLANE_CSC_RY_GY_2_A	0x70310
 #define _PLANE_CSC_RY_GY_1_B	0x71210

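A worked example of the indexing scheme: PLANE_PRE_CSC_GAMC_DATA_ENH(PIPE_A, PLANE_1, 0) resolves via _MMIO_PLANE_GAMC to the _1_A offset 0x701d4 plus i * 4 — assuming PLANE_1 is 0 in the plane id enum, so _PIPE() selects the _1 (first HDR plane) variant and pipe A selects _A. Consecutive i values therefore step through adjacent 32-bit data registers, which is what lets the LUT loops above stream entries with auto-increment.
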
@@ -184,6 +184,10 @@ xe-$(CONFIG_PCI_IOV) += \
 	xe_sriov_pf_sysfs.o \
 	xe_tile_sriov_pf_debugfs.o
 
+ifdef CONFIG_XE_VFIO_PCI
+xe-$(CONFIG_PCI_IOV) += xe_sriov_vfio.o
+endif
+
 # include helpers for tests even when XE is built-in
 ifdef CONFIG_DRM_XE_KUNIT_TEST
 xe-y += tests/xe_kunit_helpers.o

@@ -242,6 +246,8 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
 	i915-display/intel_cdclk.o \
 	i915-display/intel_cmtg.o \
 	i915-display/intel_color.o \
+	i915-display/intel_colorop.o \
+	i915-display/intel_color_pipeline.o \
 	i915-display/intel_combo_phy.o \
 	i915-display/intel_connector.o \
 	i915-display/intel_crtc.o \

@@ -54,13 +54,14 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
 static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
 {
 	struct drm_sched_job *s_job;
+	bool restore_replay = false;
 
 	list_for_each_entry(s_job, &sched->base.pending_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 		struct dma_fence *hw_fence = s_fence->parent;
 
-		if (to_xe_sched_job(s_job)->skip_emit ||
-		    (hw_fence && !dma_fence_is_signaled(hw_fence)))
+		restore_replay |= to_xe_sched_job(s_job)->restore_replay;
+		if (restore_replay || (hw_fence && !dma_fence_is_signaled(hw_fence)))
 			sched->base.ops->run_job(s_job);
 	}
 }

@@ -711,7 +711,7 @@ static u64 pf_profile_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
 	if (num_vfs > 56)
 		return SZ_64M - SZ_8M;
 
-	return rounddown_pow_of_two(shareable / num_vfs);
+	return rounddown_pow_of_two(div_u64(shareable, num_vfs));
 }
 
 /**

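This is the "Fix a 64-bit division" item from the shortlog: on 32-bit targets a plain u64 division compiles into a libgcc helper call (__udivdi3) that the kernel does not provide, so 64-bit dividends must go through the math64 helpers. A minimal sketch of the pattern:

#include <linux/math64.h>
#include <linux/types.h>

/* u64 / u32 division that links on both 32-bit and 64-bit kernels. */
static u64 fair_share(u64 shareable, unsigned int num_vfs)
{
    return div_u64(shareable, num_vfs);
}
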
@@ -17,6 +17,7 @@
 #include "xe_gt_sriov_pf_helpers.h"
 #include "xe_gt_sriov_pf_migration.h"
 #include "xe_gt_sriov_printk.h"
+#include "xe_guc.h"
 #include "xe_guc_buf.h"
 #include "xe_guc_ct.h"
 #include "xe_migrate.h"

@@ -1023,6 +1024,12 @@ static void action_ring_cleanup(void *arg)
 	ptr_ring_cleanup(r, destroy_pf_packet);
 }
 
+static void pf_gt_migration_check_support(struct xe_gt *gt)
+{
+	if (GUC_FIRMWARE_VER(&gt->uc.guc) < MAKE_GUC_VER(70, 54, 0))
+		xe_sriov_pf_migration_disable(gt_to_xe(gt), "requires GuC version >= 70.54.0");
+}
+
 /**
  * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
  * @gt: the &xe_gt

@@ -1039,6 +1046,8 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
 
 	xe_gt_assert(gt, IS_SRIOV_PF(xe));
 
+	pf_gt_migration_check_support(gt);
+
 	if (!pf_migration_supported(gt))
 		return 0;

@@ -822,7 +822,7 @@ static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)
 
 	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
 
-	if (!job->skip_emit || job->last_replay) {
+	if (!job->restore_replay || job->last_replay) {
 		if (xe_exec_queue_is_parallel(q))
 			wq_item_append(q);
 		else

@@ -881,10 +881,10 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
 	if (!killed_or_banned_or_wedged && !xe_sched_job_is_error(job)) {
 		if (!exec_queue_registered(q))
 			register_exec_queue(q, GUC_CONTEXT_NORMAL);
-		if (!job->skip_emit)
+		if (!job->restore_replay)
 			q->ring_ops->emit_job(job);
 		submit_exec_queue(q, job);
-		job->skip_emit = false;
+		job->restore_replay = false;
 	}
 
 	/*

|
@ -2112,6 +2112,18 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc,
|
||||||
q->guc->resume_time = 0;
|
q->guc->resume_time = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void lrc_parallel_clear(struct xe_lrc *lrc)
|
||||||
|
{
|
||||||
|
struct xe_device *xe = gt_to_xe(lrc->gt);
|
||||||
|
struct iosys_map map = xe_lrc_parallel_map(lrc);
|
||||||
|
int i;
|
||||||
|
|
||||||
|
for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
|
||||||
|
parallel_write(xe, map, wq[i],
|
||||||
|
FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
|
||||||
|
FIELD_PREP(WQ_LEN_MASK, 0));
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This function is quite complex but only real way to ensure no state is lost
|
* This function is quite complex but only real way to ensure no state is lost
|
||||||
* during VF resume flows. The function scans the queue state, make adjustments
|
* during VF resume flows. The function scans the queue state, make adjustments
|
||||||
|
|
@@ -2135,8 +2147,8 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
 	guc_exec_queue_revert_pending_state_change(guc, q);
 
 	if (xe_exec_queue_is_parallel(q)) {
-		struct xe_device *xe = guc_to_xe(guc);
-		struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
+		/* Pairs with WRITE_ONCE in __xe_exec_queue_init */
+		struct xe_lrc *lrc = READ_ONCE(q->lrc[0]);
 
 		/*
 		 * NOP existing WQ commands that may contain stale GGTT

@@ -2144,14 +2156,14 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
 		 * seems to get confused if the WQ head/tail pointers are
 		 * adjusted.
 		 */
-		for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
-			parallel_write(xe, map, wq[i],
-				       FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
-				       FIELD_PREP(WQ_LEN_MASK, 0));
+		if (lrc)
+			lrc_parallel_clear(lrc);
 	}

 	job = xe_sched_first_pending_job(sched);
 	if (job) {
+		job->restore_replay = true;
+
 		/*
 		 * Adjust software tail so jobs submitted overwrite previous
 		 * position in ring buffer with new GGTT addresses.
@@ -2241,17 +2253,18 @@ static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
 					   struct xe_exec_queue *q)
 {
 	struct xe_gpu_scheduler *sched = &q->guc->sched;
-	struct drm_sched_job *s_job;
 	struct xe_sched_job *job = NULL;
+	bool restore_replay = false;

-	list_for_each_entry(s_job, &sched->base.pending_list, list) {
-		job = to_xe_sched_job(s_job);
+	list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+		restore_replay |= job->restore_replay;
+		if (restore_replay) {
 			xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
 				  q->guc->id, xe_sched_job_seqno(job));

 			q->ring_ops->emit_job(job);
-			job->skip_emit = true;
+			job->restore_replay = true;
+		}
 	}

 	if (job)
@@ -102,7 +102,6 @@ static int xe_pagefault_handle_vma(struct xe_gt *gt, struct xe_vma *vma,

 	/* Lock VM and BOs dma-resv */
 	xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
-	drm_exec_init(&exec, 0, 0);
 	drm_exec_until_all_locked(&exec) {
 		err = xe_pagefault_begin(&exec, vma, tile->mem.vram,
 					 needs_vram == 1);
@@ -1223,6 +1223,23 @@ static struct pci_driver xe_pci_driver = {
 #endif
 };

+/**
+ * xe_pci_to_pf_device() - Get PF &xe_device.
+ * @pdev: the VF &pci_dev device
+ *
+ * Return: pointer to PF &xe_device, NULL otherwise.
+ */
+struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev)
+{
+	struct drm_device *drm;
+
+	drm = pci_iov_get_pf_drvdata(pdev, &xe_pci_driver);
+	if (IS_ERR(drm))
+		return NULL;
+
+	return to_xe_device(drm);
+}
+
 int xe_register_pci_driver(void)
 {
 	return pci_register_driver(&xe_pci_driver);
@@ -6,7 +6,10 @@
 #ifndef _XE_PCI_H_
 #define _XE_PCI_H_

+struct pci_dev;
+
 int xe_register_pci_driver(void);
 void xe_unregister_pci_driver(void);
+struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev);

 #endif
@@ -726,6 +726,13 @@ static void xe_pm_runtime_lockdep_prime(void)
 /**
  * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
  * @xe: xe device instance
+ *
+ * When possible, scope-based runtime PM (through guard(xe_pm_runtime)) is
+ * preferred over direct usage of this function. Manual get/put handling
+ * should only be used when the function contains goto-based logic which
+ * can break scope-based handling, or when the lifetime of the runtime PM
+ * reference does not match a specific scope (e.g., runtime PM obtained in one
+ * function and released in a different one).
  */
 void xe_pm_runtime_get(struct xe_device *xe)
 {
@@ -758,6 +765,13 @@ void xe_pm_runtime_put(struct xe_device *xe)
  * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
  * @xe: xe device instance
  *
+ * When possible, scope-based runtime PM (through
+ * ACQUIRE(xe_pm_runtime_ioctl, ...)) is preferred over direct usage of this
+ * function. Manual get/put handling should only be used when the function
+ * contains goto-based logic which can break scope-based handling, or when the
+ * lifetime of the runtime PM reference does not match a specific scope (e.g.,
+ * runtime PM obtained in one function and released in a different one).
+ *
  * Returns: Any number greater than or equal to 0 for success, negative error
  * code otherwise.
  */
@@ -827,6 +841,13 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
  * It will warn if not protected.
  * The reference should be put back after this function regardless, since it
  * will always bump the usage counter, regardless.
+ *
+ * When possible, scope-based runtime PM (through guard(xe_pm_runtime_noresume))
+ * is preferred over direct usage of this function. Manual get/put handling
+ * should only be used when the function contains goto-based logic which can
+ * break scope-based handling, or when the lifetime of the runtime PM reference
+ * does not match a specific scope (e.g., runtime PM obtained in one function
+ * and released in a different one).
  */
 void xe_pm_runtime_get_noresume(struct xe_device *xe)
 {
@@ -6,6 +6,7 @@
 #ifndef _XE_PM_H_
 #define _XE_PM_H_

+#include <linux/cleanup.h>
 #include <linux/pm_runtime.h>

 #define DEFAULT_VRAM_THRESHOLD 300 /* in MB */
@@ -37,4 +38,20 @@ int xe_pm_block_on_suspend(struct xe_device *xe);
 void xe_pm_might_block_on_suspend(void);
 int xe_pm_module_init(void);

+static inline void __xe_pm_runtime_noop(struct xe_device *xe) {}
+
+DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
+	     xe_pm_runtime_get(_T), xe_pm_runtime_put(_T))
+DEFINE_GUARD(xe_pm_runtime_noresume, struct xe_device *,
+	     xe_pm_runtime_get_noresume(_T), xe_pm_runtime_put(_T))
+DEFINE_GUARD_COND(xe_pm_runtime, _ioctl, xe_pm_runtime_get_ioctl(_T), _RET >= 0)
+
+/*
+ * Used when a function needs to release runtime PM in all possible cases
+ * and error paths, but the wakeref was already acquired by a different
+ * function (i.e., get() has already happened so only a put() is needed).
+ */
+DEFINE_GUARD(xe_pm_runtime_release_only, struct xe_device *,
+	     __xe_pm_runtime_noop(_T), xe_pm_runtime_put(_T));
+
 #endif
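These guards tie xe_pm_runtime_get()/xe_pm_runtime_put() into the kernel's scope-based cleanup machinery from <linux/cleanup.h>. A minimal sketch of the intended call pattern follows; it is not part of the patch, and the caller xe_query_something() with its body is hypothetical:

/* Hypothetical caller illustrating the new scope-based runtime PM guards. */
static int xe_query_something(struct xe_device *xe, u32 *out)
{
	/* Wakeref acquired here; dropped automatically on every return path. */
	guard(xe_pm_runtime)(xe);

	if (!out)
		return -EINVAL;	/* early return still releases the wakeref */

	*out = 0;		/* placeholder for the actual hardware access */
	return 0;
}

The scoped_guard(xe_pm_runtime_noresume, xe) { ... } form from the same machinery limits the wakeref to a single block rather than the whole function.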
@@ -63,8 +63,8 @@ struct xe_sched_job {
 	bool ring_ops_flush_tlb;
 	/** @ggtt: mapped in ggtt. */
 	bool ggtt;
-	/** @skip_emit: skip emitting the job */
-	bool skip_emit;
+	/** @restore_replay: job being replayed for restore */
+	bool restore_replay;
 	/** @last_replay: last job being replayed */
 	bool last_replay;
 	/** @ptrs: per instance pointers. */
@@ -46,13 +46,37 @@ bool xe_sriov_pf_migration_supported(struct xe_device *xe)
 {
 	xe_assert(xe, IS_SRIOV_PF(xe));

-	return xe->sriov.pf.migration.supported;
+	return IS_ENABLED(CONFIG_DRM_XE_DEBUG) || !xe->sriov.pf.migration.disabled;
 }

-static bool pf_check_migration_support(struct xe_device *xe)
+/**
+ * xe_sriov_pf_migration_disable() - Turn off SR-IOV VF migration support on PF.
+ * @xe: the &xe_device instance.
+ * @fmt: format string for the log message, to be combined with following VAs.
+ */
+void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...)
 {
-	/* XXX: for now this is for feature enabling only */
-	return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+	struct va_format vaf;
+	va_list va_args;
+
+	xe_assert(xe, IS_SRIOV_PF(xe));
+
+	va_start(va_args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &va_args;
+	xe_sriov_notice(xe, "migration %s: %pV\n",
+			IS_ENABLED(CONFIG_DRM_XE_DEBUG) ?
+			"missing prerequisite" : "disabled",
+			&vaf);
+	va_end(va_args);
+
+	xe->sriov.pf.migration.disabled = true;
+}
+
+static void pf_migration_check_support(struct xe_device *xe)
+{
+	if (!xe_device_has_memirq(xe))
+		xe_sriov_pf_migration_disable(xe, "requires memory-based IRQ support");
 }

 static void pf_migration_cleanup(void *arg)
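xe_sriov_pf_migration_disable() forwards the caller's printf-style reason through a struct va_format and the kernel's %pV specifier, the standard idiom for nesting one format string inside another. A standalone sketch of that idiom, not from the patch (my_notice() is a hypothetical helper):

#include <linux/printk.h>

/* Hypothetical helper showing the struct va_format + %pV nesting idiom. */
static void my_notice(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_notice("migration disabled: %pV\n", &vaf);
	va_end(args);
}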
@@ -77,7 +101,8 @@ int xe_sriov_pf_migration_init(struct xe_device *xe)

 	xe_assert(xe, IS_SRIOV_PF(xe));

-	xe->sriov.pf.migration.supported = pf_check_migration_support(xe);
+	pf_migration_check_support(xe);
+
 	if (!xe_sriov_pf_migration_supported(xe))
 		return 0;
@@ -14,6 +14,7 @@ struct xe_sriov_packet;

 int xe_sriov_pf_migration_init(struct xe_device *xe);
 bool xe_sriov_pf_migration_supported(struct xe_device *xe);
+void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...);
 int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid,
 					  struct xe_sriov_packet *data);
 struct xe_sriov_packet *
@@ -14,8 +14,8 @@
  * struct xe_sriov_pf_migration - Xe device level VF migration data
  */
 struct xe_sriov_pf_migration {
-	/** @supported: indicates whether VF migration feature is supported */
-	bool supported;
+	/** @disabled: indicates whether VF migration feature is disabled */
+	bool disabled;
 };

 /**
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/intel/xe_sriov_vfio.h>
+#include <linux/cleanup.h>
+
+#include "xe_pci.h"
+#include "xe_pm.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_migration.h"
+
+struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev)
+{
+	return xe_pci_to_pf_device(pdev);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_get_pf, "xe-vfio-pci");
+
+bool xe_sriov_vfio_migration_supported(struct xe_device *xe)
+{
+	if (!IS_SRIOV_PF(xe))
+		return -EPERM;
+
+	return xe_sriov_pf_migration_supported(xe);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_migration_supported, "xe-vfio-pci");
+
+#define DEFINE_XE_SRIOV_VFIO_FUNCTION(_type, _func, _impl) \
+_type xe_sriov_vfio_##_func(struct xe_device *xe, unsigned int vfid) \
+{ \
+	if (!IS_SRIOV_PF(xe)) \
+		return -EPERM; \
+	if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe)) \
+		return -EINVAL; \
+\
+	guard(xe_pm_runtime_noresume)(xe); \
+\
+	return xe_sriov_pf_##_impl(xe, vfid); \
+} \
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_##_func, "xe-vfio-pci")
+
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, wait_flr_done, control_wait_flr);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, suspend_device, control_pause_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_device, control_resume_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_enter, control_trigger_save_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_exit, control_finish_save_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_enter, control_trigger_restore_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_exit, control_finish_restore_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, error, control_stop_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(ssize_t, stop_copy_size, migration_size);
+
+ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
+				char __user *buf, size_t len)
+{
+	if (!IS_SRIOV_PF(xe))
+		return -EPERM;
+	if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
+		return -EINVAL;
+
+	guard(xe_pm_runtime_noresume)(xe);
+
+	return xe_sriov_pf_migration_read(xe, vfid, buf, len);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_read, "xe-vfio-pci");
+
+ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
+				 const char __user *buf, size_t len)
+{
+	if (!IS_SRIOV_PF(xe))
+		return -EPERM;
+	if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
+		return -EINVAL;
+
+	guard(xe_pm_runtime_noresume)(xe);
+
+	return xe_sriov_pf_migration_write(xe, vfid, buf, len);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_write, "xe-vfio-pci");
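For reference, each DEFINE_XE_SRIOV_VFIO_FUNCTION() line above expands to a thin exported wrapper; written out for the suspend_device case it is roughly the following (an illustration of the macro expansion, not extra code in the patch):

int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid)
{
	if (!IS_SRIOV_PF(xe))
		return -EPERM;
	if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
		return -EINVAL;

	/* scope-based wakeref, released automatically on return */
	guard(xe_pm_runtime_noresume)(xe);

	return xe_sriov_pf_control_pause_vf(xe, vfid);
}
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_suspend_device, "xe-vfio-pci");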
@@ -70,4 +70,6 @@ source "drivers/vfio/pci/nvgrace-gpu/Kconfig"

 source "drivers/vfio/pci/qat/Kconfig"

+source "drivers/vfio/pci/xe/Kconfig"
+
 endmenu
@@ -20,3 +20,5 @@ obj-$(CONFIG_VIRTIO_VFIO_PCI) += virtio/
 obj-$(CONFIG_NVGRACE_GPU_VFIO_PCI) += nvgrace-gpu/

 obj-$(CONFIG_QAT_VFIO_PCI) += qat/
+
+obj-$(CONFIG_XE_VFIO_PCI) += xe/
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config XE_VFIO_PCI
+	tristate "VFIO support for Intel Graphics"
+	depends on DRM_XE && PCI_IOV
+	select VFIO_PCI_CORE
+	help
+	  This option enables device specific VFIO driver variant for Intel Graphics.
+	  In addition to generic VFIO PCI functionality, it implements VFIO
+	  migration uAPI allowing userspace to enable migration for
+	  Intel Graphics SR-IOV Virtual Functions supported by the Xe driver.
+
+	  If you don't know what to do here, say N.
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_XE_VFIO_PCI) += xe-vfio-pci.o
+xe-vfio-pci-y := main.o
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+#include <linux/vfio.h>
+#include <linux/vfio_pci_core.h>
+
+#include <drm/intel/xe_sriov_vfio.h>
+#include <drm/intel/pciids.h>
+
+struct xe_vfio_pci_migration_file {
+	struct file *filp;
+	/* serializes accesses to migration data */
+	struct mutex lock;
+	struct xe_vfio_pci_core_device *xe_vdev;
+	u8 disabled:1;
+};
+
+struct xe_vfio_pci_core_device {
+	struct vfio_pci_core_device core_device;
+	struct xe_device *xe;
+	/* PF internal control uses vfid index starting from 1 */
+	unsigned int vfid;
+	u8 deferred_reset:1;
+	/* protects migration state */
+	struct mutex state_mutex;
+	enum vfio_device_mig_state mig_state;
+	/* protects the reset_done flow */
+	spinlock_t reset_lock;
+	struct xe_vfio_pci_migration_file *migf;
+};
+
+#define xe_vdev_to_dev(xe_vdev) (&(xe_vdev)->core_device.pdev->dev)
+
+static void xe_vfio_pci_disable_file(struct xe_vfio_pci_migration_file *migf)
+{
+	mutex_lock(&migf->lock);
+	migf->disabled = true;
+	mutex_unlock(&migf->lock);
+}
+
+static void xe_vfio_pci_put_file(struct xe_vfio_pci_core_device *xe_vdev)
+{
+	xe_vfio_pci_disable_file(xe_vdev->migf);
+	fput(xe_vdev->migf->filp);
+	xe_vdev->migf = NULL;
+}
+
+static void xe_vfio_pci_reset(struct xe_vfio_pci_core_device *xe_vdev)
+{
+	if (xe_vdev->migf)
+		xe_vfio_pci_put_file(xe_vdev);
+
+	xe_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+}
+
+static void xe_vfio_pci_state_mutex_lock(struct xe_vfio_pci_core_device *xe_vdev)
+{
+	mutex_lock(&xe_vdev->state_mutex);
+}
+
+/*
+ * This function is called in all state_mutex unlock cases to
+ * handle a 'deferred_reset' if exists.
+ */
+static void xe_vfio_pci_state_mutex_unlock(struct xe_vfio_pci_core_device *xe_vdev)
+{
+again:
+	spin_lock(&xe_vdev->reset_lock);
+	if (xe_vdev->deferred_reset) {
+		xe_vdev->deferred_reset = false;
+		spin_unlock(&xe_vdev->reset_lock);
+		xe_vfio_pci_reset(xe_vdev);
+		goto again;
+	}
+	mutex_unlock(&xe_vdev->state_mutex);
+	spin_unlock(&xe_vdev->reset_lock);
+}
+
+static void xe_vfio_pci_reset_done(struct pci_dev *pdev)
+{
+	struct xe_vfio_pci_core_device *xe_vdev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (!pdev->is_virtfn)
+		return;
+
+	/*
+	 * VF FLR requires additional processing done by PF driver.
+	 * The processing is done after FLR is already finished from PCIe
+	 * perspective.
+	 * In order to avoid a scenario where VF is used while PF processing
+	 * is still in progress, additional synchronization point is needed.
+	 */
+	ret = xe_sriov_vfio_wait_flr_done(xe_vdev->xe, xe_vdev->vfid);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to wait for FLR: %d\n", ret);
+
+	if (!xe_vdev->vfid)
+		return;
+
+	/*
+	 * As the higher VFIO layers are holding locks across reset and using
+	 * those same locks with the mm_lock we need to prevent ABBA deadlock
+	 * with the state_mutex and mm_lock.
+	 * In case the state_mutex was taken already we defer the cleanup work
+	 * to the unlock flow of the other running context.
+	 */
+	spin_lock(&xe_vdev->reset_lock);
+	xe_vdev->deferred_reset = true;
+	if (!mutex_trylock(&xe_vdev->state_mutex)) {
+		spin_unlock(&xe_vdev->reset_lock);
+		return;
+	}
+	spin_unlock(&xe_vdev->reset_lock);
+	xe_vfio_pci_state_mutex_unlock(xe_vdev);
+
+	xe_vfio_pci_reset(xe_vdev);
+}
+
+static const struct pci_error_handlers xe_vfio_pci_err_handlers = {
+	.reset_done = xe_vfio_pci_reset_done,
+	.error_detected = vfio_pci_core_aer_err_detected,
+};
+
+static int xe_vfio_pci_open_device(struct vfio_device *core_vdev)
+{
+	struct xe_vfio_pci_core_device *xe_vdev =
+		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
+	struct vfio_pci_core_device *vdev = &xe_vdev->core_device;
+	int ret;
+
+	ret = vfio_pci_core_enable(vdev);
+	if (ret)
+		return ret;
+
+	xe_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+
+	vfio_pci_core_finish_enable(vdev);
+
+	return 0;
+}
+
+static void xe_vfio_pci_close_device(struct vfio_device *core_vdev)
+{
+	struct xe_vfio_pci_core_device *xe_vdev =
+		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
+
+	xe_vfio_pci_state_mutex_lock(xe_vdev);
+	xe_vfio_pci_reset(xe_vdev);
+	xe_vfio_pci_state_mutex_unlock(xe_vdev);
+	vfio_pci_core_close_device(core_vdev);
+}
+
+static int xe_vfio_pci_release_file(struct inode *inode, struct file *filp)
+{
+	struct xe_vfio_pci_migration_file *migf = filp->private_data;
+
+	mutex_destroy(&migf->lock);
+	kfree(migf);
+
+	return 0;
+}
+
+static ssize_t xe_vfio_pci_save_read(struct file *filp, char __user *buf, size_t len, loff_t *pos)
+{
+	struct xe_vfio_pci_migration_file *migf = filp->private_data;
+	ssize_t ret;
+
+	if (pos)
+		return -ESPIPE;
+
+	mutex_lock(&migf->lock);
+	if (migf->disabled) {
+		mutex_unlock(&migf->lock);
+		return -ENODEV;
+	}
+
+	ret = xe_sriov_vfio_data_read(migf->xe_vdev->xe, migf->xe_vdev->vfid, buf, len);
+	mutex_unlock(&migf->lock);
+
+	return ret;
+}
+
+static const struct file_operations xe_vfio_pci_save_fops = {
+	.owner = THIS_MODULE,
+	.read = xe_vfio_pci_save_read,
+	.release = xe_vfio_pci_release_file,
+	.llseek = noop_llseek,
+};
+
+static ssize_t xe_vfio_pci_resume_write(struct file *filp, const char __user *buf,
+					size_t len, loff_t *pos)
+{
+	struct xe_vfio_pci_migration_file *migf = filp->private_data;
+	ssize_t ret;
+
+	if (pos)
+		return -ESPIPE;
+
+	mutex_lock(&migf->lock);
+	if (migf->disabled) {
+		mutex_unlock(&migf->lock);
+		return -ENODEV;
+	}
+
+	ret = xe_sriov_vfio_data_write(migf->xe_vdev->xe, migf->xe_vdev->vfid, buf, len);
+	mutex_unlock(&migf->lock);
+
+	return ret;
+}
+
+static const struct file_operations xe_vfio_pci_resume_fops = {
+	.owner = THIS_MODULE,
+	.write = xe_vfio_pci_resume_write,
+	.release = xe_vfio_pci_release_file,
+	.llseek = noop_llseek,
+};
+
+static const char *vfio_dev_state_str(u32 state)
+{
+	switch (state) {
+	case VFIO_DEVICE_STATE_RUNNING: return "running";
+	case VFIO_DEVICE_STATE_RUNNING_P2P: return "running_p2p";
+	case VFIO_DEVICE_STATE_STOP_COPY: return "stopcopy";
+	case VFIO_DEVICE_STATE_STOP: return "stop";
+	case VFIO_DEVICE_STATE_RESUMING: return "resuming";
+	case VFIO_DEVICE_STATE_ERROR: return "error";
+	default: return "";
+	}
+}
+
+enum xe_vfio_pci_file_type {
+	XE_VFIO_FILE_SAVE = 0,
+	XE_VFIO_FILE_RESUME,
+};
+
+static struct xe_vfio_pci_migration_file *
+xe_vfio_pci_alloc_file(struct xe_vfio_pci_core_device *xe_vdev,
+		       enum xe_vfio_pci_file_type type)
+{
+	struct xe_vfio_pci_migration_file *migf;
+	const struct file_operations *fops;
+	int flags;
+
+	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
+	if (!migf)
+		return ERR_PTR(-ENOMEM);
+
+	fops = type == XE_VFIO_FILE_SAVE ? &xe_vfio_pci_save_fops : &xe_vfio_pci_resume_fops;
+	flags = type == XE_VFIO_FILE_SAVE ? O_RDONLY : O_WRONLY;
+	migf->filp = anon_inode_getfile("xe_vfio_mig", fops, migf, flags);
+	if (IS_ERR(migf->filp)) {
+		struct file *filp = migf->filp;
+
+		kfree(migf);
+		return ERR_CAST(filp);
+	}
+
+	mutex_init(&migf->lock);
+	migf->xe_vdev = xe_vdev;
+	xe_vdev->migf = migf;
+
+	stream_open(migf->filp->f_inode, migf->filp);
+
+	return migf;
+}
+
+static struct file *
+xe_vfio_set_state(struct xe_vfio_pci_core_device *xe_vdev, u32 new)
+{
+	u32 cur = xe_vdev->mig_state;
+	int ret;
+
+	dev_dbg(xe_vdev_to_dev(xe_vdev),
+		"state: %s->%s\n", vfio_dev_state_str(cur), vfio_dev_state_str(new));
+
+	/*
+	 * "STOP" handling is reused for "RUNNING_P2P", as the device doesn't
+	 * have the capability to selectively block outgoing p2p DMA transfers.
+	 * While the device is allowing BAR accesses when the VF is stopped, it
+	 * is not processing any new workload requests, effectively stopping
+	 * any outgoing DMA transfers (not just p2p).
+	 * Any VRAM / MMIO accesses occurring during "RUNNING_P2P" are kept and
+	 * will be migrated to target VF during stop-copy.
+	 */
+	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
+		ret = xe_sriov_vfio_suspend_device(xe_vdev->xe, xe_vdev->vfid);
+		if (ret)
+			goto err;
+
+		return NULL;
+	}
+
+	if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) ||
+	    (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P))
+		return NULL;
+
+	if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) {
+		ret = xe_sriov_vfio_resume_device(xe_vdev->xe, xe_vdev->vfid);
+		if (ret)
+			goto err;
+
+		return NULL;
+	}
+
+	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
+		struct xe_vfio_pci_migration_file *migf;
+
+		migf = xe_vfio_pci_alloc_file(xe_vdev, XE_VFIO_FILE_SAVE);
+		if (IS_ERR(migf)) {
+			ret = PTR_ERR(migf);
+			goto err;
+		}
+		get_file(migf->filp);
+
+		ret = xe_sriov_vfio_stop_copy_enter(xe_vdev->xe, xe_vdev->vfid);
+		if (ret) {
+			fput(migf->filp);
+			goto err;
+		}
+
+		return migf->filp;
+	}
+
+	if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
+		if (xe_vdev->migf)
+			xe_vfio_pci_put_file(xe_vdev);
+
+		ret = xe_sriov_vfio_stop_copy_exit(xe_vdev->xe, xe_vdev->vfid);
+		if (ret)
+			goto err;
+
+		return NULL;
+	}
+
+	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
+		struct xe_vfio_pci_migration_file *migf;
+
+		migf = xe_vfio_pci_alloc_file(xe_vdev, XE_VFIO_FILE_RESUME);
+		if (IS_ERR(migf)) {
+			ret = PTR_ERR(migf);
+			goto err;
+		}
+		get_file(migf->filp);
+
+		ret = xe_sriov_vfio_resume_data_enter(xe_vdev->xe, xe_vdev->vfid);
+		if (ret) {
+			fput(migf->filp);
+			goto err;
+		}
+
+		return migf->filp;
+	}
+
+	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
+		if (xe_vdev->migf)
+			xe_vfio_pci_put_file(xe_vdev);
+
+		ret = xe_sriov_vfio_resume_data_exit(xe_vdev->xe, xe_vdev->vfid);
+		if (ret)
+			goto err;
+
+		return NULL;
+	}
+
+	WARN(true, "Unknown state transition %d->%d", cur, new);
+	return ERR_PTR(-EINVAL);
+
+err:
+	dev_dbg(xe_vdev_to_dev(xe_vdev),
+		"Failed to transition state: %s->%s err=%d\n",
+		vfio_dev_state_str(cur), vfio_dev_state_str(new), ret);
+	return ERR_PTR(ret);
+}
+
+static struct file *
+xe_vfio_pci_set_device_state(struct vfio_device *core_vdev,
+			     enum vfio_device_mig_state new_state)
+{
+	struct xe_vfio_pci_core_device *xe_vdev =
+		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
+	enum vfio_device_mig_state next_state;
+	struct file *f = NULL;
+	int ret;
+
+	xe_vfio_pci_state_mutex_lock(xe_vdev);
+	while (new_state != xe_vdev->mig_state) {
+		ret = vfio_mig_get_next_state(core_vdev, xe_vdev->mig_state,
+					      new_state, &next_state);
+		if (ret) {
+			xe_sriov_vfio_error(xe_vdev->xe, xe_vdev->vfid);
+			f = ERR_PTR(ret);
+			break;
+		}
+		f = xe_vfio_set_state(xe_vdev, next_state);
+		if (IS_ERR(f))
+			break;
+
+		xe_vdev->mig_state = next_state;
+
+		/* Multiple state transitions with non-NULL file in the middle */
+		if (f && new_state != xe_vdev->mig_state) {
+			fput(f);
+			f = ERR_PTR(-EINVAL);
+			break;
+		}
+	}
+	xe_vfio_pci_state_mutex_unlock(xe_vdev);
+
+	return f;
+}
+
+static int xe_vfio_pci_get_device_state(struct vfio_device *core_vdev,
+					enum vfio_device_mig_state *curr_state)
+{
+	struct xe_vfio_pci_core_device *xe_vdev =
+		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
+
+	xe_vfio_pci_state_mutex_lock(xe_vdev);
+	*curr_state = xe_vdev->mig_state;
+	xe_vfio_pci_state_mutex_unlock(xe_vdev);
+
+	return 0;
+}
+
+static int xe_vfio_pci_get_data_size(struct vfio_device *vdev,
+				     unsigned long *stop_copy_length)
+{
+	struct xe_vfio_pci_core_device *xe_vdev =
+		container_of(vdev, struct xe_vfio_pci_core_device, core_device.vdev);
+
+	xe_vfio_pci_state_mutex_lock(xe_vdev);
+	*stop_copy_length = xe_sriov_vfio_stop_copy_size(xe_vdev->xe, xe_vdev->vfid);
+	xe_vfio_pci_state_mutex_unlock(xe_vdev);
+
+	return 0;
+}
+
+static const struct vfio_migration_ops xe_vfio_pci_migration_ops = {
+	.migration_set_state = xe_vfio_pci_set_device_state,
+	.migration_get_state = xe_vfio_pci_get_device_state,
+	.migration_get_data_size = xe_vfio_pci_get_data_size,
+};
+
+static void xe_vfio_pci_migration_init(struct xe_vfio_pci_core_device *xe_vdev)
+{
+	struct vfio_device *core_vdev = &xe_vdev->core_device.vdev;
+	struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
+	struct xe_device *xe = xe_sriov_vfio_get_pf(pdev);
+
+	if (!xe)
+		return;
+	if (!xe_sriov_vfio_migration_supported(xe))
+		return;
+
+	mutex_init(&xe_vdev->state_mutex);
+	spin_lock_init(&xe_vdev->reset_lock);
+
+	/* PF internal control uses vfid index starting from 1 */
+	xe_vdev->vfid = pci_iov_vf_id(pdev) + 1;
+	xe_vdev->xe = xe;
+
+	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
+	core_vdev->mig_ops = &xe_vfio_pci_migration_ops;
+}
+
+static void xe_vfio_pci_migration_fini(struct xe_vfio_pci_core_device *xe_vdev)
+{
+	if (!xe_vdev->vfid)
+		return;
+
+	mutex_destroy(&xe_vdev->state_mutex);
+}
+
+static int xe_vfio_pci_init_dev(struct vfio_device *core_vdev)
+{
+	struct xe_vfio_pci_core_device *xe_vdev =
+		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
+
+	xe_vfio_pci_migration_init(xe_vdev);
+
+	return vfio_pci_core_init_dev(core_vdev);
+}
+
+static void xe_vfio_pci_release_dev(struct vfio_device *core_vdev)
+{
+	struct xe_vfio_pci_core_device *xe_vdev =
+		container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
+
+	xe_vfio_pci_migration_fini(xe_vdev);
+}
+
+static const struct vfio_device_ops xe_vfio_pci_ops = {
+	.name = "xe-vfio-pci",
+	.init = xe_vfio_pci_init_dev,
+	.release = xe_vfio_pci_release_dev,
+	.open_device = xe_vfio_pci_open_device,
+	.close_device = xe_vfio_pci_close_device,
+	.ioctl = vfio_pci_core_ioctl,
+	.device_feature = vfio_pci_core_ioctl_feature,
+	.read = vfio_pci_core_read,
+	.write = vfio_pci_core_write,
+	.mmap = vfio_pci_core_mmap,
+	.request = vfio_pci_core_request,
+	.match = vfio_pci_core_match,
+	.match_token_uuid = vfio_pci_core_match_token_uuid,
+	.bind_iommufd = vfio_iommufd_physical_bind,
+	.unbind_iommufd = vfio_iommufd_physical_unbind,
+	.attach_ioas = vfio_iommufd_physical_attach_ioas,
+	.detach_ioas = vfio_iommufd_physical_detach_ioas,
+};
+
+static int xe_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct xe_vfio_pci_core_device *xe_vdev;
+	int ret;
+
+	xe_vdev = vfio_alloc_device(xe_vfio_pci_core_device, core_device.vdev, &pdev->dev,
+				    &xe_vfio_pci_ops);
+	if (IS_ERR(xe_vdev))
+		return PTR_ERR(xe_vdev);
+
+	dev_set_drvdata(&pdev->dev, &xe_vdev->core_device);
+
+	ret = vfio_pci_core_register_device(&xe_vdev->core_device);
+	if (ret) {
+		vfio_put_device(&xe_vdev->core_device.vdev);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void xe_vfio_pci_remove(struct pci_dev *pdev)
+{
+	struct xe_vfio_pci_core_device *xe_vdev = pci_get_drvdata(pdev);
+
+	vfio_pci_core_unregister_device(&xe_vdev->core_device);
+	vfio_put_device(&xe_vdev->core_device.vdev);
+}
+
+#define INTEL_PCI_VFIO_DEVICE(_id) { \
+	PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, (_id)) \
+}
+
+static const struct pci_device_id xe_vfio_pci_table[] = {
+	INTEL_PTL_IDS(INTEL_PCI_VFIO_DEVICE),
+	INTEL_WCL_IDS(INTEL_PCI_VFIO_DEVICE),
+	INTEL_BMG_IDS(INTEL_PCI_VFIO_DEVICE),
+	{}
+};
+MODULE_DEVICE_TABLE(pci, xe_vfio_pci_table);
+
+static struct pci_driver xe_vfio_pci_driver = {
+	.name = "xe-vfio-pci",
+	.id_table = xe_vfio_pci_table,
+	.probe = xe_vfio_pci_probe,
+	.remove = xe_vfio_pci_remove,
+	.err_handler = &xe_vfio_pci_err_handlers,
+	.driver_managed_dma = true,
+};
+module_pci_driver(xe_vfio_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michał Winiarski <michal.winiarski@intel.com>");
+MODULE_DESCRIPTION("VFIO PCI driver with migration support for Intel Graphics");
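The driver implements the standard VFIO migration uAPI, so a VMM drives the callbacks above through the VFIO_DEVICE_FEATURE ioctl. A hedged userspace sketch of requesting a state transition, assuming an already-opened VFIO device fd; error handling and consumption of the returned data_fd (for STOP_COPY/RESUMING) are omitted:

#include <linux/vfio.h>
#include <sys/ioctl.h>

/* Sketch: move a VFIO device to a new migration state. */
static int set_mig_state(int device_fd, __u32 device_state)
{
	__u8 buf[sizeof(struct vfio_device_feature) +
		 sizeof(struct vfio_device_feature_mig_state)] = {};
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_feature_mig_state *mig = (void *)feature->data;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
	mig->device_state = device_state;

	/* On success for STOP_COPY/RESUMING, mig->data_fd holds the stream fd. */
	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}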
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VFIO_H_
+#define _XE_SRIOV_VFIO_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+struct xe_device;
+
+/**
+ * xe_sriov_vfio_get_pf() - Get PF &xe_device.
+ * @pdev: the VF &pci_dev device
+ *
+ * Return: pointer to PF &xe_device, NULL otherwise.
+ */
+struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev);
+
+/**
+ * xe_sriov_vfio_migration_supported() - Check if migration is supported.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ *
+ * Return: true if migration is supported, false otherwise.
+ */
+bool xe_sriov_vfio_migration_supported(struct xe_device *xe);
+
+/**
+ * xe_sriov_vfio_wait_flr_done() - Wait for VF FLR completion.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will wait until VF FLR is processed by PF on all tiles (or
+ * until timeout occurs).
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_suspend_device() - Suspend VF.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will pause VF on all tiles/GTs.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_resume_device() - Resume VF.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will resume VF on all tiles.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_device(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_stop_copy_enter() - Initiate a VF device migration data save.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_stop_copy_enter(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_stop_copy_exit() - Finish a VF device migration data save.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_stop_copy_exit(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_resume_data_enter() - Initiate a VF device migration data restore.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_data_enter(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_resume_data_exit() - Finish a VF device migration data restore.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_data_exit(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_error() - Move VF device to error state.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Reset is needed to move it out of error state.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_error(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_data_read() - Read migration data from the VF device.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ * @buf: start address of userspace buffer
+ * @len: requested read size from userspace
+ *
+ * Return: number of bytes that has been successfully read,
+ *	   0 if no more migration data is available, -errno on failure.
+ */
+ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
+				char __user *buf, size_t len);
+/**
+ * xe_sriov_vfio_data_write() - Write migration data to the VF device.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ * @buf: start address of userspace buffer
+ * @len: requested write size from userspace
+ *
+ * Return: number of bytes that has been successfully written, -errno on failure.
+ */
+ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
+				 const char __user *buf, size_t len);
+/**
+ * xe_sriov_vfio_stop_copy_size() - Get a size estimate of VF device migration data.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: migration data size in bytes or a negative error code on failure.
+ */
+ssize_t xe_sriov_vfio_stop_copy_size(struct xe_device *xe, unsigned int vfid);
+
+#endif