From e0f74ed4634d6d662e7dca19115d0da1143a3ec0 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Thu, 7 Apr 2022 03:19:43 -0400 Subject: [PATCH 01/41] i915/gvt: Separate the MMIO tracking table from GVT-g To support the new mdev interfaces and the refactoring patches from Christoph, which move the GVT-g code into a dedicated module, the GVT-g MMIO tracking table needs to be separated from GVT-g. v9: - Fix a problem that might cause a kernel panic. - Remove the redundant definition of intel_get_device_type(). (Jani) - Sort the list of header references in intel_gvt_mmio.c (Jani) - Include the minimum headers instead in intel_gvt_mmio.c (Jani) v8: - Use the SPDX header in intel_gvt_mmio_table.c - Reference gvt.h with its path. (Jani) - Add a missing fix on the mmio emulation path found during debugging. - Fix a build problem on the refreshed gvt-staging branch. (Christoph) v7: - Keep the macros of device generation in GVT-g. (Christoph, Jani) v6: - Move the mmio_table.c into i915. (Christoph) - Keep init_device_info and related structures in GVT-g. (Christoph) - Refine the callbacks of the iterator. (Christoph) - Move the flags of MMIO register definition to GVT-g. (Christoph) - Move the mmio block handling to GVT-g. v5: - Re-design the mmio table framework. (Christoph) v4: - Fix the errors reported by the patch checking scripts. v3: - Fix the errors when CONFIG_DRM_I915_WERROR is turned on. (Jani) v2: - Implement an mmio table instead of generating it by macro in i915. (Jani) Cc: Christoph Hellwig Cc: Jason Gunthorpe Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Vivi Rodrigo Cc: Zhenyu Wang Cc: Zhi Wang Signed-off-by: Zhi Wang Reviewed-by: Christoph Hellwig Tested-by: Christoph Hellwig Reviewed-by: Zhenyu Wang Acked-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20220407071945.72148-2-zhi.a.wang@intel.com --- drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/gvt/gvt.h | 3 +- drivers/gpu/drm/i915/gvt/handlers.c | 1033 ++------------- drivers/gpu/drm/i915/gvt/mmio.h | 1 - drivers/gpu/drm/i915/gvt/reg.h | 9 +- drivers/gpu/drm/i915/intel_gvt.h | 19 + drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 1290 +++++++++++++++++++ 7 files changed, 1458 insertions(+), 899 deletions(-) create mode 100644 drivers/gpu/drm/i915/intel_gvt_mmio_table.c diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 7df74a71d454..a6cab2d9a9d1 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -321,7 +321,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ i915-y += i915_vgpu.o ifeq ($(CONFIG_DRM_I915_GVT),y) -i915-y += intel_gvt.o +i915-y += intel_gvt.o intel_gvt_mmio_table.o include $(src)/gvt/Makefile endif diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 0ebffc327528..bfe07c69cfd2 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -36,6 +36,7 @@ #include #include "i915_drv.h" +#include "intel_gvt.h" #include "debug.h" #include "hypercall.h" @@ -272,7 +273,7 @@ struct intel_gvt_mmio { /* Value of command write of this reg needs to be patched */ #define F_CMD_WRITE_PATCH (1 << 8) - const struct gvt_mmio_block *mmio_block; + struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 520a7e1942f3..b7bab95da3c5 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -101,12 +101,11 @@ struct intel_gvt_mmio_info
*intel_gvt_find_mmio_info(struct intel_gvt *gvt, return NULL; } -static int new_mmio_info(struct intel_gvt *gvt, - u32 offset, u16 flags, u32 size, - u32 addr_mask, u32 ro_mask, u32 device, - gvt_mmio_func read, gvt_mmio_func write) +static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size, + u16 flags, u32 addr_mask, u32 ro_mask, u32 device, + gvt_mmio_func read, gvt_mmio_func write) { - struct intel_gvt_mmio_info *info, *p; + struct intel_gvt_mmio_info *p; u32 start, end, i; if (!intel_gvt_match_device(gvt, device)) @@ -119,32 +118,18 @@ static int new_mmio_info(struct intel_gvt *gvt, end = offset + size; for (i = start; i < end; i += 4) { - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return -ENOMEM; - - info->offset = i; - p = intel_gvt_find_mmio_info(gvt, info->offset); - if (p) { - WARN(1, "dup mmio definition offset %x\n", - info->offset); - kfree(info); - - /* We return -EEXIST here to make GVT-g load fail. - * So duplicated MMIO can be found as soon as - * possible. - */ - return -EEXIST; + p = intel_gvt_find_mmio_info(gvt, i); + if (!p) { + WARN(1, "assign a handler to a non-tracked mmio %x\n", + i); + return -ENODEV; } - - info->ro_mask = ro_mask; - info->device = device; - info->read = read ? read : intel_vgpu_default_mmio_read; - info->write = write ? write : intel_vgpu_default_mmio_write; - gvt->mmio.mmio_attribute[info->offset / 4] = flags; - INIT_HLIST_NODE(&info->node); - hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset); - gvt->mmio.num_tracked_mmio++; + p->ro_mask = ro_mask; + gvt->mmio.mmio_attribute[i / 4] = flags; + if (read) + p->read = read; + if (write) + p->write = write; } return 0; } @@ -2137,15 +2122,12 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, } #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \ - ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \ - f, s, am, rm, d, r, w); \ + ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \ + s, f, am, rm, d, r, w); \ if (ret) \ return ret; \ } while (0) -#define MMIO_D(reg, d) \ - MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL) - #define MMIO_DH(reg, d, r, w) \ MMIO_F(reg, 4, 0, 0, 0, d, r, w) @@ -2170,9 +2152,6 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ } while (0) -#define MMIO_RING_D(prefix, d) \ - MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL) - #define MMIO_RING_DFH(prefix, d, f, r, w) \ MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w) @@ -2196,7 +2175,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler); MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(SDEISR, D_ALL); MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL); @@ -2224,7 +2202,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL); MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL); MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL); - MMIO_D(GEN7_CXT_SIZE, D_ALL); MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL); MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL); @@ -2278,257 +2255,32 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* display */ - MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_D(_MMIO(0x602a0), D_ALL); - - MMIO_D(_MMIO(0x65050), D_ALL); - MMIO_D(_MMIO(0x650b4), D_ALL); - - MMIO_D(_MMIO(0xc4040), 
D_ALL); - MMIO_D(DERRMR, D_ALL); - - MMIO_D(PIPEDSL(PIPE_A), D_ALL); - MMIO_D(PIPEDSL(PIPE_B), D_ALL); - MMIO_D(PIPEDSL(PIPE_C), D_ALL); - MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL); - MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write); - - MMIO_D(PIPESTAT(PIPE_A), D_ALL); - MMIO_D(PIPESTAT(PIPE_B), D_ALL); - MMIO_D(PIPESTAT(PIPE_C), D_ALL); - MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL); - - MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL); - MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL); - MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL); - MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL); - - MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL); - MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL); - MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL); - MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL); - - MMIO_D(CURCNTR(PIPE_A), D_ALL); - MMIO_D(CURCNTR(PIPE_B), D_ALL); - MMIO_D(CURCNTR(PIPE_C), D_ALL); - - MMIO_D(CURPOS(PIPE_A), D_ALL); - MMIO_D(CURPOS(PIPE_B), D_ALL); - MMIO_D(CURPOS(PIPE_C), D_ALL); - - MMIO_D(CURBASE(PIPE_A), D_ALL); - MMIO_D(CURBASE(PIPE_B), D_ALL); - MMIO_D(CURBASE(PIPE_C), D_ALL); - - MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL); - MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL); - MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL); - - MMIO_D(_MMIO(0x700ac), D_ALL); - MMIO_D(_MMIO(0x710ac), D_ALL); - MMIO_D(_MMIO(0x720ac), D_ALL); - - MMIO_D(_MMIO(0x70090), D_ALL); - MMIO_D(_MMIO(0x70094), D_ALL); - MMIO_D(_MMIO(0x70098), D_ALL); - MMIO_D(_MMIO(0x7009c), D_ALL); - - MMIO_D(DSPCNTR(PIPE_A), D_ALL); - MMIO_D(DSPADDR(PIPE_A), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_A), D_ALL); - MMIO_D(DSPPOS(PIPE_A), D_ALL); - MMIO_D(DSPSIZE(PIPE_A), D_ALL); MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_A), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL); MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(DSPCNTR(PIPE_B), D_ALL); - MMIO_D(DSPADDR(PIPE_B), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_B), D_ALL); - MMIO_D(DSPPOS(PIPE_B), D_ALL); - MMIO_D(DSPSIZE(PIPE_B), D_ALL); MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_B), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL); MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(DSPCNTR(PIPE_C), D_ALL); - MMIO_D(DSPADDR(PIPE_C), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_C), D_ALL); - MMIO_D(DSPPOS(PIPE_C), D_ALL); - MMIO_D(DSPSIZE(PIPE_C), D_ALL); MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_C), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL); MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(SPRCTL(PIPE_A), D_ALL); - MMIO_D(SPRLINOFF(PIPE_A), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_A), D_ALL); - MMIO_D(SPRPOS(PIPE_A), D_ALL); - MMIO_D(SPRSIZE(PIPE_A), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_A), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_A), D_ALL); MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_A), D_ALL); - MMIO_D(SPROFFSET(PIPE_A), D_ALL); - MMIO_D(SPRSCALE(PIPE_A), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL); MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(SPRCTL(PIPE_B), D_ALL); - MMIO_D(SPRLINOFF(PIPE_B), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_B), D_ALL); - MMIO_D(SPRPOS(PIPE_B), D_ALL); - MMIO_D(SPRSIZE(PIPE_B), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_B), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_B), D_ALL); MMIO_DH(SPRSURF(PIPE_B), D_ALL, 
NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_B), D_ALL); - MMIO_D(SPROFFSET(PIPE_B), D_ALL); - MMIO_D(SPRSCALE(PIPE_B), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL); MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(SPRCTL(PIPE_C), D_ALL); - MMIO_D(SPRLINOFF(PIPE_C), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_C), D_ALL); - MMIO_D(SPRPOS(PIPE_C), D_ALL); - MMIO_D(SPRSIZE(PIPE_C), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_C), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_C), D_ALL); MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_C), D_ALL); - MMIO_D(SPROFFSET(PIPE_C), D_ALL); - MMIO_D(SPRSCALE(PIPE_C), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL); MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); - MMIO_D(HTOTAL(TRANSCODER_A), D_ALL); - MMIO_D(HBLANK(TRANSCODER_A), D_ALL); - MMIO_D(HSYNC(TRANSCODER_A), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_A), D_ALL); - MMIO_D(VBLANK(TRANSCODER_A), D_ALL); - MMIO_D(VSYNC(TRANSCODER_A), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL); - MMIO_D(PIPESRC(TRANSCODER_A), D_ALL); - - MMIO_D(HTOTAL(TRANSCODER_B), D_ALL); - MMIO_D(HBLANK(TRANSCODER_B), D_ALL); - MMIO_D(HSYNC(TRANSCODER_B), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_B), D_ALL); - MMIO_D(VBLANK(TRANSCODER_B), D_ALL); - MMIO_D(VSYNC(TRANSCODER_B), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL); - MMIO_D(PIPESRC(TRANSCODER_B), D_ALL); - - MMIO_D(HTOTAL(TRANSCODER_C), D_ALL); - MMIO_D(HBLANK(TRANSCODER_C), D_ALL); - MMIO_D(HSYNC(TRANSCODER_C), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_C), D_ALL); - MMIO_D(VBLANK(TRANSCODER_C), D_ALL); - MMIO_D(VSYNC(TRANSCODER_C), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL); - MMIO_D(PIPESRC(TRANSCODER_C), D_ALL); - - MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL); - MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL); - MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL); - MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL); - MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL); - 
MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL); - - MMIO_D(PF_CTL(PIPE_A), D_ALL); - MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL); - MMIO_D(PF_WIN_POS(PIPE_A), D_ALL); - MMIO_D(PF_VSCALE(PIPE_A), D_ALL); - MMIO_D(PF_HSCALE(PIPE_A), D_ALL); - - MMIO_D(PF_CTL(PIPE_B), D_ALL); - MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL); - MMIO_D(PF_WIN_POS(PIPE_B), D_ALL); - MMIO_D(PF_VSCALE(PIPE_B), D_ALL); - MMIO_D(PF_HSCALE(PIPE_B), D_ALL); - - MMIO_D(PF_CTL(PIPE_C), D_ALL); - MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL); - MMIO_D(PF_WIN_POS(PIPE_C), D_ALL); - MMIO_D(PF_VSCALE(PIPE_C), D_ALL); - MMIO_D(PF_HSCALE(PIPE_C), D_ALL); - - MMIO_D(WM0_PIPE_ILK(PIPE_A), D_ALL); - MMIO_D(WM0_PIPE_ILK(PIPE_B), D_ALL); - MMIO_D(WM0_PIPE_ILK(PIPE_C), D_ALL); - MMIO_D(WM1_LP_ILK, D_ALL); - MMIO_D(WM2_LP_ILK, D_ALL); - MMIO_D(WM3_LP_ILK, D_ALL); - MMIO_D(WM1S_LP_ILK, D_ALL); - MMIO_D(WM2S_LP_IVB, D_ALL); - MMIO_D(WM3S_LP_IVB, D_ALL); - - MMIO_D(BLC_PWM_CPU_CTL2, D_ALL); - MMIO_D(BLC_PWM_CPU_CTL, D_ALL); - MMIO_D(BLC_PWM_PCH_CTL1, D_ALL); - MMIO_D(BLC_PWM_PCH_CTL2, D_ALL); - - MMIO_D(_MMIO(0x48268), D_ALL); - MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read, gmbus_mmio_write); MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL, dp_aux_ch_ctl_mmio_write); @@ -2551,74 +2303,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status); - - MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL); - - MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL); - - MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL); - - MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL); - MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL); - MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL); - - MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL); - MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL); - MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL); - - MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL); - MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL); - MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL); - - MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL); - MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL); - MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL); - - MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL); - MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL); - MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL); - MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL); - MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL); - MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL); - MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write); - MMIO_D(PCH_PP_DIVISOR, D_ALL); - 
MMIO_D(PCH_PP_STATUS, D_ALL); - MMIO_D(PCH_LVDS, D_ALL); - MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL); - MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL); - MMIO_D(_MMIO(_PCH_FPA0), D_ALL); - MMIO_D(_MMIO(_PCH_FPA1), D_ALL); - MMIO_D(_MMIO(_PCH_FPB0), D_ALL); - MMIO_D(_MMIO(_PCH_FPB1), D_ALL); - MMIO_D(PCH_DREF_CONTROL, D_ALL); - MMIO_D(PCH_RAWCLK_FREQ, D_ALL); - MMIO_D(PCH_DPLL_SEL, D_ALL); - - MMIO_D(_MMIO(0x61208), D_ALL); - MMIO_D(_MMIO(0x6120c), D_ALL); - MMIO_D(PCH_PP_ON_DELAYS, D_ALL); - MMIO_D(PCH_PP_OFF_DELAYS, D_ALL); - MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL); @@ -2634,143 +2319,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) NULL, NULL); MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write); - MMIO_D(FUSE_STRAP, D_ALL); - MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL); - - MMIO_D(DISP_ARB_CTL, D_ALL); - MMIO_D(DISP_ARB_CTL2, D_ALL); - - MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL); - MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL); - MMIO_D(ILK_DSPCLK_GATE_D, D_ALL); - - MMIO_D(SOUTH_CHICKEN1, D_ALL); MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write); - MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL); - MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL); - MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL); - MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL); - MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL); - - MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_FBC_RT_BASE, D_ALL); - - MMIO_D(IPS_CTL, D_ALL); - - MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL); - - MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL); - - MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL); - - MMIO_D(PREC_PAL_INDEX(PIPE_A), 
D_ALL); - MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL); - MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL); - MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL); - MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL); - MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL); - MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(_MMIO(0x60110), D_ALL); - MMIO_D(_MMIO(0x61110), D_ALL); - MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - - MMIO_D(WM_LINETIME(PIPE_A), D_ALL); - MMIO_D(WM_LINETIME(PIPE_B), D_ALL); - MMIO_D(WM_LINETIME(PIPE_C), D_ALL); - MMIO_D(SPLL_CTL, D_ALL); - MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL); - MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL); - MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL); - MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL); - MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL); - - MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL); - MMIO_D(_MMIO(0x46508), D_ALL); - - MMIO_D(_MMIO(0x49080), D_ALL); - MMIO_D(_MMIO(0x49180), D_ALL); - MMIO_D(_MMIO(0x49280), D_ALL); - - MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(GAMMA_MODE(PIPE_A), D_ALL); - MMIO_D(GAMMA_MODE(PIPE_B), D_ALL); - MMIO_D(GAMMA_MODE(PIPE_C), D_ALL); - - MMIO_D(PIPE_MULT(PIPE_A), D_ALL); - MMIO_D(PIPE_MULT(PIPE_B), D_ALL); - MMIO_D(PIPE_MULT(PIPE_C), D_ALL); - - MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL); - MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL); - MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL); - MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL); - MMIO_D(SBI_ADDR, D_ALL); MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL); MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write); - MMIO_D(PIXCLK_GATE, D_ALL); MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL, dp_aux_ch_ctl_mmio_write); @@ -2793,65 +2345,18 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write); MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL); - MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64eC0), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL); - MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL); - MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL); - MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL); - MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL); - 
MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL); - MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL); - MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL); - MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL); - MMIO_D(FORCEWAKE_ACK, D_ALL); - MMIO_D(GEN6_GT_CORE_STATUS, D_ALL); - MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL); MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL); - MMIO_D(ECOBUS, D_ALL); MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL); MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL); - MMIO_D(GEN6_RPNSWREQ, D_ALL); - MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL); - MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL); - MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL); - MMIO_D(GEN6_RPSTAT1, D_ALL); - MMIO_D(GEN6_RP_CONTROL, D_ALL); - MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL); - MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL); - MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL); - MMIO_D(GEN6_RP_CUR_UP, D_ALL); - MMIO_D(GEN6_RP_PREV_UP, D_ALL); - MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL); - MMIO_D(GEN6_RP_CUR_DOWN, D_ALL); - MMIO_D(GEN6_RP_PREV_DOWN, D_ALL); - MMIO_D(GEN6_RP_UP_EI, D_ALL); - MMIO_D(GEN6_RP_DOWN_EI, D_ALL); - MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL); - MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL); - MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL); - MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL); - MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL); - MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL); - MMIO_D(GEN6_RC_SLEEP, D_ALL); - MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL); - MMIO_D(GEN6_RC6_THRESHOLD, D_ALL); - MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL); - MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL); - MMIO_D(GEN6_PMINTRMSK, D_ALL); MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write); @@ -2859,97 +2364,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write); - MMIO_D(RSTDBYCTL, D_ALL); - MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write); MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write); MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write); - MMIO_D(TILECTL, D_ALL); - - MMIO_D(GEN6_UCGCTL1, D_ALL); - MMIO_D(GEN6_UCGCTL2, D_ALL); - - MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(GEN6_PCODE_DATA, D_ALL); - MMIO_D(_MMIO(0x13812c), D_ALL); MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL); - MMIO_D(HSW_EDRAM_CAP, D_ALL); - MMIO_D(HSW_IDICR, D_ALL); MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL); - MMIO_D(_MMIO(0x3c), D_ALL); - MMIO_D(_MMIO(0x860), D_ALL); - MMIO_D(ECOSKPD(RENDER_RING_BASE), D_ALL); - MMIO_D(_MMIO(0x121d0), D_ALL); - MMIO_D(ECOSKPD(BLT_RING_BASE), D_ALL); - MMIO_D(_MMIO(0x41d0), D_ALL); - MMIO_D(GAC_ECO_BITS, D_ALL); - MMIO_D(_MMIO(0x6200), D_ALL); - MMIO_D(_MMIO(0x6204), D_ALL); - MMIO_D(_MMIO(0x6208), D_ALL); - MMIO_D(_MMIO(0x7118), D_ALL); - MMIO_D(_MMIO(0x7180), D_ALL); - MMIO_D(_MMIO(0x7408), D_ALL); - MMIO_D(_MMIO(0x7c00), D_ALL); MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write); - MMIO_D(_MMIO(0x911c), D_ALL); - MMIO_D(_MMIO(0x9120), D_ALL); MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GAB_CTL, D_ALL); - MMIO_D(_MMIO(0x48800), D_ALL); - MMIO_D(_MMIO(0xce044), D_ALL); - MMIO_D(_MMIO(0xe6500), D_ALL); - MMIO_D(_MMIO(0xe6504), D_ALL); - MMIO_D(_MMIO(0xe6600), D_ALL); - MMIO_D(_MMIO(0xe6604), D_ALL); - 
MMIO_D(_MMIO(0xe6700), D_ALL); - MMIO_D(_MMIO(0xe6704), D_ALL); - MMIO_D(_MMIO(0xe6800), D_ALL); - MMIO_D(_MMIO(0xe6804), D_ALL); - MMIO_D(PCH_GMBUS4, D_ALL); - MMIO_D(PCH_GMBUS5, D_ALL); - - MMIO_D(_MMIO(0x902c), D_ALL); - MMIO_D(_MMIO(0xec008), D_ALL); - MMIO_D(_MMIO(0xec00c), D_ALL); - MMIO_D(_MMIO(0xec008 + 0x18), D_ALL); - MMIO_D(_MMIO(0xec00c + 0x18), D_ALL); - MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL); - MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL); - MMIO_D(_MMIO(0xec408), D_ALL); - MMIO_D(_MMIO(0xec40c), D_ALL); - MMIO_D(_MMIO(0xec408 + 0x18), D_ALL); - MMIO_D(_MMIO(0xec40c + 0x18), D_ALL); - MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL); - MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL); - MMIO_D(_MMIO(0xfc810), D_ALL); - MMIO_D(_MMIO(0xfc81c), D_ALL); - MMIO_D(_MMIO(0xfc828), D_ALL); - MMIO_D(_MMIO(0xfc834), D_ALL); - MMIO_D(_MMIO(0xfcc00), D_ALL); - MMIO_D(_MMIO(0xfcc0c), D_ALL); - MMIO_D(_MMIO(0xfcc18), D_ALL); - MMIO_D(_MMIO(0xfcc24), D_ALL); - MMIO_D(_MMIO(0xfd000), D_ALL); - MMIO_D(_MMIO(0xfd00c), D_ALL); - MMIO_D(_MMIO(0xfd018), D_ALL); - MMIO_D(_MMIO(0xfd024), D_ALL); - MMIO_D(_MMIO(0xfd034), D_ALL); - MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write); - MMIO_D(_MMIO(0x2054), D_ALL); - MMIO_D(_MMIO(0x12054), D_ALL); - MMIO_D(_MMIO(0x22054), D_ALL); - MMIO_D(_MMIO(0x1a054), D_ALL); - - MMIO_D(_MMIO(0x44070), D_ALL); MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL); @@ -2957,8 +2382,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL); - MMIO_D(_MMIO(0x2b00), D_BDW_PLUS); - MMIO_D(_MMIO(0x2360), D_BDW_PLUS); MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); @@ -3006,28 +2429,23 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) static int init_bdw_mmio_info(struct intel_gvt *gvt) { - struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS); MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS); MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS); MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); @@ -3035,7 +2453,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) 
intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); @@ -3043,7 +2460,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); @@ -3051,22 +2467,18 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS); MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS); MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS); MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS); MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, intel_vgpu_reg_master_irq_handler); @@ -3101,21 +2513,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); #undef RING_REG - MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS); - MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS); - MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS); - MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS); - MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS); - MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS); - MMIO_D(_MMIO(0x1c054), D_BDW_PLUS); - MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); - MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT); - MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); - - MMIO_D(GAMTARBMODE, D_BDW_PLUS); - #define RING_REG(base) _MMIO((base) + 0x270) MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL); #undef RING_REG @@ -3124,24 +2523,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS); - MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS); - MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); - - MMIO_D(WM_MISC, D_BDW); - MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW); - - MMIO_D(_MMIO(0x6671c), D_BDW_PLUS); - MMIO_D(_MMIO(0x66c00), D_BDW_PLUS); - MMIO_D(_MMIO(0x66c04), D_BDW_PLUS); - - MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS); - - MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS); - MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS); - MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS); - - MMIO_D(_MMIO(0xfdc), D_BDW_PLUS); MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, @@ -3153,27 +2534,14 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL); - MMIO_D(_MMIO(0xb110), D_BDW); - MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS); 
MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0, D_BDW_PLUS, NULL, force_nonpriv_write); - MMIO_D(_MMIO(0x44484), D_BDW_PLUS); - MMIO_D(_MMIO(0x4448c), D_BDW_PLUS); - MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS); MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL); - MMIO_D(_MMIO(0x110000), D_BDW_PLUS); - - MMIO_D(_MMIO(0x48400), D_BDW_PLUS); - - MMIO_D(_MMIO(0x6e570), D_BDW_PLUS); - MMIO_D(_MMIO(0x65f10), D_BDW_PLUS); - MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); @@ -3213,30 +2581,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS); MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write); MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); - MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS); - MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); - MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL); - MMIO_D(DC_STATE_EN, D_SKL_PLUS); - MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS); - MMIO_D(CDCLK_CTL, D_SKL_PLUS); MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); - MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS); - MMIO_D(DPLL_CTRL1, D_SKL_PLUS); - MMIO_D(DPLL_CTRL2, D_SKL_PLUS); MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL); MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); @@ -3279,22 +2632,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); @@ -3356,30 +2693,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL); - 
MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS); - MMIO_D(_MMIO(0x72380), D_SKL_PLUS); - MMIO_D(_MMIO(0x7239c), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS); - - MMIO_D(DMC_SSP_BASE, D_SKL_PLUS); - MMIO_D(DMC_HTP_SKL, D_SKL_PLUS); - MMIO_D(DMC_LAST_WRITE, D_SKL_PLUS); - MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_D(SKL_DFSM, D_SKL_PLUS); - MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS); - MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_D(RPM_CONFIG0, D_SKL_PLUS); - MMIO_D(_MMIO(0xd08), D_SKL_PLUS); - MMIO_D(RC6_LOCATION, D_SKL_PLUS); MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, @@ -3396,40 +2716,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE, NULL, gen9_trtt_chicken_write); - MMIO_D(_MMIO(0x46430), D_SKL_PLUS); - - MMIO_D(_MMIO(0x46520), D_SKL_PLUS); - - MMIO_D(_MMIO(0xc403c), D_SKL_PLUS); MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); - MMIO_D(_MMIO(0x65900), D_SKL_PLUS); - MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS); - MMIO_D(_MMIO(0x4068), D_SKL_PLUS); - MMIO_D(_MMIO(0x67054), D_SKL_PLUS); - MMIO_D(_MMIO(0x6e560), D_SKL_PLUS); - MMIO_D(_MMIO(0x6e554), D_SKL_PLUS); - MMIO_D(_MMIO(0x2b20), D_SKL_PLUS); - MMIO_D(_MMIO(0x65f00), D_SKL_PLUS); - MMIO_D(_MMIO(0x65f08), D_SKL_PLUS); - MMIO_D(_MMIO(0x320f0), D_SKL_PLUS); - - MMIO_D(_MMIO(0x70034), D_SKL_PLUS); - MMIO_D(_MMIO(0x71034), D_SKL_PLUS); - MMIO_D(_MMIO(0x72034), D_SKL_PLUS); - - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS); - - MMIO_D(_MMIO(0x44500), D_SKL_PLUS); #define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4) MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, csfe_chicken1_mmio_write); @@ -3440,7 +2729,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) NULL, NULL); MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT); MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; @@ -3448,43 +2736,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) static int init_bxt_mmio_info(struct intel_gvt *gvt) { - struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; - MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL); - - MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT); - MMIO_D(GEN7_ROW_INSTDONE, D_BXT); - MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT); - MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT); - MMIO_D(ERROR_GEN6, D_BXT); - MMIO_D(DONE_REG, D_BXT); - MMIO_D(EIR, D_BXT); - MMIO_D(PGTBL_ER, D_BXT); - MMIO_D(_MMIO(0x4194), D_BXT); - MMIO_D(_MMIO(0x4294), D_BXT); - MMIO_D(_MMIO(0x4494), D_BXT); - - MMIO_RING_D(RING_PSMI_CTL, D_BXT); - MMIO_RING_D(RING_DMA_FADD, D_BXT); - MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT); - MMIO_RING_D(RING_IPEHR, D_BXT); - 
MMIO_RING_D(RING_INSTPS, D_BXT); - MMIO_RING_D(RING_BBADDR_UDW, D_BXT); - MMIO_RING_D(RING_BBSTATE, D_BXT); - MMIO_RING_D(RING_IPEIR, D_BXT); - - MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL); - MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write); - MMIO_D(BXT_RP_STATE_CAP, D_BXT); MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT, NULL, bxt_phy_ctl_family_write); MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT, NULL, bxt_phy_ctl_family_write); - MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT); - MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT); - MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT); MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT, NULL, bxt_port_pll_enable_write); MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT, @@ -3492,128 +2750,19 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL, bxt_port_pll_enable_write); - MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT); - - MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT); - - MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT, bxt_port_tx_dw3_read, NULL); - MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT); - - MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT); - 
MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT, bxt_port_tx_dw3_read, NULL); - MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT); - - MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT, bxt_port_tx_dw3_read, NULL); - MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT); - - MMIO_D(BXT_DE_PLL_CTL, D_BXT); MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write); - MMIO_D(BXT_DSI_PLL_CTL, D_BXT); - MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); - - MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); - MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT); - - MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); - MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); - MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); - - MMIO_D(RC6_CTX_BASE, D_BXT); - - MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); - MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT); - MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); - MMIO_D(GEN6_GFXPAUSE, D_BXT); MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL); @@ -3633,17 +2782,14 @@ static int 
init_bxt_mmio_info(struct intel_gvt *gvt) return 0; } -static const struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, - unsigned int offset) +static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, + unsigned int offset) { - unsigned long device = intel_gvt_get_device_type(gvt); - const struct gvt_mmio_block *block = gvt->mmio.mmio_block; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; int num = gvt->mmio.num_mmio_block; int i; for (i = 0; i < num; i++, block++) { - if (!(device & block->device)) - continue; if (offset >= i915_mmio_reg_offset(block->offset) && offset < i915_mmio_reg_offset(block->offset) + block->size) return block; @@ -3668,23 +2814,117 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node) kfree(e); + kfree(gvt->mmio.mmio_block); + gvt->mmio.mmio_block = NULL; + gvt->mmio.num_mmio_block = 0; + vfree(gvt->mmio.mmio_attribute); gvt->mmio.mmio_attribute = NULL; } -/* Special MMIO blocks. registers in MMIO block ranges should not be command - * accessible (should have no F_CMD_ACCESS flag). - * otherwise, need to update cmd_reg_handler in cmd_parser.c - */ -static const struct gvt_mmio_block mmio_blocks[] = { - {D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL}, - {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, - {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, - pvinfo_mmio_read, pvinfo_mmio_write}, - {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL}, - {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL}, - {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL}, -}; +static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset, + u32 size) +{ + struct intel_gvt *gvt = iter->data; + struct intel_gvt_mmio_info *info, *p; + u32 start, end, i; + + if (WARN_ON(!IS_ALIGNED(offset, 4))) + return -EINVAL; + + start = offset; + end = offset + size; + + for (i = start; i < end; i += 4) { + p = intel_gvt_find_mmio_info(gvt, i); + if (p) { + WARN(1, "dup mmio definition offset %x\n", + info->offset); + + /* We return -EEXIST here to make GVT-g load fail. + * So duplicated MMIO can be found as soon as + * possible. 
+ */ + return -EEXIST; + } + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->offset = i; + info->read = intel_vgpu_default_mmio_read; + info->write = intel_vgpu_default_mmio_write; + INIT_HLIST_NODE(&info->node); + hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset); + gvt->mmio.num_tracked_mmio++; + } + return 0; +} + +static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter, + u32 offset, u32 size) +{ + struct intel_gvt *gvt = iter->data; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; + void *ret; + + ret = krealloc(block, + (gvt->mmio.num_mmio_block + 1) * sizeof(*block), + GFP_KERNEL); + if (!ret) + return -ENOMEM; + + gvt->mmio.mmio_block = block = ret; + + block += gvt->mmio.num_mmio_block; + + memset(block, 0, sizeof(*block)); + + block->offset = _MMIO(offset); + block->size = size; + + gvt->mmio.num_mmio_block++; + + return 0; +} + +static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset, + u32 size) +{ + if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0))) + return handle_mmio(iter, offset, size); + else + return handle_mmio_block(iter, offset, size); +} + +static int init_mmio_info(struct intel_gvt *gvt) +{ + struct intel_gvt_mmio_table_iter iter = { + .i915 = gvt->gt->i915, + .data = gvt, + .handle_mmio_cb = handle_mmio_cb, + }; + + return intel_gvt_iterate_mmio_table(&iter); +} + +static int init_mmio_block_handlers(struct intel_gvt *gvt) +{ + struct gvt_mmio_block *block; + + block = find_mmio_block(gvt, VGT_PVINFO_PAGE); + if (!block) { + WARN(1, "fail to assign handlers to mmio block %x\n", + i915_mmio_reg_offset(block->offset)); + return -ENODEV; + } + + block->read = pvinfo_mmio_read; + block->write = pvinfo_mmio_write; + + return 0; +} /** * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device @@ -3707,6 +2947,14 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) if (!gvt->mmio.mmio_attribute) return -ENOMEM; + ret = init_mmio_info(gvt); + if (ret) + goto err; + + ret = init_mmio_block_handlers(gvt); + if (ret) + goto err; + ret = init_generic_mmio_info(gvt); if (ret) goto err; @@ -3737,9 +2985,6 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) goto err; } - gvt->mmio.mmio_block = mmio_blocks; - gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks); - return 0; err: intel_gvt_clean_mmio_info(gvt); @@ -3759,7 +3004,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), void *data) { - const struct gvt_mmio_block *block = gvt->mmio.mmio_block; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; struct intel_gvt_mmio_info *e; int i, j, ret; @@ -3775,9 +3020,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, continue; for (j = 0; j < block->size; j += 4) { - ret = handler(gvt, - i915_mmio_reg_offset(block->offset) + j, - data); + ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data); if (ret) return ret; } @@ -3877,7 +3120,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_mmio_info *mmio_info; - const struct gvt_mmio_block *mmio_block; + struct gvt_mmio_block *mmio_block; gvt_mmio_func func; int ret; diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 7c26af39fbfc..bba154e38705 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -72,7 +72,6 @@ struct 
intel_gvt_mmio_info { const struct intel_engine_cs * intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg); unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt); -bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device); int intel_gvt_setup_mmio_info(struct intel_gvt *gvt); void intel_gvt_clean_mmio_info(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 7d666d34f9ff..d8216c63c39a 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -132,6 +132,13 @@ #define RING_GFX_MODE(base) _MMIO((base) + 0x29c) #define VF_GUARDBAND _MMIO(0x83a4) - #define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4) + +/* XXX FIXME i915 has changed PP_XXX definition */ +#define PCH_PP_STATUS _MMIO(0xc7200) +#define PCH_PP_CONTROL _MMIO(0xc7204) +#define PCH_PP_ON_DELAYS _MMIO(0xc7208) +#define PCH_PP_OFF_DELAYS _MMIO(0xc720c) +#define PCH_PP_DIVISOR _MMIO(0xc7210) + #endif diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h index d7d3fb6186fd..3a2563ad50f8 100644 --- a/drivers/gpu/drm/i915/intel_gvt.h +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -24,9 +24,19 @@ #ifndef _INTEL_GVT_H_ #define _INTEL_GVT_H_ +#include + struct drm_i915_private; #ifdef CONFIG_DRM_I915_GVT + +struct intel_gvt_mmio_table_iter { + struct drm_i915_private *i915; + void *data; + int (*handle_mmio_cb)(struct intel_gvt_mmio_table_iter *iter, + u32 offset, u32 size); +}; + int intel_gvt_init(struct drm_i915_private *dev_priv); void intel_gvt_driver_remove(struct drm_i915_private *dev_priv); int intel_gvt_init_device(struct drm_i915_private *dev_priv); @@ -34,6 +44,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv); int intel_gvt_init_host(void); void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv); void intel_gvt_resume(struct drm_i915_private *dev_priv); +int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter); #else static inline int intel_gvt_init(struct drm_i915_private *dev_priv) { @@ -51,6 +62,14 @@ static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) static inline void intel_gvt_resume(struct drm_i915_private *dev_priv) { } + +struct intel_gvt_mmio_table_iter { +}; + +static inline int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter) +{ + return 0; +} #endif #endif /* _INTEL_GVT_H_ */ diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c new file mode 100644 index 000000000000..7f7fa9a6a7be --- /dev/null +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -0,0 +1,1290 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include "display/vlv_dsi_pll_regs.h" +#include "gt/intel_gt_regs.h" +#include "gvt/gvt.h" +#include "i915_drv.h" +#include "i915_pvinfo.h" +#include "i915_reg.h" +#include "intel_gvt.h" +#include "intel_mchbar_regs.h" + +#define MMIO_F(reg, s) do { \ + int ret; \ + ret = iter->handle_mmio_cb(iter, i915_mmio_reg_offset(reg), s); \ + if (ret) \ + return ret; \ +} while (0) + +#define MMIO_D(reg) MMIO_F(reg, 4) + +#define MMIO_RING_F(prefix, s) do { \ + MMIO_F(prefix(RENDER_RING_BASE), s); \ + MMIO_F(prefix(BLT_RING_BASE), s); \ + MMIO_F(prefix(GEN6_BSD_RING_BASE), s); \ + MMIO_F(prefix(VEBOX_RING_BASE), s); \ + if (HAS_ENGINE(to_gt(iter->i915), VCS1)) \ + MMIO_F(prefix(GEN8_BSD2_RING_BASE), s); \ +} while (0) + +#define MMIO_RING_D(prefix) \ + MMIO_RING_F(prefix, 4) + +static int 
iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_RING_D(RING_IMR); + MMIO_D(SDEIMR); + MMIO_D(SDEIER); + MMIO_D(SDEIIR); + MMIO_D(SDEISR); + MMIO_RING_D(RING_HWSTAM); + MMIO_D(BSD_HWS_PGA_GEN7); + MMIO_D(BLT_HWS_PGA_GEN7); + MMIO_D(VEBOX_HWS_PGA_GEN7); + +#define RING_REG(base) _MMIO((base) + 0x28) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x134) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x6c) + MMIO_RING_D(RING_REG); +#undef RING_REG + MMIO_D(_MMIO(0x2148)); + MMIO_D(CCID(RENDER_RING_BASE)); + MMIO_D(_MMIO(0x12198)); + MMIO_D(GEN7_CXT_SIZE); + MMIO_RING_D(RING_TAIL); + MMIO_RING_D(RING_HEAD); + MMIO_RING_D(RING_CTL); + MMIO_RING_D(RING_ACTHD); + MMIO_RING_D(RING_START); + + /* RING MODE */ +#define RING_REG(base) _MMIO((base) + 0x29c) + MMIO_RING_D(RING_REG); +#undef RING_REG + + MMIO_RING_D(RING_MI_MODE); + MMIO_RING_D(RING_INSTPM); + MMIO_RING_D(RING_TIMESTAMP); + MMIO_RING_D(RING_TIMESTAMP_UDW); + MMIO_D(GEN7_GT_MODE); + MMIO_D(CACHE_MODE_0_GEN7); + MMIO_D(CACHE_MODE_1); + MMIO_D(CACHE_MODE_0); + MMIO_D(_MMIO(0x2124)); + MMIO_D(_MMIO(0x20dc)); + MMIO_D(_3D_CHICKEN3); + MMIO_D(_MMIO(0x2088)); + MMIO_D(FF_SLICE_CS_CHICKEN2); + MMIO_D(_MMIO(0x2470)); + MMIO_D(GAM_ECOCHK); + MMIO_D(GEN7_COMMON_SLICE_CHICKEN1); + MMIO_D(COMMON_SLICE_CHICKEN2); + MMIO_D(_MMIO(0x9030)); + MMIO_D(_MMIO(0x20a0)); + MMIO_D(_MMIO(0x2420)); + MMIO_D(_MMIO(0x2430)); + MMIO_D(_MMIO(0x2434)); + MMIO_D(_MMIO(0x2438)); + MMIO_D(_MMIO(0x243c)); + MMIO_D(_MMIO(0x7018)); + MMIO_D(HALF_SLICE_CHICKEN3); + MMIO_D(GEN7_HALF_SLICE_CHICKEN1); + /* display */ + MMIO_F(_MMIO(0x60220), 0x20); + MMIO_D(_MMIO(0x602a0)); + MMIO_D(_MMIO(0x65050)); + MMIO_D(_MMIO(0x650b4)); + MMIO_D(_MMIO(0xc4040)); + MMIO_D(DERRMR); + MMIO_D(PIPEDSL(PIPE_A)); + MMIO_D(PIPEDSL(PIPE_B)); + MMIO_D(PIPEDSL(PIPE_C)); + MMIO_D(PIPEDSL(_PIPE_EDP)); + MMIO_D(PIPECONF(PIPE_A)); + MMIO_D(PIPECONF(PIPE_B)); + MMIO_D(PIPECONF(PIPE_C)); + MMIO_D(PIPECONF(_PIPE_EDP)); + MMIO_D(PIPESTAT(PIPE_A)); + MMIO_D(PIPESTAT(PIPE_B)); + MMIO_D(PIPESTAT(PIPE_C)); + MMIO_D(PIPESTAT(_PIPE_EDP)); + MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A)); + MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B)); + MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C)); + MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP)); + MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A)); + MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B)); + MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C)); + MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP)); + MMIO_D(CURCNTR(PIPE_A)); + MMIO_D(CURCNTR(PIPE_B)); + MMIO_D(CURCNTR(PIPE_C)); + MMIO_D(CURPOS(PIPE_A)); + MMIO_D(CURPOS(PIPE_B)); + MMIO_D(CURPOS(PIPE_C)); + MMIO_D(CURBASE(PIPE_A)); + MMIO_D(CURBASE(PIPE_B)); + MMIO_D(CURBASE(PIPE_C)); + MMIO_D(CUR_FBC_CTL(PIPE_A)); + MMIO_D(CUR_FBC_CTL(PIPE_B)); + MMIO_D(CUR_FBC_CTL(PIPE_C)); + MMIO_D(_MMIO(0x700ac)); + MMIO_D(_MMIO(0x710ac)); + MMIO_D(_MMIO(0x720ac)); + MMIO_D(_MMIO(0x70090)); + MMIO_D(_MMIO(0x70094)); + MMIO_D(_MMIO(0x70098)); + MMIO_D(_MMIO(0x7009c)); + MMIO_D(DSPCNTR(PIPE_A)); + MMIO_D(DSPADDR(PIPE_A)); + MMIO_D(DSPSTRIDE(PIPE_A)); + MMIO_D(DSPPOS(PIPE_A)); + MMIO_D(DSPSIZE(PIPE_A)); + MMIO_D(DSPSURF(PIPE_A)); + MMIO_D(DSPOFFSET(PIPE_A)); + MMIO_D(DSPSURFLIVE(PIPE_A)); + MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY)); + MMIO_D(DSPCNTR(PIPE_B)); + MMIO_D(DSPADDR(PIPE_B)); + MMIO_D(DSPSTRIDE(PIPE_B)); + MMIO_D(DSPPOS(PIPE_B)); + MMIO_D(DSPSIZE(PIPE_B)); + MMIO_D(DSPSURF(PIPE_B)); + MMIO_D(DSPOFFSET(PIPE_B)); + MMIO_D(DSPSURFLIVE(PIPE_B)); + MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY)); + 
MMIO_D(DSPCNTR(PIPE_C)); + MMIO_D(DSPADDR(PIPE_C)); + MMIO_D(DSPSTRIDE(PIPE_C)); + MMIO_D(DSPPOS(PIPE_C)); + MMIO_D(DSPSIZE(PIPE_C)); + MMIO_D(DSPSURF(PIPE_C)); + MMIO_D(DSPOFFSET(PIPE_C)); + MMIO_D(DSPSURFLIVE(PIPE_C)); + MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY)); + MMIO_D(SPRCTL(PIPE_A)); + MMIO_D(SPRLINOFF(PIPE_A)); + MMIO_D(SPRSTRIDE(PIPE_A)); + MMIO_D(SPRPOS(PIPE_A)); + MMIO_D(SPRSIZE(PIPE_A)); + MMIO_D(SPRKEYVAL(PIPE_A)); + MMIO_D(SPRKEYMSK(PIPE_A)); + MMIO_D(SPRSURF(PIPE_A)); + MMIO_D(SPRKEYMAX(PIPE_A)); + MMIO_D(SPROFFSET(PIPE_A)); + MMIO_D(SPRSCALE(PIPE_A)); + MMIO_D(SPRSURFLIVE(PIPE_A)); + MMIO_D(REG_50080(PIPE_A, PLANE_SPRITE0)); + MMIO_D(SPRCTL(PIPE_B)); + MMIO_D(SPRLINOFF(PIPE_B)); + MMIO_D(SPRSTRIDE(PIPE_B)); + MMIO_D(SPRPOS(PIPE_B)); + MMIO_D(SPRSIZE(PIPE_B)); + MMIO_D(SPRKEYVAL(PIPE_B)); + MMIO_D(SPRKEYMSK(PIPE_B)); + MMIO_D(SPRSURF(PIPE_B)); + MMIO_D(SPRKEYMAX(PIPE_B)); + MMIO_D(SPROFFSET(PIPE_B)); + MMIO_D(SPRSCALE(PIPE_B)); + MMIO_D(SPRSURFLIVE(PIPE_B)); + MMIO_D(REG_50080(PIPE_B, PLANE_SPRITE0)); + MMIO_D(SPRCTL(PIPE_C)); + MMIO_D(SPRLINOFF(PIPE_C)); + MMIO_D(SPRSTRIDE(PIPE_C)); + MMIO_D(SPRPOS(PIPE_C)); + MMIO_D(SPRSIZE(PIPE_C)); + MMIO_D(SPRKEYVAL(PIPE_C)); + MMIO_D(SPRKEYMSK(PIPE_C)); + MMIO_D(SPRSURF(PIPE_C)); + MMIO_D(SPRKEYMAX(PIPE_C)); + MMIO_D(SPROFFSET(PIPE_C)); + MMIO_D(SPRSCALE(PIPE_C)); + MMIO_D(SPRSURFLIVE(PIPE_C)); + MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0)); + MMIO_D(HTOTAL(TRANSCODER_A)); + MMIO_D(HBLANK(TRANSCODER_A)); + MMIO_D(HSYNC(TRANSCODER_A)); + MMIO_D(VTOTAL(TRANSCODER_A)); + MMIO_D(VBLANK(TRANSCODER_A)); + MMIO_D(VSYNC(TRANSCODER_A)); + MMIO_D(BCLRPAT(TRANSCODER_A)); + MMIO_D(VSYNCSHIFT(TRANSCODER_A)); + MMIO_D(PIPESRC(TRANSCODER_A)); + MMIO_D(HTOTAL(TRANSCODER_B)); + MMIO_D(HBLANK(TRANSCODER_B)); + MMIO_D(HSYNC(TRANSCODER_B)); + MMIO_D(VTOTAL(TRANSCODER_B)); + MMIO_D(VBLANK(TRANSCODER_B)); + MMIO_D(VSYNC(TRANSCODER_B)); + MMIO_D(BCLRPAT(TRANSCODER_B)); + MMIO_D(VSYNCSHIFT(TRANSCODER_B)); + MMIO_D(PIPESRC(TRANSCODER_B)); + MMIO_D(HTOTAL(TRANSCODER_C)); + MMIO_D(HBLANK(TRANSCODER_C)); + MMIO_D(HSYNC(TRANSCODER_C)); + MMIO_D(VTOTAL(TRANSCODER_C)); + MMIO_D(VBLANK(TRANSCODER_C)); + MMIO_D(VSYNC(TRANSCODER_C)); + MMIO_D(BCLRPAT(TRANSCODER_C)); + MMIO_D(VSYNCSHIFT(TRANSCODER_C)); + MMIO_D(PIPESRC(TRANSCODER_C)); + MMIO_D(HTOTAL(TRANSCODER_EDP)); + MMIO_D(HBLANK(TRANSCODER_EDP)); + MMIO_D(HSYNC(TRANSCODER_EDP)); + MMIO_D(VTOTAL(TRANSCODER_EDP)); + MMIO_D(VBLANK(TRANSCODER_EDP)); + MMIO_D(VSYNC(TRANSCODER_EDP)); + MMIO_D(BCLRPAT(TRANSCODER_EDP)); + MMIO_D(VSYNCSHIFT(TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_A)); + MMIO_D(PIPE_DATA_N1(TRANSCODER_A)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_A)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_A)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_A)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_A)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_A)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_A)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_B)); + MMIO_D(PIPE_DATA_N1(TRANSCODER_B)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_B)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_B)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_B)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_B)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_B)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_B)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_C)); + MMIO_D(PIPE_DATA_N1(TRANSCODER_C)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_C)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_C)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_C)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_C)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_C)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_C)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP)); + 
MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP)); + MMIO_D(PF_CTL(PIPE_A)); + MMIO_D(PF_WIN_SZ(PIPE_A)); + MMIO_D(PF_WIN_POS(PIPE_A)); + MMIO_D(PF_VSCALE(PIPE_A)); + MMIO_D(PF_HSCALE(PIPE_A)); + MMIO_D(PF_CTL(PIPE_B)); + MMIO_D(PF_WIN_SZ(PIPE_B)); + MMIO_D(PF_WIN_POS(PIPE_B)); + MMIO_D(PF_VSCALE(PIPE_B)); + MMIO_D(PF_HSCALE(PIPE_B)); + MMIO_D(PF_CTL(PIPE_C)); + MMIO_D(PF_WIN_SZ(PIPE_C)); + MMIO_D(PF_WIN_POS(PIPE_C)); + MMIO_D(PF_VSCALE(PIPE_C)); + MMIO_D(PF_HSCALE(PIPE_C)); + MMIO_D(WM0_PIPE_ILK(PIPE_A)); + MMIO_D(WM0_PIPE_ILK(PIPE_B)); + MMIO_D(WM0_PIPE_ILK(PIPE_C)); + MMIO_D(WM1_LP_ILK); + MMIO_D(WM2_LP_ILK); + MMIO_D(WM3_LP_ILK); + MMIO_D(WM1S_LP_ILK); + MMIO_D(WM2S_LP_IVB); + MMIO_D(WM3S_LP_IVB); + MMIO_D(BLC_PWM_CPU_CTL2); + MMIO_D(BLC_PWM_CPU_CTL); + MMIO_D(BLC_PWM_PCH_CTL1); + MMIO_D(BLC_PWM_PCH_CTL2); + MMIO_D(_MMIO(0x48268)); + MMIO_F(PCH_GMBUS0, 4 * 4); + MMIO_F(PCH_GPIO_BASE, 6 * 4); + MMIO_F(_MMIO(0xe4f00), 0x28); + MMIO_D(_MMIO(_PCH_TRANSACONF)); + MMIO_D(_MMIO(_PCH_TRANSBCONF)); + MMIO_D(FDI_RX_IIR(PIPE_A)); + MMIO_D(FDI_RX_IIR(PIPE_B)); + MMIO_D(FDI_RX_IIR(PIPE_C)); + MMIO_D(FDI_RX_IMR(PIPE_A)); + MMIO_D(FDI_RX_IMR(PIPE_B)); + MMIO_D(FDI_RX_IMR(PIPE_C)); + MMIO_D(FDI_RX_CTL(PIPE_A)); + MMIO_D(FDI_RX_CTL(PIPE_B)); + MMIO_D(FDI_RX_CTL(PIPE_C)); + MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A)); + MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A)); + MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A)); + MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A)); + MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A)); + MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A)); + MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A)); + MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B)); + MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B)); + MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B)); + MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B)); + MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B)); + MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B)); + MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2)); + MMIO_D(TRANS_DP_CTL(PIPE_A)); + MMIO_D(TRANS_DP_CTL(PIPE_B)); + MMIO_D(TRANS_DP_CTL(PIPE_C)); + MMIO_D(TVIDEO_DIP_CTL(PIPE_A)); + MMIO_D(TVIDEO_DIP_DATA(PIPE_A)); + MMIO_D(TVIDEO_DIP_GCP(PIPE_A)); + MMIO_D(TVIDEO_DIP_CTL(PIPE_B)); + MMIO_D(TVIDEO_DIP_DATA(PIPE_B)); + MMIO_D(TVIDEO_DIP_GCP(PIPE_B)); + MMIO_D(TVIDEO_DIP_CTL(PIPE_C)); + MMIO_D(TVIDEO_DIP_DATA(PIPE_C)); + MMIO_D(TVIDEO_DIP_GCP(PIPE_C)); + MMIO_D(_MMIO(_FDI_RXA_MISC)); + MMIO_D(_MMIO(_FDI_RXB_MISC)); + MMIO_D(_MMIO(_FDI_RXA_TUSIZE1)); + MMIO_D(_MMIO(_FDI_RXA_TUSIZE2)); + MMIO_D(_MMIO(_FDI_RXB_TUSIZE1)); + MMIO_D(_MMIO(_FDI_RXB_TUSIZE2)); + MMIO_D(PCH_PP_CONTROL); + MMIO_D(PCH_PP_DIVISOR); + MMIO_D(PCH_PP_STATUS); + MMIO_D(PCH_LVDS); + MMIO_D(_MMIO(_PCH_DPLL_A)); + MMIO_D(_MMIO(_PCH_DPLL_B)); + MMIO_D(_MMIO(_PCH_FPA0)); + MMIO_D(_MMIO(_PCH_FPA1)); + MMIO_D(_MMIO(_PCH_FPB0)); + MMIO_D(_MMIO(_PCH_FPB1)); + MMIO_D(PCH_DREF_CONTROL); + MMIO_D(PCH_RAWCLK_FREQ); + MMIO_D(PCH_DPLL_SEL); + MMIO_D(_MMIO(0x61208)); + MMIO_D(_MMIO(0x6120c)); + MMIO_D(PCH_PP_ON_DELAYS); + MMIO_D(PCH_PP_OFF_DELAYS); + MMIO_D(_MMIO(0xe651c)); + MMIO_D(_MMIO(0xe661c)); + MMIO_D(_MMIO(0xe671c)); + MMIO_D(_MMIO(0xe681c)); + MMIO_D(_MMIO(0xe6c04)); + 
MMIO_D(_MMIO(0xe6e1c)); + MMIO_D(PCH_PORT_HOTPLUG); + MMIO_D(LCPLL_CTL); + MMIO_D(FUSE_STRAP); + MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL); + MMIO_D(DISP_ARB_CTL); + MMIO_D(DISP_ARB_CTL2); + MMIO_D(ILK_DISPLAY_CHICKEN1); + MMIO_D(ILK_DISPLAY_CHICKEN2); + MMIO_D(ILK_DSPCLK_GATE_D); + MMIO_D(SOUTH_CHICKEN1); + MMIO_D(SOUTH_CHICKEN2); + MMIO_D(_MMIO(_TRANSA_CHICKEN1)); + MMIO_D(_MMIO(_TRANSB_CHICKEN1)); + MMIO_D(SOUTH_DSPCLK_GATE_D); + MMIO_D(_MMIO(_TRANSA_CHICKEN2)); + MMIO_D(_MMIO(_TRANSB_CHICKEN2)); + MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A)); + MMIO_D(ILK_FBC_RT_BASE); + MMIO_D(IPS_CTL); + MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A)); + MMIO_D(PIPE_CSC_MODE(PIPE_A)); + MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A)); + MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A)); + MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A)); + MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A)); + MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A)); + MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B)); + MMIO_D(PIPE_CSC_MODE(PIPE_B)); + MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B)); + MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B)); + MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B)); + MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B)); + MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B)); + MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C)); + MMIO_D(PIPE_CSC_MODE(PIPE_C)); + MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C)); + MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C)); + MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C)); + MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C)); + MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C)); + MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C)); + MMIO_D(PREC_PAL_INDEX(PIPE_A)); + MMIO_D(PREC_PAL_DATA(PIPE_A)); + MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3); + MMIO_D(PREC_PAL_INDEX(PIPE_B)); + MMIO_D(PREC_PAL_DATA(PIPE_B)); + MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3); + MMIO_D(PREC_PAL_INDEX(PIPE_C)); + MMIO_D(PREC_PAL_DATA(PIPE_C)); + MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3); + MMIO_D(_MMIO(0x60110)); + MMIO_D(_MMIO(0x61110)); + MMIO_F(_MMIO(0x70400), 0x40); + MMIO_F(_MMIO(0x71400), 0x40); + MMIO_F(_MMIO(0x72400), 0x40); + MMIO_D(WM_LINETIME(PIPE_A)); + MMIO_D(WM_LINETIME(PIPE_B)); + MMIO_D(WM_LINETIME(PIPE_C)); + MMIO_D(SPLL_CTL); + MMIO_D(_MMIO(_WRPLL_CTL1)); + MMIO_D(_MMIO(_WRPLL_CTL2)); + MMIO_D(PORT_CLK_SEL(PORT_A)); + MMIO_D(PORT_CLK_SEL(PORT_B)); + MMIO_D(PORT_CLK_SEL(PORT_C)); + MMIO_D(PORT_CLK_SEL(PORT_D)); + MMIO_D(PORT_CLK_SEL(PORT_E)); + MMIO_D(TRANS_CLK_SEL(TRANSCODER_A)); + MMIO_D(TRANS_CLK_SEL(TRANSCODER_B)); + MMIO_D(TRANS_CLK_SEL(TRANSCODER_C)); + MMIO_D(HSW_NDE_RSTWRN_OPT); + MMIO_D(_MMIO(0x46508)); + MMIO_D(_MMIO(0x49080)); + MMIO_D(_MMIO(0x49180)); + MMIO_D(_MMIO(0x49280)); + MMIO_F(_MMIO(0x49090), 0x14); + MMIO_F(_MMIO(0x49190), 0x14); + MMIO_F(_MMIO(0x49290), 0x14); + MMIO_D(GAMMA_MODE(PIPE_A)); + MMIO_D(GAMMA_MODE(PIPE_B)); + MMIO_D(GAMMA_MODE(PIPE_C)); + 
MMIO_D(PIPE_MULT(PIPE_A)); + MMIO_D(PIPE_MULT(PIPE_B)); + MMIO_D(PIPE_MULT(PIPE_C)); + MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A)); + MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B)); + MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C)); + MMIO_D(SFUSE_STRAP); + MMIO_D(SBI_ADDR); + MMIO_D(SBI_DATA); + MMIO_D(SBI_CTL_STAT); + MMIO_D(PIXCLK_GATE); + MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4); + MMIO_D(DDI_BUF_CTL(PORT_A)); + MMIO_D(DDI_BUF_CTL(PORT_B)); + MMIO_D(DDI_BUF_CTL(PORT_C)); + MMIO_D(DDI_BUF_CTL(PORT_D)); + MMIO_D(DDI_BUF_CTL(PORT_E)); + MMIO_D(DP_TP_CTL(PORT_A)); + MMIO_D(DP_TP_CTL(PORT_B)); + MMIO_D(DP_TP_CTL(PORT_C)); + MMIO_D(DP_TP_CTL(PORT_D)); + MMIO_D(DP_TP_CTL(PORT_E)); + MMIO_D(DP_TP_STATUS(PORT_A)); + MMIO_D(DP_TP_STATUS(PORT_B)); + MMIO_D(DP_TP_STATUS(PORT_C)); + MMIO_D(DP_TP_STATUS(PORT_D)); + MMIO_D(DP_TP_STATUS(PORT_E)); + MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50); + MMIO_F(_MMIO(0x64e60), 0x50); + MMIO_F(_MMIO(0x64eC0), 0x50); + MMIO_F(_MMIO(0x64f20), 0x50); + MMIO_F(_MMIO(0x64f80), 0x50); + MMIO_D(HSW_AUD_CFG(PIPE_A)); + MMIO_D(HSW_AUD_PIN_ELD_CP_VLD); + MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_A)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_B)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_C)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_EDP)); + MMIO_D(_MMIO(_TRANSA_MSA_MISC)); + MMIO_D(_MMIO(_TRANSB_MSA_MISC)); + MMIO_D(_MMIO(_TRANSC_MSA_MISC)); + MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC)); + MMIO_D(FORCEWAKE); + MMIO_D(FORCEWAKE_ACK); + MMIO_D(GEN6_GT_CORE_STATUS); + MMIO_D(GEN6_GT_THREAD_STATUS_REG); + MMIO_D(GTFIFODBG); + MMIO_D(GTFIFOCTL); + MMIO_D(ECOBUS); + MMIO_D(GEN6_RC_CONTROL); + MMIO_D(GEN6_RC_STATE); + MMIO_D(GEN6_RPNSWREQ); + MMIO_D(GEN6_RC_VIDEO_FREQ); + MMIO_D(GEN6_RP_DOWN_TIMEOUT); + MMIO_D(GEN6_RP_INTERRUPT_LIMITS); + MMIO_D(GEN6_RPSTAT1); + MMIO_D(GEN6_RP_CONTROL); + MMIO_D(GEN6_RP_UP_THRESHOLD); + MMIO_D(GEN6_RP_DOWN_THRESHOLD); + MMIO_D(GEN6_RP_CUR_UP_EI); + MMIO_D(GEN6_RP_CUR_UP); + MMIO_D(GEN6_RP_PREV_UP); + MMIO_D(GEN6_RP_CUR_DOWN_EI); + MMIO_D(GEN6_RP_CUR_DOWN); + MMIO_D(GEN6_RP_PREV_DOWN); + MMIO_D(GEN6_RP_UP_EI); + MMIO_D(GEN6_RP_DOWN_EI); + MMIO_D(GEN6_RP_IDLE_HYSTERSIS); + MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT); + MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT); + MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT); + MMIO_D(GEN6_RC_EVALUATION_INTERVAL); + MMIO_D(GEN6_RC_IDLE_HYSTERSIS); + MMIO_D(GEN6_RC_SLEEP); + MMIO_D(GEN6_RC1e_THRESHOLD); + MMIO_D(GEN6_RC6_THRESHOLD); + MMIO_D(GEN6_RC6p_THRESHOLD); + MMIO_D(GEN6_RC6pp_THRESHOLD); + MMIO_D(GEN6_PMINTRMSK); + + MMIO_D(RSTDBYCTL); + MMIO_D(GEN6_GDRST); + MMIO_F(FENCE_REG_GEN6_LO(0), 0x80); + MMIO_D(CPU_VGACNTRL); + MMIO_D(TILECTL); + MMIO_D(GEN6_UCGCTL1); + MMIO_D(GEN6_UCGCTL2); + MMIO_F(_MMIO(0x4f000), 0x90); + MMIO_D(GEN6_PCODE_DATA); + MMIO_D(_MMIO(0x13812c)); + MMIO_D(GEN7_ERR_INT); + MMIO_D(HSW_EDRAM_CAP); + MMIO_D(HSW_IDICR); + MMIO_D(GFX_FLSH_CNTL_GEN6); + MMIO_D(_MMIO(0x3c)); + MMIO_D(_MMIO(0x860)); + MMIO_D(ECOSKPD(RENDER_RING_BASE)); + MMIO_D(_MMIO(0x121d0)); + MMIO_D(ECOSKPD(BLT_RING_BASE)); + MMIO_D(_MMIO(0x41d0)); + MMIO_D(GAC_ECO_BITS); + MMIO_D(_MMIO(0x6200)); + MMIO_D(_MMIO(0x6204)); + MMIO_D(_MMIO(0x6208)); + MMIO_D(_MMIO(0x7118)); + MMIO_D(_MMIO(0x7180)); + MMIO_D(_MMIO(0x7408)); + MMIO_D(_MMIO(0x7c00)); + MMIO_D(GEN6_MBCTL); + MMIO_D(_MMIO(0x911c)); + MMIO_D(_MMIO(0x9120)); + MMIO_D(GEN7_UCGCTL4); + MMIO_D(GAB_CTL); + MMIO_D(_MMIO(0x48800)); + MMIO_D(_MMIO(0xce044)); + MMIO_D(_MMIO(0xe6500)); + MMIO_D(_MMIO(0xe6504)); + MMIO_D(_MMIO(0xe6600)); + MMIO_D(_MMIO(0xe6604)); + MMIO_D(_MMIO(0xe6700)); + MMIO_D(_MMIO(0xe6704)); + 
MMIO_D(_MMIO(0xe6800)); + MMIO_D(_MMIO(0xe6804)); + MMIO_D(PCH_GMBUS4); + MMIO_D(PCH_GMBUS5); + MMIO_D(_MMIO(0x902c)); + MMIO_D(_MMIO(0xec008)); + MMIO_D(_MMIO(0xec00c)); + MMIO_D(_MMIO(0xec008 + 0x18)); + MMIO_D(_MMIO(0xec00c + 0x18)); + MMIO_D(_MMIO(0xec008 + 0x18 * 2)); + MMIO_D(_MMIO(0xec00c + 0x18 * 2)); + MMIO_D(_MMIO(0xec008 + 0x18 * 3)); + MMIO_D(_MMIO(0xec00c + 0x18 * 3)); + MMIO_D(_MMIO(0xec408)); + MMIO_D(_MMIO(0xec40c)); + MMIO_D(_MMIO(0xec408 + 0x18)); + MMIO_D(_MMIO(0xec40c + 0x18)); + MMIO_D(_MMIO(0xec408 + 0x18 * 2)); + MMIO_D(_MMIO(0xec40c + 0x18 * 2)); + MMIO_D(_MMIO(0xec408 + 0x18 * 3)); + MMIO_D(_MMIO(0xec40c + 0x18 * 3)); + MMIO_D(_MMIO(0xfc810)); + MMIO_D(_MMIO(0xfc81c)); + MMIO_D(_MMIO(0xfc828)); + MMIO_D(_MMIO(0xfc834)); + MMIO_D(_MMIO(0xfcc00)); + MMIO_D(_MMIO(0xfcc0c)); + MMIO_D(_MMIO(0xfcc18)); + MMIO_D(_MMIO(0xfcc24)); + MMIO_D(_MMIO(0xfd000)); + MMIO_D(_MMIO(0xfd00c)); + MMIO_D(_MMIO(0xfd018)); + MMIO_D(_MMIO(0xfd024)); + MMIO_D(_MMIO(0xfd034)); + MMIO_D(FPGA_DBG); + MMIO_D(_MMIO(0x2054)); + MMIO_D(_MMIO(0x12054)); + MMIO_D(_MMIO(0x22054)); + MMIO_D(_MMIO(0x1a054)); + MMIO_D(_MMIO(0x44070)); + MMIO_D(_MMIO(0x2178)); + MMIO_D(_MMIO(0x217c)); + MMIO_D(_MMIO(0x12178)); + MMIO_D(_MMIO(0x1217c)); + MMIO_F(_MMIO(0x5200), 32); + MMIO_F(_MMIO(0x5240), 32); + MMIO_F(_MMIO(0x5280), 16); + MMIO_D(BCS_SWCTRL); + MMIO_F(HS_INVOCATION_COUNT, 8); + MMIO_F(DS_INVOCATION_COUNT, 8); + MMIO_F(IA_VERTICES_COUNT, 8); + MMIO_F(IA_PRIMITIVES_COUNT, 8); + MMIO_F(VS_INVOCATION_COUNT, 8); + MMIO_F(GS_INVOCATION_COUNT, 8); + MMIO_F(GS_PRIMITIVES_COUNT, 8); + MMIO_F(CL_INVOCATION_COUNT, 8); + MMIO_F(CL_PRIMITIVES_COUNT, 8); + MMIO_F(PS_INVOCATION_COUNT, 8); + MMIO_F(PS_DEPTH_COUNT, 8); + MMIO_D(ARB_MODE); + MMIO_RING_D(RING_BBADDR); + MMIO_D(_MMIO(0x2220)); + MMIO_D(_MMIO(0x12220)); + MMIO_D(_MMIO(0x22220)); + MMIO_RING_D(RING_SYNC_1); + MMIO_RING_D(RING_SYNC_0); + MMIO_D(GUC_STATUS); + + MMIO_F(_MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000); + MMIO_F(_MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE); + MMIO_F(LGC_PALETTE(PIPE_A, 0), 1024); + MMIO_F(LGC_PALETTE(PIPE_B, 0), 1024); + MMIO_F(LGC_PALETTE(PIPE_C, 0), 1024); + + return 0; +} + +static int iterate_bdw_only_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + MMIO_D(HSW_PWR_WELL_CTL1); + MMIO_D(HSW_PWR_WELL_CTL2); + MMIO_D(HSW_PWR_WELL_CTL3); + MMIO_D(HSW_PWR_WELL_CTL4); + MMIO_D(HSW_PWR_WELL_CTL5); + MMIO_D(HSW_PWR_WELL_CTL6); + + MMIO_D(WM_MISC); + MMIO_D(_MMIO(_SRD_CTL_EDP)); + + MMIO_D(_MMIO(0xb1f0)); + MMIO_D(_MMIO(0xb1c0)); + MMIO_D(_MMIO(0xb100)); + MMIO_D(_MMIO(0xb10c)); + MMIO_D(_MMIO(0xb110)); + MMIO_D(_MMIO(0x83a4)); + MMIO_D(_MMIO(0x8430)); + MMIO_D(_MMIO(0x2248)); + MMIO_D(FORCEWAKE_ACK_HSW); + + return 0; +} + +static int iterate_bdw_plus_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_D(GEN8_GT_IMR(0)); + MMIO_D(GEN8_GT_IER(0)); + MMIO_D(GEN8_GT_IIR(0)); + MMIO_D(GEN8_GT_ISR(0)); + MMIO_D(GEN8_GT_IMR(1)); + MMIO_D(GEN8_GT_IER(1)); + MMIO_D(GEN8_GT_IIR(1)); + MMIO_D(GEN8_GT_ISR(1)); + MMIO_D(GEN8_GT_IMR(2)); + MMIO_D(GEN8_GT_IER(2)); + MMIO_D(GEN8_GT_IIR(2)); + MMIO_D(GEN8_GT_ISR(2)); + MMIO_D(GEN8_GT_IMR(3)); + MMIO_D(GEN8_GT_IER(3)); + MMIO_D(GEN8_GT_IIR(3)); + MMIO_D(GEN8_GT_ISR(3)); + MMIO_D(GEN8_DE_PIPE_IMR(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_IER(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_IIR(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_IMR(PIPE_B)); + MMIO_D(GEN8_DE_PIPE_IER(PIPE_B)); + MMIO_D(GEN8_DE_PIPE_IIR(PIPE_B)); + MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B)); + 
MMIO_D(GEN8_DE_PIPE_IMR(PIPE_C)); + MMIO_D(GEN8_DE_PIPE_IER(PIPE_C)); + MMIO_D(GEN8_DE_PIPE_IIR(PIPE_C)); + MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C)); + MMIO_D(GEN8_DE_PORT_IMR); + MMIO_D(GEN8_DE_PORT_IER); + MMIO_D(GEN8_DE_PORT_IIR); + MMIO_D(GEN8_DE_PORT_ISR); + MMIO_D(GEN8_DE_MISC_IMR); + MMIO_D(GEN8_DE_MISC_IER); + MMIO_D(GEN8_DE_MISC_IIR); + MMIO_D(GEN8_DE_MISC_ISR); + MMIO_D(GEN8_PCU_IMR); + MMIO_D(GEN8_PCU_IER); + MMIO_D(GEN8_PCU_IIR); + MMIO_D(GEN8_PCU_ISR); + MMIO_D(GEN8_MASTER_IRQ); + MMIO_RING_D(RING_ACTHD_UDW); + +#define RING_REG(base) _MMIO((base) + 0xd0) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x230) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x234) + MMIO_RING_F(RING_REG, 8); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x244) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x370) + MMIO_RING_F(RING_REG, 48); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x3a0) + MMIO_RING_D(RING_REG); +#undef RING_REG + + MMIO_D(PIPEMISC(PIPE_A)); + MMIO_D(PIPEMISC(PIPE_B)); + MMIO_D(PIPEMISC(PIPE_C)); + MMIO_D(_MMIO(0x1c1d0)); + MMIO_D(GEN6_MBCUNIT_SNPCR); + MMIO_D(GEN7_MISCCPCTL); + MMIO_D(_MMIO(0x1c054)); + MMIO_D(GEN6_PCODE_MAILBOX); + if (!IS_BROXTON(dev_priv)) + MMIO_D(GEN8_PRIVATE_PAT_LO); + MMIO_D(GEN8_PRIVATE_PAT_HI); + MMIO_D(GAMTARBMODE); + +#define RING_REG(base) _MMIO((base) + 0x270) + MMIO_RING_F(RING_REG, 32); +#undef RING_REG + + MMIO_RING_D(RING_HWS_PGA); + MMIO_D(HDC_CHICKEN0); + MMIO_D(CHICKEN_PIPESL_1(PIPE_A)); + MMIO_D(CHICKEN_PIPESL_1(PIPE_B)); + MMIO_D(CHICKEN_PIPESL_1(PIPE_C)); + MMIO_D(_MMIO(0x6671c)); + MMIO_D(_MMIO(0x66c00)); + MMIO_D(_MMIO(0x66c04)); + MMIO_D(HSW_GTT_CACHE_EN); + MMIO_D(GEN8_EU_DISABLE0); + MMIO_D(GEN8_EU_DISABLE1); + MMIO_D(GEN8_EU_DISABLE2); + MMIO_D(_MMIO(0xfdc)); + MMIO_D(GEN8_ROW_CHICKEN); + MMIO_D(GEN7_ROW_CHICKEN2); + MMIO_D(GEN8_UCGCTL6); + MMIO_D(GEN8_L3SQCREG4); + MMIO_D(GEN9_SCRATCH_LNCF1); + MMIO_F(_MMIO(0x24d0), 48); + MMIO_D(_MMIO(0x44484)); + MMIO_D(_MMIO(0x4448c)); + MMIO_D(GEN8_L3_LRA_1_GPGPU); + MMIO_D(_MMIO(0x110000)); + MMIO_D(_MMIO(0x48400)); + MMIO_D(_MMIO(0x6e570)); + MMIO_D(_MMIO(0x65f10)); + MMIO_D(_MMIO(0xe194)); + MMIO_D(_MMIO(0xe188)); + MMIO_D(HALF_SLICE_CHICKEN2); + MMIO_D(_MMIO(0x2580)); + MMIO_D(_MMIO(0xe220)); + MMIO_D(_MMIO(0xe230)); + MMIO_D(_MMIO(0xe240)); + MMIO_D(_MMIO(0xe260)); + MMIO_D(_MMIO(0xe270)); + MMIO_D(_MMIO(0xe280)); + MMIO_D(_MMIO(0xe2a0)); + MMIO_D(_MMIO(0xe2b0)); + MMIO_D(_MMIO(0xe2c0)); + MMIO_D(_MMIO(0x21f0)); + MMIO_D(GEN8_GAMW_ECO_DEV_RW_IA); + MMIO_D(_MMIO(0x215c)); + MMIO_F(_MMIO(0x2290), 8); + MMIO_D(_MMIO(0x2b00)); + MMIO_D(_MMIO(0x2360)); + MMIO_D(_MMIO(0x1c17c)); + MMIO_D(_MMIO(0x1c178)); + MMIO_D(_MMIO(0x4260)); + MMIO_D(_MMIO(0x4264)); + MMIO_D(_MMIO(0x4268)); + MMIO_D(_MMIO(0x426c)); + MMIO_D(_MMIO(0x4270)); + MMIO_D(_MMIO(0x4094)); + MMIO_D(_MMIO(0x22178)); + MMIO_D(_MMIO(0x1a178)); + MMIO_D(_MMIO(0x1a17c)); + MMIO_D(_MMIO(0x2217c)); + MMIO_D(EDP_PSR_IMR); + MMIO_D(EDP_PSR_IIR); + MMIO_D(_MMIO(0xe4cc)); + MMIO_D(GEN7_SC_INSTDONE); + + return 0; +} + +static int iterate_pre_skl_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + MMIO_D(FORCEWAKE_MT); + + MMIO_D(PCH_ADPA); + MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4); + MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4); + MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4); + + MMIO_F(_MMIO(0x70440), 0xc); + MMIO_F(_MMIO(0x71440), 0xc); + MMIO_F(_MMIO(0x72440), 0xc); + MMIO_F(_MMIO(0x7044c), 0xc); + MMIO_F(_MMIO(0x7144c), 
0xc); + MMIO_F(_MMIO(0x7244c), 0xc); + + return 0; +} + +static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_D(FORCEWAKE_RENDER_GEN9); + MMIO_D(FORCEWAKE_ACK_RENDER_GEN9); + MMIO_D(FORCEWAKE_GT_GEN9); + MMIO_D(FORCEWAKE_ACK_GT_GEN9); + MMIO_D(FORCEWAKE_MEDIA_GEN9); + MMIO_D(FORCEWAKE_ACK_MEDIA_GEN9); + MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4); + MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4); + MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4); + MMIO_D(HSW_PWR_WELL_CTL1); + MMIO_D(HSW_PWR_WELL_CTL2); + MMIO_D(DBUF_CTL_S(0)); + MMIO_D(GEN9_PG_ENABLE); + MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS); + MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS); + MMIO_D(GEN9_GAMT_ECO_REG_RW_IA); + MMIO_D(MMCD_MISC_CTRL); + MMIO_D(CHICKEN_PAR1_1); + MMIO_D(DC_STATE_EN); + MMIO_D(DC_STATE_DEBUG); + MMIO_D(CDCLK_CTL); + MMIO_D(LCPLL1_CTL); + MMIO_D(LCPLL2_CTL); + MMIO_D(_MMIO(_DPLL1_CFGCR1)); + MMIO_D(_MMIO(_DPLL2_CFGCR1)); + MMIO_D(_MMIO(_DPLL3_CFGCR1)); + MMIO_D(_MMIO(_DPLL1_CFGCR2)); + MMIO_D(_MMIO(_DPLL2_CFGCR2)); + MMIO_D(_MMIO(_DPLL3_CFGCR2)); + MMIO_D(DPLL_CTRL1); + MMIO_D(DPLL_CTRL2); + MMIO_D(DPLL_STATUS); + MMIO_D(SKL_PS_WIN_POS(PIPE_A, 0)); + MMIO_D(SKL_PS_WIN_POS(PIPE_A, 1)); + MMIO_D(SKL_PS_WIN_POS(PIPE_B, 0)); + MMIO_D(SKL_PS_WIN_POS(PIPE_B, 1)); + MMIO_D(SKL_PS_WIN_POS(PIPE_C, 0)); + MMIO_D(SKL_PS_WIN_POS(PIPE_C, 1)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 0)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 1)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 0)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 1)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 0)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 1)); + MMIO_D(SKL_PS_CTRL(PIPE_A, 0)); + MMIO_D(SKL_PS_CTRL(PIPE_A, 1)); + MMIO_D(SKL_PS_CTRL(PIPE_B, 0)); + MMIO_D(SKL_PS_CTRL(PIPE_B, 1)); + MMIO_D(SKL_PS_CTRL(PIPE_C, 0)); + MMIO_D(SKL_PS_CTRL(PIPE_C, 1)); + MMIO_D(PLANE_BUF_CFG(PIPE_A, 0)); + MMIO_D(PLANE_BUF_CFG(PIPE_A, 1)); + MMIO_D(PLANE_BUF_CFG(PIPE_A, 2)); + MMIO_D(PLANE_BUF_CFG(PIPE_A, 3)); + MMIO_D(PLANE_BUF_CFG(PIPE_B, 0)); + MMIO_D(PLANE_BUF_CFG(PIPE_B, 1)); + MMIO_D(PLANE_BUF_CFG(PIPE_B, 2)); + MMIO_D(PLANE_BUF_CFG(PIPE_B, 3)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 0)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 1)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 2)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 3)); + MMIO_D(CUR_BUF_CFG(PIPE_A)); + MMIO_D(CUR_BUF_CFG(PIPE_B)); + MMIO_D(CUR_BUF_CFG(PIPE_C)); + MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8); + MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8); + MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8); + MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8); + MMIO_D(PLANE_WM_TRANS(PIPE_A, 0)); + MMIO_D(PLANE_WM_TRANS(PIPE_A, 1)); + MMIO_D(PLANE_WM_TRANS(PIPE_A, 2)); + MMIO_D(PLANE_WM_TRANS(PIPE_B, 0)); + MMIO_D(PLANE_WM_TRANS(PIPE_B, 1)); + MMIO_D(PLANE_WM_TRANS(PIPE_B, 2)); + MMIO_D(PLANE_WM_TRANS(PIPE_C, 0)); + MMIO_D(PLANE_WM_TRANS(PIPE_C, 1)); + MMIO_D(PLANE_WM_TRANS(PIPE_C, 2)); + MMIO_D(CUR_WM_TRANS(PIPE_A)); + MMIO_D(CUR_WM_TRANS(PIPE_B)); + MMIO_D(CUR_WM_TRANS(PIPE_C)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 0)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 1)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 2)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 3)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 0)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 1)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 2)); + 
MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 3)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 0)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 1)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 2)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 3)); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 1))); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 2))); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 3))); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 4))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 1))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 2))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 3))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 4))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 1))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 2))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 3))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 4))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 1))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 2))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 3))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 4))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 1))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 2))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 3))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 4))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 1))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 2))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 3))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 4))); + MMIO_D(_MMIO(_PLANE_CTL_3_A)); + MMIO_D(_MMIO(_PLANE_CTL_3_B)); + MMIO_D(_MMIO(0x72380)); + MMIO_D(_MMIO(0x7239c)); + MMIO_D(_MMIO(_PLANE_SURF_3_A)); + MMIO_D(_MMIO(_PLANE_SURF_3_B)); + MMIO_D(DMC_SSP_BASE); + MMIO_D(DMC_HTP_SKL); + MMIO_D(DMC_LAST_WRITE); + MMIO_D(BDW_SCRATCH1); + MMIO_D(SKL_DFSM); + MMIO_D(DISPIO_CR_TX_BMU_CR0); + MMIO_F(GEN9_GFX_MOCS(0), 0x7f8); + MMIO_F(GEN7_L3CNTLREG2, 0x80); + MMIO_D(RPM_CONFIG0); + MMIO_D(_MMIO(0xd08)); + MMIO_D(RC6_LOCATION); + MMIO_D(GEN7_FF_SLICE_CS_CHICKEN1); + MMIO_D(GEN9_CS_DEBUG_MODE1); + /* TRTT */ + MMIO_D(TRVATTL3PTRDW(0)); + MMIO_D(TRVATTL3PTRDW(1)); + MMIO_D(TRVATTL3PTRDW(2)); + MMIO_D(TRVATTL3PTRDW(3)); + MMIO_D(TRVADR); + MMIO_D(TRTTE); + MMIO_D(_MMIO(0x4dfc)); + MMIO_D(_MMIO(0x46430)); + MMIO_D(_MMIO(0x46520)); + MMIO_D(_MMIO(0xc403c)); + MMIO_D(GEN8_GARBCNTL); + MMIO_D(DMA_CTRL); + MMIO_D(_MMIO(0x65900)); + MMIO_D(GEN6_STOLEN_RESERVED); + MMIO_D(_MMIO(0x4068)); + MMIO_D(_MMIO(0x67054)); + MMIO_D(_MMIO(0x6e560)); + MMIO_D(_MMIO(0x6e554)); + MMIO_D(_MMIO(0x2b20)); + MMIO_D(_MMIO(0x65f00)); + MMIO_D(_MMIO(0x65f08)); + MMIO_D(_MMIO(0x320f0)); + MMIO_D(_MMIO(0x70034)); + MMIO_D(_MMIO(0x71034)); + MMIO_D(_MMIO(0x72034)); + MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A))); + MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B))); + MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C))); + MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A))); + MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B))); + MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C))); + MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A))); + MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B))); + MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C))); + MMIO_D(_MMIO(0x44500)); +#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4) + MMIO_RING_D(CSFE_CHICKEN1_REG); +#undef CSFE_CHICKEN1_REG + MMIO_D(GEN8_HDC_CHICKEN1); + MMIO_D(GEN9_WM_CHICKEN3); + + if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) + MMIO_D(GAMT_CHKN_BIT_REG); + if (!IS_BROXTON(dev_priv)) + MMIO_D(GEN9_CTX_PREEMPT_REG); + MMIO_F(_MMIO(DMC_MMIO_START_RANGE), 0x3000); + return 0; +} + +static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_F(_MMIO(0x80000), 0x3000); + MMIO_D(GEN7_SAMPLER_INSTDONE); + MMIO_D(GEN7_ROW_INSTDONE); + MMIO_D(GEN8_FAULT_TLB_DATA0); + MMIO_D(GEN8_FAULT_TLB_DATA1); + MMIO_D(ERROR_GEN6); + MMIO_D(DONE_REG); + MMIO_D(EIR); + MMIO_D(PGTBL_ER); + MMIO_D(_MMIO(0x4194)); + MMIO_D(_MMIO(0x4294)); + 
MMIO_D(_MMIO(0x4494)); + MMIO_RING_D(RING_PSMI_CTL); + MMIO_RING_D(RING_DMA_FADD); + MMIO_RING_D(RING_DMA_FADD_UDW); + MMIO_RING_D(RING_IPEHR); + MMIO_RING_D(RING_INSTPS); + MMIO_RING_D(RING_BBADDR_UDW); + MMIO_RING_D(RING_BBSTATE); + MMIO_RING_D(RING_IPEIR); + MMIO_F(SOFT_SCRATCH(0), 16 * 4); + MMIO_D(BXT_P_CR_GT_DISP_PWRON); + MMIO_D(BXT_RP_STATE_CAP); + MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY0)); + MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY1)); + MMIO_D(BXT_PHY_CTL(PORT_A)); + MMIO_D(BXT_PHY_CTL(PORT_B)); + MMIO_D(BXT_PHY_CTL(PORT_C)); + MMIO_D(BXT_PORT_PLL_ENABLE(PORT_A)); + MMIO_D(BXT_PORT_PLL_ENABLE(PORT_B)); + MMIO_D(BXT_PORT_PLL_ENABLE(PORT_C)); + MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0)); + MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0)); + MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0)); + MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1)); + MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1)); + MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1)); + MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1)); + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10)); + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2)); + 
MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10)); + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10)); + MMIO_D(BXT_DE_PLL_CTL); + MMIO_D(BXT_DE_PLL_ENABLE); + MMIO_D(BXT_DSI_PLL_CTL); + MMIO_D(BXT_DSI_PLL_ENABLE); + MMIO_D(GEN9_CLKGATE_DIS_0); + MMIO_D(GEN9_CLKGATE_DIS_4); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A)); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B)); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C)); + MMIO_D(RC6_CTX_BASE); + MMIO_D(GEN8_PUSHBUS_CONTROL); + MMIO_D(GEN8_PUSHBUS_ENABLE); + MMIO_D(GEN8_PUSHBUS_SHIFT); + MMIO_D(GEN6_GFXPAUSE); + MMIO_D(GEN8_L3SQCREG1); + MMIO_D(GEN8_L3CNTLREG); + MMIO_D(_MMIO(0x20D8)); + MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40); + MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40); + MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40); + MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40); + MMIO_D(GEN9_CTX_PREEMPT_REG); + MMIO_D(GEN8_PRIVATE_PAT_LO); + + return 0; +} + +/** + * intel_gvt_iterate_mmio_table - Iterate the GVT MMIO table + * @iter: the interator + * + * This function is called for iterating the GVT MMIO table when i915 is + * taking the snapshot of the HW and GVT is building MMIO tracking table. 
+ */ +int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *i915 = iter->i915; + int ret; + + ret = iterate_generic_mmio(iter); + if (ret) + goto err; + + if (IS_BROADWELL(i915)) { + ret = iterate_bdw_only_mmio(iter); + if (ret) + goto err; + ret = iterate_bdw_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_pre_skl_mmio(iter); + if (ret) + goto err; + } else if (IS_SKYLAKE(i915) || + IS_KABYLAKE(i915) || + IS_COFFEELAKE(i915) || + IS_COMETLAKE(i915)) { + ret = iterate_bdw_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_skl_plus_mmio(iter); + if (ret) + goto err; + } else if (IS_BROXTON(i915)) { + ret = iterate_bdw_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_skl_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_bxt_mmio(iter); + if (ret) + goto err; + } + + return 0; +err: + return ret; +} From 66e7a8063381cb2f568cd3436df2f0ec239a84f9 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Thu, 7 Apr 2022 03:19:44 -0400 Subject: [PATCH 02/41] i915/gvt: Save the initial HW state snapshot in i915 Save the initial HW state snapshot in i915 so that the rest of the GVT-g code can be moved into a dedicated module while it can still get a clean initial HW state saved at the correct time during the initialization of i915. vGPUs created later by GVT-g will use this HW state as their initial HW state. v6: - Remove the reference of intel_gvt_device_info. (Christoph) - Refine the save_mmio() function. (Christoph) Cc: Christoph Hellwig Cc: Jason Gunthorpe Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Vivi Rodrigo Cc: Zhenyu Wang Cc: Zhi Wang Signed-off-by: Zhi Wang Reviewed-by: Christoph Hellwig Tested-by: Christoph Hellwig Reviewed-by: Zhenyu Wang Acked-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20220407071945.72148-3-zhi.a.wang@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/intel_gvt.c | 92 +++++++++++++++++++++++++++++++- 2 files changed, 92 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fa14da84362e..0830020c11c0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -432,6 +432,8 @@ struct i915_virtual_gpu { struct mutex lock; /* serialises sending of g2v_notify command pkts */ bool active; u32 caps; + u32 *initial_mmio; + u8 *initial_cfg_space; }; struct i915_selftest_stash { diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index cf6e98962d82..65daab2c4d9e 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -86,6 +86,85 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) dev_priv->params.enable_gvt = 0; } +static void free_initial_hw_state(struct drm_i915_private *dev_priv) +{ + struct i915_virtual_gpu *vgpu = &dev_priv->vgpu; + + vfree(vgpu->initial_mmio); + vgpu->initial_mmio = NULL; + + kfree(vgpu->initial_cfg_space); + vgpu->initial_cfg_space = NULL; +} + +static void save_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset, + u32 size) +{ + struct drm_i915_private *dev_priv = iter->i915; + u32 *mmio, i; + + for (i = offset; i < offset + size; i += 4) { + mmio = iter->data + i; + *mmio = intel_uncore_read_notrace(to_gt(dev_priv)->uncore, + _MMIO(i)); + } +} + +static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, + u32 offset, u32 size) +{ + if (WARN_ON(!IS_ALIGNED(offset, 4))) + return -EINVAL; + + save_mmio(iter, offset, size); + return 0; +} + +static int save_initial_hw_state(struct
drm_i915_private *dev_priv) +{ + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct i915_virtual_gpu *vgpu = &dev_priv->vgpu; + struct intel_gvt_mmio_table_iter iter; + void *mem; + int i, ret; + + mem = kzalloc(PCI_CFG_SPACE_EXP_SIZE, GFP_KERNEL); + if (!mem) + return -ENOMEM; + + vgpu->initial_cfg_space = mem; + + for (i = 0; i < PCI_CFG_SPACE_EXP_SIZE; i += 4) + pci_read_config_dword(pdev, i, mem + i); + + mem = vzalloc(2 * SZ_1M); + if (!mem) { + ret = -ENOMEM; + goto err_mmio; + } + + vgpu->initial_mmio = mem; + + iter.i915 = dev_priv; + iter.data = vgpu->initial_mmio; + iter.handle_mmio_cb = handle_mmio; + + ret = intel_gvt_iterate_mmio_table(&iter); + if (ret) + goto err_iterate; + + return 0; + +err_iterate: + vfree(vgpu->initial_mmio); + vgpu->initial_mmio = NULL; +err_mmio: + kfree(vgpu->initial_cfg_space); + vgpu->initial_cfg_space = NULL; + + return ret; +} + /** * intel_gvt_init - initialize GVT components * @dev_priv: drm i915 private data @@ -115,15 +194,23 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) return -EIO; } + ret = save_initial_hw_state(dev_priv); + if (ret) { + drm_dbg(&dev_priv->drm, "Fail to save initial HW state\n"); + goto err_save_hw_state; + } + ret = intel_gvt_init_device(dev_priv); if (ret) { drm_dbg(&dev_priv->drm, "Fail to init GVT device\n"); - goto bail; + goto err_init_device; } return 0; -bail: +err_init_device: + free_initial_hw_state(dev_priv); +err_save_hw_state: dev_priv->params.enable_gvt = 0; return 0; } @@ -147,6 +234,7 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) return; intel_gvt_clean_device(dev_priv); + free_initial_hw_state(dev_priv); } /** From 1672991412dfef000c9f9271558a3713081a4c57 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Thu, 7 Apr 2022 03:19:45 -0400 Subject: [PATCH 03/41] i915/gvt: Use the initial HW state snapshot saved in i915 The code of saving initial HW state snapshot has been moved into i915. Let the GVT-g core logic use that snapshot. 
Cc: Christoph Hellwig Cc: Jason Gunthorpe Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Vivi Rodrigo Cc: Zhenyu Wang Cc: Zhi Wang Signed-off-by: Zhi Wang Reviewed-by: Christoph Hellwig Tested-by: Christoph Hellwig Reviewed-by: Zhenyu Wang Acked-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20220407071945.72148-4-zhi.a.wang@intel.com --- drivers/gpu/drm/i915/gvt/firmware.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 1a8274a3f4b1..54fe442238c6 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -66,22 +66,16 @@ static struct bin_attribute firmware_attr = { .mmap = NULL, }; -static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) -{ - *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore, - _MMIO(offset)); - return 0; -} - static int expose_firmware_sysfs(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); + struct drm_i915_private *i915 = gvt->gt->i915; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct gvt_firmware_header *h; void *firmware; void *p; unsigned long size, crc32_start; - int i, ret; + int ret; size = sizeof(*h) + info->mmio_size + info->cfg_space_size; firmware = vzalloc(size); @@ -99,17 +93,16 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) p = firmware + h->cfg_space_offset; - for (i = 0; i < h->cfg_space_size; i += 4) - pci_read_config_dword(pdev, i, p + i); - - memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size); + memcpy(gvt->firmware.cfg_space, i915->vgpu.initial_cfg_space, + info->cfg_space_size); + memcpy(p, gvt->firmware.cfg_space, info->cfg_space_size); p = firmware + h->mmio_offset; - /* Take a snapshot of hw mmio registers. */ - intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p); + memcpy(gvt->firmware.mmio, i915->vgpu.initial_mmio, + info->mmio_size); - memcpy(gvt->firmware.mmio, p, info->mmio_size); + memcpy(p, gvt->firmware.mmio, info->mmio_size); crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start); From de5d437ae8696ab958903ac199c56c939036e3ea Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 13 Apr 2022 15:25:38 +0300 Subject: [PATCH 04/41] drm/i915/gvt: fix trace TRACE_INCLUDE_PATH TRACE_INCLUDE_PATH should be a path relative to define_trace.h, not the file including it. (See the comment in include/trace/define_trace.h.) Cc: Zhi Wang Cc: Christoph Hellwig Signed-off-by: Jani Nikula Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/48b772795b7ab674f609ecad53b4882c66a8262a.1649852517.git.jani.nikula@intel.com Reviewed-by: Zhi Wang Reviewed-by: Christoph Hellwig --- drivers/gpu/drm/i915/gvt/trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h index 6d787750d279..020f1aa28322 100644 --- a/drivers/gpu/drm/i915/gvt/trace.h +++ b/drivers/gpu/drm/i915/gvt/trace.h @@ -377,7 +377,7 @@ TRACE_EVENT(render_mmio, /* This part must be out of protection */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . 
#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/gvt #define TRACE_INCLUDE_FILE trace #include From 7f0cf30187cdb1f04d905505ffde910cecf1b35e Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 13 Apr 2022 15:25:39 +0300 Subject: [PATCH 05/41] drm/i915/gvt: better align the Makefile with i915 Makefile Drop extra ccflags, drop extra intermediate variables, list object files one per line alphabetically. Cc: Zhi Wang Cc: Christoph Hellwig Signed-off-by: Jani Nikula Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/8bc0895376c077156a671e24ac6a5c75b7db4c9c.1649852517.git.jani.nikula@intel.com Reviewed-by: Zhi Wang Reviewed-by: Christoph Hellwig --- drivers/gpu/drm/i915/Makefile | 6 +++--- drivers/gpu/drm/i915/gvt/Makefile | 30 +++++++++++++++++++++++------- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index a6cab2d9a9d1..95a449b3508b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -320,10 +320,10 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ # virtual gpu code i915-y += i915_vgpu.o -ifeq ($(CONFIG_DRM_I915_GVT),y) -i915-y += intel_gvt.o intel_gvt_mmio_table.o +i915-$(CONFIG_DRM_I915_GVT) += \ + intel_gvt.o \ + intel_gvt_mmio_table.o include $(src)/gvt/Makefile -endif obj-$(CONFIG_DRM_I915) += i915.o obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index ea8324abc784..584661047945 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -1,9 +1,25 @@ # SPDX-License-Identifier: GPL-2.0 -GVT_DIR := gvt -GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \ interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \ execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \ fb_decoder.o dmabuf.o page_track.o -ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/ -i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) +i915-$(CONFIG_DRM_I915_GVT) += \ + gvt/aperture_gm.o \ + gvt/cfg_space.o \ + gvt/cmd_parser.o \ + gvt/debugfs.o \ + gvt/display.o \ + gvt/dmabuf.o \ + gvt/edid.o \ + gvt/execlist.o \ + gvt/fb_decoder.o \ + gvt/firmware.o \ + gvt/gtt.o \ + gvt/gvt.o \ + gvt/handlers.o \ + gvt/interrupt.o \ + gvt/mmio.o \ + gvt/mmio_context.o \ + gvt/opregion.o \ + gvt/page_track.o \ + gvt/sched_policy.o \ + gvt/scheduler.o \ + gvt/trace_points.o \ + gvt/vgpu.o From a85749e12d66c2cd89d1bce05ef9abca8b5875e9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:30 +0200 Subject: [PATCH 06/41] drm/i915/gvt: remove module refcounting in intel_gvt_{,un}register_hypervisor THIS_MODULE is always referenced while a symbol it exports is being called, so don't bother with the additional reference.
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-2-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index f0b69e4dcb52..623424766c35 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -308,10 +308,6 @@ intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m) m->type != INTEL_GVT_HYPERVISOR_XEN) return -EINVAL; - /* Get a reference for device model module */ - if (!try_module_get(THIS_MODULE)) - return -ENODEV; - intel_gvt_host.mpt = m; intel_gvt_host.hypervisor_type = m->type; gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; @@ -321,7 +317,6 @@ intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m) if (ret < 0) { gvt_err("Failed to init %s hypervisor module\n", supported_hypervisors[intel_gvt_host.hypervisor_type]); - module_put(THIS_MODULE); return -ENODEV; } gvt_dbg_core("Running with hypervisor %s in host mode\n", @@ -335,6 +330,5 @@ intel_gvt_unregister_hypervisor(void) { void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt); - module_put(THIS_MODULE); } EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor); From 367748066eeb378bcb1399f1cfa6675c76afc9e1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:31 +0200 Subject: [PATCH 07/41] drm/i915/gvt: remove enum hypervisor_type The only supported hypervisor is KVM, so don't bother with dead code enumerating hypervisors. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-3-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 17 +-- drivers/gpu/drm/i915/gvt/gvt.h | 1 - drivers/gpu/drm/i915/gvt/hypercall.h | 6 -- drivers/gpu/drm/i915/gvt/kvmgt.c | 1 - drivers/gpu/drm/i915/gvt/opregion.c | 150 ++++++--------------------- 5 files changed, 34 insertions(+), 141 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 623424766c35..b4b13e4a94e3 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -41,11 +41,6 @@ struct intel_gvt_host intel_gvt_host; -static const char * const supported_hypervisors[] = { - [INTEL_GVT_HYPERVISOR_XEN] = "XEN", - [INTEL_GVT_HYPERVISOR_KVM] = "KVM", -}; - static const struct intel_gvt_ops intel_gvt_ops = { .emulate_cfg_read = intel_vgpu_emulate_cfg_read, .emulate_cfg_write = intel_vgpu_emulate_cfg_write, @@ -304,23 +299,13 @@ intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m) if (!intel_gvt_host.initialized) return -ENODEV; - if (m->type != INTEL_GVT_HYPERVISOR_KVM && - m->type != INTEL_GVT_HYPERVISOR_XEN) - return -EINVAL; - intel_gvt_host.mpt = m; - intel_gvt_host.hypervisor_type = m->type; gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt, &intel_gvt_ops); - if (ret < 0) { - gvt_err("Failed to init %s hypervisor module\n", - supported_hypervisors[intel_gvt_host.hypervisor_type]); + if (ret < 0) return -ENODEV; - } - gvt_dbg_core("Running with hypervisor %s in host mode\n", - supported_hypervisors[intel_gvt_host.hypervisor_type]); return 0; } EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 
bfe07c69cfd2..554baf26cab3 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -60,7 +60,6 @@ struct intel_gvt_host { struct device *dev; bool initialized; - int hypervisor_type; const struct intel_gvt_mpt *mpt; }; diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index f33e3cbd0439..317983153645 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -37,17 +37,11 @@ struct device; -enum hypervisor_type { - INTEL_GVT_HYPERVISOR_XEN = 0, - INTEL_GVT_HYPERVISOR_KVM, -}; - /* * Specific GVT-g MPT modules function collections. Currently GVT-g supports * both Xen and KVM by providing dedicated hypervisor-related MPT modules. */ struct intel_gvt_mpt { - enum hypervisor_type type; int (*host_init)(struct device *dev, void *gvt, const void *ops); void (*host_exit)(struct device *dev, void *gvt); int (*attach_vgpu)(void *vgpu, unsigned long *handle); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 057ec4490104..5231ce8084b3 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -2221,7 +2221,6 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) } static const struct intel_gvt_mpt kvmgt_mpt = { - .type = INTEL_GVT_HYPERVISOR_KVM, .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, .attach_vgpu = kvmgt_attach_vgpu, diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 33569b910ed5..286ac6d7c6ce 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -255,33 +255,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) return 0; } -static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) -{ - u64 mfn; - int i, ret; - - for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) { - mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va - + i * PAGE_SIZE); - if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("fail to get MFN from VA\n"); - return -EINVAL; - } - ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, - vgpu_opregion(vgpu)->gfn[i], - mfn, 1, map); - if (ret) { - gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n", - ret); - return ret; - } - } - - vgpu_opregion(vgpu)->mapped = map; - - return 0; -} - /** * intel_vgpu_opregion_base_write_handler - Opregion base register write handler * @@ -294,34 +267,13 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa) { - int i, ret = 0; + int i; gvt_dbg_core("emulate opregion from kernel\n"); - switch (intel_gvt_host.hypervisor_type) { - case INTEL_GVT_HYPERVISOR_KVM: - for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) - vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; - break; - case INTEL_GVT_HYPERVISOR_XEN: - /** - * Wins guest on Xengt will write this register twice: xen - * hvmloader and windows graphic driver. 
- */ - if (vgpu_opregion(vgpu)->mapped) - map_vgpu_opregion(vgpu, false); - - for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) - vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; - - ret = map_vgpu_opregion(vgpu, true); - break; - default: - ret = -EINVAL; - gvt_vgpu_err("not supported hypervisor\n"); - } - - return ret; + for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) + vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; + return 0; } /** @@ -336,12 +288,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) if (!vgpu_opregion(vgpu)->va) return; - if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { - if (vgpu_opregion(vgpu)->mapped) - map_vgpu_opregion(vgpu, false); - } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) { - /* Guest opregion is released by VFIO */ - } + /* Guest opregion is released by VFIO */ free_pages((unsigned long)vgpu_opregion(vgpu)->va, get_order(INTEL_GVT_OPREGION_SIZE)); @@ -470,39 +417,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) u64 scic_pa = 0, parm_pa = 0; int ret; - switch (intel_gvt_host.hypervisor_type) { - case INTEL_GVT_HYPERVISOR_XEN: - scic = *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_SCIC); - parm = *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_PARM); - break; - case INTEL_GVT_HYPERVISOR_KVM: - scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + - INTEL_GVT_OPREGION_SCIC; - parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + - INTEL_GVT_OPREGION_PARM; + scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + + INTEL_GVT_OPREGION_SCIC; + parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + + INTEL_GVT_OPREGION_PARM; + ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, &scic, sizeof(scic)); + if (ret) { + gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } - ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, - &scic, sizeof(scic)); - if (ret) { - gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } - - ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa, - &parm, sizeof(parm)); - if (ret) { - gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } - - break; - default: - gvt_vgpu_err("not supported hypervisor\n"); - return -EINVAL; + ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa, &parm, sizeof(parm)); + if (ret) { + gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; } if (!(swsci & SWSCI_SCI_SELECT)) { @@ -535,34 +465,20 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) parm = 0; out: - switch (intel_gvt_host.hypervisor_type) { - case INTEL_GVT_HYPERVISOR_XEN: - *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_SCIC) = scic; - *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_PARM) = parm; - break; - case INTEL_GVT_HYPERVISOR_KVM: - ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, - &scic, sizeof(scic)); - if (ret) { - gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } + ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, &scic, + sizeof(scic)); + if (ret) { + gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } - ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa, - &parm, sizeof(parm)); - if (ret) { - 
gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } - - break; - default: - gvt_vgpu_err("not supported hypervisor\n"); - return -EINVAL; + ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa, &parm, + sizeof(parm)); + if (ret) { + gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; } return 0; From f49fc35799fa63e149ad79f4250a655edfac57a2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:32 +0200 Subject: [PATCH 08/41] drm/i915/gvt: rename intel_vgpu_ops to intel_vgpu_mdev_ops Free the intel_vgpu_ops symbol name for something that fits better. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-4-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 5231ce8084b3..e3f0c555ed5f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1765,7 +1765,7 @@ static const struct attribute_group *intel_vgpu_groups[] = { NULL, }; -static struct mdev_parent_ops intel_vgpu_ops = { +static struct mdev_parent_ops intel_vgpu_mdev_ops = { .mdev_attr_groups = intel_vgpu_groups, .create = intel_vgpu_create, .remove = intel_vgpu_remove, @@ -1788,9 +1788,9 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) return ret; intel_gvt_ops = ops; - intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups; + intel_vgpu_mdev_ops.supported_type_groups = gvt_vgpu_type_groups; - ret = mdev_register_device(dev, &intel_vgpu_ops); + ret = mdev_register_device(dev, &intel_vgpu_mdev_ops); if (ret) intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); From 8b750bf744181ca3eadfb288830d2f42b04adc67 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:35 +0200 Subject: [PATCH 09/41] drm/i915/gvt: move the gvt code into kvmgt.ko Instead of having an option to build the gvt code into the main i915 module, just move it into the kvmgt.ko module. This only requires a new struct with three entries that the KVMGT modules needs to register with the main i915 module, and a proper list of GVT-enabled devices instead of global device pointer. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-7-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/Kconfig | 36 ++-- drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/gvt/Makefile | 3 +- drivers/gpu/drm/i915/gvt/gvt.c | 55 ++---- drivers/gpu/drm/i915/gvt/gvt.h | 6 +- drivers/gpu/drm/i915/gvt/kvmgt.c | 14 +- drivers/gpu/drm/i915/gvt/mpt.h | 3 - drivers/gpu/drm/i915/i915_driver.c | 7 - drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_gvt.c | 206 +++++++++++++------- drivers/gpu/drm/i915/intel_gvt.h | 17 +- drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 1 + 12 files changed, 195 insertions(+), 156 deletions(-) diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 98c5450b8eac..be122f541cf9 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -102,40 +102,30 @@ config DRM_I915_USERPTR If in doubt, say "Y". 
config DRM_I915_GVT - bool "Enable Intel GVT-g graphics virtualization host support" + bool + +config DRM_I915_GVT_KVMGT + tristate "Enable KVM host support Intel GVT-g graphics virtualization" depends on DRM_I915 depends on X86 depends on 64BIT - default n + depends on KVM + depends on VFIO_MDEV + select DRM_I915_GVT + select KVM_EXTERNAL_WRITE_TRACKING + help Choose this option if you want to enable Intel GVT-g graphics virtualization technology host support with integrated graphics. With GVT-g, it's possible to have one integrated graphics - device shared by multiple VMs under different hypervisors. + device shared by multiple VMs under KVM. - Note that at least one hypervisor like Xen or KVM is required for - this driver to work, and it only supports newer device from - Broadwell+. For further information and setup guide, you can - visit: http://01.org/igvt-g. - - Now it's just a stub to support the modifications of i915 for - GVT device model. It requires at least one MPT modules for Xen/KVM - and other components of GVT device model to work. Use it under - you own risk. + Note that this driver only supports newer device from Broadwell on. + For further information and setup guide, you can visit: + http://01.org/igvt-g. If in doubt, say "N". -config DRM_I915_GVT_KVMGT - tristate "Enable KVM/VFIO support for Intel GVT-g" - depends on DRM_I915_GVT - depends on KVM - depends on VFIO_MDEV - select KVM_EXTERNAL_WRITE_TRACKING - default n - help - Choose this option if you want to enable KVMGT support for - Intel GVT-g. - config DRM_I915_PXP bool "Enable Intel PXP support" depends on DRM_I915 diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 95a449b3508b..440cb3decd12 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -326,7 +326,7 @@ i915-$(CONFIG_DRM_I915_GVT) += \ include $(src)/gvt/Makefile obj-$(CONFIG_DRM_I915) += i915.o -obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o +obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o # header test diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index 584661047945..ca8894049dca 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -i915-$(CONFIG_DRM_I915_GVT) += \ +kvmgt-$(CONFIG_DRM_I915_GVT) += \ gvt/aperture_gm.o \ gvt/cfg_space.o \ gvt/cmd_parser.o \ @@ -15,6 +15,7 @@ i915-$(CONFIG_DRM_I915_GVT) += \ gvt/gvt.o \ gvt/handlers.o \ gvt/interrupt.o \ + gvt/kvmgt.o \ gvt/mmio.o \ gvt/mmio_context.o \ gvt/opregion.o \ diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index b4b13e4a94e3..c3308058f461 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -39,8 +39,6 @@ #include #include -struct intel_gvt_host intel_gvt_host; - static const struct intel_gvt_ops intel_gvt_ops = { .emulate_cfg_read = intel_vgpu_emulate_cfg_read, .emulate_cfg_write = intel_vgpu_emulate_cfg_write, @@ -147,13 +145,14 @@ static int init_service_thread(struct intel_gvt *gvt) * resources owned by a GVT device. 
* */ -void intel_gvt_clean_device(struct drm_i915_private *i915) +static void intel_gvt_clean_device(struct drm_i915_private *i915) { struct intel_gvt *gvt = fetch_and_zero(&i915->gvt); if (drm_WARN_ON(&i915->drm, !gvt)) return; + intel_gvt_hypervisor_host_exit(i915->drm.dev, gvt); intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_clean_vgpu_types(gvt); @@ -181,7 +180,7 @@ void intel_gvt_clean_device(struct drm_i915_private *i915) * Zero on success, negative error code if failed. * */ -int intel_gvt_init_device(struct drm_i915_private *i915) +static int intel_gvt_init_device(struct drm_i915_private *i915) { struct intel_gvt *gvt; struct intel_vgpu *vgpu; @@ -253,11 +252,17 @@ int intel_gvt_init_device(struct drm_i915_private *i915) intel_gvt_debugfs_init(gvt); + ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt, + &intel_gvt_ops); + if (ret) + goto out_destroy_idle_vgpu; + gvt_dbg_core("gvt device initialization is done\n"); - intel_gvt_host.dev = i915->drm.dev; - intel_gvt_host.initialized = true; return 0; +out_destroy_idle_vgpu: + intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); + intel_gvt_debugfs_clean(gvt); out_clean_types: intel_gvt_clean_vgpu_types(gvt); out_clean_thread: @@ -281,39 +286,17 @@ int intel_gvt_init_device(struct drm_i915_private *i915) return ret; } -int -intel_gvt_pm_resume(struct intel_gvt *gvt) +static void intel_gvt_pm_resume(struct drm_i915_private *i915) { + struct intel_gvt *gvt = i915->gvt; + intel_gvt_restore_fence(gvt); intel_gvt_restore_mmio(gvt); intel_gvt_restore_ggtt(gvt); - return 0; } -int -intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m) -{ - int ret; - void *gvt; - - if (!intel_gvt_host.initialized) - return -ENODEV; - - intel_gvt_host.mpt = m; - gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; - - ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt, - &intel_gvt_ops); - if (ret < 0) - return -ENODEV; - return 0; -} -EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor); - -void -intel_gvt_unregister_hypervisor(void) -{ - void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; - intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt); -} -EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor); +const struct intel_vgpu_ops intel_gvt_vgpu_ops = { + .init_device = intel_gvt_init_device, + .clean_device = intel_gvt_clean_device, + .pm_resume = intel_gvt_pm_resume, +}; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 554baf26cab3..dc36a791467b 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -58,8 +58,6 @@ #define GVT_MAX_VGPU 8 struct intel_gvt_host { - struct device *dev; - bool initialized; const struct intel_gvt_mpt *mpt; }; @@ -728,9 +726,9 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); -int intel_gvt_pm_resume(struct intel_gvt *gvt); - #include "trace.h" #include "mpt.h" +extern const struct intel_vgpu_ops intel_gvt_vgpu_ops; + #endif diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index e3f0c555ed5f..fa8b326eb219 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -49,8 +49,12 @@ #include #include "i915_drv.h" +#include "intel_gvt.h" #include "gvt.h" +MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS(I915_GVT); + static const struct intel_gvt_ops *intel_gvt_ops; /* helper macros copied from vfio-pci */ @@ -2242,16 +2246,18 @@ static const struct intel_gvt_mpt kvmgt_mpt = { 
.is_valid_gfn = kvmgt_is_valid_gfn, }; +struct intel_gvt_host intel_gvt_host = { + .mpt = &kvmgt_mpt, +}; + static int __init kvmgt_init(void) { - if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0) - return -ENODEV; - return 0; + return intel_gvt_set_ops(&intel_gvt_vgpu_ops); } static void __exit kvmgt_exit(void) { - intel_gvt_unregister_hypervisor(); + intel_gvt_clear_ops(&intel_gvt_vgpu_ops); } module_init(kvmgt_init); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index e6c5a792a49a..1b5617bb2745 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -394,7 +394,4 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn( return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn); } -int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *); -void intel_gvt_unregister_hypervisor(void); - #endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 62b3f332bbf5..46b7315de696 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -468,11 +468,6 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) pci_dev_put(dev_priv->bridge_dev); } -static void intel_sanitize_options(struct drm_i915_private *dev_priv) -{ - intel_gvt_sanitize_options(dev_priv); -} - /** * i915_set_dma_info - set all relevant PCI dma info as configured for the * platform @@ -566,8 +561,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) } } - intel_sanitize_options(dev_priv); - /* needs to be done before ggtt probe */ intel_dram_edram_detect(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0830020c11c0..204cf703323e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -434,6 +434,7 @@ struct i915_virtual_gpu { u32 caps; u32 *initial_mmio; u8 *initial_cfg_space; + struct list_head entry; }; struct i915_selftest_stash { diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 65daab2c4d9e..7c03d975069e 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -24,7 +24,10 @@ #include "i915_drv.h" #include "i915_vgpu.h" #include "intel_gvt.h" -#include "gvt/gvt.h" +#include "gem/i915_gem_dmabuf.h" +#include "gt/intel_context.h" +#include "gt/intel_ring.h" +#include "gt/shmem_utils.h" /** * DOC: Intel GVT-g host support @@ -41,6 +44,10 @@ * doc is available on https://01.org/group/2230/documentation-list. */ +static LIST_HEAD(intel_gvt_devices); +static const struct intel_vgpu_ops *intel_gvt_ops; +static DEFINE_MUTEX(intel_gvt_mutex); + static bool is_supported_device(struct drm_i915_private *dev_priv) { if (IS_BROADWELL(dev_priv)) @@ -59,33 +66,6 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) return false; } -/** - * intel_gvt_sanitize_options - sanitize GVT related options - * @dev_priv: drm i915 private data - * - * This function is called at the i915 options sanitize stage. - */ -void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) -{ - if (!dev_priv->params.enable_gvt) - return; - - if (intel_vgpu_active(dev_priv)) { - drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n"); - goto bail; - } - - if (!is_supported_device(dev_priv)) { - drm_info(&dev_priv->drm, - "Unsupported device. 
GVT-g is disabled\n"); - goto bail; - } - - return; -bail: - dev_priv->params.enable_gvt = 0; -} - static void free_initial_hw_state(struct drm_i915_private *dev_priv) { struct i915_virtual_gpu *vgpu = &dev_priv->vgpu; @@ -165,6 +145,84 @@ static int save_initial_hw_state(struct drm_i915_private *dev_priv) return ret; } +static void intel_gvt_init_device(struct drm_i915_private *dev_priv) +{ + if (!dev_priv->params.enable_gvt) { + drm_dbg(&dev_priv->drm, + "GVT-g is disabled by kernel params\n"); + return; + } + + if (intel_vgpu_active(dev_priv)) { + drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n"); + return; + } + + if (!is_supported_device(dev_priv)) { + drm_info(&dev_priv->drm, + "Unsupported device. GVT-g is disabled\n"); + return; + } + + if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) { + drm_err(&dev_priv->drm, + "Graphics virtualization is not yet supported with GuC submission\n"); + return; + } + + if (save_initial_hw_state(dev_priv)) { + drm_dbg(&dev_priv->drm, "Failed to save initial HW state\n"); + return; + } + + if (intel_gvt_ops->init_device(dev_priv)) + drm_dbg(&dev_priv->drm, "Fail to init GVT device\n"); +} + +static void intel_gvt_clean_device(struct drm_i915_private *dev_priv) +{ + if (dev_priv->gvt) + intel_gvt_ops->clean_device(dev_priv); + free_initial_hw_state(dev_priv); +} + +int intel_gvt_set_ops(const struct intel_vgpu_ops *ops) +{ + struct drm_i915_private *dev_priv; + + mutex_lock(&intel_gvt_mutex); + if (intel_gvt_ops) { + mutex_unlock(&intel_gvt_mutex); + return -EINVAL; + } + intel_gvt_ops = ops; + + list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry) + intel_gvt_init_device(dev_priv); + mutex_unlock(&intel_gvt_mutex); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, I915_GVT); + +void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops) +{ + struct drm_i915_private *dev_priv; + + mutex_lock(&intel_gvt_mutex); + if (intel_gvt_ops != ops) { + mutex_unlock(&intel_gvt_mutex); + return; + } + + list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry) + intel_gvt_clean_device(dev_priv); + + intel_gvt_ops = NULL; + mutex_unlock(&intel_gvt_mutex); +} +EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, I915_GVT); + /** * intel_gvt_init - initialize GVT components * @dev_priv: drm i915 private data @@ -177,47 +235,16 @@ static int save_initial_hw_state(struct drm_i915_private *dev_priv) */ int intel_gvt_init(struct drm_i915_private *dev_priv) { - int ret; - if (i915_inject_probe_failure(dev_priv)) return -ENODEV; - if (!dev_priv->params.enable_gvt) { - drm_dbg(&dev_priv->drm, - "GVT-g is disabled by kernel params\n"); - return 0; - } - - if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) { - drm_err(&dev_priv->drm, - "i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n"); - return -EIO; - } - - ret = save_initial_hw_state(dev_priv); - if (ret) { - drm_dbg(&dev_priv->drm, "Fail to save initial HW state\n"); - goto err_save_hw_state; - } - - ret = intel_gvt_init_device(dev_priv); - if (ret) { - drm_dbg(&dev_priv->drm, "Fail to init GVT device\n"); - goto err_init_device; - } + mutex_lock(&intel_gvt_mutex); + list_add_tail(&dev_priv->vgpu.entry, &intel_gvt_devices); + if (intel_gvt_ops) + intel_gvt_init_device(dev_priv); + mutex_unlock(&intel_gvt_mutex); return 0; - -err_init_device: - free_initial_hw_state(dev_priv); -err_save_hw_state: - dev_priv->params.enable_gvt = 0; - return 0; -} - -static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) -{ - return 
dev_priv->gvt; } /** @@ -230,11 +257,10 @@ static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) */ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) { - if (!intel_gvt_active(dev_priv)) - return; - + mutex_lock(&intel_gvt_mutex); intel_gvt_clean_device(dev_priv); - free_initial_hw_state(dev_priv); + list_del(&dev_priv->vgpu.entry); + mutex_unlock(&intel_gvt_mutex); } /** @@ -247,6 +273,46 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) */ void intel_gvt_resume(struct drm_i915_private *dev_priv) { - if (intel_gvt_active(dev_priv)) - intel_gvt_pm_resume(dev_priv->gvt); + mutex_lock(&intel_gvt_mutex); + if (dev_priv->gvt) + intel_gvt_ops->pm_resume(dev_priv); + mutex_unlock(&intel_gvt_mutex); } + +/* + * Exported here so that the exports only get created when GVT support is + * actually enabled. + */ +EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_request_add, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_request_create, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_request_wait, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_vm_release, I915_GVT); +EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_context_create, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT); +EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT); diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h index 3a2563ad50f8..eb2a2be252ca 100644 --- a/drivers/gpu/drm/i915/intel_gvt.h +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -39,12 +39,19 @@ struct intel_gvt_mmio_table_iter { int intel_gvt_init(struct drm_i915_private *dev_priv); void intel_gvt_driver_remove(struct drm_i915_private *dev_priv); -int intel_gvt_init_device(struct drm_i915_private *dev_priv); -void intel_gvt_clean_device(struct drm_i915_private *dev_priv); int intel_gvt_init_host(void); -void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv); void intel_gvt_resume(struct drm_i915_private *dev_priv); int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter); + +struct intel_vgpu_ops { + int (*init_device)(struct drm_i915_private *dev_priv); + void 
(*clean_device)(struct drm_i915_private *dev_priv); + void (*pm_resume)(struct drm_i915_private *i915); +}; + +int intel_gvt_set_ops(const struct intel_vgpu_ops *ops); +void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops); + #else static inline int intel_gvt_init(struct drm_i915_private *dev_priv) { @@ -55,10 +62,6 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) { } -static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) -{ -} - static inline void intel_gvt_resume(struct drm_i915_private *dev_priv) { } diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c index 7f7fa9a6a7be..03a7fcd0f904 100644 --- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -1288,3 +1288,4 @@ int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter) err: return ret; } +EXPORT_SYMBOL_NS_GPL(intel_gvt_iterate_mmio_table, I915_GVT); From 675e5c4a33e20cc1924e99cc6f71a42d355c2c31 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:36 +0200 Subject: [PATCH 10/41] drm/i915/gvt: remove intel_gvt_ops Remove these pointless indirect calls by just calling the only instance of each method directly. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-8-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 20 +-------------- drivers/gpu/drm/i915/gvt/gvt.h | 24 ------------------ drivers/gpu/drm/i915/gvt/hypercall.h | 2 +- drivers/gpu/drm/i915/gvt/kvmgt.c | 37 +++++++++++----------------- drivers/gpu/drm/i915/gvt/mpt.h | 5 ++-- 5 files changed, 19 insertions(+), 69 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index c3308058f461..9259f2a17398 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -39,23 +39,6 @@ #include #include -static const struct intel_gvt_ops intel_gvt_ops = { - .emulate_cfg_read = intel_vgpu_emulate_cfg_read, - .emulate_cfg_write = intel_vgpu_emulate_cfg_write, - .emulate_mmio_read = intel_vgpu_emulate_mmio_read, - .emulate_mmio_write = intel_vgpu_emulate_mmio_write, - .vgpu_create = intel_gvt_create_vgpu, - .vgpu_destroy = intel_gvt_destroy_vgpu, - .vgpu_release = intel_gvt_release_vgpu, - .vgpu_reset = intel_gvt_reset_vgpu, - .vgpu_activate = intel_gvt_activate_vgpu, - .vgpu_deactivate = intel_gvt_deactivate_vgpu, - .vgpu_query_plane = intel_vgpu_query_plane, - .vgpu_get_dmabuf = intel_vgpu_get_dmabuf, - .write_protect_handler = intel_vgpu_page_track_handler, - .emulate_hotplug = intel_vgpu_emulate_hotplug, -}; - static void init_device_info(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; @@ -252,8 +235,7 @@ static int intel_gvt_init_device(struct drm_i915_private *i915) intel_gvt_debugfs_init(gvt); - ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt, - &intel_gvt_ops); + ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt); if (ret) goto out_destroy_idle_vgpu; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index dc36a791467b..97150a1297eb 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -556,30 +556,6 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu); int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload); void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason); -struct 
intel_gvt_ops { - int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *, - unsigned int); - int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *, - unsigned int); - int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *, - unsigned int); - int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *, - unsigned int); - struct intel_vgpu *(*vgpu_create)(struct intel_gvt *, - struct intel_vgpu_type *); - void (*vgpu_destroy)(struct intel_vgpu *vgpu); - void (*vgpu_release)(struct intel_vgpu *vgpu); - void (*vgpu_reset)(struct intel_vgpu *); - void (*vgpu_activate)(struct intel_vgpu *); - void (*vgpu_deactivate)(struct intel_vgpu *); - int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *); - int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int); - int (*write_protect_handler)(struct intel_vgpu *, u64, void *, - unsigned int); - void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected); -}; - - enum { GVT_FAILSAFE_UNSUPPORTED_GUEST, GVT_FAILSAFE_INSUFFICIENT_RESOURCE, diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 317983153645..395bce9633fa 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -42,7 +42,7 @@ struct device; * both Xen and KVM by providing dedicated hypervisor-related MPT modules. */ struct intel_gvt_mpt { - int (*host_init)(struct device *dev, void *gvt, const void *ops); + int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); int (*attach_vgpu)(void *vgpu, unsigned long *handle); void (*detach_vgpu)(void *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index fa8b326eb219..9ed13c86d476 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -55,8 +55,6 @@ MODULE_IMPORT_NS(DMA_BUF); MODULE_IMPORT_NS(I915_GVT); -static const struct intel_gvt_ops *intel_gvt_ops; - /* helper macros copied from vfio-pci */ #define VFIO_PCI_OFFSET_SHIFT 40 #define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT) @@ -621,9 +619,9 @@ static int handle_edid_regs(struct intel_vgpu *vgpu, gvt_vgpu_err("invalid EDID blob\n"); return -EINVAL; } - intel_gvt_ops->emulate_hotplug(vgpu, true); + intel_vgpu_emulate_hotplug(vgpu, true); } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN) - intel_gvt_ops->emulate_hotplug(vgpu, false); + intel_vgpu_emulate_hotplug(vgpu, false); else { gvt_vgpu_err("invalid EDID link state %d\n", regs->link_state); @@ -825,7 +823,7 @@ static int intel_vgpu_create(struct mdev_device *mdev) goto out; } - vgpu = intel_gvt_ops->vgpu_create(gvt, type); + vgpu = intel_gvt_create_vgpu(gvt, type); if (IS_ERR_OR_NULL(vgpu)) { ret = vgpu == NULL ? 
-EFAULT : PTR_ERR(vgpu); gvt_err("failed to create intel vgpu: %d\n", ret); @@ -852,7 +850,7 @@ static int intel_vgpu_remove(struct mdev_device *mdev) if (handle_valid(vgpu->handle)) return -EBUSY; - intel_gvt_ops->vgpu_destroy(vgpu); + intel_gvt_destroy_vgpu(vgpu); return 0; } @@ -955,7 +953,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) if (ret) goto undo_group; - intel_gvt_ops->vgpu_activate(vgpu); + intel_gvt_activate_vgpu(vgpu); atomic_set(&vdev->released, 0); return ret; @@ -1000,7 +998,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) if (atomic_cmpxchg(&vdev->released, 0, 1)) return; - intel_gvt_ops->vgpu_release(vgpu); + intel_gvt_release_vgpu(vgpu); ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY, &vdev->iommu_notifier); @@ -1074,10 +1072,10 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off, int ret; if (is_write) - ret = intel_gvt_ops->emulate_mmio_write(vgpu, + ret = intel_vgpu_emulate_mmio_write(vgpu, bar_start + off, buf, count); else - ret = intel_gvt_ops->emulate_mmio_read(vgpu, + ret = intel_vgpu_emulate_mmio_read(vgpu, bar_start + off, buf, count); return ret; } @@ -1133,10 +1131,10 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, switch (index) { case VFIO_PCI_CONFIG_REGION_INDEX: if (is_write) - ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos, + ret = intel_vgpu_emulate_cfg_write(vgpu, pos, buf, count); else - ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos, + ret = intel_vgpu_emulate_cfg_read(vgpu, pos, buf, count); break; case VFIO_PCI_BAR0_REGION_INDEX: @@ -1704,7 +1702,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, return ret; } else if (cmd == VFIO_DEVICE_RESET) { - intel_gvt_ops->vgpu_reset(vgpu); + intel_gvt_reset_vgpu(vgpu); return 0; } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) { struct vfio_device_gfx_plane_info dmabuf; @@ -1717,7 +1715,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, if (dmabuf.argsz < minsz) return -EINVAL; - ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf); + ret = intel_vgpu_query_plane(vgpu, &dmabuf); if (ret != 0) return ret; @@ -1725,14 +1723,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, -EFAULT : 0; } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) { __u32 dmabuf_id; - __s32 dmabuf_fd; if (get_user(dmabuf_id, (__u32 __user *)arg)) return -EFAULT; - - dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id); - return dmabuf_fd; - + return intel_vgpu_get_dmabuf(vgpu, dmabuf_id); } return -ENOTTY; @@ -1783,7 +1777,7 @@ static struct mdev_parent_ops intel_vgpu_mdev_ops = { .ioctl = intel_vgpu_ioctl, }; -static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) +static int kvmgt_host_init(struct device *dev, void *gvt) { int ret; @@ -1791,7 +1785,6 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) if (ret) return ret; - intel_gvt_ops = ops; intel_vgpu_mdev_ops.supported_type_groups = gvt_vgpu_type_groups; ret = mdev_register_device(dev, &intel_vgpu_mdev_ops); @@ -1883,7 +1876,7 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvmgt_guest_info, track_node); if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa))) - intel_gvt_ops->write_protect_handler(info->vgpu, gpa, + intel_vgpu_page_track_handler(info->vgpu, gpa, (void *)val, len); } diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 1b5617bb2745..0e3966e1fec8 100644 --- 
a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -51,13 +51,12 @@ * Returns: * Zero on success, negative error code if failed */ -static inline int intel_gvt_hypervisor_host_init(struct device *dev, - void *gvt, const void *ops) +static inline int intel_gvt_hypervisor_host_init(struct device *dev, void *gvt) { if (!intel_gvt_host.mpt->host_init) return -ENODEV; - return intel_gvt_host.mpt->host_init(dev, gvt, ops); + return intel_gvt_host.mpt->host_init(dev, gvt); } /** From c977092a9977083f1e73306658182be5123116e3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:37 +0200 Subject: [PATCH 11/41] drm/i915/gvt: remove the map_gfn_to_mfn and set_trap_area ops The map_gfn_to_mfn and set_trap_area ops are never defined, so remove them and clean up code that depends on them in the callers. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-9-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/cfg_space.c | 89 ++++++---------------------- drivers/gpu/drm/i915/gvt/hypercall.h | 4 -- drivers/gpu/drm/i915/gvt/mpt.h | 44 -------------- 3 files changed, 17 insertions(+), 120 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index b490e3db2e38..dad3a6054335 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -129,60 +129,16 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static int map_aperture(struct intel_vgpu *vgpu, bool map) +static void map_aperture(struct intel_vgpu *vgpu, bool map) { - phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu); - unsigned long aperture_sz = vgpu_aperture_sz(vgpu); - u64 first_gfn; - u64 val; - int ret; - - if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked) - return 0; - - val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2]; - if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) - val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2); - else - val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2); - - first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT; - - ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn, - aperture_pa >> PAGE_SHIFT, - aperture_sz >> PAGE_SHIFT, - map); - if (ret) - return ret; - - vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map; - return 0; + if (map != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked) + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map; } -static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap) +static void trap_gttmmio(struct intel_vgpu *vgpu, bool trap) { - u64 start, end; - u64 val; - int ret; - - if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked) - return 0; - - val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0]; - if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) - start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); - else - start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); - - start &= ~GENMASK(3, 0); - end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1; - - ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap); - if (ret) - return ret; - - vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap; - return 0; + if (trap != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked) + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap; } static int emulate_pci_command_write(struct intel_vgpu 
*vgpu, @@ -191,26 +147,17 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, u8 old = vgpu_cfg_space(vgpu)[offset]; u8 new = *(u8 *)p_data; u8 changed = old ^ new; - int ret; vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); if (!(changed & PCI_COMMAND_MEMORY)) return 0; if (old & PCI_COMMAND_MEMORY) { - ret = trap_gttmmio(vgpu, false); - if (ret) - return ret; - ret = map_aperture(vgpu, false); - if (ret) - return ret; + trap_gttmmio(vgpu, false); + map_aperture(vgpu, false); } else { - ret = trap_gttmmio(vgpu, true); - if (ret) - return ret; - ret = map_aperture(vgpu, true); - if (ret) - return ret; + trap_gttmmio(vgpu, true); + map_aperture(vgpu, true); } return 0; @@ -230,13 +177,12 @@ static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu, return 0; } -static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, +static void emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 new = *(u32 *)(p_data); bool lo = IS_ALIGNED(offset, 8); u64 size; - int ret = 0; bool mmio_enabled = vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY; struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar; @@ -259,14 +205,14 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, * Untrap the BAR, since guest hasn't configured a * valid GPA */ - ret = trap_gttmmio(vgpu, false); + trap_gttmmio(vgpu, false); break; case PCI_BASE_ADDRESS_2: case PCI_BASE_ADDRESS_3: size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1); intel_vgpu_write_pci_bar(vgpu, offset, size >> (lo ? 0 : 32), lo); - ret = map_aperture(vgpu, false); + map_aperture(vgpu, false); break; default: /* Unimplemented BARs */ @@ -282,19 +228,18 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, */ trap_gttmmio(vgpu, false); intel_vgpu_write_pci_bar(vgpu, offset, new, lo); - ret = trap_gttmmio(vgpu, mmio_enabled); + trap_gttmmio(vgpu, mmio_enabled); break; case PCI_BASE_ADDRESS_2: case PCI_BASE_ADDRESS_3: map_aperture(vgpu, false); intel_vgpu_write_pci_bar(vgpu, offset, new, lo); - ret = map_aperture(vgpu, mmio_enabled); + map_aperture(vgpu, mmio_enabled); break; default: intel_vgpu_write_pci_bar(vgpu, offset, new, lo); } } - return ret; } /** @@ -336,8 +281,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, case PCI_BASE_ADDRESS_0 ... 
PCI_BASE_ADDRESS_5: if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; - return emulate_pci_bar_write(vgpu, offset, p_data, bytes); - + emulate_pci_bar_write(vgpu, offset, p_data, bytes); + break; case INTEL_GVT_PCI_SWSCI: if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 395bce9633fa..f1a4926f6f1b 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -62,10 +62,6 @@ struct intel_gvt_mpt { int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr); - int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn, - unsigned long mfn, unsigned int nr, bool map); - int (*set_trap_area)(unsigned long handle, u64 start, u64 end, - bool map); int (*set_opregion)(void *vgpu); int (*set_edid)(void *vgpu, int port_num); int (*get_vfio_device)(void *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 0e3966e1fec8..bb0e9e71d13e 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -270,50 +270,6 @@ intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr); } -/** - * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN - * @vgpu: a vGPU - * @gfn: guest PFN - * @mfn: host PFN - * @nr: amount of PFNs - * @map: map or unmap - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_map_gfn_to_mfn( - struct intel_vgpu *vgpu, unsigned long gfn, - unsigned long mfn, unsigned int nr, - bool map) -{ - /* a MPT implementation could have MMIO mapped elsewhere */ - if (!intel_gvt_host.mpt->map_gfn_to_mfn) - return 0; - - return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr, - map); -} - -/** - * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region - * @vgpu: a vGPU - * @start: the beginning of the guest physical address region - * @end: the end of the guest physical address region - * @map: map or unmap - * - * Returns: - * Zero on success, negative error code if failed. 
- */ -static inline int intel_gvt_hypervisor_set_trap_area( - struct intel_vgpu *vgpu, u64 start, u64 end, bool map) -{ - /* a MPT implementation could have MMIO trapped elsewhere */ - if (!intel_gvt_host.mpt->set_trap_area) - return 0; - - return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map); -} - /** * intel_gvt_hypervisor_set_opregion - Set opregion for guest * @vgpu: a vGPU From 3cbac24c2cdbfe7174427933a41a1027015d2644 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:38 +0200 Subject: [PATCH 12/41] drm/i915/gvt: remove the unused from_virt_to_mfn op Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-10-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/kvmgt.c | 6 ------ drivers/gpu/drm/i915/gvt/mpt.h | 12 ------------ 3 files changed, 19 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index f1a4926f6f1b..27890a5e2d82 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -47,7 +47,6 @@ struct intel_gvt_mpt { int (*attach_vgpu)(void *vgpu, unsigned long *handle); void (*detach_vgpu)(void *vgpu); int (*inject_msi)(unsigned long handle, u32 addr, u16 data); - unsigned long (*from_virt_to_mfn)(void *p); int (*enable_page_track)(unsigned long handle, u64 gfn); int (*disable_page_track)(unsigned long handle, u64 gfn); int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 9ed13c86d476..f7b8ce87bc33 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -2192,11 +2192,6 @@ static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa, return kvmgt_rw_gpa(handle, gpa, buf, len, true); } -static unsigned long kvmgt_virt_to_pfn(void *addr) -{ - return PFN_DOWN(__pa(addr)); -} - static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) { struct kvmgt_guest_info *info; @@ -2223,7 +2218,6 @@ static const struct intel_gvt_mpt kvmgt_mpt = { .attach_vgpu = kvmgt_attach_vgpu, .detach_vgpu = kvmgt_detach_vgpu, .inject_msi = kvmgt_inject_msi, - .from_virt_to_mfn = kvmgt_virt_to_pfn, .enable_page_track = kvmgt_page_track_add, .disable_page_track = kvmgt_page_track_remove, .read_gpa = kvmgt_read_gpa, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index bb0e9e71d13e..6d062cf71de9 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -140,18 +140,6 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu) return 0; } -/** - * intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN - * @p: host kernel virtual address - * - * Returns: - * MFN on success, INTEL_GVT_INVALID_ADDR if failed. - */ -static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p) -{ - return intel_gvt_host.mpt->from_virt_to_mfn(p); -} - /** * intel_gvt_hypervisor_enable_page_track - track a guest page * @vgpu: a vGPU From 62980cacc37f58bd054de012d08052dfc4f5fa48 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:39 +0200 Subject: [PATCH 13/41] drm/i915/gvt: merge struct kvmgt_vdev into struct intel_vgpu Move towards having only a single structure for the per-VGPU state. 
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-11-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 31 ++- drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/kvmgt.c | 288 ++++++++++----------------- drivers/gpu/drm/i915/gvt/mpt.h | 16 -- drivers/gpu/drm/i915/gvt/vgpu.c | 8 +- 5 files changed, 128 insertions(+), 216 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 97150a1297eb..628dd686c03d 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -207,21 +207,36 @@ struct intel_vgpu { struct dentry *debugfs; - /* Hypervisor-specific device state. */ - void *vdev; - struct list_head dmabuf_obj_list_head; struct mutex dmabuf_lock; struct idr object_idr; struct intel_vgpu_vblank_timer vblank_timer; u32 scan_nonprivbb; -}; -static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu) -{ - return vgpu->vdev; -} + struct mdev_device *mdev; + struct vfio_region *region; + int num_regions; + struct eventfd_ctx *intx_trigger; + struct eventfd_ctx *msi_trigger; + + /* + * Two caches are used to avoid mapping duplicated pages (eg. + * scratch pages). This help to reduce dma setup overhead. + */ + struct rb_root gfn_cache; + struct rb_root dma_addr_cache; + unsigned long nr_cache_entries; + struct mutex cache_lock; + + struct notifier_block iommu_notifier; + struct notifier_block group_notifier; + struct kvm *kvm; + struct work_struct release_work; + atomic_t released; + struct vfio_device *vfio_device; + struct vfio_group *vfio_group; +}; /* validating GM healthy status*/ #define vgpu_is_vm_unhealthy(ret_val) \ diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 27890a5e2d82..eacee6f41f9c 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -44,7 +44,6 @@ struct device; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - int (*attach_vgpu)(void *vgpu, unsigned long *handle); void (*detach_vgpu)(void *vgpu); int (*inject_msi)(unsigned long handle, u32 addr, u16 data); int (*enable_page_track)(unsigned long handle, u64 gfn); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index f7b8ce87bc33..1c2b949d8e01 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -112,37 +112,6 @@ struct gvt_dma { struct kref ref; }; -struct kvmgt_vdev { - struct intel_vgpu *vgpu; - struct mdev_device *mdev; - struct vfio_region *region; - int num_regions; - struct eventfd_ctx *intx_trigger; - struct eventfd_ctx *msi_trigger; - - /* - * Two caches are used to avoid mapping duplicated pages (eg. - * scratch pages). This help to reduce dma setup overhead. 
- */ - struct rb_root gfn_cache; - struct rb_root dma_addr_cache; - unsigned long nr_cache_entries; - struct mutex cache_lock; - - struct notifier_block iommu_notifier; - struct notifier_block group_notifier; - struct kvm *kvm; - struct work_struct release_work; - atomic_t released; - struct vfio_device *vfio_device; - struct vfio_group *vfio_group; -}; - -static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu) -{ - return intel_vgpu_vdev(vgpu); -} - static inline bool handle_valid(unsigned long handle) { return !!(handle & ~0xff); @@ -269,7 +238,6 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); int total_pages; int npage; int ret; @@ -279,7 +247,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, for (npage = 0; npage < total_pages; npage++) { unsigned long cur_gfn = gfn + npage; - ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1); + ret = vfio_group_unpin_pages(vgpu->vfio_group, &cur_gfn, 1); drm_WARN_ON(&i915->drm, ret != 1); } } @@ -288,7 +256,6 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, struct page **page) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long base_pfn = 0; int total_pages; int npage; @@ -303,7 +270,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long cur_gfn = gfn + npage; unsigned long pfn; - ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1, + ret = vfio_group_pin_pages(vgpu->vfio_group, &cur_gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); if (ret != 1) { gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n", @@ -370,7 +337,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { - struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node; + struct rb_node *node = vgpu->dma_addr_cache.rb_node; struct gvt_dma *itr; while (node) { @@ -388,7 +355,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) { - struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node; + struct rb_node *node = vgpu->gfn_cache.rb_node; struct gvt_dma *itr; while (node) { @@ -409,7 +376,6 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, { struct gvt_dma *new, *itr; struct rb_node **link, *parent = NULL; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL); if (!new) @@ -422,7 +388,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kref_init(&new->ref); /* gfn_cache maps gfn to struct gvt_dma. */ - link = &vdev->gfn_cache.rb_node; + link = &vgpu->gfn_cache.rb_node; while (*link) { parent = *link; itr = rb_entry(parent, struct gvt_dma, gfn_node); @@ -433,11 +399,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, link = &parent->rb_right; } rb_link_node(&new->gfn_node, parent, link); - rb_insert_color(&new->gfn_node, &vdev->gfn_cache); + rb_insert_color(&new->gfn_node, &vgpu->gfn_cache); /* dma_addr_cache maps dma addr to struct gvt_dma. 
*/ parent = NULL; - link = &vdev->dma_addr_cache.rb_node; + link = &vgpu->dma_addr_cache.rb_node; while (*link) { parent = *link; itr = rb_entry(parent, struct gvt_dma, dma_addr_node); @@ -448,51 +414,46 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, link = &parent->rb_right; } rb_link_node(&new->dma_addr_node, parent, link); - rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache); + rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache); - vdev->nr_cache_entries++; + vgpu->nr_cache_entries++; return 0; } static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu, struct gvt_dma *entry) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - - rb_erase(&entry->gfn_node, &vdev->gfn_cache); - rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache); + rb_erase(&entry->gfn_node, &vgpu->gfn_cache); + rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache); kfree(entry); - vdev->nr_cache_entries--; + vgpu->nr_cache_entries--; } static void gvt_cache_destroy(struct intel_vgpu *vgpu) { struct gvt_dma *dma; struct rb_node *node = NULL; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); for (;;) { - mutex_lock(&vdev->cache_lock); - node = rb_first(&vdev->gfn_cache); + mutex_lock(&vgpu->cache_lock); + node = rb_first(&vgpu->gfn_cache); if (!node) { - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); break; } dma = rb_entry(node, struct gvt_dma, gfn_node); gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size); __gvt_cache_remove_entry(vgpu, dma); - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); } } static void gvt_cache_init(struct intel_vgpu *vgpu) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - - vdev->gfn_cache = RB_ROOT; - vdev->dma_addr_cache = RB_ROOT; - vdev->nr_cache_entries = 0; - mutex_init(&vdev->cache_lock); + vgpu->gfn_cache = RB_ROOT; + vgpu->dma_addr_cache = RB_ROOT; + vgpu->nr_cache_entries = 0; + mutex_init(&vgpu->cache_lock); } static void kvmgt_protect_table_init(struct kvmgt_guest_info *info) @@ -566,18 +527,17 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf, size_t count, loff_t *ppos, bool iswrite) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; - void *base = vdev->region[i].data; + void *base = vgpu->region[i].data; loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - if (pos >= vdev->region[i].size || iswrite) { + if (pos >= vgpu->region[i].size || iswrite) { gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n"); return -EINVAL; } - count = min(count, (size_t)(vdev->region[i].size - pos)); + count = min(count, (size_t)(vgpu->region[i].size - pos)); memcpy(buf, base + pos, count); return count; @@ -670,8 +630,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf, int ret; unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; - struct vfio_edid_region *region = - (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data; + struct vfio_edid_region *region = vgpu->region[i].data; loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; if (pos < region->vfio_edid_regs.edid_offset) { @@ -703,34 +662,32 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu, const struct intel_vgpu_regops *ops, size_t size, u32 flags, void *data) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct vfio_region *region; - region = krealloc(vdev->region, - (vdev->num_regions + 1) * sizeof(*region), + region = 
krealloc(vgpu->region, + (vgpu->num_regions + 1) * sizeof(*region), GFP_KERNEL); if (!region) return -ENOMEM; - vdev->region = region; - vdev->region[vdev->num_regions].type = type; - vdev->region[vdev->num_regions].subtype = subtype; - vdev->region[vdev->num_regions].ops = ops; - vdev->region[vdev->num_regions].size = size; - vdev->region[vdev->num_regions].flags = flags; - vdev->region[vdev->num_regions].data = data; - vdev->num_regions++; + vgpu->region = region; + vgpu->region[vgpu->num_regions].type = type; + vgpu->region[vgpu->num_regions].subtype = subtype; + vgpu->region[vgpu->num_regions].ops = ops; + vgpu->region[vgpu->num_regions].size = size; + vgpu->region[vgpu->num_regions].flags = flags; + vgpu->region[vgpu->num_regions].data = data; + vgpu->num_regions++; return 0; } static int kvmgt_get_vfio_device(void *p_vgpu) { struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - vdev->vfio_device = vfio_device_get_from_dev( - mdev_dev(vdev->mdev)); - if (!vdev->vfio_device) { + vgpu->vfio_device = vfio_device_get_from_dev( + mdev_dev(vgpu->mdev)); + if (!vgpu->vfio_device) { gvt_vgpu_err("failed to get vfio device\n"); return -ENODEV; } @@ -796,14 +753,14 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num) return ret; } -static void kvmgt_put_vfio_device(void *vgpu) +static void kvmgt_put_vfio_device(void *data) { - struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu); + struct intel_vgpu *vgpu = data; - if (WARN_ON(!vdev->vfio_device)) + if (WARN_ON(!vgpu->vfio_device)) return; - vfio_device_put(vdev->vfio_device); + vfio_device_put(vgpu->vfio_device); } static int intel_vgpu_create(struct mdev_device *mdev) @@ -830,9 +787,9 @@ static int intel_vgpu_create(struct mdev_device *mdev) goto out; } - INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work); + INIT_WORK(&vgpu->release_work, intel_vgpu_release_work); - kvmgt_vdev(vgpu)->mdev = mdev; + vgpu->mdev = mdev; mdev_set_drvdata(mdev, vgpu); gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", @@ -857,10 +814,8 @@ static int intel_vgpu_remove(struct mdev_device *mdev) static int intel_vgpu_iommu_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct kvmgt_vdev *vdev = container_of(nb, - struct kvmgt_vdev, - iommu_notifier); - struct intel_vgpu *vgpu = vdev->vgpu; + struct intel_vgpu *vgpu = + container_of(nb, struct intel_vgpu, iommu_notifier); if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) { struct vfio_iommu_type1_dma_unmap *unmap = data; @@ -870,7 +825,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, iov_pfn = unmap->iova >> PAGE_SHIFT; end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE; - mutex_lock(&vdev->cache_lock); + mutex_lock(&vgpu->cache_lock); for (; iov_pfn < end_iov_pfn; iov_pfn++) { entry = __gvt_cache_find_gfn(vgpu, iov_pfn); if (!entry) @@ -880,7 +835,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, entry->size); __gvt_cache_remove_entry(vgpu, entry); } - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); } return NOTIFY_OK; @@ -889,16 +844,15 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, static int intel_vgpu_group_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct kvmgt_vdev *vdev = container_of(nb, - struct kvmgt_vdev, - group_notifier); + struct intel_vgpu *vgpu = + container_of(nb, struct intel_vgpu, group_notifier); /* the only action we care about */ if (action == VFIO_GROUP_NOTIFY_SET_KVM) { - vdev->kvm 
= data; + vgpu->kvm = data; if (!data) - schedule_work(&vdev->release_work); + schedule_work(&vgpu->release_work); } return NOTIFY_OK; @@ -907,17 +861,16 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb, static int intel_vgpu_open_device(struct mdev_device *mdev) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long events; int ret; struct vfio_group *vfio_group; - vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; - vdev->group_notifier.notifier_call = intel_vgpu_group_notifier; + vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; + vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier; events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, - &vdev->iommu_notifier); + &vgpu->iommu_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", ret); @@ -926,7 +879,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) events = VFIO_GROUP_NOTIFY_SET_KVM; ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, - &vdev->group_notifier); + &vgpu->group_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", ret); @@ -939,7 +892,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n"); goto undo_register; } - vdev->vfio_group = vfio_group; + vgpu->vfio_group = vfio_group; /* Take a module reference as mdev core doesn't take * a reference for vendor driver. @@ -955,39 +908,37 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) intel_gvt_activate_vgpu(vgpu); - atomic_set(&vdev->released, 0); + atomic_set(&vgpu->released, 0); return ret; undo_group: - vfio_group_put_external_user(vdev->vfio_group); - vdev->vfio_group = NULL; + vfio_group_put_external_user(vgpu->vfio_group); + vgpu->vfio_group = NULL; undo_register: vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, - &vdev->group_notifier); + &vgpu->group_notifier); undo_iommu: vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, - &vdev->iommu_notifier); + &vgpu->iommu_notifier); out: return ret; } static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct eventfd_ctx *trigger; - trigger = vdev->msi_trigger; + trigger = vgpu->msi_trigger; if (trigger) { eventfd_ctx_put(trigger); - vdev->msi_trigger = NULL; + vgpu->msi_trigger = NULL; } } static void __intel_vgpu_release(struct intel_vgpu *vgpu) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct kvmgt_guest_info *info; int ret; @@ -995,18 +946,18 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) if (!handle_valid(vgpu->handle)) return; - if (atomic_cmpxchg(&vdev->released, 0, 1)) + if (atomic_cmpxchg(&vgpu->released, 0, 1)) return; intel_gvt_release_vgpu(vgpu); - ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY, - &vdev->iommu_notifier); + ret = vfio_unregister_notifier(mdev_dev(vgpu->mdev), VFIO_IOMMU_NOTIFY, + &vgpu->iommu_notifier); drm_WARN(&i915->drm, ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); - ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY, - &vdev->group_notifier); + ret = vfio_unregister_notifier(mdev_dev(vgpu->mdev), VFIO_GROUP_NOTIFY, + &vgpu->group_notifier); drm_WARN(&i915->drm, ret, "vfio_unregister_notifier for group failed: %d\n", ret); @@ 
-1017,9 +968,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) kvmgt_guest_exit(info); intel_vgpu_release_msi_eventfd_ctx(vgpu); - vfio_group_put_external_user(vdev->vfio_group); + vfio_group_put_external_user(vgpu->vfio_group); - vdev->kvm = NULL; + vgpu->kvm = NULL; vgpu->handle = 0; } @@ -1032,10 +983,10 @@ static void intel_vgpu_close_device(struct mdev_device *mdev) static void intel_vgpu_release_work(struct work_struct *work) { - struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev, - release_work); + struct intel_vgpu *vgpu = + container_of(work, struct intel_vgpu, release_work); - __intel_vgpu_release(vdev->vgpu); + __intel_vgpu_release(vgpu); } static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) @@ -1117,13 +1068,12 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, size_t count, loff_t *ppos, bool is_write) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; int ret = -EINVAL; - if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) { + if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) { gvt_vgpu_err("invalid index: %u\n", index); return -EINVAL; } @@ -1152,11 +1102,11 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, case VFIO_PCI_ROM_REGION_INDEX: break; default: - if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) + if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) return -EINVAL; index -= VFIO_PCI_NUM_REGIONS; - return vdev->region[index].ops->rw(vgpu, buf, count, + return vgpu->region[index].ops->rw(vgpu, buf, count, ppos, is_write); } @@ -1409,7 +1359,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, gvt_vgpu_err("eventfd_ctx_fdget failed\n"); return PTR_ERR(trigger); } - kvmgt_vdev(vgpu)->msi_trigger = trigger; + vgpu->msi_trigger = trigger; } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count) intel_vgpu_release_msi_eventfd_ctx(vgpu); @@ -1461,7 +1411,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long minsz; gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd); @@ -1480,7 +1429,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, info.flags = VFIO_DEVICE_FLAGS_PCI; info.flags |= VFIO_DEVICE_FLAGS_RESET; info.num_regions = VFIO_PCI_NUM_REGIONS + - vdev->num_regions; + vgpu->num_regions; info.num_irqs = VFIO_PCI_NUM_IRQS; return copy_to_user((void __user *)arg, &info, minsz) ? 
@@ -1571,22 +1520,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, .header.version = 1 }; if (info.index >= VFIO_PCI_NUM_REGIONS + - vdev->num_regions) + vgpu->num_regions) return -EINVAL; info.index = array_index_nospec(info.index, VFIO_PCI_NUM_REGIONS + - vdev->num_regions); + vgpu->num_regions); i = info.index - VFIO_PCI_NUM_REGIONS; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); - info.size = vdev->region[i].size; - info.flags = vdev->region[i].flags; + info.size = vgpu->region[i].size; + info.flags = vgpu->region[i].flags; - cap_type.type = vdev->region[i].type; - cap_type.subtype = vdev->region[i].subtype; + cap_type.type = vgpu->region[i].type; + cap_type.subtype = vgpu->region[i].subtype; ret = vfio_info_add_capability(&caps, &cap_type.header, @@ -1928,15 +1877,13 @@ static int kvmgt_guest_init(struct mdev_device *mdev) { struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; struct kvm *kvm; vgpu = mdev_get_drvdata(mdev); if (handle_valid(vgpu->handle)) return -EEXIST; - vdev = kvmgt_vdev(vgpu); - kvm = vdev->kvm; + kvm = vgpu->kvm; if (!kvm || kvm->mm != current->mm) { gvt_vgpu_err("KVM is required to use Intel vGPU\n"); return -ESRCH; @@ -1962,7 +1909,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev) kvm_page_track_register_notifier(kvm, &info->track_node); debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, - &vdev->nr_cache_entries); + &vgpu->nr_cache_entries); return 0; } @@ -1980,52 +1927,33 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) return true; } -static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle) -{ - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; - - vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL); - - if (!vgpu->vdev) - return -ENOMEM; - - kvmgt_vdev(vgpu)->vgpu = vgpu; - - return 0; -} - static void kvmgt_detach_vgpu(void *p_vgpu) { int i; struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - if (!vdev->region) + if (!vgpu->region) return; - for (i = 0; i < vdev->num_regions; i++) - if (vdev->region[i].ops->release) - vdev->region[i].ops->release(vgpu, - &vdev->region[i]); - vdev->num_regions = 0; - kfree(vdev->region); - vdev->region = NULL; - - kfree(vdev); + for (i = 0; i < vgpu->num_regions; i++) + if (vgpu->region[i].ops->release) + vgpu->region[i].ops->release(vgpu, + &vgpu->region[i]); + vgpu->num_regions = 0; + kfree(vgpu->region); + vgpu->region = NULL; } static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) { struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; if (!handle_valid(handle)) return -ESRCH; info = (struct kvmgt_guest_info *)handle; vgpu = info->vgpu; - vdev = kvmgt_vdev(vgpu); /* * When guest is poweroff, msi_trigger is set to NULL, but vgpu's @@ -2036,10 +1964,10 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) * enabled by guest. so if msi_trigger is null, success is still * returned and don't inject interrupt into guest. 
*/ - if (vdev->msi_trigger == NULL) + if (vgpu->msi_trigger == NULL) return 0; - if (eventfd_signal(vdev->msi_trigger, 1) == 1) + if (eventfd_signal(vgpu->msi_trigger, 1) == 1) return 0; return -EFAULT; @@ -2066,7 +1994,6 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; struct gvt_dma *entry; int ret; @@ -2074,9 +2001,8 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, return -EINVAL; vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; - vdev = kvmgt_vdev(vgpu); - mutex_lock(&vdev->cache_lock); + mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_gfn(vgpu, gfn); if (!entry) { @@ -2104,20 +2030,19 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, *dma_addr = entry->dma_addr; } - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); return 0; err_unmap: gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size); err_unlock: - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); return ret; } static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) { struct kvmgt_guest_info *info; - struct kvmgt_vdev *vdev; struct gvt_dma *entry; int ret = 0; @@ -2125,15 +2050,14 @@ static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) return -ENODEV; info = (struct kvmgt_guest_info *)handle; - vdev = kvmgt_vdev(info->vgpu); - mutex_lock(&vdev->cache_lock); + mutex_lock(&info->vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); if (entry) kref_get(&entry->ref); else ret = -ENOMEM; - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&info->vgpu->cache_lock); return ret; } @@ -2150,20 +2074,18 @@ static void __gvt_dma_release(struct kref *ref) static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) { struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; struct gvt_dma *entry; if (!handle_valid(handle)) return; vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; - vdev = kvmgt_vdev(vgpu); - mutex_lock(&vdev->cache_lock); + mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) kref_put(&entry->ref, __gvt_dma_release); - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); } static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, @@ -2176,8 +2098,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, info = (struct kvmgt_guest_info *)handle; - return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group, - gpa, buf, len, write); + return vfio_dma_rw(info->vgpu->vfio_group, gpa, buf, len, write); } static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, @@ -2215,7 +2136,6 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .attach_vgpu = kvmgt_attach_vgpu, .detach_vgpu = kvmgt_detach_vgpu, .inject_msi = kvmgt_inject_msi, .enable_page_track = kvmgt_page_track_add, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 6d062cf71de9..8a659301d78b 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,22 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -/** - * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU - * related stuffs inside hypervisor. 
- * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu) -{ - /* optional to provide */ - if (!intel_gvt_host.mpt->attach_vgpu) - return 0; - - return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle); -} - /** * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU * related stuffs inside hypervisor. diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 8dddd0a940a1..fd335a7bcb84 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -405,13 +405,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, populate_pvinfo_page(vgpu); - ret = intel_gvt_hypervisor_attach_vgpu(vgpu); - if (ret) - goto out_clean_vgpu_resource; - ret = intel_vgpu_init_gtt(vgpu); if (ret) - goto out_detach_hypervisor_vgpu; + goto out_clean_vgpu_resource; ret = intel_vgpu_init_opregion(vgpu); if (ret) @@ -454,8 +450,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, intel_vgpu_clean_opregion(vgpu); out_clean_gtt: intel_vgpu_clean_gtt(vgpu); -out_detach_hypervisor_vgpu: - intel_gvt_hypervisor_detach_vgpu(vgpu); out_clean_vgpu_resource: intel_vgpu_free_resource(vgpu); out_clean_vgpu_mmio: From 10ddb96295f3bdc6caf4518b8001725440d7a7d2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:40 +0200 Subject: [PATCH 14/41] drm/i915/gvt: merge struct kvmgt_guest_info into struct intel_vgpu Consolidate the per-VGPU structures into a single one. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-12-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 8 +++ drivers/gpu/drm/i915/gvt/kvmgt.c | 117 ++++++++++++------------------- 2 files changed, 52 insertions(+), 73 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 628dd686c03d..16daa615f9c0 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -34,6 +34,7 @@ #define _GVT_H_ #include +#include #include "i915_drv.h" #include "intel_gvt.h" @@ -174,6 +175,8 @@ struct intel_vgpu_submission { } last_ctx[I915_NUM_ENGINES]; }; +#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries" + struct intel_vgpu { struct intel_gvt *gvt; struct mutex vgpu_lock; @@ -236,6 +239,11 @@ struct intel_vgpu { atomic_t released; struct vfio_device *vfio_device; struct vfio_group *vfio_group; + + struct kvm_page_track_notifier_node track_node; +#define NR_BKT (1 << 18) + struct hlist_head ptable[NR_BKT]; +#undef NR_BKT }; /* validating GM healthy status*/ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 1c2b949d8e01..37cdf092a714 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -92,16 +91,6 @@ struct kvmgt_pgfn { struct hlist_node hnode; }; -#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries" -struct kvmgt_guest_info { - struct kvm *kvm; - struct intel_vgpu *vgpu; - struct kvm_page_track_notifier_node track_node; -#define NR_BKT (1 << 18) - struct hlist_head ptable[NR_BKT]; -#undef NR_BKT -}; - struct gvt_dma { struct intel_vgpu *vgpu; struct rb_node gfn_node; @@ -232,7 +221,7 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) static int kvmgt_guest_init(struct mdev_device *mdev); static
void intel_vgpu_release_work(struct work_struct *work); -static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); +static bool kvmgt_guest_exit(struct intel_vgpu *info); static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size) @@ -456,12 +445,12 @@ static void gvt_cache_init(struct intel_vgpu *vgpu) mutex_init(&vgpu->cache_lock); } -static void kvmgt_protect_table_init(struct kvmgt_guest_info *info) +static void kvmgt_protect_table_init(struct intel_vgpu *info) { hash_init(info->ptable); } -static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info) +static void kvmgt_protect_table_destroy(struct intel_vgpu *info) { struct kvmgt_pgfn *p; struct hlist_node *tmp; @@ -474,7 +463,7 @@ static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info) } static struct kvmgt_pgfn * -__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn) +__kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p, *res = NULL; @@ -488,8 +477,7 @@ __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn) return res; } -static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info, - gfn_t gfn) +static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; @@ -497,7 +485,7 @@ static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info, return !!p; } -static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn) +static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; @@ -512,8 +500,7 @@ static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn) hash_add(info->ptable, &p->hnode, gfn); } -static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, - gfn_t gfn) +static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; @@ -940,7 +927,6 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu) static void __intel_vgpu_release(struct intel_vgpu *vgpu) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; - struct kvmgt_guest_info *info; int ret; if (!handle_valid(vgpu->handle)) @@ -964,8 +950,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) /* dereference module reference taken at open */ module_put(THIS_MODULE); - info = (struct kvmgt_guest_info *)vgpu->handle; - kvmgt_guest_exit(info); + kvmgt_guest_exit(vgpu); intel_vgpu_release_msi_eventfd_ctx(vgpu); vfio_group_put_external_user(vgpu->vfio_group); @@ -1751,7 +1736,7 @@ static void kvmgt_host_exit(struct device *dev, void *gvt) static int kvmgt_page_track_add(unsigned long handle, u64 gfn) { - struct kvmgt_guest_info *info; + struct intel_vgpu *info; struct kvm *kvm; struct kvm_memory_slot *slot; int idx; @@ -1759,7 +1744,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn) if (!handle_valid(handle)) return -ESRCH; - info = (struct kvmgt_guest_info *)handle; + info = (struct intel_vgpu *)handle; kvm = info->kvm; idx = srcu_read_lock(&kvm->srcu); @@ -1785,7 +1770,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn) static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) { - struct kvmgt_guest_info *info; + struct intel_vgpu *info; struct kvm *kvm; struct kvm_memory_slot *slot; int idx; @@ -1793,7 +1778,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) if (!handle_valid(handle)) return 0; - info = (struct kvmgt_guest_info *)handle; + info = (struct intel_vgpu *)handle; kvm = info->kvm; idx = 
srcu_read_lock(&kvm->srcu); @@ -1821,11 +1806,11 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *val, int len, struct kvm_page_track_notifier_node *node) { - struct kvmgt_guest_info *info = container_of(node, - struct kvmgt_guest_info, track_node); + struct intel_vgpu *info = + container_of(node, struct intel_vgpu, track_node); if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa))) - intel_vgpu_page_track_handler(info->vgpu, gpa, + intel_vgpu_page_track_handler(info, gpa, (void *)val, len); } @@ -1835,8 +1820,8 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, { int i; gfn_t gfn; - struct kvmgt_guest_info *info = container_of(node, - struct kvmgt_guest_info, track_node); + struct intel_vgpu *info = + container_of(node, struct intel_vgpu, track_node); write_lock(&kvm->mmu_lock); for (i = 0; i < slot->npages; i++) { @@ -1853,7 +1838,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) { struct intel_vgpu *itr; - struct kvmgt_guest_info *info; int id; bool ret = false; @@ -1862,8 +1846,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) if (!handle_valid(itr->handle)) continue; - info = (struct kvmgt_guest_info *)itr->handle; - if (kvm && kvm == info->kvm) { + if (kvm && kvm == itr->kvm) { ret = true; goto out; } @@ -1875,7 +1858,6 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) static int kvmgt_guest_init(struct mdev_device *mdev) { - struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; struct kvm *kvm; @@ -1892,38 +1874,29 @@ static int kvmgt_guest_init(struct mdev_device *mdev) if (__kvmgt_vgpu_exist(vgpu, kvm)) return -EEXIST; - info = vzalloc(sizeof(struct kvmgt_guest_info)); - if (!info) - return -ENOMEM; + vgpu->handle = (unsigned long)vgpu; + kvm_get_kvm(vgpu->kvm); - vgpu->handle = (unsigned long)info; - info->vgpu = vgpu; - info->kvm = kvm; - kvm_get_kvm(info->kvm); - - kvmgt_protect_table_init(info); + kvmgt_protect_table_init(vgpu); gvt_cache_init(vgpu); - info->track_node.track_write = kvmgt_page_track_write; - info->track_node.track_flush_slot = kvmgt_page_track_flush_slot; - kvm_page_track_register_notifier(kvm, &info->track_node); + vgpu->track_node.track_write = kvmgt_page_track_write; + vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; + kvm_page_track_register_notifier(kvm, &vgpu->track_node); debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, &vgpu->nr_cache_entries); return 0; } -static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) +static bool kvmgt_guest_exit(struct intel_vgpu *info) { - debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, - info->vgpu->debugfs)); + debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, info->debugfs)); kvm_page_track_unregister_notifier(info->kvm, &info->track_node); kvm_put_kvm(info->kvm); kvmgt_protect_table_destroy(info); - gvt_cache_destroy(info->vgpu); - vfree(info); - + gvt_cache_destroy(info); return true; } @@ -1946,14 +1919,12 @@ static void kvmgt_detach_vgpu(void *p_vgpu) static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) { - struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; if (!handle_valid(handle)) return -ESRCH; - info = (struct kvmgt_guest_info *)handle; - vgpu = info->vgpu; + vgpu = (struct intel_vgpu *)handle; /* * When guest is poweroff, msi_trigger is set to NULL, but vgpu's @@ -1975,15 +1946,15 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) static 
unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) { - struct kvmgt_guest_info *info; + struct intel_vgpu *vgpu; kvm_pfn_t pfn; if (!handle_valid(handle)) return INTEL_GVT_INVALID_ADDR; - info = (struct kvmgt_guest_info *)handle; + vgpu = (struct intel_vgpu *)handle; - pfn = gfn_to_pfn(info->kvm, gfn); + pfn = gfn_to_pfn(vgpu->kvm, gfn); if (is_error_noslot_pfn(pfn)) return INTEL_GVT_INVALID_ADDR; @@ -2000,7 +1971,7 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, if (!handle_valid(handle)) return -EINVAL; - vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; + vgpu = (struct intel_vgpu *)handle; mutex_lock(&vgpu->cache_lock); @@ -2042,22 +2013,22 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) { - struct kvmgt_guest_info *info; + struct intel_vgpu *vgpu; struct gvt_dma *entry; int ret = 0; if (!handle_valid(handle)) return -ENODEV; - info = (struct kvmgt_guest_info *)handle; + vgpu = (struct intel_vgpu *)handle; - mutex_lock(&info->vgpu->cache_lock); - entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); + mutex_lock(&vgpu->cache_lock); + entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) kref_get(&entry->ref); else ret = -ENOMEM; - mutex_unlock(&info->vgpu->cache_lock); + mutex_unlock(&vgpu->cache_lock); return ret; } @@ -2079,7 +2050,7 @@ static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr if (!handle_valid(handle)) return; - vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; + vgpu = (struct intel_vgpu *)handle; mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); @@ -2091,14 +2062,14 @@ static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, void *buf, unsigned long len, bool write) { - struct kvmgt_guest_info *info; + struct intel_vgpu *vgpu; if (!handle_valid(handle)) return -ESRCH; - info = (struct kvmgt_guest_info *)handle; + vgpu = (struct intel_vgpu *)handle; - return vfio_dma_rw(info->vgpu->vfio_group, gpa, buf, len, write); + return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, write); } static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, @@ -2115,7 +2086,7 @@ static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa, static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) { - struct kvmgt_guest_info *info; + struct intel_vgpu *vgpu; struct kvm *kvm; int idx; bool ret; @@ -2123,8 +2094,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) if (!handle_valid(handle)) return false; - info = (struct kvmgt_guest_info *)handle; - kvm = info->kvm; + vgpu = (struct intel_vgpu *)handle; + kvm = vgpu->kvm; idx = srcu_read_lock(&kvm->srcu); ret = kvm_is_visible_gfn(kvm, gfn); From 3c340d05868d98bfded92c405363fd63bff3ca62 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:41 +0200 Subject: [PATCH 15/41] drm/i915/gvt: remove vgpu->handle Always pass the actual vgpu structure instead of encoding it as a "handle" and add a bool flag to denote if a VGPU is attached. 
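As an illustrative sketch only (abridged from the diff below, with the elided bodies unchanged), the calling convention changes like this, using kvmgt_inject_msi() as the example:

    /* before: the vgpu was smuggled through an opaque handle */
    static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
    {
            struct intel_vgpu *vgpu;

            if (!handle_valid(handle))
                    return -ESRCH;
            vgpu = (struct intel_vgpu *)handle;
            /* ... signal vgpu->msi_trigger as before ... */
    }

    /* after: pass the vgpu directly and test the new attached flag */
    static int kvmgt_inject_msi(struct intel_vgpu *vgpu, u32 addr, u16 data)
    {
            if (!vgpu->attached)
                    return -ESRCH;
            /* ... signal vgpu->msi_trigger as before ... */
    }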
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-13-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 3 +- drivers/gpu/drm/i915/gvt/hypercall.h | 32 +++---- drivers/gpu/drm/i915/gvt/kvmgt.c | 126 +++++++++------------------ drivers/gpu/drm/i915/gvt/mpt.h | 20 ++--- drivers/gpu/drm/i915/gvt/vgpu.c | 6 +- 5 files changed, 71 insertions(+), 116 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 16daa615f9c0..cda70ea3d174 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -181,8 +181,8 @@ struct intel_vgpu { struct intel_gvt *gvt; struct mutex vgpu_lock; int id; - unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ bool active; + bool attached; bool pv_notified; bool failsafe; unsigned int resetting_eng; @@ -449,7 +449,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); #define RING_CTX_SIZE 320 struct intel_vgpu_creation_params { - __u64 handle; __u64 low_gm_sz; /* in MB */ __u64 high_gm_sz; /* in MB */ __u64 fence_sz; diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index eacee6f41f9c..9f0475759825 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -36,6 +36,7 @@ #include struct device; +struct intel_vgpu; /* * Specific GVT-g MPT modules function collections. Currently GVT-g supports @@ -44,27 +45,28 @@ struct device; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - void (*detach_vgpu)(void *vgpu); - int (*inject_msi)(unsigned long handle, u32 addr, u16 data); - int (*enable_page_track)(unsigned long handle, u64 gfn); - int (*disable_page_track)(unsigned long handle, u64 gfn); - int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf, + void (*detach_vgpu)(struct intel_vgpu *vgpu); + int (*inject_msi)(struct intel_vgpu *vgpu, u32 addr, u16 data); + int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); + int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); + int (*read_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len); - int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf, + int (*write_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len); - unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn); + unsigned long (*gfn_to_mfn)(struct intel_vgpu *vgpu, unsigned long gfn); - int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn, + int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr); - void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr); + void (*dma_unmap_guest_page)(struct intel_vgpu *vgpu, + dma_addr_t dma_addr); - int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr); + int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr); - int (*set_opregion)(void *vgpu); - int (*set_edid)(void *vgpu, int port_num); - int (*get_vfio_device)(void *vgpu); - void (*put_vfio_device)(void *vgpu); - bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn); + int (*set_opregion)(struct intel_vgpu *vgpu); + int (*set_edid)(struct intel_vgpu *vgpu, int port_num); + int (*get_vfio_device)(struct intel_vgpu *vgpu); + void (*put_vfio_device)(struct intel_vgpu *vgpu); + bool (*is_valid_gfn)(struct 
intel_vgpu *vgpu, unsigned long gfn); }; #endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 37cdf092a714..ed010ab13310 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -101,11 +101,6 @@ struct gvt_dma { struct kref ref; }; -static inline bool handle_valid(unsigned long handle) -{ - return !!(handle & ~0xff); -} - static ssize_t available_instances_show(struct mdev_type *mtype, struct mdev_type_attribute *attr, char *buf) @@ -668,10 +663,8 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu, return 0; } -static int kvmgt_get_vfio_device(void *p_vgpu) +static int kvmgt_get_vfio_device(struct intel_vgpu *vgpu) { - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; - vgpu->vfio_device = vfio_device_get_from_dev( mdev_dev(vgpu->mdev)); if (!vgpu->vfio_device) { @@ -682,9 +675,8 @@ static int kvmgt_get_vfio_device(void *p_vgpu) } -static int kvmgt_set_opregion(void *p_vgpu) +static int kvmgt_set_opregion(struct intel_vgpu *vgpu) { - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; void *base; int ret; @@ -710,9 +702,8 @@ static int kvmgt_set_opregion(void *p_vgpu) return ret; } -static int kvmgt_set_edid(void *p_vgpu, int port_num) +static int kvmgt_set_edid(struct intel_vgpu *vgpu, int port_num) { - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); struct vfio_edid_region *base; int ret; @@ -740,10 +731,8 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num) return ret; } -static void kvmgt_put_vfio_device(void *data) +static void kvmgt_put_vfio_device(struct intel_vgpu *vgpu) { - struct intel_vgpu *vgpu = data; - if (WARN_ON(!vgpu->vfio_device)) return; @@ -791,7 +780,7 @@ static int intel_vgpu_remove(struct mdev_device *mdev) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - if (handle_valid(vgpu->handle)) + if (vgpu->attached) return -EBUSY; intel_gvt_destroy_vgpu(vgpu); @@ -929,7 +918,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) struct drm_i915_private *i915 = vgpu->gvt->gt->i915; int ret; - if (!handle_valid(vgpu->handle)) + if (!vgpu->attached) return; if (atomic_cmpxchg(&vgpu->released, 0, 1)) @@ -956,7 +945,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) vfio_group_put_external_user(vgpu->vfio_group); vgpu->kvm = NULL; - vgpu->handle = 0; + vgpu->attached = false; } static void intel_vgpu_close_device(struct mdev_device *mdev) @@ -1734,19 +1723,15 @@ static void kvmgt_host_exit(struct device *dev, void *gvt) intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); } -static int kvmgt_page_track_add(unsigned long handle, u64 gfn) +static int kvmgt_page_track_add(struct intel_vgpu *info, u64 gfn) { - struct intel_vgpu *info; - struct kvm *kvm; + struct kvm *kvm = info->kvm; struct kvm_memory_slot *slot; int idx; - if (!handle_valid(handle)) + if (!info->attached) return -ESRCH; - info = (struct intel_vgpu *)handle; - kvm = info->kvm; - idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { @@ -1768,19 +1753,15 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn) return 0; } -static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) +static int kvmgt_page_track_remove(struct intel_vgpu *info, u64 gfn) { - struct intel_vgpu *info; - struct kvm *kvm; + struct kvm *kvm = info->kvm; struct kvm_memory_slot *slot; int idx; - if (!handle_valid(handle)) + if (!info->attached) return 0; - info = (struct intel_vgpu 
*)handle; - kvm = info->kvm; - idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { @@ -1843,7 +1824,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) mutex_lock(&vgpu->gvt->lock); for_each_active_vgpu(vgpu->gvt, itr, id) { - if (!handle_valid(itr->handle)) + if (!itr->attached) continue; if (kvm && kvm == itr->kvm) { @@ -1858,14 +1839,12 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) static int kvmgt_guest_init(struct mdev_device *mdev) { - struct intel_vgpu *vgpu; - struct kvm *kvm; + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct kvm *kvm = vgpu->kvm; - vgpu = mdev_get_drvdata(mdev); - if (handle_valid(vgpu->handle)) + if (vgpu->attached) return -EEXIST; - kvm = vgpu->kvm; if (!kvm || kvm->mm != current->mm) { gvt_vgpu_err("KVM is required to use Intel vGPU\n"); return -ESRCH; @@ -1874,7 +1853,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev) if (__kvmgt_vgpu_exist(vgpu, kvm)) return -EEXIST; - vgpu->handle = (unsigned long)vgpu; + vgpu->attached = true; kvm_get_kvm(vgpu->kvm); kvmgt_protect_table_init(vgpu); @@ -1900,10 +1879,9 @@ static bool kvmgt_guest_exit(struct intel_vgpu *info) return true; } -static void kvmgt_detach_vgpu(void *p_vgpu) +static void kvmgt_detach_vgpu(struct intel_vgpu *vgpu) { int i; - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; if (!vgpu->region) return; @@ -1917,15 +1895,11 @@ static void kvmgt_detach_vgpu(void *p_vgpu) vgpu->region = NULL; } -static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) +static int kvmgt_inject_msi(struct intel_vgpu *vgpu, u32 addr, u16 data) { - struct intel_vgpu *vgpu; - - if (!handle_valid(handle)) + if (!vgpu->attached) return -ESRCH; - vgpu = (struct intel_vgpu *)handle; - /* * When guest is poweroff, msi_trigger is set to NULL, but vgpu's * config and mmio register isn't restored to default during guest @@ -1944,16 +1918,14 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) return -EFAULT; } -static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) +static unsigned long kvmgt_gfn_to_pfn(struct intel_vgpu *vgpu, + unsigned long gfn) { - struct intel_vgpu *vgpu; kvm_pfn_t pfn; - if (!handle_valid(handle)) + if (!vgpu->attached) return INTEL_GVT_INVALID_ADDR; - vgpu = (struct intel_vgpu *)handle; - pfn = gfn_to_pfn(vgpu->kvm, gfn); if (is_error_noslot_pfn(pfn)) return INTEL_GVT_INVALID_ADDR; @@ -1961,18 +1933,15 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) return pfn; } -static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, +static int kvmgt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { - struct intel_vgpu *vgpu; struct gvt_dma *entry; int ret; - if (!handle_valid(handle)) + if (!vgpu->attached) return -EINVAL; - vgpu = (struct intel_vgpu *)handle; - mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_gfn(vgpu, gfn); @@ -2011,17 +1980,15 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, return ret; } -static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) +static int kvmgt_dma_pin_guest_page(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) { - struct intel_vgpu *vgpu; struct gvt_dma *entry; int ret = 0; - if (!handle_valid(handle)) + if (!vgpu->attached) return -ENODEV; - vgpu = (struct intel_vgpu *)handle; - mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(vgpu, 
dma_addr); if (entry) @@ -2042,16 +2009,14 @@ static void __gvt_dma_release(struct kref *ref) __gvt_cache_remove_entry(entry->vgpu, entry); } -static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) +static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) { - struct intel_vgpu *vgpu; struct gvt_dma *entry; - if (!handle_valid(handle)) + if (!vgpu->attached) return; - vgpu = (struct intel_vgpu *)handle; - mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) @@ -2059,44 +2024,35 @@ static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr mutex_unlock(&vgpu->cache_lock); } -static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, +static int kvmgt_rw_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len, bool write) { - struct intel_vgpu *vgpu; - - if (!handle_valid(handle)) + if (!vgpu->attached) return -ESRCH; - - vgpu = (struct intel_vgpu *)handle; - return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, write); } -static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, +static int kvmgt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len) { - return kvmgt_rw_gpa(handle, gpa, buf, len, false); + return kvmgt_rw_gpa(vgpu, gpa, buf, len, false); } -static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa, +static int kvmgt_write_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len) { - return kvmgt_rw_gpa(handle, gpa, buf, len, true); + return kvmgt_rw_gpa(vgpu, gpa, buf, len, true); } -static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) +static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) { - struct intel_vgpu *vgpu; - struct kvm *kvm; + struct kvm *kvm = vgpu->kvm; int idx; bool ret; - if (!handle_valid(handle)) + if (!vgpu->attached) return false; - vgpu = (struct intel_vgpu *)handle; - kvm = vgpu->kvm; - idx = srcu_read_lock(&kvm->srcu); ret = kvm_is_visible_gfn(kvm, gfn); srcu_read_unlock(&kvm->srcu, idx); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 8a659301d78b..ba0c31c4a705 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -118,7 +118,7 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu) trace_inject_msi(vgpu->id, addr, data); - ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data); + ret = intel_gvt_host.mpt->inject_msi(vgpu, addr, data); if (ret) return ret; return 0; @@ -135,7 +135,7 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu) static inline int intel_gvt_hypervisor_enable_page_track( struct intel_vgpu *vgpu, unsigned long gfn) { - return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn); + return intel_gvt_host.mpt->enable_page_track(vgpu, gfn); } /** @@ -149,7 +149,7 @@ static inline int intel_gvt_hypervisor_enable_page_track( static inline int intel_gvt_hypervisor_disable_page_track( struct intel_vgpu *vgpu, unsigned long gfn) { - return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn); + return intel_gvt_host.mpt->disable_page_track(vgpu, gfn); } /** @@ -165,7 +165,7 @@ static inline int intel_gvt_hypervisor_disable_page_track( static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len) { - return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len); + return intel_gvt_host.mpt->read_gpa(vgpu, 
gpa, buf, len); } /** @@ -181,7 +181,7 @@ static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu, static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, unsigned long len) { - return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len); + return intel_gvt_host.mpt->write_gpa(vgpu, gpa, buf, len); } /** @@ -195,7 +195,7 @@ static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu, static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn( struct intel_vgpu *vgpu, unsigned long gfn) { - return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn); + return intel_gvt_host.mpt->gfn_to_mfn(vgpu, gfn); } /** @@ -212,7 +212,7 @@ static inline int intel_gvt_hypervisor_dma_map_guest_page( struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { - return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size, + return intel_gvt_host.mpt->dma_map_guest_page(vgpu, gfn, size, dma_addr); } @@ -224,7 +224,7 @@ static inline int intel_gvt_hypervisor_dma_map_guest_page( static inline void intel_gvt_hypervisor_dma_unmap_guest_page( struct intel_vgpu *vgpu, dma_addr_t dma_addr) { - intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr); + intel_gvt_host.mpt->dma_unmap_guest_page(vgpu, dma_addr); } /** @@ -239,7 +239,7 @@ static inline int intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { - return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr); + return intel_gvt_host.mpt->dma_pin_guest_page(vgpu, dma_addr); } /** @@ -318,7 +318,7 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn( if (!intel_gvt_host.mpt->is_valid_gfn) return true; - return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn); + return intel_gvt_host.mpt->is_valid_gfn(vgpu, gfn); } #endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index fd335a7bcb84..5356aa866968 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -370,8 +370,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu *vgpu; int ret; - gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n", - param->handle, param->low_gm_sz, param->high_gm_sz, + gvt_dbg_core("low %llu MB high %llu MB fence %llu\n", + param->low_gm_sz, param->high_gm_sz, param->fence_sz); vgpu = vzalloc(sizeof(*vgpu)); @@ -384,7 +384,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, goto out_free_vgpu; vgpu->id = ret; - vgpu->handle = param->handle; vgpu->gvt = gvt; vgpu->sched_ctl.weight = param->weight; mutex_init(&vgpu->vgpu_lock); @@ -477,7 +476,6 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_creation_params param; struct intel_vgpu *vgpu; - param.handle = 0; param.primary = 1; param.low_gm_sz = type->low_gm_size; param.high_gm_sz = type->high_gm_size; From e3d7640eeeb3066772500581172129a151a1a917 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:42 +0200 Subject: [PATCH 16/41] drm/i915/gvt: devirtualize ->{read,write}_gpa Just call the VFIO functions directly instead of through the method table. 
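For reference, the indirection is replaced by a pair of inline helpers in gvt.h that wrap vfio_dma_rw() directly; abridged here (the kernel-doc comments are in the diff below):

    static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu,
                    unsigned long gpa, void *buf, unsigned long len)
    {
            if (!vgpu->attached)
                    return -ESRCH;
            return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
    }

intel_gvt_write_gpa() is identical except that it passes true for the write argument, and every intel_gvt_hypervisor_{read,write}_gpa() caller is converted mechanically to the new names.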
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-14-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 4 +-- drivers/gpu/drm/i915/gvt/execlist.c | 12 ++++----- drivers/gpu/drm/i915/gvt/gtt.c | 6 ++--- drivers/gpu/drm/i915/gvt/gvt.h | 37 +++++++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/hypercall.h | 4 --- drivers/gpu/drm/i915/gvt/kvmgt.c | 23 ----------------- drivers/gpu/drm/i915/gvt/mmio.c | 4 +-- drivers/gpu/drm/i915/gvt/mpt.h | 32 ----------------------- drivers/gpu/drm/i915/gvt/opregion.c | 10 +++----- drivers/gpu/drm/i915/gvt/scheduler.c | 37 +++++++++++++-------------- 10 files changed, 72 insertions(+), 97 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 2459213b6c87..b9eb75a2b400 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1011,7 +1011,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, if (GRAPHICS_VER(s->engine->i915) == 9 && intel_gvt_mmio_is_sr_in_ctx(gvt, offset) && !strncmp(cmd, "lri", 3)) { - intel_gvt_hypervisor_read_gpa(s->vgpu, + intel_gvt_read_gpa(s->vgpu, s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); /* check inhibit context */ if (ctx_sr_ctl & 1) { @@ -1775,7 +1775,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm, copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ? I915_GTT_PAGE_SIZE - offset : end_gma - gma; - intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len); + intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len); len += copy_len; gma += copy_len; diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 66d354c4195b..274c6ef42400 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -159,12 +159,12 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist, hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, vgpu->hws_pga[execlist->engine->id]); if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) { - intel_gvt_hypervisor_write_gpa(vgpu, - hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8, - status, 8); - intel_gvt_hypervisor_write_gpa(vgpu, - hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4, - &write_pointer, 4); + intel_gvt_write_gpa(vgpu, + hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8, + status, 8); + intel_gvt_write_gpa(vgpu, + hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4, + &write_pointer, 4); } gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n", diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d4082f4b9be1..9b696e5705eb 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -314,7 +314,7 @@ static inline int gtt_get_entry64(void *pt, return -EINVAL; if (hypervisor_access) { - ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa + + ret = intel_gvt_read_gpa(vgpu, gpa + (index << info->gtt_entry_size_shift), &e->val64, 8); if (WARN_ON(ret)) @@ -339,7 +339,7 @@ static inline int gtt_set_entry64(void *pt, return -EINVAL; if (hypervisor_access) { - ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa + + ret = intel_gvt_write_gpa(vgpu, gpa + (index << info->gtt_entry_size_shift), &e->val64, 8); if (WARN_ON(ret)) @@ -1497,7 +1497,7 @@ static int attach_oos_page(struct intel_vgpu_oos_page *oos_page, struct intel_gvt *gvt = spt->vgpu->gvt; int ret; - ret = 
intel_gvt_hypervisor_read_gpa(spt->vgpu, + ret = intel_gvt_read_gpa(spt->vgpu, spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, oos_page->mem, I915_GTT_PAGE_SIZE); if (ret) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index cda70ea3d174..ea07f4513805 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -35,6 +35,7 @@ #include #include +#include #include "i915_drv.h" #include "intel_gvt.h" @@ -720,6 +721,42 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch( return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH; } +/** + * intel_gvt_read_gpa - copy data from GPA to host data buffer + * @vgpu: a vGPU + * @gpa: guest physical address + * @buf: host data buffer + * @len: data length + * + * Returns: + * Zero on success, negative error code if failed. + */ +static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, + void *buf, unsigned long len) +{ + if (!vgpu->attached) + return -ESRCH; + return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false); +} + +/** + * intel_gvt_write_gpa - copy data from host data buffer to GPA + * @vgpu: a vGPU + * @gpa: guest physical address + * @buf: host data buffer + * @len: data length + * + * Returns: + * Zero on success, negative error code if failed. + */ +static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu, + unsigned long gpa, void *buf, unsigned long len) +{ + if (!vgpu->attached) + return -ESRCH; + return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true); +} + void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 9f0475759825..61e493e2de85 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -49,10 +49,6 @@ struct intel_gvt_mpt { int (*inject_msi)(struct intel_vgpu *vgpu, u32 addr, u16 data); int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); - int (*read_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, - unsigned long len); - int (*write_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf, - unsigned long len); unsigned long (*gfn_to_mfn)(struct intel_vgpu *vgpu, unsigned long gfn); int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index ed010ab13310..e4e3d0cc66fc 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include @@ -2024,26 +2023,6 @@ static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, mutex_unlock(&vgpu->cache_lock); } -static int kvmgt_rw_gpa(struct intel_vgpu *vgpu, unsigned long gpa, - void *buf, unsigned long len, bool write) -{ - if (!vgpu->attached) - return -ESRCH; - return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, write); -} - -static int kvmgt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, - void *buf, unsigned long len) -{ - return kvmgt_rw_gpa(vgpu, gpa, buf, len, false); -} - -static int kvmgt_write_gpa(struct intel_vgpu *vgpu, unsigned long gpa, - void *buf, unsigned long len) -{ - return kvmgt_rw_gpa(vgpu, gpa, buf, len, true); -} - static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) { struct kvm *kvm = vgpu->kvm; @@ -2067,8 +2046,6 @@ static const 
struct intel_gvt_mpt kvmgt_mpt = { .inject_msi = kvmgt_inject_msi, .enable_page_track = kvmgt_page_track_add, .disable_page_track = kvmgt_page_track_remove, - .read_gpa = kvmgt_read_gpa, - .write_gpa = kvmgt_write_gpa, .gfn_to_mfn = kvmgt_gfn_to_pfn, .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 5db0ef83d522..9acc00505fde 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -139,7 +139,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, } if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) { - ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes); + ret = intel_gvt_read_gpa(vgpu, pa, p_data, bytes); goto out; } @@ -215,7 +215,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, } if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) { - ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes); + ret = intel_gvt_write_gpa(vgpu, pa, p_data, bytes); goto out; } diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index ba0c31c4a705..72388ceec596 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -152,38 +152,6 @@ static inline int intel_gvt_hypervisor_disable_page_track( return intel_gvt_host.mpt->disable_page_track(vgpu, gfn); } -/** - * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer - * @vgpu: a vGPU - * @gpa: guest physical address - * @buf: host data buffer - * @len: data length - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu, - unsigned long gpa, void *buf, unsigned long len) -{ - return intel_gvt_host.mpt->read_gpa(vgpu, gpa, buf, len); -} - -/** - * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA - * @vgpu: a vGPU - * @gpa: guest physical address - * @buf: host data buffer - * @len: data length - * - * Returns: - * Zero on success, negative error code if failed. 
- */ -static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu, - unsigned long gpa, void *buf, unsigned long len) -{ - return intel_gvt_host.mpt->write_gpa(vgpu, gpa, buf, len); -} - /** * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 286ac6d7c6ce..d2bed466540a 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -421,14 +421,14 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) INTEL_GVT_OPREGION_SCIC; parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + INTEL_GVT_OPREGION_PARM; - ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, &scic, sizeof(scic)); + ret = intel_gvt_read_gpa(vgpu, scic_pa, &scic, sizeof(scic)); if (ret) { gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); return ret; } - ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa, &parm, sizeof(parm)); + ret = intel_gvt_read_gpa(vgpu, parm_pa, &parm, sizeof(parm)); if (ret) { gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); @@ -465,16 +465,14 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) parm = 0; out: - ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, &scic, - sizeof(scic)); + ret = intel_gvt_write_gpa(vgpu, scic_pa, &scic, sizeof(scic)); if (ret) { gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); return ret; } - ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa, &parm, - sizeof(parm)); + ret = intel_gvt_write_gpa(vgpu, parm_pa, &parm, sizeof(parm)); if (ret) { gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 679476da0640..d6fe94cd0fdb 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -150,10 +150,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) sr_oa_regs(workload, (u32 *)shadow_ring_context, true); #define COPY_REG(name) \ - intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) #define COPY_REG_MASKED(name) {\ - intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ + RING_CTX_OFF(name.val),\ &shadow_ring_context->name.val, 4);\ shadow_ring_context->name.val |= 0xffff << 16;\ @@ -167,7 +167,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) COPY_REG(rcs_indirect_ctx); COPY_REG(rcs_indirect_ctx_offset); } else if (workload->engine->id == BCS0) - intel_gvt_hypervisor_read_gpa(vgpu, + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa + BCS_TILE_REGISTER_VAL_OFFSET, (void *)shadow_ring_context + @@ -178,7 +178,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) /* don't copy Ring Context (the first 0x50 dwords), * only copy the Engine Context part from guest */ - intel_gvt_hypervisor_read_gpa(vgpu, + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa + RING_CTX_SIZE, (void *)shadow_ring_context + @@ -245,7 +245,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) continue; read: - intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size); + 
intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size); gpa_base = context_gpa; gpa_size = I915_GTT_PAGE_SIZE; dst = context_base + (i << I915_GTT_PAGE_SHIFT); @@ -911,8 +911,7 @@ static void update_guest_pdps(struct intel_vgpu *vgpu, gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val); for (i = 0; i < 8; i++) - intel_gvt_hypervisor_write_gpa(vgpu, - gpa + i * 8, &pdp[7 - i], 4); + intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4); } static __maybe_unused bool @@ -1007,13 +1006,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload) continue; write: - intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size); + intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size); gpa_base = context_gpa; gpa_size = I915_GTT_PAGE_SIZE; src = context_base + (i << I915_GTT_PAGE_SHIFT); } - intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + + intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4); shadow_ring_context = (void *) ctx->lrc_reg_state; @@ -1028,7 +1027,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) } #define COPY_REG(name) \ - intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \ + intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) COPY_REG(ctx_ctrl); @@ -1036,7 +1035,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) #undef COPY_REG - intel_gvt_hypervisor_write_gpa(vgpu, + intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + sizeof(*shadow_ring_context), (void *)shadow_ring_context + @@ -1573,7 +1572,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu, gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val); for (i = 0; i < 8; i++) - intel_gvt_hypervisor_read_gpa(vgpu, + intel_gvt_read_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4); } @@ -1644,10 +1643,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, return ERR_PTR(-EINVAL); } - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ring_header.val), &head, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ring_tail.val), &tail, 4); guest_head = head; @@ -1674,11 +1673,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, gvt_dbg_el("ring %s begin a new workload\n", engine->name); /* record some ring buffer register values for scan and shadow */ - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rb_start.val), &start, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rb_ctrl.val), &ctl, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); if (!intel_gvt_ggtt_validate_range(vgpu, start, @@ -1701,9 +1700,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, workload->rb_ctl = ctl; if (engine->id == RCS0) { - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4); workload->wa_ctx.indirect_ctx.guest_gma = From fe902f0ce686e8dbdaea7dd3c40271640857328a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 
16:13:43 +0200 Subject: [PATCH 17/41] drm/i915/gvt: devirtualize ->{get,put}_vfio_device Just open code the calls to the VFIO APIs. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-15-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 12 ++++++----- drivers/gpu/drm/i915/gvt/hypercall.h | 2 -- drivers/gpu/drm/i915/gvt/kvmgt.c | 22 -------------------- drivers/gpu/drm/i915/gvt/mpt.h | 30 ---------------------------- 4 files changed, 7 insertions(+), 59 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index c95c25d2addb..cc1a9ac0d272 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -29,7 +29,7 @@ */ #include -#include +#include #include #include @@ -157,7 +157,7 @@ static void dmabuf_gem_object_free(struct kref *kref) dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj == obj) { list_del(pos); - intel_gvt_hypervisor_put_vfio_device(vgpu); + vfio_device_put(vgpu->vfio_device); idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); kfree(dmabuf_obj->info); @@ -492,9 +492,11 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args) kref_init(&dmabuf_obj->kref); mutex_lock(&vgpu->dmabuf_lock); - if (intel_gvt_hypervisor_get_vfio_device(vgpu)) { - gvt_vgpu_err("get vfio device failed\n"); + vgpu->vfio_device = vfio_device_get_from_dev(mdev_dev(vgpu->mdev)); + if (!vgpu->vfio_device) { + gvt_vgpu_err("failed to get vfio device\n"); mutex_unlock(&vgpu->dmabuf_lock); + ret = -ENODEV; goto out_free_info; } mutex_unlock(&vgpu->dmabuf_lock); @@ -603,7 +605,7 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu) dmabuf_obj->vgpu = NULL; idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); - intel_gvt_hypervisor_put_vfio_device(vgpu); + vfio_device_put(vgpu->vfio_device); list_del(pos); /* dmabuf_obj might be freed in dmabuf_obj_put */ diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 61e493e2de85..fd903d52f431 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -60,8 +60,6 @@ struct intel_gvt_mpt { int (*set_opregion)(struct intel_vgpu *vgpu); int (*set_edid)(struct intel_vgpu *vgpu, int port_num); - int (*get_vfio_device)(struct intel_vgpu *vgpu); - void (*put_vfio_device)(struct intel_vgpu *vgpu); bool (*is_valid_gfn)(struct intel_vgpu *vgpu, unsigned long gfn); }; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index e4e3d0cc66fc..1f70cbd51a3f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -662,18 +662,6 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu, return 0; } -static int kvmgt_get_vfio_device(struct intel_vgpu *vgpu) -{ - vgpu->vfio_device = vfio_device_get_from_dev( - mdev_dev(vgpu->mdev)); - if (!vgpu->vfio_device) { - gvt_vgpu_err("failed to get vfio device\n"); - return -ENODEV; - } - return 0; -} - - static int kvmgt_set_opregion(struct intel_vgpu *vgpu) { void *base; @@ -730,14 +718,6 @@ static int kvmgt_set_edid(struct intel_vgpu *vgpu, int port_num) return ret; } -static void kvmgt_put_vfio_device(struct intel_vgpu *vgpu) -{ - if (WARN_ON(!vgpu->vfio_device)) - return; - - vfio_device_put(vgpu->vfio_device); -} - static int intel_vgpu_create(struct mdev_device *mdev) { struct intel_vgpu *vgpu = NULL; @@ -2052,8 +2032,6 @@ static const struct intel_gvt_mpt 
kvmgt_mpt = { .dma_pin_guest_page = kvmgt_dma_pin_guest_page, .set_opregion = kvmgt_set_opregion, .set_edid = kvmgt_set_edid, - .get_vfio_device = kvmgt_get_vfio_device, - .put_vfio_device = kvmgt_put_vfio_device, .is_valid_gfn = kvmgt_is_valid_gfn, }; diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 72388ceec596..2196187203af 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -242,36 +242,6 @@ static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu, return intel_gvt_host.mpt->set_edid(vgpu, port_num); } -/** - * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count - * @vgpu: a vGPU - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu) -{ - if (!intel_gvt_host.mpt->get_vfio_device) - return 0; - - return intel_gvt_host.mpt->get_vfio_device(vgpu); -} - -/** - * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count - * @vgpu: a vGPU - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu) -{ - if (!intel_gvt_host.mpt->put_vfio_device) - return; - - intel_gvt_host.mpt->put_vfio_device(vgpu); -} - /** * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn * @vgpu: a vGPU From f9399b0e4a9555227559f0adaa8e861bedb4b735 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:44 +0200 Subject: [PATCH 18/41] drm/i915/gvt: devirtualize ->set_edid and ->set_opregion Just call the code to setup the opregions and EDID data directly. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-16-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 3 +++ drivers/gpu/drm/i915/gvt/hypercall.h | 3 --- drivers/gpu/drm/i915/gvt/kvmgt.c | 6 ++---- drivers/gpu/drm/i915/gvt/mpt.h | 32 ---------------------------- drivers/gpu/drm/i915/gvt/vgpu.c | 6 +++--- 5 files changed, 8 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index ea07f4513805..d389c9c3822b 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -517,6 +517,9 @@ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu); void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu); +int intel_gvt_set_opregion(struct intel_vgpu *vgpu); +int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num); + /* validating GM functions */ #define vgpu_gmadr_is_aperture(vgpu, gmadr) \ ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \ diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index fd903d52f431..091249a924a8 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -57,9 +57,6 @@ struct intel_gvt_mpt { dma_addr_t dma_addr); int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr); - - int (*set_opregion)(struct intel_vgpu *vgpu); - int (*set_edid)(struct intel_vgpu *vgpu, int port_num); bool (*is_valid_gfn)(struct intel_vgpu *vgpu, unsigned long gfn); }; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 1f70cbd51a3f..006db16c7d03 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -662,7 +662,7 @@ static int 
intel_vgpu_register_reg(struct intel_vgpu *vgpu, return 0; } -static int kvmgt_set_opregion(struct intel_vgpu *vgpu) +int intel_gvt_set_opregion(struct intel_vgpu *vgpu) { void *base; int ret; @@ -689,7 +689,7 @@ static int kvmgt_set_opregion(struct intel_vgpu *vgpu) return ret; } -static int kvmgt_set_edid(struct intel_vgpu *vgpu, int port_num) +int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num) { struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); struct vfio_edid_region *base; @@ -2030,8 +2030,6 @@ static const struct intel_gvt_mpt kvmgt_mpt = { .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, .dma_pin_guest_page = kvmgt_dma_pin_guest_page, - .set_opregion = kvmgt_set_opregion, - .set_edid = kvmgt_set_edid, .is_valid_gfn = kvmgt_is_valid_gfn, }; diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 2196187203af..9738aa3377b4 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -210,38 +210,6 @@ intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, return intel_gvt_host.mpt->dma_pin_guest_page(vgpu, dma_addr); } -/** - * intel_gvt_hypervisor_set_opregion - Set opregion for guest - * @vgpu: a vGPU - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu) -{ - if (!intel_gvt_host.mpt->set_opregion) - return 0; - - return intel_gvt_host.mpt->set_opregion(vgpu); -} - -/** - * intel_gvt_hypervisor_set_edid - Set EDID region for guest - * @vgpu: a vGPU - * @port_num: display port number - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu, - int port_num) -{ - if (!intel_gvt_host.mpt->set_edid) - return 0; - - return intel_gvt_host.mpt->set_edid(vgpu, port_num); -} - /** * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 5356aa866968..69c1af3d6704 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -426,14 +426,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, intel_gvt_debugfs_add_vgpu(vgpu); - ret = intel_gvt_hypervisor_set_opregion(vgpu); + ret = intel_gvt_set_opregion(vgpu); if (ret) goto out_clean_sched_policy; if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv)) - ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B); + ret = intel_gvt_set_edid(vgpu, PORT_B); else - ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); + ret = intel_gvt_set_edid(vgpu, PORT_D); if (ret) goto out_clean_sched_policy; From 4c705ad0d784fd9ae7160d8c4e0a151abe465dbc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:45 +0200 Subject: [PATCH 19/41] drm/i915/gvt: devirtualize ->detach_vgpu Just call the function directly. 
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-17-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 1 + drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/kvmgt.c | 3 +-- drivers/gpu/drm/i915/gvt/mpt.h | 16 ---------------- drivers/gpu/drm/i915/gvt/vgpu.c | 2 +- 5 files changed, 3 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index d389c9c3822b..52ef88d2bf21 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -581,6 +581,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu); int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload); void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason); +void intel_vgpu_detach_regions(struct intel_vgpu *vgpu); enum { GVT_FAILSAFE_UNSUPPORTED_GUEST, diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 091249a924a8..08c622c4079b 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -45,7 +45,6 @@ struct intel_vgpu; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - void (*detach_vgpu)(struct intel_vgpu *vgpu); int (*inject_msi)(struct intel_vgpu *vgpu, u32 addr, u16 data); int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 006db16c7d03..4a6fed80c629 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1858,7 +1858,7 @@ static bool kvmgt_guest_exit(struct intel_vgpu *info) return true; } -static void kvmgt_detach_vgpu(struct intel_vgpu *vgpu) +void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) { int i; @@ -2022,7 +2022,6 @@ static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .detach_vgpu = kvmgt_detach_vgpu, .inject_msi = kvmgt_inject_msi, .enable_page_track = kvmgt_page_track_add, .disable_page_track = kvmgt_page_track_remove, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 9738aa3377b4..78efcf1e6946 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,22 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -/** - * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU - * related stuffs inside hypervisor. - * - * Returns: - * Zero on success, negative error code if failed. 
- */ -static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) -{ - /* optional to provide */ - if (!intel_gvt_host.mpt->detach_vgpu) - return; - - intel_gvt_host.mpt->detach_vgpu(vgpu); -} - #define MSI_CAP_CONTROL(offset) (offset + 2) #define MSI_CAP_ADDRESS(offset) (offset + 4) #define MSI_CAP_DATA(offset) (offset + 8) diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 69c1af3d6704..46da19b3225d 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -293,7 +293,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_opregion(vgpu); intel_vgpu_reset_ggtt(vgpu, true); intel_vgpu_clean_gtt(vgpu); - intel_gvt_hypervisor_detach_vgpu(vgpu); + intel_vgpu_detach_regions(vgpu); intel_vgpu_free_resource(vgpu); intel_vgpu_clean_mmio(vgpu); intel_vgpu_dmabuf_cleanup(vgpu); From b3bece34956f86dcc8307f20b41a072ccdc917dc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:46 +0200 Subject: [PATCH 20/41] drm/i915/gvt: devirtualize ->inject_msi Just open code the MSI injection in a single place instead of going through the method table. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-18-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/interrupt.c | 40 +++++++++++++++++++++++++++- drivers/gpu/drm/i915/gvt/kvmgt.c | 24 ----------------- drivers/gpu/drm/i915/gvt/mpt.h | 37 ------------------------- 4 files changed, 39 insertions(+), 63 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 08c622c4079b..de63bd8dd05b 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -45,7 +45,6 @@ struct intel_vgpu; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - int (*inject_msi)(struct intel_vgpu *vgpu, u32 addr, u16 data); int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); unsigned long (*gfn_to_mfn)(struct intel_vgpu *vgpu, unsigned long gfn); diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 228f623d466d..a6b2021b665f 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -29,6 +29,8 @@ * */ +#include + #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" @@ -397,9 +399,45 @@ static void init_irq_map(struct intel_gvt_irq *irq) } /* =======================vEvent injection===================== */ + +#define MSI_CAP_CONTROL(offset) (offset + 2) +#define MSI_CAP_ADDRESS(offset) (offset + 4) +#define MSI_CAP_DATA(offset) (offset + 8) +#define MSI_CAP_EN 0x1 + static int inject_virtual_interrupt(struct intel_vgpu *vgpu) { - return intel_gvt_hypervisor_inject_msi(vgpu); + unsigned long offset = vgpu->gvt->device_info.msi_cap_offset; + u16 control, data; + u32 addr; + + control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset)); + addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset)); + data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset)); + + /* Do not generate MSI if MSIEN is disabled */ + if (!(control & MSI_CAP_EN)) + return 0; + + if (WARN(control & GENMASK(15, 1), "only support one MSI format\n")) + return -EINVAL; + + trace_inject_msi(vgpu->id, addr, data); + + /* + * 
When guest is powered off, msi_trigger is set to NULL, but vgpu's + * config and mmio register isn't restored to default during guest + * poweroff. If this vgpu is still used in next vm, this vgpu's pipe + * may be enabled, then once this vgpu is active, it will get inject + * vblank interrupt request. But msi_trigger is null until msi is + * enabled by guest. so if msi_trigger is null, success is still + * returned and don't inject interrupt into guest. + */ + if (!vgpu->attached) + return -ESRCH; + if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1) + return -EFAULT; + return 0; } static void propagate_event(struct intel_gvt_irq *irq, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 4a6fed80c629..1d1c026fd825 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1874,29 +1874,6 @@ void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) vgpu->region = NULL; } -static int kvmgt_inject_msi(struct intel_vgpu *vgpu, u32 addr, u16 data) -{ - if (!vgpu->attached) - return -ESRCH; - - /* - * When guest is poweroff, msi_trigger is set to NULL, but vgpu's - * config and mmio register isn't restored to default during guest - * poweroff. If this vgpu is still used in next vm, this vgpu's pipe - * may be enabled, then once this vgpu is active, it will get inject - * vblank interrupt request. But msi_trigger is null until msi is - * enabled by guest. so if msi_trigger is null, success is still - * returned and don't inject interrupt into guest. - */ - if (vgpu->msi_trigger == NULL) - return 0; - - if (eventfd_signal(vgpu->msi_trigger, 1) == 1) - return 0; - - return -EFAULT; -} - static unsigned long kvmgt_gfn_to_pfn(struct intel_vgpu *vgpu, unsigned long gfn) { @@ -2022,7 +1999,6 @@ static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .inject_msi = kvmgt_inject_msi, .enable_page_track = kvmgt_page_track_add, .disable_page_track = kvmgt_page_track_remove, .gfn_to_mfn = kvmgt_gfn_to_pfn, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 78efcf1e6946..59369e8b3b69 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,43 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -#define MSI_CAP_CONTROL(offset) (offset + 2) -#define MSI_CAP_ADDRESS(offset) (offset + 4) -#define MSI_CAP_DATA(offset) (offset + 8) -#define MSI_CAP_EN 0x1 - -/** - * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU - * - * Returns: - * Zero on success, negative error code if failed. 
- */ -static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu) -{ - unsigned long offset = vgpu->gvt->device_info.msi_cap_offset; - u16 control, data; - u32 addr; - int ret; - - control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset)); - addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset)); - data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset)); - - /* Do not generate MSI if MSIEN is disable */ - if (!(control & MSI_CAP_EN)) - return 0; - - if (WARN(control & GENMASK(15, 1), "only support one MSI format\n")) - return -EINVAL; - - trace_inject_msi(vgpu->id, addr, data); - - ret = intel_gvt_host.mpt->inject_msi(vgpu, addr, data); - if (ret) - return ret; - return 0; -} - /** * intel_gvt_hypervisor_enable_page_track - track a guest page * @vgpu: a vGPU From bd73b4b193d45074ff48705d21d4fbecc3fcfac8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:47 +0200 Subject: [PATCH 21/41] drm/i915/gvt: devirtualize ->is_valid_gfn Just call the code directly and move towards the callers. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-19-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 20 ++++++++++++++++++-- drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/kvmgt.c | 17 ----------------- drivers/gpu/drm/i915/gvt/mpt.h | 17 ----------------- 4 files changed, 18 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 9b696e5705eb..1360412f8ef8 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -49,6 +49,22 @@ static bool enable_out_of_sync = false; static int preallocated_oos_pages = 8192; +static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) +{ + struct kvm *kvm = vgpu->kvm; + int idx; + bool ret; + + if (!vgpu->attached) + return false; + + idx = srcu_read_lock(&kvm->srcu); + ret = kvm_is_visible_gfn(kvm, gfn); + srcu_read_unlock(&kvm->srcu, idx); + + return ret; +} + /* * validate a gm address and related range size, * translate it to host gm address @@ -1331,7 +1347,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) ppgtt_set_shadow_entry(spt, &se, i); } else { gfn = ops->get_pfn(&ge); - if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { + if (!intel_gvt_is_valid_gfn(vgpu, gfn)) { ops->set_pfn(&se, gvt->gtt.scratch_mfn); ppgtt_set_shadow_entry(spt, &se, i); continue; @@ -2315,7 +2331,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, /* one PTE update may be issued in multiple writes and the * first write may not construct a valid gfn */ - if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { + if (!intel_gvt_is_valid_gfn(vgpu, gfn)) { ops->set_pfn(&m, gvt->gtt.scratch_mfn); goto out; } diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index de63bd8dd05b..c1a9eeed0460 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -55,7 +55,6 @@ struct intel_gvt_mpt { dma_addr_t dma_addr); int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr); - bool (*is_valid_gfn)(struct intel_vgpu *vgpu, unsigned long gfn); }; #endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 1d1c026fd825..93fd7f997c8a 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ 
b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1980,22 +1980,6 @@ static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, mutex_unlock(&vgpu->cache_lock); } -static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) -{ - struct kvm *kvm = vgpu->kvm; - int idx; - bool ret; - - if (!vgpu->attached) - return false; - - idx = srcu_read_lock(&kvm->srcu); - ret = kvm_is_visible_gfn(kvm, gfn); - srcu_read_unlock(&kvm->srcu, idx); - - return ret; -} - static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, @@ -2005,7 +1989,6 @@ static const struct intel_gvt_mpt kvmgt_mpt = { .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, .dma_pin_guest_page = kvmgt_dma_pin_guest_page, - .is_valid_gfn = kvmgt_is_valid_gfn, }; struct intel_gvt_host intel_gvt_host = { diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 59369e8b3b69..1a796f2181ba 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -157,21 +157,4 @@ intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, return intel_gvt_host.mpt->dma_pin_guest_page(vgpu, dma_addr); } -/** - * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn - * @vgpu: a vGPU - * @gfn: guest PFN - * - * Returns: - * true on valid gfn, false on not. - */ -static inline bool intel_gvt_hypervisor_is_valid_gfn( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - if (!intel_gvt_host.mpt->is_valid_gfn) - return true; - - return intel_gvt_host.mpt->is_valid_gfn(vgpu, gfn); -} - #endif /* _GVT_MPT_H_ */ From 4050dab5981cd48f67d2367fa90ae030bcc8f7dd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:48 +0200 Subject: [PATCH 22/41] drm/i915/gvt: devirtualize ->gfn_to_mfn Just open code it in the only caller. 
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-20-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 9 +++++---- drivers/gpu/drm/i915/gvt/hypercall.h | 1 - drivers/gpu/drm/i915/gvt/kvmgt.c | 16 ---------------- drivers/gpu/drm/i915/gvt/mpt.h | 14 -------------- 4 files changed, 5 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 1360412f8ef8..f6f3b22a70d2 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1178,15 +1178,16 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *entry) { const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; - unsigned long pfn; + kvm_pfn_t pfn; if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M)) return 0; - pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry)); - if (pfn == INTEL_GVT_INVALID_ADDR) + if (!vgpu->attached) + return -EINVAL; + pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry)); + if (is_error_noslot_pfn(pfn)) return -EINVAL; - return PageTransHuge(pfn_to_page(pfn)); } diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index c1a9eeed0460..dbde492cafc8 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -47,7 +47,6 @@ struct intel_gvt_mpt { void (*host_exit)(struct device *dev, void *gvt); int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); - unsigned long (*gfn_to_mfn)(struct intel_vgpu *vgpu, unsigned long gfn); int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 93fd7f997c8a..6d4c67270172 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1874,21 +1874,6 @@ void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) vgpu->region = NULL; } -static unsigned long kvmgt_gfn_to_pfn(struct intel_vgpu *vgpu, - unsigned long gfn) -{ - kvm_pfn_t pfn; - - if (!vgpu->attached) - return INTEL_GVT_INVALID_ADDR; - - pfn = gfn_to_pfn(vgpu->kvm, gfn); - if (is_error_noslot_pfn(pfn)) - return INTEL_GVT_INVALID_ADDR; - - return pfn; -} - static int kvmgt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { @@ -1985,7 +1970,6 @@ static const struct intel_gvt_mpt kvmgt_mpt = { .host_exit = kvmgt_host_exit, .enable_page_track = kvmgt_page_track_add, .disable_page_track = kvmgt_page_track_remove, - .gfn_to_mfn = kvmgt_gfn_to_pfn, .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, .dma_pin_guest_page = kvmgt_dma_pin_guest_page, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 1a796f2181ba..2d4bb6eaa08e 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -99,20 +99,6 @@ static inline int intel_gvt_hypervisor_disable_page_track( return intel_gvt_host.mpt->disable_page_track(vgpu, gfn); } -/** - * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN - * @vgpu: a vGPU - * @gpfn: guest pfn - * - * Returns: - * MFN on success, INTEL_GVT_INVALID_ADDR if failed. 
- */ -static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - return intel_gvt_host.mpt->gfn_to_mfn(vgpu, gfn); -} - /** * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page * @vgpu: a vGPU From 4c2baaaf764bfb6c293c75bc911b9366d35ee085 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:49 +0200 Subject: [PATCH 23/41] drm/i915/gvt: devirtualize ->{enable,disable}_page_track Just call the kvmgt functions directly. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-21-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 3 +++ drivers/gpu/drm/i915/gvt/hypercall.h | 2 -- drivers/gpu/drm/i915/gvt/kvmgt.c | 6 ++---- drivers/gpu/drm/i915/gvt/mpt.h | 28 --------------------------- drivers/gpu/drm/i915/gvt/page_track.c | 8 ++++---- 5 files changed, 9 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 52ef88d2bf21..a8a8728cd54a 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -765,6 +765,9 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); +int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn); +int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn); + #include "trace.h" #include "mpt.h" diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index dbde492cafc8..ded13a63ab66 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -45,8 +45,6 @@ struct intel_vgpu; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn); - int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn); int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 6d4c67270172..fbef3d3dfb1f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1702,7 +1702,7 @@ static void kvmgt_host_exit(struct device *dev, void *gvt) intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); } -static int kvmgt_page_track_add(struct intel_vgpu *info, u64 gfn) +int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) { struct kvm *kvm = info->kvm; struct kvm_memory_slot *slot; @@ -1732,7 +1732,7 @@ static int kvmgt_page_track_add(struct intel_vgpu *info, u64 gfn) return 0; } -static int kvmgt_page_track_remove(struct intel_vgpu *info, u64 gfn) +int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) { struct kvm *kvm = info->kvm; struct kvm_memory_slot *slot; @@ -1968,8 +1968,6 @@ static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .enable_page_track = kvmgt_page_track_add, - .disable_page_track = kvmgt_page_track_remove, .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, .dma_pin_guest_page = kvmgt_dma_pin_guest_page, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 2d4bb6eaa08e..d2723ac8bb04 100644 
--- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,34 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -/** - * intel_gvt_hypervisor_enable_page_track - track a guest page - * @vgpu: a vGPU - * @gfn: the gfn of guest - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_enable_page_track( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - return intel_gvt_host.mpt->enable_page_track(vgpu, gfn); -} - -/** - * intel_gvt_hypervisor_disable_page_track - untrack a guest page - * @vgpu: a vGPU - * @gfn: the gfn of guest - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_disable_page_track( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - return intel_gvt_host.mpt->disable_page_track(vgpu, gfn); -} - /** * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c index 84856022528e..3375b51c75f1 100644 --- a/drivers/gpu/drm/i915/gvt/page_track.c +++ b/drivers/gpu/drm/i915/gvt/page_track.c @@ -87,7 +87,7 @@ void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu, track = radix_tree_delete(&vgpu->page_track_tree, gfn); if (track) { if (track->tracked) - intel_gvt_hypervisor_disable_page_track(vgpu, gfn); + intel_gvt_page_track_remove(vgpu, gfn); kfree(track); } } @@ -112,7 +112,7 @@ int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) if (track->tracked) return 0; - ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn); + ret = intel_gvt_page_track_add(vgpu, gfn); if (ret) return ret; track->tracked = true; @@ -139,7 +139,7 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) if (!track->tracked) return 0; - ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn); + ret = intel_gvt_page_track_remove(vgpu, gfn); if (ret) return ret; track->tracked = false; @@ -172,7 +172,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, if (unlikely(vgpu->failsafe)) { /* Remove write protection to prevent furture traps. */ - intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT); + intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT); } else { ret = page_track->handler(page_track, gpa, data, bytes); if (ret) From 8398eee85fd009bfb2797ea4d0a63b7854d05e46 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:50 +0200 Subject: [PATCH 24/41] drm/i915/gvt: devirtualize ->dma_{,un}map_guest_page Just call the functions directly. Also remove a pointless wrapper. 
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-22-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 10 ++-------- drivers/gpu/drm/i915/gvt/gtt.c | 20 +++++++++---------- drivers/gpu/drm/i915/gvt/gvt.h | 4 ++++ drivers/gpu/drm/i915/gvt/hypercall.h | 5 ----- drivers/gpu/drm/i915/gvt/kvmgt.c | 6 ++---- drivers/gpu/drm/i915/gvt/mpt.h | 29 ---------------------------- 6 files changed, 17 insertions(+), 57 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index cc1a9ac0d272..db93b6354327 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -54,12 +54,6 @@ static int vgpu_pin_dma_address(struct intel_vgpu *vgpu, return ret; } -static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu, - dma_addr_t dma_addr) -{ - intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr); -} - static int vgpu_gem_get_pages( struct drm_i915_gem_object *obj) { @@ -114,7 +108,7 @@ static int vgpu_gem_get_pages( for_each_sg(st->sgl, sg, i, j) { dma_addr = sg_dma_address(sg); if (dma_addr) - vgpu_unpin_dma_address(vgpu, dma_addr); + intel_gvt_dma_unmap_guest_page(vgpu, dma_addr); } sg_free_table(st); kfree(st); @@ -136,7 +130,7 @@ static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj, int i; for_each_sg(pages->sgl, sg, fb_info->size, i) - vgpu_unpin_dma_address(vgpu, + intel_gvt_dma_unmap_guest_page(vgpu, sg_dma_address(sg)); } diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index f6f3b22a70d2..9c5cc2800975 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1013,7 +1013,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt, if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn) return; - intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); + intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); } static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) @@ -1212,8 +1212,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, return PTR_ERR(sub_spt); for_each_shadow_entry(sub_spt, &sub_se, sub_index) { - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, - start_gfn + sub_index, PAGE_SIZE, &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index, + PAGE_SIZE, &dma_addr); if (ret) { ppgtt_invalidate_spt(spt); return ret; @@ -1258,8 +1258,8 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, ops->set_64k_splited(&entry); for (i = 0; i < GTT_64K_PTE_STRIDE; i++) { - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, - start_gfn + i, PAGE_SIZE, &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i, + PAGE_SIZE, &dma_addr); if (ret) return ret; @@ -1313,8 +1313,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, } /* direct shadow */ - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size, - &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr); if (ret) return -ENXIO; @@ -2245,8 +2244,7 @@ static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, pfn = pte_ops->get_pfn(entry); if (pfn != vgpu->gvt->gtt.scratch_mfn) - intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, - pfn << PAGE_SHIFT); + intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); } static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, @@ -2337,8 +2335,8 @@ static int emulate_ggtt_mmio_write(struct 
intel_vgpu *vgpu, unsigned int off, goto out; } - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, - PAGE_SIZE, &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, + &dma_addr); if (ret) { gvt_vgpu_err("fail to populate guest ggtt entry\n"); /* guest driver may read/write the entry when partial diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index a8a8728cd54a..4e42e6ac4077 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -767,6 +767,10 @@ void intel_gvt_debugfs_clean(struct intel_gvt *gvt); int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn); int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn); +int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, + unsigned long size, dma_addr_t *dma_addr); +void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, + dma_addr_t dma_addr); #include "trace.h" #include "mpt.h" diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index ded13a63ab66..ba03b3368a95 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -46,11 +46,6 @@ struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn, - unsigned long size, dma_addr_t *dma_addr); - void (*dma_unmap_guest_page)(struct intel_vgpu *vgpu, - dma_addr_t dma_addr); - int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr); }; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index fbef3d3dfb1f..a02274036487 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1874,7 +1874,7 @@ void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) vgpu->region = NULL; } -static int kvmgt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, +int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { struct gvt_dma *entry; @@ -1950,7 +1950,7 @@ static void __gvt_dma_release(struct kref *ref) __gvt_cache_remove_entry(entry->vgpu, entry); } -static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, +void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { struct gvt_dma *entry; @@ -1968,8 +1968,6 @@ static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu, static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .dma_map_guest_page = kvmgt_dma_map_guest_page, - .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, .dma_pin_guest_page = kvmgt_dma_pin_guest_page, }; diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index d2723ac8bb04..26c1ee690f7e 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,35 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -/** - * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page - * @vgpu: a vGPU - * @gfn: guest pfn - * @size: page size - * @dma_addr: retrieve allocated dma addr - * - * Returns: - * 0 on success, negative error code if failed. 
- */ -static inline int intel_gvt_hypervisor_dma_map_guest_page( - struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, - dma_addr_t *dma_addr) -{ - return intel_gvt_host.mpt->dma_map_guest_page(vgpu, gfn, size, - dma_addr); -} - -/** - * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page - * @vgpu: a vGPU - * @dma_addr: the mapped dma addr - */ -static inline void intel_gvt_hypervisor_dma_unmap_guest_page( - struct intel_vgpu *vgpu, dma_addr_t dma_addr) -{ - intel_gvt_host.mpt->dma_unmap_guest_page(vgpu, dma_addr); -} - /** * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf * @vgpu: a vGPU From 91879bbaf8890fe3595e1e580354462f80dc93de Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:51 +0200 Subject: [PATCH 25/41] drm/i915/gvt: devirtualize dma_pin_guest_page Just call the function directly and remove a pointless wrapper. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-23-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 14 +------------- drivers/gpu/drm/i915/gvt/gvt.h | 1 + drivers/gpu/drm/i915/gvt/hypercall.h | 2 -- drivers/gpu/drm/i915/gvt/kvmgt.c | 4 +--- drivers/gpu/drm/i915/gvt/mpt.h | 15 --------------- 5 files changed, 3 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index db93b6354327..90443306a9ad 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -42,18 +42,6 @@ #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12)) -static int vgpu_pin_dma_address(struct intel_vgpu *vgpu, - unsigned long size, - dma_addr_t dma_addr) -{ - int ret = 0; - - if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr)) - ret = -EINVAL; - - return ret; -} - static int vgpu_gem_get_pages( struct drm_i915_gem_object *obj) { @@ -89,7 +77,7 @@ static int vgpu_gem_get_pages( for_each_sg(st->sgl, sg, page_num, i) { dma_addr_t dma_addr = GEN8_DECODE_PTE(readq(>t_entries[i])); - if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) { + if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) { ret = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 4e42e6ac4077..938f57271737 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -767,6 +767,7 @@ void intel_gvt_debugfs_clean(struct intel_gvt *gvt); int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn); int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn); +int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr); int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr); void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index ba03b3368a95..d49437aeabac 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -45,8 +45,6 @@ struct intel_vgpu; struct intel_gvt_mpt { int (*host_init)(struct device *dev, void *gvt); void (*host_exit)(struct device *dev, void *gvt); - - int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr); }; #endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index a02274036487..606b2cb923d8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ 
b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1921,8 +1921,7 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, return ret; } -static int kvmgt_dma_pin_guest_page(struct intel_vgpu *vgpu, - dma_addr_t dma_addr) +int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { struct gvt_dma *entry; int ret = 0; @@ -1968,7 +1967,6 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, static const struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, - .dma_pin_guest_page = kvmgt_dma_pin_guest_page, }; struct intel_gvt_host intel_gvt_host = { diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 26c1ee690f7e..3be602a3f764 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -71,19 +71,4 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) intel_gvt_host.mpt->host_exit(dev, gvt); } -/** - * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf - * @vgpu: a vGPU - * @dma_addr: guest dma addr - * - * Returns: - * 0 on success, negative error code if failed. - */ -static inline int -intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, - dma_addr_t dma_addr) -{ - return intel_gvt_host.mpt->dma_pin_guest_page(vgpu, dma_addr); -} - #endif /* _GVT_MPT_H_ */ From 5f8f3fe67cd90807f01ebac744c7e6148a8f6cb7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:52 +0200 Subject: [PATCH 26/41] drm/i915/gvt: remove struct intel_gvt_mpt Just call the initialization and exit functions directly and remove this abstraction entirely. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-24-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 11 ++++- drivers/gpu/drm/i915/gvt/gvt.h | 12 ++--- drivers/gpu/drm/i915/gvt/hypercall.h | 50 ------------------- drivers/gpu/drm/i915/gvt/kvmgt.c | 39 ++------------- drivers/gpu/drm/i915/gvt/mpt.h | 74 ---------------------------- 5 files changed, 17 insertions(+), 169 deletions(-) delete mode 100644 drivers/gpu/drm/i915/gvt/hypercall.h delete mode 100644 drivers/gpu/drm/i915/gvt/mpt.h diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 9259f2a17398..047fb6c41788 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -135,7 +135,8 @@ static void intel_gvt_clean_device(struct drm_i915_private *i915) if (drm_WARN_ON(&i915->drm, !gvt)) return; - intel_gvt_hypervisor_host_exit(i915->drm.dev, gvt); + mdev_unregister_device(i915->drm.dev); + intel_gvt_cleanup_vgpu_type_groups(gvt); intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_clean_vgpu_types(gvt); @@ -235,13 +236,19 @@ static int intel_gvt_init_device(struct drm_i915_private *i915) intel_gvt_debugfs_init(gvt); - ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt); + ret = intel_gvt_init_vgpu_type_groups(gvt); if (ret) goto out_destroy_idle_vgpu; + ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_ops); + if (ret) + goto out_cleanup_vgpu_type_groups; + gvt_dbg_core("gvt device initialization is done\n"); return 0; +out_cleanup_vgpu_type_groups: + intel_gvt_cleanup_vgpu_type_groups(gvt); out_destroy_idle_vgpu: intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_debugfs_clean(gvt); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 938f57271737..865997c5005d 100644 --- 
a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -41,7 +41,6 @@ #include "intel_gvt.h" #include "debug.h" -#include "hypercall.h" #include "mmio.h" #include "reg.h" #include "interrupt.h" @@ -59,12 +58,6 @@ #define GVT_MAX_VGPU 8 -struct intel_gvt_host { - const struct intel_gvt_mpt *mpt; -}; - -extern struct intel_gvt_host intel_gvt_host; - /* Describe per-platform limitations. */ struct intel_gvt_device_info { u32 max_support_vgpus; @@ -773,9 +766,12 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr); +int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt); +void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt); + #include "trace.h" -#include "mpt.h" extern const struct intel_vgpu_ops intel_gvt_vgpu_ops; +extern const struct mdev_parent_ops intel_vgpu_mdev_ops; #endif diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h deleted file mode 100644 index d49437aeabac..000000000000 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * Authors: - * Eddie Dong - * Dexuan Cui - * Jike Song - * - * Contributors: - * Zhi Wang - * - */ - -#ifndef _GVT_HYPERCALL_H_ -#define _GVT_HYPERCALL_H_ - -#include - -struct device; -struct intel_vgpu; - -/* - * Specific GVT-g MPT modules function collections. Currently GVT-g supports - * both Xen and KVM by providing dedicated hypervisor-related MPT modules. - */ -struct intel_gvt_mpt { - int (*host_init)(struct device *dev, void *gvt); - void (*host_exit)(struct device *dev, void *gvt); -}; - -#endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 606b2cb923d8..f908867223ae 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -172,7 +172,7 @@ static struct attribute_group *gvt_vgpu_type_groups[] = { [0 ... 
NR_MAX_INTEL_VGPU_TYPES - 1] = NULL, }; -static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) +int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) { int i, j; struct intel_vgpu_type *type; @@ -201,7 +201,7 @@ static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) return -ENOMEM; } -static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) +void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) { int i; struct attribute_group *group; @@ -1665,8 +1665,9 @@ static const struct attribute_group *intel_vgpu_groups[] = { NULL, }; -static struct mdev_parent_ops intel_vgpu_mdev_ops = { +const struct mdev_parent_ops intel_vgpu_mdev_ops = { .mdev_attr_groups = intel_vgpu_groups, + .supported_type_groups = gvt_vgpu_type_groups, .create = intel_vgpu_create, .remove = intel_vgpu_remove, @@ -1679,29 +1680,6 @@ static struct mdev_parent_ops intel_vgpu_mdev_ops = { .ioctl = intel_vgpu_ioctl, }; -static int kvmgt_host_init(struct device *dev, void *gvt) -{ - int ret; - - ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt); - if (ret) - return ret; - - intel_vgpu_mdev_ops.supported_type_groups = gvt_vgpu_type_groups; - - ret = mdev_register_device(dev, &intel_vgpu_mdev_ops); - if (ret) - intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); - - return ret; -} - -static void kvmgt_host_exit(struct device *dev, void *gvt) -{ - mdev_unregister_device(dev); - intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); -} - int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) { struct kvm *kvm = info->kvm; @@ -1964,15 +1942,6 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, mutex_unlock(&vgpu->cache_lock); } -static const struct intel_gvt_mpt kvmgt_mpt = { - .host_init = kvmgt_host_init, - .host_exit = kvmgt_host_exit, -}; - -struct intel_gvt_host intel_gvt_host = { - .mpt = &kvmgt_mpt, -}; - static int __init kvmgt_init(void) { return intel_gvt_set_ops(&intel_gvt_vgpu_ops); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h deleted file mode 100644 index 3be602a3f764..000000000000 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * Authors: - * Eddie Dong - * Dexuan Cui - * Jike Song - * - * Contributors: - * Zhi Wang - * - */ - -#ifndef _GVT_MPT_H_ -#define _GVT_MPT_H_ - -#include "gvt.h" - -/** - * DOC: Hypervisor Service APIs for GVT-g Core Logic - * - * This is the glue layer between specific hypervisor MPT modules and GVT-g core - * logic. Each kind of hypervisor MPT module provides a collection of function - * callbacks and will be attached to GVT host when the driver is loading. - * GVT-g core logic will call these APIs to request specific services from - * hypervisor. - */ - -/** - * intel_gvt_hypervisor_host_init - init GVT-g host side - * - * Returns: - * Zero on success, negative error code if failed - */ -static inline int intel_gvt_hypervisor_host_init(struct device *dev, void *gvt) -{ - if (!intel_gvt_host.mpt->host_init) - return -ENODEV; - - return intel_gvt_host.mpt->host_init(dev, gvt); -} - -/** - * intel_gvt_hypervisor_host_exit - exit GVT-g host side - */ -static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) -{ - /* optional to provide */ - if (!intel_gvt_host.mpt->host_exit) - return; - - intel_gvt_host.mpt->host_exit(dev, gvt); -} - -#endif /* _GVT_MPT_H_ */ From 37e4bdbd5bad711c7db5458041416f3925d7aae5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:53 +0200 Subject: [PATCH 27/41] drm/i915/gvt: remove the extra vfio_device refcounting for dmabufs All the dmabufs are torn down when the vGPU is released, so there is no need for extra refcounting here. Based on a patch from Jason Gunthorpe. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-25-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 12 ------------ drivers/gpu/drm/i915/gvt/gvt.h | 1 - 2 files changed, 13 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 90443306a9ad..01e54b45c5c1 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -139,7 +139,6 @@ static void dmabuf_gem_object_free(struct kref *kref) dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj == obj) { list_del(pos); - vfio_device_put(vgpu->vfio_device); idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); kfree(dmabuf_obj->info); @@ -473,16 +472,6 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args) kref_init(&dmabuf_obj->kref); - mutex_lock(&vgpu->dmabuf_lock); - vgpu->vfio_device = vfio_device_get_from_dev(mdev_dev(vgpu->mdev)); - if (!vgpu->vfio_device) { - gvt_vgpu_err("failed to get vfio device\n"); - mutex_unlock(&vgpu->dmabuf_lock); - ret = -ENODEV; - goto out_free_info; - } - mutex_unlock(&vgpu->dmabuf_lock); - update_fb_info(gfx_plane_info, &fb_info); INIT_LIST_HEAD(&dmabuf_obj->list); @@ -587,7 +576,6 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu) dmabuf_obj->vgpu = NULL; idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); - vfio_device_put(vgpu->vfio_device); list_del(pos); /* dmabuf_obj might be freed in dmabuf_obj_put */ diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 865997c5005d..8565189e0c0d 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -231,7 +231,6 @@ struct intel_vgpu { struct kvm *kvm; struct work_struct release_work; atomic_t released; - struct vfio_device *vfio_device; struct vfio_group *vfio_group; struct kvm_page_track_notifier_node track_node;
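The common thread of patches 17 through 26 above is one mechanical transformation: a service that used to be reached indirectly through the struct intel_gvt_mpt function-pointer table is exported from kvmgt.c, now the only backend, and called directly, after which the mpt.h wrapper and the table entry are deleted. A minimal sketch of that before/after shape follows; it is illustrative only, the sample_* names are hypothetical stand-ins for the real intel_gvt_hypervisor_*/intel_gvt_* symbols, and it assumes a single KVM/VFIO implementation.

	/* Hypothetical sketch of the devirtualization pattern; not code from the series. */
	struct sample_vgpu;

	/* Before: the hook is reached through a method table, mirroring the
	 * intel_gvt_mpt wrappers that the diffs above remove.
	 */
	struct sample_mpt {
		int (*enable_page_track)(struct sample_vgpu *vgpu, unsigned long gfn);
	};

	static const struct sample_mpt *sample_mpt_ops;	/* filled in at module load */

	static inline int sample_track_via_mpt(struct sample_vgpu *vgpu,
					       unsigned long gfn)
	{
		return sample_mpt_ops->enable_page_track(vgpu, gfn);	/* indirect call */
	}

	/* After: the single implementation is exported and called directly, so
	 * both the wrapper above and its table entry can be deleted.
	 */
	int sample_page_track_add(struct sample_vgpu *vgpu, unsigned long gfn);

	static inline int sample_track_direct(struct sample_vgpu *vgpu,
					      unsigned long gfn)
	{
		return sample_page_track_add(vgpu, gfn);	/* direct call */
	}

With only one backend left, the indirection buys nothing, which is why patch 26 finishes the job by deleting struct intel_gvt_mpt along with hypercall.h and mpt.h.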
From 4456641232e2c1b1eb7d179449c5800b3ce9e9c1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:54 +0200 Subject: [PATCH 28/41] drm/i915/gvt: streamline intel_vgpu_create Initialize variables at declaration time, avoid pointless gotos and cater for the fact that intel_gvt_create_vgpu can't return NULL. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-26-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index f908867223ae..11bce3a91a22 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -720,26 +720,19 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num) static int intel_vgpu_create(struct mdev_device *mdev) { - struct intel_vgpu *vgpu = NULL; + struct device *pdev = mdev_parent_dev(mdev); + struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt; struct intel_vgpu_type *type; - struct device *pdev; - struct intel_gvt *gvt; - int ret; - - pdev = mdev_parent_dev(mdev); - gvt = kdev_to_i915(pdev)->gvt; + struct intel_vgpu *vgpu; type = &gvt->types[mdev_get_type_group_id(mdev)]; - if (!type) { - ret = -EINVAL; - goto out; - } + if (!type) + return -EINVAL; vgpu = intel_gvt_create_vgpu(gvt, type); - if (IS_ERR_OR_NULL(vgpu)) { - ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); - gvt_err("failed to create intel vgpu: %d\n", ret); - goto out; + if (IS_ERR(vgpu)) { + gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu)); + return PTR_ERR(vgpu); } INIT_WORK(&vgpu->release_work, intel_vgpu_release_work); @@ -749,10 +742,7 @@ static int intel_vgpu_create(struct mdev_device *mdev) gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", dev_name(mdev_dev(mdev))); - ret = 0; - -out: - return ret; + return 0; } static int intel_vgpu_remove(struct mdev_device *mdev) From 7f11e6893ff01b63820a368851ca389293603dbe Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:55 +0200 Subject: [PATCH 29/41] drm/i915/gvt: pass a struct intel_vgpu to the vfio read/write helpers Pass the structure we actually care about instead of deriving it from the mdev_device in the lower level code. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-27-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 11bce3a91a22..5db74b6fe513 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1007,10 +1007,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, return 0; } -static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, +static ssize_t intel_vgpu_rw(struct intel_vgpu *vgpu, char *buf, size_t count, loff_t *ppos, bool is_write) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; int ret = -EINVAL; @@ -1056,9 +1055,8 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, return ret == 0 ? 
count : ret; } -static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos) +static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); struct intel_gvt *gvt = vgpu->gvt; int offset; @@ -1078,6 +1076,7 @@ static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos) static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, size_t count, loff_t *ppos) { + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); unsigned int done = 0; int ret; @@ -1086,10 +1085,10 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, /* Only support GGTT entry 8 bytes read */ if (count >= 8 && !(*ppos % 8) && - gtt_entry(mdev, ppos)) { + gtt_entry(vgpu, ppos)) { u64 val; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1101,7 +1100,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, } else if (count >= 4 && !(*ppos % 4)) { u32 val; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1113,7 +1112,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, } else if (count >= 2 && !(*ppos % 2)) { u16 val; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1125,7 +1124,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, } else { u8 val; - ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos, + ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1152,6 +1151,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, const char __user *buf, size_t count, loff_t *ppos) { + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); unsigned int done = 0; int ret; @@ -1160,13 +1160,13 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, /* Only support GGTT entry 8 bytes write */ if (count >= 8 && !(*ppos % 8) && - gtt_entry(mdev, ppos)) { + gtt_entry(vgpu, ppos)) { u64 val; if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; @@ -1178,7 +1178,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; @@ -1190,7 +1190,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, (char *)&val, + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; @@ -1202,7 +1202,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, &val, sizeof(val), + ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; From 0e09f4066ad11bcb4fa1b2d1a6f07fc0dc9788f9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:56 +0200 Subject: [PATCH 30/41] drm/i915/gvt: remove kvmgt_guest_{init,exit} Merge these into their only 
callers. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-28-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 129 ++++++++++++++----------------- 1 file changed, 60 insertions(+), 69 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 5db74b6fe513..38629ab4c9bf 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -100,6 +100,13 @@ struct gvt_dma { struct kref ref; }; +static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, + const u8 *val, int len, + struct kvm_page_track_notifier_node *node); +static void kvmgt_page_track_flush_slot(struct kvm *kvm, + struct kvm_memory_slot *slot, + struct kvm_page_track_notifier_node *node); + static ssize_t available_instances_show(struct mdev_type *mtype, struct mdev_type_attribute *attr, char *buf) @@ -213,9 +220,7 @@ void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) } } -static int kvmgt_guest_init(struct mdev_device *mdev); static void intel_vgpu_release_work(struct work_struct *work); -static bool kvmgt_guest_exit(struct intel_vgpu *info); static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size) @@ -803,6 +808,27 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb, return NOTIFY_OK; } +static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu) +{ + struct intel_vgpu *itr; + int id; + bool ret = false; + + mutex_lock(&vgpu->gvt->lock); + for_each_active_vgpu(vgpu->gvt, itr, id) { + if (!itr->attached) + continue; + + if (vgpu->kvm == itr->kvm) { + ret = true; + goto out; + } + } +out: + mutex_unlock(&vgpu->gvt->lock); + return ret; +} + static int intel_vgpu_open_device(struct mdev_device *mdev) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); @@ -847,14 +873,37 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) goto undo_group; } - ret = kvmgt_guest_init(mdev); - if (ret) + ret = -EEXIST; + if (vgpu->attached) goto undo_group; + ret = -ESRCH; + if (!vgpu->kvm || vgpu->kvm->mm != current->mm) { + gvt_vgpu_err("KVM is required to use Intel vGPU\n"); + goto undo_group; + } + + ret = -EEXIST; + if (__kvmgt_vgpu_exist(vgpu)) + goto undo_group; + + vgpu->attached = true; + kvm_get_kvm(vgpu->kvm); + + kvmgt_protect_table_init(vgpu); + gvt_cache_init(vgpu); + + vgpu->track_node.track_write = kvmgt_page_track_write; + vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; + kvm_page_track_register_notifier(vgpu->kvm, &vgpu->track_node); + + debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, + &vgpu->nr_cache_entries); + intel_gvt_activate_vgpu(vgpu); atomic_set(&vgpu->released, 0); - return ret; + return 0; undo_group: vfio_group_put_external_user(vgpu->vfio_group); @@ -908,7 +957,12 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) /* dereference module reference taken at open */ module_put(THIS_MODULE); - kvmgt_guest_exit(vgpu); + debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs)); + + kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node); + kvm_put_kvm(vgpu->kvm); + kvmgt_protect_table_destroy(vgpu); + gvt_cache_destroy(vgpu); intel_vgpu_release_msi_eventfd_ctx(vgpu); vfio_group_put_external_user(vgpu->vfio_group); @@ -1763,69 +1817,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, write_unlock(&kvm->mmu_lock); } -static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct 
kvm *kvm) -{ - struct intel_vgpu *itr; - int id; - bool ret = false; - - mutex_lock(&vgpu->gvt->lock); - for_each_active_vgpu(vgpu->gvt, itr, id) { - if (!itr->attached) - continue; - - if (kvm && kvm == itr->kvm) { - ret = true; - goto out; - } - } -out: - mutex_unlock(&vgpu->gvt->lock); - return ret; -} - -static int kvmgt_guest_init(struct mdev_device *mdev) -{ - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - struct kvm *kvm = vgpu->kvm; - - if (vgpu->attached) - return -EEXIST; - - if (!kvm || kvm->mm != current->mm) { - gvt_vgpu_err("KVM is required to use Intel vGPU\n"); - return -ESRCH; - } - - if (__kvmgt_vgpu_exist(vgpu, kvm)) - return -EEXIST; - - vgpu->attached = true; - kvm_get_kvm(vgpu->kvm); - - kvmgt_protect_table_init(vgpu); - gvt_cache_init(vgpu); - - vgpu->track_node.track_write = kvmgt_page_track_write; - vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; - kvm_page_track_register_notifier(kvm, &vgpu->track_node); - - debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, - &vgpu->nr_cache_entries); - return 0; -} - -static bool kvmgt_guest_exit(struct intel_vgpu *info) -{ - debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, info->debugfs)); - - kvm_page_track_unregister_notifier(info->kvm, &info->track_node); - kvm_put_kvm(info->kvm); - kvmgt_protect_table_destroy(info); - gvt_cache_destroy(info); - return true; -} - void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) { int i; From 978cf586ac35f34604e2d252a51b71192c39f1e4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:57 +0200 Subject: [PATCH 31/41] drm/i915/gvt: convert to use vfio_register_emulated_iommu_dev This is a straightforward conversion, the intel_vgpu already has a pointer to the vfio_dev, which can be replaced with the embedded structure and we can replace all the mdev_get_drvdata() with a simple container_of(). Based on a patch from Jason Gunthorpe.
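The conversion follows the standard vfio pattern: embed a struct vfio_device in the driver's per-instance structure, register it with vfio_register_emulated_iommu_dev(), and recover the private structure inside each vfio_device_ops callback with container_of() (the vfio_dev_to_vgpu() helper added below is exactly that). A rough sketch of the pattern with hypothetical demo_* names, not the driver's own types:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mdev.h>
#include <linux/slab.h>
#include <linux/vfio.h>

struct demo_vgpu {
	struct vfio_device vfio_device;
	/* driver-private state follows */
};

static struct demo_vgpu *to_demo_vgpu(struct vfio_device *vfio_dev)
{
	return container_of(vfio_dev, struct demo_vgpu, vfio_device);
}

/* Callbacks now take the vfio_device directly; no mdev drvdata lookups. */
static const struct vfio_device_ops demo_vgpu_dev_ops = {
	/* .open_device, .close_device, .read, .write, .mmap, .ioctl, ... */
};

static int demo_vgpu_probe(struct mdev_device *mdev)
{
	struct demo_vgpu *vgpu;
	int ret;

	vgpu = kzalloc(sizeof(*vgpu), GFP_KERNEL);
	if (!vgpu)
		return -ENOMEM;

	vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev, &demo_vgpu_dev_ops);
	dev_set_drvdata(&mdev->dev, vgpu);

	ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
	if (ret) {
		vfio_uninit_group_dev(&vgpu->vfio_device);
		kfree(vgpu);
	}
	return ret;
}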
Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-29-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 2 +- drivers/gpu/drm/i915/gvt/kvmgt.c | 190 ++++++++++++++++--------------- 2 files changed, 102 insertions(+), 90 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 8565189e0c0d..c32e8eb199f1 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -211,7 +211,7 @@ struct intel_vgpu { u32 scan_nonprivbb; - struct mdev_device *mdev; + struct vfio_device vfio_device; struct vfio_region *region; int num_regions; struct eventfd_ctx *intx_trigger; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 38629ab4c9bf..4113f850bf92 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -100,6 +100,9 @@ struct gvt_dma { struct kref ref; }; +#define vfio_dev_to_vgpu(vfio_dev) \ + container_of((vfio_dev), struct intel_vgpu, vfio_device) + static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *val, int len, struct kvm_page_track_notifier_node *node); @@ -723,44 +726,6 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num) return ret; } -static int intel_vgpu_create(struct mdev_device *mdev) -{ - struct device *pdev = mdev_parent_dev(mdev); - struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt; - struct intel_vgpu_type *type; - struct intel_vgpu *vgpu; - - type = &gvt->types[mdev_get_type_group_id(mdev)]; - if (!type) - return -EINVAL; - - vgpu = intel_gvt_create_vgpu(gvt, type); - if (IS_ERR(vgpu)) { - gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu)); - return PTR_ERR(vgpu); - } - - INIT_WORK(&vgpu->release_work, intel_vgpu_release_work); - - vgpu->mdev = mdev; - mdev_set_drvdata(mdev, vgpu); - - gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", - dev_name(mdev_dev(mdev))); - return 0; -} - -static int intel_vgpu_remove(struct mdev_device *mdev) -{ - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - - if (vgpu->attached) - return -EBUSY; - - intel_gvt_destroy_vgpu(vgpu); - return 0; -} - static int intel_vgpu_iommu_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -829,9 +794,9 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu) return ret; } -static int intel_vgpu_open_device(struct mdev_device *mdev) +static int intel_vgpu_open_device(struct vfio_device *vfio_dev) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned long events; int ret; struct vfio_group *vfio_group; @@ -840,7 +805,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier; events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; - ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, + ret = vfio_register_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &events, &vgpu->iommu_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", @@ -849,7 +814,7 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) } events = VFIO_GROUP_NOTIFY_SET_KVM; - ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, + ret = vfio_register_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &events, &vgpu->group_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", @@ -857,7 +822,8 @@ static int 
intel_vgpu_open_device(struct mdev_device *mdev) goto undo_iommu; } - vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev)); + vfio_group = + vfio_group_get_external_user_from_dev(vgpu->vfio_device.dev); if (IS_ERR_OR_NULL(vfio_group)) { ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group); gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n"); @@ -865,14 +831,6 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) } vgpu->vfio_group = vfio_group; - /* Take a module reference as mdev core doesn't take - * a reference for vendor driver. - */ - if (!try_module_get(THIS_MODULE)) { - ret = -ENODEV; - goto undo_group; - } - ret = -EEXIST; if (vgpu->attached) goto undo_group; @@ -910,11 +868,11 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) vgpu->vfio_group = NULL; undo_register: - vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, + vfio_unregister_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &vgpu->group_notifier); undo_iommu: - vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + vfio_unregister_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &vgpu->iommu_notifier); out: return ret; @@ -944,19 +902,16 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) intel_gvt_release_vgpu(vgpu); - ret = vfio_unregister_notifier(mdev_dev(vgpu->mdev), VFIO_IOMMU_NOTIFY, + ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_IOMMU_NOTIFY, &vgpu->iommu_notifier); drm_WARN(&i915->drm, ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); - ret = vfio_unregister_notifier(mdev_dev(vgpu->mdev), VFIO_GROUP_NOTIFY, + ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_GROUP_NOTIFY, &vgpu->group_notifier); drm_WARN(&i915->drm, ret, "vfio_unregister_notifier for group failed: %d\n", ret); - /* dereference module reference taken at open */ - module_put(THIS_MODULE); - debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs)); kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node); @@ -971,11 +926,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) vgpu->attached = false; } -static void intel_vgpu_close_device(struct mdev_device *mdev) +static void intel_vgpu_close_device(struct vfio_device *vfio_dev) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - - __intel_vgpu_release(vgpu); + __intel_vgpu_release(vfio_dev_to_vgpu(vfio_dev)); } static void intel_vgpu_release_work(struct work_struct *work) @@ -1127,10 +1080,10 @@ static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos) true : false; } -static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, +static ssize_t intel_vgpu_read(struct vfio_device *vfio_dev, char __user *buf, size_t count, loff_t *ppos) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int done = 0; int ret; @@ -1201,11 +1154,11 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, return -EFAULT; } -static ssize_t intel_vgpu_write(struct mdev_device *mdev, +static ssize_t intel_vgpu_write(struct vfio_device *vfio_dev, const char __user *buf, size_t count, loff_t *ppos) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int done = 0; int ret; @@ -1275,13 +1228,14 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, return -EFAULT; } -static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) +static int intel_vgpu_mmap(struct vfio_device *vfio_dev, + struct vm_area_struct 
*vma) { + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int index; u64 virtaddr; unsigned long req_size, pgoff, req_start; pgprot_t pg_prot; - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); if (index >= VFIO_PCI_ROM_REGION_INDEX) @@ -1404,10 +1358,10 @@ static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags, return func(vgpu, index, start, count, flags, data); } -static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, +static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd, unsigned long arg) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned long minsz; gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd); @@ -1682,14 +1636,9 @@ static ssize_t vgpu_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mdev_device *mdev = mdev_from_dev(dev); + struct intel_vgpu *vgpu = dev_get_drvdata(dev); - if (mdev) { - struct intel_vgpu *vgpu = (struct intel_vgpu *) - mdev_get_drvdata(mdev); - return sprintf(buf, "%d\n", vgpu->id); - } - return sprintf(buf, "\n"); + return sprintf(buf, "%d\n", vgpu->id); } static DEVICE_ATTR_RO(vgpu_id); @@ -1709,19 +1658,72 @@ static const struct attribute_group *intel_vgpu_groups[] = { NULL, }; +static const struct vfio_device_ops intel_vgpu_dev_ops = { + .open_device = intel_vgpu_open_device, + .close_device = intel_vgpu_close_device, + .read = intel_vgpu_read, + .write = intel_vgpu_write, + .mmap = intel_vgpu_mmap, + .ioctl = intel_vgpu_ioctl, +}; + +static int intel_vgpu_probe(struct mdev_device *mdev) +{ + struct device *pdev = mdev_parent_dev(mdev); + struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt; + struct intel_vgpu_type *type; + struct intel_vgpu *vgpu; + int ret; + + type = &gvt->types[mdev_get_type_group_id(mdev)]; + if (!type) + return -EINVAL; + + vgpu = intel_gvt_create_vgpu(gvt, type); + if (IS_ERR(vgpu)) { + gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu)); + return PTR_ERR(vgpu); + } + + INIT_WORK(&vgpu->release_work, intel_vgpu_release_work); + vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev, + &intel_vgpu_dev_ops); + + dev_set_drvdata(&mdev->dev, vgpu); + ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device); + if (ret) { + intel_gvt_destroy_vgpu(vgpu); + return ret; + } + + gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", + dev_name(mdev_dev(mdev))); + return 0; +} + +static void intel_vgpu_remove(struct mdev_device *mdev) +{ + struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev); + + if (WARN_ON_ONCE(vgpu->attached)) + return; + intel_gvt_destroy_vgpu(vgpu); +} + +static struct mdev_driver intel_vgpu_mdev_driver = { + .driver = { + .name = "intel_vgpu_mdev", + .owner = THIS_MODULE, + .dev_groups = intel_vgpu_groups, + }, + .probe = intel_vgpu_probe, + .remove = intel_vgpu_remove, +}; + const struct mdev_parent_ops intel_vgpu_mdev_ops = { - .mdev_attr_groups = intel_vgpu_groups, + .owner = THIS_MODULE, .supported_type_groups = gvt_vgpu_type_groups, - .create = intel_vgpu_create, - .remove = intel_vgpu_remove, - - .open_device = intel_vgpu_open_device, - .close_device = intel_vgpu_close_device, - - .read = intel_vgpu_read, - .write = intel_vgpu_write, - .mmap = intel_vgpu_mmap, - .ioctl = intel_vgpu_ioctl, + .device_driver = &intel_vgpu_mdev_driver, }; int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) @@ -1925,11 +1927,21 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu 
*vgpu, static int __init kvmgt_init(void) { - return intel_gvt_set_ops(&intel_gvt_vgpu_ops); + int ret; + + ret = intel_gvt_set_ops(&intel_gvt_vgpu_ops); + if (ret) + return ret; + + ret = mdev_register_driver(&intel_vgpu_mdev_driver); + if (ret) + intel_gvt_clear_ops(&intel_gvt_vgpu_ops); + return ret; } static void __exit kvmgt_exit(void) { + mdev_unregister_driver(&intel_vgpu_mdev_driver); intel_gvt_clear_ops(&intel_gvt_vgpu_ops); } From cba619cb0d4d66c743cf001c6b13c171a769a65f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 11 Apr 2022 16:13:58 +0200 Subject: [PATCH 32/41] drm/i915/gvt: merge gvt.c into kvmgvt.c The code in both files is deeply interconnected, so merge it and keep a bunch of structures and functions static. Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-30-hch@lst.de Reviewed-by: Jason Gunthorpe Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/Makefile | 1 - drivers/gpu/drm/i915/gvt/gvt.c | 291 ------------------------------ drivers/gpu/drm/i915/gvt/gvt.h | 6 - drivers/gpu/drm/i915/gvt/kvmgt.c | 264 ++++++++++++++++++++++++++- 4 files changed, 260 insertions(+), 302 deletions(-) delete mode 100644 drivers/gpu/drm/i915/gvt/gvt.c diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index ca8894049dca..1699f644298e 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -12,7 +12,6 @@ kvmgt-$(CONFIG_DRM_I915_GVT) += \ gvt/fb_decoder.o \ gvt/firmware.o \ gvt/gtt.o \ - gvt/gvt.o \ gvt/handlers.o \ gvt/interrupt.o \ gvt/kvmgt.o \ diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c deleted file mode 100644 index 047fb6c41788..000000000000 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * Authors: - * Kevin Tian - * Eddie Dong - * - * Contributors: - * Niu Bing - * Zhi Wang - * - */ - -#include -#include - -#include "i915_drv.h" -#include "intel_gvt.h" -#include "gvt.h" -#include -#include - -static void init_device_info(struct intel_gvt *gvt) -{ - struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); - - info->max_support_vgpus = 8; - info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; - info->mmio_size = 2 * 1024 * 1024; - info->mmio_bar = 0; - info->gtt_start_offset = 8 * 1024 * 1024; - info->gtt_entry_size = 8; - info->gtt_entry_size_shift = 3; - info->gmadr_bytes_in_cmd = 8; - info->max_surface_size = 36 * 1024 * 1024; - info->msi_cap_offset = pdev->msi_cap; -} - -static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt) -{ - struct intel_vgpu *vgpu; - int id; - - mutex_lock(&gvt->lock); - idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) { - if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id, - (void *)&gvt->service_request)) { - if (vgpu->active) - intel_vgpu_emulate_vblank(vgpu); - } - } - mutex_unlock(&gvt->lock); -} - -static int gvt_service_thread(void *data) -{ - struct intel_gvt *gvt = (struct intel_gvt *)data; - int ret; - - gvt_dbg_core("service thread start\n"); - - while (!kthread_should_stop()) { - ret = wait_event_interruptible(gvt->service_thread_wq, - kthread_should_stop() || gvt->service_request); - - if (kthread_should_stop()) - break; - - if (WARN_ONCE(ret, "service thread is waken up by signal.\n")) - continue; - - intel_gvt_test_and_emulate_vblank(gvt); - - if (test_bit(INTEL_GVT_REQUEST_SCHED, - (void *)&gvt->service_request) || - test_bit(INTEL_GVT_REQUEST_EVENT_SCHED, - (void *)&gvt->service_request)) { - intel_gvt_schedule(gvt); - } - } - - return 0; -} - -static void clean_service_thread(struct intel_gvt *gvt) -{ - kthread_stop(gvt->service_thread); -} - -static int init_service_thread(struct intel_gvt *gvt) -{ - init_waitqueue_head(&gvt->service_thread_wq); - - gvt->service_thread = kthread_run(gvt_service_thread, - gvt, "gvt_service_thread"); - if (IS_ERR(gvt->service_thread)) { - gvt_err("fail to start service thread.\n"); - return PTR_ERR(gvt->service_thread); - } - return 0; -} - -/** - * intel_gvt_clean_device - clean a GVT device - * @i915: i915 private - * - * This function is called at the driver unloading stage, to free the - * resources owned by a GVT device. - * - */ -static void intel_gvt_clean_device(struct drm_i915_private *i915) -{ - struct intel_gvt *gvt = fetch_and_zero(&i915->gvt); - - if (drm_WARN_ON(&i915->drm, !gvt)) - return; - - mdev_unregister_device(i915->drm.dev); - intel_gvt_cleanup_vgpu_type_groups(gvt); - intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); - intel_gvt_clean_vgpu_types(gvt); - - intel_gvt_debugfs_clean(gvt); - clean_service_thread(gvt); - intel_gvt_clean_cmd_parser(gvt); - intel_gvt_clean_sched_policy(gvt); - intel_gvt_clean_workload_scheduler(gvt); - intel_gvt_clean_gtt(gvt); - intel_gvt_free_firmware(gvt); - intel_gvt_clean_mmio_info(gvt); - idr_destroy(&gvt->vgpu_idr); - - kfree(i915->gvt); -} - -/** - * intel_gvt_init_device - initialize a GVT device - * @i915: drm i915 private data - * - * This function is called at the initialization stage, to initialize - * necessary GVT components. - * - * Returns: - * Zero on success, negative error code if failed. 
- * - */ -static int intel_gvt_init_device(struct drm_i915_private *i915) -{ - struct intel_gvt *gvt; - struct intel_vgpu *vgpu; - int ret; - - if (drm_WARN_ON(&i915->drm, i915->gvt)) - return -EEXIST; - - gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL); - if (!gvt) - return -ENOMEM; - - gvt_dbg_core("init gvt device\n"); - - idr_init_base(&gvt->vgpu_idr, 1); - spin_lock_init(&gvt->scheduler.mmio_context_lock); - mutex_init(&gvt->lock); - mutex_init(&gvt->sched_lock); - gvt->gt = to_gt(i915); - i915->gvt = gvt; - - init_device_info(gvt); - - ret = intel_gvt_setup_mmio_info(gvt); - if (ret) - goto out_clean_idr; - - intel_gvt_init_engine_mmio_context(gvt); - - ret = intel_gvt_load_firmware(gvt); - if (ret) - goto out_clean_mmio_info; - - ret = intel_gvt_init_irq(gvt); - if (ret) - goto out_free_firmware; - - ret = intel_gvt_init_gtt(gvt); - if (ret) - goto out_free_firmware; - - ret = intel_gvt_init_workload_scheduler(gvt); - if (ret) - goto out_clean_gtt; - - ret = intel_gvt_init_sched_policy(gvt); - if (ret) - goto out_clean_workload_scheduler; - - ret = intel_gvt_init_cmd_parser(gvt); - if (ret) - goto out_clean_sched_policy; - - ret = init_service_thread(gvt); - if (ret) - goto out_clean_cmd_parser; - - ret = intel_gvt_init_vgpu_types(gvt); - if (ret) - goto out_clean_thread; - - vgpu = intel_gvt_create_idle_vgpu(gvt); - if (IS_ERR(vgpu)) { - ret = PTR_ERR(vgpu); - gvt_err("failed to create idle vgpu\n"); - goto out_clean_types; - } - gvt->idle_vgpu = vgpu; - - intel_gvt_debugfs_init(gvt); - - ret = intel_gvt_init_vgpu_type_groups(gvt); - if (ret) - goto out_destroy_idle_vgpu; - - ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_ops); - if (ret) - goto out_cleanup_vgpu_type_groups; - - gvt_dbg_core("gvt device initialization is done\n"); - return 0; - -out_cleanup_vgpu_type_groups: - intel_gvt_cleanup_vgpu_type_groups(gvt); -out_destroy_idle_vgpu: - intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); - intel_gvt_debugfs_clean(gvt); -out_clean_types: - intel_gvt_clean_vgpu_types(gvt); -out_clean_thread: - clean_service_thread(gvt); -out_clean_cmd_parser: - intel_gvt_clean_cmd_parser(gvt); -out_clean_sched_policy: - intel_gvt_clean_sched_policy(gvt); -out_clean_workload_scheduler: - intel_gvt_clean_workload_scheduler(gvt); -out_clean_gtt: - intel_gvt_clean_gtt(gvt); -out_free_firmware: - intel_gvt_free_firmware(gvt); -out_clean_mmio_info: - intel_gvt_clean_mmio_info(gvt); -out_clean_idr: - idr_destroy(&gvt->vgpu_idr); - kfree(gvt); - i915->gvt = NULL; - return ret; -} - -static void intel_gvt_pm_resume(struct drm_i915_private *i915) -{ - struct intel_gvt *gvt = i915->gvt; - - intel_gvt_restore_fence(gvt); - intel_gvt_restore_mmio(gvt); - intel_gvt_restore_ggtt(gvt); -} - -const struct intel_vgpu_ops intel_gvt_vgpu_ops = { - .init_device = intel_gvt_init_device, - .clean_device = intel_gvt_clean_device, - .pm_resume = intel_gvt_pm_resume, -}; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index c32e8eb199f1..03ecffc2ba56 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -765,12 +765,6 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr); -int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt); -void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt); - #include "trace.h" -extern const struct intel_vgpu_ops intel_gvt_vgpu_ops; -extern const struct mdev_parent_ops intel_vgpu_mdev_ops; - #endif diff --git 
a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 4113f850bf92..f033031c676d 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1,7 +1,7 @@ /* * KVMGT - the implementation of Intel mediated pass-through framework for KVM * - * Copyright(c) 2014-2016 Intel Corporation. All rights reserved. + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,6 +26,11 @@ * Kevin Tian * Jike Song * Xiaoguang Chen + * Eddie Dong + * + * Contributors: + * Niu Bing + * Zhi Wang */ #include @@ -182,7 +187,7 @@ static struct attribute_group *gvt_vgpu_type_groups[] = { [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL, }; -int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) +static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) { int i, j; struct intel_vgpu_type *type; @@ -211,7 +216,7 @@ int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) return -ENOMEM; } -void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) +static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) { int i; struct attribute_group *group; @@ -1720,7 +1725,7 @@ static struct mdev_driver intel_vgpu_mdev_driver = { .remove = intel_vgpu_remove, }; -const struct mdev_parent_ops intel_vgpu_mdev_ops = { +static const struct mdev_parent_ops intel_vgpu_mdev_ops = { .owner = THIS_MODULE, .supported_type_groups = gvt_vgpu_type_groups, .device_driver = &intel_vgpu_mdev_driver, @@ -1925,6 +1930,257 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, mutex_unlock(&vgpu->cache_lock); } +static void init_device_info(struct intel_gvt *gvt) +{ + struct intel_gvt_device_info *info = &gvt->device_info; + struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); + + info->max_support_vgpus = 8; + info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; + info->mmio_size = 2 * 1024 * 1024; + info->mmio_bar = 0; + info->gtt_start_offset = 8 * 1024 * 1024; + info->gtt_entry_size = 8; + info->gtt_entry_size_shift = 3; + info->gmadr_bytes_in_cmd = 8; + info->max_surface_size = 36 * 1024 * 1024; + info->msi_cap_offset = pdev->msi_cap; +} + +static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + int id; + + mutex_lock(&gvt->lock); + idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) { + if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id, + (void *)&gvt->service_request)) { + if (vgpu->active) + intel_vgpu_emulate_vblank(vgpu); + } + } + mutex_unlock(&gvt->lock); +} + +static int gvt_service_thread(void *data) +{ + struct intel_gvt *gvt = (struct intel_gvt *)data; + int ret; + + gvt_dbg_core("service thread start\n"); + + while (!kthread_should_stop()) { + ret = wait_event_interruptible(gvt->service_thread_wq, + kthread_should_stop() || gvt->service_request); + + if (kthread_should_stop()) + break; + + if (WARN_ONCE(ret, "service thread is waken up by signal.\n")) + continue; + + intel_gvt_test_and_emulate_vblank(gvt); + + if (test_bit(INTEL_GVT_REQUEST_SCHED, + (void *)&gvt->service_request) || + test_bit(INTEL_GVT_REQUEST_EVENT_SCHED, + (void *)&gvt->service_request)) { + intel_gvt_schedule(gvt); + } + } + + return 0; +} + +static void clean_service_thread(struct intel_gvt *gvt) +{ + kthread_stop(gvt->service_thread); +} + +static int init_service_thread(struct intel_gvt *gvt) +{ + init_waitqueue_head(&gvt->service_thread_wq); + + 
gvt->service_thread = kthread_run(gvt_service_thread, + gvt, "gvt_service_thread"); + if (IS_ERR(gvt->service_thread)) { + gvt_err("fail to start service thread.\n"); + return PTR_ERR(gvt->service_thread); + } + return 0; +} + +/** + * intel_gvt_clean_device - clean a GVT device + * @i915: i915 private + * + * This function is called at the driver unloading stage, to free the + * resources owned by a GVT device. + * + */ +static void intel_gvt_clean_device(struct drm_i915_private *i915) +{ + struct intel_gvt *gvt = fetch_and_zero(&i915->gvt); + + if (drm_WARN_ON(&i915->drm, !gvt)) + return; + + mdev_unregister_device(i915->drm.dev); + intel_gvt_cleanup_vgpu_type_groups(gvt); + intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); + intel_gvt_clean_vgpu_types(gvt); + + intel_gvt_debugfs_clean(gvt); + clean_service_thread(gvt); + intel_gvt_clean_cmd_parser(gvt); + intel_gvt_clean_sched_policy(gvt); + intel_gvt_clean_workload_scheduler(gvt); + intel_gvt_clean_gtt(gvt); + intel_gvt_free_firmware(gvt); + intel_gvt_clean_mmio_info(gvt); + idr_destroy(&gvt->vgpu_idr); + + kfree(i915->gvt); +} + +/** + * intel_gvt_init_device - initialize a GVT device + * @i915: drm i915 private data + * + * This function is called at the initialization stage, to initialize + * necessary GVT components. + * + * Returns: + * Zero on success, negative error code if failed. + * + */ +static int intel_gvt_init_device(struct drm_i915_private *i915) +{ + struct intel_gvt *gvt; + struct intel_vgpu *vgpu; + int ret; + + if (drm_WARN_ON(&i915->drm, i915->gvt)) + return -EEXIST; + + gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL); + if (!gvt) + return -ENOMEM; + + gvt_dbg_core("init gvt device\n"); + + idr_init_base(&gvt->vgpu_idr, 1); + spin_lock_init(&gvt->scheduler.mmio_context_lock); + mutex_init(&gvt->lock); + mutex_init(&gvt->sched_lock); + gvt->gt = to_gt(i915); + i915->gvt = gvt; + + init_device_info(gvt); + + ret = intel_gvt_setup_mmio_info(gvt); + if (ret) + goto out_clean_idr; + + intel_gvt_init_engine_mmio_context(gvt); + + ret = intel_gvt_load_firmware(gvt); + if (ret) + goto out_clean_mmio_info; + + ret = intel_gvt_init_irq(gvt); + if (ret) + goto out_free_firmware; + + ret = intel_gvt_init_gtt(gvt); + if (ret) + goto out_free_firmware; + + ret = intel_gvt_init_workload_scheduler(gvt); + if (ret) + goto out_clean_gtt; + + ret = intel_gvt_init_sched_policy(gvt); + if (ret) + goto out_clean_workload_scheduler; + + ret = intel_gvt_init_cmd_parser(gvt); + if (ret) + goto out_clean_sched_policy; + + ret = init_service_thread(gvt); + if (ret) + goto out_clean_cmd_parser; + + ret = intel_gvt_init_vgpu_types(gvt); + if (ret) + goto out_clean_thread; + + vgpu = intel_gvt_create_idle_vgpu(gvt); + if (IS_ERR(vgpu)) { + ret = PTR_ERR(vgpu); + gvt_err("failed to create idle vgpu\n"); + goto out_clean_types; + } + gvt->idle_vgpu = vgpu; + + intel_gvt_debugfs_init(gvt); + + ret = intel_gvt_init_vgpu_type_groups(gvt); + if (ret) + goto out_destroy_idle_vgpu; + + ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_ops); + if (ret) + goto out_cleanup_vgpu_type_groups; + + gvt_dbg_core("gvt device initialization is done\n"); + return 0; + +out_cleanup_vgpu_type_groups: + intel_gvt_cleanup_vgpu_type_groups(gvt); +out_destroy_idle_vgpu: + intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); + intel_gvt_debugfs_clean(gvt); +out_clean_types: + intel_gvt_clean_vgpu_types(gvt); +out_clean_thread: + clean_service_thread(gvt); +out_clean_cmd_parser: + intel_gvt_clean_cmd_parser(gvt); +out_clean_sched_policy: + 
intel_gvt_clean_sched_policy(gvt); +out_clean_workload_scheduler: + intel_gvt_clean_workload_scheduler(gvt); +out_clean_gtt: + intel_gvt_clean_gtt(gvt); +out_free_firmware: + intel_gvt_free_firmware(gvt); +out_clean_mmio_info: + intel_gvt_clean_mmio_info(gvt); +out_clean_idr: + idr_destroy(&gvt->vgpu_idr); + kfree(gvt); + i915->gvt = NULL; + return ret; +} + +static void intel_gvt_pm_resume(struct drm_i915_private *i915) +{ + struct intel_gvt *gvt = i915->gvt; + + intel_gvt_restore_fence(gvt); + intel_gvt_restore_mmio(gvt); + intel_gvt_restore_ggtt(gvt); +} + +static const struct intel_vgpu_ops intel_gvt_vgpu_ops = { + .init_device = intel_gvt_init_device, + .clean_device = intel_gvt_clean_device, + .pm_resume = intel_gvt_pm_resume, +}; + static int __init kvmgt_init(void) { int ret; From 6c7f98b334a32df5cac8abac8983ac4ce17cab57 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:13:59 +0200 Subject: [PATCH 33/41] vfio/mdev: Remove vfio_mdev.c Now that all mdev drivers directly create their own mdev_device driver and directly register with the vfio core's vfio_device_ops this is all dead code. Delete vfio_mdev.c and the mdev_parent_ops members that are connected to it. Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-31-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- .../driver-api/vfio-mediated-device.rst | 3 - drivers/vfio/mdev/Makefile | 2 +- drivers/vfio/mdev/mdev_core.c | 40 +---- drivers/vfio/mdev/mdev_driver.c | 10 -- drivers/vfio/mdev/mdev_private.h | 2 - drivers/vfio/mdev/vfio_mdev.c | 152 ------------------ include/linux/mdev.h | 48 +----- 7 files changed, 6 insertions(+), 251 deletions(-) delete mode 100644 drivers/vfio/mdev/vfio_mdev.c diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst index 9f26079cacae..5a6e18a651a1 100644 --- a/Documentation/driver-api/vfio-mediated-device.rst +++ b/Documentation/driver-api/vfio-mediated-device.rst @@ -138,9 +138,6 @@ The structures in the mdev_parent_ops structure are as follows: * supported_config: attributes to define supported configurations * device_driver: device driver to bind for mediated device instances -The mdev_parent_ops also still has various functions pointers. Theses exist -for historical reasons only and shall not be used for new drivers. 
- When a driver wants to add the GUID creation sysfs to an existing device it has probe'd to then it should call:: diff --git a/drivers/vfio/mdev/Makefile b/drivers/vfio/mdev/Makefile index ff9ecd802125..7c236ba1b90e 100644 --- a/drivers/vfio/mdev/Makefile +++ b/drivers/vfio/mdev/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o vfio_mdev.o +mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o obj-$(CONFIG_VFIO_MDEV) += mdev.o diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index b314101237fe..8b1e86b9e8bc 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -89,17 +89,10 @@ void mdev_release_parent(struct kref *kref) static void mdev_device_remove_common(struct mdev_device *mdev) { struct mdev_parent *parent = mdev->type->parent; - int ret; mdev_remove_sysfs_files(mdev); device_del(&mdev->dev); lockdep_assert_held(&parent->unreg_sem); - if (parent->ops->remove) { - ret = parent->ops->remove(mdev); - if (ret) - dev_err(&mdev->dev, "Remove failed: err=%d\n", ret); - } - /* Balances with device_initialize() */ put_device(&mdev->dev); } @@ -131,7 +124,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) /* check for mandatory ops */ if (!ops || !ops->supported_type_groups) return -EINVAL; - if (!ops->device_driver && (!ops->create || !ops->remove)) + if (!ops->device_driver) return -EINVAL; dev = get_device(dev); @@ -297,18 +290,10 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) goto out_put_device; } - if (parent->ops->create) { - ret = parent->ops->create(mdev); - if (ret) - goto out_unlock; - } - ret = device_add(&mdev->dev); if (ret) - goto out_remove; + goto out_unlock; - if (!drv) - drv = &vfio_mdev_driver; ret = device_driver_attach(&drv->driver, &mdev->dev); if (ret) goto out_del; @@ -325,9 +310,6 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) out_del: device_del(&mdev->dev); -out_remove: - if (parent->ops->remove) - parent->ops->remove(mdev); out_unlock: up_read(&parent->unreg_sem); out_put_device: @@ -370,28 +352,14 @@ int mdev_device_remove(struct mdev_device *mdev) static int __init mdev_init(void) { - int rc; - - rc = mdev_bus_register(); - if (rc) - return rc; - rc = mdev_register_driver(&vfio_mdev_driver); - if (rc) - goto err_bus; - return 0; -err_bus: - mdev_bus_unregister(); - return rc; + return bus_register(&mdev_bus_type); } static void __exit mdev_exit(void) { - mdev_unregister_driver(&vfio_mdev_driver); - if (mdev_bus_compat_class) class_compat_unregister(mdev_bus_compat_class); - - mdev_bus_unregister(); + bus_unregister(&mdev_bus_type); } subsys_initcall(mdev_init) diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c index 7927ed4f1711..9c2af59809e2 100644 --- a/drivers/vfio/mdev/mdev_driver.c +++ b/drivers/vfio/mdev/mdev_driver.c @@ -74,13 +74,3 @@ void mdev_unregister_driver(struct mdev_driver *drv) driver_unregister(&drv->driver); } EXPORT_SYMBOL(mdev_unregister_driver); - -int mdev_bus_register(void) -{ - return bus_register(&mdev_bus_type); -} - -void mdev_bus_unregister(void) -{ - bus_unregister(&mdev_bus_type); -} diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index afbad7b0a14a..6999c89db7b1 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -37,8 +37,6 @@ struct mdev_type { #define to_mdev_type(_kobj) \ container_of(_kobj, struct mdev_type, kobj) -extern struct mdev_driver 
vfio_mdev_driver; - int parent_create_sysfs_files(struct mdev_parent *parent); void parent_remove_sysfs_files(struct mdev_parent *parent); diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c deleted file mode 100644 index a90e24b0c851..000000000000 --- a/drivers/vfio/mdev/vfio_mdev.c +++ /dev/null @@ -1,152 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * VFIO based driver for Mediated device - * - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * Author: Neo Jia - * Kirti Wankhede - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "mdev_private.h" - -static int vfio_mdev_open_device(struct vfio_device *core_vdev) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->open_device)) - return 0; - - return parent->ops->open_device(mdev); -} - -static void vfio_mdev_close_device(struct vfio_device *core_vdev) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (likely(parent->ops->close_device)) - parent->ops->close_device(mdev); -} - -static long vfio_mdev_unlocked_ioctl(struct vfio_device *core_vdev, - unsigned int cmd, unsigned long arg) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->ioctl)) - return 0; - - return parent->ops->ioctl(mdev, cmd, arg); -} - -static ssize_t vfio_mdev_read(struct vfio_device *core_vdev, char __user *buf, - size_t count, loff_t *ppos) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->read)) - return -EINVAL; - - return parent->ops->read(mdev, buf, count, ppos); -} - -static ssize_t vfio_mdev_write(struct vfio_device *core_vdev, - const char __user *buf, size_t count, - loff_t *ppos) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->write)) - return -EINVAL; - - return parent->ops->write(mdev, buf, count, ppos); -} - -static int vfio_mdev_mmap(struct vfio_device *core_vdev, - struct vm_area_struct *vma) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (unlikely(!parent->ops->mmap)) - return -EINVAL; - - return parent->ops->mmap(mdev, vma); -} - -static void vfio_mdev_request(struct vfio_device *core_vdev, unsigned int count) -{ - struct mdev_device *mdev = to_mdev_device(core_vdev->dev); - struct mdev_parent *parent = mdev->type->parent; - - if (parent->ops->request) - parent->ops->request(mdev, count); - else if (count == 0) - dev_notice(mdev_dev(mdev), - "No mdev vendor driver request callback support, blocked until released by user\n"); -} - -static const struct vfio_device_ops vfio_mdev_dev_ops = { - .name = "vfio-mdev", - .open_device = vfio_mdev_open_device, - .close_device = vfio_mdev_close_device, - .ioctl = vfio_mdev_unlocked_ioctl, - .read = vfio_mdev_read, - .write = vfio_mdev_write, - .mmap = vfio_mdev_mmap, - .request = vfio_mdev_request, -}; - -static int vfio_mdev_probe(struct mdev_device *mdev) -{ - struct vfio_device *vdev; - int ret; - - vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); - if (!vdev) - return -ENOMEM; - - vfio_init_group_dev(vdev, &mdev->dev, &vfio_mdev_dev_ops); - ret = vfio_register_emulated_iommu_dev(vdev); - if (ret) - goto 
out_uninit; - - dev_set_drvdata(&mdev->dev, vdev); - return 0; - -out_uninit: - vfio_uninit_group_dev(vdev); - kfree(vdev); - return ret; -} - -static void vfio_mdev_remove(struct mdev_device *mdev) -{ - struct vfio_device *vdev = dev_get_drvdata(&mdev->dev); - - vfio_unregister_group_dev(vdev); - vfio_uninit_group_dev(vdev); - kfree(vdev); -} - -struct mdev_driver vfio_mdev_driver = { - .driver = { - .name = "vfio_mdev", - .owner = THIS_MODULE, - .mod_name = KBUILD_MODNAME, - }, - .probe = vfio_mdev_probe, - .remove = vfio_mdev_remove, -}; diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 15d03f6532d0..192aed656116 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -40,40 +40,7 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); * @mdev_attr_groups: Attributes of the mediated device. * @supported_type_groups: Attributes to define supported types. It is mandatory * to provide supported types. - * @create: Called to allocate basic resources in parent device's - * driver for a particular mediated device. It is - * mandatory to provide create ops. - * @mdev: mdev_device structure on of mediated device - * that is being created - * Returns integer: success (0) or error (< 0) - * @remove: Called to free resources in parent device's driver for - * a mediated device. It is mandatory to provide 'remove' - * ops. - * @mdev: mdev_device device structure which is being - * destroyed - * Returns integer: success (0) or error (< 0) - * @read: Read emulation callback - * @mdev: mediated device structure - * @buf: read buffer - * @count: number of bytes to read - * @ppos: address. - * Retuns number on bytes read on success or error. - * @write: Write emulation callback - * @mdev: mediated device structure - * @buf: write buffer - * @count: number of bytes to be written - * @ppos: address. - * Retuns number on bytes written on success or error. - * @ioctl: IOCTL callback - * @mdev: mediated device structure - * @cmd: ioctl command - * @arg: arguments to ioctl - * @mmap: mmap callback - * @mdev: mediated device structure - * @vma: vma structure - * @request: request callback to release device - * @mdev: mediated device structure - * @count: request sequence number + * * Parent device that support mediated device should be registered with mdev * module with mdev_parent_ops structure. **/ @@ -83,19 +50,6 @@ struct mdev_parent_ops { const struct attribute_group **dev_attr_groups; const struct attribute_group **mdev_attr_groups; struct attribute_group **supported_type_groups; - - int (*create)(struct mdev_device *mdev); - int (*remove)(struct mdev_device *mdev); - int (*open_device)(struct mdev_device *mdev); - void (*close_device)(struct mdev_device *mdev); - ssize_t (*read)(struct mdev_device *mdev, char __user *buf, - size_t count, loff_t *ppos); - ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, - size_t count, loff_t *ppos); - long (*ioctl)(struct mdev_device *mdev, unsigned int cmd, - unsigned long arg); - int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma); - void (*request)(struct mdev_device *mdev, unsigned int count); }; /* interface for exporting mdev supported type attributes */ From e6486939d8ea22569af942a1888aa3824f59cc5e Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:14:00 +0200 Subject: [PATCH 34/41] vfio/mdev: Remove mdev_parent_ops dev_attr_groups This is only used by one sample to print a fixed string that is pointless. 
In general, having a device driver attach sysfs attributes to the parent is horrific. This should never happen, and always leads to some kind of liftime bug as it become very difficult for the sysfs attribute to go back to any data owned by the device driver. Remove the general mechanism to create this abuse. Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-32-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- drivers/vfio/mdev/mdev_sysfs.c | 12 ++---------- include/linux/mdev.h | 2 -- samples/vfio-mdev/mtty.c | 30 +----------------------------- 3 files changed, 3 insertions(+), 41 deletions(-) diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index f5cf1931c54e..66eef08833a4 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -197,7 +197,6 @@ void parent_remove_sysfs_files(struct mdev_parent *parent) remove_mdev_supported_type(type); } - sysfs_remove_groups(&parent->dev->kobj, parent->ops->dev_attr_groups); kset_unregister(parent->mdev_types_kset); } @@ -213,17 +212,10 @@ int parent_create_sysfs_files(struct mdev_parent *parent) INIT_LIST_HEAD(&parent->type_list); - ret = sysfs_create_groups(&parent->dev->kobj, - parent->ops->dev_attr_groups); - if (ret) - goto create_err; - ret = add_mdev_supported_type_groups(parent); if (ret) - sysfs_remove_groups(&parent->dev->kobj, - parent->ops->dev_attr_groups); - else - return ret; + goto create_err; + return 0; create_err: kset_unregister(parent->mdev_types_kset); diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 192aed656116..69df1fa2cb6f 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -36,7 +36,6 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); * * @owner: The module owner. * @device_driver: Which device driver to probe() on newly created devices - * @dev_attr_groups: Attributes of the parent device. * @mdev_attr_groups: Attributes of the mediated device. * @supported_type_groups: Attributes to define supported types. It is mandatory * to provide supported types. 
@@ -47,7 +46,6 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); struct mdev_parent_ops { struct module *owner; struct mdev_driver *device_driver; - const struct attribute_group **dev_attr_groups; const struct attribute_group **mdev_attr_groups; struct attribute_group **supported_type_groups; }; diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c index a0e1a469bd47..4f227dc26785 100644 --- a/samples/vfio-mdev/mtty.c +++ b/samples/vfio-mdev/mtty.c @@ -1207,38 +1207,11 @@ static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd, return -ENOTTY; } -static ssize_t -sample_mtty_dev_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "This is phy device\n"); -} - -static DEVICE_ATTR_RO(sample_mtty_dev); - -static struct attribute *mtty_dev_attrs[] = { - &dev_attr_sample_mtty_dev.attr, - NULL, -}; - -static const struct attribute_group mtty_dev_group = { - .name = "mtty_dev", - .attrs = mtty_dev_attrs, -}; - -static const struct attribute_group *mtty_dev_groups[] = { - &mtty_dev_group, - NULL, -}; - static ssize_t sample_mdev_dev_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (mdev_from_dev(dev)) - return sprintf(buf, "This is MDEV %s\n", dev_name(dev)); - - return sprintf(buf, "\n"); + return sprintf(buf, "This is MDEV %s\n", dev_name(dev)); } static DEVICE_ATTR_RO(sample_mdev_dev); @@ -1333,7 +1306,6 @@ static struct mdev_driver mtty_driver = { static const struct mdev_parent_ops mdev_fops = { .owner = THIS_MODULE, .device_driver = &mtty_driver, - .dev_attr_groups = mtty_dev_groups, .supported_type_groups = mdev_type_groups, }; From 6b42f491e17ce13f5ff7f2d1f49c73a0f4c47b20 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:14:01 +0200 Subject: [PATCH 35/41] vfio/mdev: Remove mdev_parent_ops The last useful member in this struct is the supported_type_groups, move it to the mdev_driver and delete mdev_parent_ops. 
Replace it with mdev_driver as an argument to mdev_register_device() Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-33-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- .../driver-api/vfio-mediated-device.rst | 24 ++++-------------- drivers/gpu/drm/i915/gvt/kvmgt.c | 7 +----- drivers/s390/cio/vfio_ccw_ops.c | 7 +----- drivers/s390/crypto/vfio_ap_ops.c | 9 ++----- drivers/vfio/mdev/mdev_core.c | 13 ++++------ drivers/vfio/mdev/mdev_private.h | 2 +- drivers/vfio/mdev/mdev_sysfs.c | 6 ++--- include/linux/mdev.h | 25 +++---------------- samples/vfio-mdev/mbochs.c | 9 ++----- samples/vfio-mdev/mdpy.c | 9 ++----- samples/vfio-mdev/mtty.c | 9 ++----- 11 files changed, 28 insertions(+), 92 deletions(-) diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst index 5a6e18a651a1..784bbeb22adc 100644 --- a/Documentation/driver-api/vfio-mediated-device.rst +++ b/Documentation/driver-api/vfio-mediated-device.rst @@ -105,6 +105,7 @@ structure to represent a mediated device's driver:: struct mdev_driver { int (*probe) (struct mdev_device *dev); void (*remove) (struct mdev_device *dev); + struct attribute_group **supported_type_groups; struct device_driver driver; }; @@ -119,30 +120,15 @@ to register and unregister itself with the core driver: extern void mdev_unregister_driver(struct mdev_driver *drv); -The mediated bus driver is responsible for adding mediated devices to the VFIO -group when devices are bound to the driver and removing mediated devices from -the VFIO when devices are unbound from the driver. - - -Physical Device Driver Interface --------------------------------- - -The physical device driver interface provides the mdev_parent_ops[3] structure -to define the APIs to manage work in the mediated core driver that is related -to the physical device. - -The structures in the mdev_parent_ops structure are as follows: - -* dev_attr_groups: attributes of the parent device -* mdev_attr_groups: attributes of the mediated device -* supported_config: attributes to define supported configurations -* device_driver: device driver to bind for mediated device instances +The mediated bus driver's probe function should create a vfio_device on top of +the mdev_device and connect it to an appropriate implementation of +vfio_device_ops. When a driver wants to add the GUID creation sysfs to an existing device it has probe'd to then it should call:: extern int mdev_register_device(struct device *dev, - const struct mdev_parent_ops *ops); + struct mdev_driver *mdev_driver); This will provide the 'mdev_supported_types/XX/create' files which can then be used to trigger the creation of a mdev_device. 
The created mdev_device will be diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index f033031c676d..0787ba5c301f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1723,12 +1723,7 @@ static struct mdev_driver intel_vgpu_mdev_driver = { }, .probe = intel_vgpu_probe, .remove = intel_vgpu_remove, -}; - -static const struct mdev_parent_ops intel_vgpu_mdev_ops = { - .owner = THIS_MODULE, .supported_type_groups = gvt_vgpu_type_groups, - .device_driver = &intel_vgpu_mdev_driver, }; int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) @@ -2131,7 +2126,7 @@ static int intel_gvt_init_device(struct drm_i915_private *i915) if (ret) goto out_destroy_idle_vgpu; - ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_ops); + ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver); if (ret) goto out_cleanup_vgpu_type_groups; diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index d8589afac272..c4d60cdbf247 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -656,17 +656,12 @@ struct mdev_driver vfio_ccw_mdev_driver = { }, .probe = vfio_ccw_mdev_probe, .remove = vfio_ccw_mdev_remove, -}; - -static const struct mdev_parent_ops vfio_ccw_mdev_ops = { - .owner = THIS_MODULE, - .device_driver = &vfio_ccw_mdev_driver, .supported_type_groups = mdev_type_groups, }; int vfio_ccw_mdev_reg(struct subchannel *sch) { - return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops); + return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver); } void vfio_ccw_mdev_unreg(struct subchannel *sch) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 6e08d04b605d..ee0a3bf8f476 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -1496,12 +1496,7 @@ static struct mdev_driver vfio_ap_matrix_driver = { }, .probe = vfio_ap_mdev_probe, .remove = vfio_ap_mdev_remove, -}; - -static const struct mdev_parent_ops vfio_ap_matrix_ops = { - .owner = THIS_MODULE, - .device_driver = &vfio_ap_matrix_driver, - .supported_type_groups = vfio_ap_mdev_type_groups, + .supported_type_groups = vfio_ap_mdev_type_groups, }; int vfio_ap_mdev_register(void) @@ -1514,7 +1509,7 @@ int vfio_ap_mdev_register(void) if (ret) return ret; - ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops); + ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_driver); if (ret) goto err_driver; return 0; diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index 8b1e86b9e8bc..19a20ff420b7 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -109,12 +109,12 @@ static int mdev_device_remove_cb(struct device *dev, void *data) /* * mdev_register_device : Register a device * @dev: device structure representing parent device. - * @ops: Parent device operation structure to be registered. + * @mdev_driver: Device driver to bind to the newly created mdev * * Add device to list of registered parent devices. * Returns a negative value on error, otherwise 0. 
*/ -int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) +int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver) { int ret; struct mdev_parent *parent; @@ -122,9 +122,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) char *envp[] = { env_string, NULL }; /* check for mandatory ops */ - if (!ops || !ops->supported_type_groups) - return -EINVAL; - if (!ops->device_driver) + if (!mdev_driver->supported_type_groups) return -EINVAL; dev = get_device(dev); @@ -151,7 +149,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) init_rwsem(&parent->unreg_sem); parent->dev = dev; - parent->ops = ops; + parent->mdev_driver = mdev_driver; if (!mdev_bus_compat_class) { mdev_bus_compat_class = class_compat_register("mdev_bus"); @@ -249,7 +247,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) int ret; struct mdev_device *mdev, *tmp; struct mdev_parent *parent = type->parent; - struct mdev_driver *drv = parent->ops->device_driver; + struct mdev_driver *drv = parent->mdev_driver; mutex_lock(&mdev_list_lock); @@ -271,7 +269,6 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) mdev->dev.parent = parent->dev; mdev->dev.bus = &mdev_bus_type; mdev->dev.release = mdev_device_release; - mdev->dev.groups = parent->ops->mdev_attr_groups; mdev->type = type; /* Pairs with the put in mdev_device_release() */ kobject_get(&type->kobj); diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index 6999c89db7b1..5a7ffd4e770f 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -15,7 +15,7 @@ void mdev_bus_unregister(void); struct mdev_parent { struct device *dev; - const struct mdev_parent_ops *ops; + struct mdev_driver *mdev_driver; struct kref ref; struct list_head next; struct kset *mdev_types_kset; diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index 66eef08833a4..5a3873d1a275 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -97,7 +97,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, { struct mdev_type *type; struct attribute_group *group = - parent->ops->supported_type_groups[type_group_id]; + parent->mdev_driver->supported_type_groups[type_group_id]; int ret; if (!group->name) { @@ -154,7 +154,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, static void remove_mdev_supported_type(struct mdev_type *type) { struct attribute_group *group = - type->parent->ops->supported_type_groups[type->type_group_id]; + type->parent->mdev_driver->supported_type_groups[type->type_group_id]; sysfs_remove_files(&type->kobj, (const struct attribute **)group->attrs); @@ -168,7 +168,7 @@ static int add_mdev_supported_type_groups(struct mdev_parent *parent) { int i; - for (i = 0; parent->ops->supported_type_groups[i]; i++) { + for (i = 0; parent->mdev_driver->supported_type_groups[i]; i++) { struct mdev_type *type; type = add_mdev_supported_type(parent, i); diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 69df1fa2cb6f..1f6f57a3c316 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -30,26 +30,6 @@ unsigned int mdev_get_type_group_id(struct mdev_device *mdev); unsigned int mtype_get_type_group_id(struct mdev_type *mtype); struct device *mtype_get_parent_dev(struct mdev_type *mtype); -/** - * struct mdev_parent_ops - Structure to be registered for each parent device to - * 
register the device to mdev module. - * - * @owner: The module owner. - * @device_driver: Which device driver to probe() on newly created devices - * @mdev_attr_groups: Attributes of the mediated device. - * @supported_type_groups: Attributes to define supported types. It is mandatory - * to provide supported types. - * - * Parent device that support mediated device should be registered with mdev - * module with mdev_parent_ops structure. - **/ -struct mdev_parent_ops { - struct module *owner; - struct mdev_driver *device_driver; - const struct attribute_group **mdev_attr_groups; - struct attribute_group **supported_type_groups; -}; - /* interface for exporting mdev supported type attributes */ struct mdev_type_attribute { struct attribute attr; @@ -74,12 +54,15 @@ struct mdev_type_attribute mdev_type_attr_##_name = \ * struct mdev_driver - Mediated device driver * @probe: called when new device created * @remove: called when device removed + * @supported_type_groups: Attributes to define supported types. It is mandatory + * to provide supported types. * @driver: device driver structure * **/ struct mdev_driver { int (*probe)(struct mdev_device *dev); void (*remove)(struct mdev_device *dev); + struct attribute_group **supported_type_groups; struct device_driver driver; }; @@ -98,7 +81,7 @@ static inline const guid_t *mdev_uuid(struct mdev_device *mdev) extern struct bus_type mdev_bus_type; -int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops); +int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver); void mdev_unregister_device(struct device *dev); int mdev_register_driver(struct mdev_driver *drv); diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c index e90c8552cc31..344c2901a82b 100644 --- a/samples/vfio-mdev/mbochs.c +++ b/samples/vfio-mdev/mbochs.c @@ -1412,12 +1412,7 @@ static struct mdev_driver mbochs_driver = { }, .probe = mbochs_probe, .remove = mbochs_remove, -}; - -static const struct mdev_parent_ops mdev_fops = { - .owner = THIS_MODULE, - .device_driver = &mbochs_driver, - .supported_type_groups = mdev_type_groups, + .supported_type_groups = mdev_type_groups, }; static const struct file_operations vd_fops = { @@ -1462,7 +1457,7 @@ static int __init mbochs_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mbochs_dev, &mdev_fops); + ret = mdev_register_device(&mbochs_dev, &mbochs_driver); if (ret) goto err_device; diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c index fe5d43e797b6..e8c46eb2e246 100644 --- a/samples/vfio-mdev/mdpy.c +++ b/samples/vfio-mdev/mdpy.c @@ -723,12 +723,7 @@ static struct mdev_driver mdpy_driver = { }, .probe = mdpy_probe, .remove = mdpy_remove, -}; - -static const struct mdev_parent_ops mdev_fops = { - .owner = THIS_MODULE, - .device_driver = &mdpy_driver, - .supported_type_groups = mdev_type_groups, + .supported_type_groups = mdev_type_groups, }; static const struct file_operations vd_fops = { @@ -771,7 +766,7 @@ static int __init mdpy_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mdpy_dev, &mdev_fops); + ret = mdev_register_device(&mdpy_dev, &mdpy_driver); if (ret) goto err_device; diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c index 4f227dc26785..f42a59ed2e3f 100644 --- a/samples/vfio-mdev/mtty.c +++ b/samples/vfio-mdev/mtty.c @@ -1301,12 +1301,7 @@ static struct mdev_driver mtty_driver = { }, .probe = mtty_probe, .remove = mtty_remove, -}; - -static const struct mdev_parent_ops mdev_fops = { - .owner = 
THIS_MODULE, - .device_driver = &mtty_driver, - .supported_type_groups = mdev_type_groups, + .supported_type_groups = mdev_type_groups, }; static void mtty_device_release(struct device *dev) @@ -1357,7 +1352,7 @@ static int __init mtty_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mtty_dev.dev, &mdev_fops); + ret = mdev_register_device(&mtty_dev.dev, &mtty_driver); if (ret) goto err_device; return 0; From 2aa72ec97ce9eb092cb69efd26f5eb2469e61734 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:14:02 +0200 Subject: [PATCH 36/41] vfio/mdev: Use the driver core to create the 'remove' file The device creator is supposed to use the dev.groups value to add sysfs files before device_add is called, not call sysfs_create_files() after device_add() returns. This creates a race with uevent delivery where the extra attribute will not be visible. This was being done because the groups had been co-opted by the mdev driver, now that prior patches have moved the driver's groups to the struct device_driver the dev.group is properly free for use here. Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-34-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- drivers/vfio/mdev/mdev_core.c | 1 + drivers/vfio/mdev/mdev_private.h | 2 ++ drivers/vfio/mdev/mdev_sysfs.c | 19 ++++++++++--------- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index 19a20ff420b7..b8b9e7911e55 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -269,6 +269,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid) mdev->dev.parent = parent->dev; mdev->dev.bus = &mdev_bus_type; mdev->dev.release = mdev_device_release; + mdev->dev.groups = mdev_device_groups; mdev->type = type; /* Pairs with the put in mdev_device_release() */ kobject_get(&type->kobj); diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index 5a7ffd4e770f..7c9fc79f3d83 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -32,6 +32,8 @@ struct mdev_type { unsigned int type_group_id; }; +extern const struct attribute_group *mdev_device_groups[]; + #define to_mdev_type_attr(_attr) \ container_of(_attr, struct mdev_type_attribute, attr) #define to_mdev_type(_kobj) \ diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index 5a3873d1a275..0ccfeb3dda24 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -244,11 +244,20 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_WO(remove); -static const struct attribute *mdev_device_attrs[] = { +static struct attribute *mdev_device_attrs[] = { &dev_attr_remove.attr, NULL, }; +static const struct attribute_group mdev_device_group = { + .attrs = mdev_device_attrs, +}; + +const struct attribute_group *mdev_device_groups[] = { + &mdev_device_group, + NULL +}; + int mdev_create_sysfs_files(struct mdev_device *mdev) { struct mdev_type *type = mdev->type; @@ -262,15 +271,8 @@ int mdev_create_sysfs_files(struct mdev_device *mdev) ret = sysfs_create_link(kobj, &type->kobj, "mdev_type"); if (ret) goto type_link_failed; - - ret = sysfs_create_files(kobj, mdev_device_attrs); - if (ret) - goto create_files_failed; - return ret; -create_files_failed: - sysfs_remove_link(kobj, "mdev_type"); 
type_link_failed: sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev)); return ret; @@ -280,7 +282,6 @@ void mdev_remove_sysfs_files(struct mdev_device *mdev) { struct kobject *kobj = &mdev->dev.kobj; - sysfs_remove_files(kobj, mdev_device_attrs); sysfs_remove_link(kobj, "mdev_type"); sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev)); } From 2917f53113be3b7a0f374e02cebe6d6b749366b5 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 11 Apr 2022 16:14:03 +0200 Subject: [PATCH 37/41] vfio/mdev: Remove mdev drvdata This is no longer used, remove it. All usages were moved over to either use container_of() from a vfio_device or to use dev_drvdata() directly on the mdev. Signed-off-by: Jason Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-35-hch@lst.de Reviewed-by: Kirti Wankhede Reviewed-by: Zhi Wang --- include/linux/mdev.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 1f6f57a3c316..bb539794f54a 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -15,7 +15,6 @@ struct mdev_type; struct mdev_device { struct device dev; guid_t uuid; - void *driver_data; struct list_head next; struct mdev_type *type; bool active; @@ -66,14 +65,6 @@ struct mdev_driver { struct device_driver driver; }; -static inline void *mdev_get_drvdata(struct mdev_device *mdev) -{ - return mdev->driver_data; -} -static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data) -{ - mdev->driver_data = data; -} static inline const guid_t *mdev_uuid(struct mdev_device *mdev) { return &mdev->uuid; From 5e9ae5c47052e28a31fb4f55a6e735c28d4c3948 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Mon, 25 Apr 2022 18:03:31 -0400 Subject: [PATCH 38/41] drm/i915/gvt: Add missing symbol export. When CONFIG_DRM_I915_DEBUG_RUNTIME and CONFIG_DRM_I915_DEBUG_PM are enabled, two more symbols in i915 need to be exported. Cc: Jani Nikula Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220425220331.24865-1-zhi.a.wang@intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_gvt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 7c03d975069e..24bc693439e8 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -309,6 +309,7 @@ EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT); EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT); @@ -316,3 +317,4 @@ EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT); EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT); EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT); EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_fence_ops, I915_GVT); From fa630c304b934bee63d437010fb3cbca55c8ee83 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Wed, 27 Apr 2022 17:28:48 -0400 Subject: [PATCH 39/41] drm/i915/gvt: Make intel_gvt_match_device() static After the refactor of GVT-g, intel_gvt_match_device() is only referenced in handlers.c. Make it static to keep the compiler happy.
Fixes: e0f74ed4634d ("i915/gvt: Separate the MMIO tracking table from GVT-g") Cc: Jason Gunthorpe Cc: Jani Nikula Cc: Robert Beckett Cc: Lucas De Marchi Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220427212849.18109-1-zhi.a.wang@intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index b7bab95da3c5..3ce1a5a9e2da 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -71,7 +71,7 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) return 0; } -bool intel_gvt_match_device(struct intel_gvt *gvt, +static bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device) { return intel_gvt_get_device_type(gvt) & device; From 5b95b9d58fb0d7418c3d2d020099db789f66e7a1 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Wed, 27 Apr 2022 17:28:49 -0400 Subject: [PATCH 40/41] drm/i915/gvt: Fix the compiling error when CONFIG_DRM_I915_DEBUG_RUNTIME_PM=n A compile error was reported when CONFIG_DRM_I915_DEBUG_RUNTIME_PM=n. Fix the problem by using the pre-defined macro. Cc: Jason Gunthorpe Cc: Jani Nikula Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220427212849.18109-2-zhi.a.wang@intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_gvt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 24bc693439e8..e98b6d69a91a 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -309,7 +309,9 @@ EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT); EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT); +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, I915_GVT); +#endif EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT); EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT); From 419f8299ddad6070a6c95aaedf78e50265871f36 Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Wed, 27 Apr 2022 19:54:56 +0800 Subject: [PATCH 41/41] i915/gvt: Fix NULL pointer dereference in init_mmio_block_handlers Fix the following coccicheck error: ./drivers/gpu/drm/i915/gvt/handlers.c:2925:35-41: ERROR: block is NULL but dereferenced. Use gvt->mmio.mmio_block instead of block to avoid a NULL pointer dereference when find_mmio_block() returns NULL. Fixes: e0f74ed4634d ("i915/gvt: Separate the MMIO tracking table from GVT-g") Signed-off-by: Wan Jiabing Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20220427115457.836729-1-wanjiabing@vivo.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 3ce1a5a9e2da..ad31640c9bf2 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2916,7 +2916,7 @@ static int init_mmio_block_handlers(struct intel_gvt *gvt) block = find_mmio_block(gvt, VGT_PVINFO_PAGE); if (!block) { WARN(1, "fail to assign handlers to mmio block %x\n", - i915_mmio_reg_offset(block->offset)); + i915_mmio_reg_offset(gvt->mmio.mmio_block->offset)); return -ENODEV; }
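
As an illustration of the reworked mdev interface in the patches above (supported_type_groups now lives in struct mdev_driver, and mdev_register_device() takes the driver instead of an mdev_parent_ops), a minimal parent driver would look roughly like the sketch below. This is a sketch only, not part of the series; the foo_* names, foo_parent_dev, and foo_type_groups are hypothetical placeholders assumed to be provided elsewhere in the driver.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mdev.h>

/* Hypothetical parent device and type attribute groups, defined elsewhere. */
extern struct device foo_parent_dev;
extern struct attribute_group *foo_type_groups[];

static int foo_probe(struct mdev_device *mdev)
{
	/* A real driver would create a vfio_device on top of mdev here. */
	return 0;
}

static void foo_remove(struct mdev_device *mdev)
{
	/* Tear down whatever foo_probe() created. */
}

static struct mdev_driver foo_driver = {
	.driver = {
		.name = "foo_mdev",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
	.probe = foo_probe,
	.remove = foo_remove,
	/* Previously a member of mdev_parent_ops; now part of the driver. */
	.supported_type_groups = foo_type_groups,
};

static int __init foo_init(void)
{
	int ret;

	ret = mdev_register_driver(&foo_driver);
	if (ret)
		return ret;

	/* The parent is registered against the mdev_driver directly. */
	ret = mdev_register_device(&foo_parent_dev, &foo_driver);
	if (ret)
		mdev_unregister_driver(&foo_driver);
	return ret;
}

static void __exit foo_exit(void)
{
	mdev_unregister_device(&foo_parent_dev);
	mdev_unregister_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");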