Compare commits


49 Commits

Author SHA1 Message Date
Linus Torvalds 2061f18ad7 Capabilities patch for v6.19
There is only a single commit,
 
    Clarify the rootid_owns_currentns
 
 which introduces no functional change.  Ryan Foster had sent a patch
 to add testing of the security/commoncap.c:rootid_owns_currentns()
 function.  The patch pointed out that this function was not as clear
 as it should be.
 
 This commit has two purposes:
 
 1. Clarify the intent of the function in the name
 2. Split the function so that the base functionality is easier
    to test from a kunit test.
 
 This commit has been in linux-next since November 18 with no reported
 issues.  Ryan has posted an updated test patch based on this commit.
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEqb0/8XByttt4D8+UNXDaFycKziQFAmkyJQsACgkQNXDaFycK
 ziTuVAgAuNKlx3SH2G9JAk75pyg3LB5DOHZo9SUXeyPJ0E5Mr2zsYEBDfrL0Ai7N
 ERIMdGHu07xeVeO/zRCpHqV0ghiKX8PNKk41Ck0+SIBDw4CQ/OVEql2WJB229YRI
 0MljanjV9Zi3WPREpXQd7Hj0cYKIff+ZgzQ/CBKN4co5HH9VXkggnm13zXoejQiR
 GZOsV/uVkLeXy9wXBsnySZ4p5PkCiqsDn8dp7RgNSHLDoh4s+Aj0zvxlCyeNr2IY
 tKS8iXsxMWgZyVsP6VOZkSRvXRTzgL8My+zCnCV10j8aHvw/LKrKW+iGePHDu6Pa
 CTw2S4I+AhIy0KtYKZSqqvllnX/low==
 =MaY2
 -----END PGP SIGNATURE-----

Merge tag 'caps-pr-20251204' of git://git.kernel.org/pub/scm/linux/kernel/git/sergeh/linux

Pull capabilities update from Serge Hallyn:
 "Ryan Foster had sent a patch to add testing of the
  rootid_owns_currentns() function. That patch pointed out
  that this function was not as clear as it should be. Fix it:

   - Clarify the intent of the function in the name

   - Split the function so that the base functionality is easier to test
     from a kunit test"

* tag 'caps-pr-20251204' of git://git.kernel.org/pub/scm/linux/kernel/git/sergeh/linux:
  Clarify the rootid_owns_currentns
2025-12-04 20:10:28 -08:00
Linus Torvalds deb879faa9 drm next part 2 for 6.19-rc1
vfio:
 - add a vfio_pci variant driver for Intel
 
 xe/i915 display:
 - add plane color management support
 
 xe:
 - Add scope-based cleanup helper for runtime PM
 - vfio xe driver prerequisites and exports
 - fix vfio link error
 - Fix a memory leak
 - Fix a 64-bit division
 - vf migration fix
 - LRC pause fix
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmkyMUgACgkQDHTzWXnE
 hr69pg/9EWjh7qVGk9ZIpYc9AW42UzWwOVBX/HWkuQvmfxUUBqtA3IuP0dGGmPUn
 QbtbetbRvlCaXwEoZpPh1nzrXA2AGFxgHErYMO5BfwquyBcfpwTWZ9T15ptceL/3
 aw2l63aH1R2/yxCRfHFIdwAmq1bThqdh5IkjjbE3im0V0lHT2Uo/jhmf/EWCNWol
 LlPgYxHpfBIzhtFYUcniaXxs9vOSk49AY+ObpPpuvks8OWoaaTcKYWlUCHr/X1ip
 OnWB4NGraTzx4l44vqdRvRL5/KPY7N2IcAxU7rXFTacWp6UoESph5DCYLsPREONb
 OsK1pVbAsKATobeoAC9J+utILhfDmKM8Z7eSAlNE+X+nk/BKu4h9Pp1TnKfo7bCz
 0tER/OrsqnYMfxj1PawT3xpf/KUWkL0aqnRJpmA2cvJqTz8Qnb4h6kRQp1iAKp80
 XaBL1v0uzVE/J4ffuA5bzkT71w3hjN5ytLyEe7h1Y43E/jxyQgyTIHM8cX/UrreJ
 RboaakyoTv1u1xrd9Mzx4WCzwKryH+JFY2nekAC3YnSCcGYnSScSNM/ARTrYC2pf
 wNbWBvkq7ZFy9eybaZQ/zaSYyVO7yQDjdCAqO+SA+xfRuwF41uiADJptyC+FgMPw
 nIBaeid314tJQ9uGNPJH0f2BzLzSvH569trUp/7hbOYWC69XeQI=
 =jyth
 -----END PGP SIGNATURE-----

Merge tag 'drm-next-2025-12-05' of https://gitlab.freedesktop.org/drm/kernel

Pull more drm updates from Dave Airlie:
 "There was some additional intel code for color operations we wanted to
  land. However I discovered I missed a pull for the xe vfio driver
  which I had sorted into 6.20 in my brain, until Thomas mentioned it.

  This contains the xe vfio code, a bunch of xe fixes that were waiting
  and the i915 color management support. I'd like to include it as part
  of keeping the two main vendors on the same page and giving a good
  cross-driver experience for userspace when it starts using it.

  vfio:
   - add a vfio_pci variant driver for Intel

  xe/i915 display:
   - add plane color management support

  xe:
   - Add scope-based cleanup helper for runtime PM
   - vfio xe driver prerequisites and exports
   - fix vfio link error
   - Fix a memory leak
   - Fix a 64-bit division
   - vf migration fix
   - LRC pause fix"

* tag 'drm-next-2025-12-05' of https://gitlab.freedesktop.org/drm/kernel: (25 commits)
  drm/i915/color: Enable Plane Color Pipelines
  drm/i915/color: Add 3D LUT to color pipeline
  drm/i915/color: Add registers for 3D LUT
  drm/i915/color: Program Plane Post CSC Registers
  drm/i915/color: Program Pre-CSC registers
  drm/i915/color: Add framework to program PRE/POST CSC LUT
  drm/i915: Add register definitions for Plane Post CSC
  drm/i915: Add register definitions for Plane Degamma
  drm/i915/color: Add plane CTM callback for D12 and beyond
  drm/i915/color: Preserve sign bit when int_bits is Zero
  drm/i915/color: Add framework to program CSC
  drm/i915/color: Create a transfer function color pipeline
  drm/i915/color: Add helper to create intel colorop
  drm/i915: Add intel_color_op
  drm/i915/display: Add identifiers for driver specific blocks
  drm/xe/pf: fix VFIO link error
  drm/xe: Protect against unset LRC when pausing submissions
  drm/xe/vf: Start re-emission from first unsignaled job during VF migration
  drm/xe/pf: Use div_u64 when calculating GGTT profile
  drm/xe: Fix memory leak when handling pagefault vma
  ...
2025-12-04 19:42:53 -08:00
Linus Torvalds 028bd4a146 Hi,
This pull request for the TPM driver contains changes to unify TPM return
 code translation between trusted_tpm2 and the TPM driver itself. Other
 than that, the changes are either bug fixes or minor improvements.
 
 Change log that should explain the previous iterations:
 
 1. "Documentation: tpm-security.rst: change title to section"
    https://lore.kernel.org/all/86514a6ab364e01f163470a91cacef120e1b8b47.camel@HansenPartnership.com/
 2. "drivers/char/tpm: use min() instead of min_t()"
    https://lore.kernel.org/all/20251201161228.3c09d88a@pumpkin/
 3. Removed spurious kfree(): https://lore.kernel.org/linux-integrity/aS+K5nO2MP7N+kxQ@ly-workstation/
 
 BR, Jarkko
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRE6pSOnaBC00OEHEIaerohdGur0gUCaTComgAKCRAaerohdGur
 0t38AQDThfcJhDmgfR3zYo0C8rtNQwM06fnooqsiDjTRHYXu6QEArRKJfR9B/vpN
 vIAloxIgIUxQbewBJ1DfxJ7OVO2kGwA=
 =hMwZ
 -----END PGP SIGNATURE-----

Merge tag 'tpmdd-next-6.19-rc1-v4' of git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd

Pull tpm updates from Jarkko Sakkinen:
 "This contains changes to unify TPM return code translation between
  trusted_tpm2 and TPM driver itself. Other than that the changes are
  either bug fixes or minor imrovements"

* tag 'tpmdd-next-6.19-rc1-v4' of git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd:
  KEYS: trusted: Use tpm_ret_to_err() in trusted_tpm2
  tpm: Use -EPERM as fallback error code in tpm_ret_to_err
  tpm: Cap the number of PCR banks
  tpm: Remove tpm_find_get_ops
  tpm: add WQ_PERCPU to alloc_workqueue users
  tpm_crb: add missing loc parameter to kerneldoc
  tpm_crb: Fix a spelling mistake
  selftests: tpm2: Fix ill defined assertions
2025-12-04 19:30:09 -08:00
Linus Torvalds 16460bf96c ata changes for 6.19-rc1
- Add DT binding for the Eswin EIC7700 SoC SATA Controller (Yulin Lu)
 
  - Allow 'iommus' property in the Synopsys DWC AHCI SATA controller
    DT binding (Rob Herring)
 
  - Replace deprecated strcpy with strscpy in the pata_it821x driver
    (Thorsten Blum)
 
  - Add Iomega Clik! PCMCIA ATA/ATAPI Adapter PCMCIA ID to the
    pata_pcmcia driver (René Rebe)
 
  - Add ATA_QUIRK_NOLPM quirk for two Silicon Motion SSDs with broken
    LPM support (me)
 
  - Add flag WQ_PERCPU to the workqueue in the libata-sff helper
    library to explicitly request the use of the per-CPU behavior
    (Marco Crivellari)
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRN+ES/c4tHlMch3DzJZDGjmcZNcgUCaS67dQAKCRDJZDGjmcZN
 ciMEAQDxxWeELIIRO63Cu2x2dp/A/BHvDDk2lNMa2bQQgcdfAAD/cwBEDybSHZQR
 OZU+exFNvEb2ChHZYV6hw6+b73x0VQs=
 =/F07
 -----END PGP SIGNATURE-----

Merge tag 'ata-6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/libata/linux

Pull ata updates from Niklas Cassel:

 - Add DT binding for the Eswin EIC7700 SoC SATA Controller (Yulin Lu)

 - Allow 'iommus' property in the Synopsys DWC AHCI SATA controller DT
   binding (Rob Herring)

 - Replace deprecated strcpy with strscpy in the pata_it821x driver
   (Thorsten Blum)

 - Add Iomega Clik! PCMCIA ATA/ATAPI Adapter PCMCIA ID to the
   pata_pcmcia driver (René Rebe)

 - Add ATA_QUIRK_NOLPM quirk for two Silicon Motion SSDs with broken LPM
   support (me)

 - Add flag WQ_PERCPU to the workqueue in the libata-sff helper library
   to explicitly request the use of the per-CPU behavior (Marco
   Crivellari)

* tag 'ata-6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/libata/linux:
  ata: libata-core: Disable LPM on Silicon Motion MD619{H,G}XCLDE3TC
  ata: pata_pcmcia: Add Iomega Clik! PCMCIA ATA/ATAPI Adapter
  ata: libata-sff: add WQ_PERCPU to alloc_workqueue users
  dt-bindings: ata: snps,dwc-ahci: Allow 'iommus' property
  ata: pata_it821x: Replace deprecated strcpy with strscpy in it821x_display_disk
  dt-bindings: ata: eswin: Document for EIC7700 SoC ahci
2025-12-04 19:27:11 -08:00
Dave Airlie c7685d1110 Merge tag 'topic/drm-intel-plane-color-pipeline-2025-12-04' of https://gitlab.freedesktop.org/drm/i915/kernel into drm-next
drm/i915 topic pull request for v6.19:

Features and functionality:
- Add plane color management support (Uma, Chaitanya)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patch.msgid.link/e7129c6afd6208719d2f5124da86e810505e7a7b@intel.com
2025-12-05 10:27:57 +10:00
Dave Airlie 86fafc584c Driver Changes:
- Fix a memory leak (Mika)
 - Fix a 64-bit division (Michal Wajdeczko)
 - vf migration fix (Matt Brost)
 - LRC pause fix (Tomasz Lis)
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRskUM7w1oG5rx2IZO4FpNVCsYGvwUCaTIFzAAKCRC4FpNVCsYG
 vzbpAP9l6oGkwKlAHNVMJRbzBXa8b42KmtbFk4rQkg6TvVrnsgD+M/rVo84Ldg/D
 rb07JTqzHgxlziIpzwri6UPRr8+9KAc=
 =Ax3c
 -----END PGP SIGNATURE-----

Merge tag 'drm-xe-next-fixes-2025-12-04' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

Driver Changes:
- Fix a memory leak (Mika)
- Fix a 64-bit division (Michal Wajdeczko)
- vf migration fix (Matt Brost)
- LRC pause fix (Tomasz Lis)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/aTIGiHJnnMtqbDOO@fedora
2025-12-05 10:21:19 +10:00
Dave Airlie e73c226204 Driver Changes:
- fix VFIO link error for built-in xe module (Arnd Bergmann)
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRskUM7w1oG5rx2IZO4FpNVCsYGvwUCaTH93wAKCRC4FpNVCsYG
 vyq+AQD5FC+6FE7ZnbPFNRpRkNpS+0V8XPVLKZ4A8zHle6Gu3wD/X9XqTjYR+rrw
 Z7n4kUSLCDWgZhaaPyNtGbnYvMDXRw4=
 =RfSs
 -----END PGP SIGNATURE-----

Merge tag 'topic/xe-vfio-2025-12-04' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

Driver Changes:
- fix VFIO link error for built-in xe module (Arnd Bergmann)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/aTIA9in2Bo_fA9TN@fedora
2025-12-05 10:16:47 +10:00
Dave Airlie 55a271a0f7 Cross-subsystem Changes:
- Add device specific vfio_pci driver variant for intel graphics (Michal Winiarski)
 
 Driver Changes:
 - Add scope-based cleanup helper for runtime PM (Matt Roper)
 - Additional xe driver prerequisites and exports (Michal Winiarski)
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRskUM7w1oG5rx2IZO4FpNVCsYGvwUCaS1ZrwAKCRC4FpNVCsYG
 v9iHAQCPfipm3pC3SykJCKe0Ve+gWfglHg4uuwaRLcvlO+fZsQD/T/enYeR+6omX
 S+owV2xgoVY0cEqYqPsEeOwp4Nj7BQs=
 =0Ouw
 -----END PGP SIGNATURE-----

Merge tag 'topic/xe-vfio-2025-12-01' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

Cross-subsystem Changes:
- Add device specific vfio_pci driver variant for intel graphics (Michal Winiarski)

Driver Changes:
- Add scope-based cleanup helper for runtime PM (Matt Roper)
- Additional xe driver prerequisites and exports (Michal Winiarski)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/aS1bNpqeem6PIHrA@fedora
2025-12-05 10:16:25 +10:00
Thomas Hellström 3f1c07fc21 Merge drm/drm-next into drm-xe-next-fixes
Backmerging to be able to do a clean PR.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
2025-12-04 22:54:56 +01:00
Uma Shankar 860daa4b0d drm/i915/color: Enable Plane Color Pipelines
Expose color pipeline and add ability to program it.

v2: Set bit to enable multisegmented lut
v3: s/drm_color_lut_32/drm_color_lut32 (Simon)
v4: - Fix dsb programming
    - Remove multi-segment LUT, they will be added in later patches
    - Add pipeline only to TGL+
    - Code Refactor

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-16-uma.shankar@intel.com
2025-12-04 19:44:36 +02:00
Chaitanya Kumar Borah 65db7a1f9c drm/i915/color: Add 3D LUT to color pipeline
Add helpers to program the 3D LUT registers and arm them.

LUT_3D_READY in LUT_3D_CTL is cleared off by the HW once
the LUT buffer is loaded into its internal working RAM.
So by the time we try to load/commit new values, we expect
it to be cleared off. If not, log an error and return
without writing new values. Do this only when writing with MMIO;
there is no way to read a register within DSB execution.

v2:
- Add information regarding LUT_3D_READY to commit message (Jani)
- Log error instead of a drm_warn and return without committing changes
  if 3DLUT HW is not ready to accept new values.
- Refactor intel_color_crtc_has_3dlut()
  Also remove Gen10 check (Suraj)
v3:
- Addressed review comments (Suraj)

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-15-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:47 +02:00
Chaitanya Kumar Borah 55b0f3cd09 drm/i915/color: Add registers for 3D LUT
Add registers needed to program 3D LUT

v2:
- Follow convention documented in i915_reg.h (Jani)
- Removing space in trailer (Suraj)
- Move registers to intel_color_regs.h

BSpec: 69378, 69379, 69380
Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-14-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:47 +02:00
Uma Shankar bf0fd73754 drm/i915/color: Program Plane Post CSC Registers
Extract the LUT and program plane post csc registers.

v2: Add DSB support
v3: Add support for single segment 1D LUT
v4:
- s/drm_color_lut_32/drm_color_lut32 (Simon)
- Move declaration to beginning of the function (Suraj)
- Remove multisegmented code, add it later
- Remove dead code for SDR planes, add it later
v5:
- Fix iterator issues
v6: Removed redundant variable (Suraj)

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-13-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:47 +02:00
Uma Shankar 82caa1c881 drm/i915/color: Program Pre-CSC registers
Add callback to program Pre-CSC LUT for TGL and beyond

v2: Add DSB support
v3: Add support for single segment 1D LUT color op
v4:
- s/drm_color_lut_32/drm_color_lut32/ (Simon)
- Change commit message (Suraj)
- Improve comments (Suraj)
- Remove multisegmented programming, to be added later
- Remove dead code for SDR planes, add when needed

BSpec: 50411, 50412, 50413, 50414
Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-12-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:47 +02:00
Uma Shankar 3b7476e786 drm/i915/color: Add framework to program PRE/POST CSC LUT
Add framework that will help in loading LUT to Pre/Post CSC color
blocks.

v2: Add dsb support
v3: Align enum names
v4: Propagate change in lut data to crtc_state

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-11-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:47 +02:00
Uma Shankar 05df71544c drm/i915: Add register definitions for Plane Post CSC
Add macros to define Plane Post CSC registers

v2:
- Add Plane Post CSC Gamma Multi Segment Enable bit
- Add BSpec entries (Suraj)
v3:
- Fix checkpatch issues (Suraj)

BSpec: 50403, 50404, 50405, 50406, 50409, 50410,
Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-10-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:47 +02:00
Uma Shankar ed0ebbc89f drm/i915: Add register definitions for Plane Degamma
Add macros to define Plane Degamma registers

v2:
 - Add BSpec links (Suraj)
v3:
 - Add Bspec links in trailer (Suraj)
 - Fix checkpatch issues (Suraj)

BSpec: 50411, 50412, 50413, 50414
Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-9-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:47 +02:00
Uma Shankar f00d02707d drm/i915/color: Add plane CTM callback for D12 and beyond
Add callback for setting CTM block in platforms D12 and beyond

v2:
- Add dsb support
- Pass plane_state as we are now doing a uapi to hw state copy
- Add support for 3x4 matrix

v3:
- Add relevant header file
- Fix typo (Suraj)
- Add callback to TGL+ (Suraj)

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-8-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:46 +02:00
Chaitanya Kumar Borah 6f1e094fb6 drm/i915/color: Preserve sign bit when int_bits is Zero
When int_bits == 0, we lose the sign bit when we do the range check
and apply the mask.

Fix this by ensuring a minimum of one integer bit, which guarantees space
for the sign bit in fully fractional representations (e.g. S0.12).

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-7-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:46 +02:00
Chaitanya Kumar Borah a78f1b6baf drm/i915/color: Add framework to program CSC
Add framework to program CSC. It enables copying of matrix from UAPI
to intel plane state. Also add helper functions which will eventually
program values to hardware.

Add a crtc state variable to track plane color change.

v2:
- Add crtc_state->plane_color_changed
- Improve comments (Suraj)
- s/intel_plane_*_color/intel_plane_color_* (Suraj)

v3:
- align parameters with open braces (Suraj)
- Improve commit message (Suraj)

v4:
- Re-arrange variable declaration (Suraj)

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-6-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:46 +02:00
Chaitanya Kumar Borah ef10531681 drm/i915/color: Create a transfer function color pipeline
Add a color pipeline with three colorops in the sequence

        1D LUT - 3x4 CTM - 1D LUT

This pipeline can be used to do any color space conversion or HDR
tone mapping

v2: Change namespace to drm_plane_colorop*
v3: Use simpler/pre-existing colorops for first iteration
v4:
 - s/*_tf_*/*_color_* (Jani)
 - Refactor to separate files (Jani)
 - Add missing space in comment (Suraj)
 - Consolidate patch that adds/attaches pipeline property
v5:
 - Limit MAX_COLOR_PIPELINES to 2.(Suraj)
	Increase it as and when we add more pipelines.
 - Remove redundant initialization code (Suraj)
v6:
 - Use drm_plane_create_color_pipeline_property() (Arun)
	Now MAX_COLOR_PIPELINES is 1

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-5-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:46 +02:00
Chaitanya Kumar Borah 730df5065e drm/i915/color: Add helper to create intel colorop
Add intel colorop create helper

v2:
 - Make function names consistent (Jani)
 - Remove redundant code related to colorop state
 - Refactor code to separate files

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-4-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:46 +02:00
Chaitanya Kumar Borah 3e9b06559a drm/i915: Add intel_color_op
Add data structure to store intel specific details of colorop

v2:
 - Remove dead code
 - Convert macro to function (Jani)
 - Remove colorop state as it is not being used
 - Refactor to separate file

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-3-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:46 +02:00
Chaitanya Kumar Borah 4cd8a64b15 drm/i915/display: Add identifiers for driver specific blocks
Add macros to identify intel specific color blocks. It will help
in mapping drm_color_ops to intel color HW blocks

v2:- Prefix enums with INTEL_* (Jani, Suraj)
   - Remove unnecessary comments (Jani)
   - Commit message improvements (Suraj)

Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Signed-off-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Signed-off-by: Uma Shankar <uma.shankar@intel.com>
Link: https://patch.msgid.link/20251203085211.3663374-2-uma.shankar@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
2025-12-04 19:43:46 +02:00
Arnd Bergmann e45b5df47b drm/xe/pf: fix VFIO link error
The Makefile logic for building xe_sriov_vfio.o was added incorrectly,
as setting CONFIG_XE_VFIO_PCI=m means it doesn't get included into a
built-in xe driver:

ERROR: modpost: "xe_sriov_vfio_stop_copy_enter" [drivers/vfio/pci/xe/xe-vfio-pci.ko] undefined!
ERROR: modpost: "xe_sriov_vfio_stop_copy_exit" [drivers/vfio/pci/xe/xe-vfio-pci.ko] undefined!
ERROR: modpost: "xe_sriov_vfio_suspend_device" [drivers/vfio/pci/xe/xe-vfio-pci.ko] undefined!
ERROR: modpost: "xe_sriov_vfio_wait_flr_done" [drivers/vfio/pci/xe/xe-vfio-pci.ko] undefined!
ERROR: modpost: "xe_sriov_vfio_error" [drivers/vfio/pci/xe/xe-vfio-pci.ko] undefined!
ERROR: modpost: "xe_sriov_vfio_resume_data_enter" [drivers/vfio/pci/xe/xe-vfio-pci.ko] undefined!
ERROR: modpost: "xe_sriov_vfio_resume_device" [drivers/vfio/pci/xe/xe-vfio-pci.ko] undefined!
ERROR: modpost: "xe_sriov_vfio_resume_data_exit" [drivers/vfio/pci/xe/xe-vfio-pci.ko] undefined!
ERROR: modpost: "xe_sriov_vfio_data_write" [drivers/vfio/pci/xe/xe-vfio-pci.ko] undefined!
ERROR: modpost: "xe_sriov_vfio_migration_supported" [drivers/vfio/pci/xe/xe-vfio-pci.ko] undefined!
WARNING: modpost: suppressed 3 unresolved symbol warnings because there were too many)

Check for CONFIG_XE_VFIO_PCI being enabled in the Makefile to decide whether to
include the object instead.

Fixes: bd45d46ffc ("drm/xe/pf: Export helpers for VFIO")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251204094154.1029357-1-arnd@kernel.org
(cherry picked from commit ef7de33544a7a6783d7afe09496da362d1e90ba1)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
2025-12-04 16:34:00 +01:00
Jarkko Sakkinen 09b71a58ee KEYS: trusted: Use tpm_ret_to_err() in trusted_tpm2
Use tpm_ret_to_err() to transmute TPM return codes in trusted_tpm2.

Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@opinsys.com>
Acked-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
2025-12-03 22:55:28 +02:00
Jarkko Sakkinen 7fcf459ac8 tpm: Use -EPERM as fallback error code in tpm_ret_to_err
Using -EFAULT as the tpm_ret_to_err() fallback error code makes it
incompatible with how trusted keys transmute TPM return codes.

Change the fallback to -EPERM in order to gain compatibility with trusted
keys. In addition, map TPM_RC_HASH to -EINVAL in order to be compatible
with tpm2_seal_trusted() return values.
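
For illustration, a minimal sketch of the shape such a helper takes after
this change (the exact set of TPM2_RC_* cases handled upstream may differ;
only the -EPERM fallback and the TPM_RC_HASH/-EINVAL mapping are what this
commit describes):

        static inline int tpm_ret_to_err(ssize_t ret)
        {
                /* Negative values are already -errno codes. */
                if (ret < 0)
                        return ret;

                switch (ret) {
                case TPM2_RC_SUCCESS:
                        return 0;
                case TPM2_RC_HASH:
                        /* Match tpm2_seal_trusted() return values. */
                        return -EINVAL;
                default:
                        /* Fallback chosen for trusted keys compatibility. */
                        return -EPERM;
                }
        }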

Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@opinsys.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
2025-12-03 22:55:28 +02:00
Jarkko Sakkinen faf07e611d tpm: Cap the number of PCR banks
tpm2_get_pcr_allocation() does not place any upper limit on the number of
banks. Cap the limit at eight banks so that out-of-bounds values coming
from external I/O cause only limited harm.

Cc: stable@vger.kernel.org # v5.10+
Fixes: bcfff8384f ("tpm: dynamically allocate the allocated_banks array")
Tested-by: Lai Yi <yi1.lai@linux.intel.com>
Reviewed-by: Jonathan McDowell <noodles@meta.com>
Reviewed-by: Roberto Sassu <roberto.sassu@huawei.com>
Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@opinsys.com>
2025-12-03 22:55:28 +02:00
Jonathan McDowell 020a0d8fea tpm: Remove tpm_find_get_ops
tpm_find_get_ops() looks for the first valid TPM if the caller passes in
NULL. All internal users have been converted to either associate
themselves with a TPM directly, or call tpm_default_chip() as part of
their setup. Remove the no longer necessary tpm_find_get_ops().

Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Jonathan McDowell <noodles@meta.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
2025-12-03 22:55:28 +02:00
Marco Crivellari e68407b6b0 tpm: add WQ_PERCPU to alloc_workqueue users
Currently, if a user enqueues a work item using schedule_delayed_work(),
the wq used is "system_wq" (a per-cpu wq), while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to
schedule_work(), which uses system_wq, and queue_work(), which again makes
use of WORK_CPU_UNBOUND.
This lack of consistency cannot be addressed without refactoring the API.

alloc_workqueue() treats all queues as per-CPU by default, while unbound
workqueues must opt-in via WQ_UNBOUND.

This default is suboptimal: most workloads benefit from unbound queues,
allowing the scheduler to place worker threads where they’re needed and
reducing noise when CPUs are isolated.

This continues the effort to refactor workqueue APIs, which began with
the introduction of new workqueues and a new alloc_workqueue flag in:

commit 128ea9f6cc ("workqueue: Add system_percpu_wq and system_dfl_wq")
commit 930c2ea566 ("workqueue: Add new WQ_PERCPU flag")

This change adds a new WQ_PERCPU flag to explicitly request
alloc_workqueue() to be per-cpu when WQ_UNBOUND has not been specified.

With the introduction of the WQ_PERCPU flag (equivalent to !WQ_UNBOUND),
any alloc_workqueue() caller that doesn’t explicitly specify WQ_UNBOUND
must now use WQ_PERCPU.

Once migration is complete, WQ_UNBOUND can be removed and unbound will
become the implicit default.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
2025-12-03 22:55:28 +02:00
Stuart Yoder 6187221487 tpm_crb: add missing loc parameter to kerneldoc
Update the kerneldoc parameter definitions for __crb_go_idle
and __crb_cmd_ready to include the loc parameter.

Signed-off-by: Stuart Yoder <stuart.yoder@arm.com>
Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
2025-12-03 22:55:27 +02:00
Chu Guangqing 76b1a8aebe tpm_crb: Fix a spelling mistake
The spelling of the word "requrest" is incorrect; it should be "request".

Signed-off-by: Chu Guangqing <chuguangqing@inspur.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
2025-12-03 22:55:27 +02:00
Maurice Hieronymus cffc934c0d selftests: tpm2: Fix ill defined assertions
Remove parentheses around assert statements in Python. With parentheses,
the asserted expression becomes a non-empty tuple (typically (condition,
message)), which is always truthy, so the checks can never fail and are
ineffective.

Signed-off-by: Maurice Hieronymus <mhi@mailbox.org>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
2025-12-03 22:55:27 +02:00
Tomasz Lis d72312d730 drm/xe: Protect against unset LRC when pausing submissions
While pausing submissions, it is possible to encounter an exec queue
which is still being created, and therefore doesn't have a valid xe_lrc
struct reference.

Protect against such a situation by checking for NULL before access.

Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Fixes: c25c1010df ("drm/xe/vf: Replay GuC submission state on pause / unpause")
Signed-off-by: Tomasz Lis <tomasz.lis@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251124222853.1900800-1-tomasz.lis@intel.com
(cherry picked from commit 07cf4b864f523f01d2bb522a05813df30b076ba8)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
2025-12-01 10:16:17 +01:00
Matthew Brost 3d98a7164d drm/xe/vf: Start re-emission from first unsignaled job during VF migration
The LRC software ring tail is reset to the first unsignaled pending
job's head.

Fix the re-emission logic to begin submitting from the first unsignaled
job detected, rather than scanning all pending jobs, which can cause
imbalance.

v2:
 - Include missing local changes
v3:
 - s/skip_replay/restore_replay (Tomasz)

Fixes: c25c1010df ("drm/xe/vf: Replay GuC submission state on pause / unpause")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
Link: https://patch.msgid.link/20251121152750.240557-1-matthew.brost@intel.com
(cherry picked from commit 00937fe1921ab346b6f6a4beaa5c38e14733caa3)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
2025-12-01 10:16:11 +01:00
Michal Wajdeczko 14a8d83cbe drm/xe/pf: Use div_u64 when calculating GGTT profile
This will fix the following error seen on some 32-bit configs:

"ERROR: modpost: "__udivdi3" [drivers/gpu/drm/xe/xe.ko] undefined!"

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202511150929.3vUi6PEJ-lkp@intel.com/
Fixes: e448372e8a ("drm/xe/pf: Use migration-friendly GGTT auto-provisioning")
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Piotr Piórkowski <piotr.piorkowski@intel.com>
Link: https://patch.msgid.link/20251115151323.10828-1-michal.wajdeczko@intel.com
(cherry picked from commit 0f4435a1f46efc3177eb082cd3f73e29da5ab86a)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
2025-12-01 10:16:03 +01:00
Mika Kuoppala bf213ac637 drm/xe: Fix memory leak when handling pagefault vma
When the pagefault handling code was moved to a new file, an extra
drm_exec_init() was added to the VMA path. This call is unnecessary because
xe_validation_ctx_init() already performs a drm_exec_init(), resulting in a
memory leak reported by kmemleak.

Remove the redundant drm_exec_init() from the VMA pagefault handling code.

Fixes: fb544b8445 ("drm/xe: Implement xe_pagefault_queue_work")
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Stuart Summers <stuart.summers@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: intel-xe@lists.freedesktop.org
Cc: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251120161435.3674556-1-mika.kuoppala@linux.intel.com
(cherry picked from commit 62519b77aecad22b525eda482660ffa127e7ad80)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
2025-12-01 10:15:57 +01:00
Michał Winiarski 1f5556ec8b vfio/xe: Add device specific vfio_pci driver variant for Intel graphics
In addition to generic VFIO PCI functionality, the driver implements the
VFIO migration uAPI, allowing userspace to enable migration for Intel
Graphics SR-IOV Virtual Functions.
The driver binds to the VF device and uses an API exposed by the Xe driver
to transfer the VF migration data under the control of the PF device.

Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Alex Williamson <alex@shazbot.org>
Link: https://patch.msgid.link/20251127093934.1462188-5-michal.winiarski@intel.com
Link: https://lore.kernel.org/all/20251128125322.34edbeaf.alex@shazbot.org/
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
(cherry picked from commit 2e38c50ae4929f0b954fee69d428db7121452867)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
2025-12-01 09:45:48 +01:00
Michał Winiarski bd45d46ffc drm/xe/pf: Export helpers for VFIO
Device specific VFIO driver variant for Xe will implement VF migration.
Export everything that's needed for migration ops.

Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patch.msgid.link/20251127093934.1462188-4-michal.winiarski@intel.com
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
(cherry picked from commit 17f22465c5a5573724c942ca7147b4024631ef87)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
2025-12-01 09:42:37 +01:00
Michał Winiarski 5be29ebe9f drm/xe/pci: Introduce a helper to allow VF access to PF xe_device
In certain scenarios (such as VF migration), the VF driver needs to
interact with the PF driver.
Add a helper to allow the VF driver access to the PF xe_device.

Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patch.msgid.link/20251127093934.1462188-3-michal.winiarski@intel.com
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
(cherry picked from commit 8b3cce3ad9c78ce3dae1c178f99352d50e12a3c0)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
2025-12-01 09:42:37 +01:00
Michał Winiarski 73834d03a5 drm/xe/pf: Enable SR-IOV VF migration
All of the necessary building blocks are now in place to support SR-IOV
VF migration.
Flip the enable/disable logic to match VF code and disable the feature
only for platforms that don't meet the necessary prerequisites.
To allow more testing and experiments, on DEBUG builds any missing
prerequisites will be ignored.

Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patch.msgid.link/20251127093934.1462188-2-michal.winiarski@intel.com
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
(cherry picked from commit 01c724aa7bf84e9d081a56e0cbf1d282678ce144)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
2025-12-01 09:42:36 +01:00
Matt Roper 50a59230fa drm/xe/pm: Add scope-based cleanup helper for runtime PM
Add scope-based helpers for runtime PM that may be used to simplify
cleanup logic and potentially avoid goto-based cleanup.

For example, using

        guard(xe_pm_runtime)(xe);

will get runtime PM and cause a corresponding put to occur automatically
when the current scope is exited.  'xe_pm_runtime_noresume' can be used
as a guard replacement for the corresponding 'noresume' variant.
There's also an xe_pm_runtime_ioctl conditional guard that can be used
as a replacement for xe_runtime_ioctl():

        ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);
        if ((ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm)) < 0)
                /* failed */

In a few rare cases (such as gt_reset_worker()) we need to ensure that
runtime PM is dropped when the function is exited by any means
(including error paths), but the function does not need to acquire
runtime PM because that has already been done earlier by a different
function.  For these special cases, an 'xe_pm_runtime_release_only'
guard can be used to handle the release without doing an acquisition.

These guards will be used in future patches to eliminate some of our
goto-based cleanup.
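
For context, such guards are normally declared with the kernel's
<linux/cleanup.h> helpers; a minimal sketch, assuming
xe_pm_runtime_get()/xe_pm_runtime_put() take a struct xe_device pointer
(the commit's actual declarations may differ):

        #include <linux/cleanup.h>

        /* Runs xe_pm_runtime_get() now, xe_pm_runtime_put() at scope exit. */
        DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
                     xe_pm_runtime_get(_T), xe_pm_runtime_put(_T))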

v2:
 - Specify success condition for xe_pm runtime_ioctl as _RET >= 0 so
   that positive values will be properly identified as success and
   trigger destructor cleanup properly.

v3:
 - Add comments to the kerneldoc for the existing 'get' functions
   indicating that scope-based handling should be preferred where
   possible.  (Gustavo)

Cc: Gustavo Sousa <gustavo.sousa@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Gustavo Sousa <gustavo.sousa@intel.com>
Link: https://patch.msgid.link/20251118164338.3572146-31-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
(cherry picked from commit 59e7528dbfd52efbed05e0f11b2143217a12bc74)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
2025-12-01 09:41:33 +01:00
Niklas Cassel 6ce0dd9f54 ata: libata-core: Disable LPM on Silicon Motion MD619{H,G}XCLDE3TC
According to a user report, the Silicon Motion MD619HXCLDE3TC SSD and
the Silicon Motion MD619GXCLDE3TC SSD have problems with LPM.

Reported-by: Yihang Li <liyihang9@h-partners.com>
Closes: https://lore.kernel.org/linux-ide/20251121073502.3388239-1-liyihang9@h-partners.com/
Signed-off-by: Niklas Cassel <cassel@kernel.org>
2025-11-28 06:58:24 +01:00
Serge Hallyn 9891d2f79a Clarify the rootid_owns_currentns
Split most of the rootid_owns_currentns() functionality
into a more generic rootid_owns_ns() function which
will be easier to write tests for.

Rename the functions and variables to make clear that
the ids being tested could be any uid.
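
A sketch of the shape of such a split (illustrative only; the commit's
actual naming and parameters may differ):

        /* Generic, kunit-testable core: is @rootid mapped to uid 0 in
         * @ns or any of its ancestors?
         */
        static bool rootid_owns_ns(struct user_namespace *ns, kuid_t rootid)
        {
                for (;; ns = ns->parent) {
                        if (from_kuid(ns, rootid) == 0)
                                return true;
                        if (ns == &init_user_ns)
                                return false;
                }
        }

        static bool rootid_owns_currentns(kuid_t rootid)
        {
                return uid_valid(rootid) &&
                       rootid_owns_ns(current_user_ns(), rootid);
        }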

Signed-off-by: Serge Hallyn <serge@hallyn.com>
CC: Ryan Foster <foster.ryan.r@gmail.com>
CC: Christian Brauner <brauner@kernel.org>

---
v2: change the function parameter documentation to mollify the bot.
2025-11-18 18:00:19 -06:00
René Rebe 1d779fa996 ata: pata_pcmcia: Add Iomega Clik! PCMCIA ATA/ATAPI Adapter
Add Iomega Clik! "PCMCIA ATA/ATAPI Adapter" ID to pata_pcmcia.

Signed-off-by: René Rebe <rene@exactco.de>
Signed-off-by: Niklas Cassel <cassel@kernel.org>
2025-11-18 21:58:41 +01:00
Marco Crivellari 13f4d99582 ata: libata-sff: add WQ_PERCPU to alloc_workqueue users
Currently, if a user enqueues a work item using schedule_delayed_work(),
the wq used is "system_wq" (a per-cpu wq), while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to
schedule_work(), which uses system_wq, and queue_work(), which again makes
use of WORK_CPU_UNBOUND.
This lack of consistency cannot be addressed without refactoring the API.

alloc_workqueue() treats all queues as per-CPU by default, while unbound
workqueues must opt-in via WQ_UNBOUND.

This default is suboptimal: most workloads benefit from unbound queues,
allowing the scheduler to place worker threads where they’re needed and
reducing noise when CPUs are isolated.

This continues the effort to refactor workqueue APIs, which began with
the introduction of new workqueues and a new alloc_workqueue flag in:

commit 128ea9f6cc ("workqueue: Add system_percpu_wq and system_dfl_wq")
commit 930c2ea566 ("workqueue: Add new WQ_PERCPU flag")

This change adds a new WQ_PERCPU flag to explicitly request
alloc_workqueue() to be per-cpu when WQ_UNBOUND has not been specified.

With the introduction of the WQ_PERCPU flag (equivalent to !WQ_UNBOUND),
any alloc_workqueue() caller that doesn’t explicitly specify WQ_UNBOUND
must now use WQ_PERCPU.

Once migration is complete, WQ_UNBOUND can be removed and unbound will
become the implicit default.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Signed-off-by: Niklas Cassel <cassel@kernel.org>
2025-11-07 09:42:36 +01:00
Rob Herring (Arm) 095d495cb8 dt-bindings: ata: snps,dwc-ahci: Allow 'iommus' property
The AMD Seattle DWC AHCI is behind an IOMMU and has 1-3 entries, so add
the 'iommus' property. There's not a specific compatible, so we can't
limit it to Seattle.

Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
Signed-off-by: Niklas Cassel <cassel@kernel.org>
2025-10-23 14:29:41 +02:00
Thorsten Blum 4ea303d9e9 ata: pata_it821x: Replace deprecated strcpy with strscpy in it821x_display_disk
strcpy() is deprecated; use strscpy() instead.

Replace the hard-coded buffer size 8 with sizeof(mbuf) when using
snprintf() while we're at it.

Link: https://github.com/KSPP/linux/issues/88
Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Signed-off-by: Niklas Cassel <cassel@kernel.org>
2025-10-23 14:23:40 +02:00
Yulin Lu c9d869fb29 dt-bindings: ata: eswin: Document for EIC7700 SoC ahci
Document the SATA AHCI controller on the EIC7700 SoC platform,
including descriptions of its hardware configurations.

Signed-off-by: Yulin Lu <luyulin@eswincomputing.com>
Reviewed-by: Rob Herring (Arm) <robh@kernel.org>
Signed-off-by: Niklas Cassel <cassel@kernel.org>
2025-10-13 09:34:33 +02:00
54 changed files with 1870 additions and 128 deletions

Documentation/devicetree/bindings/ata/eswin,eic7700-ahci.yaml (new file)

@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/eswin,eic7700-ahci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Eswin EIC7700 SoC SATA Controller
+
+maintainers:
+  - Yulin Lu <luyulin@eswincomputing.com>
+  - Huan He <hehuan1@eswincomputing.com>
+
+description:
+  AHCI SATA controller embedded into the EIC7700 SoC is based on the DWC AHCI
+  SATA v5.00a IP core.
+
+select:
+  properties:
+    compatible:
+      const: eswin,eic7700-ahci
+  required:
+    - compatible
+
+allOf:
+  - $ref: snps,dwc-ahci-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - const: eswin,eic7700-ahci
+      - const: snps,dwc-ahci
+
+  clocks:
+    minItems: 2
+    maxItems: 2
+
+  clock-names:
+    items:
+      - const: pclk
+      - const: aclk
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    const: arst
+
+  ports-implemented:
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+  - phys
+  - phy-names
+  - ports-implemented
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    sata@50420000 {
+        compatible = "eswin,eic7700-ahci", "snps,dwc-ahci";
+        reg = <0x50420000 0x10000>;
+        interrupt-parent = <&plic>;
+        interrupts = <58>;
+        clocks = <&clock 171>, <&clock 186>;
+        clock-names = "pclk", "aclk";
+        phys = <&sata_phy>;
+        phy-names = "sata-phy";
+        ports-implemented = <0x1>;
+        resets = <&reset 96>;
+        reset-names = "arst";
+    };

Documentation/devicetree/bindings/ata/snps,dwc-ahci.yaml

@@ -33,6 +33,10 @@ properties:
       - description: SPEAr1340 AHCI SATA device
         const: snps,spear-ahci
 
+  iommus:
+    minItems: 1
+    maxItems: 3
+
 patternProperties:
   "^sata-port@[0-9a-e]$":
     $ref: /schemas/ata/snps,dwc-ahci-common.yaml#/$defs/dwc-ahci-port

MAINTAINERS

@@ -27221,6 +27221,13 @@ L:	virtualization@lists.linux.dev
 S:	Maintained
 F:	drivers/vfio/pci/virtio
 
+VFIO XE PCI DRIVER
+M:	Michał Winiarski <michal.winiarski@intel.com>
+L:	kvm@vger.kernel.org
+L:	intel-xe@lists.freedesktop.org
+S:	Supported
+F:	drivers/vfio/pci/xe
+
 VGA_SWITCHEROO
 R:	Lukas Wunner <lukas@wunner.de>
 S:	Maintained

drivers/ata/libata-core.c

@@ -4216,6 +4216,10 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
 	/* Apacer models with LPM issues */
 	{ "Apacer AS340*",		NULL,	ATA_QUIRK_NOLPM },
 
+	/* Silicon Motion models with LPM issues */
+	{ "MD619HXCLDE3TC",		"TCVAID",	ATA_QUIRK_NOLPM },
+	{ "MD619GXCLDE3TC",		"TCV35D",	ATA_QUIRK_NOLPM },
+
 	/* These specific Samsung models/firmware-revs do not handle LPM well */
 	{ "SAMSUNG MZMPC128HBFU-000MV",  "CXM14M1Q", ATA_QUIRK_NOLPM },
 	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_QUIRK_NOLPM },

drivers/ata/libata-sff.c

@@ -3191,7 +3191,8 @@ void ata_sff_port_init(struct ata_port *ap)
 
 int __init ata_sff_init(void)
 {
-	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
+	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM | WQ_PERCPU,
+				     WQ_MAX_ACTIVE);
 	if (!ata_sff_wq)
 		return -ENOMEM;

drivers/ata/pata_it821x.c

@@ -75,6 +75,7 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <scsi/scsi_host.h>
 #include <linux/libata.h>
 
@@ -632,9 +633,9 @@ static void it821x_display_disk(struct ata_port *ap, int n, u8 *buf)
 		cbl = "";
 
 	if (mode)
-		snprintf(mbuf, 8, "%5s%d", mtype, mode - 1);
+		snprintf(mbuf, sizeof(mbuf), "%5s%d", mtype, mode - 1);
 	else
-		strcpy(mbuf, "PIO");
+		strscpy(mbuf, "PIO");
 	if (buf[52] == 4)
 		ata_port_info(ap, "%d: %-6s %-8s %s %s\n",
 			      n, mbuf, types[buf[52]], id, cbl);

drivers/ata/pata_pcmcia.c

@@ -344,6 +344,7 @@ static const struct pcmcia_device_id pcmcia_devices[] = {
 	PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
 	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
 	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
+	PCMCIA_DEVICE_PROD_ID2("PCMCIA ATA/ATAPI Adapter", 0x888d7b73),
 	PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
 	PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
 	PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),

drivers/char/tpm/tpm-chip.c

@@ -230,42 +230,6 @@ struct tpm_chip *tpm_default_chip(void)
 }
 EXPORT_SYMBOL_GPL(tpm_default_chip);
 
-/**
- * tpm_find_get_ops() - find and reserve a TPM chip
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- *
- * Finds a TPM chip and reserves its class device and operations. The chip must
- * be released with tpm_put_ops() after use.
- * This function is for internal use only. It supports existing TPM callers
- * by accepting NULL, but those callers should be converted to pass in a chip
- * directly.
- *
- * Return:
- * A reserved &struct tpm_chip instance.
- * %NULL if a chip is not found.
- * %NULL if the chip is not available.
- */
-struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip)
-{
-	int rc;
-
-	if (chip) {
-		if (!tpm_try_get_ops(chip))
-			return chip;
-		return NULL;
-	}
-
-	chip = tpm_default_chip();
-	if (!chip)
-		return NULL;
-	rc = tpm_try_get_ops(chip);
-	/* release additional reference we got from tpm_default_chip() */
-	put_device(&chip->dev);
-	if (rc)
-		return NULL;
-	return chip;
-}
-
 /**
  * tpm_dev_release() - free chip memory and the device number
  * @dev: the character device for the TPM chip
@@ -282,7 +246,6 @@ static void tpm_dev_release(struct device *dev)
 	kfree(chip->work_space.context_buf);
 	kfree(chip->work_space.session_buf);
-	kfree(chip->allocated_banks);
 #ifdef CONFIG_TCG_TPM2_HMAC
 	kfree(chip->auth);
 #endif

drivers/char/tpm/tpm-dev-common.c

@@ -275,7 +275,8 @@ void tpm_common_release(struct file *file, struct file_priv *priv)
 
 int __init tpm_dev_common_init(void)
 {
-	tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+	tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM | WQ_PERCPU,
+				     0);
 
 	return !tpm_dev_wq ? -ENOMEM : 0;
 }

drivers/char/tpm/tpm-interface.c

@@ -313,10 +313,13 @@ int tpm_is_tpm2(struct tpm_chip *chip)
 {
 	int rc;
 
-	chip = tpm_find_get_ops(chip);
-	if (!chip)
-		return -ENODEV;
+	rc = tpm_try_get_ops(chip);
+	if (rc)
+		return rc;
 
 	rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
 
 	tpm_put_ops(chip);
@@ -338,10 +341,13 @@ int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
 {
 	int rc;
 
-	chip = tpm_find_get_ops(chip);
-	if (!chip)
-		return -ENODEV;
+	rc = tpm_try_get_ops(chip);
+	if (rc)
+		return rc;
 
 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
 		rc = tpm2_pcr_read(chip, pcr_idx, digest, NULL);
 	else
@@ -369,10 +375,13 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
 	int rc;
 	int i;
 
-	chip = tpm_find_get_ops(chip);
-	if (!chip)
-		return -ENODEV;
+	rc = tpm_try_get_ops(chip);
+	if (rc)
+		return rc;
 
 	for (i = 0; i < chip->nr_allocated_banks; i++) {
 		if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
 			rc = -EINVAL;
@@ -492,10 +501,13 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
 	if (!out || max > TPM_MAX_RNG_DATA)
 		return -EINVAL;
 
-	chip = tpm_find_get_ops(chip);
-	if (!chip)
-		return -ENODEV;
+	rc = tpm_try_get_ops(chip);
+	if (rc)
+		return rc;
 
 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
 		rc = tpm2_get_random(chip, out, max);
 	else

drivers/char/tpm/tpm.h

@@ -267,7 +267,6 @@ static inline void tpm_msleep(unsigned int delay_msec)
 int tpm_chip_bootstrap(struct tpm_chip *chip);
 int tpm_chip_start(struct tpm_chip *chip);
 void tpm_chip_stop(struct tpm_chip *chip);
-struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
 struct tpm_chip *tpm_chip_alloc(struct device *dev,
 				const struct tpm_class_ops *ops);

drivers/char/tpm/tpm1-cmd.c

@@ -799,11 +799,6 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
  */
 int tpm1_get_pcr_allocation(struct tpm_chip *chip)
 {
-	chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
-					GFP_KERNEL);
-	if (!chip->allocated_banks)
-		return -ENOMEM;
-
 	chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
 	chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
 	chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;

drivers/char/tpm/tpm2-cmd.c

@@ -550,11 +550,9 @@ ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
 	nr_possible_banks = be32_to_cpup(
 		(__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);
 
-	chip->allocated_banks = kcalloc(nr_possible_banks,
-					sizeof(*chip->allocated_banks),
-					GFP_KERNEL);
-	if (!chip->allocated_banks) {
+	if (nr_possible_banks > TPM2_MAX_PCR_BANKS) {
+		pr_err("tpm: out of bank capacity: %u > %u\n",
+		       nr_possible_banks, TPM2_MAX_PCR_BANKS);
 		rc = -ENOMEM;
 		goto out;
 	}

drivers/char/tpm/tpm_crb.c

@@ -179,6 +179,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete
  *
  * @dev: crb device
  * @priv: crb private data
+ * @loc: locality
  *
  * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
  * The device should respond within TIMEOUT_C by clearing the bit.
@@ -233,6 +234,7 @@ static int crb_go_idle(struct tpm_chip *chip)
  *
  * @dev: crb device
  * @priv: crb private data
+ * @loc: locality
  *
  * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
  * and poll till the device acknowledge it by clearing the bit.
@@ -412,7 +414,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip)
 #ifdef CONFIG_ARM64
 /*
  * This is a TPM Command Response Buffer start method that invokes a
- * Secure Monitor Call to requrest the firmware to execute or cancel
+ * Secure Monitor Call to request the firmware to execute or cancel
  * a TPM 2.0 command.
  */
 static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)

drivers/char/tpm/tpm_tis_core.c

@@ -265,8 +265,7 @@ static u8 tpm_tis_status(struct tpm_chip *chip)
 		/*
 		 * Dump stack for forensics, as invalid TPM_STS.x could be
-		 * potentially triggered by impaired tpm_try_get_ops() or
-		 * tpm_find_get_ops().
+		 * potentially triggered by impaired tpm_try_get_ops().
 		 */
 		dump_stack();
 	}

drivers/gpu/drm/i915/Makefile

@@ -239,6 +239,8 @@ i915-y += \
 	display/intel_cdclk.o \
 	display/intel_cmtg.o \
 	display/intel_color.o \
+	display/intel_colorop.o \
+	display/intel_color_pipeline.o \
 	display/intel_combo_phy.o \
 	display/intel_connector.o \
 	display/intel_crtc.o \

drivers/gpu/drm/i915/display/intel_color.c

@@ -32,6 +32,8 @@
 #include "intel_display_utils.h"
 #include "intel_dsb.h"
 #include "intel_vrr.h"
+#include "skl_universal_plane.h"
+#include "skl_universal_plane_regs.h"
 
 struct intel_color_funcs {
 	int (*color_check)(struct intel_atomic_state *state,
@@ -87,6 +89,14 @@ struct intel_color_funcs {
 	 * Read config other than LUTs and CSCs, before them. Optional.
 	 */
 	void (*get_config)(struct intel_crtc_state *crtc_state);
+
+	/* Plane CSC */
+	void (*load_plane_csc_matrix)(struct intel_dsb *dsb,
+				      const struct intel_plane_state *plane_state);
+
+	/* Plane Pre/Post CSC */
+	void (*load_plane_luts)(struct intel_dsb *dsb,
+				const struct intel_plane_state *plane_state);
 };
 
 #define CTM_COEFF_SIGN	(1ULL << 63)
@@ -609,6 +619,8 @@ static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits)
 	if (CTM_COEFF_NEGATIVE(coeff))
 		c = -c;
 
+	int_bits = max(int_bits, 1);
+
 	c = clamp(c, -(s64)BIT(int_bits + frac_bits - 1),
 		  (s64)(BIT(int_bits + frac_bits - 1) - 1));
@@ -3836,6 +3848,266 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
 	}
 }
 
+static void
+xelpd_load_plane_csc_matrix(struct intel_dsb *dsb,
+			    const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	const struct drm_plane_state *state = &plane_state->uapi;
+	enum pipe pipe = to_intel_plane(state->plane)->pipe;
+	enum plane_id plane = to_intel_plane(state->plane)->id;
+	const struct drm_property_blob *blob = plane_state->hw.ctm;
+	struct drm_color_ctm_3x4 *ctm;
+	const u64 *input;
+	u16 coeffs[9] = {};
+	int i, j;
+
+	if (!icl_is_hdr_plane(display, plane) || !blob)
+		return;
+
+	ctm = blob->data;
+	input = ctm->matrix;
+
+	/*
+	 * Convert fixed point S31.32 input to format supported by the
+	 * hardware.
+	 */
+	for (i = 0, j = 0; i < ARRAY_SIZE(coeffs); i++) {
+		u64 abs_coeff = ((1ULL << 63) - 1) & input[j];
+
+		/*
+		 * Clamp input value to min/max supported by
+		 * hardware.
+		 */
+		abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+		/* sign bit */
+		if (CTM_COEFF_NEGATIVE(input[j]))
+			coeffs[i] |= 1 << 15;
+
+		if (abs_coeff < CTM_COEFF_0_125)
+			coeffs[i] |= (3 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 12);
+		else if (abs_coeff < CTM_COEFF_0_25)
+			coeffs[i] |= (2 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 11);
+		else if (abs_coeff < CTM_COEFF_0_5)
+			coeffs[i] |= (1 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 10);
+		else if (abs_coeff < CTM_COEFF_1_0)
+			coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+		else if (abs_coeff < CTM_COEFF_2_0)
+			coeffs[i] |= (7 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 8);
+		else
+			coeffs[i] |= (6 << 12) |
+				ILK_CSC_COEFF_FP(abs_coeff, 7);
+
+		/* Skip postoffs */
+		if (!((j + 2) % 4))
+			j += 2;
+		else
+			j++;
+	}
+
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 0),
+			   coeffs[0] << 16 | coeffs[1]);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 1),
+			   coeffs[2] << 16);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 2),
+			   coeffs[3] << 16 | coeffs[4]);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 3),
+			   coeffs[5] << 16);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 4),
+			   coeffs[6] << 16 | coeffs[7]);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 5),
+			   coeffs[8] << 16);
+
+	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 0), 0);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 1), 0);
+	intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 2), 0);
+
+	/*
+	 * Conversion from S31.32 to S0.12. BIT[12] is the signed bit
+	 */
+	intel_de_write_dsb(display, dsb,
+			   PLANE_CSC_POSTOFF(pipe, plane, 0),
+			   ctm_to_twos_complement(input[3], 0, 12));
+	intel_de_write_dsb(display, dsb,
+			   PLANE_CSC_POSTOFF(pipe, plane, 1),
+			   ctm_to_twos_complement(input[7], 0, 12));
+	intel_de_write_dsb(display, dsb,
+			   PLANE_CSC_POSTOFF(pipe, plane, 2),
+			   ctm_to_twos_complement(input[11], 0, 12));
+}
+
+static void
+xelpd_program_plane_pre_csc_lut(struct intel_dsb *dsb,
+				const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	const struct drm_plane_state *state = &plane_state->uapi;
+	enum pipe pipe = to_intel_plane(state->plane)->pipe;
+	enum plane_id plane = to_intel_plane(state->plane)->id;
+	const struct drm_color_lut32 *pre_csc_lut = plane_state->hw.degamma_lut->data;
+	u32 i, lut_size;
+
+	if (icl_is_hdr_plane(display, plane)) {
+		lut_size = 128;
+
+		intel_de_write_dsb(display, dsb,
+				   PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+				   PLANE_PAL_PREC_AUTO_INCREMENT);
+
+		if (pre_csc_lut) {
+			for (i = 0; i < lut_size; i++) {
+				u32 lut_val = drm_color_lut32_extract(pre_csc_lut[i].green, 24);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   lut_val);
+			}
+
+			/* Program the max register to clamp values > 1.0. */
+			/* TODO: Restrict to 0x7ffffff */
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   (1 << 24));
+			} while (i++ > 130);
+		} else {
+			for (i = 0; i < lut_size; i++) {
+				u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+			}
+
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   1 << 24);
+			} while (i++ < 130);
+		}
+
+		intel_de_write_dsb(display, dsb, PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+	}
+}
+
+static void
+xelpd_program_plane_post_csc_lut(struct intel_dsb *dsb,
+				 const struct intel_plane_state *plane_state)
+{
+	struct intel_display *display = to_intel_display(plane_state);
+	const struct drm_plane_state *state = &plane_state->uapi;
+	enum pipe pipe = to_intel_plane(state->plane)->pipe;
+	enum plane_id plane = to_intel_plane(state->plane)->id;
+	const struct drm_color_lut32 *post_csc_lut = plane_state->hw.gamma_lut->data;
+	u32 i, lut_size, lut_val;
+
+	if (icl_is_hdr_plane(display, plane)) {
+		intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+				   PLANE_PAL_PREC_AUTO_INCREMENT);
+		/* TODO: Add macro */
+		intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0),
+				   PLANE_PAL_PREC_AUTO_INCREMENT);
+
+		if (post_csc_lut) {
+			lut_size = 32;
+			for (i = 0; i < lut_size; i++) {
+				lut_val = drm_color_lut32_extract(post_csc_lut[i].green, 24);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   lut_val);
+			}
+
+			/* Segment 2 */
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   (1 << 24));
+			} while (i++ < 34);
+		} else {
+			/* TODO: Add for segment 0 */
+			lut_size = 32;
+			for (i = 0; i < lut_size; i++) {
+				u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+			}
+
+			do {
+				intel_de_write_dsb(display, dsb,
+						   PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+						   1 << 24);
+			} while (i++ < 34);
+		}
+
+		intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+		intel_de_write_dsb(display, dsb,
+				   PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0), 0);
+	}
+}
+
+static void
+xelpd_plane_load_luts(struct intel_dsb *dsb, const struct intel_plane_state *plane_state)
+{
+	if (plane_state->hw.degamma_lut)
+		xelpd_program_plane_pre_csc_lut(dsb, plane_state);
+
+	if (plane_state->hw.gamma_lut)
+		xelpd_program_plane_post_csc_lut(dsb, plane_state);
+}
+
+static u32 glk_3dlut_10(const struct drm_color_lut32 *color)
+{
+	return REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, drm_color_lut32_extract(color->red, 10)) |
+		REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, drm_color_lut32_extract(color->green, 10)) |
+		REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, drm_color_lut32_extract(color->blue, 10));
+}
+
+static void glk_load_lut_3d(struct intel_dsb *dsb,
+			    struct intel_crtc *crtc,
+			    const struct drm_property_blob *blob)
+{
+	struct intel_display *display = to_intel_display(crtc->base.dev);
+	const struct drm_color_lut32 *lut = blob->data;
+	int i, lut_size = drm_color_lut32_size(blob);
+	enum pipe pipe = crtc->pipe;
+
+	if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+		drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not loading LUTs\n",
+			crtc->base.base.id, crtc->base.name);
+		return;
+	}
+
+	intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), LUT_3D_AUTO_INCREMENT);
+
+	for (i = 0; i < lut_size; i++)
+		intel_de_write_dsb(display, dsb, LUT_3D_DATA(pipe), glk_3dlut_10(&lut[i]));
+
+	intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), 0);
+}
+
+static void glk_lut_3d_commit(struct intel_dsb *dsb, struct intel_crtc *crtc, bool enable)
+{
+	struct intel_display *display = to_intel_display(crtc);
+	enum pipe pipe = crtc->pipe;
+	u32 val = 0;
+
+	if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+		drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not committing change\n",
+			crtc->base.base.id, crtc->base.name);
+		return;
+	}
+
+	if (enable)
+		val = LUT_3D_ENABLE | LUT_3D_READY | LUT_3D_BIND_PLANE_1;
intel_de_write_dsb(display, dsb, LUT_3D_CTL(pipe), val);
}
static const struct intel_color_funcs chv_color_funcs = {
.color_check = chv_color_check,
.color_commit_arm = i9xx_color_commit_arm,
@ -3883,6 +4155,8 @@ static const struct intel_color_funcs tgl_color_funcs = {
.lut_equal = icl_lut_equal,
.read_csc = icl_read_csc,
.get_config = skl_get_config,
.load_plane_csc_matrix = xelpd_load_plane_csc_matrix,
.load_plane_luts = xelpd_plane_load_luts,
};
static const struct intel_color_funcs icl_color_funcs = {
@ -3963,6 +4237,67 @@ static const struct intel_color_funcs ilk_color_funcs = {
.get_config = ilk_get_config,
};
void intel_color_plane_commit_arm(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
glk_lut_3d_commit(dsb, crtc, !!plane_state->hw.lut_3d);
}
static void
intel_color_load_plane_csc_matrix(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
if (display->funcs.color->load_plane_csc_matrix)
display->funcs.color->load_plane_csc_matrix(dsb, plane_state);
}
static void
intel_color_load_plane_luts(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
if (display->funcs.color->load_plane_luts)
display->funcs.color->load_plane_luts(dsb, plane_state);
}
bool
intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe)
{
if (DISPLAY_VER(display) >= 12)
return pipe == PIPE_A || pipe == PIPE_B;
else
return false;
}
static void
intel_color_load_3dlut(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
glk_load_lut_3d(dsb, crtc, plane_state->hw.lut_3d);
}
void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state)
{
if (plane_state->hw.ctm)
intel_color_load_plane_csc_matrix(dsb, plane_state);
if (plane_state->hw.degamma_lut || plane_state->hw.gamma_lut)
intel_color_load_plane_luts(dsb, plane_state);
if (plane_state->hw.lut_3d)
intel_color_load_3dlut(dsb, plane_state);
}
void intel_color_crtc_init(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);


@ -13,7 +13,9 @@ struct intel_crtc_state;
struct intel_crtc;
struct intel_display;
struct intel_dsb;
struct intel_plane_state;
struct drm_property_blob;
enum pipe;
void intel_color_init_hooks(struct intel_display *display);
int intel_color_init(struct intel_display *display);
@ -40,5 +42,9 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob2,
bool is_pre_csc_lut);
void intel_color_assert_luts(const struct intel_crtc_state *crtc_state);
void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state);
void intel_color_plane_commit_arm(struct intel_dsb *dsb,
const struct intel_plane_state *plane_state);
bool intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe);
#endif /* __INTEL_COLOR_H__ */


@ -0,0 +1,99 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2025 Intel Corporation
*/
#include "intel_color.h"
#include "intel_colorop.h"
#include "intel_color_pipeline.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "skl_universal_plane.h"
#define MAX_COLOR_PIPELINES 1
#define PLANE_DEGAMMA_SIZE 128
#define PLANE_GAMMA_SIZE 32
static
int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_enum_list *list,
enum pipe pipe)
{
struct drm_device *dev = plane->dev;
struct intel_display *display = to_intel_display(dev);
struct drm_colorop *prev_op;
struct intel_colorop *colorop;
int ret;
colorop = intel_colorop_create(INTEL_PLANE_CB_PRE_CSC_LUT);
ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
PLANE_DEGAMMA_SIZE,
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
return ret;
list->type = colorop->base.base.id;
list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", colorop->base.base.id);
/* TODO: handle failures and clean up */
prev_op = &colorop->base;
if (DISPLAY_VER(display) >= 35 &&
intel_color_crtc_has_3dlut(display, pipe) &&
plane->type == DRM_PLANE_TYPE_PRIMARY) {
colorop = intel_colorop_create(INTEL_PLANE_CB_3DLUT);
ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, 17,
DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
true);
if (ret)
return ret;
drm_colorop_set_next_property(prev_op, &colorop->base);
prev_op = &colorop->base;
}
colorop = intel_colorop_create(INTEL_PLANE_CB_CSC);
ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
return ret;
drm_colorop_set_next_property(prev_op, &colorop->base);
prev_op = &colorop->base;
colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT);
ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
PLANE_GAMMA_SIZE,
DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
DRM_COLOROP_FLAG_ALLOW_BYPASS);
if (ret)
return ret;
drm_colorop_set_next_property(prev_op, &colorop->base);
return 0;
}
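In summary, the chain built by _intel_color_pipeline_plane_init() above exposes the following fixed pipeline to userspace; the 3D LUT stage is only created on display version >= 35 for the primary plane of a 3D-LUT-capable pipe:

/*
 * 1D pre-CSC LUT (128 entries, linear interpolation)
 *   -> [17x17x17 3D LUT, tetrahedral interpolation]
 *     -> 3x4 CTM
 *       -> 1D post-CSC LUT (32 entries, linear interpolation)
 */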
int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe)
{
struct drm_device *dev = plane->dev;
struct intel_display *display = to_intel_display(dev);
struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
int len = 0;
int ret;
/* Currently expose pipeline only for HDR planes */
if (!icl_is_hdr_plane(display, to_intel_plane(plane)->id))
return 0;
/* Add pipeline consisting of transfer functions */
ret = _intel_color_pipeline_plane_init(plane, &pipelines[len], pipe);
if (ret)
return ret;
len++;
return drm_plane_create_color_pipeline_property(plane, pipelines, len);
}


@ -0,0 +1,14 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2025 Intel Corporation
*/
#ifndef __INTEL_COLOR_PIPELINE_H__
#define __INTEL_COLOR_PIPELINE_H__
struct drm_plane;
enum pipe;
int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe);
#endif /* __INTEL_COLOR_PIPELINE_H__ */


@ -316,4 +316,33 @@
#define SKL_BOTTOM_COLOR_CSC_ENABLE REG_BIT(30)
#define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE(pipe, _SKL_BOTTOM_COLOR_A, _SKL_BOTTOM_COLOR_B)
/* 3D LUT */
#define _LUT_3D_CTL_A 0x490A4
#define _LUT_3D_CTL_B 0x491A4
#define LUT_3D_CTL(pipe) _MMIO_PIPE(pipe, _LUT_3D_CTL_A, _LUT_3D_CTL_B)
#define LUT_3D_ENABLE REG_BIT(31)
#define LUT_3D_READY REG_BIT(30)
#define LUT_3D_BINDING_MASK REG_GENMASK(23, 22)
#define LUT_3D_BIND_PIPE REG_FIELD_PREP(LUT_3D_BINDING_MASK, 0)
#define LUT_3D_BIND_PLANE_1 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 1)
#define LUT_3D_BIND_PLANE_2 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 2)
#define LUT_3D_BIND_PLANE_3 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 3)
#define _LUT_3D_INDEX_A 0x490A8
#define _LUT_3D_INDEX_B 0x491A8
#define LUT_3D_INDEX(pipe) _MMIO_PIPE(pipe, _LUT_3D_INDEX_A, _LUT_3D_INDEX_B)
#define LUT_3D_AUTO_INCREMENT REG_BIT(13)
#define LUT_3D_INDEX_VALUE_MASK REG_GENMASK(12, 0)
#define LUT_3D_INDEX_VALUE(x) REG_FIELD_PREP(LUT_3D_INDEX_VALUE_MASK, (x))
#define _LUT_3D_DATA_A 0x490AC
#define _LUT_3D_DATA_B 0x491AC
#define LUT_3D_DATA(pipe) _MMIO_PIPE(pipe, _LUT_3D_DATA_A, _LUT_3D_DATA_B)
#define LUT_3D_DATA_RED_MASK REG_GENMASK(29, 20)
#define LUT_3D_DATA_GREEN_MASK REG_GENMASK(19, 10)
#define LUT_3D_DATA_BLUE_MASK REG_GENMASK(9, 0)
#define LUT_3D_DATA_RED(x) REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, (x))
#define LUT_3D_DATA_GREEN(x) REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, (x))
#define LUT_3D_DATA_BLUE(x) REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, (x))
#endif /* __INTEL_COLOR_REGS_H__ */


@ -0,0 +1,35 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2025 Intel Corporation
*/
#include "intel_colorop.h"
struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop)
{
return container_of(colorop, struct intel_colorop, base);
}
struct intel_colorop *intel_colorop_alloc(void)
{
struct intel_colorop *colorop;
colorop = kzalloc(sizeof(*colorop), GFP_KERNEL);
if (!colorop)
return ERR_PTR(-ENOMEM);
return colorop;
}
struct intel_colorop *intel_colorop_create(enum intel_color_block id)
{
struct intel_colorop *colorop;
colorop = intel_colorop_alloc();
if (IS_ERR(colorop))
return colorop;
colorop->id = id;
return colorop;
}


@ -0,0 +1,15 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2025 Intel Corporation
*/
#ifndef __INTEL_COLOROP_H__
#define __INTEL_COLOROP_H__
#include "intel_display_types.h"
struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop);
struct intel_colorop *intel_colorop_alloc(void);
struct intel_colorop *intel_colorop_create(enum intel_color_block id);
#endif /* __INTEL_COLOROP_H__ */


@ -7304,6 +7304,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
unsigned int size = new_crtc_state->plane_color_changed ? 8192 : 1024;
if (!new_crtc_state->use_flipq &&
!new_crtc_state->use_dsb &&
@ -7314,10 +7315,12 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
* Rough estimate:
* ~64 registers per plane * 8 planes = 512
* Double that for pipe stuff and other overhead.
* ~4913 registers for the 17x17x17 3DLUT
* ~200 color registers * 3 HDR planes = 600
* Total when plane color state changed: ~1024 + 4913 + 600, which fits
* the 8192 entry budget above.
*/
new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
new_crtc_state->use_dsb ||
new_crtc_state->use_flipq ? 1024 : 16);
new_crtc_state->use_flipq ? size : 16);
if (!new_crtc_state->dsb_commit) {
new_crtc_state->use_flipq = false;
new_crtc_state->use_dsb = false;


@ -138,4 +138,13 @@ enum hpd_pin {
HPD_NUM_PINS
};
enum intel_color_block {
INTEL_PLANE_CB_PRE_CSC_LUT,
INTEL_PLANE_CB_CSC,
INTEL_PLANE_CB_POST_CSC_LUT,
INTEL_PLANE_CB_3DLUT,
INTEL_CB_MAX
};
#endif /* __INTEL_DISPLAY_LIMITS_H__ */


@ -646,6 +646,7 @@ struct intel_plane_state {
enum drm_color_encoding color_encoding;
enum drm_color_range color_range;
enum drm_scaling_filter scaling_filter;
struct drm_property_blob *ctm, *degamma_lut, *gamma_lut, *lut_3d;
} hw;
struct i915_vma *ggtt_vma;
@ -1391,6 +1392,9 @@ struct intel_crtc_state {
u8 silence_period_sym_clocks;
u8 lfps_half_cycle_num_of_syms;
} alpm_state;
/* to track changes in plane color blocks */
bool plane_color_changed;
};
enum intel_pipe_crc_source {
@ -1985,6 +1989,11 @@ struct intel_dp_mst_encoder {
struct intel_connector *connector;
};
struct intel_colorop {
struct drm_colorop base;
enum intel_color_block id;
};
static inline struct intel_encoder *
intel_attached_encoder(struct intel_connector *connector)
{


@ -49,6 +49,7 @@
#include "i9xx_plane_regs.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
#include "intel_colorop.h"
#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
@ -336,6 +337,58 @@ intel_plane_copy_uapi_plane_damage(struct intel_plane_state *new_plane_state,
*damage = drm_plane_state_src(&new_uapi_plane_state->uapi);
}
static bool
intel_plane_colorop_replace_blob(struct intel_plane_state *plane_state,
struct intel_colorop *intel_colorop,
struct drm_property_blob *blob)
{
if (intel_colorop->id == INTEL_PLANE_CB_CSC)
return drm_property_replace_blob(&plane_state->hw.ctm, blob);
else if (intel_colorop->id == INTEL_PLANE_CB_PRE_CSC_LUT)
return drm_property_replace_blob(&plane_state->hw.degamma_lut, blob);
else if (intel_colorop->id == INTEL_PLANE_CB_POST_CSC_LUT)
return drm_property_replace_blob(&plane_state->hw.gamma_lut, blob);
else if (intel_colorop->id == INTEL_PLANE_CB_3DLUT)
return drm_property_replace_blob(&plane_state->hw.lut_3d, blob);
return false;
}
static void
intel_plane_color_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
const struct intel_plane_state *from_plane_state,
struct intel_crtc *crtc)
{
struct drm_colorop *iter_colorop, *colorop;
struct drm_colorop_state *new_colorop_state;
struct drm_atomic_state *state = plane_state->uapi.state;
struct intel_colorop *intel_colorop;
struct drm_property_blob *blob;
struct intel_atomic_state *intel_atomic_state = to_intel_atomic_state(state);
struct intel_crtc_state *new_crtc_state = intel_atomic_state ?
intel_atomic_get_new_crtc_state(intel_atomic_state, crtc) : NULL;
bool changed = false;
int i = 0;
iter_colorop = plane_state->uapi.color_pipeline;
while (iter_colorop) {
for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
if (new_colorop_state->colorop == iter_colorop) {
blob = new_colorop_state->bypass ? NULL : new_colorop_state->data;
intel_colorop = to_intel_colorop(colorop);
changed |= intel_plane_colorop_replace_blob(plane_state,
intel_colorop,
blob);
}
}
iter_colorop = iter_colorop->next;
}
if (new_crtc_state && changed)
new_crtc_state->plane_color_changed = true;
}
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
const struct intel_plane_state *from_plane_state,
struct intel_crtc *crtc)
@ -364,6 +417,8 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi);
plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi);
intel_plane_color_copy_uapi_to_hw_state(plane_state, from_plane_state, crtc);
}
void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,


@ -11,6 +11,8 @@
#include "pxp/intel_pxp.h"
#include "intel_bo.h"
#include "intel_color.h"
#include "intel_color_pipeline.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
@ -1275,6 +1277,18 @@ static u32 glk_plane_color_ctl(const struct intel_plane_state *plane_state)
if (plane_state->force_black)
plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
if (plane_state->hw.degamma_lut)
plane_color_ctl |= PLANE_COLOR_PRE_CSC_GAMMA_ENABLE;
if (plane_state->hw.ctm)
plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
if (plane_state->hw.gamma_lut) {
plane_color_ctl &= ~PLANE_COLOR_PLANE_GAMMA_DISABLE;
if (drm_color_lut32_size(plane_state->hw.gamma_lut) != 32)
plane_color_ctl |= PLANE_COLOR_POST_CSC_GAMMA_MULTSEG_ENABLE;
}
return plane_color_ctl;
}
@ -1556,6 +1570,8 @@ icl_plane_update_noarm(struct intel_dsb *dsb,
plane_color_ctl = plane_state->color_ctl |
glk_plane_color_ctl_crtc(crtc_state);
intel_color_plane_program_pipeline(dsb, plane_state);
/* The scaler will handle the output position */
if (plane_state->scaler_id >= 0) {
crtc_x = 0;
@ -1657,6 +1673,8 @@ icl_plane_update_arm(struct intel_dsb *dsb,
icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
intel_color_plane_commit_arm(dsb, plane_state);
/*
* In order to have FBC for fp16 formats pixel normalizer block must be
* active. Check if pixel normalizer block need to be enabled for FBC.
@ -3001,6 +3019,9 @@ skl_universal_plane_create(struct intel_display *display,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
if (DISPLAY_VER(display) >= 12)
intel_color_pipeline_plane_init(&plane->base, pipe);
drm_plane_create_alpha_property(&plane->base);
drm_plane_create_blend_mode_property(&plane->base,
BIT(DRM_MODE_BLEND_PIXEL_NONE) |


@ -254,6 +254,8 @@
#define PLANE_COLOR_PIPE_CSC_ENABLE REG_BIT(23) /* Pre-ICL */
#define PLANE_COLOR_PLANE_CSC_ENABLE REG_BIT(21) /* ICL+ */
#define PLANE_COLOR_INPUT_CSC_ENABLE REG_BIT(20) /* ICL+ */
#define PLANE_COLOR_POST_CSC_GAMMA_MULTSEG_ENABLE REG_BIT(15) /* TGL+ */
#define PLANE_COLOR_PRE_CSC_GAMMA_ENABLE REG_BIT(14)
#define PLANE_COLOR_CSC_MODE_MASK REG_GENMASK(19, 17)
#define PLANE_COLOR_CSC_MODE_BYPASS REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 0)
#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601 REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 1)
@ -290,6 +292,119 @@
_PLANE_INPUT_CSC_POSTOFF_HI_1_A, _PLANE_INPUT_CSC_POSTOFF_HI_1_B, \
_PLANE_INPUT_CSC_POSTOFF_HI_2_A, _PLANE_INPUT_CSC_POSTOFF_HI_2_B)
#define _MMIO_PLANE_GAMC(plane, i, a, b) _MMIO(_PIPE(plane, a, b) + (i) * 4)
#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_A 0x70160
#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_B 0x71160
#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_A 0x70260
#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_B 0x71260
#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_A, \
_PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_B)
#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_A, \
_PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_B)
#define PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1(pipe), \
_PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2(pipe))
#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_A 0x70164
#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_B 0x71164
#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_A 0x70264
#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_B 0x71264
#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_A, \
_PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_B)
#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_A, \
_PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_B)
#define PLANE_POST_CSC_GAMC_SEG0_DATA_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1(pipe), \
_PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2(pipe))
#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1_A 0x701d8
#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1_B 0x711d8
#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2_A 0x702d8
#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2_B 0x712d8
#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_ENH_1_A, \
_PLANE_POST_CSC_GAMC_INDEX_ENH_1_B)
#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_ENH_2_A, \
_PLANE_POST_CSC_GAMC_INDEX_ENH_2_B)
#define PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_INDEX_ENH_1(pipe), \
_PLANE_POST_CSC_GAMC_INDEX_ENH_2(pipe))
#define _PLANE_POST_CSC_GAMC_DATA_ENH_1_A 0x701dc
#define _PLANE_POST_CSC_GAMC_DATA_ENH_1_B 0x711dc
#define _PLANE_POST_CSC_GAMC_DATA_ENH_2_A 0x702dc
#define _PLANE_POST_CSC_GAMC_DATA_ENH_2_B 0x712dc
#define _PLANE_POST_CSC_GAMC_DATA_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_ENH_1_A, \
_PLANE_POST_CSC_GAMC_DATA_ENH_1_B)
#define _PLANE_POST_CSC_GAMC_DATA_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_ENH_2_A, \
_PLANE_POST_CSC_GAMC_DATA_ENH_2_B)
#define PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_DATA_ENH_1(pipe), \
_PLANE_POST_CSC_GAMC_DATA_ENH_2(pipe))
#define _PLANE_POST_CSC_GAMC_INDEX_1_A 0x704d8
#define _PLANE_POST_CSC_GAMC_INDEX_1_B 0x714d8
#define _PLANE_POST_CSC_GAMC_INDEX_2_A 0x705d8
#define _PLANE_POST_CSC_GAMC_INDEX_2_B 0x715d8
#define _PLANE_POST_CSC_GAMC_INDEX_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_1_A, \
_PLANE_POST_CSC_GAMC_INDEX_1_B)
#define _PLANE_POST_CSC_GAMC_INDEX_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_2_A, \
_PLANE_POST_CSC_GAMC_INDEX_2_B)
#define PLANE_POST_CSC_GAMC_INDEX(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_INDEX_1(pipe), \
_PLANE_POST_CSC_GAMC_INDEX_2(pipe))
#define _PLANE_POST_CSC_GAMC_DATA_1_A 0x704dc
#define _PLANE_POST_CSC_GAMC_DATA_1_B 0x714dc
#define _PLANE_POST_CSC_GAMC_DATA_2_A 0x705dc
#define _PLANE_POST_CSC_GAMC_DATA_2_B 0x715dc
#define _PLANE_POST_CSC_GAMC_DATA_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_1_A, \
_PLANE_POST_CSC_GAMC_DATA_1_B)
#define _PLANE_POST_CSC_GAMC_DATA_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_2_A, \
_PLANE_POST_CSC_GAMC_DATA_2_B)
#define PLANE_POST_CSC_GAMC_DATA(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_DATA_1(pipe), \
_PLANE_POST_CSC_GAMC_DATA_2(pipe))
#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_A 0x701d0
#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_B 0x711d0
#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_A 0x702d0
#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_B 0x712d0
#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_A, \
_PLANE_PRE_CSC_GAMC_INDEX_ENH_1_B)
#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_A, \
_PLANE_PRE_CSC_GAMC_INDEX_ENH_2_B)
#define PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_INDEX_ENH_1(pipe), \
_PLANE_PRE_CSC_GAMC_INDEX_ENH_2(pipe))
#define PLANE_PAL_PREC_AUTO_INCREMENT REG_BIT(10)
#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1_A 0x701d4
#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1_B 0x711d4
#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2_A 0x702d4
#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2_B 0x712d4
#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_ENH_1_A, \
_PLANE_PRE_CSC_GAMC_DATA_ENH_1_B)
#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_ENH_2_A, \
_PLANE_PRE_CSC_GAMC_DATA_ENH_2_B)
#define PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_DATA_ENH_1(pipe), \
_PLANE_PRE_CSC_GAMC_DATA_ENH_2(pipe))
#define _PLANE_PRE_CSC_GAMC_INDEX_1_A 0x704d0
#define _PLANE_PRE_CSC_GAMC_INDEX_1_B 0x714d0
#define _PLANE_PRE_CSC_GAMC_INDEX_2_A 0x705d0
#define _PLANE_PRE_CSC_GAMC_INDEX_2_B 0x715d0
#define _PLANE_PRE_CSC_GAMC_INDEX_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_1_A, \
_PLANE_PRE_CSC_GAMC_INDEX_1_B)
#define _PLANE_PRE_CSC_GAMC_INDEX_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_2_A, \
_PLANE_PRE_CSC_GAMC_INDEX_2_B)
#define PLANE_PRE_CSC_GAMC_INDEX(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_INDEX_1(pipe), \
_PLANE_PRE_CSC_GAMC_INDEX_2(pipe))
#define _PLANE_PRE_CSC_GAMC_DATA_1_A 0x704d4
#define _PLANE_PRE_CSC_GAMC_DATA_1_B 0x714d4
#define _PLANE_PRE_CSC_GAMC_DATA_2_A 0x705d4
#define _PLANE_PRE_CSC_GAMC_DATA_2_B 0x715d4
#define _PLANE_PRE_CSC_GAMC_DATA_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_1_A, \
_PLANE_PRE_CSC_GAMC_DATA_1_B)
#define _PLANE_PRE_CSC_GAMC_DATA_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_2_A, \
_PLANE_PRE_CSC_GAMC_DATA_2_B)
#define PLANE_PRE_CSC_GAMC_DATA(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_DATA_1(pipe), \
_PLANE_PRE_CSC_GAMC_DATA_2(pipe))
#define _PLANE_CSC_RY_GY_1_A 0x70210
#define _PLANE_CSC_RY_GY_2_A 0x70310
#define _PLANE_CSC_RY_GY_1_B 0x71210


@ -184,6 +184,10 @@ xe-$(CONFIG_PCI_IOV) += \
xe_sriov_pf_sysfs.o \
xe_tile_sriov_pf_debugfs.o
ifdef CONFIG_XE_VFIO_PCI
xe-$(CONFIG_PCI_IOV) += xe_sriov_vfio.o
endif
# include helpers for tests even when XE is built-in
ifdef CONFIG_DRM_XE_KUNIT_TEST
xe-y += tests/xe_kunit_helpers.o
@ -242,6 +246,8 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_cdclk.o \
i915-display/intel_cmtg.o \
i915-display/intel_color.o \
i915-display/intel_colorop.o \
i915-display/intel_color_pipeline.o \
i915-display/intel_combo_phy.o \
i915-display/intel_connector.o \
i915-display/intel_crtc.o \


@ -54,13 +54,14 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
struct drm_sched_job *s_job;
bool restore_replay = false;
list_for_each_entry(s_job, &sched->base.pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *hw_fence = s_fence->parent;
restore_replay |= to_xe_sched_job(s_job)->restore_replay;
if (restore_replay || (hw_fence && !dma_fence_is_signaled(hw_fence)))
sched->base.ops->run_job(s_job);
}
}


@ -711,7 +711,7 @@ static u64 pf_profile_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
if (num_vfs > 56)
return SZ_64M - SZ_8M;
return rounddown_pow_of_two(div_u64(shareable, num_vfs));
}
/**


@ -17,6 +17,7 @@
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
@ -1023,6 +1024,12 @@ static void action_ring_cleanup(void *arg)
ptr_ring_cleanup(r, destroy_pf_packet);
}
static void pf_gt_migration_check_support(struct xe_gt *gt)
{
if (GUC_FIRMWARE_VER(&gt->uc.guc) < MAKE_GUC_VER(70, 54, 0))
xe_sriov_pf_migration_disable(gt_to_xe(gt), "requires GuC version >= 70.54.0");
}
/**
* xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
* @gt: the &xe_gt
@ -1039,6 +1046,8 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
xe_gt_assert(gt, IS_SRIOV_PF(xe));
pf_gt_migration_check_support(gt);
if (!pf_migration_supported(gt))
return 0;


@ -822,7 +822,7 @@ static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
if (!job->restore_replay || job->last_replay) {
if (xe_exec_queue_is_parallel(q))
wq_item_append(q);
else
@ -881,10 +881,10 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
if (!killed_or_banned_or_wedged && !xe_sched_job_is_error(job)) {
if (!exec_queue_registered(q))
register_exec_queue(q, GUC_CONTEXT_NORMAL);
if (!job->restore_replay)
q->ring_ops->emit_job(job);
submit_exec_queue(q, job);
job->restore_replay = false;
}
/*
@ -2112,6 +2112,18 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc,
q->guc->resume_time = 0;
}
static void lrc_parallel_clear(struct xe_lrc *lrc)
{
struct xe_device *xe = gt_to_xe(lrc->gt);
struct iosys_map map = xe_lrc_parallel_map(lrc);
int i;
for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
parallel_write(xe, map, wq[i],
FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
FIELD_PREP(WQ_LEN_MASK, 0));
}
/*
* This function is quite complex but only real way to ensure no state is lost
* during VF resume flows. The function scans the queue state, make adjustments
@ -2135,8 +2147,8 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
guc_exec_queue_revert_pending_state_change(guc, q);
if (xe_exec_queue_is_parallel(q)) {
/* Pairs with WRITE_ONCE in __xe_exec_queue_init */
struct xe_lrc *lrc = READ_ONCE(q->lrc[0]);
/*
* NOP existing WQ commands that may contain stale GGTT
@ -2144,14 +2156,14 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
* seems to get confused if the WQ head/tail pointers are
* adjusted.
*/
if (lrc)
lrc_parallel_clear(lrc);
}
job = xe_sched_first_pending_job(sched);
if (job) {
job->restore_replay = true;
/*
* Adjust software tail so jobs submitted overwrite previous
* position in ring buffer with new GGTT addresses.
@ -2241,17 +2253,18 @@ static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_sched_job *job = NULL;
bool restore_replay = false;
list_for_each_entry(job, &sched->base.pending_list, drm.list) {
restore_replay |= job->restore_replay;
if (restore_replay) {
xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
q->guc->id, xe_sched_job_seqno(job));
q->ring_ops->emit_job(job);
job->restore_replay = true;
}
}
if (job)


@ -102,7 +102,6 @@ static int xe_pagefault_handle_vma(struct xe_gt *gt, struct xe_vma *vma,
/* Lock VM and BOs dma-resv */
xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
drm_exec_until_all_locked(&exec) {
err = xe_pagefault_begin(&exec, vma, tile->mem.vram,
needs_vram == 1);


@ -1223,6 +1223,23 @@ static struct pci_driver xe_pci_driver = {
#endif
};
/**
* xe_pci_to_pf_device() - Get PF &xe_device.
* @pdev: the VF &pci_dev device
*
* Return: pointer to PF &xe_device, NULL otherwise.
*/
struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev)
{
struct drm_device *drm;
drm = pci_iov_get_pf_drvdata(pdev, &xe_pci_driver);
if (IS_ERR(drm))
return NULL;
return to_xe_device(drm);
}
int xe_register_pci_driver(void)
{
return pci_register_driver(&xe_pci_driver);


@ -6,7 +6,10 @@
#ifndef _XE_PCI_H_
#define _XE_PCI_H_
struct pci_dev;
int xe_register_pci_driver(void);
void xe_unregister_pci_driver(void);
struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev);
#endif


@ -726,6 +726,13 @@ static void xe_pm_runtime_lockdep_prime(void)
/**
* xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
* @xe: xe device instance
*
* When possible, scope-based runtime PM (through guard(xe_pm_runtime)) should
* be preferred over direct usage of this function. Manual get/put handling
* should only be used when the function contains goto-based logic which
* can break scope-based handling, or when the lifetime of the runtime PM
* reference does not match a specific scope (e.g., runtime PM obtained in one
* function and released in a different one).
*/
void xe_pm_runtime_get(struct xe_device *xe)
{
@ -758,6 +765,13 @@ void xe_pm_runtime_put(struct xe_device *xe)
* xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
* @xe: xe device instance
*
* When possible, scope-based runtime PM (through
* ACQUIRE(xe_pm_runtime_ioctl, ...)) should be preferred over direct usage of this
* function. Manual get/put handling should only be used when the function
* contains goto-based logic which can break scope-based handling, or when the
* lifetime of the runtime PM reference does not match a specific scope (e.g.,
* runtime PM obtained in one function and released in a different one).
*
* Returns: Any number greater than or equal to 0 for success, negative error
* code otherwise.
*/
@ -827,6 +841,13 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
* It will warn if not protected.
* The reference should be put back after this function regardless, since it
* will always bump the usage counter.
*
* When possible, scope-based runtime PM (through guard(xe_pm_runtime_noresume))
* should be preferred over direct usage of this function. Manual get/put handling
* should only be used when the function contains goto-based logic which can
* break scope-based handling, or when the lifetime of the runtime PM reference
* does not match a specific scope (e.g., runtime PM obtained in one function
* and released in a different one).
*/
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{


@ -6,6 +6,7 @@
#ifndef _XE_PM_H_
#define _XE_PM_H_
#include <linux/cleanup.h>
#include <linux/pm_runtime.h>
#define DEFAULT_VRAM_THRESHOLD 300 /* in MB */
@ -37,4 +38,20 @@ int xe_pm_block_on_suspend(struct xe_device *xe);
void xe_pm_might_block_on_suspend(void);
int xe_pm_module_init(void);
static inline void __xe_pm_runtime_noop(struct xe_device *xe) {}
DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
xe_pm_runtime_get(_T), xe_pm_runtime_put(_T))
DEFINE_GUARD(xe_pm_runtime_noresume, struct xe_device *,
xe_pm_runtime_get_noresume(_T), xe_pm_runtime_put(_T))
DEFINE_GUARD_COND(xe_pm_runtime, _ioctl, xe_pm_runtime_get_ioctl(_T), _RET >= 0)
/*
* Used when a function needs to release runtime PM in all possible cases
* and error paths, but the wakeref was already acquired by a different
* function (i.e., get() has already happened so only a put() is needed).
*/
DEFINE_GUARD(xe_pm_runtime_release_only, struct xe_device *,
__xe_pm_runtime_noop(_T), xe_pm_runtime_put(_T));
#endif
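A minimal usage sketch of the scope-based helpers defined above; example_query() and do_hw_query() are hypothetical placeholders. The wakeref is taken at the guard declaration and dropped automatically at end of scope, on every return path, which is what the xe_pm_runtime_get() kernel-doc recommends where control flow allows.

/* Hypothetical caller; do_hw_query() stands in for real hardware access. */
static int example_query(struct xe_device *xe)
{
	guard(xe_pm_runtime)(xe);	/* xe_pm_runtime_get() now, put() at scope exit */

	return do_hw_query(xe);
}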


@ -63,8 +63,8 @@ struct xe_sched_job {
bool ring_ops_flush_tlb;
/** @ggtt: mapped in ggtt. */
bool ggtt;
/** @skip_emit: skip emitting the job */
bool skip_emit;
/** @restore_replay: job being replayed for restore */
bool restore_replay;
/** @last_replay: last job being replayed */
bool last_replay;
/** @ptrs: per instance pointers. */


@ -46,13 +46,37 @@ bool xe_sriov_pf_migration_supported(struct xe_device *xe)
{
xe_assert(xe, IS_SRIOV_PF(xe));
return IS_ENABLED(CONFIG_DRM_XE_DEBUG) || !xe->sriov.pf.migration.disabled;
}
/**
* xe_sriov_pf_migration_disable() - Turn off SR-IOV VF migration support on PF.
* @xe: the &xe_device instance.
* @fmt: format string for the log message, to be combined with following VAs.
*/
void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...)
{
struct va_format vaf;
va_list va_args;
xe_assert(xe, IS_SRIOV_PF(xe));
va_start(va_args, fmt);
vaf.fmt = fmt;
vaf.va = &va_args;
xe_sriov_notice(xe, "migration %s: %pV\n",
IS_ENABLED(CONFIG_DRM_XE_DEBUG) ?
"missing prerequisite" : "disabled",
&vaf);
va_end(va_args);
xe->sriov.pf.migration.disabled = true;
}
static void pf_migration_check_support(struct xe_device *xe)
{
if (!xe_device_has_memirq(xe))
xe_sriov_pf_migration_disable(xe, "requires memory-based IRQ support");
}
static void pf_migration_cleanup(void *arg)
@ -77,7 +101,8 @@ int xe_sriov_pf_migration_init(struct xe_device *xe)
xe_assert(xe, IS_SRIOV_PF(xe));
pf_migration_check_support(xe);
if (!xe_sriov_pf_migration_supported(xe))
return 0;


@ -14,6 +14,7 @@ struct xe_sriov_packet;
int xe_sriov_pf_migration_init(struct xe_device *xe);
bool xe_sriov_pf_migration_supported(struct xe_device *xe);
void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...);
int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid,
struct xe_sriov_packet *data);
struct xe_sriov_packet *


@ -14,8 +14,8 @@
* struct xe_sriov_pf_migration - Xe device level VF migration data
*/
struct xe_sriov_pf_migration {
/** @supported: indicates whether VF migration feature is supported */
bool supported;
/** @disabled: indicates whether VF migration feature is disabled */
bool disabled;
};
/**


@ -0,0 +1,80 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2025 Intel Corporation
*/
#include <drm/intel/xe_sriov_vfio.h>
#include <linux/cleanup.h>
#include "xe_pci.h"
#include "xe_pm.h"
#include "xe_sriov_pf_control.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_pf_migration.h"
struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev)
{
return xe_pci_to_pf_device(pdev);
}
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_get_pf, "xe-vfio-pci");
bool xe_sriov_vfio_migration_supported(struct xe_device *xe)
{
if (!IS_SRIOV_PF(xe))
return false;
return xe_sriov_pf_migration_supported(xe);
}
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_migration_supported, "xe-vfio-pci");
#define DEFINE_XE_SRIOV_VFIO_FUNCTION(_type, _func, _impl) \
_type xe_sriov_vfio_##_func(struct xe_device *xe, unsigned int vfid) \
{ \
if (!IS_SRIOV_PF(xe)) \
return -EPERM; \
if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe)) \
return -EINVAL; \
\
guard(xe_pm_runtime_noresume)(xe); \
\
return xe_sriov_pf_##_impl(xe, vfid); \
} \
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_##_func, "xe-vfio-pci")
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, wait_flr_done, control_wait_flr);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, suspend_device, control_pause_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_device, control_resume_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_enter, control_trigger_save_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_exit, control_finish_save_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_enter, control_trigger_restore_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_exit, control_finish_restore_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(int, error, control_stop_vf);
DEFINE_XE_SRIOV_VFIO_FUNCTION(ssize_t, stop_copy_size, migration_size);
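For readability, a single instantiation of the macro above expands (modulo the EXPORT_SYMBOL_FOR_MODULES annotation) to roughly:

int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid)
{
	if (!IS_SRIOV_PF(xe))
		return -EPERM;
	if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
		return -EINVAL;

	guard(xe_pm_runtime_noresume)(xe);

	return xe_sriov_pf_control_pause_vf(xe, vfid);
}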
ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
char __user *buf, size_t len)
{
if (!IS_SRIOV_PF(xe))
return -EPERM;
if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
return -EINVAL;
guard(xe_pm_runtime_noresume)(xe);
return xe_sriov_pf_migration_read(xe, vfid, buf, len);
}
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_read, "xe-vfio-pci");
ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
const char __user *buf, size_t len)
{
if (!IS_SRIOV_PF(xe))
return -EPERM;
if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
return -EINVAL;
guard(xe_pm_runtime_noresume)(xe);
return xe_sriov_pf_migration_write(xe, vfid, buf, len);
}
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_write, "xe-vfio-pci");


@ -70,4 +70,6 @@ source "drivers/vfio/pci/nvgrace-gpu/Kconfig"
source "drivers/vfio/pci/qat/Kconfig"
source "drivers/vfio/pci/xe/Kconfig"
endmenu


@ -20,3 +20,5 @@ obj-$(CONFIG_VIRTIO_VFIO_PCI) += virtio/
obj-$(CONFIG_NVGRACE_GPU_VFIO_PCI) += nvgrace-gpu/
obj-$(CONFIG_QAT_VFIO_PCI) += qat/
obj-$(CONFIG_XE_VFIO_PCI) += xe/


@ -0,0 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
config XE_VFIO_PCI
tristate "VFIO support for Intel Graphics"
depends on DRM_XE && PCI_IOV
select VFIO_PCI_CORE
help
This option enables the device-specific VFIO driver variant for Intel
Graphics. In addition to generic VFIO PCI functionality, it implements the
VFIO migration uAPI, allowing userspace to enable migration for
Intel Graphics SR-IOV Virtual Functions supported by the Xe driver.
If you don't know what to do here, say N.


@ -0,0 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_XE_VFIO_PCI) += xe-vfio-pci.o
xe-vfio-pci-y := main.o

drivers/vfio/pci/xe/main.c

@ -0,0 +1,573 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2025 Intel Corporation
*/
#include <linux/anon_inodes.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <drm/intel/xe_sriov_vfio.h>
#include <drm/intel/pciids.h>
struct xe_vfio_pci_migration_file {
struct file *filp;
/* serializes accesses to migration data */
struct mutex lock;
struct xe_vfio_pci_core_device *xe_vdev;
u8 disabled:1;
};
struct xe_vfio_pci_core_device {
struct vfio_pci_core_device core_device;
struct xe_device *xe;
/* PF internal control uses vfid index starting from 1 */
unsigned int vfid;
u8 deferred_reset:1;
/* protects migration state */
struct mutex state_mutex;
enum vfio_device_mig_state mig_state;
/* protects the reset_done flow */
spinlock_t reset_lock;
struct xe_vfio_pci_migration_file *migf;
};
#define xe_vdev_to_dev(xe_vdev) (&(xe_vdev)->core_device.pdev->dev)
static void xe_vfio_pci_disable_file(struct xe_vfio_pci_migration_file *migf)
{
mutex_lock(&migf->lock);
migf->disabled = true;
mutex_unlock(&migf->lock);
}
static void xe_vfio_pci_put_file(struct xe_vfio_pci_core_device *xe_vdev)
{
xe_vfio_pci_disable_file(xe_vdev->migf);
fput(xe_vdev->migf->filp);
xe_vdev->migf = NULL;
}
static void xe_vfio_pci_reset(struct xe_vfio_pci_core_device *xe_vdev)
{
if (xe_vdev->migf)
xe_vfio_pci_put_file(xe_vdev);
xe_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
}
static void xe_vfio_pci_state_mutex_lock(struct xe_vfio_pci_core_device *xe_vdev)
{
mutex_lock(&xe_vdev->state_mutex);
}
/*
* This function is called in all state_mutex unlock cases to
* handle a 'deferred_reset' if one exists.
*/
static void xe_vfio_pci_state_mutex_unlock(struct xe_vfio_pci_core_device *xe_vdev)
{
again:
spin_lock(&xe_vdev->reset_lock);
if (xe_vdev->deferred_reset) {
xe_vdev->deferred_reset = false;
spin_unlock(&xe_vdev->reset_lock);
xe_vfio_pci_reset(xe_vdev);
goto again;
}
mutex_unlock(&xe_vdev->state_mutex);
spin_unlock(&xe_vdev->reset_lock);
}
static void xe_vfio_pci_reset_done(struct pci_dev *pdev)
{
struct xe_vfio_pci_core_device *xe_vdev = pci_get_drvdata(pdev);
int ret;
if (!pdev->is_virtfn)
return;
/* Nothing to do if migration was not initialized for this VF */
if (!xe_vdev->vfid)
return;
/*
* VF FLR requires additional processing done by PF driver.
* The processing is done after FLR is already finished from PCIe
* perspective.
* In order to avoid a scenario where the VF is used while PF processing
* is still in progress, an additional synchronization point is needed.
*/
ret = xe_sriov_vfio_wait_flr_done(xe_vdev->xe, xe_vdev->vfid);
if (ret)
dev_err(&pdev->dev, "Failed to wait for FLR: %d\n", ret);
/*
* As the higher VFIO layers are holding locks across reset and using
* those same locks with the mm_lock, we need to prevent an ABBA deadlock
* between the state_mutex and the mm_lock.
* In case the state_mutex was taken already we defer the cleanup work
* to the unlock flow of the other running context.
*/
spin_lock(&xe_vdev->reset_lock);
xe_vdev->deferred_reset = true;
if (!mutex_trylock(&xe_vdev->state_mutex)) {
spin_unlock(&xe_vdev->reset_lock);
return;
}
spin_unlock(&xe_vdev->reset_lock);
xe_vfio_pci_state_mutex_unlock(xe_vdev);
xe_vfio_pci_reset(xe_vdev);
}
static const struct pci_error_handlers xe_vfio_pci_err_handlers = {
.reset_done = xe_vfio_pci_reset_done,
.error_detected = vfio_pci_core_aer_err_detected,
};
static int xe_vfio_pci_open_device(struct vfio_device *core_vdev)
{
struct xe_vfio_pci_core_device *xe_vdev =
container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
struct vfio_pci_core_device *vdev = &xe_vdev->core_device;
int ret;
ret = vfio_pci_core_enable(vdev);
if (ret)
return ret;
xe_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
vfio_pci_core_finish_enable(vdev);
return 0;
}
static void xe_vfio_pci_close_device(struct vfio_device *core_vdev)
{
struct xe_vfio_pci_core_device *xe_vdev =
container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
xe_vfio_pci_state_mutex_lock(xe_vdev);
xe_vfio_pci_reset(xe_vdev);
xe_vfio_pci_state_mutex_unlock(xe_vdev);
vfio_pci_core_close_device(core_vdev);
}
static int xe_vfio_pci_release_file(struct inode *inode, struct file *filp)
{
struct xe_vfio_pci_migration_file *migf = filp->private_data;
mutex_destroy(&migf->lock);
kfree(migf);
return 0;
}
static ssize_t xe_vfio_pci_save_read(struct file *filp, char __user *buf, size_t len, loff_t *pos)
{
struct xe_vfio_pci_migration_file *migf = filp->private_data;
ssize_t ret;
if (pos)
return -ESPIPE;
mutex_lock(&migf->lock);
if (migf->disabled) {
mutex_unlock(&migf->lock);
return -ENODEV;
}
ret = xe_sriov_vfio_data_read(migf->xe_vdev->xe, migf->xe_vdev->vfid, buf, len);
mutex_unlock(&migf->lock);
return ret;
}
static const struct file_operations xe_vfio_pci_save_fops = {
.owner = THIS_MODULE,
.read = xe_vfio_pci_save_read,
.release = xe_vfio_pci_release_file,
.llseek = noop_llseek,
};
static ssize_t xe_vfio_pci_resume_write(struct file *filp, const char __user *buf,
size_t len, loff_t *pos)
{
struct xe_vfio_pci_migration_file *migf = filp->private_data;
ssize_t ret;
if (pos)
return -ESPIPE;
mutex_lock(&migf->lock);
if (migf->disabled) {
mutex_unlock(&migf->lock);
return -ENODEV;
}
ret = xe_sriov_vfio_data_write(migf->xe_vdev->xe, migf->xe_vdev->vfid, buf, len);
mutex_unlock(&migf->lock);
return ret;
}
static const struct file_operations xe_vfio_pci_resume_fops = {
.owner = THIS_MODULE,
.write = xe_vfio_pci_resume_write,
.release = xe_vfio_pci_release_file,
.llseek = noop_llseek,
};
static const char *vfio_dev_state_str(u32 state)
{
switch (state) {
case VFIO_DEVICE_STATE_RUNNING: return "running";
case VFIO_DEVICE_STATE_RUNNING_P2P: return "running_p2p";
case VFIO_DEVICE_STATE_STOP_COPY: return "stopcopy";
case VFIO_DEVICE_STATE_STOP: return "stop";
case VFIO_DEVICE_STATE_RESUMING: return "resuming";
case VFIO_DEVICE_STATE_ERROR: return "error";
default: return "";
}
}
enum xe_vfio_pci_file_type {
XE_VFIO_FILE_SAVE = 0,
XE_VFIO_FILE_RESUME,
};
static struct xe_vfio_pci_migration_file *
xe_vfio_pci_alloc_file(struct xe_vfio_pci_core_device *xe_vdev,
enum xe_vfio_pci_file_type type)
{
struct xe_vfio_pci_migration_file *migf;
const struct file_operations *fops;
int flags;
migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
if (!migf)
return ERR_PTR(-ENOMEM);
fops = type == XE_VFIO_FILE_SAVE ? &xe_vfio_pci_save_fops : &xe_vfio_pci_resume_fops;
flags = type == XE_VFIO_FILE_SAVE ? O_RDONLY : O_WRONLY;
migf->filp = anon_inode_getfile("xe_vfio_mig", fops, migf, flags);
if (IS_ERR(migf->filp)) {
struct file *filp = migf->filp;

kfree(migf);
return ERR_CAST(filp);
}
mutex_init(&migf->lock);
migf->xe_vdev = xe_vdev;
xe_vdev->migf = migf;
stream_open(migf->filp->f_inode, migf->filp);
return migf;
}
static struct file *
xe_vfio_set_state(struct xe_vfio_pci_core_device *xe_vdev, u32 new)
{
u32 cur = xe_vdev->mig_state;
int ret;
dev_dbg(xe_vdev_to_dev(xe_vdev),
"state: %s->%s\n", vfio_dev_state_str(cur), vfio_dev_state_str(new));
/*
* "STOP" handling is reused for "RUNNING_P2P", as the device doesn't
* have the capability to selectively block outgoing p2p DMA transfers.
* While the device is allowing BAR accesses when the VF is stopped, it
* is not processing any new workload requests, effectively stopping
* any outgoing DMA transfers (not just p2p).
* Any VRAM / MMIO accesses occurring during "RUNNING_P2P" are kept and
* will be migrated to the target VF during stop-copy.
*/
if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
ret = xe_sriov_vfio_suspend_device(xe_vdev->xe, xe_vdev->vfid);
if (ret)
goto err;
return NULL;
}
if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) ||
(cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P))
return NULL;
if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) {
ret = xe_sriov_vfio_resume_device(xe_vdev->xe, xe_vdev->vfid);
if (ret)
goto err;
return NULL;
}
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
struct xe_vfio_pci_migration_file *migf;
migf = xe_vfio_pci_alloc_file(xe_vdev, XE_VFIO_FILE_SAVE);
if (IS_ERR(migf)) {
ret = PTR_ERR(migf);
goto err;
}
get_file(migf->filp);
ret = xe_sriov_vfio_stop_copy_enter(xe_vdev->xe, xe_vdev->vfid);
if (ret) {
fput(migf->filp);
goto err;
}
return migf->filp;
}
if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
if (xe_vdev->migf)
xe_vfio_pci_put_file(xe_vdev);
ret = xe_sriov_vfio_stop_copy_exit(xe_vdev->xe, xe_vdev->vfid);
if (ret)
goto err;
return NULL;
}
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
struct xe_vfio_pci_migration_file *migf;
migf = xe_vfio_pci_alloc_file(xe_vdev, XE_VFIO_FILE_RESUME);
if (IS_ERR(migf)) {
ret = PTR_ERR(migf);
goto err;
}
get_file(migf->filp);
ret = xe_sriov_vfio_resume_data_enter(xe_vdev->xe, xe_vdev->vfid);
if (ret) {
fput(migf->filp);
goto err;
}
return migf->filp;
}
if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
if (xe_vdev->migf)
xe_vfio_pci_put_file(xe_vdev);
ret = xe_sriov_vfio_resume_data_exit(xe_vdev->xe, xe_vdev->vfid);
if (ret)
goto err;
return NULL;
}
WARN(true, "Unknown state transition %d->%d", cur, new);
return ERR_PTR(-EINVAL);
err:
dev_dbg(xe_vdev_to_dev(xe_vdev),
"Failed to transition state: %s->%s err=%d\n",
vfio_dev_state_str(cur), vfio_dev_state_str(new), ret);
return ERR_PTR(ret);
}
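In summary, the single-step arcs handled by xe_vfio_set_state() above are the following; vfio_mig_get_next_state() decomposes any other requested transition into a sequence of these:

/*
 * RUNNING      <-> RUNNING_P2P : suspend/resume the VF
 * RUNNING_P2P  <-> STOP        : no-op (STOP handling reused for P2P)
 * STOP          -> STOP_COPY   : create save file, trigger data save
 * STOP_COPY     -> STOP        : finish data save, drop save file
 * STOP          -> RESUMING    : create resume file, start data restore
 * RESUMING      -> STOP        : finish data restore, drop resume file
 */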
static struct file *
xe_vfio_pci_set_device_state(struct vfio_device *core_vdev,
enum vfio_device_mig_state new_state)
{
struct xe_vfio_pci_core_device *xe_vdev =
container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
enum vfio_device_mig_state next_state;
struct file *f = NULL;
int ret;
xe_vfio_pci_state_mutex_lock(xe_vdev);
while (new_state != xe_vdev->mig_state) {
ret = vfio_mig_get_next_state(core_vdev, xe_vdev->mig_state,
new_state, &next_state);
if (ret) {
xe_sriov_vfio_error(xe_vdev->xe, xe_vdev->vfid);
f = ERR_PTR(ret);
break;
}
f = xe_vfio_set_state(xe_vdev, next_state);
if (IS_ERR(f))
break;
xe_vdev->mig_state = next_state;
/* Multiple state transitions with non-NULL file in the middle */
if (f && new_state != xe_vdev->mig_state) {
fput(f);
f = ERR_PTR(-EINVAL);
break;
}
}
xe_vfio_pci_state_mutex_unlock(xe_vdev);
return f;
}
static int xe_vfio_pci_get_device_state(struct vfio_device *core_vdev,
enum vfio_device_mig_state *curr_state)
{
struct xe_vfio_pci_core_device *xe_vdev =
container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
xe_vfio_pci_state_mutex_lock(xe_vdev);
*curr_state = xe_vdev->mig_state;
xe_vfio_pci_state_mutex_unlock(xe_vdev);
return 0;
}
static int xe_vfio_pci_get_data_size(struct vfio_device *vdev,
unsigned long *stop_copy_length)
{
struct xe_vfio_pci_core_device *xe_vdev =
container_of(vdev, struct xe_vfio_pci_core_device, core_device.vdev);
xe_vfio_pci_state_mutex_lock(xe_vdev);
*stop_copy_length = xe_sriov_vfio_stop_copy_size(xe_vdev->xe, xe_vdev->vfid);
xe_vfio_pci_state_mutex_unlock(xe_vdev);
return 0;
}
static const struct vfio_migration_ops xe_vfio_pci_migration_ops = {
.migration_set_state = xe_vfio_pci_set_device_state,
.migration_get_state = xe_vfio_pci_get_device_state,
.migration_get_data_size = xe_vfio_pci_get_data_size,
};
static void xe_vfio_pci_migration_init(struct xe_vfio_pci_core_device *xe_vdev)
{
struct vfio_device *core_vdev = &xe_vdev->core_device.vdev;
struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
struct xe_device *xe = xe_sriov_vfio_get_pf(pdev);
if (!xe)
return;
if (!xe_sriov_vfio_migration_supported(xe))
return;
mutex_init(&xe_vdev->state_mutex);
spin_lock_init(&xe_vdev->reset_lock);
/* PF internal control uses vfid index starting from 1 */
xe_vdev->vfid = pci_iov_vf_id(pdev) + 1;
xe_vdev->xe = xe;
core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
core_vdev->mig_ops = &xe_vfio_pci_migration_ops;
}
static void xe_vfio_pci_migration_fini(struct xe_vfio_pci_core_device *xe_vdev)
{
if (!xe_vdev->vfid)
return;
mutex_destroy(&xe_vdev->state_mutex);
}
static int xe_vfio_pci_init_dev(struct vfio_device *core_vdev)
{
struct xe_vfio_pci_core_device *xe_vdev =
container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
xe_vfio_pci_migration_init(xe_vdev);
return vfio_pci_core_init_dev(core_vdev);
}
static void xe_vfio_pci_release_dev(struct vfio_device *core_vdev)
{
struct xe_vfio_pci_core_device *xe_vdev =
container_of(core_vdev, struct xe_vfio_pci_core_device, core_device.vdev);
xe_vfio_pci_migration_fini(xe_vdev);
}
static const struct vfio_device_ops xe_vfio_pci_ops = {
.name = "xe-vfio-pci",
.init = xe_vfio_pci_init_dev,
.release = xe_vfio_pci_release_dev,
.open_device = xe_vfio_pci_open_device,
.close_device = xe_vfio_pci_close_device,
.ioctl = vfio_pci_core_ioctl,
.device_feature = vfio_pci_core_ioctl_feature,
.read = vfio_pci_core_read,
.write = vfio_pci_core_write,
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
.match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
static int xe_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct xe_vfio_pci_core_device *xe_vdev;
int ret;
xe_vdev = vfio_alloc_device(xe_vfio_pci_core_device, core_device.vdev, &pdev->dev,
&xe_vfio_pci_ops);
if (IS_ERR(xe_vdev))
return PTR_ERR(xe_vdev);
dev_set_drvdata(&pdev->dev, &xe_vdev->core_device);
ret = vfio_pci_core_register_device(&xe_vdev->core_device);
if (ret) {
vfio_put_device(&xe_vdev->core_device.vdev);
return ret;
}
return 0;
}
static void xe_vfio_pci_remove(struct pci_dev *pdev)
{
struct xe_vfio_pci_core_device *xe_vdev = pci_get_drvdata(pdev);
vfio_pci_core_unregister_device(&xe_vdev->core_device);
vfio_put_device(&xe_vdev->core_device.vdev);
}
#define INTEL_PCI_VFIO_DEVICE(_id) { \
PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, (_id)) \
}
static const struct pci_device_id xe_vfio_pci_table[] = {
INTEL_PTL_IDS(INTEL_PCI_VFIO_DEVICE),
INTEL_WCL_IDS(INTEL_PCI_VFIO_DEVICE),
INTEL_BMG_IDS(INTEL_PCI_VFIO_DEVICE),
{}
};
MODULE_DEVICE_TABLE(pci, xe_vfio_pci_table);
static struct pci_driver xe_vfio_pci_driver = {
.name = "xe-vfio-pci",
.id_table = xe_vfio_pci_table,
.probe = xe_vfio_pci_probe,
.remove = xe_vfio_pci_remove,
.err_handler = &xe_vfio_pci_err_handlers,
.driver_managed_dma = true,
};
module_pci_driver(xe_vfio_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michał Winiarski <michal.winiarski@intel.com>");
MODULE_DESCRIPTION("VFIO PCI driver with migration support for Intel Graphics");


@@ -0,0 +1,143 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2025 Intel Corporation
 */

#ifndef _XE_SRIOV_VFIO_H_
#define _XE_SRIOV_VFIO_H_

#include <linux/types.h>

struct pci_dev;
struct xe_device;

/**
 * xe_sriov_vfio_get_pf() - Get PF &xe_device.
 * @pdev: the VF &pci_dev device
 *
 * Return: pointer to PF &xe_device, or NULL otherwise.
 */
struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev);

/**
 * xe_sriov_vfio_migration_supported() - Check if migration is supported.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 *
 * Return: true if migration is supported, false otherwise.
 */
bool xe_sriov_vfio_migration_supported(struct xe_device *xe);

/**
 * xe_sriov_vfio_wait_flr_done() - Wait for VF FLR completion.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * This function will wait until the VF FLR is processed by the PF on all
 * tiles (or until a timeout occurs).
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_suspend_device() - Suspend VF.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * This function will pause the VF on all tiles/GTs.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_resume_device() - Resume VF.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * This function will resume the VF on all tiles.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_resume_device(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_stop_copy_enter() - Initiate a VF device migration data save.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_stop_copy_enter(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_stop_copy_exit() - Finish a VF device migration data save.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_stop_copy_exit(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_resume_data_enter() - Initiate a VF device migration data restore.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_resume_data_enter(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_resume_data_exit() - Finish a VF device migration data restore.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_resume_data_exit(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_error() - Move VF device to error state.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * A reset is needed to move the VF out of the error state.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_vfio_error(struct xe_device *xe, unsigned int vfid);

/**
 * xe_sriov_vfio_data_read() - Read migration data from the VF device.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 * @buf: start address of the userspace buffer
 * @len: requested read size from userspace
 *
 * Return: number of bytes that have been successfully read,
 * 0 if no more migration data is available, or -errno on failure.
 */
ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
                                char __user *buf, size_t len);

/**
 * xe_sriov_vfio_data_write() - Write migration data to the VF device.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 * @buf: start address of the userspace buffer
 * @len: requested write size from userspace
 *
 * Return: number of bytes that have been successfully written, or -errno on failure.
 */
ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
                                 const char __user *buf, size_t len);

/**
 * xe_sriov_vfio_stop_copy_size() - Get a size estimate of VF device migration data.
 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
 * @vfid: the VF identifier (can't be 0)
 *
 * Return: migration data size in bytes or a negative error code on failure.
 */
ssize_t xe_sriov_vfio_stop_copy_size(struct xe_device *xe, unsigned int vfid);

#endif
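
Taken together, the kerneldoc above implies a straightforward save-side sequence: check support, enter stop-copy, drain the stream with xe_sriov_vfio_data_read() until it returns 0, then exit stop-copy. A minimal sketch under those assumptions — the wrapper and its buffer handling are invented for illustration; only the xe_sriov_vfio_*() calls come from this header:

/* Hedged sketch of one stop-copy save pass; not code that exists in the tree. */
static ssize_t example_save_pass(struct xe_device *xe, unsigned int vfid,
                                 char __user *buf, size_t len)
{
        ssize_t copied;
        int ret;

        if (!xe_sriov_vfio_migration_supported(xe))
                return -EOPNOTSUPP;

        ret = xe_sriov_vfio_stop_copy_enter(xe, vfid);
        if (ret)
                return ret;

        /* 0 means no more migration data; negative values are errors. */
        copied = xe_sriov_vfio_data_read(xe, vfid, buf, len);

        ret = xe_sriov_vfio_stop_copy_exit(xe, vfid);
        if (copied >= 0 && ret)
                copied = ret;

        return copied;
}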

View File

@@ -26,7 +26,9 @@
#include <crypto/aes.h>

#define TPM_DIGEST_SIZE 20	/* Max TPM v1.2 PCR size */
#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define TPM2_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define TPM2_MAX_PCR_BANKS 8

struct tpm_chip;
struct trusted_key_payload;
@@ -68,7 +70,7 @@ enum tpm2_curves {
struct tpm_digest {
        u16 alg_id;
-       u8 digest[TPM_MAX_DIGEST_SIZE];
+       u8 digest[TPM2_MAX_DIGEST_SIZE];
} __packed;

struct tpm_bank_info {
@@ -189,7 +191,7 @@ struct tpm_chip {
        unsigned int groups_cnt;

        u32 nr_allocated_banks;
-       struct tpm_bank_info *allocated_banks;
+       struct tpm_bank_info allocated_banks[TPM2_MAX_PCR_BANKS];
#ifdef CONFIG_ACPI
        acpi_handle acpi_dev_handle;
        char ppi_version[TPM_PPI_VERSION_LEN + 1];
@@ -454,8 +456,10 @@ static inline ssize_t tpm_ret_to_err(ssize_t ret)
                return 0;
        case TPM2_RC_SESSION_MEMORY:
                return -ENOMEM;
+       case TPM2_RC_HASH:
+               return -EINVAL;
        default:
-               return -EFAULT;
+               return -EPERM;
        }
}
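
With TPM2_RC_HASH handled explicitly and the default collapsed to -EPERM, callers can funnel every raw TPM return code through tpm_ret_to_err() instead of open-coding the translation. A hedged illustration of the calling convention — the wrapper below is hypothetical, while tpm_transmit_cmd() and tpm_ret_to_err() are the real in-tree helpers:

/* Hypothetical wrapper: negative rc passes through as an errno, 0 stays
 * success, and positive TPM 2.0 return codes are mapped by tpm_ret_to_err()
 * (TPM2_RC_HASH -> -EINVAL, TPM2_RC_SESSION_MEMORY -> -ENOMEM,
 * anything else -> -EPERM). */
static ssize_t example_tpm_cmd(struct tpm_chip *chip, struct tpm_buf *buf)
{
        ssize_t rc = tpm_transmit_cmd(chip, buf, 0, "example command");

        return tpm_ret_to_err(rc);
}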

View File

@@ -358,17 +358,17 @@ int cap_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry)
        return error;
}

-static bool rootid_owns_currentns(vfsuid_t rootvfsuid)
+/**
+ * kuid_root_in_ns - check whether the given kuid is root in the given ns
+ * @kuid: the kuid to be tested
+ * @ns: the user namespace to test against
+ *
+ * Returns true if @kuid represents the root user in @ns, false otherwise.
+ */
+static bool kuid_root_in_ns(kuid_t kuid, struct user_namespace *ns)
{
-       struct user_namespace *ns;
-       kuid_t kroot;
-
-       if (!vfsuid_valid(rootvfsuid))
-               return false;
-
-       kroot = vfsuid_into_kuid(rootvfsuid);
-       for (ns = current_user_ns();; ns = ns->parent) {
-               if (from_kuid(ns, kroot) == 0)
+       for (;; ns = ns->parent) {
+               if (from_kuid(ns, kuid) == 0)
                        return true;
                if (ns == &init_user_ns)
                        break;
@@ -377,6 +377,16 @@ static bool rootid_owns_currentns(vfsuid_t rootvfsuid)
        return false;
}

+static bool vfsuid_root_in_currentns(vfsuid_t vfsuid)
+{
+       kuid_t kuid;
+
+       if (!vfsuid_valid(vfsuid))
+               return false;
+
+       kuid = vfsuid_into_kuid(vfsuid);
+       return kuid_root_in_ns(kuid, current_user_ns());
+}
+
static __u32 sansflags(__u32 m)
{
        return m & ~VFS_CAP_FLAGS_EFFECTIVE;
@@ -481,7 +491,7 @@ int cap_inode_getsecurity(struct mnt_idmap *idmap,
                goto out_free;
        }

-       if (!rootid_owns_currentns(vfsroot)) {
+       if (!vfsuid_root_in_currentns(vfsroot)) {
                size = -EOVERFLOW;
                goto out_free;
        }
@@ -722,7 +732,7 @@ int get_vfs_caps_from_disk(struct mnt_idmap *idmap,
        /* Limit the caps to the mounter of the filesystem
         * or the more limited uid specified in the xattr.
         */
-       if (!rootid_owns_currentns(rootvfsuid))
+       if (!vfsuid_root_in_currentns(rootvfsuid))
                return -ENODATA;

        cpu_caps->permitted.val = le32_to_cpu(caps->data[0].permitted);
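
Splitting the namespace walk out into kuid_root_in_ns() leaves a pure kuid/namespace predicate that a kunit test can drive directly, without constructing vfsuids. A sketch of what such a test might look like — the suite below is an illustrative assumption, and since kuid_root_in_ns() is static, a real test would have to be built into the same translation unit:

#include <kunit/test.h>

/* Hypothetical test cases for the static helper above. */
static void kuid_root_in_ns_test(struct kunit *test)
{
        /* Global root maps to uid 0 in the initial namespace. */
        KUNIT_EXPECT_TRUE(test, kuid_root_in_ns(GLOBAL_ROOT_UID, &init_user_ns));

        /* An unprivileged kuid never maps to root in the initial namespace. */
        KUNIT_EXPECT_FALSE(test, kuid_root_in_ns(KUIDT_INIT(1000), &init_user_ns));
}

static struct kunit_case commoncap_test_cases[] = {
        KUNIT_CASE(kuid_root_in_ns_test),
        {}
};

static struct kunit_suite commoncap_test_suite = {
        .name = "commoncap",
        .test_cases = commoncap_test_cases,
};
kunit_test_suite(commoncap_test_suite);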

View File

@@ -333,25 +333,19 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
        }

        blob_len = tpm2_key_encode(payload, options, &buf.data[offset], blob_len);
+       if (blob_len < 0)
+               rc = blob_len;

out:
        tpm_buf_destroy(&sized);
        tpm_buf_destroy(&buf);

-       if (rc > 0) {
-               if (tpm2_rc_value(rc) == TPM2_RC_HASH)
-                       rc = -EINVAL;
-               else
-                       rc = -EPERM;
-       }
-
-       if (blob_len < 0)
-               rc = blob_len;
-       else
+       if (!rc)
                payload->blob_len = blob_len;

out_put:
        tpm_put_ops(chip);
-       return rc;
+       return tpm_ret_to_err(rc);
}

/**
@@ -455,10 +449,7 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
out:
        tpm_buf_destroy(&buf);

-       if (rc > 0)
-               rc = -EPERM;
-
-       return rc;
+       return tpm_ret_to_err(rc);
}

/**
@@ -521,8 +512,6 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
        tpm_buf_fill_hmac_session(chip, &buf);
        rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
        rc = tpm_buf_check_hmac_response(chip, &buf, rc);
-       if (rc > 0)
-               rc = -EPERM;

        if (!rc) {
                data_len = be16_to_cpup(
@@ -555,7 +544,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
out:
        tpm_buf_destroy(&buf);

-       return rc;
+       return tpm_ret_to_err(rc);
}

/**
@@ -587,6 +576,5 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
out:
        tpm_put_ops(chip);

-       return rc;
+       return tpm_ret_to_err(rc);
}

View File

@@ -437,7 +437,7 @@ class Client:
    def extend_pcr(self, i, dig, bank_alg = TPM2_ALG_SHA1):
        ds = get_digest_size(bank_alg)
-        assert(ds == len(dig))
+        assert ds == len(dig)

        auth_cmd = AuthCommand()
@@ -589,7 +589,7 @@ class Client:
    def seal(self, parent_key, data, auth_value, policy_dig,
             name_alg = TPM2_ALG_SHA1):
        ds = get_digest_size(name_alg)
-        assert(not policy_dig or ds == len(policy_dig))
+        assert not policy_dig or ds == len(policy_dig)

        attributes = 0
        if not policy_dig: