Fix a SEV-SNP regression when CONFIG_KVM_AMD_SEV is disabled.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmjOZQ0RHG1pbmdvQGtl
 cm5lbC5vcmcACgkQEnMQ0APhK1i0PBAAv855cFr65Ux3xAwrKbxEb/3yVVe9d5US
 Eut7CxnR/10FYJq1dRYCdc4yrmHk/f8hVXIPBrWCijrVsg7uwYFcq7+dd8nUqP1t
 YB1ozIi1Ma4pGePJb4POBrSYbBsdMOsnFwHAod9QdfmIcmxbdlUr8J2K/MZ/u+tV
 bFiBFH3TtqcG89P+ZbFgK7udVvQDghLBdI3iqtsXTO0C0EaqJO85+8HBvNp8FFuC
 pebRC8INs/Hn/tEx8sykG2jPgREDU9Hft3O1l+YIqhKIxkeevl5ifqpUfotAkXh9
 TWm/FecLHph7jk3m/KofKK8osforuKirPp1OG7X3M7eudmuQdkrHc0irxCIxl8Hs
 fg42HnH1iLQMxWSWcVXOxB5+6m2Z3+ugm+jd3eTDG5VuTAtEvs28yYjZx9xgNGjl
 jWazG3FeJVE1/xJXHnNdl/52lH0twU7e0Bwn8xrK+liH4YvhZP3A0YVVJ9+pUU8O
 VQrd2N4fL+bQ/S2aBtRBgphHX2JU18w7hrco+s1oiMIA9OREBto9unb7rYWrAcV4
 CJnd1Dat3882UqANEg4Exa5Oqd0IWKW76HeRevyxw2OW3kVaLHJQOE832/hOKW7u
 0THCaANUNW9s6dYDCuosM6HsQ2fvQNy6VU4fDbEYynMlRyflGHgLbKh9Mkvigihk
 grpYvfpEhLM=
 =WPmk
 -----END PGP SIGNATURE-----

Merge tag 'x86-urgent-2025-09-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fix from Ingo Molnar:
 "Fix a SEV-SNP regression when CONFIG_KVM_AMD_SEV is disabled"

* tag 'x86-urgent-2025-09-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sev: Guard sev_evict_cache() with CONFIG_AMD_MEM_ENCRYPT
This commit is contained in:
Linus Torvalds 2025-09-20 09:27:15 -07:00
commit 3b08f56fbb
1 changed file with 19 additions and 19 deletions

View File

@@ -562,6 +562,24 @@ enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
extern struct ghcb *boot_ghcb;
/*
 * Evict the cache-lines backing @npages 4K pages starting at @va.
 *
 * For SEV guests, touching the first and the last cache-line of each
 * 4K page with the guest key is sufficient to flush every cache-line
 * associated with that page, without paying for a full CLFLUSH
 * sequence over the whole range.
 */
static inline void sev_evict_cache(void *va, int npages)
{
	volatile u8 val __always_unused;
	u8 *page = va;
	int i;

	for (i = 0; i < npages; i++, page += PAGE_SIZE) {
		val = page[0];			/* first cache-line of the page */
		val = page[PAGE_SIZE - 1];	/* last cache-line of the page */
	}
}
#else /* !CONFIG_AMD_MEM_ENCRYPT */
#define snp_vmpl 0
@@ -605,6 +623,7 @@ static inline int snp_send_guest_request(struct snp_msg_desc *mdesc,
static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; }
static inline void __init snp_secure_tsc_prepare(void) { }
static inline void __init snp_secure_tsc_init(void) { }
/* No-op when CONFIG_AMD_MEM_ENCRYPT is not set: no SEV cache state to evict. */
static inline void sev_evict_cache(void *va, int npages) {}
#endif /* CONFIG_AMD_MEM_ENCRYPT */
@@ -619,24 +638,6 @@ int rmp_make_shared(u64 pfn, enum pg_level level);
void snp_leak_pages(u64 pfn, unsigned int npages);
void kdump_sev_callback(void);
void snp_fixup_e820_tables(void);
static inline void sev_evict_cache(void *va, int npages)
{
	volatile u8 val __always_unused;
	u8 *bytes = va;
	int idx;

	/*
	 * For SEV guests, a read from the first/last cache-lines of a 4K
	 * page using the guest key suffices to flush all cache-lines
	 * associated with that 4K page, avoiding the overhead of a full
	 * CLFLUSH sequence.
	 */
	for (idx = 0; idx < npages; idx++) {
		u8 *first = &bytes[idx * PAGE_SIZE];

		val = *first;
		val = *(first + PAGE_SIZE - 1);
	}
}
#else
static inline bool snp_probe_rmptable_info(void) { return false; }
static inline int snp_rmptable_init(void) { return -ENOSYS; }
@@ -652,7 +653,6 @@ static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV
static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
static inline void kdump_sev_callback(void) { }
static inline void snp_fixup_e820_tables(void) {}
/* Stub for configs without SNP/RMP support: nothing to evict. */
static inline void sev_evict_cache(void *va, int npages) {}
#endif
#endif