author     Thiago Jung Bauermann <bauerman@linux.ibm.com>   2019-08-06 01:49:17 -0300
committer  Michael Ellerman <mpe@ellerman.id.au>            2019-08-09 22:52:08 +1000
commit     284e21fab2cfcf90dacce565e0b12f29e5df00c1 (patch)
tree       f3ff0d324d2c19857a4e842a1c7369e0421e1fc2 /arch
parent     e740815a97e2b6d6446792f4328378e66de166d1 (diff)
x86, s390/mm: Move sme_active() and sme_me_mask to x86-specific header
Now that generic code doesn't reference them, move sme_active() and
sme_me_mask to x86's <asm/mem_encrypt.h>.
Also remove the export for sme_active(), since it is only used in files
that are never built as modules. sme_me_mask, on the other hand, is used
in arch/x86/kvm/svm.c (via __sme_set() and __psp_pa()), which can be
built as a module, so its export needs to stay.
Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190806044919.10622-5-bauerman@linux.ibm.com
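
For context (not part of the commit itself), a minimal sketch of the pattern this series moves generic code towards: arch-neutral callers test mem_encrypt_active() rather than the now x86-only sme_active(), so nothing outside arch/x86 needs to know about sme_me_mask. The caller name force_dma_decrypted() below is hypothetical.

/*
 * Hypothetical arch-neutral caller, not taken from this patch. It relies
 * only on mem_encrypt_active(), which each architecture provides in its
 * own <asm/mem_encrypt.h>: s390 always returns false, x86 returns
 * sme_me_mask != 0.
 */
#include <linux/mem_encrypt.h>

static bool force_dma_decrypted(void)
{
	return mem_encrypt_active();
}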
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/include/asm/mem_encrypt.h |  4 +---
-rw-r--r--  arch/x86/include/asm/mem_encrypt.h  | 10 ++++++++++
-rw-r--r--  arch/x86/mm/mem_encrypt.c           |  1 -
3 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
index 3eb018508190..ff813a56bc30 100644
--- a/arch/s390/include/asm/mem_encrypt.h
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -4,9 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
-#define sme_me_mask	0ULL
-
-static inline bool sme_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
 extern bool sev_active(void);
 
 int set_memory_encrypted(unsigned long addr, int numpages);
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 0c196c47d621..848ce43b9040 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -92,6 +92,16 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
+static inline bool mem_encrypt_active(void)
+{
+	return sme_me_mask;
+}
+
+static inline u64 sme_get_me_mask(void)
+{
+	return sme_me_mask;
+}
+
 #endif	/* __ASSEMBLY__ */
 
 #endif	/* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index fece30ca8b0c..94da5a88abe6 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -344,7 +344,6 @@ bool sme_active(void)
 {
 	return sme_me_mask && !sev_enabled;
 }
-EXPORT_SYMBOL(sme_active);
 
 bool sev_active(void)
 {
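
For reference (again not from the patch), a hedged sketch of why the sme_me_mask export must stay while sme_active()'s can go: __sme_set(), as used in arch/x86/kvm/svm.c, expands to OR-ing sme_me_mask into an address, and kvm-amd can be built as a module, so the variable has to remain visible to module code. The helper name svm_encrypt_pa() is hypothetical.

/*
 * Hypothetical module-side helper modelled on the __sme_set() uses in
 * arch/x86/kvm/svm.c.  __sme_set(x) is ((x) | sme_me_mask) in
 * <linux/mem_encrypt.h>, so a modular build still needs the exported
 * sme_me_mask symbol.
 */
#include <linux/types.h>
#include <linux/mem_encrypt.h>

static u64 svm_encrypt_pa(u64 pa)
{
	/* Set the SME encryption bit (C-bit) in the physical address. */
	return __sme_set(pa);
}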