author    Paolo Bonzini <pbonzini@redhat.com>  2019-04-30 21:29:14 +0200
committer Paolo Bonzini <pbonzini@redhat.com>  2019-04-30 21:29:14 +0200
commit    da8f0d97b2a02ebc98eb380d9e59c7fb653d4ad8
tree      6f357846ce6e69463973e2e902be2c0d6afbdabd /arch/s390/kvm
parent    c110ae578ca0a10064dfbda3d786d6a733b9fe69
parent    b2d0371d2e374facd45e115d3668086df13730ff
Merge tag 'kvm-s390-next-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
KVM: s390: Features and fixes for 5.2
- VSIE crypto fixes
- new guest features for gen15
- disable halt polling for nested virtualization with overcommit
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--  arch/s390/kvm/Kconfig     |   1
-rw-r--r--  arch/s390/kvm/interrupt.c |  11
-rw-r--r--  arch/s390/kvm/kvm-s390.c  | 117
-rw-r--r--  arch/s390/kvm/vsie.c      |  13
4 files changed, 135 insertions, 7 deletions
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 767453faacfc..e987e73738b5 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -31,6 +31,7 @@ config KVM
 	select HAVE_KVM_IRQFD
 	select HAVE_KVM_IRQ_ROUTING
 	select HAVE_KVM_INVALID_WAKEUPS
+	select HAVE_KVM_NO_POLL
 	select SRCU
 	select KVM_VFIO
 	---help---
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 82162867f378..bfd55ad34a3e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -14,6 +14,7 @@
 #include <linux/kvm_host.h>
 #include <linux/hrtimer.h>
 #include <linux/mmu_context.h>
+#include <linux/nospec.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
 #include <linux/bitmap.h>
@@ -2307,6 +2308,7 @@ static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
 {
 	if (id >= MAX_S390_IO_ADAPTERS)
 		return NULL;
+	id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
 	return kvm->arch.adapters[id];
 }
 
@@ -2320,8 +2322,13 @@ static int register_io_adapter(struct kvm_device *dev,
 			   (void __user *)attr->addr, sizeof(adapter_info)))
 		return -EFAULT;
 
-	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
-	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
+	if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
+		return -EINVAL;
+
+	adapter_info.id = array_index_nospec(adapter_info.id,
+					     MAX_S390_IO_ADAPTERS);
+
+	if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
 		return -EINVAL;
 
 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 28f35d2b06cb..8d6d75db8de6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -75,6 +75,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
 	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
+	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -177,6 +178,11 @@ static int hpage;
 module_param(hpage, int, 0444);
 MODULE_PARM_DESC(hpage, "1m huge page backing support");
 
+/* maximum percentage of steal time for polling. >100 is treated like 100 */
+static u8 halt_poll_max_steal = 10;
+module_param(halt_poll_max_steal, byte, 0644);
+MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
+
 /*
  * For now we handle at most 16 double words as this is what the s390 base
  * kernel handles and stores in the prefix page. If we ever need to go beyond
@@ -321,6 +327,22 @@ static inline int plo_test_bit(unsigned char nr)
 	return cc == 0;
 }
 
+static inline void __insn32_query(unsigned int opcode, u8 query[32])
+{
+	register unsigned long r0 asm("0") = 0;	/* query function */
+	register unsigned long r1 asm("1") = (unsigned long) query;
+
+	asm volatile(
+		/* Parameter regs are ignored */
+		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
+		: "=m" (*query)
+		: "d" (r0), "a" (r1), [opc] "i" (opcode)
+		: "cc");
+}
+
+#define INSN_SORTL 0xb938
+#define INSN_DFLTCC 0xb939
+
 static void kvm_s390_cpu_feat_init(void)
 {
 	int i;
@@ -368,6 +390,16 @@ static void kvm_s390_cpu_feat_init(void)
 		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
 			      kvm_s390_available_subfunc.kma);
 
+	if (test_facility(155)) /* MSA9 */
+		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
+			      kvm_s390_available_subfunc.kdsa);
+
+	if (test_facility(150)) /* SORTL */
+		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
+
+	if (test_facility(151)) /* DFLTCC */
+		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
+
 	if (MACHINE_HAS_ESOP)
 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
 	/*
@@ -654,6 +686,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 			set_kvm_facility(kvm->arch.model.fac_mask, 135);
 			set_kvm_facility(kvm->arch.model.fac_list, 135);
 		}
+		if (test_facility(148)) {
+			set_kvm_facility(kvm->arch.model.fac_mask, 148);
+			set_kvm_facility(kvm->arch.model.fac_list, 148);
+		}
+		if (test_facility(152)) {
+			set_kvm_facility(kvm->arch.model.fac_mask, 152);
+			set_kvm_facility(kvm->arch.model.fac_list, 152);
+		}
 		r = 0;
 	} else
 		r = -EINVAL;
@@ -1320,6 +1360,19 @@ static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
 	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
+	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
+	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
 
 	return 0;
 }
@@ -1488,6 +1541,19 @@ static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
 	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
+	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
+	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
 
 	return 0;
 }
@@ -1543,6 +1609,19 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
 	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
+	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
+	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
+		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
+		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
+	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
+		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
+		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
 
 	return 0;
 }
@@ -2814,6 +2893,25 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
 }
 
+static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
+{
+	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
+	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
+		return true;
+	return false;
+}
+
+static bool kvm_has_pckmo_ecc(struct kvm *kvm)
+{
+	/* At least one ECC subfunction must be present */
+	return kvm_has_pckmo_subfunc(kvm, 32) ||
+	       kvm_has_pckmo_subfunc(kvm, 33) ||
+	       kvm_has_pckmo_subfunc(kvm, 34) ||
+	       kvm_has_pckmo_subfunc(kvm, 40) ||
+	       kvm_has_pckmo_subfunc(kvm, 41);
+
+}
+
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
 {
 	/*
@@ -2826,13 +2924,19 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
 	vcpu->arch.sie_block->eca &= ~ECA_APIE;
+	vcpu->arch.sie_block->ecd &= ~ECD_ECC;
 
 	if (vcpu->kvm->arch.crypto.apie)
 		vcpu->arch.sie_block->eca |= ECA_APIE;
 
 	/* Set up protected key support */
-	if (vcpu->kvm->arch.crypto.aes_kw)
+	if (vcpu->kvm->arch.crypto.aes_kw) {
 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
+		/* ecc is also wrapped with AES key */
+		if (kvm_has_pckmo_ecc(vcpu->kvm))
+			vcpu->arch.sie_block->ecd |= ECD_ECC;
+	}
+
 	if (vcpu->kvm->arch.crypto.dea_kw)
 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
 }
@@ -3065,6 +3169,17 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
 	}
 }
 
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+	/* do not poll with more than halt_poll_max_steal percent of steal time */
+	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
+	    halt_poll_max_steal) {
+		vcpu->stat.halt_no_poll_steal++;
+		return true;
+	}
+	return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	/* kvm common code refers to this, but never calls it */
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index d62fa148558b..076090f9e666 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -288,7 +288,9 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
 	unsigned long *b1, *b2;
 	u8 ecb3_flags;
+	u32 ecd_flags;
 	int apie_h;
+	int apie_s;
 	int key_msk = test_kvm_facility(vcpu->kvm, 76);
 	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
 	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
@@ -297,7 +299,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	scb_s->crycbd = 0;
 
 	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
-	if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
+	apie_s = apie_h & scb_o->eca;
+	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
 		return 0;
 
 	if (!crycb_addr)
@@ -308,7 +311,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	    ((crycb_addr + 128) & PAGE_MASK))
 		return set_validity_icpt(scb_s, 0x003CU);
 
-	if (apie_h && (scb_o->eca & ECA_APIE)) {
+	if (apie_s) {
 		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
 				 vcpu->kvm->arch.crypto.crycb,
 				 fmt_o, fmt_h);
@@ -320,7 +323,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	/* we may only allow it if enabled for guest 2 */
 	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
 		     (ECB3_AES | ECB3_DEA);
-	if (!ecb3_flags)
+	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
+	if (!ecb3_flags && !ecd_flags)
 		goto end;
 
 	/* copy only the wrapping keys */
@@ -329,6 +333,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		return set_validity_icpt(scb_s, 0x0035U);
 
 	scb_s->ecb3 |= ecb3_flags;
+	scb_s->ecd |= ecd_flags;
 
 	/* xor both blocks in one run */
 	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
@@ -339,7 +344,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 end:
 	switch (ret) {
 	case -EINVAL:
-		return set_validity_icpt(scb_s, 0x0020U);
+		return set_validity_icpt(scb_s, 0x0022U);
 	case -EFAULT:
 		return set_validity_icpt(scb_s, 0x0035U);
 	case -EACCES:
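
The interrupt.c hunks above clamp the user-controlled adapter id with array_index_nospec() after the bounds check, so that even a speculatively executed load cannot index kvm->arch.adapters[] out of range (Spectre variant 1 hardening). Below is a rough, stand-alone C sketch of that pattern; the array size, the names, and the portable mask construction are illustrative only, and the real kernel helper builds the mask with architecture-specific code so the comparison cannot be compiled back into a predictable branch.

#include <stddef.h>
#include <stdio.h>

#define MAX_ADAPTERS 64			/* stand-in for MAX_S390_IO_ADAPTERS */

static void *adapters[MAX_ADAPTERS];

/* Simplified model of array_index_nospec(): return the index when it is
 * in bounds and 0 otherwise, via a data dependency rather than relying
 * only on the preceding branch. */
static size_t index_nospec_model(size_t index, size_t size)
{
	size_t mask = 0UL - (size_t)(index < size);	/* all ones or zero */

	return index & mask;
}

static void *get_adapter(size_t id)
{
	if (id >= MAX_ADAPTERS)
		return NULL;
	/* Even if the branch above is predicted past, the clamped index
	 * keeps the speculative access inside the array. */
	id = index_nospec_model(id, MAX_ADAPTERS);
	return adapters[id];
}

int main(void)
{
	printf("%p\n", get_adapter(3));
	printf("%p\n", get_adapter(1000));	/* out of range: NULL */
	return 0;
}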
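
The "disable halt polling for nested virtualization with overcommit" item from the commit message is the new kvm_arch_no_poll() in kvm-s390.c, gated by the halt_poll_max_steal module parameter. The following minimal sketch models only the arithmetic of that check; the tick length, the 4096-units-per-microsecond reading of the << 12, and the function name are assumptions of this illustration, not definitions taken from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in values: in the kernel, TICK_USEC comes from the timer code and
 * halt_poll_max_steal is a 0644 module parameter defaulting to 10. */
#define TICK_USEC		(1000000ULL / 100)	/* one 100 Hz tick, in usec */
#define HALT_POLL_MAX_STEAL	10			/* percent */

/* Model of the kvm_arch_no_poll() check: skip halt polling when the
 * average steal time per tick reaches the configured percentage.
 * avg_steal_units is assumed to be in CPU-timer units, 4096 per usec,
 * which is why the tick length is shifted left by 12. */
static bool should_skip_halt_poll(uint64_t avg_steal_units)
{
	return avg_steal_units * 100 / (TICK_USEC << 12) >= HALT_POLL_MAX_STEAL;
}

int main(void)
{
	/* 2 ms of steal in a 10 ms tick is 20%, above the 10% default */
	uint64_t two_ms = 2000ULL << 12;

	printf("skip polling: %d\n", should_skip_halt_poll(two_ms));
	return 0;
}

Because the parameter is writable (mode 0644), the threshold can be tuned at runtime, typically via /sys/module/kvm/parameters/halt_poll_max_steal.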
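
For the VSIE crypto part, kvm_has_pckmo_ecc() above accepts ECD_ECC only when at least one of the PCKMO ECC key functions (numbers 32, 33, 34, 40 and 41) is present in both the guest CPU model and the host subfunction mask, tested with test_bit_inv(), i.e. MSB-0 bit numbering. The sketch below only shows how such a function bit is located in a query mask under that numbering; the 16-byte mask size and all helper names are assumptions made for the illustration.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* A CPACF-style query mask: 128 bits in MSB-0 order, so function number
 * nr lives in byte nr / 8 at bit position 7 - (nr % 8). */
struct query_mask {
	unsigned char bytes[16];
};

static bool mask_has_func(const struct query_mask *m, unsigned int nr)
{
	return (m->bytes[nr / 8] >> (7 - (nr % 8))) & 1;
}

/* In the spirit of kvm_has_pckmo_ecc(): at least one ECC key function
 * must be offered (the kernel additionally requires the bit in both the
 * guest model and the host mask). */
static bool mask_has_ecc(const struct query_mask *m)
{
	return mask_has_func(m, 32) || mask_has_func(m, 33) ||
	       mask_has_func(m, 34) || mask_has_func(m, 40) ||
	       mask_has_func(m, 41);
}

int main(void)
{
	struct query_mask m;

	memset(&m, 0, sizeof(m));
	m.bytes[4] = 0x80;	/* function 32: most significant bit of byte 4 */
	printf("ecc available: %d\n", mask_has_ecc(&m));
	return 0;
}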