Li, Jiongxi
2013-Jan-27 11:05 UTC
[PATCH v2 1/2] Xen: Fix live migration while enabling APICV
SVI should be restored in case the guest is processing a virtual interrupt
while the domain state is being saved. Otherwise SVI would be lost when
virtual interrupt delivery is enabled.

Signed-off-by: Jiongxi Li <jiongxi.li@intel.com>

diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index ee2294c..801c4fa 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1198,6 +1198,9 @@ static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
     if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 )
         return -EINVAL;
 
+    if ( hvm_funcs.set_svi )
+        hvm_funcs.set_svi(vlapic_find_highest_isr(s), v);
+
     vlapic_adjust_i8259_target(d);
     lapic_rearm(s);
     return 0;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 4d7c93f..2949782 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1419,6 +1419,28 @@ static int vmx_virtual_intr_delivery_enabled(void)
     return cpu_has_vmx_virtual_intr_delivery;
 }
 
+static void vmx_set_svi(int isr, struct vcpu *v)
+{
+    unsigned long status;
+    u8 old;
+
+    if ( !cpu_has_vmx_virtual_intr_delivery )
+        return;
+
+    if (isr == -1)
+        isr = 0;
+
+    vmx_vmcs_enter(v);
+    status = __vmread(GUEST_INTR_STATUS);
+    old = status >> 8;
+    if (isr != old) {
+        status &= 0x0FF;
+        status |= isr << 8;
+        __vmwrite(GUEST_INTR_STATUS, status);
+    }
+    vmx_vmcs_exit(v);
+}
+
 static struct hvm_function_table __read_mostly vmx_function_table = {
     .name = "VMX",
     .cpu_up_prepare = vmx_cpu_up_prepare,
@@ -1468,6 +1490,7 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
     .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
     .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
+    .set_svi = vmx_set_svi,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
 };
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 76e9cc8..a0375a7 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -183,6 +183,7 @@ struct hvm_function_table {
     /* Virtual interrupt delivery */
     void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
     int (*virtual_intr_delivery_enabled)(void);
+    void (*set_svi)(int isr, struct vcpu *v);
 
     /*Walk nested p2m */
     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
-- 
1.7.1
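
For context, GUEST_INTR_STATUS is the 16-bit VMCS field that packs RVI
(requesting virtual interrupt) in bits 7:0 and SVI (servicing virtual
interrupt) in bits 15:8; vmx_set_svi() above rewrites only the high byte.
Below is a minimal standalone sketch of that update (not Xen code): the
set_svi()/guest_intr_status names are hypothetical, and a plain variable
stands in for __vmread()/__vmwrite() on the real field.

/*
 * Standalone sketch of the SVI update performed by the patch.
 * Assumption: a uint16_t variable models the GUEST_INTR_STATUS field,
 * with RVI in bits 7:0 and SVI in bits 15:8.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t guest_intr_status;   /* stand-in for the VMCS field */

static void set_svi(int isr)
{
    uint8_t old;

    if (isr == -1)                   /* no vector in service: clear SVI */
        isr = 0;

    old = guest_intr_status >> 8;    /* current SVI (bits 15:8) */
    if (isr != old) {
        guest_intr_status &= 0x0FF;  /* keep RVI (bits 7:0) */
        guest_intr_status |= (uint16_t)isr << 8;
    }
}

int main(void)
{
    guest_intr_status = 0x0030;      /* RVI = 0x30, SVI = 0 */
    set_svi(0x41);                   /* vector 0x41 is in service */
    printf("GUEST_INTR_STATUS = 0x%04x\n", (unsigned)guest_intr_status);
    return 0;                        /* prints 0x4130 */
}

This mirrors what lapic_load_regs() now does on restore: the highest bit
set in the saved ISR is written back as SVI so that the EOI for an
interrupt that was in service at save time is handled correctly.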