Jan Beulich
2007-Oct-31 10:35 UTC
[Xen-devel] [PATCH, fixed] x86: fix debug register handling
This is intended to fix a number of problems:

- loading of DR7 in the generic __context_switch() allowed HVM guests to
  set breakpoints in ways that would crash the hypervisor
- loading the DRs only when DR7 (for HVM guests, even just DR7.Ln/Gn) is
  non-zero leaked information (the other DRs) and potentially corrupted
  state (if other DRs were set before a domain was preempted and any
  intermediately scheduled vCPU had a non-zero DR7)
- {svm,vmx}_save_dr() did not save anything, due to a broken inline asm
  constraint

As a side effect, it also does away with the proliferation of loaddebug()/
savedebug() macros in various source files.

Compared to the original version, this patch fixes DR6/DR7 restoring on
SVM: these two registers must be restored (to the VMCB) unconditionally
during context switch, since reads from them are not being intercepted.

Signed-off-by: Jan Beulich <jbeulich@novell.com>

Index: 2007-10-10/xen/arch/x86/acpi/suspend.c
===================================================================
--- 2007-10-10.orig/xen/arch/x86/acpi/suspend.c	2007-10-31 11:09:20.505878272 +0100
+++ 2007-10-10/xen/arch/x86/acpi/suspend.c	2007-10-26 16:05:20.000000000 +0200
@@ -29,9 +29,6 @@ void save_rest_processor_state(void)
 #endif
 }
 
-#define loaddebug(_v,_reg) \
-    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
-
 void restore_rest_processor_state(void)
 {
     int cpu = smp_processor_id();
@@ -54,16 +51,25 @@ void restore_rest_processor_state(void)
 #endif
 
     /* Maybe load the debug registers. */
-    if ( !is_idle_vcpu(v) && unlikely(v->arch.guest_context.debugreg[7]) )
+    if ( !is_idle_vcpu(v) )
     {
-        loaddebug(&v->arch.guest_context, 0);
-        loaddebug(&v->arch.guest_context, 1);
-        loaddebug(&v->arch.guest_context, 2);
-        loaddebug(&v->arch.guest_context, 3);
+        if ( unlikely(v->arch.guest_context.debugreg[0]) )
+            loaddebug(v, 0);
+        if ( unlikely(v->arch.guest_context.debugreg[1]) )
+            loaddebug(v, 1);
+        if ( unlikely(v->arch.guest_context.debugreg[2]) )
+            loaddebug(v, 2);
+        if ( unlikely(v->arch.guest_context.debugreg[3]) )
+            loaddebug(v, 3);
         /* no 4 and 5 */
-        loaddebug(&v->arch.guest_context, 6);
-        loaddebug(&v->arch.guest_context, 7);
+        if ( unlikely(v->arch.guest_context.debugreg[6]) )
+            loaddebug(v, 6);
+        if ( unlikely(v->arch.guest_context.debugreg[7]) )
+            loaddebug(v, 7);
     }
+    else
+        memset(v->arch.guest_context.debugreg, 0,
+               sizeof(v->arch.guest_context.debugreg));
 
     /* Reload FPU state on next FPU use. */
     stts();

Index: 2007-10-10/xen/arch/x86/domain.c
===================================================================
--- 2007-10-10.orig/xen/arch/x86/domain.c	2007-10-31 11:09:20.506878120 +0100
+++ 2007-10-10/xen/arch/x86/domain.c	2007-10-26 16:47:52.000000000 +0200
@@ -1194,10 +1194,16 @@ static void paravirt_ctxt_switch_to(stru
 {
     set_int80_direct_trap(v);
     switch_kernel_stack(v);
-}
 
-#define loaddebug(_v,_reg) \
-    asm volatile ( "mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]) )
+    /* Maybe switch the debug registers. */
+    cond_loaddebug(v, 0);
+    cond_loaddebug(v, 1);
+    cond_loaddebug(v, 2);
+    cond_loaddebug(v, 3);
+    /* no 4 and 5 */
+    cond_loaddebug(v, 6);
+    cond_loaddebug(v, 7);
+}
 
 static void __context_switch(void)
 {
@@ -1223,18 +1229,6 @@ static void __context_switch(void)
         memcpy(stack_regs,
                &n->arch.guest_context.user_regs,
                CTXT_SWITCH_STACK_BYTES);
-
-        /* Maybe switch the debug registers. */
-        if ( unlikely(n->arch.guest_context.debugreg[7]) )
-        {
-            loaddebug(&n->arch.guest_context, 0);
-            loaddebug(&n->arch.guest_context, 1);
-            loaddebug(&n->arch.guest_context, 2);
-            loaddebug(&n->arch.guest_context, 3);
-            /* no 4 and 5 */
-            loaddebug(&n->arch.guest_context, 6);
-            loaddebug(&n->arch.guest_context, 7);
-        }
 
         n->arch.ctxt_switch_to(n);
     }

Index: 2007-10-10/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2007-10-10.orig/xen/arch/x86/hvm/svm/svm.c	2007-10-31 11:09:20.501878880 +0100
+++ 2007-10-10/xen/arch/x86/hvm/svm/svm.c	2007-10-31 11:07:12.000000000 +0100
@@ -138,38 +138,54 @@ static enum handler_return long_mode_do_
 }
 
-#define loaddebug(_v,_reg) \
-    asm volatile ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
-#define savedebug(_v,_reg) \
-    asm volatile ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
-
 static void svm_save_dr(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
+    v->arch.guest_context.debugreg[6] = vmcb->dr6;
+
     if ( !v->arch.hvm_vcpu.flag_dr_dirty )
         return;
 
     /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
     v->arch.hvm_vcpu.flag_dr_dirty = 0;
-    v->arch.hvm_svm.vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
+    v->arch.hvm_svm.vmcb->dr_intercepts = DR_INTERCEPTS;
 
-    savedebug(&v->arch.guest_context, 0);
-    savedebug(&v->arch.guest_context, 1);
-    savedebug(&v->arch.guest_context, 2);
-    savedebug(&v->arch.guest_context, 3);
-    v->arch.guest_context.debugreg[6] = vmcb->dr6;
+    savedebug(v, 0);
+    savedebug(v, 1);
+    savedebug(v, 2);
+    savedebug(v, 3);
     v->arch.guest_context.debugreg[7] = vmcb->dr7;
 }
 
+/*
+ * DR7 is saved and restored on every vmexit. Other debug registers only
+ * need to be restored if their value is going to affect execution -- i.e.,
+ * if one of the breakpoints or general detect is enabled. So mask out all
+ * bits that don't enable some breakpoint functionality.
+ */
+#define DR7_ACTIVE_MASK 0x20ff
+
 static void __restore_debug_registers(struct vcpu *v)
 {
-    loaddebug(&v->arch.guest_context, 0);
-    loaddebug(&v->arch.guest_context, 1);
-    loaddebug(&v->arch.guest_context, 2);
-    loaddebug(&v->arch.guest_context, 3);
-    /* DR6 and DR7 are loaded from the VMCB. */
+    cond_loaddebug(v, 0);
+    cond_loaddebug(v, 1);
+    cond_loaddebug(v, 2);
+    cond_loaddebug(v, 3);
+}
+
+static void svm_restore_dr(struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    vmcb->dr6 = v->arch.guest_context.debugreg[6];
+    vmcb->dr7 = v->arch.guest_context.debugreg[7];
+
+    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
+    {
+        __restore_debug_registers(v);
+        v->arch.hvm_svm.vmcb->dr_intercepts = DR_WRITE_INTERCEPTS;
+    }
 }
 
@@ -421,12 +437,6 @@ static int svm_load_vmcb_ctxt(struct vcp
     return 0;
 }
 
-static void svm_restore_dr(struct vcpu *v)
-{
-    if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
-        __restore_debug_registers(v);
-}
-
 static enum hvm_intblk svm_interrupt_blocked(
     struct vcpu *v, struct hvm_intack intack)
 {
@@ -1158,9 +1168,11 @@ static void svm_dr_access(struct vcpu *v
     HVMTRACE_0D(DR_WRITE, v);
 
+    ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
     v->arch.hvm_vcpu.flag_dr_dirty = 1;
 
-    __restore_debug_registers(v);
+    if ( likely(!(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK)) )
+        __restore_debug_registers(v);
 
     /* allow the guest full access to the debug registers */
     vmcb->dr_intercepts = 0;
@@ -2270,7 +2282,8 @@ asmlinkage void svm_vmexit_handler(struc
                         TYPE_MOV_TO_CR, regs);
         break;
 
-    case VMEXIT_DR0_WRITE ... VMEXIT_DR7_WRITE:
+    case VMEXIT_DR0_READ ... VMEXIT_DR15_READ:
+    case VMEXIT_DR0_WRITE ... VMEXIT_DR15_WRITE:
         svm_dr_access(v, regs);
         break;

Index: 2007-10-10/xen/arch/x86/hvm/svm/vmcb.c
===================================================================
--- 2007-10-10.orig/xen/arch/x86/hvm/svm/vmcb.c	2007-10-31 11:09:20.502878728 +0100
+++ 2007-10-10/xen/arch/x86/hvm/svm/vmcb.c	2007-10-31 11:09:59.660925800 +0100
@@ -129,8 +129,8 @@ static int construct_vmcb(struct vcpu *v
         GENERAL2_INTERCEPT_STGI        | GENERAL2_INTERCEPT_CLGI        |
         GENERAL2_INTERCEPT_SKINIT      | GENERAL2_INTERCEPT_RDTSCP;
 
-    /* Intercept all debug-register writes. */
-    vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
+    /* Intercept most debug-register accesses. */
+    vmcb->dr_intercepts = DR_INTERCEPTS;
 
     /* Intercept all control-register accesses except for CR2 and CR8. */
     vmcb->cr_intercepts = ~(CR_INTERCEPT_CR2_READ |

Index: 2007-10-10/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- 2007-10-10.orig/xen/arch/x86/hvm/vmx/vmx.c	2007-10-31 11:09:20.503878576 +0100
+++ 2007-10-10/xen/arch/x86/hvm/vmx/vmx.c	2007-10-29 09:01:31.000000000 +0100
@@ -382,11 +382,6 @@ static enum handler_return long_mode_do_
 
 #endif /* __i386__ */
 
-#define loaddebug(_v,_reg) \
-    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
-#define savedebug(_v,_reg) \
-    __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
-
 static int vmx_guest_x86_mode(struct vcpu *v)
 {
     unsigned int cs_ar_bytes;
@@ -412,23 +407,38 @@ static void vmx_save_dr(struct vcpu *v)
     v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
     __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
 
-    savedebug(&v->arch.guest_context, 0);
-    savedebug(&v->arch.guest_context, 1);
-    savedebug(&v->arch.guest_context, 2);
-    savedebug(&v->arch.guest_context, 3);
-    savedebug(&v->arch.guest_context, 6);
+    savedebug(v, 0);
+    savedebug(v, 1);
+    savedebug(v, 2);
+    savedebug(v, 3);
+    savedebug(v, 6);
     v->arch.guest_context.debugreg[7] = __vmread(GUEST_DR7);
 }
 
 static void __restore_debug_registers(struct vcpu *v)
 {
-    loaddebug(&v->arch.guest_context, 0);
-    loaddebug(&v->arch.guest_context, 1);
-    loaddebug(&v->arch.guest_context, 2);
-    loaddebug(&v->arch.guest_context, 3);
+    cond_loaddebug(v, 0);
+    cond_loaddebug(v, 1);
+    cond_loaddebug(v, 2);
+    cond_loaddebug(v, 3);
     /* No 4 and 5 */
-    loaddebug(&v->arch.guest_context, 6);
-    /* DR7 is loaded from the VMCS. */
+    cond_loaddebug(v, 6);
+    __vmwrite(GUEST_DR7, v->arch.guest_context.debugreg[7]);
+}
+
+/*
+ * DR7 is saved and restored on every vmexit. Other debug registers only
+ * need to be restored if their value is going to affect execution -- i.e.,
+ * if one of the breakpoints is enabled. So mask out all bits that don't
+ * enable some breakpoint functionality.
+ */
+#define DR7_ACTIVE_MASK 0xff
+
+static void vmx_restore_dr(struct vcpu *v)
+{
+    /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
+    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
+        __restore_debug_registers(v);
 }
 
 void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
@@ -704,21 +714,6 @@ static int vmx_load_vmcs_ctxt(struct vcp
     return 0;
 }
 
-/*
- * DR7 is saved and restored on every vmexit. Other debug registers only
- * need to be restored if their value is going to affect execution -- i.e.,
- * if one of the breakpoints is enabled. So mask out all bits that don't
- * enable some breakpoint functionality.
- */
-#define DR7_ACTIVE_MASK 0xff
-
-static void vmx_restore_dr(struct vcpu *v)
-{
-    /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
-    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
-        __restore_debug_registers(v);
-}
-
 static void vmx_ctxt_switch_from(struct vcpu *v)
 {
     vmx_save_guest_msrs(v);
@@ -1319,10 +1314,11 @@ static void vmx_dr_access(unsigned long
     HVMTRACE_0D(DR_WRITE, v);
 
+    ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
     v->arch.hvm_vcpu.flag_dr_dirty = 1;
 
-    /* We could probably be smarter about this */
-    __restore_debug_registers(v);
+    if ( likely(!(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK)) )
+        __restore_debug_registers(v);
 
     /* Allow guest direct access to DR registers */
     v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;

Index: 2007-10-10/xen/arch/x86/traps.c
===================================================================
--- 2007-10-10.orig/xen/arch/x86/traps.c	2007-10-31 11:09:20.507877968 +0100
+++ 2007-10-10/xen/arch/x86/traps.c	2007-10-26 15:53:13.000000000 +0200
@@ -2323,25 +2323,25 @@ long set_debugreg(struct vcpu *p, int re
         if ( !access_ok(value, sizeof(long)) )
             return -EPERM;
         if ( p == current )
-            asm volatile ( "mov %0, %%db0" : : "r" (value) );
+            __loaddebug(p, 0, value);
         break;
     case 1:
         if ( !access_ok(value, sizeof(long)) )
             return -EPERM;
         if ( p == current )
-            asm volatile ( "mov %0, %%db1" : : "r" (value) );
+            __loaddebug(p, 1, value);
         break;
     case 2:
         if ( !access_ok(value, sizeof(long)) )
             return -EPERM;
         if ( p == current )
-            asm volatile ( "mov %0, %%db2" : : "r" (value) );
+            __loaddebug(p, 2, value);
         break;
     case 3:
         if ( !access_ok(value, sizeof(long)) )
             return -EPERM;
         if ( p == current )
-            asm volatile ( "mov %0, %%db3" : : "r" (value) );
+            __loaddebug(p, 3, value);
         break;
     case 6:
         /*
@@ -2351,7 +2351,7 @@ long set_debugreg(struct vcpu *p, int re
         value &= 0xffffefff; /* reserved bits => 0 */
         value |= 0xffff0ff0; /* reserved bits => 1 */
         if ( p == current )
-            asm volatile ( "mov %0, %%db6" : : "r" (value) );
+            __loaddebug(p, 6, value);
         break;
     case 7:
         /*
@@ -2372,7 +2372,7 @@ long set_debugreg(struct vcpu *p, int re
                 if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
         }
         if ( p == current )
-            asm volatile ( "mov %0, %%db7" : : "r" (value) );
+            __loaddebug(p, 7, value);
         break;
     default:
         return -EINVAL;

Index: 2007-10-10/xen/include/asm-x86/hvm/svm/vmcb.h
===================================================================
--- 2007-10-10.orig/xen/include/asm-x86/hvm/svm/vmcb.h	2007-10-31 11:09:20.508877816 +0100
+++ 2007-10-10/xen/include/asm-x86/hvm/svm/vmcb.h	2007-10-29 09:09:26.000000000 +0100
@@ -151,12 +151,12 @@ enum DRInterceptBits
     DR_INTERCEPT_DR15_WRITE = 1 << 31,
 };
 
-/* for lazy save/restore we'd like to intercept all DR writes */
-#define DR_INTERCEPT_ALL_WRITES \
-    (DR_INTERCEPT_DR0_WRITE|DR_INTERCEPT_DR1_WRITE|DR_INTERCEPT_DR2_WRITE \
-    |DR_INTERCEPT_DR3_WRITE|DR_INTERCEPT_DR4_WRITE|DR_INTERCEPT_DR5_WRITE \
-    |DR_INTERCEPT_DR6_WRITE|DR_INTERCEPT_DR7_WRITE)
-
+/* For lazy save/restore we'd like to intercept most DR accesses (DR6 and DR7
+ * are implicitly handled through the VMCB, but DR7 writes must be watched).
+ */
+#define DR_INTERCEPTS (~(DR_INTERCEPT_DR6_READ|DR_INTERCEPT_DR7_READ| \
+                         DR_INTERCEPT_DR6_WRITE))
+#define DR_WRITE_INTERCEPTS (DR_INTERCEPTS & 0xffff0000)
 
 enum VMEXIT_EXITCODE
 {

Index: 2007-10-10/xen/include/asm-x86/processor.h
===================================================================
--- 2007-10-10.orig/xen/include/asm-x86/processor.h	2007-10-31 11:09:20.509877664 +0100
+++ 2007-10-10/xen/include/asm-x86/processor.h	2007-10-26 16:50:47.000000000 +0200
@@ -478,6 +478,30 @@ long set_gdt(struct vcpu *d,
              unsigned long *frames,
              unsigned int entries);
 
+#define __loaddebug(v, reg, val) do {                                     \
+    idle_vcpu[(v)->processor]->arch.guest_context.debugreg[reg] = (val);  \
+    asm volatile ( "mov %0,%%db" #reg : : "r" (val) );                    \
+} while (0)
+
+#define loaddebug(v, reg) do {                                            \
+    unsigned long __val = (v)->arch.guest_context.debugreg[reg];          \
+    __loaddebug(v, reg, __val);                                           \
+} while (0)
+
+#define cond_loaddebug(v, reg) do {                                       \
+    unsigned long __val = (v)->arch.guest_context.debugreg[reg];          \
+    if ( idle_vcpu[(v)->processor]->arch.guest_context.debugreg[reg] !=   \
+         (__val) )                                                        \
+        __loaddebug(v, reg, __val);                                       \
+} while (0)
+
+#define savedebug(v, reg) do {                                            \
+    unsigned long __val;                                                  \
+    asm volatile ("mov %%db" #reg ",%0" : "=r" (__val));                  \
+    (v)->arch.guest_context.debugreg[reg] = __val;                        \
+    idle_vcpu[(v)->processor]->arch.guest_context.debugreg[reg] = __val;  \
+} while (0)
+
 long set_debugreg(struct vcpu *p, int reg, unsigned long value);
 
 struct microcode_header {
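A note on the "broken inline asm constraint" mentioned at the top: the old
savedebug() declared its operand as an input (": : "r" (...)"), so the
compiler merely passed the current value of debugreg[_reg] into the asm and
assumed it was only read; the value the instruction actually produced was
thrown away. Below is a minimal userspace sketch of the difference. It is
not hypervisor code: "mov %rsp" stands in for "mov %dbN" (which needs ring
0), and demo_ctxt is an invented stand-in for the real guest context.

#include <stdio.h>

/* Invented stand-in for the guest context, so the macros compile here. */
struct demo_ctxt { unsigned long debugreg[8]; };

/* Pre-patch pattern: "%0" is an INPUT operand, so the result of the mov
 * is discarded (clobbering an input without declaring it is itself
 * invalid gcc inline asm -- exactly the bug being fixed). */
#define savedebug_broken(_c, _reg) \
    asm volatile ( "mov %%rsp,%0" : : "r" ((_c)->debugreg[_reg]) )

/* Post-patch pattern: "=r" declares an OUTPUT operand, and the result is
 * stored back into the context explicitly. */
#define savedebug_fixed(_c, _reg) do {              \
    unsigned long __val;                            \
    asm volatile ( "mov %%rsp,%0" : "=r" (__val) ); \
    (_c)->debugreg[_reg] = __val;                   \
} while (0)

int main(void)
{
    struct demo_ctxt c = { { 0 } };

    savedebug_broken(&c, 0);
    printf("broken: debugreg[0] = %#lx\n", c.debugreg[0]); /* prints 0 */

    savedebug_fixed(&c, 0);
    printf("fixed:  debugreg[0] = %#lx\n", c.debugreg[0]); /* %rsp value */

    return 0;
}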
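Also note that the SVM and VMX flavours of DR7_ACTIVE_MASK intentionally
differ (0x20ff vs. 0xff). Decoded against the architectural DR7 layout (the
SVM_/VMX_ prefixed names below are invented for this note; the values are
the ones used in the patch):

/* DR7 bits tested by the masks:
 *   bits 0-7 : L0/G0 .. L3/G3 - per-breakpoint local/global enables
 *   bit 13   : GD             - general detect, faults on DR accesses
 */
#define SVM_DR7_ACTIVE_MASK 0x20ff /* breakpoint enables | GD */
#define VMX_DR7_ACTIVE_MASK 0x00ff /* breakpoint enables only */

That is, the SVM restore path also treats an armed general-detect bit as
"active", matching its comment ("breakpoints or general detect"), while the
VMX path checks just the breakpoint enables.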