Bastian Blank
2008-Jul-12 11:03 UTC
[Pkg-xen-changes] r599 - in branches/etch/xen-3.0/debian: . patches
Author: waldi
Date: Sat Jul 12 11:03:33 2008
New Revision: 599

Log:
Fix CVE-2007-5906 and CVE-2008-0928.
* debian/changelog: Update.
* debian/patches/00list: Add new patches.
* debian/patches/CVE-2007-5906.dpatch, debian/patches/CVE-2008-0928.dpatch: Add.

Added:
   branches/etch/xen-3.0/debian/patches/CVE-2007-5906.dpatch   (contents, props changed)
   branches/etch/xen-3.0/debian/patches/CVE-2008-0928.dpatch
Modified:
   branches/etch/xen-3.0/debian/changelog
   branches/etch/xen-3.0/debian/patches/00list

Modified: branches/etch/xen-3.0/debian/changelog
==============================================================================
--- branches/etch/xen-3.0/debian/changelog	(original)
+++ branches/etch/xen-3.0/debian/changelog	Sat Jul 12 11:03:33 2008
@@ -1,3 +1,12 @@
+xen-3.0 (3.0.3-0-5) UNRELEASED; urgency=low
+
+  * Clear debug registers for HVM guests.
+    See: CVE-2007-5906
+  * Fix range checks in ioemu block support.
+    See: CVE-2008-0928
+
+ -- Bastian Blank <waldi@debian.org>  Sat, 12 Jul 2008 12:52:46 +0200
+
 xen-3.0 (3.0.3-0-4) stable-security; urgency=low
 
   * Non-maintainer upload by The Security Team.

Modified: branches/etch/xen-3.0/debian/patches/00list
==============================================================================
--- branches/etch/xen-3.0/debian/patches/00list	(original)
+++ branches/etch/xen-3.0/debian/patches/00list	Sat Jul 12 11:03:33 2008
@@ -10,3 +10,5 @@
 not-reinvent-udev
 CVE-2007-4993
 CVE-2007-1320
+CVE-2007-5906
+CVE-2008-0928

Added: branches/etch/xen-3.0/debian/patches/CVE-2007-5906.dpatch
==============================================================================
--- (empty file)
+++ branches/etch/xen-3.0/debian/patches/CVE-2007-5906.dpatch	Sat Jul 12 11:03:33 2008
@@ -0,0 +1,379 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+
+@DPATCH@
+diff -r d4449461dc16 -r 5f6dc1bafffd xen/arch/x86/domain.c
+--- a/xen/arch/x86/domain.c	Tue Oct 17 22:09:27 2006 +0100
++++ b/xen/arch/x86/domain.c	Thu Nov 01 16:26:38 2007 +0000
+@@ -305,13 +305,13 @@ int arch_set_info_guest(
+         hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs);
+     }
+ 
+-    if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+-        return 0;
+-
+     memset(v->arch.guest_context.debugreg, 0,
+            sizeof(v->arch.guest_context.debugreg));
+     for ( i = 0; i < 8; i++ )
+         (void)set_debugreg(v, i, c->debugreg[i]);
++
++    if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
++        return 0;
+ 
+     if ( v->vcpu_id == 0 )
+         d->vm_assist = c->vm_assist;
+@@ -633,16 +633,32 @@ static void paravirt_ctxt_switch_from(st
+ static void paravirt_ctxt_switch_from(struct vcpu *v)
+ {
+     save_segments(v);
++
++    /*
++     * Disable debug breakpoints. We do this aggressively because if we switch
++     * to an HVM guest we may load DR0-DR3 with values that can cause #DE
++     * inside Xen, before we get a chance to reload DR7, and this cannot always
++     * safely be handled.
++     */
++    if ( unlikely(v->arch.guest_context.debugreg[7]) )
++        write_debugreg(7, 0);
+ }
+ 
+ static void paravirt_ctxt_switch_to(struct vcpu *v)
+ {
+     set_int80_direct_trap(v);
+     switch_kernel_stack(v);
++
++    if ( unlikely(v->arch.guest_context.debugreg[7]) )
++    {
++        write_debugreg(0, v->arch.guest_context.debugreg[0]);
++        write_debugreg(1, v->arch.guest_context.debugreg[1]);
++        write_debugreg(2, v->arch.guest_context.debugreg[2]);
++        write_debugreg(3, v->arch.guest_context.debugreg[3]);
++        write_debugreg(6, v->arch.guest_context.debugreg[6]);
++        write_debugreg(7, v->arch.guest_context.debugreg[7]);
++    }
+ }
+-
+-#define loaddebug(_v,_reg) \
+-    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
+ 
+ static void __context_switch(void)
+ {
+@@ -668,18 +684,6 @@ static void __context_switch(void)
+         memcpy(stack_regs,
+                &n->arch.guest_context.user_regs,
+                CTXT_SWITCH_STACK_BYTES);
+-
+-        /* Maybe switch the debug registers. */
+-        if ( unlikely(n->arch.guest_context.debugreg[7]) )
+-        {
+-            loaddebug(&n->arch.guest_context, 0);
+-            loaddebug(&n->arch.guest_context, 1);
+-            loaddebug(&n->arch.guest_context, 2);
+-            loaddebug(&n->arch.guest_context, 3);
+-            /* no 4 and 5 */
+-            loaddebug(&n->arch.guest_context, 6);
+-            loaddebug(&n->arch.guest_context, 7);
+-        }
+         n->arch.ctxt_switch_to(n);
+     }
+ 
+diff -r d4449461dc16 -r 5f6dc1bafffd xen/arch/x86/hvm/svm/svm.c
+--- a/xen/arch/x86/hvm/svm/svm.c	Tue Oct 17 22:09:27 2006 +0100
++++ b/xen/arch/x86/hvm/svm/svm.c	Thu Nov 01 16:26:38 2007 +0000
+@@ -418,45 +418,48 @@ static inline int long_mode_do_msr_write
+ }
+ 
+ 
+-#define loaddebug(_v,_reg) \
+-    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
+-#define savedebug(_v,_reg) \
+-    __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
+-
+-
+ static inline void svm_save_dr(struct vcpu *v)
+ {
+     if (v->arch.hvm_vcpu.flag_dr_dirty)
+     {
+         /* clear the DR dirty flag and re-enable intercepts for DR accesses */
+         v->arch.hvm_vcpu.flag_dr_dirty = 0;
+-        v->arch.hvm_svm.vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
++        v->arch.hvm_svm.vmcb->dr_intercepts = ~0u;
+ 
+-        savedebug(&v->arch.guest_context, 0);
+-        savedebug(&v->arch.guest_context, 1);
+-        savedebug(&v->arch.guest_context, 2);
+-        savedebug(&v->arch.guest_context, 3);
++        v->arch.guest_context.debugreg[0] = read_debugreg(0);
++        v->arch.guest_context.debugreg[1] = read_debugreg(1);
++        v->arch.guest_context.debugreg[2] = read_debugreg(2);
++        v->arch.guest_context.debugreg[3] = read_debugreg(3);
+     }
+ }
+ 
+ 
+ static inline void __restore_debug_registers(struct vcpu *v)
+ {
+-    loaddebug(&v->arch.guest_context, 0);
+-    loaddebug(&v->arch.guest_context, 1);
+-    loaddebug(&v->arch.guest_context, 2);
+-    loaddebug(&v->arch.guest_context, 3);
++    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
++
++    ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
++    v->arch.hvm_vcpu.flag_dr_dirty = 1;
++    vmcb->dr_intercepts = 0;
++
++    write_debugreg(0, v->arch.guest_context.debugreg[0]);
++    write_debugreg(1, v->arch.guest_context.debugreg[1]);
++    write_debugreg(2, v->arch.guest_context.debugreg[2]);
++    write_debugreg(3, v->arch.guest_context.debugreg[3]);
+ }
+ 
+ 
+-static inline void svm_restore_dr(struct vcpu *v)
++/*
++ * DR7 is saved and restored on every vmexit. Other debug registers only
++ * need to be restored if their value is going to affect execution -- i.e.,
++ * if one of the breakpoints is enabled. So mask out all bits that don't
++ * enable some breakpoint functionality.
++ */
++#define DR7_ACTIVE_MASK 0xff
++
++static void svm_restore_dr(struct vcpu *v)
+ {
+-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+-
+-    if (!vmcb)
+-        return;
+-
+-    if (unlikely(vmcb->dr7 & 0xFF))
++    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
+         __restore_debug_registers(v);
+ }
+ 
+@@ -1233,14 +1236,7 @@ static inline void set_reg(unsigned int 
+ 
+ static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
+ {
+-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+-
+-    v->arch.hvm_vcpu.flag_dr_dirty = 1;
+-
+     __restore_debug_registers(v);
+-
+-    /* allow the guest full access to the debug registers */
+-    vmcb->dr_intercepts = 0;
+ }
+ 
+ 
+diff -r d4449461dc16 -r 5f6dc1bafffd xen/arch/x86/hvm/svm/vmcb.c
+--- a/xen/arch/x86/hvm/svm/vmcb.c	Tue Oct 17 22:09:27 2006 +0100
++++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu Nov 01 16:26:38 2007 +0000
+@@ -121,7 +121,7 @@ static int construct_vmcb_controls(struc
+         GENERAL2_INTERCEPT_SKINIT | GENERAL2_INTERCEPT_RDTSCP;
+ 
+     /* read or write all debug registers 0 - 15 */
+-    vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
++    vmcb->dr_intercepts = ~0u;
+ 
+     /* RD/WR all control registers 0 - 15, but not read CR2 */
+     vmcb->cr_intercepts = ~(CR_INTERCEPT_CR2_READ | CR_INTERCEPT_CR2_WRITE);
+diff -r d4449461dc16 -r 5f6dc1bafffd xen/arch/x86/hvm/vmx/vmx.c
+--- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Oct 17 22:09:27 2006 +0100
++++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 01 16:26:38 2007 +0000
+@@ -395,37 +395,34 @@ static inline int long_mode_do_msr_write
+ 
+ #endif /* __i386__ */
+ 
+-#define loaddebug(_v,_reg) \
+-    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
+-#define savedebug(_v,_reg) \
+-    __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
+-
+ static inline void vmx_save_dr(struct vcpu *v)
+ {
+     if ( v->arch.hvm_vcpu.flag_dr_dirty )
+     {
+-        savedebug(&v->arch.guest_context, 0);
+-        savedebug(&v->arch.guest_context, 1);
+-        savedebug(&v->arch.guest_context, 2);
+-        savedebug(&v->arch.guest_context, 3);
+-        savedebug(&v->arch.guest_context, 6);
+-
+         v->arch.hvm_vcpu.flag_dr_dirty = 0;
+ 
+         v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
+         __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
+                   v->arch.hvm_vcpu.u.vmx.exec_control);
++
++        v->arch.guest_context.debugreg[0] = read_debugreg(0);
++        v->arch.guest_context.debugreg[1] = read_debugreg(1);
++        v->arch.guest_context.debugreg[2] = read_debugreg(2);
++        v->arch.guest_context.debugreg[3] = read_debugreg(3);
++        v->arch.guest_context.debugreg[6] = read_debugreg(6);
+     }
+ }
+ 
+ static inline void __restore_debug_registers(struct vcpu *v)
+ {
+-    loaddebug(&v->arch.guest_context, 0);
+-    loaddebug(&v->arch.guest_context, 1);
+-    loaddebug(&v->arch.guest_context, 2);
+-    loaddebug(&v->arch.guest_context, 3);
+-    /* No 4 and 5 */
+-    loaddebug(&v->arch.guest_context, 6);
++    ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
++    v->arch.hvm_vcpu.flag_dr_dirty = 1;
++
++    write_debugreg(0, v->arch.guest_context.debugreg[0]);
++    write_debugreg(1, v->arch.guest_context.debugreg[1]);
++    write_debugreg(2, v->arch.guest_context.debugreg[2]);
++    write_debugreg(3, v->arch.guest_context.debugreg[3]);
++    write_debugreg(6, v->arch.guest_context.debugreg[6]);
+     /* DR7 is loaded from the vmcs. */
*/ + } + +@@ -1008,10 +1005,8 @@ static void vmx_dr_access(unsigned long + { + struct vcpu *v = current; + +- v->arch.hvm_vcpu.flag_dr_dirty = 1; +- +- /* We could probably be smarter about this */ +- __restore_debug_registers(v); ++ if ( !v->arch.hvm_vcpu.flag_dr_dirty ) ++ __restore_debug_registers(v); + + /* Allow guest direct access to DR registers */ + v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING; +diff -r d4449461dc16 -r 5f6dc1bafffd xen/arch/x86/traps.c +--- a/xen/arch/x86/traps.c Tue Oct 17 22:09:27 2006 +0100 ++++ b/xen/arch/x86/traps.c Thu Nov 01 16:26:38 2007 +0000 +@@ -1667,20 +1667,9 @@ asmlinkage int math_state_restore(struct + + asmlinkage int do_debug(struct cpu_user_regs *regs) + { +- unsigned long condition; + struct vcpu *v = current; + struct trap_bounce *tb = &v->arch.trap_bounce; + struct trap_info *ti; +- +- __asm__ __volatile__("mov %%db6,%0" : "=r" (condition)); +- +- /* Mask out spurious debug traps due to lazy DR7 setting */ +- if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) && +- (v->arch.guest_context.debugreg[7] == 0) ) +- { +- __asm__("mov %0,%%db7" : : "r" (0UL)); +- goto out; +- } + + DEBUGGER_trap_entry(TRAP_debug, regs); + +@@ -1698,7 +1687,7 @@ asmlinkage int do_debug(struct cpu_user_ + } + + /* Save debug status register where guest OS can peek at it */ +- v->arch.guest_context.debugreg[6] = condition; ++ v->arch.guest_context.debugreg[6] = read_debugreg(6); + + ti = &v->arch.guest_context.trap_ctxt[TRAP_debug]; + tb->flags = TBF_EXCEPTION; +@@ -1844,25 +1833,25 @@ long set_debugreg(struct vcpu *p, int re + if ( !access_ok(value, sizeof(long)) ) + return -EPERM; + if ( p == current ) +- __asm__ ( "mov %0, %%db0" : : "r" (value) ); ++ write_debugreg(0, value); + break; + case 1: + if ( !access_ok(value, sizeof(long)) ) + return -EPERM; + if ( p == current ) +- __asm__ ( "mov %0, %%db1" : : "r" (value) ); ++ write_debugreg(1, value); + break; + case 2: + if ( !access_ok(value, sizeof(long)) ) + return -EPERM; + if ( p == current ) +- __asm__ ( "mov %0, %%db2" : : "r" (value) ); ++ write_debugreg(2, value); + break; + case 3: + if ( !access_ok(value, sizeof(long)) ) + return -EPERM; + if ( p == current ) +- __asm__ ( "mov %0, %%db3" : : "r" (value) ); ++ write_debugreg(3, value); + break; + case 6: + /* +@@ -1872,7 +1861,7 @@ long set_debugreg(struct vcpu *p, int re + value &= 0xffffefff; /* reserved bits => 0 */ + value |= 0xffff0ff0; /* reserved bits => 1 */ + if ( p == current ) +- __asm__ ( "mov %0, %%db6" : : "r" (value) ); ++ write_debugreg(6, value); + break; + case 7: + /* +@@ -1891,9 +1880,22 @@ long set_debugreg(struct vcpu *p, int re + if ( (value & (1<<13)) != 0 ) return -EPERM; + for ( i = 0; i < 16; i += 2 ) + if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM; ++ /* ++ * If DR7 was previously clear then we need to load all other ++ * debug registers at this point as they were not restored during ++ * context switch. 
++             */
++            if ( (p == current) && (p->arch.guest_context.debugreg[7] == 0) )
++            {
++                write_debugreg(0, p->arch.guest_context.debugreg[0]);
++                write_debugreg(1, p->arch.guest_context.debugreg[1]);
++                write_debugreg(2, p->arch.guest_context.debugreg[2]);
++                write_debugreg(3, p->arch.guest_context.debugreg[3]);
++                write_debugreg(6, p->arch.guest_context.debugreg[6]);
++            }
+         }
+         if ( p == current )
+-            __asm__ ( "mov %0, %%db7" : : "r" (value) );
++            write_debugreg(7, value);
+         break;
+     default:
+         return -EINVAL;
+diff -r d4449461dc16 -r 5f6dc1bafffd xen/include/asm-x86/hvm/svm/vmcb.h
+--- a/xen/include/asm-x86/hvm/svm/vmcb.h	Tue Oct 17 22:09:27 2006 +0100
++++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Nov 01 16:26:38 2007 +0000
+@@ -151,13 +151,6 @@ enum DRInterceptBits
+     DR_INTERCEPT_DR14_WRITE = 1 << 30,
+     DR_INTERCEPT_DR15_WRITE = 1 << 31,
+ };
+-
+-/* for lazy save/restore we'd like to intercept all DR writes */
+-#define DR_INTERCEPT_ALL_WRITES \
+-    (DR_INTERCEPT_DR0_WRITE|DR_INTERCEPT_DR1_WRITE|DR_INTERCEPT_DR2_WRITE \
+-    |DR_INTERCEPT_DR3_WRITE|DR_INTERCEPT_DR4_WRITE|DR_INTERCEPT_DR5_WRITE \
+-    |DR_INTERCEPT_DR6_WRITE|DR_INTERCEPT_DR7_WRITE)
+-
+ 
+ enum VMEXIT_EXITCODE
+ {
+diff -r d4449461dc16 -r 5f6dc1bafffd xen/include/asm-x86/processor.h
+--- a/xen/include/asm-x86/processor.h	Tue Oct 17 22:09:27 2006 +0100
++++ b/xen/include/asm-x86/processor.h	Thu Nov 01 16:26:38 2007 +0000
+@@ -478,6 +478,15 @@ long set_gdt(struct vcpu *d, 
+              unsigned long *frames, 
+              unsigned int entries);
+ 
++#define write_debugreg(reg, val) do {                       \
++    unsigned long __val = val;                              \
++    asm volatile ( "mov %0,%%db" #reg : : "r" (__val) );    \
++} while (0)
++#define read_debugreg(reg) ({                               \
++    unsigned long __val;                                    \
++    asm volatile ( "mov %%db" #reg ",%0" : "=r" (__val) );  \
++    __val;                                                  \
++})
+ long set_debugreg(struct vcpu *p, int reg, unsigned long value);
+ 
+ struct microcode_header {

Added: branches/etch/xen-3.0/debian/patches/CVE-2008-0928.dpatch
==============================================================================
--- (empty file)
+++ branches/etch/xen-3.0/debian/patches/CVE-2008-0928.dpatch	Sat Jul 12 11:03:33 2008
@@ -0,0 +1,67 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+ 
+@DPATCH@
+diff -r 5f6dc1bafffd -r 42c6318fb648 tools/ioemu/block.c
+--- a/tools/ioemu/block.c	Thu Nov 01 16:26:38 2007 +0000
++++ b/tools/ioemu/block.c	Wed Feb 20 17:46:10 2008 +0000
+@@ -47,6 +47,8 @@
+ #ifdef __sun__
+ #include <sys/dkio.h>
+ #endif
++
++#define SECTOR_BITS 9
+ 
+ static BlockDriverState *bdrv_first;
+ static BlockDriver *first_drv;
+@@ -108,6 +110,15 @@ kern_return_t GetBSDPath( io_iterator_t 
+ }
+ 
+ #endif
++
++static int bdrv_rw_badreq_sectors(BlockDriverState *bs,
++                                  int64_t sector_num, int nb_sectors)
++{
++    return
++        nb_sectors < 0 ||
++        nb_sectors > bs->total_sectors ||
++        sector_num > bs->total_sectors - nb_sectors;
++}
+ 
+ void bdrv_register(BlockDriver *bdrv)
+ {
+@@ -307,6 +318,7 @@ int bdrv_open2(BlockDriverState *bs, con
+     }
+     bs->drv = drv;
+     bs->opaque = qemu_mallocz(drv->instance_size);
++    bs->total_sectors = 0; /* driver will set if it does not do getlength */
+     if (bs->opaque == NULL && drv->instance_size > 0)
+         return -1;
+ 
+@@ -358,6 +370,7 @@ void bdrv_close(BlockDriverState *bs)
+         bs->inserted = 0;
+ 
+         /* call the change callback */
++        bs->total_sectors = 0;
+         if (bs->change_cb)
+             bs->change_cb(bs->change_opaque);
+     }
+@@ -418,8 +431,8 @@ int bdrv_read(BlockDriverState *bs, int6
+     int ret, n;
+     BlockDriver *drv = bs->drv;
+ 
+-    if (!bs->inserted)
+-        return -1;
++    if (bdrv_rw_badreq_sectors(bs, sector_num, nb_sectors))
++        return -EDOM;
+ 
+     while (nb_sectors > 0) {
+         if (sector_num == 0 && bs->boot_sector_enabled) {
+@@ -458,6 +471,8 @@ int bdrv_write(BlockDriverState *bs, int
+         return -1;
+     if (bs->read_only)
+         return -1;
++    if (bdrv_rw_badreq_sectors(bs, sector_num, nb_sectors))
++        return -EDOM;
+     if (sector_num == 0 && bs->boot_sector_enabled && nb_sectors > 0) {
+         memcpy(bs->boot_sector_data, buf, 512);
+     }
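
For readers following CVE-2007-5906.dpatch: the idea is that DR0-DR6 only need to be reloaded when the incoming context actually has a breakpoint enabled in DR7, and that DR7 must be cleared before switching away so stale breakpoints cannot fire inside the hypervisor. Below is a minimal user-space sketch of that policy, not Xen code: hw_dr[], struct guest_ctx and the ctxt_switch_* helpers are stand-ins invented for the example, with the real debug registers replaced by a plain array.

/* Standalone model of the lazy debug-register switching introduced above. */
#include <assert.h>
#include <stdio.h>

#define DR7_ACTIVE_MASK 0xff            /* L0-L3/G0-G3 enable bits, as in the patch */

static unsigned long hw_dr[8];          /* simulated hardware DR0-DR7 */

struct guest_ctx {
    unsigned long debugreg[8];          /* saved guest DR0-DR7 */
};

/* Clear DR7 before switching away so stale breakpoints cannot trigger. */
static void ctxt_switch_from(const struct guest_ctx *g)
{
    if (g->debugreg[7])
        hw_dr[7] = 0;
}

/* Only reload the full set if the incoming context has a breakpoint enabled. */
static void ctxt_switch_to(const struct guest_ctx *g)
{
    if (g->debugreg[7] & DR7_ACTIVE_MASK) {
        for (int i = 0; i <= 3; i++)
            hw_dr[i] = g->debugreg[i];
        hw_dr[6] = g->debugreg[6];
        hw_dr[7] = g->debugreg[7];
    }
}

int main(void)
{
    struct guest_ctx quiet = { .debugreg = { 0 } };
    struct guest_ctx debugging = { .debugreg = { 0xdead0000, 0, 0, 0,
                                                 0, 0, 0xffff0ff0, 0x1 } };

    ctxt_switch_to(&debugging);         /* DR7 bit 0 set -> full reload */
    assert(hw_dr[0] == 0xdead0000 && hw_dr[7] == 0x1);

    ctxt_switch_from(&debugging);       /* DR7 cleared on the way out */
    assert(hw_dr[7] == 0);

    ctxt_switch_to(&quiet);             /* nothing enabled -> nothing loaded */
    assert(hw_dr[7] == 0);

    puts("lazy DR switch model ok");
    return 0;
}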
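
Likewise, the bdrv_rw_badreq_sectors() check added by CVE-2008-0928.dpatch is ordered so that no intermediate arithmetic can overflow: nb_sectors is validated against total_sectors before the subtraction, and sector_num is never added to anything. A standalone restatement of that check follows; badreq_sectors() and the values in main() are illustrative, not taken from ioemu.

/* Restatement of the sector range check, with overflow-safe ordering. */
#include <assert.h>
#include <stdint.h>

static int badreq_sectors(int64_t total_sectors,
                          int64_t sector_num, int nb_sectors)
{
    /*
     * nb_sectors is checked against total_sectors first, so the
     * subtraction below is non-negative and sector_num is compared
     * without ever being added to anything.
     */
    return nb_sectors < 0 ||
           nb_sectors > total_sectors ||
           sector_num > total_sectors - nb_sectors;
}

int main(void)
{
    int64_t total = 2048;                        /* 1 MiB disk, 512-byte sectors */

    assert(!badreq_sectors(total, 0, 2048));     /* whole-disk access is fine */
    assert(badreq_sectors(total, 2040, 16));     /* request runs past the end */
    assert(badreq_sectors(total, 0, -1));        /* negative count rejected   */
    assert(badreq_sectors(total, INT64_MAX, 1)); /* huge offset, no overflow  */
    return 0;
}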