Liu, Jinsong
2013-Nov-19 10:49 UTC
[PATCH 2/5] X86 architecture instruction set extension definition
From eee3a3d3072651327453220876ebe9a7345d6ffe Mon Sep 17 00:00:00 2001
From: Liu Jinsong <jinsong.liu@intel.com>
Date: Tue, 19 Nov 2013 18:44:45 +0800
Subject: [PATCH 2/5] X86 architecture instruction set extension definition

Intel has released a new version of the Intel Architecture Instruction Set
Extensions Programming Reference, adding new features like AVX-512, MPX, etc.
Refer to http://download-software.intel.com/sites/default/files/319433-015.pdf

This patch adds definitions for these new instruction set extensions. It also
adjusts valid xcr0 checking.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
---
 xen/arch/x86/xstate.c        |   38 ++++++++++++++++++++++++--------------
 xen/include/asm-x86/xstate.h |   13 +++++++++----
 2 files changed, 33 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 9e74929..1fd43c9 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -253,7 +253,7 @@ void xstate_free_save_area(struct vcpu *v)
 /* Collect the information of processor's extended state */
 void xstate_init(bool_t bsp)
 {
-    u32 eax, ebx, ecx, edx, min_size;
+    u32 eax, ebx, ecx, edx;
     u64 feature_mask;
 
     if ( boot_cpu_data.cpuid_level < XSTATE_CPUID )
@@ -269,12 +269,6 @@ void xstate_init(bool_t bsp)
     BUG_ON((eax & XSTATE_YMM) && !(eax & XSTATE_SSE));
     feature_mask = (((u64)edx << 32) | eax) & XCNTXT_MASK;
 
-    /* FP/SSE, XSAVE.HEADER, YMM */
-    min_size = XSTATE_AREA_MIN_SIZE;
-    if ( eax & XSTATE_YMM )
-        min_size += XSTATE_YMM_SIZE;
-    BUG_ON(ecx < min_size);
-
     /*
      * Set CR4_OSXSAVE and run "cpuid" to get xsave_cntxt_size.
      */
@@ -327,14 +321,33 @@ unsigned int xstate_ctxt_size(u64 xcr0)
     return ebx;
 }
 
+static bool_t valid_xcr0(u64 xcr0)
+{
+    if ( !(xcr0 & XSTATE_FP) )
+        return 0;
+
+    if ( (xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE) )
+        return 0;
+
+    if ( xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
+    {
+        if ( !(xcr0 & XSTATE_YMM) )
+            return 0;
+
+        if ( ~xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
+            return 0;
+    }
+
+    return !(xcr0 & XSTATE_BNDREGS) == !(xcr0 & XSTATE_BNDCSR);
+}
+
 int validate_xstate(u64 xcr0, u64 xcr0_accum, u64 xstate_bv, u64 xfeat_mask)
 {
     if ( (xcr0_accum & ~xfeat_mask) ||
          (xstate_bv & ~xcr0_accum) ||
          (xcr0 & ~xcr0_accum) ||
-         !(xcr0 & XSTATE_FP) ||
-         ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) ||
-         ((xcr0_accum & XSTATE_YMM) && !(xcr0_accum & XSTATE_SSE)) )
+         !valid_xcr0(xcr0) ||
+         !valid_xcr0(xcr0_accum) )
         return -EINVAL;
 
     if ( xcr0_accum & ~xfeature_mask )
@@ -351,10 +364,7 @@ int handle_xsetbv(u32 index, u64 new_bv)
     if ( index != XCR_XFEATURE_ENABLED_MASK )
         return -EOPNOTSUPP;
 
-    if ( (new_bv & ~xfeature_mask) || !(new_bv & XSTATE_FP) )
-        return -EINVAL;
-
-    if ( (new_bv & XSTATE_YMM) && !(new_bv & XSTATE_SSE) )
+    if ( (new_bv & ~xfeature_mask) || !valid_xcr0(new_bv) )
         return -EINVAL;
 
     if ( !set_xcr0(new_bv) )
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index 5617963..de5711e 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -20,18 +20,23 @@
 #define XCR_XFEATURE_ENABLED_MASK 0x00000000 /* index of XCR0 */
 
 #define XSTATE_YMM_SIZE 256
-#define XSTATE_YMM_OFFSET XSAVE_AREA_MIN_SIZE
 #define XSTATE_AREA_MIN_SIZE (512 + 64) /* FP/SSE + XSAVE.HEADER */
 
 #define XSTATE_FP      (1ULL << 0)
 #define XSTATE_SSE     (1ULL << 1)
 #define XSTATE_YMM     (1ULL << 2)
+#define XSTATE_BNDREGS (1ULL << 3)
+#define XSTATE_BNDCSR  (1ULL << 4)
+#define XSTATE_OPMASK  (1ULL << 5)
+#define XSTATE_ZMM     (1ULL << 6)
+#define XSTATE_HI_ZMM  (1ULL << 7)
 #define XSTATE_LWP     (1ULL << 62) /* AMD lightweight profiling */
 #define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)
-#define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_LWP)
+#define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | \
+                        XSTATE_ZMM | XSTATE_HI_ZMM | XSTATE_NONLAZY)
 
-#define XSTATE_ALL     (~0)
-#define XSTATE_NONLAZY (XSTATE_LWP)
+#define XSTATE_ALL     (~(1ULL << 63))
+#define XSTATE_NONLAZY (XSTATE_LWP | XSTATE_BNDREGS | XSTATE_BNDCSR)
 #define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)
 
 extern u64 xfeature_mask;
-- 
1.7.1
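As a side note for readers of the archive: the dependency rules the new
valid_xcr0() encodes are that FP is mandatory, YMM requires SSE, the three
AVX-512 components (OPMASK, ZMM, HI_ZMM) must be enabled all together or not
at all and require YMM, and the two MPX components (BNDREGS, BNDCSR) must be
enabled together. These rules can be exercised outside the hypervisor; the
following is a minimal standalone sketch assuming only the bit positions from
the header above (the harness and its sample values are illustrative, not
part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Bit positions mirror xen/include/asm-x86/xstate.h above. */
#define XSTATE_FP      (1ULL << 0)
#define XSTATE_SSE     (1ULL << 1)
#define XSTATE_YMM     (1ULL << 2)
#define XSTATE_BNDREGS (1ULL << 3)
#define XSTATE_BNDCSR  (1ULL << 4)
#define XSTATE_OPMASK  (1ULL << 5)
#define XSTATE_ZMM     (1ULL << 6)
#define XSTATE_HI_ZMM  (1ULL << 7)

/* Same dependency rules as the patch's valid_xcr0(). */
static int valid_xcr0(uint64_t xcr0)
{
    if ( !(xcr0 & XSTATE_FP) )                          /* FP is mandatory */
        return 0;
    if ( (xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE) )  /* YMM needs SSE */
        return 0;
    if ( xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
    {
        if ( !(xcr0 & XSTATE_YMM) )                     /* AVX-512 needs YMM */
            return 0;
        if ( ~xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
            return 0;                                   /* all three or none */
    }
    /* MPX: BNDREGS and BNDCSR must be enabled together. */
    return !(xcr0 & XSTATE_BNDREGS) == !(xcr0 & XSTATE_BNDCSR);
}

int main(void)
{
    const uint64_t cases[] = {
        XSTATE_FP,                                   /* valid: x87 only */
        XSTATE_FP | XSTATE_SSE | XSTATE_YMM,         /* valid: AVX */
        XSTATE_FP | XSTATE_YMM,                      /* invalid: YMM w/o SSE */
        XSTATE_FP | XSTATE_SSE | XSTATE_YMM
                  | XSTATE_OPMASK,                   /* invalid: partial AVX-512 */
        XSTATE_FP | XSTATE_BNDREGS,                  /* invalid: BNDREGS w/o BNDCSR */
    };
    for ( unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++ )
        printf("xcr0=%#llx -> %s\n", (unsigned long long)cases[i],
               valid_xcr0(cases[i]) ? "valid" : "invalid");
    return 0;
}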
Andrew Cooper
2013-Nov-21 15:14 UTC
Re: [PATCH 2/5] X86 architecture instruction set extension definition
On 19/11/13 10:49, Liu, Jinsong wrote:
> From eee3a3d3072651327453220876ebe9a7345d6ffe Mon Sep 17 00:00:00 2001
> From: Liu Jinsong <jinsong.liu@intel.com>
> Date: Tue, 19 Nov 2013 18:44:45 +0800
> Subject: [PATCH 2/5] X86 architecture instruction set extension definition
>
> Intel has released a new version of the Intel Architecture Instruction Set
> Extensions Programming Reference, adding new features like AVX-512, MPX, etc.
> Refer to http://download-software.intel.com/sites/default/files/319433-015.pdf
>
> This patch adds definitions for these new instruction set extensions. It also
> adjusts valid xcr0 checking.
>
> Signed-off-by: Jan Beulich <jbeulich@novell.com>
> ---
>  xen/arch/x86/xstate.c        |   38 ++++++++++++++++++++++++--------------
>  xen/include/asm-x86/xstate.h |   13 +++++++++----
>  2 files changed, 33 insertions(+), 18 deletions(-)
>
> diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
> index 9e74929..1fd43c9 100644
> --- a/xen/arch/x86/xstate.c
> +++ b/xen/arch/x86/xstate.c
> @@ -253,7 +253,7 @@ void xstate_free_save_area(struct vcpu *v)
>  /* Collect the information of processor's extended state */
>  void xstate_init(bool_t bsp)
>  {
> -    u32 eax, ebx, ecx, edx, min_size;
> +    u32 eax, ebx, ecx, edx;
>      u64 feature_mask;
>
>      if ( boot_cpu_data.cpuid_level < XSTATE_CPUID )
> @@ -269,12 +269,6 @@ void xstate_init(bool_t bsp)
>      BUG_ON((eax & XSTATE_YMM) && !(eax & XSTATE_SSE));
>      feature_mask = (((u64)edx << 32) | eax) & XCNTXT_MASK;
>
> -    /* FP/SSE, XSAVE.HEADER, YMM */
> -    min_size = XSTATE_AREA_MIN_SIZE;
> -    if ( eax & XSTATE_YMM )
> -        min_size += XSTATE_YMM_SIZE;
> -    BUG_ON(ecx < min_size);
> -
>      /*
>       * Set CR4_OSXSAVE and run "cpuid" to get xsave_cntxt_size.
>       */
> @@ -327,14 +321,33 @@ unsigned int xstate_ctxt_size(u64 xcr0)
>      return ebx;
>  }
>
> +static bool_t valid_xcr0(u64 xcr0)
> +{

Valid states in xcr0 are very complicated, and are really not helped by
having the dependencies split across several manuals.

I feel that for the sanity of someone trying to follow the code, there
should be comments, and the bits should be validated in position order.

So,

/* XSTATE_FP must be unconditionally set */

> +    if ( !(xcr0 & XSTATE_FP) )
> +        return 0;
> +

/* XSTATE_YMM depends on XSTATE_SSE */

> +    if ( (xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE) )
> +        return 0;

/* XSTATE_BNDREGS and BNDCSR must be the same */
if ( (xcr0 & XSTATE_BNDREGS) ^ (xcr0 & XSTATE_BNDCSR) )
    return 0;

/* XSTATE_{OPMASK,ZMM,HI_ZMM} must be the same, and require XSTATE_YMM */

> +
> +    if ( xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
> +    {
> +        if ( !(xcr0 & XSTATE_YMM) )
> +            return 0;
> +
> +        if ( ~xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
> +            return 0;
> +    }
> +

return 1;

Shouldn't there also be a test against the xfeat_mask here, rather than
at all callers?

> +    return !(xcr0 & XSTATE_BNDREGS) == !(xcr0 & XSTATE_BNDCSR);
> +}
> +
>  int validate_xstate(u64 xcr0, u64 xcr0_accum, u64 xstate_bv, u64 xfeat_mask)
>  {
>      if ( (xcr0_accum & ~xfeat_mask) ||
>           (xstate_bv & ~xcr0_accum) ||
>           (xcr0 & ~xcr0_accum) ||
> -         !(xcr0 & XSTATE_FP) ||
> -         ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) ||
> -         ((xcr0_accum & XSTATE_YMM) && !(xcr0_accum & XSTATE_SSE)) )
> +         !valid_xcr0(xcr0) ||
> +         !valid_xcr0(xcr0_accum) )
>          return -EINVAL;
>
>      if ( xcr0_accum & ~xfeature_mask )
> @@ -351,10 +364,7 @@ int handle_xsetbv(u32 index, u64 new_bv)
>      if ( index != XCR_XFEATURE_ENABLED_MASK )
>          return -EOPNOTSUPP;
>
> -    if ( (new_bv & ~xfeature_mask) || !(new_bv & XSTATE_FP) )
> -        return -EINVAL;
> -
> -    if ( (new_bv & XSTATE_YMM) && !(new_bv & XSTATE_SSE) )
> +    if ( (new_bv & ~xfeature_mask) || !valid_xcr0(new_bv) )
>          return -EINVAL;
>
>      if ( !set_xcr0(new_bv) )
> diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
> index 5617963..de5711e 100644
> --- a/xen/include/asm-x86/xstate.h
> +++ b/xen/include/asm-x86/xstate.h
> @@ -20,18 +20,23 @@
>  #define XCR_XFEATURE_ENABLED_MASK 0x00000000 /* index of XCR0 */
>
>  #define XSTATE_YMM_SIZE 256
> -#define XSTATE_YMM_OFFSET XSAVE_AREA_MIN_SIZE
>  #define XSTATE_AREA_MIN_SIZE (512 + 64) /* FP/SSE + XSAVE.HEADER */
>
>  #define XSTATE_FP      (1ULL << 0)
>  #define XSTATE_SSE     (1ULL << 1)
>  #define XSTATE_YMM     (1ULL << 2)
> +#define XSTATE_BNDREGS (1ULL << 3)
> +#define XSTATE_BNDCSR  (1ULL << 4)
> +#define XSTATE_OPMASK  (1ULL << 5)
> +#define XSTATE_ZMM     (1ULL << 6)
> +#define XSTATE_HI_ZMM  (1ULL << 7)
>  #define XSTATE_LWP     (1ULL << 62) /* AMD lightweight profiling */
>  #define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)
> -#define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_LWP)
> +#define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | \
> +                        XSTATE_ZMM | XSTATE_HI_ZMM | XSTATE_NONLAZY)
>
> -#define XSTATE_ALL     (~0)
> -#define XSTATE_NONLAZY (XSTATE_LWP)
> +#define XSTATE_ALL     (~(1ULL << 63))

Why has XSTATE_ALL changed to ~XSTATE_LWP ?

> +#define XSTATE_NONLAZY (XSTATE_LWP | XSTATE_BNDREGS | XSTATE_BNDCSR)
>  #define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)
>
>  extern u64 xfeature_mask;
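A side note on the suggested MPX check as literally written above:
XSTATE_BNDREGS and XSTATE_BNDCSR occupy different bit positions, so the XOR
of the two masked values is nonzero whenever either bit is set, including
the valid case where both are set, which such a check would wrongly reject.
The boolean-normalized comparison the patch keeps avoids this. A small
standalone demonstration (illustrative only, not part of the thread's code):

#include <stdint.h>
#include <stdio.h>

#define XSTATE_BNDREGS (1ULL << 3)
#define XSTATE_BNDCSR  (1ULL << 4)

int main(void)
{
    uint64_t xcr0 = XSTATE_BNDREGS | XSTATE_BNDCSR; /* both MPX bits: valid */

    /* Raw-mask XOR: the masks sit at different bit positions, so the
     * result is 0x18 (nonzero) even though both bits agree, and an
     * "if ( nonzero ) return 0;" check would reject this valid value. */
    printf("xor form:  %#llx\n", (unsigned long long)
           ((xcr0 & XSTATE_BNDREGS) ^ (xcr0 & XSTATE_BNDCSR)));

    /* Boolean normalization, as kept in the patch: 1 iff the two bits
     * are both set or both clear. */
    printf("bool form: %d\n",
           !(xcr0 & XSTATE_BNDREGS) == !(xcr0 & XSTATE_BNDCSR));
    return 0;
}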
Jan Beulich
2013-Nov-22 11:25 UTC
Re: [PATCH 2/5] X86 architecture instruction set extension definition
>>> On 21.11.13 at 16:14, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
> On 19/11/13 10:49, Liu, Jinsong wrote:
>> @@ -327,14 +321,33 @@ unsigned int xstate_ctxt_size(u64 xcr0)
>>      return ebx;
>>  }
>>
>> +static bool_t valid_xcr0(u64 xcr0)
>> +{
>
> Valid states in xcr0 are very complicated, and are really not helped by
> having the dependencies split across several manuals.
>
> I feel that for the sanity of someone trying to follow the code, there
> should be comments, and the bits should be validated in position order.
>
> So,
>
> /* XSTATE_FP must be unconditionally set */
>
>> +    if ( !(xcr0 & XSTATE_FP) )
>> +        return 0;
>> +
>
> /* XSTATE_YMM depends on XSTATE_SSE */
>
>> +    if ( (xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE) )
>> +        return 0;
>
> /* XSTATE_BNDREGS and BNDCSR must be the same */
> if ( (xcr0 & XSTATE_BNDREGS) ^ (xcr0 & XSTATE_BNDCSR) )
>     return 0;
>
> /* XSTATE_{OPMASK,ZMM,HI_ZMM} must be the same, and require XSTATE_YMM */

Can be done of course (albeit I'm not inclined to change the
ordering - I'd rather keep XMM/YMM/ZMM stuff together, and
handle independent things separately).

>> +
>> +    if ( xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
>> +    {
>> +        if ( !(xcr0 & XSTATE_YMM) )
>> +            return 0;
>> +
>> +        if ( ~xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
>> +            return 0;
>> +    }
>> +
>
> return 1;

Why?

> Shouldn't there also be a test against the xfeat_mask here, rather than
> at all callers?

No, the purpose of the function is to validate a single set of flags
for internal consistency. The way validate_xstate() works makes it
so validating xcr0 against xfeature_mask is unnecessary (as xcr0
is already verified to be a subset of xcr0_accum).

>> -#define XSTATE_ALL     (~0)
>> -#define XSTATE_NONLAZY (XSTATE_LWP)
>> +#define XSTATE_ALL     (~(1ULL << 63))
>
> Why has XSTATE_ALL changed to ~XSTATE_LWP ?

LWP is bit 62. Bit 63 is reserved.

Jan
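The reply's subset argument is transitivity on bit masks: validate_xstate()
checks xcr0_accum against xfeat_mask and xcr0 against xcr0_accum, and
together these already bound xcr0 by xfeat_mask. A short standalone sketch
of the reasoning (the mask values are arbitrary examples, not taken from
the thread):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* a is a subset of b iff no bit of a falls outside b. */
static int subset(uint64_t a, uint64_t b)
{
    return !(a & ~b);
}

int main(void)
{
    /* Arbitrary example masks: FP|SSE|YMM supported and accumulated,
     * FP|SSE currently enabled. */
    uint64_t xfeat_mask = 0x7, xcr0_accum = 0x7, xcr0 = 0x3;

    /* The two checks validate_xstate() performs... */
    assert(subset(xcr0_accum, xfeat_mask)); /* xcr0_accum & ~xfeat_mask == 0 */
    assert(subset(xcr0, xcr0_accum));       /* xcr0 & ~xcr0_accum == 0 */

    /* ...already imply the check it can omit: */
    assert(subset(xcr0, xfeat_mask));

    printf("xcr0 is bounded by xfeat_mask by transitivity\n");
    return 0;
}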
Andrew Cooper
2013-Nov-22 11:31 UTC
Re: [PATCH 2/5] X86 architecture instruction set extension definition
On 22/11/13 11:25, Jan Beulich wrote:
>>>> On 21.11.13 at 16:14, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
>> On 19/11/13 10:49, Liu, Jinsong wrote:
>>> @@ -327,14 +321,33 @@ unsigned int xstate_ctxt_size(u64 xcr0)
>>>      return ebx;
>>>  }
>>>
>>> +static bool_t valid_xcr0(u64 xcr0)
>>> +{
>> Valid states in xcr0 are very complicated, and are really not helped by
>> having the dependencies split across several manuals.
>>
>> I feel that for the sanity of someone trying to follow the code, there
>> should be comments, and the bits should be validated in position order.
>>
>> So,
>>
>> /* XSTATE_FP must be unconditionally set */
>>
>>> +    if ( !(xcr0 & XSTATE_FP) )
>>> +        return 0;
>>> +
>> /* XSTATE_YMM depends on XSTATE_SSE */
>>
>>> +    if ( (xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE) )
>>> +        return 0;
>> /* XSTATE_BNDREGS and BNDCSR must be the same */
>> if ( (xcr0 & XSTATE_BNDREGS) ^ (xcr0 & XSTATE_BNDCSR) )
>>     return 0;
>>
>> /* XSTATE_{OPMASK,ZMM,HI_ZMM} must be the same, and require XSTATE_YMM */
> Can be done of course (albeit I'm not inclined to change the
> ordering - I'd rather keep XMM/YMM/ZMM stuff together, and
> handle independent things separately).
>
>>> +
>>> +    if ( xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
>>> +    {
>>> +        if ( !(xcr0 & XSTATE_YMM) )
>>> +            return 0;
>>> +
>>> +        if ( ~xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
>>> +            return 0;
>>> +    }
>>> +
>> return 1;
> Why?

That was based on the re-ordering of BNDREGS/BNDCSR, but is subject to
the choice of reordering.

>
>> Shouldn't there also be a test against the xfeat_mask here, rather than
>> at all callers?
> No, the purpose of the function is to validate a single set of flags
> for internal consistency. The way validate_xstate() works makes it
> so validating xcr0 against xfeature_mask is unnecessary (as xcr0
> is already verified to be a subset of xcr0_accum).

Ok.

>
>>> -#define XSTATE_ALL     (~0)
>>> -#define XSTATE_NONLAZY (XSTATE_LWP)
>>> +#define XSTATE_ALL     (~(1ULL << 63))
>> Why has XSTATE_ALL changed to ~XSTATE_LWP ?
> LWP is bit 62. Bit 63 is reserved.
>
> Jan

Oops - so it is. In which case this change is certainly correct.

~Andrew
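The bit positions are easy to confirm: ~(1ULL << 63) clears only the
reserved bit 63, so LWP at bit 62 stays included in XSTATE_ALL, whereas
~XSTATE_LWP would have dropped it. A tiny standalone check (illustrative
only):

#include <stdint.h>
#include <stdio.h>

#define XSTATE_LWP (1ULL << 62)
#define XSTATE_ALL (~(1ULL << 63))

int main(void)
{
    /* Only the reserved bit 63 is cleared... */
    printf("XSTATE_ALL   = %#llx\n", (unsigned long long)XSTATE_ALL);
    /* ...so LWP (bit 62) remains part of XSTATE_ALL. */
    printf("LWP included = %d\n", !!(XSTATE_ALL & XSTATE_LWP));
    return 0;
}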
Jan Beulich
2013-Nov-22 13:27 UTC
[PATCH v2 2/5] x86/xsave: enable support for new ISA extensions
Intel has released a new version of the Intel Architecture Instruction Set
Extensions Programming Reference, adding new features like AVX-512, MPX,
etc. Refer to
http://download-software.intel.com/sites/default/files/319433-015.pdf

This patch adds support for these new instruction set extensions without
enabling this support for guest use, yet. It also adjusts XCR0 validation,
at once fixing the definition of XSTATE_ALL (which is not supposed to
include bit 63).

Signed-off-by: Jan Beulich <jbeulich@novell.com>

--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -253,7 +253,7 @@ void xstate_free_save_area(struct vcpu *
 /* Collect the information of processor's extended state */
 void xstate_init(bool_t bsp)
 {
-    u32 eax, ebx, ecx, edx, min_size;
+    u32 eax, ebx, ecx, edx;
     u64 feature_mask;
 
     if ( boot_cpu_data.cpuid_level < XSTATE_CPUID )
@@ -269,12 +269,6 @@ void xstate_init(bool_t bsp)
     BUG_ON((eax & XSTATE_YMM) && !(eax & XSTATE_SSE));
     feature_mask = (((u64)edx << 32) | eax) & XCNTXT_MASK;
 
-    /* FP/SSE, XSAVE.HEADER, YMM */
-    min_size = XSTATE_AREA_MIN_SIZE;
-    if ( eax & XSTATE_YMM )
-        min_size += XSTATE_YMM_SIZE;
-    BUG_ON(ecx < min_size);
-
     /*
      * Set CR4_OSXSAVE and run "cpuid" to get xsave_cntxt_size.
      */
@@ -327,14 +321,38 @@ unsigned int xstate_ctxt_size(u64 xcr0)
     return ebx;
 }
 
+static bool_t valid_xcr0(u64 xcr0)
+{
+    /* FP must be unconditionally set. */
+    if ( !(xcr0 & XSTATE_FP) )
+        return 0;
+
+    /* YMM depends on SSE. */
+    if ( (xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE) )
+        return 0;
+
+    if ( xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
+    {
+        /* OPMASK, ZMM, and HI_ZMM require YMM. */
+        if ( !(xcr0 & XSTATE_YMM) )
+            return 0;
+
+        /* OPMASK, ZMM, and HI_ZMM must be the same. */
+        if ( ~xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
+            return 0;
+    }
+
+    /* BNDREGS and BNDCSR must be the same. */
+    return !(xcr0 & XSTATE_BNDREGS) == !(xcr0 & XSTATE_BNDCSR);
+}
+
 int validate_xstate(u64 xcr0, u64 xcr0_accum, u64 xstate_bv, u64 xfeat_mask)
 {
     if ( (xcr0_accum & ~xfeat_mask) ||
          (xstate_bv & ~xcr0_accum) ||
          (xcr0 & ~xcr0_accum) ||
-         !(xcr0 & XSTATE_FP) ||
-         ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) ||
-         ((xcr0_accum & XSTATE_YMM) && !(xcr0_accum & XSTATE_SSE)) )
+         !valid_xcr0(xcr0) ||
+         !valid_xcr0(xcr0_accum) )
         return -EINVAL;
 
     if ( xcr0_accum & ~xfeature_mask )
@@ -351,10 +369,7 @@ int handle_xsetbv(u32 index, u64 new_bv)
     if ( index != XCR_XFEATURE_ENABLED_MASK )
         return -EOPNOTSUPP;
 
-    if ( (new_bv & ~xfeature_mask) || !(new_bv & XSTATE_FP) )
-        return -EINVAL;
-
-    if ( (new_bv & XSTATE_YMM) && !(new_bv & XSTATE_SSE) )
+    if ( (new_bv & ~xfeature_mask) || !valid_xcr0(new_bv) )
         return -EINVAL;
 
     if ( !set_xcr0(new_bv) )
@@ -364,6 +379,10 @@
     curr->arch.xcr0 = new_bv;
     curr->arch.xcr0_accum |= new_bv;
 
+    /* LWP sets nonlazy_xstate_used independently. */
+    if ( new_bv & (XSTATE_NONLAZY & ~XSTATE_LWP) )
+        curr->arch.nonlazy_xstate_used = 1;
+
     mask &= curr->fpu_dirtied ? ~XSTATE_FP_SSE : XSTATE_NONLAZY;
     if ( mask )
     {
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -20,18 +20,23 @@
 #define XCR_XFEATURE_ENABLED_MASK 0x00000000 /* index of XCR0 */
 
 #define XSTATE_YMM_SIZE 256
-#define XSTATE_YMM_OFFSET XSAVE_AREA_MIN_SIZE
 #define XSTATE_AREA_MIN_SIZE (512 + 64) /* FP/SSE + XSAVE.HEADER */
 
 #define XSTATE_FP      (1ULL << 0)
 #define XSTATE_SSE     (1ULL << 1)
 #define XSTATE_YMM     (1ULL << 2)
+#define XSTATE_BNDREGS (1ULL << 3)
+#define XSTATE_BNDCSR  (1ULL << 4)
+#define XSTATE_OPMASK  (1ULL << 5)
+#define XSTATE_ZMM     (1ULL << 6)
+#define XSTATE_HI_ZMM  (1ULL << 7)
 #define XSTATE_LWP     (1ULL << 62) /* AMD lightweight profiling */
 #define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)
-#define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_LWP)
+#define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | \
+                        XSTATE_ZMM | XSTATE_HI_ZMM | XSTATE_NONLAZY)
 
-#define XSTATE_ALL     (~0)
-#define XSTATE_NONLAZY (XSTATE_LWP)
+#define XSTATE_ALL     (~(1ULL << 63))
+#define XSTATE_NONLAZY (XSTATE_LWP | XSTATE_BNDREGS | XSTATE_BNDCSR)
 #define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)
 
 extern u64 xfeature_mask;
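The new handle_xsetbv() hunk masks XSTATE_NONLAZY with ~XSTATE_LWP before
testing new_bv, so with the v2 definitions the effective trigger is just
the two MPX bits; LWP manages nonlazy_xstate_used on its own path. A small
standalone sketch of that mask arithmetic (the sample new_bv is chosen for
illustration, not taken from the thread):

#include <stdint.h>
#include <stdio.h>

#define XSTATE_FP      (1ULL << 0)
#define XSTATE_SSE     (1ULL << 1)
#define XSTATE_YMM     (1ULL << 2)
#define XSTATE_BNDREGS (1ULL << 3)
#define XSTATE_BNDCSR  (1ULL << 4)
#define XSTATE_LWP     (1ULL << 62)
#define XSTATE_NONLAZY (XSTATE_LWP | XSTATE_BNDREGS | XSTATE_BNDCSR)

int main(void)
{
    /* NONLAZY minus LWP leaves only BNDREGS|BNDCSR, i.e. 0x18. */
    printf("nonlazy trigger mask = %#llx\n",
           (unsigned long long)(XSTATE_NONLAZY & ~XSTATE_LWP));

    /* A guest enabling FP/SSE/YMM plus MPX would be flagged: */
    uint64_t new_bv = XSTATE_FP | XSTATE_SSE | XSTATE_YMM |
                      XSTATE_BNDREGS | XSTATE_BNDCSR;
    printf("nonlazy_xstate_used would be set: %d\n",
           !!(new_bv & (XSTATE_NONLAZY & ~XSTATE_LWP)));
    return 0;
}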
George Dunlap
2013-Nov-22 14:48 UTC
Re: [PATCH v2 2/5] x86/xsave: enable support for new ISA extensions
On Fri, Nov 22, 2013 at 1:27 PM, Jan Beulich <JBeulich@suse.com> wrote:
> Intel has released a new version of the Intel Architecture Instruction Set
> Extensions Programming Reference, adding new features like AVX-512,
> MPX, etc. Refer to
> http://download-software.intel.com/sites/default/files/319433-015.pdf
>
> This patch adds support for these new instruction set extensions
> without enabling this support for guest use, yet.

I take it you're just putting this up for review then, and will check
it in once the dev window opens for 4.5?

 -George
Jan Beulich
2013-Nov-22 15:47 UTC
Re: [PATCH v2 2/5] x86/xsave: enable support for new ISA extensions
>>> On 22.11.13 at 15:48, George Dunlap <George.Dunlap@eu.citrix.com> wrote:
> On Fri, Nov 22, 2013 at 1:27 PM, Jan Beulich <JBeulich@suse.com> wrote:
>> Intel has released a new version of the Intel Architecture Instruction Set
>> Extensions Programming Reference, adding new features like AVX-512,
>> MPX, etc. Refer to
>> http://download-software.intel.com/sites/default/files/319433-015.pdf
>>
>> This patch adds support for these new instruction set extensions
>> without enabling this support for guest use, yet.
>
> I take it you're just putting this up for review then, and will check
> it in once the dev window opens for 4.5?

Yes. Only patch 1 in this series is intended to be considered for 4.4.

Jan