tools/libxc/xc_cpuid_x86.c | 1 + xen/arch/x86/hvm/vmx/vmx.c | 2 ++ xen/arch/x86/hvm/vmx/vpmu_core2.c | 38 ++++++++++++++++++++++++++++++++++++-- xen/arch/x86/hvm/vpmu.c | 25 ++++++++++++++++++------- xen/include/asm-x86/hvm/vpmu.h | 6 ++++++ 5 files changed, 63 insertions(+), 9 deletions(-) The "Debug Store" cpuid flag in the Intel processors gets enabled in the libxc. A new function call arch_vpmu_cpuid is added to the struct arch_vpmu_ops and for Intel processors the function core2_vpmu_cpuid() is added. The aim is that always a "struct arch_vpmu_ops" is accessible at least with a cpuid function to switch off special PMU cpuid flags dynamically depending on the boot variable opt_vpmu_enabled. Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com> diff -r 7a82c2e2eb33 tools/libxc/xc_cpuid_x86.c --- a/tools/libxc/xc_cpuid_x86.c Thu Jan 19 13:14:02 2012 +0100 +++ b/tools/libxc/xc_cpuid_x86.c Thu Jan 19 14:37:17 2012 +0100 @@ -343,6 +343,7 @@ static void xc_cpuid_hvm_policy( bitmaskof(X86_FEATURE_CMOV) | bitmaskof(X86_FEATURE_PAT) | bitmaskof(X86_FEATURE_CLFLSH) | + bitmaskof(X86_FEATURE_DS) | bitmaskof(X86_FEATURE_PSE36) | bitmaskof(X86_FEATURE_MMX) | bitmaskof(X86_FEATURE_FXSR) | diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vmx/vmx.c --- a/xen/arch/x86/hvm/vmx/vmx.c Thu Jan 19 13:14:02 2012 +0100 +++ b/xen/arch/x86/hvm/vmx/vmx.c Thu Jan 19 14:37:17 2012 +0100 @@ -1603,6 +1603,8 @@ static void vmx_cpuid_intercept( break; } + vpmu_do_cpuid(input, eax, ebx, ecx, edx); + HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx); } diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vmx/vpmu_core2.c --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Thu Jan 19 13:14:02 2012 +0100 +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Thu Jan 19 14:37:17 2012 +0100 @@ -598,6 +598,29 @@ static void core2_vpmu_destroy(struct vc vpmu->flags &= ~VPMU_CONTEXT_ALLOCATED; } +/** + * core2_vpmu_cpuid - prepare special vpmu cpuid bits + * If emulation of vpmu is switched off, some bits are swtiched off, currently: + * - 
EAX[0xa].EAX[Bits 0-7]: PMC revision id. + * - EAX[0x1].EDX[Bit 21]: Debug Store + */ +#define bitmaskof(idx) (1U << ((idx) & 31)) +static void core2_vpmu_cpuid(unsigned int input, + unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + switch ( input ) + { + case 0x1: + *edx &= ~bitmaskof(X86_FEATURE_DS); /* Debug Store not supported */ + break; + case 0xa: + if ( !opt_vpmu_enabled ) + *eax &= ~(unsigned int)0xff; /* Clear pmc version id. */ + break; + } +} + struct arch_vpmu_ops core2_vpmu_ops = { .do_wrmsr = core2_vpmu_do_wrmsr, .do_rdmsr = core2_vpmu_do_rdmsr, @@ -605,7 +628,13 @@ struct arch_vpmu_ops core2_vpmu_ops = { .arch_vpmu_initialise = core2_vpmu_initialise, .arch_vpmu_destroy = core2_vpmu_destroy, .arch_vpmu_save = core2_vpmu_save, - .arch_vpmu_load = core2_vpmu_load + .arch_vpmu_load = core2_vpmu_load, + .arch_vpmu_cpuid = core2_vpmu_cpuid +}; + +/* Used if vpmu is disabled. */ +struct arch_vpmu_ops core2_vpmu_dis_ops = { + .arch_vpmu_cpuid = core2_vpmu_cpuid }; int vmx_vpmu_initialize(struct vcpu *v) @@ -615,7 +644,7 @@ int vmx_vpmu_initialize(struct vcpu *v) __u8 cpu_model = current_cpu_data.x86_model; if ( !opt_vpmu_enabled ) - return -EINVAL; + goto func_out; if ( family == 6 ) { @@ -635,6 +664,11 @@ int vmx_vpmu_initialize(struct vcpu *v) printk("VPMU: Initialization failed. 
" "Intel processor family %d model %d has not " "been supported\n", family, cpu_model); + +func_out: + + vpmu->arch_vpmu_ops = &core2_vpmu_dis_ops; + return -EINVAL; } diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vpmu.c --- a/xen/arch/x86/hvm/vpmu.c Thu Jan 19 13:14:02 2012 +0100 +++ b/xen/arch/x86/hvm/vpmu.c Thu Jan 19 14:37:17 2012 +0100 @@ -39,7 +39,7 @@ int vpmu_do_wrmsr(unsigned int msr, uint { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr ) return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content); return 0; } @@ -48,7 +48,7 @@ int vpmu_do_rdmsr(unsigned int msr, uint { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr ) return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content); return 0; } @@ -57,7 +57,7 @@ int vpmu_do_interrupt(struct cpu_user_re { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt ) return vpmu->arch_vpmu_ops->do_interrupt(regs); return 0; } @@ -66,7 +66,7 @@ void vpmu_save(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save ) vpmu->arch_vpmu_ops->arch_vpmu_save(v); } @@ -74,7 +74,7 @@ void vpmu_load(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load ) vpmu->arch_vpmu_ops->arch_vpmu_load(v); } @@ -109,7 +109,8 @@ void vpmu_initialise(struct vcpu *v) { vpmu->flags = 0; vpmu->context = NULL; - vpmu->arch_vpmu_ops->arch_vpmu_initialise(v); + if ( vpmu->arch_vpmu_ops->arch_vpmu_initialise ) + vpmu->arch_vpmu_ops->arch_vpmu_initialise(v); } } @@ -117,7 +118,17 @@ void vpmu_destroy(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && 
vpmu->arch_vpmu_ops->arch_vpmu_destroy ) vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); } +void vpmu_do_cpuid(unsigned int input, + unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + struct vpmu_struct *vpmu = vcpu_vpmu(current); + + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_cpuid) + vpmu->arch_vpmu_ops->arch_vpmu_cpuid(input, eax, ebx, ecx, edx); +} + diff -r 7a82c2e2eb33 xen/include/asm-x86/hvm/vpmu.h --- a/xen/include/asm-x86/hvm/vpmu.h Thu Jan 19 13:14:02 2012 +0100 +++ b/xen/include/asm-x86/hvm/vpmu.h Thu Jan 19 14:37:17 2012 +0100 @@ -56,6 +56,9 @@ struct arch_vpmu_ops { void (*arch_vpmu_destroy)(struct vcpu *v); void (*arch_vpmu_save)(struct vcpu *v); void (*arch_vpmu_load)(struct vcpu *v); + void (*arch_vpmu_cpuid)(unsigned int input, + unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx); }; int vmx_vpmu_initialize(struct vcpu *v); @@ -78,6 +81,9 @@ void vpmu_initialise(struct vcpu *v); void vpmu_destroy(struct vcpu *v); void vpmu_save(struct vcpu *v); void vpmu_load(struct vcpu *v); +void vpmu_do_cpuid(unsigned int input, + unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx); extern int acquire_pmu_ownership(int pmu_ownership); extern void release_pmu_ownership(int pmu_ownership); -- Company details: http://ts.fujitsu.com/imprint.html
On 19/01/2012 13:54, "Dietmar Hahn" <dietmar.hahn@ts.fujitsu.com> wrote:> > tools/libxc/xc_cpuid_x86.c | 1 + > xen/arch/x86/hvm/vmx/vmx.c | 2 ++ > xen/arch/x86/hvm/vmx/vpmu_core2.c | 38 > ++++++++++++++++++++++++++++++++++++-- > xen/arch/x86/hvm/vpmu.c | 25 ++++++++++++++++++------- > xen/include/asm-x86/hvm/vpmu.h | 6 ++++++ > 5 files changed, 63 insertions(+), 9 deletions(-) > > > The "Debug Store" cpuid flag in the Intel processors gets enabled in the > libxc. > A new function call arch_vpmu_cpuid is added to the struct arch_vpmu_ops and > for > Intel processors the function core2_vpmu_cpuid() is added. > The aim is that always a "struct arch_vpmu_ops" is accessible at least with > a cpuid function to switch off special PMU cpuid flags dynamically depending > on > the boot variable opt_vpmu_enabled.Our CPUID configuration is done per-domain, and from tools/libxc/xc_cpuid_x86.c. CPUID adjustments implemented within the hypervisor are generally not acceptable without very good reason. -- Keir> Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com> > > diff -r 7a82c2e2eb33 tools/libxc/xc_cpuid_x86.c > --- a/tools/libxc/xc_cpuid_x86.c Thu Jan 19 13:14:02 2012 +0100 > +++ b/tools/libxc/xc_cpuid_x86.c Thu Jan 19 14:37:17 2012 +0100 > @@ -343,6 +343,7 @@ static void xc_cpuid_hvm_policy( > bitmaskof(X86_FEATURE_CMOV) | > bitmaskof(X86_FEATURE_PAT) | > bitmaskof(X86_FEATURE_CLFLSH) | > + bitmaskof(X86_FEATURE_DS) | > bitmaskof(X86_FEATURE_PSE36) | > bitmaskof(X86_FEATURE_MMX) | > bitmaskof(X86_FEATURE_FXSR) | > diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vmx/vmx.c > --- a/xen/arch/x86/hvm/vmx/vmx.c Thu Jan 19 13:14:02 2012 +0100 > +++ b/xen/arch/x86/hvm/vmx/vmx.c Thu Jan 19 14:37:17 2012 +0100 > @@ -1603,6 +1603,8 @@ static void vmx_cpuid_intercept( > break; > } > > + vpmu_do_cpuid(input, eax, ebx, ecx, edx); > + > HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx); > } > > diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vmx/vpmu_core2.c > --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Thu 
Jan 19 13:14:02 2012 +0100 > +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Thu Jan 19 14:37:17 2012 +0100 > @@ -598,6 +598,29 @@ static void core2_vpmu_destroy(struct vc > vpmu->flags &= ~VPMU_CONTEXT_ALLOCATED; > } > > +/** > + * core2_vpmu_cpuid - prepare special vpmu cpuid bits > + * If emulation of vpmu is switched off, some bits are swtiched off, > currently: > + * - EAX[0x1].EAX[Bits 0-7]: PMC revision id. > + * - EAX[0xa].EDX[Bit 21]: Debug Store > + */ > +#define bitmaskof(idx) (1U << ((idx) & 31)) > +static void core2_vpmu_cpuid(unsigned int input, > + unsigned int *eax, unsigned int *ebx, > + unsigned int *ecx, unsigned int *edx) > +{ > + switch ( input ) > + { > + case 0x1: > + *edx &= ~bitmaskof(X86_FEATURE_DS); /* Debug Store not supported > */ > + break; > + case 0xa: > + if ( !opt_vpmu_enabled ) > + *eax &= ~(unsigned int)0xff; /* Clear pmc version id. */ > + break; > + } > +} > + > struct arch_vpmu_ops core2_vpmu_ops = { > .do_wrmsr = core2_vpmu_do_wrmsr, > .do_rdmsr = core2_vpmu_do_rdmsr, > @@ -605,7 +628,13 @@ struct arch_vpmu_ops core2_vpmu_ops = { > .arch_vpmu_initialise = core2_vpmu_initialise, > .arch_vpmu_destroy = core2_vpmu_destroy, > .arch_vpmu_save = core2_vpmu_save, > - .arch_vpmu_load = core2_vpmu_load > + .arch_vpmu_load = core2_vpmu_load, > + .arch_vpmu_cpuid = core2_vpmu_cpuid > +}; > + > +/* Used if vpmu is disabled. */ > +struct arch_vpmu_ops core2_vpmu_dis_ops = { > + .arch_vpmu_cpuid = core2_vpmu_cpuid > }; > > int vmx_vpmu_initialize(struct vcpu *v) > @@ -615,7 +644,7 @@ int vmx_vpmu_initialize(struct vcpu *v) > __u8 cpu_model = current_cpu_data.x86_model; > > if ( !opt_vpmu_enabled ) > - return -EINVAL; > + goto func_out; > > if ( family == 6 ) > { > @@ -635,6 +664,11 @@ int vmx_vpmu_initialize(struct vcpu *v) > printk("VPMU: Initialization failed. 
" > "Intel processor family %d model %d has not " > "been supported\n", family, cpu_model); > + > +func_out: > + > + vpmu->arch_vpmu_ops = &core2_vpmu_dis_ops; > + > return -EINVAL; > } > > diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vpmu.c > --- a/xen/arch/x86/hvm/vpmu.c Thu Jan 19 13:14:02 2012 +0100 > +++ b/xen/arch/x86/hvm/vpmu.c Thu Jan 19 14:37:17 2012 +0100 > @@ -39,7 +39,7 @@ int vpmu_do_wrmsr(unsigned int msr, uint > { > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr ) > return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content); > return 0; > } > @@ -48,7 +48,7 @@ int vpmu_do_rdmsr(unsigned int msr, uint > { > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr ) > return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content); > return 0; > } > @@ -57,7 +57,7 @@ int vpmu_do_interrupt(struct cpu_user_re > { > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt ) > return vpmu->arch_vpmu_ops->do_interrupt(regs); > return 0; > } > @@ -66,7 +66,7 @@ void vpmu_save(struct vcpu *v) > { > struct vpmu_struct *vpmu = vcpu_vpmu(v); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save ) > vpmu->arch_vpmu_ops->arch_vpmu_save(v); > } > > @@ -74,7 +74,7 @@ void vpmu_load(struct vcpu *v) > { > struct vpmu_struct *vpmu = vcpu_vpmu(v); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load ) > vpmu->arch_vpmu_ops->arch_vpmu_load(v); > } > > @@ -109,7 +109,8 @@ void vpmu_initialise(struct vcpu *v) > { > vpmu->flags = 0; > vpmu->context = NULL; > - vpmu->arch_vpmu_ops->arch_vpmu_initialise(v); > + if ( vpmu->arch_vpmu_ops->arch_vpmu_initialise ) > + vpmu->arch_vpmu_ops->arch_vpmu_initialise(v); > } > } > > @@ -117,7 +118,17 
@@ void vpmu_destroy(struct vcpu *v) > { > struct vpmu_struct *vpmu = vcpu_vpmu(v); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy ) > vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); > } > > +void vpmu_do_cpuid(unsigned int input, > + unsigned int *eax, unsigned int *ebx, > + unsigned int *ecx, unsigned int *edx) > +{ > + struct vpmu_struct *vpmu = vcpu_vpmu(current); > + > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_cpuid) > + vpmu->arch_vpmu_ops->arch_vpmu_cpuid(input, eax, ebx, ecx, edx); > +} > + > diff -r 7a82c2e2eb33 xen/include/asm-x86/hvm/vpmu.h > --- a/xen/include/asm-x86/hvm/vpmu.h Thu Jan 19 13:14:02 2012 +0100 > +++ b/xen/include/asm-x86/hvm/vpmu.h Thu Jan 19 14:37:17 2012 +0100 > @@ -56,6 +56,9 @@ struct arch_vpmu_ops { > void (*arch_vpmu_destroy)(struct vcpu *v); > void (*arch_vpmu_save)(struct vcpu *v); > void (*arch_vpmu_load)(struct vcpu *v); > + void (*arch_vpmu_cpuid)(unsigned int input, > + unsigned int *eax, unsigned int *ebx, > + unsigned int *ecx, unsigned int *edx); > }; > > int vmx_vpmu_initialize(struct vcpu *v); > @@ -78,6 +81,9 @@ void vpmu_initialise(struct vcpu *v); > void vpmu_destroy(struct vcpu *v); > void vpmu_save(struct vcpu *v); > void vpmu_load(struct vcpu *v); > +void vpmu_do_cpuid(unsigned int input, > + unsigned int *eax, unsigned int *ebx, > + unsigned int *ecx, unsigned int *edx); > > extern int acquire_pmu_ownership(int pmu_ownership); > extern void release_pmu_ownership(int pmu_ownership);
Am Freitag 20 Januar 2012, 10:29:41 schrieb Keir Fraser:> On 19/01/2012 13:54, "Dietmar Hahn" <dietmar.hahn@ts.fujitsu.com> wrote: > > > > > tools/libxc/xc_cpuid_x86.c | 1 + > > xen/arch/x86/hvm/vmx/vmx.c | 2 ++ > > xen/arch/x86/hvm/vmx/vpmu_core2.c | 38 > > ++++++++++++++++++++++++++++++++++++-- > > xen/arch/x86/hvm/vpmu.c | 25 ++++++++++++++++++------- > > xen/include/asm-x86/hvm/vpmu.h | 6 ++++++ > > 5 files changed, 63 insertions(+), 9 deletions(-) > > > > > > The "Debug Store" cpuid flag in the Intel processors gets enabled in the > > libxc. > > A new function call arch_vpmu_cpuid is added to the struct arch_vpmu_ops and > > for > > Intel processors the function core2_vpmu_cpuid() is added. > > The aim is that always a "struct arch_vpmu_ops" is accessible at least with > > a cpuid function to switch off special PMU cpuid flags dynamically depending > > on > > the boot variable opt_vpmu_enabled. > > Our CPUID configuration is done per-domain, and from > tools/libxc/xc_cpuid_x86.c. CPUID adjustments implemented within the > hypervisor are generally not acceptable without very good reason.Then a way is needed to have access to the opt_vpmu_enabled variable within the hypervisor from the tools to decide the enabling of the flag (is there such a way?) or the mechanism with the boot variable must be changed. The opt_vpmu_enabled boot variable was introduced because of a PMU problem in the Nehalem cpus leading sometimes to hypervisor crashes. But with the done quirk we never had a crash anymore. So maybe we can always switch on the vpmu stuff in the hypervisor and add a flag in the domain configuration when somebody wants to do some performance tests? 
Dietmar.> > -- Keir > > > Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com> > > > > diff -r 7a82c2e2eb33 tools/libxc/xc_cpuid_x86.c > > --- a/tools/libxc/xc_cpuid_x86.c Thu Jan 19 13:14:02 2012 +0100 > > +++ b/tools/libxc/xc_cpuid_x86.c Thu Jan 19 14:37:17 2012 +0100 > > @@ -343,6 +343,7 @@ static void xc_cpuid_hvm_policy( > > bitmaskof(X86_FEATURE_CMOV) | > > bitmaskof(X86_FEATURE_PAT) | > > bitmaskof(X86_FEATURE_CLFLSH) | > > + bitmaskof(X86_FEATURE_DS) | > > bitmaskof(X86_FEATURE_PSE36) | > > bitmaskof(X86_FEATURE_MMX) | > > bitmaskof(X86_FEATURE_FXSR) | > > diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vmx/vmx.c > > --- a/xen/arch/x86/hvm/vmx/vmx.c Thu Jan 19 13:14:02 2012 +0100 > > +++ b/xen/arch/x86/hvm/vmx/vmx.c Thu Jan 19 14:37:17 2012 +0100 > > @@ -1603,6 +1603,8 @@ static void vmx_cpuid_intercept( > > break; > > } > > > > + vpmu_do_cpuid(input, eax, ebx, ecx, edx); > > + > > HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx); > > } > > > > diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vmx/vpmu_core2.c > > --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Thu Jan 19 13:14:02 2012 +0100 > > +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Thu Jan 19 14:37:17 2012 +0100 > > @@ -598,6 +598,29 @@ static void core2_vpmu_destroy(struct vc > > vpmu->flags &= ~VPMU_CONTEXT_ALLOCATED; > > } > > > > +/** > > + * core2_vpmu_cpuid - prepare special vpmu cpuid bits > > + * If emulation of vpmu is switched off, some bits are swtiched off, > > currently: > > + * - EAX[0x1].EAX[Bits 0-7]: PMC revision id. > > + * - EAX[0xa].EDX[Bit 21]: Debug Store > > + */ > > +#define bitmaskof(idx) (1U << ((idx) & 31)) > > +static void core2_vpmu_cpuid(unsigned int input, > > + unsigned int *eax, unsigned int *ebx, > > + unsigned int *ecx, unsigned int *edx) > > +{ > > + switch ( input ) > > + { > > + case 0x1: > > + *edx &= ~bitmaskof(X86_FEATURE_DS); /* Debug Store not supported > > */ > > + break; > > + case 0xa: > > + if ( !opt_vpmu_enabled ) > > + *eax &= ~(unsigned int)0xff; /* Clear pmc version id. 
*/ > > + break; > > + } > > +} > > + > > struct arch_vpmu_ops core2_vpmu_ops = { > > .do_wrmsr = core2_vpmu_do_wrmsr, > > .do_rdmsr = core2_vpmu_do_rdmsr, > > @@ -605,7 +628,13 @@ struct arch_vpmu_ops core2_vpmu_ops = { > > .arch_vpmu_initialise = core2_vpmu_initialise, > > .arch_vpmu_destroy = core2_vpmu_destroy, > > .arch_vpmu_save = core2_vpmu_save, > > - .arch_vpmu_load = core2_vpmu_load > > + .arch_vpmu_load = core2_vpmu_load, > > + .arch_vpmu_cpuid = core2_vpmu_cpuid > > +}; > > + > > +/* Used if vpmu is disabled. */ > > +struct arch_vpmu_ops core2_vpmu_dis_ops = { > > + .arch_vpmu_cpuid = core2_vpmu_cpuid > > }; > > > > int vmx_vpmu_initialize(struct vcpu *v) > > @@ -615,7 +644,7 @@ int vmx_vpmu_initialize(struct vcpu *v) > > __u8 cpu_model = current_cpu_data.x86_model; > > > > if ( !opt_vpmu_enabled ) > > - return -EINVAL; > > + goto func_out; > > > > if ( family == 6 ) > > { > > @@ -635,6 +664,11 @@ int vmx_vpmu_initialize(struct vcpu *v) > > printk("VPMU: Initialization failed. 
" > > "Intel processor family %d model %d has not " > > "been supported\n", family, cpu_model); > > + > > +func_out: > > + > > + vpmu->arch_vpmu_ops = &core2_vpmu_dis_ops; > > + > > return -EINVAL; > > } > > > > diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vpmu.c > > --- a/xen/arch/x86/hvm/vpmu.c Thu Jan 19 13:14:02 2012 +0100 > > +++ b/xen/arch/x86/hvm/vpmu.c Thu Jan 19 14:37:17 2012 +0100 > > @@ -39,7 +39,7 @@ int vpmu_do_wrmsr(unsigned int msr, uint > > { > > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > > > - if ( vpmu->arch_vpmu_ops ) > > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr ) > > return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content); > > return 0; > > } > > @@ -48,7 +48,7 @@ int vpmu_do_rdmsr(unsigned int msr, uint > > { > > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > > > - if ( vpmu->arch_vpmu_ops ) > > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr ) > > return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content); > > return 0; > > } > > @@ -57,7 +57,7 @@ int vpmu_do_interrupt(struct cpu_user_re > > { > > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > > > - if ( vpmu->arch_vpmu_ops ) > > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt ) > > return vpmu->arch_vpmu_ops->do_interrupt(regs); > > return 0; > > } > > @@ -66,7 +66,7 @@ void vpmu_save(struct vcpu *v) > > { > > struct vpmu_struct *vpmu = vcpu_vpmu(v); > > > > - if ( vpmu->arch_vpmu_ops ) > > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save ) > > vpmu->arch_vpmu_ops->arch_vpmu_save(v); > > } > > > > @@ -74,7 +74,7 @@ void vpmu_load(struct vcpu *v) > > { > > struct vpmu_struct *vpmu = vcpu_vpmu(v); > > > > - if ( vpmu->arch_vpmu_ops ) > > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load ) > > vpmu->arch_vpmu_ops->arch_vpmu_load(v); > > } > > > > @@ -109,7 +109,8 @@ void vpmu_initialise(struct vcpu *v) > > { > > vpmu->flags = 0; > > vpmu->context = NULL; > > - vpmu->arch_vpmu_ops->arch_vpmu_initialise(v); > > + 
if ( vpmu->arch_vpmu_ops->arch_vpmu_initialise ) > > + vpmu->arch_vpmu_ops->arch_vpmu_initialise(v); > > } > > } > > > > @@ -117,7 +118,17 @@ void vpmu_destroy(struct vcpu *v) > > { > > struct vpmu_struct *vpmu = vcpu_vpmu(v); > > > > - if ( vpmu->arch_vpmu_ops ) > > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy ) > > vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); > > } > > > > +void vpmu_do_cpuid(unsigned int input, > > + unsigned int *eax, unsigned int *ebx, > > + unsigned int *ecx, unsigned int *edx) > > +{ > > + struct vpmu_struct *vpmu = vcpu_vpmu(current); > > + > > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_cpuid) > > + vpmu->arch_vpmu_ops->arch_vpmu_cpuid(input, eax, ebx, ecx, edx); > > +} > > + > > diff -r 7a82c2e2eb33 xen/include/asm-x86/hvm/vpmu.h > > --- a/xen/include/asm-x86/hvm/vpmu.h Thu Jan 19 13:14:02 2012 +0100 > > +++ b/xen/include/asm-x86/hvm/vpmu.h Thu Jan 19 14:37:17 2012 +0100 > > @@ -56,6 +56,9 @@ struct arch_vpmu_ops { > > void (*arch_vpmu_destroy)(struct vcpu *v); > > void (*arch_vpmu_save)(struct vcpu *v); > > void (*arch_vpmu_load)(struct vcpu *v); > > + void (*arch_vpmu_cpuid)(unsigned int input, > > + unsigned int *eax, unsigned int *ebx, > > + unsigned int *ecx, unsigned int *edx); > > }; > > > > int vmx_vpmu_initialize(struct vcpu *v); > > @@ -78,6 +81,9 @@ void vpmu_initialise(struct vcpu *v); > > void vpmu_destroy(struct vcpu *v); > > void vpmu_save(struct vcpu *v); > > void vpmu_load(struct vcpu *v); > > +void vpmu_do_cpuid(unsigned int input, > > + unsigned int *eax, unsigned int *ebx, > > + unsigned int *ecx, unsigned int *edx); > > > > extern int acquire_pmu_ownership(int pmu_ownership); > > extern void release_pmu_ownership(int pmu_ownership); > > >-- Company details: http://ts.fujitsu.com/imprint.html
On 20/01/2012 10:29, "Keir Fraser" <keir@xen.org> wrote:>> The "Debug Store" cpuid flag in the Intel processors gets enabled in the >> libxc. >> A new function call arch_vpmu_cpuid is added to the struct arch_vpmu_ops and >> for >> Intel processors the function core2_vpmu_cpuid() is added. >> The aim is that always a "struct arch_vpmu_ops" is accessible at least with >> a cpuid function to switch off special PMU cpuid flags dynamically depending >> on >> the boot variable opt_vpmu_enabled. > > Our CPUID configuration is done per-domain, and from > tools/libxc/xc_cpuid_x86.c. CPUID adjustments implemented within the > hypervisor are generally not acceptable without very good reason.I'll preempt you saying that you need to depend on opt_vpmu_enabled by saying you should get rid of that hacky global configuration flag, and allow VPMU to be properly enabled per domain, via the toolstack. Then configuration via libxc/xc_cpuid_x86.c will fall naturally into place. Perhaps you can allow configuration straightforwardly via the existing libxl_cpuid.c mechanism, or perhaps you need a higher-level config option than that, and then cook it down into CPUID fiddling from within libxl, or libxc. One of the toolstack maintainers (Ian Jackson for example) may have better ideas than me on that. Also, there are AMD- and Intel-specific sections to xc_cpuid_x86.c for HVM guests -- {amd,intel}_xc_cpuid_policy(). 
You can use those to set flags differently for the two vendors.> -- Keir > >> Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com> >> >> diff -r 7a82c2e2eb33 tools/libxc/xc_cpuid_x86.c >> --- a/tools/libxc/xc_cpuid_x86.c Thu Jan 19 13:14:02 2012 +0100 >> +++ b/tools/libxc/xc_cpuid_x86.c Thu Jan 19 14:37:17 2012 +0100 >> @@ -343,6 +343,7 @@ static void xc_cpuid_hvm_policy( >> bitmaskof(X86_FEATURE_CMOV) | >> bitmaskof(X86_FEATURE_PAT) | >> bitmaskof(X86_FEATURE_CLFLSH) | >> + bitmaskof(X86_FEATURE_DS) | >> bitmaskof(X86_FEATURE_PSE36) | >> bitmaskof(X86_FEATURE_MMX) | >> bitmaskof(X86_FEATURE_FXSR) | >> diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vmx/vmx.c >> --- a/xen/arch/x86/hvm/vmx/vmx.c Thu Jan 19 13:14:02 2012 +0100 >> +++ b/xen/arch/x86/hvm/vmx/vmx.c Thu Jan 19 14:37:17 2012 +0100 >> @@ -1603,6 +1603,8 @@ static void vmx_cpuid_intercept( >> break; >> } >> >> + vpmu_do_cpuid(input, eax, ebx, ecx, edx); >> + >> HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx); >> } >> >> diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vmx/vpmu_core2.c >> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Thu Jan 19 13:14:02 2012 +0100 >> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Thu Jan 19 14:37:17 2012 +0100 >> @@ -598,6 +598,29 @@ static void core2_vpmu_destroy(struct vc >> vpmu->flags &= ~VPMU_CONTEXT_ALLOCATED; >> } >> >> +/** >> + * core2_vpmu_cpuid - prepare special vpmu cpuid bits >> + * If emulation of vpmu is switched off, some bits are swtiched off, >> currently: >> + * - EAX[0x1].EAX[Bits 0-7]: PMC revision id. >> + * - EAX[0xa].EDX[Bit 21]: Debug Store >> + */ >> +#define bitmaskof(idx) (1U << ((idx) & 31)) >> +static void core2_vpmu_cpuid(unsigned int input, >> + unsigned int *eax, unsigned int *ebx, >> + unsigned int *ecx, unsigned int *edx) >> +{ >> + switch ( input ) >> + { >> + case 0x1: >> + *edx &= ~bitmaskof(X86_FEATURE_DS); /* Debug Store not supported >> */ >> + break; >> + case 0xa: >> + if ( !opt_vpmu_enabled ) >> + *eax &= ~(unsigned int)0xff; /* Clear pmc version id. 
*/ >> + break; >> + } >> +} >> + >> struct arch_vpmu_ops core2_vpmu_ops = { >> .do_wrmsr = core2_vpmu_do_wrmsr, >> .do_rdmsr = core2_vpmu_do_rdmsr, >> @@ -605,7 +628,13 @@ struct arch_vpmu_ops core2_vpmu_ops = { >> .arch_vpmu_initialise = core2_vpmu_initialise, >> .arch_vpmu_destroy = core2_vpmu_destroy, >> .arch_vpmu_save = core2_vpmu_save, >> - .arch_vpmu_load = core2_vpmu_load >> + .arch_vpmu_load = core2_vpmu_load, >> + .arch_vpmu_cpuid = core2_vpmu_cpuid >> +}; >> + >> +/* Used if vpmu is disabled. */ >> +struct arch_vpmu_ops core2_vpmu_dis_ops = { >> + .arch_vpmu_cpuid = core2_vpmu_cpuid >> }; >> >> int vmx_vpmu_initialize(struct vcpu *v) >> @@ -615,7 +644,7 @@ int vmx_vpmu_initialize(struct vcpu *v) >> __u8 cpu_model = current_cpu_data.x86_model; >> >> if ( !opt_vpmu_enabled ) >> - return -EINVAL; >> + goto func_out; >> >> if ( family == 6 ) >> { >> @@ -635,6 +664,11 @@ int vmx_vpmu_initialize(struct vcpu *v) >> printk("VPMU: Initialization failed. " >> "Intel processor family %d model %d has not " >> "been supported\n", family, cpu_model); >> + >> +func_out: >> + >> + vpmu->arch_vpmu_ops = &core2_vpmu_dis_ops; >> + >> return -EINVAL; >> } >> >> diff -r 7a82c2e2eb33 xen/arch/x86/hvm/vpmu.c >> --- a/xen/arch/x86/hvm/vpmu.c Thu Jan 19 13:14:02 2012 +0100 >> +++ b/xen/arch/x86/hvm/vpmu.c Thu Jan 19 14:37:17 2012 +0100 >> @@ -39,7 +39,7 @@ int vpmu_do_wrmsr(unsigned int msr, uint >> { >> struct vpmu_struct *vpmu = vcpu_vpmu(current); >> >> - if ( vpmu->arch_vpmu_ops ) >> + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr ) >> return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content); >> return 0; >> } >> @@ -48,7 +48,7 @@ int vpmu_do_rdmsr(unsigned int msr, uint >> { >> struct vpmu_struct *vpmu = vcpu_vpmu(current); >> >> - if ( vpmu->arch_vpmu_ops ) >> + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr ) >> return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content); >> return 0; >> } >> @@ -57,7 +57,7 @@ int vpmu_do_interrupt(struct cpu_user_re >> { 
>> struct vpmu_struct *vpmu = vcpu_vpmu(current); >> >> - if ( vpmu->arch_vpmu_ops ) >> + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt ) >> return vpmu->arch_vpmu_ops->do_interrupt(regs); >> return 0; >> } >> @@ -66,7 +66,7 @@ void vpmu_save(struct vcpu *v) >> { >> struct vpmu_struct *vpmu = vcpu_vpmu(v); >> >> - if ( vpmu->arch_vpmu_ops ) >> + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save ) >> vpmu->arch_vpmu_ops->arch_vpmu_save(v); >> } >> >> @@ -74,7 +74,7 @@ void vpmu_load(struct vcpu *v) >> { >> struct vpmu_struct *vpmu = vcpu_vpmu(v); >> >> - if ( vpmu->arch_vpmu_ops ) >> + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load ) >> vpmu->arch_vpmu_ops->arch_vpmu_load(v); >> } >> >> @@ -109,7 +109,8 @@ void vpmu_initialise(struct vcpu *v) >> { >> vpmu->flags = 0; >> vpmu->context = NULL; >> - vpmu->arch_vpmu_ops->arch_vpmu_initialise(v); >> + if ( vpmu->arch_vpmu_ops->arch_vpmu_initialise ) >> + vpmu->arch_vpmu_ops->arch_vpmu_initialise(v); >> } >> } >> >> @@ -117,7 +118,17 @@ void vpmu_destroy(struct vcpu *v) >> { >> struct vpmu_struct *vpmu = vcpu_vpmu(v); >> >> - if ( vpmu->arch_vpmu_ops ) >> + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy ) >> vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); >> } >> >> +void vpmu_do_cpuid(unsigned int input, >> + unsigned int *eax, unsigned int *ebx, >> + unsigned int *ecx, unsigned int *edx) >> +{ >> + struct vpmu_struct *vpmu = vcpu_vpmu(current); >> + >> + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_cpuid) >> + vpmu->arch_vpmu_ops->arch_vpmu_cpuid(input, eax, ebx, ecx, edx); >> +} >> + >> diff -r 7a82c2e2eb33 xen/include/asm-x86/hvm/vpmu.h >> --- a/xen/include/asm-x86/hvm/vpmu.h Thu Jan 19 13:14:02 2012 +0100 >> +++ b/xen/include/asm-x86/hvm/vpmu.h Thu Jan 19 14:37:17 2012 +0100 >> @@ -56,6 +56,9 @@ struct arch_vpmu_ops { >> void (*arch_vpmu_destroy)(struct vcpu *v); >> void (*arch_vpmu_save)(struct vcpu *v); >> void (*arch_vpmu_load)(struct vcpu 
*v); >> + void (*arch_vpmu_cpuid)(unsigned int input, >> + unsigned int *eax, unsigned int *ebx, >> + unsigned int *ecx, unsigned int *edx); >> }; >> >> int vmx_vpmu_initialize(struct vcpu *v); >> @@ -78,6 +81,9 @@ void vpmu_initialise(struct vcpu *v); >> void vpmu_destroy(struct vcpu *v); >> void vpmu_save(struct vcpu *v); >> void vpmu_load(struct vcpu *v); >> +void vpmu_do_cpuid(unsigned int input, >> + unsigned int *eax, unsigned int *ebx, >> + unsigned int *ecx, unsigned int *edx); >> >> extern int acquire_pmu_ownership(int pmu_ownership); >> extern void release_pmu_ownership(int pmu_ownership); > >
On 20/01/2012 10:49, "Dietmar Hahn" <dietmar.hahn@ts.fujitsu.com> wrote:>> Our CPUID configuration is done per-domain, and from >> tools/libxc/xc_cpuid_x86.c. CPUID adjustments implemented within the >> hypervisor are generally not acceptable without very good reason. > > Then a way is needed to have access to the opt_vpmu_enabled variable within > the > hypervisor from the tools to decide the enabling of the flag (is there such a > way?) or the mechanism with the boot variable must be changed. > The opt_vpmu_enabled boot variable was introduced because of a PMU problem in > the Nehalem cpus leading sometimes to hypervisor crashes. But with the done > quirk we never had a crash anymore. > So maybe we can always switch on the vpmu stuff in the hypervisor and add a > flag in the domain configuration when somebody wants to do some performance > tests?Yes! It's obviously an option of fairly narrow interest. If someone tries to enable the per-domain option on a CPU which has problems, you can fail the domain creation, or print a warning in the hypervisor log, or whatever. Any sensible option in that respect is fine by me! -- Keir
Hi Ian, Am Freitag 20 Januar 2012, 10:54:44 schrieb Keir Fraser:> On 20/01/2012 10:49, "Dietmar Hahn" <dietmar.hahn@ts.fujitsu.com> wrote: > > >> Our CPUID configuration is done per-domain, and from > >> tools/libxc/xc_cpuid_x86.c. CPUID adjustments implemented within the > >> hypervisor are generally not acceptable without very good reason. > > > > Then a way is needed to have access to the opt_vpmu_enabled variable within > > the > > hypervisor from the tools to decide the enabling of the flag (is there such a > > way?) or the mechanism with the boot variable must be changed. > > The opt_vpmu_enabled boot variable was introduced because of a PMU problem in > > the Nehalem cpus leading sometimes to hypervisor crashes. But with the done > > quirk we never had a crash anymore. > > So maybe we can always switch on the vpmu stuff in the hypervisor and add a > > flag in the domain configuration when somebody wants to do some performance > > tests? > > Yes! > > It's obviously an option of fairly narrow interest. If someone tries to > enable the per-domain option on a CPU which has problems, you can fail the > domain creation, or print a warning in the hypervisor log, or whatever. Any > sensible option in that respect is fine by me!What is the best solution for this? A domain specific configuration option is needed (vpmu?) which is usable in libxc/xc_cpuid_x86.c to select/deselect special vpmu bits in the cpuid command. Can you point me to a proper example? Thanks. Dietmar. -- Company details: http://ts.fujitsu.com/imprint.html
On Fri, 2012-01-20 at 12:40 +0000, Dietmar Hahn wrote:
> Hi Ian,
>
> Am Freitag 20 Januar 2012, 10:54:44 schrieb Keir Fraser:
> > On 20/01/2012 10:49, "Dietmar Hahn" <dietmar.hahn@ts.fujitsu.com> wrote:
> >
> > >> Our CPUID configuration is done per-domain, and from
> > >> tools/libxc/xc_cpuid_x86.c. CPUID adjustments implemented within the
> > >> hypervisor are generally not acceptable without very good reason.
> > >
> > > Then a way is needed to have access to the opt_vpmu_enabled variable
> > > within the hypervisor from the tools to decide the enabling of the
> > > flag (is there such a way?) or the mechanism with the boot variable
> > > must be changed.
> > > The opt_vpmu_enabled boot variable was introduced because of a PMU
> > > problem in the Nehalem cpus leading sometimes to hypervisor crashes.
> > > But with the done quirk we never had a crash anymore.
> > > So maybe we can always switch on the vpmu stuff in the hypervisor and
> > > add a flag in the domain configuration when somebody wants to do some
> > > performance tests?
> >
> > Yes!
> >
> > It's obviously an option of fairly narrow interest. If someone tries to
> > enable the per-domain option on a CPU which has problems, you can fail
> > the domain creation, or print a warning in the hypervisor log, or
> > whatever. Any sensible option in that respect is fine by me!
>
> What is the best solution for this?
> A domain specific configuration option is needed (vpmu?) which is usable
> in libxc/xc_cpuid_x86.c to select/deselect special vpmu bits in the cpuid
> command.
> Can you point me to a proper example?

Can't this already be done via the cpuid domain option given the correct
runes? Maybe with an addition to the table in libxl_cpuid_parse_config?

Ian.
On 20/01/2012 12:45, "Ian Campbell" <Ian.Campbell@citrix.com> wrote:

>>> It's obviously an option of fairly narrow interest. If someone tries to
>>> enable the per-domain option on a CPU which has problems, you can fail
>>> the domain creation, or print a warning in the hypervisor log, or
>>> whatever. Any sensible option in that respect is fine by me!
>>
>> What is the best solution for this?
>> A domain specific configuration option is needed (vpmu?) which is usable
>> in libxc/xc_cpuid_x86.c to select/deselect special vpmu bits in the cpuid
>> command.
>> Can you point me to a proper example?
>
> Can't this already be done via the cpuid domain option given the correct
> runes? Maybe with an addition to the table in libxl_cpuid_parse_config?

Yes that's probably the way to do it. If the resulting required
configuration runes are too cryptic or vendor-specific, it may make sense to
have the libxl cpuid logic consume a 'vpmu' option which it then turns into
a set of lower-level cpuid settings to eventually pass down to the code in
libxc/xc_cpuid.

It's a trifle messy I will admit. Arguably the 'default policy' bits of
xc_cpuid_x86.c would better belong in libxl these days, where we would have
better access to a domain's configuration state. As it is, we may end up
with a spread of default policy across Xen (for dom0), libxc, and libxl.

 -- Keir
On Fri, 2012-01-20 at 13:33 +0000, Keir Fraser wrote:
> On 20/01/2012 12:45, "Ian Campbell" <Ian.Campbell@citrix.com> wrote:
>
> >>> It's obviously an option of fairly narrow interest. If someone tries
> >>> to enable the per-domain option on a CPU which has problems, you can
> >>> fail the domain creation, or print a warning in the hypervisor log, or
> >>> whatever. Any sensible option in that respect is fine by me!
> >>
> >> What is the best solution for this?
> >> A domain specific configuration option is needed (vpmu?) which is
> >> usable in libxc/xc_cpuid_x86.c to select/deselect special vpmu bits in
> >> the cpuid command.
> >> Can you point me to a proper example?
> >
> > Can't this already be done via the cpuid domain option given the correct
> > runes? Maybe with an addition to the table in libxl_cpuid_parse_config?
>
> Yes that's probably the way to do it. If the resulting required
> configuration runes are too cryptic or vendor-specific, it may make sense
> to have the libxl cpuid logic consume a 'vpmu' option which it then turns
> into a set of lower-level cpuid settings to eventually pass down to the
> code in libxc/xc_cpuid.
>
> It's a trifle messy I will admit. Arguably the 'default policy' bits of
> xc_cpuid_x86.c would better belong in libxl these days, where we would
> have better access to a domain's configuration state. As it is, we may end
> up with a spread of default policy across Xen (for dom0), libxc, and
> libxl.

Plus a bunch of ad-hoc stuff which predates the cpuid bit-fiddling
support but is reflected in the cpuid (pae, apic, acpi etc).

Ian.
On 20/01/2012 13:37, "Ian Campbell" <Ian.Campbell@citrix.com> wrote:

>> Yes that's probably the way to do it. If the resulting required
>> configuration runes are too cryptic or vendor-specific, it may make sense
>> to have the libxl cpuid logic consume a 'vpmu' option which it then turns
>> into a set of lower-level cpuid settings to eventually pass down to the
>> code in libxc/xc_cpuid.
>>
>> It's a trifle messy I will admit. Arguably the 'default policy' bits of
>> xc_cpuid_x86.c would better belong in libxl these days, where we would
>> have better access to a domain's configuration state. As it is, we may
>> end up with a spread of default policy across Xen (for dom0), libxc, and
>> libxl.
>
> Plus a bunch of ad-hoc stuff which predates the cpuid bit-fiddling
> support but is reflected in the cpuid (pae, apic, acpi etc).

Well, this is done in libxc now, so I think I included it in my list. ;) But
it would be cleaner done in libxl.

I don't think anyone will be straining their arm to volunteer for this
cleanup, but we shouldn't be shy about putting new policy stuff in libxl if
it makes best sense to put it there.

 -- Keir
Am Freitag 20 Januar 2012, 13:47:24 schrieb Keir Fraser:
> On 20/01/2012 13:37, "Ian Campbell" <Ian.Campbell@citrix.com> wrote:
>
> >> Yes that's probably the way to do it. If the resulting required
> >> configuration runes are too cryptic or vendor-specific, it may make
> >> sense to have the libxl cpuid logic consume a 'vpmu' option which it
> >> then turns into a set of lower-level cpuid settings to eventually pass
> >> down to the code in libxc/xc_cpuid.
> >>
> >> It's a trifle messy I will admit. Arguably the 'default policy' bits of
> >> xc_cpuid_x86.c would better belong in libxl these days, where we would
> >> have better access to a domain's configuration state. As it is, we may
> >> end up with a spread of default policy across Xen (for dom0), libxc,
> >> and libxl.
> >
> > Plus a bunch of ad-hoc stuff which predates the cpuid bit-fiddling
> > support but is reflected in the cpuid (pae, apic, acpi etc).
>
> Well, this is done in libxc now, so I think I included it in my list. ;)
> But it would be cleaner done in libxl.
>
> I don't think anyone will be straining their arm to volunteer for this
> cleanup, but we shouldn't be shy about putting new policy stuff in libxl
> if it makes best sense to put it there.

Thanks for the hints. As a first step I'll go through the source and try to
understand what you are talking about.

Dietmar.