Dietmar Hahn
2013-Mar-26 11:16 UTC
[PATCH-v2] vpmu intel: Add cpuid handling when vpmu disabled
Even though vpmu is disabled in the hypervisor in the HVM guest the call of cpuid(0xa) returns informations about usable performance counters. This may confuse guest software when trying to use the counters and nothing happens. This patch clears most bits in registers eax and edx of cpuid(0xa) instruction for the guest when vpmu is disabled: - version ID of architectural performance counting - number of general pmu registers - width of general pmu registers - number of fixed pmu registers - width of ixed pmu registers Thanks. Dietmar. Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com> --- Changes from v1: As Konrad suggested I added a little bit more of documentation to the defines. diff -r 2246964a25a8 xen/arch/x86/hvm/svm/vpmu.c --- a/xen/arch/x86/hvm/svm/vpmu.c Mon Mar 25 16:57:31 2013 +0100 +++ b/xen/arch/x86/hvm/svm/vpmu.c Tue Mar 26 12:01:11 2013 +0100 @@ -370,6 +370,10 @@ int svm_vpmu_initialise(struct vcpu *v, uint8_t family = current_cpu_data.x86; int ret = 0; + /* vpmu enabled? */ + if ( !vpmu_flags ) + return 0; + switch ( family ) { case 0x10: diff -r 2246964a25a8 xen/arch/x86/hvm/vmx/vpmu_core2.c --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Mon Mar 25 16:57:31 2013 +0100 +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Tue Mar 26 12:01:11 2013 +0100 @@ -731,6 +731,62 @@ struct arch_vpmu_ops core2_vpmu_ops = { .arch_vpmu_load = core2_vpmu_load }; +/* + * See Intel SDM Vol 2a Instruction Set Referenc for CPUID instruction. 
+ * cpuid 0xa - Architectural Performance Monitoring Leaf + * Register eax + */ +#define X86_FEATURE_PMU_VER_OFF 0 /* Version ID */ +#define FEATURE_PMU_VER_BITS 8 /* 8 bits 0..7 */ +#define X86_FEATURE_NUM_GEN_OFF 8 /* Number of general pmu registers */ +#define FEATURE_NUM_GEN_BITS 8 /* 8 bits 8..15 */ +#define X86_FEATURE_GEN_WIDTH_OFF 16 /* Width of general pmu registers */ +#define FEATURE_GEN_WIDTH_BITS 8 /* 8 bits 16..23 */ +/* Register edx */ +#define X86_FEATURE_NUM_FIX_OFF 0 /* Number of fixed pmu registers */ +#define FEATURE_NUM_FIX_BITS 5 /* 5 bits 0..4 */ +#define X86_FEATURE_FIX_WIDTH_OFF 5 /* Width of fixed pmu registers */ +#define FEATURE_FIX_WIDTH_BITS 8 /* 8 bits 5..12 */ + +static void core2_no_vpmu_do_cpuid(unsigned int input, + unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + /* + * As in this case the vpmu is not enabled reset some bits in the + * architectural performance monitoring related part. + */ + if ( input == 0xa ) + { + *eax &= ~(((1 << FEATURE_PMU_VER_BITS) -1) << X86_FEATURE_PMU_VER_OFF); + *eax &= ~(((1 << FEATURE_NUM_GEN_BITS) -1) << X86_FEATURE_NUM_GEN_OFF); + *eax &= ~(((1 << FEATURE_GEN_WIDTH_BITS) -1) << X86_FEATURE_GEN_WIDTH_OFF); + + *edx &= ~(((1 << FEATURE_NUM_FIX_BITS) -1) << X86_FEATURE_NUM_FIX_OFF); + *edx &= ~(((1 << FEATURE_FIX_WIDTH_BITS) -1) << X86_FEATURE_FIX_WIDTH_OFF); + } +} + +/* + * If its a vpmu msr set it to 0. + */ +static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content) +{ + int type = -1, index = -1; + if ( !is_core2_vpmu_msr(msr, &type, &index) ) + return 0; + *msr_content = 0; + return 1; +} + +/* + * These functions are used in case vpmu is not enabled. 
+ */ +struct arch_vpmu_ops core2_no_vpmu_ops = { + .do_rdmsr = core2_no_vpmu_do_rdmsr, + .do_cpuid = core2_no_vpmu_do_cpuid, +}; + int vmx_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags) { struct vpmu_struct *vpmu = vcpu_vpmu(v); @@ -738,6 +794,10 @@ int vmx_vpmu_initialise(struct vcpu *v, uint8_t cpu_model = current_cpu_data.x86_model; int ret = 0; + vpmu->arch_vpmu_ops = &core2_no_vpmu_ops; + if ( !vpmu_flags ) + return 0; + if ( family == 6 ) { switch ( cpu_model ) diff -r 2246964a25a8 xen/arch/x86/hvm/vpmu.c --- a/xen/arch/x86/hvm/vpmu.c Mon Mar 25 16:57:31 2013 +0100 +++ b/xen/arch/x86/hvm/vpmu.c Tue Mar 26 12:01:11 2013 +0100 @@ -67,7 +67,7 @@ int vpmu_do_wrmsr(unsigned int msr, uint { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr ) return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content); return 0; } @@ -76,7 +76,7 @@ int vpmu_do_rdmsr(unsigned int msr, uint { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr ) return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content); return 0; } @@ -85,7 +85,7 @@ int vpmu_do_interrupt(struct cpu_user_re { struct vpmu_struct *vpmu = vcpu_vpmu(current); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt ) return vpmu->arch_vpmu_ops->do_interrupt(regs); return 0; } @@ -104,7 +104,7 @@ void vpmu_save(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save ) vpmu->arch_vpmu_ops->arch_vpmu_save(v); } @@ -112,7 +112,7 @@ void vpmu_load(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load ) vpmu->arch_vpmu_ops->arch_vpmu_load(v); } @@ -121,9 +121,6 @@ void vpmu_initialise(struct vcpu *v) struct vpmu_struct *vpmu = 
vcpu_vpmu(v); uint8_t vendor = current_cpu_data.x86_vendor; - if ( !opt_vpmu_enabled ) - return; - if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) ) vpmu_destroy(v); vpmu_clear(vpmu); @@ -153,7 +150,7 @@ void vpmu_destroy(struct vcpu *v) { struct vpmu_struct *vpmu = vcpu_vpmu(v); - if ( vpmu->arch_vpmu_ops ) + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy ) vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); } _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
Keir Fraser
2013-Mar-26 11:45 UTC
Re: [PATCH-v2] vpmu intel: Add cpuid handling when vpmu disabled
On 26/03/2013 11:16, "Dietmar Hahn" <dietmar.hahn@ts.fujitsu.com> wrote:> Even though vpmu is disabled in the hypervisor in the HVM guest the call of > cpuid(0xa) returns informations about usable performance counters. > This may confuse guest software when trying to use the counters and nothing > happens. > This patch clears most bits in registers eax and edx of cpuid(0xa) instruction > for the guest when vpmu is disabled: > - version ID of architectural performance counting > - number of general pmu registers > - width of general pmu registers > - number of fixed pmu registers > - width of ixed pmu registers > > Thanks. > Dietmar. > > > Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>Acked-by: Keir Fraser <keir@xen.org>> --- > > Changes from v1: As Konrad suggested I added a little bit more of > documentation > to the defines. > > > diff -r 2246964a25a8 xen/arch/x86/hvm/svm/vpmu.c > --- a/xen/arch/x86/hvm/svm/vpmu.c Mon Mar 25 16:57:31 2013 +0100 > +++ b/xen/arch/x86/hvm/svm/vpmu.c Tue Mar 26 12:01:11 2013 +0100 > @@ -370,6 +370,10 @@ int svm_vpmu_initialise(struct vcpu *v, > uint8_t family = current_cpu_data.x86; > int ret = 0; > > + /* vpmu enabled? */ > + if ( !vpmu_flags ) > + return 0; > + > switch ( family ) > { > case 0x10: > diff -r 2246964a25a8 xen/arch/x86/hvm/vmx/vpmu_core2.c > --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Mon Mar 25 16:57:31 2013 +0100 > +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Tue Mar 26 12:01:11 2013 +0100 > @@ -731,6 +731,62 @@ struct arch_vpmu_ops core2_vpmu_ops = { > .arch_vpmu_load = core2_vpmu_load > }; > > +/* > + * See Intel SDM Vol 2a Instruction Set Referenc for CPUID instruction. 
> + * cpuid 0xa - Architectural Performance Monitoring Leaf > + * Register eax > + */ > +#define X86_FEATURE_PMU_VER_OFF 0 /* Version ID */ > +#define FEATURE_PMU_VER_BITS 8 /* 8 bits 0..7 */ > +#define X86_FEATURE_NUM_GEN_OFF 8 /* Number of general pmu registers */ > +#define FEATURE_NUM_GEN_BITS 8 /* 8 bits 8..15 */ > +#define X86_FEATURE_GEN_WIDTH_OFF 16 /* Width of general pmu registers */ > +#define FEATURE_GEN_WIDTH_BITS 8 /* 8 bits 16..23 */ > +/* Register edx */ > +#define X86_FEATURE_NUM_FIX_OFF 0 /* Number of fixed pmu registers */ > +#define FEATURE_NUM_FIX_BITS 5 /* 5 bits 0..4 */ > +#define X86_FEATURE_FIX_WIDTH_OFF 5 /* Width of fixed pmu registers */ > +#define FEATURE_FIX_WIDTH_BITS 8 /* 8 bits 5..12 */ > + > +static void core2_no_vpmu_do_cpuid(unsigned int input, > + unsigned int *eax, unsigned int *ebx, > + unsigned int *ecx, unsigned int *edx) > +{ > + /* > + * As in this case the vpmu is not enabled reset some bits in the > + * architectural performance monitoring related part. > + */ > + if ( input == 0xa ) > + { > + *eax &= ~(((1 << FEATURE_PMU_VER_BITS) -1) << > X86_FEATURE_PMU_VER_OFF); > + *eax &= ~(((1 << FEATURE_NUM_GEN_BITS) -1) << > X86_FEATURE_NUM_GEN_OFF); > + *eax &= ~(((1 << FEATURE_GEN_WIDTH_BITS) -1) << > X86_FEATURE_GEN_WIDTH_OFF); > + > + *edx &= ~(((1 << FEATURE_NUM_FIX_BITS) -1) << > X86_FEATURE_NUM_FIX_OFF); > + *edx &= ~(((1 << FEATURE_FIX_WIDTH_BITS) -1) << > X86_FEATURE_FIX_WIDTH_OFF); > + } > +} > + > +/* > + * If its a vpmu msr set it to 0. > + */ > +static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content) > +{ > + int type = -1, index = -1; > + if ( !is_core2_vpmu_msr(msr, &type, &index) ) > + return 0; > + *msr_content = 0; > + return 1; > +} > + > +/* > + * These functions are used in case vpmu is not enabled. 
> + */ > +struct arch_vpmu_ops core2_no_vpmu_ops = { > + .do_rdmsr = core2_no_vpmu_do_rdmsr, > + .do_cpuid = core2_no_vpmu_do_cpuid, > +}; > + > int vmx_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags) > { > struct vpmu_struct *vpmu = vcpu_vpmu(v); > @@ -738,6 +794,10 @@ int vmx_vpmu_initialise(struct vcpu *v, > uint8_t cpu_model = current_cpu_data.x86_model; > int ret = 0; > > + vpmu->arch_vpmu_ops = &core2_no_vpmu_ops; > + if ( !vpmu_flags ) > + return 0; > + > if ( family == 6 ) > { > switch ( cpu_model ) > diff -r 2246964a25a8 xen/arch/x86/hvm/vpmu.c > --- a/xen/arch/x86/hvm/vpmu.c Mon Mar 25 16:57:31 2013 +0100 > +++ b/xen/arch/x86/hvm/vpmu.c Tue Mar 26 12:01:11 2013 +0100 > @@ -67,7 +67,7 @@ int vpmu_do_wrmsr(unsigned int msr, uint > { > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr ) > return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content); > return 0; > } > @@ -76,7 +76,7 @@ int vpmu_do_rdmsr(unsigned int msr, uint > { > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr ) > return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content); > return 0; > } > @@ -85,7 +85,7 @@ int vpmu_do_interrupt(struct cpu_user_re > { > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_interrupt ) > return vpmu->arch_vpmu_ops->do_interrupt(regs); > return 0; > } > @@ -104,7 +104,7 @@ void vpmu_save(struct vcpu *v) > { > struct vpmu_struct *vpmu = vcpu_vpmu(v); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_save ) > vpmu->arch_vpmu_ops->arch_vpmu_save(v); > } > > @@ -112,7 +112,7 @@ void vpmu_load(struct vcpu *v) > { > struct vpmu_struct *vpmu = vcpu_vpmu(v); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load ) 
> vpmu->arch_vpmu_ops->arch_vpmu_load(v); > } > > @@ -121,9 +121,6 @@ void vpmu_initialise(struct vcpu *v) > struct vpmu_struct *vpmu = vcpu_vpmu(v); > uint8_t vendor = current_cpu_data.x86_vendor; > > - if ( !opt_vpmu_enabled ) > - return; > - > if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) ) > vpmu_destroy(v); > vpmu_clear(vpmu); > @@ -153,7 +150,7 @@ void vpmu_destroy(struct vcpu *v) > { > struct vpmu_struct *vpmu = vcpu_vpmu(v); > > - if ( vpmu->arch_vpmu_ops ) > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy ) > vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); > } > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > http://lists.xen.org/xen-devel
Dietmar Hahn
2013-Mar-27 14:13 UTC
[PATCH 1/3] vpmu intel: Better names and replacing numerals with defines
This patch renames core2_counters to core2_fix_counters for better understanding the code and subtitutes 2 numerals with defines in fixed counter handling. Thanks. Dietmar. Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com> Index: xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c ==================================================================--- xen-unstable.hg.orig/xen/arch/x86/hvm/vmx/vpmu_core2.c +++ xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c @@ -101,7 +101,7 @@ static void handle_pmc_quirk(u64 msr_con } } -static const u32 core2_counters_msr[] = { +static const u32 core2_fix_counters_msr[] = { MSR_CORE_PERF_FIXED_CTR0, MSR_CORE_PERF_FIXED_CTR1, MSR_CORE_PERF_FIXED_CTR2 @@ -119,13 +119,13 @@ struct pmumsr { const u32 *msr; }; -static const struct pmumsr core2_counters = { - 3, - core2_counters_msr +static const struct pmumsr core2_fix_counters = { + VPMU_CORE2_NUM_FIXED, + core2_fix_counters_msr }; static const struct pmumsr core2_ctrls = { - 3, + VPMU_CORE2_NUM_CTRLS, core2_ctrls_msr }; static int arch_pmc_cnt; @@ -162,16 +162,16 @@ static int is_core2_vpmu_msr(u32 msr_ind { int i; - for ( i = 0; i < core2_counters.num; i++ ) + for ( i = 0; i < core2_fix_counters.num; i++ ) { - if ( core2_counters.msr[i] == msr_index ) + if ( core2_fix_counters.msr[i] == msr_index ) { *type = MSR_TYPE_COUNTER; *index = i; return 1; } } - + for ( i = 0; i < core2_ctrls.num; i++ ) { if ( core2_ctrls.msr[i] == msr_index ) @@ -214,10 +214,10 @@ static void core2_vpmu_set_msr_bitmap(un int i; /* Allow Read/Write PMU Counters MSR Directly. 
*/ - for ( i = 0; i < core2_counters.num; i++ ) + for ( i = 0; i < core2_fix_counters.num; i++ ) { - clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap); - clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), + clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap); + clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap + 0x800/BYTES_PER_LONG); } for ( i = 0; i < core2_get_pmc_count(); i++ ) @@ -238,10 +238,10 @@ static void core2_vpmu_unset_msr_bitmap( { int i; - for ( i = 0; i < core2_counters.num; i++ ) + for ( i = 0; i < core2_fix_counters.num; i++ ) { - set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap); - set_bit(msraddr_to_bitpos(core2_counters.msr[i]), + set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap); + set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap + 0x800/BYTES_PER_LONG); } for ( i = 0; i < core2_get_pmc_count(); i++ ) @@ -261,8 +261,8 @@ static inline void __core2_vpmu_save(str int i; struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context; - for ( i = 0; i < core2_counters.num; i++ ) - rdmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); + for ( i = 0; i < core2_fix_counters.num; i++ ) + rdmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]); for ( i = 0; i < core2_get_pmc_count(); i++ ) rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC); @@ -292,8 +292,8 @@ static inline void __core2_vpmu_load(str int i; struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context; - for ( i = 0; i < core2_counters.num; i++ ) - wrmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); + for ( i = 0; i < core2_fix_counters.num; i++ ) + wrmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]); for ( i = 0; i < core2_get_pmc_count(); i++ ) wrmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); @@ -474,7 +474,7 @@ static int core2_vpmu_do_wrmsr(unsigned 
rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, non_global_ctrl); global_ctrl = msr_content >> 32; - for ( i = 0; i < 3; i++ ) + for ( i = 0; i < core2_fix_counters.num; i++ ) { core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0); @@ -486,7 +486,7 @@ static int core2_vpmu_do_wrmsr(unsigned non_global_ctrl = msr_content; vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl); global_ctrl >>= 32; - for ( i = 0; i < 3; i++ ) + for ( i = 0; i < core2_fix_counters.num; i++ ) { core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0); @@ -502,7 +502,7 @@ static int core2_vpmu_do_wrmsr(unsigned (global_ctrl >> tmp) & (msr_content >> 22) & 1; } - for ( i = 0; i < 3; i++ ) + for ( i = 0; i < core2_fix_counters.num; i++ ) pmu_enable |= core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i]; for ( i = 0; i < core2_get_pmc_count(); i++ ) pmu_enable |= core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i]; Index: xen-unstable.hg/xen/include/asm-x86/hvm/vmx/vpmu_core2.h ==================================================================--- xen-unstable.hg.orig/xen/include/asm-x86/hvm/vmx/vpmu_core2.h +++ xen-unstable.hg/xen/include/asm-x86/hvm/vmx/vpmu_core2.h @@ -23,6 +23,11 @@ #ifndef __ASM_X86_HVM_VPMU_CORE_H_ #define __ASM_X86_HVM_VPMU_CORE_H_ +/* Currently only 3 fixed counters are supported. 
*/ +#define VPMU_CORE2_NUM_FIXED 3 +/* Currently only 3 Non-architectual Performance Control MSRs */ +#define VPMU_CORE2_NUM_CTRLS 3 + struct arch_msr_pair { u64 counter; u64 control; @@ -30,14 +35,14 @@ struct arch_msr_pair { struct core2_pmu_enable { char ds_area_enable; - char fixed_ctr_enable[3]; + char fixed_ctr_enable[VPMU_CORE2_NUM_FIXED]; char arch_pmc_enable[1]; }; struct core2_vpmu_context { struct core2_pmu_enable *pmu_enable; - u64 counters[3]; - u64 ctrls[3]; + u64 fix_counters[VPMU_CORE2_NUM_FIXED]; + u64 ctrls[VPMU_CORE2_NUM_CTRLS]; u64 global_ovf_status; u32 hw_lapic_lvtpc; struct arch_msr_pair arch_msr_pair[1]; _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
Dietmar Hahn
2013-Mar-27 14:13 UTC
[PATCH 3/3] vpmu intel: Dump vpmu infos in 'q' keyhandler
This patch works only on top of 2/3. This patch extends the printout of the VPCU infos of the keyhandler ''q''. If vPMU is enabled is on the VCPU and active lines are printed like (when running HVM openSuSE-12.3 with ''perf top''); (XEN) vPMU running (XEN) general_0: 0x000000ffffff3ae1 ctrl: 0x000000000053003c (XEN) fixed_1: 0x000000ff90799188 ctrl: 0xb This means general counter 0 and fixed counter 1 are running with showing their contents and the contents of their configuration msr. Thanks. Dietmar. Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com> Index: xen-unstable.hg/xen/arch/x86/domain.c ==================================================================--- xen-unstable.hg.orig/xen/arch/x86/domain.c +++ xen-unstable.hg/xen/arch/x86/domain.c @@ -2093,6 +2093,9 @@ void arch_dump_domain_info(struct domain void arch_dump_vcpu_info(struct vcpu *v) { paging_dump_vcpu_info(v); + + if ( is_hvm_vcpu(v) ) + vpmu_dump(v); } void domain_cpuid( Index: xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c ==================================================================--- xen-unstable.hg.orig/xen/arch/x86/hvm/vmx/vpmu_core2.c +++ xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c @@ -124,6 +124,14 @@ static const u32 core2_fix_counters_msr[ MSR_CORE_PERF_FIXED_CTR2 }; +/* + * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed + * counters. 4 bits for every counter. + */ +#define FIXED_CTRL_CTRL_CONF_WIDTH 4 +/* The index into the core2_ctrls_msr[] of this MSR used in core2_vpmu_dump() */ +#define MSR_CORE_PERF_FIXED_CTR_CTRL_IDX 0 + /* Core 2 Non-architectual Performance Control MSRs. */ static const u32 core2_ctrls_msr[] = { MSR_CORE_PERF_FIXED_CTR_CTRL, @@ -638,6 +646,52 @@ static void core2_vpmu_do_cpuid(unsigned } } +/* Dump vpmu info on console, called in the context of keyhandler ''q''. 
*/ +static void core2_vpmu_dump(struct vcpu *v) +{ + struct vpmu_struct *vpmu = vcpu_vpmu(v); + int i, num; + struct core2_vpmu_context *core2_vpmu_cxt = NULL; + u64 val, mask; + + if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) ) + return; + + if ( !vpmu_is_set(vpmu, VPMU_RUNNING) ) + { + if ( vpmu_set(vpmu, VPMU_CONTEXT_LOADED) ) + printk(" vPMU loaded\n"); + else + printk(" vPMU allocated\n"); + return; + } + + printk(" vPMU running\n"); + core2_vpmu_cxt = vpmu->context; + num = core2_get_pmc_count(); + /* Print the contents of the counter and its configuration msr. */ + for ( i = 0; i < num; i++ ) + { + struct arch_msr_pair* msr_pair = core2_vpmu_cxt->arch_msr_pair; + if ( core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] ) + printk(" general_%d: 0x%016lx ctrl: 0x%016lx\n", + i, msr_pair[i].counter, msr_pair[i].control); + } + /* + * The configuration of the fixed counter is 4 bits each in the + * MSR_CORE_PERF_FIXED_CTR_CTRL. + */ + val = core2_vpmu_cxt->ctrls[MSR_CORE_PERF_FIXED_CTR_CTRL_IDX]; + mask = (1 << FIXED_CTRL_CTRL_CONF_WIDTH) - 1; + for ( i = 0; i < core2_fix_counters.num; i++ ) + { + if ( core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] ) + printk(" fixed_%d: 0x%016lx ctrl: 0x%lx\n", + i, core2_vpmu_cxt->fix_counters[i], val & mask); + val >>= FIXED_CTRL_CTRL_CONF_WIDTH; + } +} + static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs) { struct vcpu *v = current; @@ -751,7 +805,8 @@ struct arch_vpmu_ops core2_vpmu_ops = { .do_cpuid = core2_vpmu_do_cpuid, .arch_vpmu_destroy = core2_vpmu_destroy, .arch_vpmu_save = core2_vpmu_save, - .arch_vpmu_load = core2_vpmu_load + .arch_vpmu_load = core2_vpmu_load, + .arch_vpmu_dump = core2_vpmu_dump }; static void core2_no_vpmu_do_cpuid(unsigned int input, Index: xen-unstable.hg/xen/arch/x86/hvm/vpmu.c ==================================================================--- xen-unstable.hg.orig/xen/arch/x86/hvm/vpmu.c +++ xen-unstable.hg/xen/arch/x86/hvm/vpmu.c @@ -154,3 +154,12 @@ void vpmu_destroy(struct vcpu 
*v) vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); } +/* Dump some vpmu informations on console. Used in keyhandler dump_domains(). */ +void vpmu_dump(struct vcpu *v) +{ + struct vpmu_struct *vpmu = vcpu_vpmu(v); + + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_dump ) + vpmu->arch_vpmu_ops->arch_vpmu_dump(v); +} + Index: xen-unstable.hg/xen/include/asm-x86/hvm/vpmu.h ==================================================================--- xen-unstable.hg.orig/xen/include/asm-x86/hvm/vpmu.h +++ xen-unstable.hg/xen/include/asm-x86/hvm/vpmu.h @@ -54,6 +54,7 @@ struct arch_vpmu_ops { void (*arch_vpmu_destroy)(struct vcpu *v); void (*arch_vpmu_save)(struct vcpu *v); void (*arch_vpmu_load)(struct vcpu *v); + void (*arch_vpmu_dump)(struct vcpu *v); }; int vmx_vpmu_initialise(struct vcpu *, unsigned int flags); @@ -87,6 +88,7 @@ void vpmu_initialise(struct vcpu *v); void vpmu_destroy(struct vcpu *v); void vpmu_save(struct vcpu *v); void vpmu_load(struct vcpu *v); +void vpmu_dump(struct vcpu *v); extern int acquire_pmu_ownership(int pmu_ownership); extern void release_pmu_ownership(int pmu_ownership); -- Company details: http://ts.fujitsu.com/imprint.html _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
Konrad Rzeszutek Wilk
2013-Mar-27 15:16 UTC
Re: [PATCH 3/3] vpmu intel: Dump vpmu infos in 'q' keyhandler
On Wed, Mar 27, 2013 at 03:13:22PM +0100, Dietmar Hahn wrote:> This patch works only on top of 2/3. > > This patch extends the printout of the VPCU infos of the keyhandler ''q''. > If vPMU is enabled is on the VCPU and active lines are printed like > (when running HVM openSuSE-12.3 with ''perf top''); > > (XEN) vPMU running > (XEN) general_0: 0x000000ffffff3ae1 ctrl: 0x000000000053003c > (XEN) fixed_1: 0x000000ff90799188 ctrl: 0xb > > This means general counter 0 and fixed counter 1 are running with showing > their contents and the contents of their configuration msr. > Thanks. > Dietmar. > > Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>> > > Index: xen-unstable.hg/xen/arch/x86/domain.c > ==================================================================> --- xen-unstable.hg.orig/xen/arch/x86/domain.c > +++ xen-unstable.hg/xen/arch/x86/domain.c > @@ -2093,6 +2093,9 @@ void arch_dump_domain_info(struct domain > void arch_dump_vcpu_info(struct vcpu *v) > { > paging_dump_vcpu_info(v); > + > + if ( is_hvm_vcpu(v) ) > + vpmu_dump(v); > } > > void domain_cpuid( > Index: xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c > ==================================================================> --- xen-unstable.hg.orig/xen/arch/x86/hvm/vmx/vpmu_core2.c > +++ xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c > @@ -124,6 +124,14 @@ static const u32 core2_fix_counters_msr[ > MSR_CORE_PERF_FIXED_CTR2 > }; > > +/* > + * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed > + * counters. 4 bits for every counter. > + */ > +#define FIXED_CTRL_CTRL_CONF_WIDTH 4 > +/* The index into the core2_ctrls_msr[] of this MSR used in core2_vpmu_dump() */ > +#define MSR_CORE_PERF_FIXED_CTR_CTRL_IDX 0 > + > /* Core 2 Non-architectual Performance Control MSRs. 
*/ > static const u32 core2_ctrls_msr[] = { > MSR_CORE_PERF_FIXED_CTR_CTRL, > @@ -638,6 +646,52 @@ static void core2_vpmu_do_cpuid(unsigned > } > } > > +/* Dump vpmu info on console, called in the context of keyhandler ''q''. */ > +static void core2_vpmu_dump(struct vcpu *v) > +{ > + struct vpmu_struct *vpmu = vcpu_vpmu(v); > + int i, num; > + struct core2_vpmu_context *core2_vpmu_cxt = NULL; > + u64 val, mask; > + > + if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) ) > + return; > + > + if ( !vpmu_is_set(vpmu, VPMU_RUNNING) ) > + { > + if ( vpmu_set(vpmu, VPMU_CONTEXT_LOADED) ) > + printk(" vPMU loaded\n"); > + else > + printk(" vPMU allocated\n"); > + return; > + } > + > + printk(" vPMU running\n"); > + core2_vpmu_cxt = vpmu->context; > + num = core2_get_pmc_count(); > + /* Print the contents of the counter and its configuration msr. */ > + for ( i = 0; i < num; i++ ) > + { > + struct arch_msr_pair* msr_pair = core2_vpmu_cxt->arch_msr_pair; > + if ( core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] ) > + printk(" general_%d: 0x%016lx ctrl: 0x%016lx\n", > + i, msr_pair[i].counter, msr_pair[i].control); > + } > + /* > + * The configuration of the fixed counter is 4 bits each in the > + * MSR_CORE_PERF_FIXED_CTR_CTRL. 
> + */ > + val = core2_vpmu_cxt->ctrls[MSR_CORE_PERF_FIXED_CTR_CTRL_IDX]; > + mask = (1 << FIXED_CTRL_CTRL_CONF_WIDTH) - 1; > + for ( i = 0; i < core2_fix_counters.num; i++ ) > + { > + if ( core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] ) > + printk(" fixed_%d: 0x%016lx ctrl: 0x%lx\n", > + i, core2_vpmu_cxt->fix_counters[i], val & mask); > + val >>= FIXED_CTRL_CTRL_CONF_WIDTH; > + } > +} > + > static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs) > { > struct vcpu *v = current; > @@ -751,7 +805,8 @@ struct arch_vpmu_ops core2_vpmu_ops = { > .do_cpuid = core2_vpmu_do_cpuid, > .arch_vpmu_destroy = core2_vpmu_destroy, > .arch_vpmu_save = core2_vpmu_save, > - .arch_vpmu_load = core2_vpmu_load > + .arch_vpmu_load = core2_vpmu_load, > + .arch_vpmu_dump = core2_vpmu_dump > }; > > static void core2_no_vpmu_do_cpuid(unsigned int input, > Index: xen-unstable.hg/xen/arch/x86/hvm/vpmu.c > ==================================================================> --- xen-unstable.hg.orig/xen/arch/x86/hvm/vpmu.c > +++ xen-unstable.hg/xen/arch/x86/hvm/vpmu.c > @@ -154,3 +154,12 @@ void vpmu_destroy(struct vcpu *v) > vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); > } > > +/* Dump some vpmu informations on console. Used in keyhandler dump_domains(). 
*/ > +void vpmu_dump(struct vcpu *v) > +{ > + struct vpmu_struct *vpmu = vcpu_vpmu(v); > + > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_dump ) > + vpmu->arch_vpmu_ops->arch_vpmu_dump(v); > +} > + > Index: xen-unstable.hg/xen/include/asm-x86/hvm/vpmu.h > ==================================================================> --- xen-unstable.hg.orig/xen/include/asm-x86/hvm/vpmu.h > +++ xen-unstable.hg/xen/include/asm-x86/hvm/vpmu.h > @@ -54,6 +54,7 @@ struct arch_vpmu_ops { > void (*arch_vpmu_destroy)(struct vcpu *v); > void (*arch_vpmu_save)(struct vcpu *v); > void (*arch_vpmu_load)(struct vcpu *v); > + void (*arch_vpmu_dump)(struct vcpu *v); > }; > > int vmx_vpmu_initialise(struct vcpu *, unsigned int flags); > @@ -87,6 +88,7 @@ void vpmu_initialise(struct vcpu *v); > void vpmu_destroy(struct vcpu *v); > void vpmu_save(struct vcpu *v); > void vpmu_load(struct vcpu *v); > +void vpmu_dump(struct vcpu *v); > > extern int acquire_pmu_ownership(int pmu_ownership); > extern void release_pmu_ownership(int pmu_ownership); > > > -- > Company details: http://ts.fujitsu.com/imprint.html> Index: xen-unstable.hg/xen/arch/x86/domain.c > ==================================================================> --- xen-unstable.hg.orig/xen/arch/x86/domain.c > +++ xen-unstable.hg/xen/arch/x86/domain.c > @@ -2093,6 +2093,9 @@ void arch_dump_domain_info(struct domain > void arch_dump_vcpu_info(struct vcpu *v) > { > paging_dump_vcpu_info(v); > + > + if ( is_hvm_vcpu(v) ) > + vpmu_dump(v); > } > > void domain_cpuid( > Index: xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c > ==================================================================> --- xen-unstable.hg.orig/xen/arch/x86/hvm/vmx/vpmu_core2.c > +++ xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c > @@ -124,6 +124,14 @@ static const u32 core2_fix_counters_msr[ > MSR_CORE_PERF_FIXED_CTR2 > }; > > +/* > + * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed > + * counters. 4 bits for every counter. 
> + */ > +#define FIXED_CTRL_CTRL_CONF_WIDTH 4 > +/* The index into the core2_ctrls_msr[] of this MSR used in core2_vpmu_dump() */ > +#define MSR_CORE_PERF_FIXED_CTR_CTRL_IDX 0 > + > /* Core 2 Non-architectual Performance Control MSRs. */ > static const u32 core2_ctrls_msr[] = { > MSR_CORE_PERF_FIXED_CTR_CTRL, > @@ -638,6 +646,52 @@ static void core2_vpmu_do_cpuid(unsigned > } > } > > +/* Dump vpmu info on console, called in the context of keyhandler ''q''. */ > +static void core2_vpmu_dump(struct vcpu *v) > +{ > + struct vpmu_struct *vpmu = vcpu_vpmu(v); > + int i, num; > + struct core2_vpmu_context *core2_vpmu_cxt = NULL; > + u64 val, mask; > + > + if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) ) > + return; > + > + if ( !vpmu_is_set(vpmu, VPMU_RUNNING) ) > + { > + if ( vpmu_set(vpmu, VPMU_CONTEXT_LOADED) ) > + printk(" vPMU loaded\n"); > + else > + printk(" vPMU allocated\n"); > + return; > + } > + > + printk(" vPMU running\n"); > + core2_vpmu_cxt = vpmu->context; > + num = core2_get_pmc_count(); > + /* Print the contents of the counter and its configuration msr. */ > + for ( i = 0; i < num; i++ ) > + { > + struct arch_msr_pair* msr_pair = core2_vpmu_cxt->arch_msr_pair; > + if ( core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] ) > + printk(" general_%d: 0x%016lx ctrl: 0x%016lx\n", > + i, msr_pair[i].counter, msr_pair[i].control); > + } > + /* > + * The configuration of the fixed counter is 4 bits each in the > + * MSR_CORE_PERF_FIXED_CTR_CTRL. 
> + */ > + val = core2_vpmu_cxt->ctrls[MSR_CORE_PERF_FIXED_CTR_CTRL_IDX]; > + mask = (1 << FIXED_CTRL_CTRL_CONF_WIDTH) - 1; > + for ( i = 0; i < core2_fix_counters.num; i++ ) > + { > + if ( core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] ) > + printk(" fixed_%d: 0x%016lx ctrl: 0x%lx\n", > + i, core2_vpmu_cxt->fix_counters[i], val & mask); > + val >>= FIXED_CTRL_CTRL_CONF_WIDTH; > + } > +} > + > static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs) > { > struct vcpu *v = current; > @@ -751,7 +805,8 @@ struct arch_vpmu_ops core2_vpmu_ops = { > .do_cpuid = core2_vpmu_do_cpuid, > .arch_vpmu_destroy = core2_vpmu_destroy, > .arch_vpmu_save = core2_vpmu_save, > - .arch_vpmu_load = core2_vpmu_load > + .arch_vpmu_load = core2_vpmu_load, > + .arch_vpmu_dump = core2_vpmu_dump > }; > > static void core2_no_vpmu_do_cpuid(unsigned int input, > Index: xen-unstable.hg/xen/arch/x86/hvm/vpmu.c > ==================================================================> --- xen-unstable.hg.orig/xen/arch/x86/hvm/vpmu.c > +++ xen-unstable.hg/xen/arch/x86/hvm/vpmu.c > @@ -154,3 +154,12 @@ void vpmu_destroy(struct vcpu *v) > vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); > } > > +/* Dump some vpmu informations on console. Used in keyhandler dump_domains(). 
*/ > +void vpmu_dump(struct vcpu *v) > +{ > + struct vpmu_struct *vpmu = vcpu_vpmu(v); > + > + if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_dump ) > + vpmu->arch_vpmu_ops->arch_vpmu_dump(v); > +} > + > Index: xen-unstable.hg/xen/include/asm-x86/hvm/vpmu.h > ==================================================================> --- xen-unstable.hg.orig/xen/include/asm-x86/hvm/vpmu.h > +++ xen-unstable.hg/xen/include/asm-x86/hvm/vpmu.h > @@ -54,6 +54,7 @@ struct arch_vpmu_ops { > void (*arch_vpmu_destroy)(struct vcpu *v); > void (*arch_vpmu_save)(struct vcpu *v); > void (*arch_vpmu_load)(struct vcpu *v); > + void (*arch_vpmu_dump)(struct vcpu *v); > }; > > int vmx_vpmu_initialise(struct vcpu *, unsigned int flags); > @@ -87,6 +88,7 @@ void vpmu_initialise(struct vcpu *v); > void vpmu_destroy(struct vcpu *v); > void vpmu_save(struct vcpu *v); > void vpmu_load(struct vcpu *v); > +void vpmu_dump(struct vcpu *v); > > extern int acquire_pmu_ownership(int pmu_ownership); > extern void release_pmu_ownership(int pmu_ownership);> _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > http://lists.xen.org/xen-devel
Dietmar Hahn
2013-Apr-04 12:10 UTC
Re: [PATCH 1/3] vpmu intel: Better names and replacing numerals with defines
Am Mittwoch 27 März 2013, 15:13:09 schrieb Dietmar Hahn:> This patch renames core2_counters to core2_fix_counters for better > understanding the code and subtitutes 2 numerals with defines in fixed counter > handling. > Thanks. > Dietmar. > > > Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>Ping?> Index: xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c > ==================================================================> --- xen-unstable.hg.orig/xen/arch/x86/hvm/vmx/vpmu_core2.c > +++ xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c > @@ -101,7 +101,7 @@ static void handle_pmc_quirk(u64 msr_con > } > } > > -static const u32 core2_counters_msr[] = { > +static const u32 core2_fix_counters_msr[] = { > MSR_CORE_PERF_FIXED_CTR0, > MSR_CORE_PERF_FIXED_CTR1, > MSR_CORE_PERF_FIXED_CTR2 > @@ -119,13 +119,13 @@ struct pmumsr { > const u32 *msr; > }; > > -static const struct pmumsr core2_counters = { > - 3, > - core2_counters_msr > +static const struct pmumsr core2_fix_counters = { > + VPMU_CORE2_NUM_FIXED, > + core2_fix_counters_msr > }; > > static const struct pmumsr core2_ctrls = { > - 3, > + VPMU_CORE2_NUM_CTRLS, > core2_ctrls_msr > }; > static int arch_pmc_cnt; > @@ -162,16 +162,16 @@ static int is_core2_vpmu_msr(u32 msr_ind > { > int i; > > - for ( i = 0; i < core2_counters.num; i++ ) > + for ( i = 0; i < core2_fix_counters.num; i++ ) > { > - if ( core2_counters.msr[i] == msr_index ) > + if ( core2_fix_counters.msr[i] == msr_index ) > { > *type = MSR_TYPE_COUNTER; > *index = i; > return 1; > } > } > - > + > for ( i = 0; i < core2_ctrls.num; i++ ) > { > if ( core2_ctrls.msr[i] == msr_index ) > @@ -214,10 +214,10 @@ static void core2_vpmu_set_msr_bitmap(un > int i; > > /* Allow Read/Write PMU Counters MSR Directly. 
*/ > - for ( i = 0; i < core2_counters.num; i++ ) > + for ( i = 0; i < core2_fix_counters.num; i++ ) > { > - clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap); > - clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), > + clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap); > + clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), > msr_bitmap + 0x800/BYTES_PER_LONG); > } > for ( i = 0; i < core2_get_pmc_count(); i++ ) > @@ -238,10 +238,10 @@ static void core2_vpmu_unset_msr_bitmap( > { > int i; > > - for ( i = 0; i < core2_counters.num; i++ ) > + for ( i = 0; i < core2_fix_counters.num; i++ ) > { > - set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap); > - set_bit(msraddr_to_bitpos(core2_counters.msr[i]), > + set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap); > + set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), > msr_bitmap + 0x800/BYTES_PER_LONG); > } > for ( i = 0; i < core2_get_pmc_count(); i++ ) > @@ -261,8 +261,8 @@ static inline void __core2_vpmu_save(str > int i; > struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context; > > - for ( i = 0; i < core2_counters.num; i++ ) > - rdmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); > + for ( i = 0; i < core2_fix_counters.num; i++ ) > + rdmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]); > for ( i = 0; i < core2_get_pmc_count(); i++ ) > rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); > core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC); > @@ -292,8 +292,8 @@ static inline void __core2_vpmu_load(str > int i; > struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context; > > - for ( i = 0; i < core2_counters.num; i++ ) > - wrmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); > + for ( i = 0; i < core2_fix_counters.num; i++ ) > + wrmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]); > for ( i = 0; i < core2_get_pmc_count(); i++ ) > wrmsrl(MSR_IA32_PERFCTR0+i, 
core2_vpmu_cxt->arch_msr_pair[i].counter); > > @@ -474,7 +474,7 @@ static int core2_vpmu_do_wrmsr(unsigned > > rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, non_global_ctrl); > global_ctrl = msr_content >> 32; > - for ( i = 0; i < 3; i++ ) > + for ( i = 0; i < core2_fix_counters.num; i++ ) > { > core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] > (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0); > @@ -486,7 +486,7 @@ static int core2_vpmu_do_wrmsr(unsigned > non_global_ctrl = msr_content; > vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl); > global_ctrl >>= 32; > - for ( i = 0; i < 3; i++ ) > + for ( i = 0; i < core2_fix_counters.num; i++ ) > { > core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] > (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0); > @@ -502,7 +502,7 @@ static int core2_vpmu_do_wrmsr(unsigned > (global_ctrl >> tmp) & (msr_content >> 22) & 1; > } > > - for ( i = 0; i < 3; i++ ) > + for ( i = 0; i < core2_fix_counters.num; i++ ) > pmu_enable |= core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i]; > for ( i = 0; i < core2_get_pmc_count(); i++ ) > pmu_enable |= core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i]; > Index: xen-unstable.hg/xen/include/asm-x86/hvm/vmx/vpmu_core2.h > ==================================================================> --- xen-unstable.hg.orig/xen/include/asm-x86/hvm/vmx/vpmu_core2.h > +++ xen-unstable.hg/xen/include/asm-x86/hvm/vmx/vpmu_core2.h > @@ -23,6 +23,11 @@ > #ifndef __ASM_X86_HVM_VPMU_CORE_H_ > #define __ASM_X86_HVM_VPMU_CORE_H_ > > +/* Currently only 3 fixed counters are supported. 
*/ > +#define VPMU_CORE2_NUM_FIXED 3 > +/* Currently only 3 Non-architectual Performance Control MSRs */ > +#define VPMU_CORE2_NUM_CTRLS 3 > + > struct arch_msr_pair { > u64 counter; > u64 control; > @@ -30,14 +35,14 @@ struct arch_msr_pair { > > struct core2_pmu_enable { > char ds_area_enable; > - char fixed_ctr_enable[3]; > + char fixed_ctr_enable[VPMU_CORE2_NUM_FIXED]; > char arch_pmc_enable[1]; > }; > > struct core2_vpmu_context { > struct core2_pmu_enable *pmu_enable; > - u64 counters[3]; > - u64 ctrls[3]; > + u64 fix_counters[VPMU_CORE2_NUM_FIXED]; > + u64 ctrls[VPMU_CORE2_NUM_CTRLS]; > u64 global_ovf_status; > u32 hw_lapic_lvtpc; > struct arch_msr_pair arch_msr_pair[1];-- Company details: http://ts.fujitsu.com/imprint.html
Konrad Rzeszutek Wilk
2013-Apr-05 13:34 UTC
Re: [PATCH 1/3] vpmu intel: Better names and replacing numerals with defines
On Thu, Apr 04, 2013 at 02:10:04PM +0200, Dietmar Hahn wrote:> Am Mittwoch 27 März 2013, 15:13:09 schrieb Dietmar Hahn: > > This patch renames core2_counters to core2_fix_counters for better > > understanding the code and subtitutes 2 numerals with defines in fixed counter > > handling. > > Thanks. > > Dietmar. > > > > > > Signed-off-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com> > > Ping?Looks nice. Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>> > > Index: xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c > > ==================================================================> > --- xen-unstable.hg.orig/xen/arch/x86/hvm/vmx/vpmu_core2.c > > +++ xen-unstable.hg/xen/arch/x86/hvm/vmx/vpmu_core2.c > > @@ -101,7 +101,7 @@ static void handle_pmc_quirk(u64 msr_con > > } > > } > > > > -static const u32 core2_counters_msr[] = { > > +static const u32 core2_fix_counters_msr[] = { > > MSR_CORE_PERF_FIXED_CTR0, > > MSR_CORE_PERF_FIXED_CTR1, > > MSR_CORE_PERF_FIXED_CTR2 > > @@ -119,13 +119,13 @@ struct pmumsr { > > const u32 *msr; > > }; > > > > -static const struct pmumsr core2_counters = { > > - 3, > > - core2_counters_msr > > +static const struct pmumsr core2_fix_counters = { > > + VPMU_CORE2_NUM_FIXED, > > + core2_fix_counters_msr > > }; > > > > static const struct pmumsr core2_ctrls = { > > - 3, > > + VPMU_CORE2_NUM_CTRLS, > > core2_ctrls_msr > > }; > > static int arch_pmc_cnt; > > @@ -162,16 +162,16 @@ static int is_core2_vpmu_msr(u32 msr_ind > > { > > int i; > > > > - for ( i = 0; i < core2_counters.num; i++ ) > > + for ( i = 0; i < core2_fix_counters.num; i++ ) > > { > > - if ( core2_counters.msr[i] == msr_index ) > > + if ( core2_fix_counters.msr[i] == msr_index ) > > { > > *type = MSR_TYPE_COUNTER; > > *index = i; > > return 1; > > } > > } > > - > > + > > for ( i = 0; i < core2_ctrls.num; i++ ) > > { > > if ( core2_ctrls.msr[i] == msr_index ) > > @@ -214,10 +214,10 @@ static void core2_vpmu_set_msr_bitmap(un > > int i; > > > > /* Allow Read/Write PMU 
Counters MSR Directly. */ > > - for ( i = 0; i < core2_counters.num; i++ ) > > + for ( i = 0; i < core2_fix_counters.num; i++ ) > > { > > - clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap); > > - clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), > > + clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap); > > + clear_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), > > msr_bitmap + 0x800/BYTES_PER_LONG); > > } > > for ( i = 0; i < core2_get_pmc_count(); i++ ) > > @@ -238,10 +238,10 @@ static void core2_vpmu_unset_msr_bitmap( > > { > > int i; > > > > - for ( i = 0; i < core2_counters.num; i++ ) > > + for ( i = 0; i < core2_fix_counters.num; i++ ) > > { > > - set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap); > > - set_bit(msraddr_to_bitpos(core2_counters.msr[i]), > > + set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap); > > + set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), > > msr_bitmap + 0x800/BYTES_PER_LONG); > > } > > for ( i = 0; i < core2_get_pmc_count(); i++ ) > > @@ -261,8 +261,8 @@ static inline void __core2_vpmu_save(str > > int i; > > struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context; > > > > - for ( i = 0; i < core2_counters.num; i++ ) > > - rdmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); > > + for ( i = 0; i < core2_fix_counters.num; i++ ) > > + rdmsrl(core2_fix_counters.msr[i], core2_vpmu_cxt->fix_counters[i]); > > for ( i = 0; i < core2_get_pmc_count(); i++ ) > > rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); > > core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC); > > @@ -292,8 +292,8 @@ static inline void __core2_vpmu_load(str > > int i; > > struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context; > > > > - for ( i = 0; i < core2_counters.num; i++ ) > > - wrmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); > > + for ( i = 0; i < core2_fix_counters.num; i++ ) > > + wrmsrl(core2_fix_counters.msr[i], 
core2_vpmu_cxt->fix_counters[i]); > > for ( i = 0; i < core2_get_pmc_count(); i++ ) > > wrmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); > > > > @@ -474,7 +474,7 @@ static int core2_vpmu_do_wrmsr(unsigned > > > > rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, non_global_ctrl); > > global_ctrl = msr_content >> 32; > > - for ( i = 0; i < 3; i++ ) > > + for ( i = 0; i < core2_fix_counters.num; i++ ) > > { > > core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] > > (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0); > > @@ -486,7 +486,7 @@ static int core2_vpmu_do_wrmsr(unsigned > > non_global_ctrl = msr_content; > > vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl); > > global_ctrl >>= 32; > > - for ( i = 0; i < 3; i++ ) > > + for ( i = 0; i < core2_fix_counters.num; i++ ) > > { > > core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] > > (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0); > > @@ -502,7 +502,7 @@ static int core2_vpmu_do_wrmsr(unsigned > > (global_ctrl >> tmp) & (msr_content >> 22) & 1; > > } > > > > - for ( i = 0; i < 3; i++ ) > > + for ( i = 0; i < core2_fix_counters.num; i++ ) > > pmu_enable |= core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i]; > > for ( i = 0; i < core2_get_pmc_count(); i++ ) > > pmu_enable |= core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i]; > > Index: xen-unstable.hg/xen/include/asm-x86/hvm/vmx/vpmu_core2.h > > ==================================================================> > --- xen-unstable.hg.orig/xen/include/asm-x86/hvm/vmx/vpmu_core2.h > > +++ xen-unstable.hg/xen/include/asm-x86/hvm/vmx/vpmu_core2.h > > @@ -23,6 +23,11 @@ > > #ifndef __ASM_X86_HVM_VPMU_CORE_H_ > > #define __ASM_X86_HVM_VPMU_CORE_H_ > > > > +/* Currently only 3 fixed counters are supported. 
*/ > > +#define VPMU_CORE2_NUM_FIXED 3 > > +/* Currently only 3 Non-architectual Performance Control MSRs */ > > +#define VPMU_CORE2_NUM_CTRLS 3 > > + > > struct arch_msr_pair { > > u64 counter; > > u64 control; > > @@ -30,14 +35,14 @@ struct arch_msr_pair { > > > > struct core2_pmu_enable { > > char ds_area_enable; > > - char fixed_ctr_enable[3]; > > + char fixed_ctr_enable[VPMU_CORE2_NUM_FIXED]; > > char arch_pmc_enable[1]; > > }; > > > > struct core2_vpmu_context { > > struct core2_pmu_enable *pmu_enable; > > - u64 counters[3]; > > - u64 ctrls[3]; > > + u64 fix_counters[VPMU_CORE2_NUM_FIXED]; > > + u64 ctrls[VPMU_CORE2_NUM_CTRLS]; > > u64 global_ovf_status; > > u32 hw_lapic_lvtpc; > > struct arch_msr_pair arch_msr_pair[1]; > -- > Company details: http://ts.fujitsu.com/imprint.html > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > http://lists.xen.org/xen-devel >