Implement tsc adjust feature
The IA32_TSC_ADJUST MSR is maintained separately for each logical processor.
A logical processor maintains and uses the IA32_TSC_ADJUST MSR as follows
(a minimal model follows the list):
1) On RESET, the value of the IA32_TSC_ADJUST MSR is 0.
2) If an execution of WRMSR to the IA32_TIME_STAMP_COUNTER MSR adds (or
   subtracts) value X from the TSC, the logical processor also adds (or
   subtracts) value X from the IA32_TSC_ADJUST MSR.
3) If an execution of WRMSR to the IA32_TSC_ADJUST MSR adds (or subtracts)
   value X from that MSR, the logical processor also adds (or subtracts)
   value X from the TSC.
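For illustration only, a minimal C model of rules 1) to 3) above. The
variables tsc and tsc_adjust are stand-ins for the per-logical-processor
hardware state; nothing here is Xen code, and the free-running tick of
the real TSC is ignored.

#include <stdint.h>

static uint64_t tsc;         /* models IA32_TIME_STAMP_COUNTER */
static uint64_t tsc_adjust;  /* models IA32_TSC_ADJUST; 0 on RESET (rule 1) */

/* Rule 2: a WRMSR to the TSC mirrors the applied delta into TSC_ADJUST. */
static void model_wrmsr_tsc(uint64_t new_tsc)
{
    uint64_t x = new_tsc - tsc;  /* value X added to (or subtracted from) the TSC */

    tsc += x;
    tsc_adjust += x;
}

/* Rule 3: a WRMSR to TSC_ADJUST mirrors the applied delta into the TSC. */
static void model_wrmsr_tsc_adjust(uint64_t new_adjust)
{
    uint64_t x = new_adjust - tsc_adjust;

    tsc_adjust += x;
    tsc += x;
}

The net effect is that software can always recover the unadjusted counter
value as TSC minus IA32_TSC_ADJUST.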
Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
diff -r d5c677159abb xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Thu Sep 20 21:50:56 2012 +0800
+++ b/xen/arch/x86/hvm/hvm.c Thu Sep 20 23:34:04 2012 +0800
@@ -244,6 +244,7 @@
void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
{
uint64_t tsc;
+ uint64_t delta_tsc;
if ( v->domain->arch.vtsc )
{
@@ -255,10 +256,23 @@
rdtscll(tsc);
}
- v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - tsc;
+ delta_tsc = guest_tsc - tsc;
+
+ v->arch.hvm_vcpu.msr_tsc_adjust += delta_tsc
+ - v->arch.hvm_vcpu.cache_tsc_offset;
+ v->arch.hvm_vcpu.cache_tsc_offset = delta_tsc;
+
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
}
+void hvm_set_guest_tsc_adjust(struct vcpu *v, u64 tsc_adjust)
+{
+ v->arch.hvm_vcpu.cache_tsc_offset += tsc_adjust
+ - v->arch.hvm_vcpu.msr_tsc_adjust;
+ hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+ v->arch.hvm_vcpu.msr_tsc_adjust = tsc_adjust;
+}
+
u64 hvm_get_guest_tsc(struct vcpu *v)
{
uint64_t tsc;
@@ -277,6 +291,11 @@
return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}
+u64 hvm_get_guest_tsc_adjust(struct vcpu *v)
+{
+ return v->arch.hvm_vcpu.msr_tsc_adjust;
+}
+
void hvm_migrate_timers(struct vcpu *v)
{
rtc_migrate_timers(v);
@@ -2776,6 +2795,10 @@
*msr_content = hvm_get_guest_tsc(v);
break;
+ case MSR_IA32_TSC_ADJUST:
+ *msr_content = hvm_get_guest_tsc_adjust(v);
+ break;
+
case MSR_TSC_AUX:
*msr_content = hvm_msr_tsc_aux(v);
break;
@@ -2889,6 +2912,10 @@
hvm_set_guest_tsc(v, msr_content);
break;
+ case MSR_IA32_TSC_ADJUST:
+ hvm_set_guest_tsc_adjust(v, msr_content);
+ break;
+
case MSR_TSC_AUX:
v->arch.hvm_vcpu.msr_tsc_aux = (uint32_t)msr_content;
if ( cpu_has_rdtscp
@@ -3436,6 +3463,8 @@
v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+ v->arch.hvm_vcpu.msr_tsc_adjust = 0;
+
paging_update_paging_modes(v);
v->arch.flags |= TF_kernel_mode;
diff -r d5c677159abb xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h Thu Sep 20 21:50:56 2012 +0800
+++ b/xen/include/asm-x86/hvm/vcpu.h Thu Sep 20 23:34:04 2012 +0800
@@ -137,6 +137,7 @@
struct hvm_vcpu_asid n1asid;
u32 msr_tsc_aux;
+ u64 msr_tsc_adjust;
/* VPMU */
struct vpmu_struct vpmu;
diff -r d5c677159abb xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h Thu Sep 20 21:50:56 2012 +0800
+++ b/xen/include/asm-x86/msr-index.h Thu Sep 20 23:34:04 2012 +0800
@@ -284,6 +284,7 @@
#define MSR_IA32_PLATFORM_ID 0x00000017
#define MSR_IA32_EBL_CR_POWERON 0x0000002a
#define MSR_IA32_EBC_FREQUENCY_ID 0x0000002c
+#define MSR_IA32_TSC_ADJUST 0x0000003b
#define MSR_IA32_APICBASE 0x0000001b
#define MSR_IA32_APICBASE_BSP (1<<8)
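To make the arithmetic in hvm_set_guest_tsc() and hvm_set_guest_tsc_adjust()
above easier to follow, here is a hedged, self-contained model of the same
accounting (struct vtsc and the function names are illustrative, not the Xen
API). The invariant is guest_tsc = host_tsc + cache_tsc_offset, with
msr_tsc_adjust tracking the guest-visible IA32_TSC_ADJUST value:

#include <stdint.h>

struct vtsc {
    uint64_t cache_tsc_offset;  /* guest_tsc = host_tsc + cache_tsc_offset */
    uint64_t msr_tsc_adjust;    /* guest-visible IA32_TSC_ADJUST */
};

static void set_guest_tsc(struct vtsc *v, uint64_t guest_tsc,
                          uint64_t host_tsc)
{
    uint64_t delta_tsc = guest_tsc - host_tsc;

    /* The offset moves by exactly the value X the guest added to its
     * TSC, so mirror that same X into the adjust value (rule 2). */
    v->msr_tsc_adjust += delta_tsc - v->cache_tsc_offset;
    v->cache_tsc_offset = delta_tsc;
}

static void set_guest_tsc_adjust(struct vtsc *v, uint64_t tsc_adjust)
{
    /* Mirror the change in the adjust value into the offset, i.e. into
     * the guest-visible TSC (rule 3). */
    v->cache_tsc_offset += tsc_adjust - v->msr_tsc_adjust;
    v->msr_tsc_adjust = tsc_adjust;
}

In set_guest_tsc(), the change in the offset is exactly the value X the
guest added to its TSC, so rule 2 is honoured by adding that same
difference to msr_tsc_adjust; set_guest_tsc_adjust() is the mirror image
for rule 3.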
>>> On 20.09.12 at 10:09, "Liu, Jinsong" <jinsong.liu@intel.com> wrote:
> Implement tsc adjust feature
> [...]

The title is a little misleading: as it stands, I would have expected the
patch to make use of the new MSR, particularly in synchronize_tsc_slave()
(all other uses of write_tsc() should not be reached on modern CPUs). But
for the purpose of HVM, the patch looks okay to me.

Jan
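For context, a hedged sketch of the kind of use Jan alludes to: shifting a
slave CPU's TSC via IA32_TSC_ADJUST during TSC synchronization instead of
rewriting the counter with write_tsc(). The accessors and measured_offset
are hypothetical; this is not what Xen's synchronize_tsc_slave() looks like.

#include <stdint.h>

#define MSR_IA32_TSC_ADJUST 0x0000003b

/* Hypothetical raw MSR accessors; Xen's real helpers differ. */
static inline uint64_t rdmsr64(uint32_t msr)
{
    uint32_t lo, hi;

    asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
    return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
    asm volatile ( "wrmsr" : : "c" (msr),
                   "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)) );
}

/* Cancel this CPU's measured offset (slave TSC minus master TSC,
 * hypothetical) without ever rewriting the counter itself. */
static void sync_slave_tsc(int64_t measured_offset)
{
    wrmsr64(MSR_IA32_TSC_ADJUST,
            rdmsr64(MSR_IA32_TSC_ADJUST) - (uint64_t)measured_offset);
}

Writing IA32_TSC_ADJUST this way shifts the TSC by a known amount without
stopping or overwriting the free-running counter, which is why it is
attractive for cross-CPU synchronization.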