Displaying 10 results from an estimated 10 matches for "cpufeat_mask".
2013 Sep 23
11
[PATCH v4 0/4] x86/HVM: miscellaneous improvements
The first and third patches are cleaned up versions of an earlier v3
submission by Yang.
1: Nested VMX: check VMX capability before read VMX related MSRs
2: VMX: clean up capability checks
3: Nested VMX: fix IA32_VMX_CR4_FIXED1 msr emulation
4: x86: make hvm_cpuid() tolerate NULL pointers
Signed-off-by: Jan Beulich <jbeulich@suse.com>
2012 Feb 28
3
[Patch] X86: expose HLE/RTM features to dom0
...u, Jinsong <jinsong.liu@intel.com>
diff -r 92e03310878f xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Wed Feb 08 21:05:52 2012 +0800
+++ b/xen/arch/x86/traps.c Mon Feb 27 02:23:42 2012 +0800
@@ -857,9 +857,11 @@
case 0x00000007:
if ( regs->ecx == 0 )
b &= (cpufeat_mask(X86_FEATURE_BMI1) |
+ cpufeat_mask(X86_FEATURE_HLE) |
cpufeat_mask(X86_FEATURE_AVX2) |
cpufeat_mask(X86_FEATURE_BMI2) |
cpufeat_mask(X86_FEATURE_ERMS) |
+ cpufeat_mask(X86_FEATURE_RTM) |...
2013 Mar 12
0
[PATCH] vpmu intel: pass through cpuid bits when BTS is enabled
...ore2.c Mon Mar 11 16:13:42 2013 +0000
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Tue Mar 12 13:58:40 2013 +0100
@@ -607,6 +607,10 @@ static void core2_vpmu_do_cpuid(unsigned
{
/* Switch on the 'Debug Store' feature in CPUID.EAX[1]:EDX[21] */
*edx |= cpufeat_mask(X86_FEATURE_DS);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
+ *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
+ *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
}
}
}
-...
2011 Nov 24
0
[PATCH 6/6] X86: implement PCID/INVPCID for hvm
...&& nestedhvm_vcpu_in_guestmode(v) )
paging_update_nestedmode(v);
else
@@ -2409,10 +2430,18 @@ void hvm_cpuid(unsigned int input, unsig
if ( xsave_enabled(v) )
*ecx |= (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE) ?
cpufeat_mask(X86_FEATURE_OSXSAVE) : 0;
+
+ /* Not expose PCID to non-hap hvm */
+ if ( !hap_enabled(d) )
+ *ecx &= ~cpufeat_mask(X86_FEATURE_PCID);
break;
case 0x7:
if ( (count == 0) && !cpu_has_smep )
*ebx &= ~cpufeat_mask(X86_FEATURE...
2013 Aug 23
2
[PATCH] Nested VMX: Allow to set CR4.OSXSAVE if guest has xsave feature
...cept(unsigned int msr, u64 *msr_content)
data = X86_CR4_VMXE;
break;
case MSR_IA32_VMX_CR4_FIXED1:
+ data = 0x267ff;
+ /* Allow to set OSXSAVE if guest has xsave feature. */
+ hvm_cpuid(0x1, &eax, &ebx, &ecx, &edx);
+ if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
+ data |= X86_CR4_OSXSAVE;
/* allow 0-settings except SMXE */
- data = 0x267ff & ~X86_CR4_SMXE;
+ data &= ~X86_CR4_SMXE;
break;
case MSR_IA32_VMX_MISC:
/* Do not support CR3-target feature now */
--
1.7.1
2013 Dec 02
0
[PATCH v4 3/7] X86: MPX IA32_BNDCFGS msr handle
.../hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 9c88c73..0f7178b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2905,6 +2905,12 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
if ( (count == 0) && !cpu_has_smep )
*ebx &= ~cpufeat_mask(X86_FEATURE_SMEP);
+ /* Don't expose MPX to hvm when VMX support is not available */
+ if ( (count == 0) &&
+ (!(vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) ||
+ !(vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS)) )
+ *ebx &=...
2013 Jan 03
2
[PATCH V4] mem_event: Add support for MEM_EVENT_REASON_MSR
...m/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Fri Dec 21 17:05:38 2012 +0000
+++ b/xen/arch/x86/hvm/hvm.c Thu Jan 03 12:05:13 2013 +0200
@@ -2927,6 +2927,8 @@ int hvm_msr_write_intercept(unsigned int
hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+ hvm_memory_event_msr(msr, msr_content);
+
switch ( msr )
{
case MSR_EFER:
@@ -3862,6 +3864,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
break;
case HVM_PARAM_MEMORY_EVENT_INT3:
case HVM_PARAM_MEMORY_EVENT_SINGLE_S...
2012 Sep 11
0
[PATCH 1/3] x86/hvm: don't use indirect calls without need
...uid_intercept(
{
case 0x80000001:
/* SYSCALL is visible iff running in long mode. */
- hvm_get_segment_register(v, x86_seg_cs, &cs);
+ vmx_get_segment_register(v, x86_seg_cs, &cs);
if ( cs.attr.fields.l )
*edx |= cpufeat_mask(X86_FEATURE_SYSCALL);
else
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -291,8 +291,6 @@ static int vmx_inst_check_privilege(stru
struct vcpu *v = current;
struct segment_register cs;
- hvm_get_segment_register(v, x86_seg_cs, &cs);
-
if...
2012 Dec 20
4
[PATCH V2] mem_event: Add support for MEM_EVENT_REASON_MSR
...m/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Tue Dec 18 18:16:52 2012 +0000
+++ b/xen/arch/x86/hvm/hvm.c Thu Dec 20 14:52:52 2012 +0200
@@ -2927,6 +2927,8 @@ int hvm_msr_write_intercept(unsigned int
hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+ hvm_memory_event_msr(msr, msr_content);
+
switch ( msr )
{
case MSR_EFER:
@@ -3857,6 +3859,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
case HVM_PARAM_MEMORY_EVENT_CR0:
case HVM_PARAM_MEMORY_EVENT_CR3:
case HVM_PARAM_...
2011 Aug 15
36
expose MWAIT to dom0
There are basically two methods to enter a given C-state: legacy (hlt + I/O read),
and native(using mwait). MWAIT is always preferred when both underlying CPU
and OS support, which is a more efficient way to conduct C-state transition.
Xen PM relies on Dom0 to parse ACPI Cx/Px information, which involves one
step to notify BIOS about a set of capabilities supported by OSPM. One capability