2013 Sep 23
11
[PATCH v4 0/4] x86/HVM: miscellaneous improvements
The first and third patches are cleaned-up versions of an earlier v3
submission by Yang.
1: Nested VMX: check VMX capability before read VMX related MSRs
2: VMX: clean up capability checks
3: Nested VMX: fix IA32_VMX_CR4_FIXED1 msr emulation
4: x86: make hvm_cpuid() tolerate NULL pointers
Signed-off-by: Jan Beulich <jbeulich@suse.com>
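Patch 4's summary suggests the common pattern of redirecting NULL
output pointers at a local dummy so callers can ask for just one
register; a hedged sketch (the dummy-redirect shape is an assumption
based on the title, not the committed hunk):

  void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                 unsigned int *ecx, unsigned int *edx)
  {
      unsigned int dummy;

      /* Callers may pass NULL for registers they do not care about. */
      if ( !eax ) eax = &dummy;
      if ( !ebx ) ebx = &dummy;
      if ( !ecx ) ecx = &dummy;
      if ( !edx ) edx = &dummy;

      /* ... existing leaf handling writes through all four pointers ... */
  }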
2007 Feb 01
0
[PATCH] hide RDTSCP feature flag from PV guests
...====================
--- 2007-01-16.orig/xen/arch/x86/traps.c 2007-01-15 09:10:11.000000000 +0100
+++ 2007-01-16/xen/arch/x86/traps.c 2007-02-01 17:25:28.000000000 +0100
@@ -597,6 +597,11 @@ static int emulate_forced_invalid_op(str
if ( !IS_PRIV(current->domain) )
clear_bit(X86_FEATURE_MTRR, &d);
}
+ else if ( regs->eax == 0x80000001 )
+ {
+ /* Modify Feature Information. */
+ clear_bit(X86_FEATURE_RDTSCP % 32, &d);
+ }
else
{
(void)cpuid_hypervisor_leaves(regs->eax, &a, &b, &c, &d);
Index: 2007-01-16/xen/inc...
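For context (a property of the cpufeature headers, not something this
patch adds): X86_FEATURE_* constants are (word * 32 + bit) indexes into
the capability bitmap, so the bit inside a single 32-bit CPUID register
is recovered with % 32. RDTSCP is CPUID.80000001H:EDX bit 27, i.e.
word 1, bit 27:

  /* Illustrative value; the real constant lives in cpufeature.h. */
  #define X86_FEATURE_RDTSCP (1 * 32 + 27)

  static void hide_rdtscp(unsigned long *edx)
  {
      clear_bit(X86_FEATURE_RDTSCP % 32, edx);  /* == clear_bit(27, edx) */
  }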
2013 Dec 13
0
[PATCH v2] pvh: disable MTRR feature on cpuid for Dom0
.../traps.c
+++ b/xen/arch/x86/traps.c
@@ -796,6 +796,8 @@ void pv_cpuid(struct cpu_user_regs *regs)
__clear_bit(X86_FEATURE_DS, &d);
__clear_bit(X86_FEATURE_ACC, &d);
__clear_bit(X86_FEATURE_PBE, &d);
+ if ( is_pvh_vcpu(current) )
+ __clear_bit(X86_FEATURE_MTRR, &d);
__clear_bit(X86_FEATURE_DTES64 % 32, &c);
__clear_bit(X86_FEATURE_MWAIT % 32, &c);
--
1.7.7.5 (Apple Git-26)
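A side note on the bitops in this hunk (a general kernel/Xen
convention, not something the patch introduces): the double-underscore
variants are the non-atomic forms, which are sufficient here because d
is a function-local copy of the CPUID output:

  unsigned int d = 0xffffffff;     /* stand-in for a CPUID EDX value */

  /* Non-atomic clear: no lock prefix, fine for data only this
   * function can see. */
  __clear_bit(X86_FEATURE_MTRR, &d);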
2013 Jan 03
2
[PATCH V4] mem_event: Add support for MEM_EVENT_REASON_MSR
.../xen/arch/x86/hvm/hvm.c Fri Dec 21 17:05:38 2012 +0000
+++ b/xen/arch/x86/hvm/hvm.c Thu Jan 03 12:05:13 2013 +0200
@@ -2927,6 +2927,8 @@ int hvm_msr_write_intercept(unsigned int
hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+ hvm_memory_event_msr(msr, msr_content);
+
switch ( msr )
{
case MSR_EFER:
@@ -3862,6 +3864,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
break;
case HVM_PARAM_MEMORY_EVENT_INT3:
case HVM_PARAM_MEMORY_EVENT_SINGLE_STEP:
+...
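The cpufeat_mask() above turns a feature index into an in-register
mask; a sketch of its shape (the real definition is in Xen's
cpufeature.h and may differ in detail):

  /* (word * 32 + bit) index -> mask within one 32-bit CPUID register. */
  #define cpufeat_mask(idx)  (((uint32_t)1) << ((idx) & 31))

  /* Usage as in the hunk: nonzero iff the guest sees MTRR. */
  mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));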
2007 Apr 18
0
[PATCH 1/9] Vmi timer fixes round two.patch
...ocal_irq_save(flags);
activate_vmi();
-#ifdef CONFIG_SMP
+
+#ifdef CONFIG_X86_IO_APIC
no_timer_check = 1;
#endif
+
local_irq_restore(flags & X86_EFLAGS_IF);
}
@@ -942,7 +944,8 @@ static int __init parse_vmi(char *arg)
} else if (!strcmp(arg, "disable_mtrr")) {
clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
disable_mtrr = 1;
- }
+ } else if (!strcmp(arg, "disable_noidle"))
+ disable_noidle = 1;
return 0;
}
diff -r f62ebe3ba01c arch/i386/kernel/vmitime.c
--- a/arch/i386/kernel/vmitime.c Tue Feb 27 14:01:28 2007 -0800
+++ b/arch/i386/kernel/vmitime.c Tu...
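For context on how parse_vmi() gets called (whether vmi.c registered it
via early_param() or __setup() is an assumption here):

  static int disable_mtrr;

  /* "vmi=disable_mtrr" on the kernel command line lands here. */
  static int __init parse_vmi(char *arg)
  {
          if (!arg)
                  return -EINVAL;
          if (!strcmp(arg, "disable_mtrr")) {
                  clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
                  disable_mtrr = 1;
          }
          return 0;
  }
  early_param("vmi", parse_vmi);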
2012 May 22
20
[PATCH] RFC: Linux: disable APERF/MPERF feature in PV kernels
Hi,
while testing some APERF/MPERF semantics I discovered that this feature
is enabled in Xen Dom0, but is not reliable.
The Linux kernel's scheduler uses this feature if it sees the CPUID bit,
leading to costly RDMSR traps (several hundred thousand during a kernel
compile) and bogus values due to VCPU migration during the measurement.
The attached patch explicitly disables this CPU capability.
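The excerpt does not show the hunk itself; the usual mechanism for such
a patch is to clear the synthetic capability bit during Xen PV setup,
along these lines (the call site is an assumption, while
setup_clear_cpu_cap() and X86_FEATURE_APERFMPERF are real mainline
identifiers):

  /* Hide APERF/MPERF so the scheduler never touches the MSRs. */
  static void __init xen_hide_aperfmperf(void)
  {
          setup_clear_cpu_cap(X86_FEATURE_APERFMPERF);
  }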
2012 Dec 20
4
[PATCH V2] mem_event: Add support for MEM_EVENT_REASON_MSR
.../xen/arch/x86/hvm/hvm.c Tue Dec 18 18:16:52 2012 +0000
+++ b/xen/arch/x86/hvm/hvm.c Thu Dec 20 14:52:52 2012 +0200
@@ -2927,6 +2927,8 @@ int hvm_msr_write_intercept(unsigned int
hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+ hvm_memory_event_msr(msr, msr_content);
+
switch ( msr )
{
case MSR_EFER:
@@ -3857,6 +3859,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
case HVM_PARAM_MEMORY_EVENT_CR0:
case HVM_PARAM_MEMORY_EVENT_CR3:
case HVM_PARAM_MEMORY_EVENT_CR4:...
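From the toolstack side a subscriber would switch the new event on via
the HVM parameter this hunk extends; xc_set_hvm_param() is the real
libxc call, while the parameter name and mode flag below follow the
series' naming and are assumptions beyond what the excerpt shows:

  #include <xenctrl.h>

  int enable_msr_events(xc_interface *xch, uint32_t domid)
  {
      return xc_set_hvm_param(xch, domid, HVM_PARAM_MEMORY_EVENT_MSR,
                              HVMPME_mode_sync);
  }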
2007 Apr 18
0
[PATCH 8/9] Vmi apic ops.diff
...This is virtual hardware; timer routing is wired correctly */
no_timer_check = 1;
#endif
- no_sync_cmos_clock = 1;
-
local_irq_restore(flags & X86_EFLAGS_IF);
}
@@ -960,6 +975,9 @@ static int __init parse_vmi(char *arg)
} else if (!strcmp(arg, "disable_mtrr")) {
clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
disable_mtrr = 1;
+ } else if (!strcmp(arg, "disable_timer")) {
+ disable_vmi_timer = 1;
+ disable_noidle = 1;
} else if (!strcmp(arg, "disable_noidle"))
disable_noidle = 1;
return 0;
diff -r 0ba8434a5c7e include/asm-i386/vmi.h
--- a/i...
2007 Apr 18
0
[PATCH 4/5] Vmi.patch
...t;r" (vmi_ops.cpuid));
+ if (override) {
+ if (disable_pse)
+ *edx &= ~X86_FEATURE_PSE;
+ if (disable_pge)
+ *edx &= ~X86_FEATURE_PGE;
+ if (disable_sep)
+ *edx &= ~X86_FEATURE_SEP;
+ if (disable_tsc)
+ *edx &= ~X86_FEATURE_TSC;
+ if (disable_mtrr)
+ *edx &= ~X86_FEATURE_MTRR;
+ }
+}
+
+static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
+{
+ if (gdt[nr].a != new->a || gdt[nr].b != new->b)
+ write_gdt_entry(gdt, nr, new->a, new->b);
+}
+
+static fastcall void vmi_load_tls(struct thread_struct *t, unsigned int cpu)...
2007 Apr 18
0
[PATCH 5/6] VMI backend for paravirt-ops
...t;r" (vmi_ops.cpuid));
+ if (override) {
+ if (disable_pse)
+ *edx &= ~X86_FEATURE_PSE;
+ if (disable_pge)
+ *edx &= ~X86_FEATURE_PGE;
+ if (disable_sep)
+ *edx &= ~X86_FEATURE_SEP;
+ if (disable_tsc)
+ *edx &= ~X86_FEATURE_TSC;
+ if (disable_mtrr)
+ *edx &= ~X86_FEATURE_MTRR;
+ }
+}
+
+static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
+{
+ if (gdt[nr].a != new->a || gdt[nr].b != new->b)
+ write_gdt_entry(gdt, nr, new->a, new->b);
+}
+
+static fastcall void vmi_load_tls(struct thread_struct *t, unsigned int cpu)...
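A caution when reading these hunks: in mainline Linux the
X86_FEATURE_* constants are (word * 32 + bit) indexes, not masks, so a
mask-style clear of a CPUID.1:EDX bit is normally written with an
explicit shift:

  static void mask_pse(unsigned int *edx)
  {
          /* X86_FEATURE_PSE is (0 * 32 + 3): word 0, bit 3 of EDX. */
          *edx &= ~(1u << (X86_FEATURE_PSE % 32));
  }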
2007 Aug 08
2
[PATCH] x86-64: syscall/sysenter support for 32-bit apps
...clear_bit(X86_FEATURE_PGE, &d);
+ if ( !cpu_has_sep )
+ clear_bit(X86_FEATURE_SEP, &d);
+#ifdef __i386__
if ( !supervisor_mode_kernel )
clear_bit(X86_FEATURE_SEP, &d);
+#endif
if ( !IS_PRIV(current->domain) )
clear_bit(X86_FEATURE_MTRR, &d);
}
else if ( regs->eax == 0x80000001 )
{
/* Modify Feature Information. */
- if ( is_pv_32bit_vcpu(current) )
- clear_bit(X86_FEATURE_SYSCALL % 32, &d);
+#ifdef __i386__
+ clear_bit(X86_FEATURE_SYSCALL % 32, &d);
+#endif...
2010 Aug 23
1
Removing VMI kernel support from 2.6.37
..."r" (vmi_ops.cpuid));
- if (override) {
- if (disable_pse)
- *dx &= ~X86_FEATURE_PSE;
- if (disable_pge)
- *dx &= ~X86_FEATURE_PGE;
- if (disable_sep)
- *dx &= ~X86_FEATURE_SEP;
- if (disable_tsc)
- *dx &= ~X86_FEATURE_TSC;
- if (disable_mtrr)
- *dx &= ~X86_FEATURE_MTRR;
- }
-}
-
-static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
-{
- if (gdt[nr].a != new->a || gdt[nr].b != new->b)
- write_gdt_entry(gdt, nr, new, 0);
-}
-
-static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
-{
- struct desc_struc...
2007 Apr 18
1
[RFC/PATCH LGUEST X86_64 03/13] lguest64 core
...+ /* We don't have any features! */
+ clear_bit(X86_FEATURE_VME, features);
+ clear_bit(X86_FEATURE_DE, features);
+ clear_bit(X86_FEATURE_PSE, features);
+ clear_bit(X86_FEATURE_PAE, features);
+ clear_bit(X86_FEATURE_SEP, features);
+ clear_bit(X86_FEATURE_APIC, features);
+ clear_bit(X86_FEATURE_MTRR, features);
+ /* No MWAIT, either */
+ clear_bit(3, excap);
+ }
+}
+
+static unsigned long current_cr3;
+static void lguest_write_cr3(unsigned long cr3)
+{
+ hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
+ current_cr3 = cr3;
+}
+
+static u64 lguest_read_msr(unsigned int msr, int *err)
+{
+ unsigned long...
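The bare 3 in clear_bit(3, excap) is the MONITOR/MWAIT bit of CPUID
leaf 1 ECX; spelled with the Linux constant (X86_FEATURE_MWAIT is
4 * 32 + 3) the same clear reads:

  static void hide_mwait(unsigned long *excap)
  {
          clear_bit(X86_FEATURE_MWAIT % 32, excap);  /* == clear_bit(3, excap) */
  }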
2008 Nov 13
69
[PATCH 00 of 38] xen: add more Xen dom0 support
Hi Ingo,
Here's the chunk of patches to add Xen Dom0 support (it's probably
worth creating a new xen/dom0 topic branch for it).
A dom0 Xen domain is basically the same as a normal domU domain, but
it has extra privileges to directly access hardware. There are two
issues to deal with:
- translating to and from the domain's pseudo-physical addresses and
real machine
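The excerpt breaks off here; the translation it refers to is the
classic p2m/m2p mapping. A hedged sketch (the array names follow the
Xen Linux port and should be treated as assumptions):

  /* p2m: one machine frame number per pseudo-physical frame. */
  static unsigned long *phys_to_machine_mapping;

  static inline unsigned long pfn_to_mfn(unsigned long pfn)
  {
          return phys_to_machine_mapping[pfn];
  }

  /* m2p: the reverse table, provided read-only by the hypervisor. */
  extern unsigned long *machine_to_phys_mapping;

  static inline unsigned long mfn_to_pfn(unsigned long mfn)
  {
          return machine_to_phys_mapping[mfn];
  }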