Displaying 20 results from an estimated 53 matches for "wrmsr_safe".
2007 Dec 04
10
[PATCH 0/10] Integrate msr.h
Hello,
This series of patches integrates the msr.h header.
What it really does is take a series of steps that let us
get rid of the duplicate code between the i386 and x86_64 versions.
With this done, achieving paravirt for x86_64 becomes really easy,
requiring just a little extra code.
The first patch was already sent a while ago but, to my knowledge, was
not yet pushed to any tree, so it is sent again. Also,
2007 Aug 09
0
[PATCH] x86-64: pv wrmsr emulation fix
...break;
case 0x30: /* WRMSR */
+ data = regs->eax;
+ res = ((u64)regs->edx << 32) | data;
switch ( regs->ecx )
{
#ifdef CONFIG_X86_64
@@ -1704,24 +1706,21 @@ static int emulate_privileged_op(struct
goto fail;
if ( wrmsr_safe(MSR_FS_BASE, regs->eax, regs->edx) )
goto fail;
- v->arch.guest_context.fs_base =
- ((u64)regs->edx << 32) | regs->eax;
+ v->arch.guest_context.fs_base = res;
break;
case MSR_GS_BASE:
if...
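The essence of the fix, restated as a sketch (identifiers come from the
excerpt above; the surrounding emulation loop is assumed): assemble the
64-bit MSR payload from the guest's EDX:EAX pair once, then reuse it,
instead of recombining the halves at every use site.

    /* Sketch only: compose the 64-bit value a guest WRMSR carries.
     * EAX holds the low half, EDX the high half; "res" then serves
     * the recorded guest state and any later consumer. */
    u32 data = regs->eax;
    u64 res  = ((u64)regs->edx << 32) | data;

    if ( wrmsr_safe(MSR_FS_BASE, regs->eax, regs->edx) )
        goto fail;
    v->arch.guest_context.fs_base = res;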
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 02/17] paravirt_ops - msr
...ccess to machine-specific registers (available on 586 and better only)
* Note: the rd* operations modify the parameters directly (without using
@@ -28,37 +84,18 @@
#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
-/* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__; \
- asm volatile("2: wrmsr ; xorl %0,%0\n" \
- "1:\n\t" \
- ".section .fixup,\"ax\"\n\t" \
- "3: movl %4,%0 ; jmp 1b\n\t" \
- ".previous\n\t" \
- ".section __ex_table...
2009 Feb 26
5
[PATCH 4/4] ACPI: Enable THERM_CONTROL MSR write for dom0 even cpufreq=xen
...ged_op(struct
case MSR_IA32_MPERF:
case MSR_IA32_APERF:
case MSR_IA32_PERF_CTL:
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+ goto fail;
+ if ( !is_cpufreq_controller(v->domain) )
+ break;
+ if ( wrmsr_safe(regs->ecx, eax, edx) != 0 )
+ goto fail;
+ break;
case MSR_IA32_THERM_CONTROL:
if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
goto fail;
- if ( !is_cpufreq_controller(v->domain) )
+ if ( d->domain_i...
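The policy the hunk encodes, restated with the excerpt's own
identifiers (a paraphrase, not the complete patch): these MSRs are
Intel-only, a domain that is not the cpufreq controller has its write
silently completed as a no-op, and only the controller's write reaches
the hardware.

    case MSR_IA32_PERF_CTL:
        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
            goto fail;                        /* inject #GP */
        if ( !is_cpufreq_controller(v->domain) )
            break;                            /* swallow the write */
        if ( wrmsr_safe(regs->ecx, eax, edx) != 0 )
            goto fail;                        /* real write faulted */
        break;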
2007 Apr 18
2
[RFC, PATCH 17/24] i386 Vmi msr patch
Fairly straightforward code motion of MSR / TSC / PMC accessors
to the sub-arch level. Note that rdmsr/wrmsr_safe functions are
not moved; Linux relies on the fault behavior here in the event
that certain MSRs are not supported on hardware, and combining
this with a VMI wrapper is overly complicated. The instructions
are virtualizable with trap and emulate, not on critical code
paths, and only used as part of...
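The fault behavior being relied on is the usual probe idiom; a minimal
sketch, assuming a hypothetical MSR and fallback (neither is from the
patch):

    /* Probe a possibly-absent MSR: wrmsr_safe() returns non-zero if
     * the write took a #GP, so the kernel can disable the feature
     * instead of oopsing. MSR_HYPOTHETICAL_FEATURE is a placeholder. */
    if (wrmsr_safe(MSR_HYPOTHETICAL_FEATURE, lo, hi) != 0)
        printk(KERN_INFO "MSR not implemented, feature disabled\n");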
2010 May 04
0
[PATCH] svm: support EFER.LMSLE for guests
.../* Initialize core's ASID handling. */
svm_asid_init(c);
+#ifdef __x86_64__
+ /*
+ * Check whether EFER.LMSLE can be written.
+ * Unfortunately there's no feature bit defined for this.
+ */
+ eax = read_efer();
+ edx = read_efer() >> 32;
+ if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 )
+ rdmsr(MSR_EFER, eax, edx);
+ if ( eax & EFER_LMSLE )
+ {
+ if ( c == &boot_cpu_data )
+ cpu_has_lmsl = 1;
+ wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);
+ }
+ else
+ {
+ if ( cpu_has_lmsl )
+...
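Since no CPUID bit advertises LMSLE, the probe is attempt-and-readback:
try to set the bit via wrmsr_safe(), read EFER back to see whether it
stuck, and XOR it back out to restore the original value. Compressed
(identifiers from the excerpt; the elided else-branch is not
reconstructed):

    eax = read_efer();
    edx = read_efer() >> 32;
    /* A non-faulting write alone proves nothing; the readback
     * confirms whether the bit is actually implemented. */
    if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 )
        rdmsr(MSR_EFER, eax, edx);
    if ( eax & EFER_LMSLE )
        wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);   /* restore EFER */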
2010 Mar 02
2
Intel: Overly restrictive test for availability of CPUID masking MSRs?
Hi,
http://xenbits.xensource.com/xen-unstable.hg?rev/aab9fbd6ffa0 from
http://lists.xensource.com/archives/html/xen-devel/2008-07/msg00518.html
restricts the CPUID masking feature to model 0x1d or model 0x17 with
stepping >=4:
if (!((model == 0x1d) || ((model == 0x17) && ((eax & 0xf) >= 4)))) {
/* fail ... */
A Xeon E5520 which is supposed to
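The obvious less restrictive alternative would be to probe the masking
MSR itself and let a fault, not a model/stepping whitelist, decide.
Sketched under assumptions (the MSR name, the rdmsr_safe() signature,
and the flag are illustrative, not what the thread settled on):

    /* Hypothetical availability probe: if reading the CPUID feature
     * mask MSR faults, masking is absent; otherwise it is usable. */
    if ( rdmsr_safe(MSR_INTEL_CPUID_FEATURE_MASK, lo, hi) == 0 )
        cpuid_masking_available = 1;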
2007 Apr 18
3
What do we need to do to hit 2.6.19?
Chris said, and I agree, that we should try to get the basic PV-ops
infrastructure stuff into 2.6.19. To me this means being able to run a
kernel natively with CONFIG_PARAVIRT and see little or no functional or
performance degradation. What do we need to do to get there:
* fix the slowdown bug, which seems to have something to do with
rdmsr/wrmsr on SMP systems
* work out how to
2007 Aug 09
1
[PATCH] svm: allow guest to use EFER.FFXSE and EFER.LMSLE
.../* Initialize core's ASID handling. */
svm_asid_init(c);
+#ifdef __x86_64__
+ /*
+ * Check whether EFER.LMSLE can be written.
+ * Unfortunately there's no feature bit defined for this.
+ */
+ eax = read_efer();
+ edx = read_efer() >> 32;
+ if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 )
+ rdmsr(MSR_EFER, eax, edx);
+ if ( !(eax & EFER_LMSLE) )
+ cpu_has_lmsl = 0;
+ else
+ wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);
+#endif
+
if ( cpu != 0 )
return 1;
@@ -1131,8 +1154,6 @@ static void svm_vmexit_do_c...
2015 Jun 17
0
[PATCH v3 03/18] x86/tsc/paravirt: Remove the read_tsc and read_tscp paravirt hooks
...ng _val = native_read_tscp(&(aux)); \
> @@ -202,8 +204,6 @@ do { \
>
> #define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
>
> -#endif /* !CONFIG_PARAVIRT */
> -
> /*
> * 64-bit version of wrmsr_safe():
> */
> diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
> index d143bfad45d7..c2be0375bcad 100644
> --- a/arch/x86/include/asm/paravirt.h
> +++ b/arch/x86/include/asm/paravirt.h
> @@ -174,19 +174,6 @@ static inline int rdmsrl_safe(unsigned msr, u...
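The "64-bit version of wrmsr_safe()" the surviving comment refers to is
the wrapper that splits a u64 into the EDX:EAX halves; roughly (a
sketch, modulo the tree's exact spelling):

    /* Split the 64-bit value into the low/high halves wrmsr_safe()
     * expects, passing its error code through unchanged. */
    static inline int wrmsrl_safe(unsigned msr, unsigned long long val)
    {
        return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
    }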
2013 Oct 10
10
[PATCH 0/4] x86: XSA-67 follow-up
1: correct LDT checks
2: add address validity check to guest_map_l1e()
3: use {rd,wr}{fs,gs}base when available (see the sketch below)
4: check for canonical address before doing page walks
Signed-off-by: Jan Beulich <jbeulich@suse.com>
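Patch 3's idea in miniature, as a sketch under assumptions (the helper
name and the cpu_has_fsgsbase predicate are illustrative, not Xen's
exact spelling): prefer the direct FSGSBASE instruction, falling back
to the MSR write on CPUs without it.

    /* Hypothetical helper: use wrfsbase where the CPU (and CR4)
     * permit it; otherwise take the traditional, slower MSR path. */
    static inline void write_fs_base(unsigned long base)
    {
        if ( cpu_has_fsgsbase )
            asm volatile ( "wrfsbase %0" :: "r" (base) );
        else
            wrmsrl(MSR_FS_BASE, base);
    }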
2007 Apr 18
0
[PATCH] paravirt_ops x86_64 , take 2
...;=c" (aux))
-
-#define rdtscpll(val, aux) do { \
- unsigned long __a, __d; \
- asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
- (val) = (__d << 32) | __a; \
-} while (0)
-
#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
@@ -125,6 +116,7 @@ static inline void cpuid(unsigned int op
*eax = op;
__cpuid(eax, ebx, ecx, edx);
}
+
/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_cou...
2007 Apr 18
2
[PATCH] Clean up x86 control register and MSR macros (corrected)
...23:27:45.000000000 -0700
@@ -1,6 +1,11 @@
#ifndef __ASM_MSR_H
#define __ASM_MSR_H
+#include <asm/msr-index.h>
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
#include <asm/errno.h>
static inline unsigned long long native_read_msr(unsigned int msr)
@@ -162,235 +167,9 @@
{
return wrmsr_safe(msr_no, l, h);
}
-#endif /* CONFIG_SMP */
-
-/* symbolic names for some interesting MSRs */
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR 0
-#define MSR_IA32_P5_MC_TYPE 1
-#define MSR_IA32_PLATFORM_ID 0x17
-#define MSR_IA32_EBL_CR_POWERON 0x2a
-
-#define MSR_IA32_APICBASE 0x1b
-#...
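For orientation, the i386 native_read_msr() declared above is
conventionally implemented like this (a sketch; on 32-bit x86 the "=A"
constraint pairs EDX:EAX into one 64-bit result):

    static inline unsigned long long native_read_msr(unsigned int msr)
    {
        unsigned long long val;
        /* rdmsr returns the low half in EAX, the high half in EDX */
        asm volatile("rdmsr" : "=A" (val) : "c" (msr));
        return val;
    }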