Displaying 20 results from an estimated 146 matches for "clts".
2008 Jan 10
1
[PATCH] Added CLTS instruction emulation in vmxassist
Trivial change to emulate the CLTS instruction in vmxassist.
Relevant file: tools/firmware/vmxassist/vm86.c
Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Kamala
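CLTS itself does one thing: it clears the TS (Task Switched, bit 3) flag in CR0, so emulating it in a vm86 monitor is mostly bookkeeping. A minimal sketch of the idea, assuming a hypothetical guest_state structure and field names (the actual vmxassist code in vm86.c differs):

/* Minimal sketch, not the vmxassist code: CLTS (opcode 0x0F 0x06) clears
 * CR0.TS (bit 3) in the guest's view of CR0.  Structure and field names
 * below are illustrative. */
#include <stdint.h>

#define CR0_TS  (1u << 3)   /* Task Switched flag */

struct guest_state {
    uint32_t cr0;   /* guest's shadowed CR0 */
    uint32_t eip;   /* guest instruction pointer */
};

static void emulate_clts(struct guest_state *g)
{
    g->cr0 &= ~CR0_TS;   /* clear TS so the next FPU insn doesn't raise #NM */
    g->eip += 2;         /* step over the two-byte 0x0F 0x06 encoding */
}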
2012 Jun 29
0
[PATCH] linux-2.6.18/x86: improve CR0 read/write handling
..._taskswitch)
+ __get_cpu_var(xen_x86_cr0) |= X86_CR0_TS;
/*
* Restore %fs and %gs if needed.
--- a/arch/i386/kernel/traps-xen.c
+++ b/arch/i386/kernel/traps-xen.c
@@ -1057,6 +1057,7 @@ asmlinkage void math_state_restore(struc
struct task_struct *tsk = thread->task;
/* NB. 'clts' is done for us by Xen during virtual trap. */
+ __get_cpu_var(xen_x86_cr0) &= ~X86_CR0_TS;
if (!tsk_used_math(tsk))
init_fpu(tsk);
restore_fpu(tsk);
--- a/arch/x86_64/kernel/process-xen.c
+++ b/arch/x86_64/kernel/process-xen.c
@@ -574,6 +574,8 @@ __switch_to(struct task_struct...
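The excerpt is only a fragment, but the pattern is visible: the kernel keeps a software copy of CR0 per CPU (xen_x86_cr0) and updates it at the two points where the TS bit changes. A standalone sketch of that bookkeeping, with the per-CPU machinery reduced to a plain variable and the function names chosen for illustration only:

#include <stdint.h>

#define X86_CR0_TS  (1u << 3)

static uint32_t xen_x86_cr0;               /* per-CPU in the real patch */

/* Task switch away from an FPU-using task: TS is (re)set lazily. */
static void on_task_switch(void)
{
    xen_x86_cr0 |= X86_CR0_TS;
}

/* math_state_restore(): Xen already executed the real CLTS during the
 * virtual trap, so only the software shadow needs updating. */
static void on_math_state_restore(void)
{
    xen_x86_cr0 &= ~X86_CR0_TS;
}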
2007 Apr 18
2
[PATCH] Use correct macros in raid code, not raw asm
...ff -r 4ff048622391 drivers/md/raid6x86.h
--- a/drivers/md/raid6x86.h Thu Dec 28 16:52:54 2006 +1100
+++ b/drivers/md/raid6x86.h Fri Dec 29 10:09:38 2006 +1100
@@ -75,13 +75,14 @@ static inline unsigned long raid6_get_fp
unsigned long cr0;
preempt_disable();
- asm volatile("mov %%cr0,%0 ; clts" : "=r" (cr0));
+ cr0 = read_cr0();
+ clts();
return cr0;
}
static inline void raid6_put_fpu(unsigned long cr0)
{
- asm volatile("mov %0,%%cr0" : : "r" (cr0));
+ write_cr0(cr0);
preempt_enable();
}
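Assembled from the diff, the two helpers end up as below; read_cr0(), clts() and write_cr0() are the architecture accessors of that kernel generation, which lets paravirtualized kernels intercept the CR0 accesses instead of faulting on raw instructions, and is the point of the patch.

static inline unsigned long raid6_get_fpu(void)
{
    unsigned long cr0;

    preempt_disable();      /* keep the FPU context on this CPU */
    cr0 = read_cr0();       /* remember CR0 so TS can be restored later */
    clts();                 /* clear TS: SSE/MMX use won't raise #NM */
    return cr0;
}

static inline void raid6_put_fpu(unsigned long cr0)
{
    write_cr0(cr0);         /* restore CR0, possibly re-setting TS */
    preempt_enable();
}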
2007 Apr 18
0
[RFC, PATCH 13/24] i386 Vmi system header
...sk_struct *prev, struct task_struct *next));
@@ -83,69 +85,8 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t"
#define savesegment(seg, value) \
asm volatile("mov %%" #seg ",%0":"=rm" (value))
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
-#define read_cr0() ({ \
- unsigned int __dummy; \
- __asm__ __volatile__( \
- "movl %%cr0,%0\n\t" \
- :"=r" (__dummy)); \
- __dummy; \
-})
-#define write_cr0(x) \
- __asm__ __volatile__("movl %0,%%cr0": :"r" (x));...
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 03/17] paravirt_ops - system routines
...==========================================
--- clean-start.orig/include/asm-x86_64/system.h
+++ clean-start/include/asm-x86_64/system.h
@@ -65,46 +65,84 @@ extern void load_gs_index(unsigned);
".previous" \
: :"r" (value), "r" (0))
+static inline void native_clts(void)
+{
+ asm volatile ("clts");
+}
+
+static inline unsigned long native_read_cr0(void)
+{
+ unsigned long val;
+ asm volatile("movq %%cr0,%0\n\t" :"=r" (val));
+ return val;
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+ asm volatile("movq %0...
2007 Apr 18
5
[PATCH] paravirt.h
...d_cr0() ({ \
unsigned int __dummy; \
__asm__ __volatile__( \
@@ -133,16 +136,17 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t"
#define write_cr4(x) \
__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
-#define stts() write_cr0(8 | read_cr0())
-
-#endif /* __KERNEL__ */
-
#define wbinvd() \
__asm__ __volatile__ ("wbinvd": : :"memory")
+
+/* Clear the 'TS' bit */
+#define clts() __asm__ __volatile__ ("clts")
+#endif...
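clts() has no inverse instruction, which is why the stts() removed in this hunk goes through CR0: the literal 8 is simply X86_CR0_TS (bit 3). Written symbolically, the same macro reads:

#define X86_CR0_TS 0x00000008                      /* bit 3 of CR0 */
#define stts()     write_cr0(X86_CR0_TS | read_cr0())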
2015 Mar 16
0
[PATCH 9/9] qspinlock, x86, kvm: Implement KVM support for paravirt qspinlock
....unlock_kick = paravirt_nop,
-#endif
+#endif /* !CONFIG_QUEUE_SPINLOCK */
+#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -12,6 +12,10 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %c
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUE_SPINLOCKS)
+DEF_NATIVE(pv_lock_ops, queue_spin_unlock, "movb $0, (%eax)");
+#endif
+
unsigned paravirt_patch_ident_32(void *insnbuf, unsigne...
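The reason a one-instruction template like "movb $0, (%eax)" can stand in for the whole unlock path is that the native queued-spinlock unlock is just a release store of zero to the locked byte. An illustrative C equivalent, with the lock layout simplified to the single byte that matters:

#include <stdatomic.h>

struct qspinlock_byte { _Atomic unsigned char locked; };

static inline void native_queue_spin_unlock(struct qspinlock_byte *lock)
{
    /* the C-level equivalent of the patched "movb $0, (%eax)" */
    atomic_store_explicit(&lock->locked, 0, memory_order_release);
}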
2007 Oct 09
2
[PATCH RFC REPOST 1/2] paravirt: refactor struct paravirt_ops into smaller pv_*_ops
...ave_fl, "pushf; pop %eax");
-DEF_NATIVE(iret, "iret");
-DEF_NATIVE(irq_enable_sysexit, "sti; sysexit");
-DEF_NATIVE(read_cr2, "mov %cr2, %eax");
-DEF_NATIVE(write_cr3, "mov %eax, %cr3");
-DEF_NATIVE(read_cr3, "mov %cr3, %eax");
-DEF_NATIVE(clts, "clts");
-DEF_NATIVE(read_tsc, "rdtsc");
-
-DEF_NATIVE(ud2a, "ud2a");
+#define DEF_NATIVE(ops, name, code) \
+ extern const char start_##ops##_##name[], end_##ops##_##name[]; \
+ asm("start_" #ops "_" #name ": " code "; end_"...
2007 Sep 28
2
[PATCH RFC] paravirt_ops: refactor struct paravirt_ops into smaller pv_*_ops
...ave_fl, "pushf; pop %eax");
-DEF_NATIVE(iret, "iret");
-DEF_NATIVE(irq_enable_sysexit, "sti; sysexit");
-DEF_NATIVE(read_cr2, "mov %cr2, %eax");
-DEF_NATIVE(write_cr3, "mov %eax, %cr3");
-DEF_NATIVE(read_cr3, "mov %cr3, %eax");
-DEF_NATIVE(clts, "clts");
-DEF_NATIVE(read_tsc, "rdtsc");
-
-DEF_NATIVE(ud2a, "ud2a");
+#define DEF_NATIVE(ops, name, code) \
+ extern const char start_##ops##_##name[], end_##ops##_##name[]; \
+ asm("start_" #ops "_" #name ": " code "; end_"...
2015 Jun 17
0
[PATCH v3 03/18] x86/tsc/paravirt: Remove the read_tsc and read_tscp paravirt hooks
...c
> +++ b/arch/x86/kernel/paravirt_patch_32.c
> @@ -10,7 +10,6 @@ DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
> DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
> DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
> DEF_NATIVE(pv_cpu_ops, clts, "clts");
> -DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
>
> #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
> DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
> @@ -52,7 +51,6 @@ unsigned native_patch...
2007 Jul 09
1
[PATCH RFC] first cut at splitting up paravirt_ops
...switch(type) {
-#define SITE(x) case PARAVIRT_PATCH(x): start = start_##x; end = end_##x; goto patch_site
- SITE(irq_disable);
- SITE(irq_enable);
- SITE(restore_fl);
- SITE(save_fl);
- SITE(iret);
- SITE(irq_enable_sysexit);
- SITE(read_cr2);
- SITE(read_cr3);
- SITE(write_cr3);
- SITE(clts);
- SITE(read_tsc);
+#define SITE(ops,x) \
+ case PARAVIRT_PATCH(ops.x): \
+ start = start_##x; end = end_##x; goto patch_site
+
+ SITE(pv_irq_ops, irq_disable);
+ SITE(pv_irq_ops, irq_enable);
+ SITE(pv_irq_ops, restore_fl);
+ SITE(pv_irq_ops, save_fl);
+ SITE(pv_cpu_ops, iret);
+...
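After the refactor, each SITE(ops, x) case still ends in "goto patch_site", where the selected start/end pair is copied over the call site. An illustrative helper, loosely modelled on the kernel's paravirt_patch_insns(); the name and the "return 0 to keep the indirect call" convention here are assumptions, not quoted from the patch:

#include <stddef.h>
#include <string.h>

static unsigned patch_native_insns(void *insnbuf, unsigned maxlen,
                                   const char *start, const char *end)
{
    size_t len = (size_t)(end - start);

    if (len > maxlen)              /* snippet doesn't fit the call site */
        return 0;                  /* caller keeps the indirect call */
    memcpy(insnbuf, start, len);   /* splice the native code in place */
    return (unsigned)len;
}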