Glauber de Oliveira Costa
2007-Aug-15 05:50 UTC
[PATCH 0/25][V3] pvops_64 last round (hopefully)
This is hopefully the last iteration of the pvops64 patch.

From the last version, we have only one change, which is in
include/asm-x86_64/processor.h: there was still one survivor in raw asm.
Also, git screwed me up for some reason, and the 25th patch was missing
the new files, paravirt.{c,h}. (Although I do remember having git-add'ed
them, but who knows...)

Andrew, could you please push it to -mm?

Have fun!
This patch turns the flush_tlb routines into native versions. In case
paravirt is not defined, the native versions are defined to the names
actually used. flush_tlb_others() goes in smp.c, unless smp is not in
the game.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86_64/kernel/smp.c      |   10 +++++++++-
 include/asm-x86_64/smp.h      |    8 ++++++++
 include/asm-x86_64/tlbflush.h |   22 ++++++++++++++++++----
 3 files changed, 35 insertions(+), 5 deletions(-)

diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 673a300..39f5f6b 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -165,7 +165,7 @@ out:
 	cpu_clear(cpu, f->flush_cpumask);
 }
 
-static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+void native_flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 						unsigned long va)
 {
 	int sender;
@@ -198,6 +198,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	spin_unlock(&f->tlbstate_lock);
 }
 
+/* Overriden in paravirt.c if CONFIG_PARAVIRT */
+void __attribute__((weak)) flush_tlb_others(cpumask_t cpumask,
+					    struct mm_struct *mm,
+					    unsigned long va)
+{
+	native_flush_tlb_others(cpumask, mm, va);
+}
+
 int __cpuinit init_smp_flush(void)
 {
 	int i;
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 3f303d2..6b11114 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -19,6 +19,14 @@ extern int disable_apic;
 
 #include <asm/pda.h>
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+void native_flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+			     unsigned long va);
+#else
+#define startup_ipi_hook(apicid, rip, rsp) do { } while (0)
+#endif
+
 struct pt_regs;
 
 extern cpumask_t cpu_present_mask;
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 888eb4a..1c68cc8 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -6,21 +6,30 @@
 #include <asm/processor.h>
 #include <asm/system.h>
 
-static inline void __flush_tlb(void)
+static inline void native_flush_tlb(void)
 {
 	write_cr3(read_cr3());
 }
 
-static inline void __flush_tlb_all(void)
+static inline void native_flush_tlb_all(void)
 {
 	unsigned long cr4 = read_cr4();
 	write_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
 	write_cr4(cr4);			/* write old PGE again and flush TLBs */
 }
 
-#define __flush_tlb_one(addr) \
-	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
+static inline void native_flush_tlb_one(unsigned long addr)
+{
+	asm volatile ("invlpg (%0)" :: "r" (addr) : "memory");
+}
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb()		native_flush_tlb()
+#define __flush_tlb_all()	native_flush_tlb_all()
+#define __flush_tlb_one(addr)	native_flush_tlb_one(addr)
+#endif	/* CONFIG_PARAVIRT */
 
 /*
  * TLB flushing:
@@ -64,6 +73,11 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 		__flush_tlb();
 }
 
+static inline void native_flush_tlb_others(cpumask_t *cpumask,
+					   struct mm_struct *mm, unsigned long va)
+{
+}
+
 #else
 
 #include <asm/smp.h>
-- 
1.4.4.2
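[ The paravirt.{c,h} files are absent from this posting (see patch 0), so
the strong definition that beats the weak flush_tlb_others() above is not
shown. A minimal sketch of what that side could look like, assuming an
i386-style paravirt_ops table; the struct layout and field name here are
assumptions, not taken from the missing patch: ]

	/* Hypothetical excerpt from the missing arch/x86_64/kernel/paravirt.c.
	 * Being a strong symbol, this definition overrides the weak one in
	 * smp.c and dispatches through the ops table. */
	struct paravirt_ops {
		/* ... other hooks ... */
		void (*flush_tlb_others)(cpumask_t cpumask, struct mm_struct *mm,
					 unsigned long va);
	};

	extern struct paravirt_ops paravirt_ops;

	void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			      unsigned long va)
	{
		paravirt_ops.flush_tlb_others(cpumask, mm, va);
	}

[ The native backend would then simply set .flush_tlb_others =
native_flush_tlb_others when filling the table. ]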
Later on, the paravirt_ops patch will dereference the vm_area_struct in
asm/pgtable.h. It means this include must come after the struct
definition.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 include/linux/mm.h |   14 +++++++++-----
 1 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 655094d..c3f8561 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -35,11 +35,6 @@ extern int sysctl_legacy_va_layout;
 #define sysctl_legacy_va_layout 0
 #endif
 
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-
-#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
 /*
  * Linux kernel virtual memory manager primitives.
@@ -113,6 +108,15 @@ struct vm_area_struct {
 #endif
 };
 
+#include <asm/page.h>
+/*
+ * pgtable.h must be included after the definition of vm_area_struct.
+ * x86_64 pgtable.h is one of the dereferencers of this struct
+ */
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+
+#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 extern struct kmem_cache *vm_area_cachep;
 
 /*
-- 
1.4.4.2
Glauber de Oliveira Costa
2007-Aug-15 05:50 UTC
[PATCH 3/25][V3] irq_flags / halt routines
This patch turns the irq_flags and halt routines into the native
versions.

[ updates from v1
  * Move raw_irqs_disabled_flags outside of the PARAVIRT ifdef to avoid
    increasing the mess, suggested by Andi Kleen ]

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 include/asm-x86_64/irqflags.h |   37 ++++++++++++++++++++++++++-----------
 1 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
index 86e70fe..fe0d346 100644
--- a/include/asm-x86_64/irqflags.h
+++ b/include/asm-x86_64/irqflags.h
@@ -16,6 +16,10 @@
  * Interrupt control:
  */
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else /* PARAVIRT */
+
 static inline unsigned long __raw_local_save_flags(void)
 {
 	unsigned long flags;
@@ -31,9 +35,6 @@ static inline unsigned long __raw_local_save_flags(void)
 	return flags;
 }
 
-#define raw_local_save_flags(flags) \
-		do { (flags) = __raw_local_save_flags(); } while (0)
-
 static inline void raw_local_irq_restore(unsigned long flags)
 {
 	__asm__ __volatile__(
@@ -64,11 +65,6 @@ static inline void raw_local_irq_enable(void)
 	raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
 }
 
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
-	return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
-}
-
 #else /* CONFIG_X86_VSMP */
 
 static inline void raw_local_irq_disable(void)
@@ -81,13 +77,27 @@ static inline void raw_local_irq_enable(void)
 	__asm__ __volatile__("sti" : : : "memory");
 }
 
+#endif /* CONFIG_X86_VSMP */
+#endif /* CONFIG_PARAVIRT */
+
+/* Those are not paravirt stubs, so they live out of the PARAVIRT ifdef */
+
+#ifdef CONFIG_X86_VSMP
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
+}
+
+#else
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
 	return !(flags & X86_EFLAGS_IF);
 }
 
-#endif
+#endif /* CONFIG_X86_VSMP */
 
+#define raw_local_save_flags(flags) \
+		do { (flags) = __raw_local_save_flags(); } while (0)
 /*
  * For spinlocks, etc.:
  */
@@ -115,7 +125,7 @@ static inline int raw_irqs_disabled(void)
  * Used in the idle loop; sti takes one instruction cycle
  * to complete:
  */
-static inline void raw_safe_halt(void)
+static inline void native_raw_safe_halt(void)
 {
 	__asm__ __volatile__("sti; hlt" : : : "memory");
 }
@@ -124,11 +134,16 @@ static inline void raw_safe_halt(void)
  * Used when interrupts are already enabled or to
  * shutdown the processor:
  */
-static inline void halt(void)
+static inline void native_halt(void)
 {
 	__asm__ __volatile__("hlt": : :"memory");
 }
 
+#ifndef CONFIG_PARAVIRT
+#define raw_safe_halt	native_raw_safe_halt
+#define halt		native_halt
+#endif /* ! CONFIG_PARAVIRT */
+
 #else /* __ASSEMBLY__: */
 # ifdef CONFIG_TRACE_IRQFLAGS
 #  define TRACE_IRQS_ON	call trace_hardirqs_on_thunk
-- 
1.4.4.2
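[ With CONFIG_PARAVIRT, the native_* halt routines above stay available
and paravirt.h (not in this posting) would supply the indirected
raw_safe_halt()/halt(). A sketch of that mapping; the ops struct and
field names are assumptions modeled on the i386 paravirt_ops: ]

	/* Hypothetical excerpt from include/asm-x86_64/paravirt.h. */
	struct paravirt_ops {
		void (*safe_halt)(void);	/* natively: sti; hlt */
		void (*halt)(void);		/* natively: hlt */
		/* ... */
	};
	extern struct paravirt_ops paravirt_ops;

	static inline void raw_safe_halt(void)
	{
		paravirt_ops.safe_halt();
	}

	static inline void halt(void)
	{
		paravirt_ops.halt();
	}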
Glauber de Oliveira Costa
2007-Aug-15 05:50 UTC
[PATCH 4/25][V3] Add debugreg/load_rsp native hooks
This patch adds native hooks for the debugreg handling functions, and
for the native load_rsp0 function. The latter also has its call sites
patched.

[ updates from v2
  * there was still a raw reference to cr4 missing ]

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86_64/kernel/process.c   |    2 +-
 arch/x86_64/kernel/smpboot.c   |    2 +-
 include/asm-x86_64/processor.h |   83 +++++++++++++++++++++++++++++++--------
 3 files changed, 68 insertions(+), 19 deletions(-)

diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 2842f50..33046f1 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -595,7 +595,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/*
 	 * Reload esp0, LDT and the page table pointer:
 	 */
-	tss->rsp0 = next->rsp0;
+	load_rsp0(tss, next);
 
 	/*
 	 * Switch DS and ES.
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 32f5078..f99ced6 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -620,7 +620,7 @@ do_rest:
 	start_rip = setup_trampoline();
 
 	init_rsp = c_idle.idle->thread.rsp;
-	per_cpu(init_tss,cpu).rsp0 = init_rsp;
+	load_rsp0(&per_cpu(init_tss,cpu), &c_idle.idle->thread);
 	initial_code = start_secondary;
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 1952517..524390f 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -114,21 +114,13 @@ extern unsigned long mmu_cr4_features;
 static inline void set_in_cr4 (unsigned long mask)
 {
 	mmu_cr4_features |= mask;
-	__asm__("movq %%cr4,%%rax\n\t"
-		"orq %0,%%rax\n\t"
-		"movq %%rax,%%cr4\n"
-		: : "irg" (mask)
-		:"ax");
+	write_cr4(read_cr4() | mask);
 }
 
 static inline void clear_in_cr4 (unsigned long mask)
 {
 	mmu_cr4_features &= ~mask;
-	__asm__("movq %%cr4,%%rax\n\t"
-		"andq %0,%%rax\n\t"
-		"movq %%rax,%%cr4\n"
-		: : "irg" (~mask)
-		:"ax");
+	write_cr4(read_cr4() & ~mask);
 }
 
@@ -249,6 +241,12 @@ struct thread_struct {
 	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
+static inline void native_load_rsp0(struct tss_struct *tss,
+				    struct thread_struct *thread)
+{
+	tss->rsp0 = thread->rsp0;
+}
+
 #define INIT_MMAP \
 { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
 
@@ -264,13 +262,64 @@ struct thread_struct {
 	set_fs(USER_DS);						 \
 } while(0)
 
-#define get_debugreg(var, register)				\
-	__asm__("movq %%db" #register ", %0"			\
-		:"=r" (var))
-#define set_debugreg(value, register)				\
-	__asm__("movq %0,%%db" #register			\
-		: /* no output */				\
-		:"r" (value))
+static inline unsigned long native_get_debugreg(int regno)
+{
+	unsigned long val;
+
+	switch (regno) {
+	case 0:
+		asm("movq %%db0, %0" :"=r" (val)); break;
+	case 1:
+		asm("movq %%db1, %0" :"=r" (val)); break;
+	case 2:
+		asm("movq %%db2, %0" :"=r" (val)); break;
+	case 3:
+		asm("movq %%db3, %0" :"=r" (val)); break;
+	case 6:
+		asm("movq %%db6, %0" :"=r" (val)); break;
+	case 7:
+		asm("movq %%db7, %0" :"=r" (val)); break;
+	default:
+		val = 0; /* assign it to keep gcc quiet */
+		WARN_ON(1);
+	}
+	return val;
+}
+
+static inline void native_set_debugreg(unsigned long value, int regno)
+{
+	switch (regno) {
+	case 0:
+		asm("movq %0,%%db0" : /* no output */ :"r" (value));
+		break;
+	case 1:
+		asm("movq %0,%%db1" : /* no output */ :"r" (value));
+		break;
+	case 2:
+		asm("movq %0,%%db2" : /* no output */ :"r" (value));
+		break;
+	case 3:
+		asm("movq %0,%%db3" : /* no output */ :"r" (value));
+		break;
+	case 6:
+		asm("movq %0,%%db6" : /* no output */ :"r" (value));
+		break;
+	case 7:
+		asm("movq %0,%%db7" : /* no output */ :"r" (value));
+		break;
+	default:
+		BUG();
+	}
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_enabled() 0
+#define load_rsp0 native_load_rsp0
+#define set_debugreg(val, reg) native_set_debugreg(val, reg)
+#define get_debugreg(var, reg) (var) = native_get_debugreg(reg)
+#endif
 
 struct task_struct;
 struct mm_struct;
-- 
1.4.4.2
Glauber de Oliveira Costa
2007-Aug-15 05:51 UTC
[PATCH 7/25][V3] interrupt related native paravirt functions.
The interrupt initialization routine becomes native_init_IRQ and will be
overridden later in case paravirt is on.

[ updates from v1
  * After a talk with Jeremy Fitzhardinge, it turned out that making the
    interrupt vector global was not a good idea. So it is removed in
    this patch ]

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86_64/kernel/i8259.c |    5 ++++-
 include/asm-x86_64/irq.h   |    2 ++
 2 files changed, 6 insertions(+), 1 deletions(-)

diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 948cae6..048e3cb 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -484,7 +484,10 @@ static int __init init_timer_sysfs(void)
 
 device_initcall(init_timer_sysfs);
 
-void __init init_IRQ(void)
+/* Overridden in paravirt.c */
+void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+
+void __init native_init_IRQ(void)
 {
 	int i;
 
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
index 5006c6e..be55299 100644
--- a/include/asm-x86_64/irq.h
+++ b/include/asm-x86_64/irq.h
@@ -46,6 +46,8 @@ static __inline__ int irq_canonicalize(int irq)
 extern void fixup_irqs(cpumask_t map);
 #endif
 
+void native_init_IRQ(void);
+
 #define __ARCH_HAS_DO_SOFTIRQ 1
 
 #endif /* _ASM_IRQ_H */
-- 
1.4.4.2
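[ The weak alias means a paravirt build only has to provide a strong
init_IRQ() to win at link time. A sketch of such an override, assuming
the paravirt_ops table style sketched earlier in the thread (the field
name is an assumption): ]

	/* Hypothetical strong definition in arch/x86_64/kernel/paravirt.c;
	 * it takes precedence over the weak alias in i8259.c. */
	void __init init_IRQ(void)
	{
		paravirt_ops.init_IRQ();	/* native backend: native_init_IRQ */
	}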
Glauber de Oliveira Costa
2007-Aug-15 05:52 UTC
[PATCH 10/25][V3] export math_state_restore
Export the math_state_restore symbol so it can be used by hypervisors,
which are commonly loaded as modules.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86_64/kernel/traps.c |    1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 0388842..aacbe12 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -1081,6 +1081,7 @@ asmlinkage void math_state_restore(void)
 	task_thread_info(me)->status |= TS_USEDFPU;
 	me->fpu_counter++;
 }
+EXPORT_SYMBOL_GPL(math_state_restore);
 
 void __init trap_init(void)
 {
-- 
1.4.4.2
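[ The export matters because a hypervisor module's device-not-available
(#NM) path has to hand the FPU back to the current task exactly as the
native trap handler does. A sketch of a module-side caller; the module
and its trap plumbing are hypothetical: ]

	#include <linux/module.h>

	/* math_state_restore() is the native #NM handler body; with the
	 * GPL export, a modular hypervisor can reuse it from its own
	 * device-not-available exception path. */
	asmlinkage void math_state_restore(void);

	static void example_guest_nm_handler(void)
	{
		math_state_restore();	/* reload FPU state for current */
	}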
Glauber de Oliveira Costa
2007-Aug-15 05:52 UTC
[PATCH 11/25][V3] native versions for set pagetables
This patch turns the set_p{te,md,ud,gd} functions into their native_
versions. There is no need to patch any caller. Also, it adds
pte_update() and pte_update_defer() calls whenever we modify a page
table entry. This last part was coded to match i386 as closely as
possible. Pieces of the header are moved to below the #ifdef
CONFIG_PARAVIRT site, as they are users of the newly defined set_*
macros.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 include/asm-x86_64/pgtable.h |  152 ++++++++++++++++++++++++-----------------
 1 files changed, 89 insertions(+), 63 deletions(-)

diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index c9d8764..dd572a2 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -57,55 +57,77 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
  */
 #define PTRS_PER_PTE	512
 
-#ifndef __ASSEMBLY__
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 
-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
-#define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pud_ERROR(e) \
-	printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
-#define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+#define set_pte native_set_pte
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+#define set_pmd native_set_pmd
+#define set_pud native_set_pud
+#define set_pgd native_set_pgd
+#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
+#define pud_clear native_pud_clear
+#define pgd_clear native_pgd_clear
+#define pte_update(mm, addr, ptep)		do { } while (0)
+#define pte_update_defer(mm, addr, ptep)	do { } while (0)
 
-#define pgd_none(x)	(!pgd_val(x))
-#define pud_none(x)	(!pud_val(x))
+#endif
+
+#ifndef __ASSEMBLY__
 
-static inline void set_pte(pte_t *dst, pte_t val)
+static inline void native_set_pte(pte_t *dst, pte_t val)
 {
-	pte_val(*dst) = pte_val(val);
+	dst->pte = pte_val(val);
 }
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-static inline void set_pmd(pmd_t *dst, pmd_t val)
+static inline void native_set_pmd(pmd_t *dst, pmd_t val)
 {
-	pmd_val(*dst) = pmd_val(val);
+	dst->pmd = pmd_val(val);
 }
 
-static inline void set_pud(pud_t *dst, pud_t val)
+static inline void native_set_pud(pud_t *dst, pud_t val)
 {
-	pud_val(*dst) = pud_val(val);
+	dst->pud = pud_val(val);
 }
 
-static inline void pud_clear (pud_t *pud)
+static inline void native_set_pgd(pgd_t *dst, pgd_t val)
 {
-	set_pud(pud, __pud(0));
+	dst->pgd = pgd_val(val);
 }
-
-static inline void set_pgd(pgd_t *dst, pgd_t val)
+static inline void native_pud_clear (pud_t *pud)
 {
-	pgd_val(*dst) = pgd_val(val);
-}
+	set_pud(pud, __pud(0));
+}
 
-static inline void pgd_clear (pgd_t * pgd)
+static inline void native_pgd_clear (pgd_t * pgd)
 {
 	set_pgd(pgd, __pgd(0));
 }
 
-#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte, 0))
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
+#define pmd_ERROR(e) \
+	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
+#define pud_ERROR(e) \
+	printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+
+#define pgd_none(x)	(!pgd_val(x))
+#define pud_none(x)	(!pud_val(x))
 
 struct mm_struct;
 
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	pte_t pte = __pte(xchg(&ptep->pte, 0));
+	pte_update(mm, addr, ptep);
+	return pte;
+}
+
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 {
 	pte_t pte;
@@ -245,7 +267,6 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this right? */
 
@@ -254,11 +275,11 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
-	pte_t pte;
-	pte_val(pte) = (page_nr << PAGE_SHIFT);
-	pte_val(pte) |= pgprot_val(pgprot);
-	pte_val(pte) &= __supported_pte_mask;
-	return pte;
+	unsigned long pte;
+	pte = (page_nr << PAGE_SHIFT);
+	pte |= pgprot_val(pgprot);
+	pte &= __supported_pte_mask;
+	return __pte(pte);
 }
 
 /*
@@ -282,30 +303,6 @@ static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) |
 static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
 static inline pte_t pte_clrhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; }
 
-struct vm_area_struct;
-
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	if (!pte_young(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
-}
-
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	clear_bit(_PAGE_BIT_RW, &ptep->pte);
-}
-
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
-
-static inline int pmd_large(pmd_t pte) {
-	return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
-}
-
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -339,7 +336,6 @@ static inline int pmd_large(pmd_t pte) {
 			pmd_index(address))
 #define pmd_none(x)	(!pmd_val(x))
 #define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 #define	pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
 #define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
@@ -352,14 +348,43 @@ static inline int pmd_large(pmd_t pte) {
 /* page, protection -> pte */
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 #define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
-
+
+struct vm_area_struct;
+
+#include <linux/mm.h>
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+	int ret = 0;
+	if (!pte_young(*ptep))
+		return 0;
+	ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
+	pte_update(vma->vm_mm, addr, ptep);
+	return ret;
+}
+
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	clear_bit(_PAGE_BIT_RW, &ptep->pte);
+	pte_update(mm, addr, ptep);
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
+
+static inline int pmd_large(pmd_t pte) {
+	return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
+}
+
 /* Change flags of a PTE */
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte_old, pgprot_t newprot)
 {
-	pte_val(pte) &= _PAGE_CHG_MASK;
-	pte_val(pte) |= pgprot_val(newprot);
-	pte_val(pte) &= __supported_pte_mask;
-	return pte;
+	unsigned long pte = pte_val(pte_old);
+	pte &= _PAGE_CHG_MASK;
+	pte |= pgprot_val(newprot);
+	pte &= __supported_pte_mask;
+	return __pte(pte);
}
 
 #define pte_index(address) \
@@ -386,6 +411,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	int __changed = !pte_same(*(__ptep), __entry);			\
 	if (__changed && __dirty) {					\
 		set_pte(__ptep, __entry);				\
+		pte_update_defer((__vma)->vm_mm, (__address), (__ptep)); \
 		flush_tlb_page(__vma, __address);			\
 	}								\
 	__changed;							\
-- 
1.4.4.2
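[ Natively, pte_update()/pte_update_defer() compile away to nothing;
their point is to give a hypervisor a hook after each PTE modification,
mirroring i386. A sketch of what a backend might plug in; the hypercall
wrapper named here is an assumption, not part of the series: ]

	/* Hypothetical hypervisor-side hook: after the kernel changes a
	 * pte, notify the hypervisor so it can revalidate its shadow
	 * page tables before the change is relied upon. */
	static void example_pte_update(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
	{
		hypervisor_queue_pte_update(mm, addr, ptep); /* assumed wrapper */
	}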
With paravirtualization, hypervisors need to handle the gdt, which up to
this point was only used in very early initialization code. Hypervisors
are commonly modules, so make it an export.

[ updates from v1
  * make it an EXPORT_SYMBOL_GPL. Suggested by Arjan van de Ven ]

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86_64/kernel/x8664_ksyms.c |    6 ++++++
 1 files changed, 6 insertions(+), 0 deletions(-)

diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 77c25b3..2d3932d 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -60,3 +60,9 @@ EXPORT_SYMBOL(init_level4_pgt);
 EXPORT_SYMBOL(load_gs_index);
 
 EXPORT_SYMBOL(_proxy_pda);
+
+#ifdef CONFIG_PARAVIRT
+extern unsigned long *cpu_gdt_descr;
+/* Virtualized guests may want to use it */
+EXPORT_SYMBOL_GPL(cpu_gdt_descr);
+#endif
-- 
1.4.4.2
This patch replaces syscall_init with x86_64_syscall_init. The former
becomes a weak function that paravirt will later override when paravirt
is on.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86_64/kernel/setup64.c |    8 +++++++-
 include/asm-x86_64/proto.h   |    3 +++
 2 files changed, 10 insertions(+), 1 deletions(-)

diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 49f7342..723822c 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -153,7 +153,7 @@ __attribute__((section(".bss.page_aligned")));
 extern asmlinkage void ignore_sysret(void);
 
 /* May not be marked __init: used by software suspend */
-void syscall_init(void)
+void x86_64_syscall_init(void)
 {
 	/*
 	 * LSTAR and STAR live in a bit strange symbiosis.
@@ -172,6 +172,12 @@ void syscall_init(void)
 	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
 }
 
+/* Overriden in paravirt.c if CONFIG_PARAVIRT */
+void __attribute__((weak)) syscall_init(void)
+{
+	x86_64_syscall_init();
+}
+
 void __cpuinit check_efer(void)
 {
 	unsigned long efer;
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 31f20ad..77ed2de 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -18,6 +18,9 @@ extern void init_memory_mapping(unsigned long start, unsigned long end);
 
 extern void system_call(void);
 extern int kernel_syscall(void);
+#ifdef CONFIG_PARAVIRT
+extern void x86_64_syscall_init(void);
+#endif
 extern void syscall_init(void);
 
 extern void ia32_syscall(void);
-- 
1.4.4.2
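[ The weak syscall_init() gives paravirt the same override hook as
init_IRQ above: a hypervisor that forbids direct MSR writes can supply
its own strong version. A sketch, again assuming the paravirt_ops table
style (field name is an assumption): ]

	/* Hypothetical strong definition in paravirt.c: route syscall MSR
	 * setup through the backend; the native slot would simply point
	 * at x86_64_syscall_init. */
	void syscall_init(void)
	{
		paravirt_ops.syscall_init();
	}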
> +#ifdef CONFIG_PARAVIRT
> +extern unsigned long *cpu_gdt_descr;

No externs in .c files

Normally they should be where the variable is defined anyways.

-Andi
On 8/15/07, Andi Kleen <ak@suse.de> wrote:
> > +#ifdef CONFIG_PARAVIRT
> > +extern unsigned long *cpu_gdt_descr;
>
> No externs in .c files
>
> Normally they should be where the variable is defined
> anyways.

Given that this variable is defined in head.S, what do you propose?
AFAICT, this is the very duty of this file (x8664_ksyms.c), and the
extern is also used for other symbols like the memcpy family.

--
Glauber de Oliveira Costa.
"Free as in Freedom"
http://glommer.net

"The less confident you are, the more serious you have to act."
On Wed, Aug 15, 2007 at 11:25:43AM -0300, Glauber de Oliveira Costa wrote:
> On 8/15/07, Andi Kleen <ak@suse.de> wrote:
> > > +#ifdef CONFIG_PARAVIRT
> > > +extern unsigned long *cpu_gdt_descr;
> >
> > No externs in .c files
> >
> > Normally they should be where the variable is defined
> > anyways.
> Given that this variable is defined in head.S, what do you propose?

Move it to C code first.

-Andi
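[ What Andi is asking for would look roughly like the following: define
cpu_gdt_descr in a C file and declare it in a header, so the ksyms file
needs no local extern. A sketch only; in the tree under discussion the
symbol actually lives in head.S, and the type and size used here are
illustrative: ]

	/* e.g. in arch/x86_64/kernel/setup64.c: the per-CPU GDT
	 * descriptors, formerly assembled in head.S, defined in C. */
	#include <asm/desc.h>

	struct desc_ptr cpu_gdt_descr[NR_CPUS];
	EXPORT_SYMBOL_GPL(cpu_gdt_descr);

	/* and in include/asm-x86_64/desc.h, next to the definition: */
	extern struct desc_ptr cpu_gdt_descr[];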
Jeremy Fitzhardinge
2007-Aug-15 11:07 UTC
[PATCH 0/25][V3] pvops_64 last round (hopefully)
Glauber de Oliveira Costa wrote:
> This is hopefully the last iteration of the pvops64 patch.
>
> From the last version, we have only one change, which is in
> include/asm-x86_64/processor.h: there was still one survivor in raw asm.
> Also, git screwed me up for some reason, and the 25th patch was missing
> the new files, paravirt.{c,h}. (although I do remember having git-add'ed
> it, but who knows...)
>
> Andrew, could you please push it to -mm ?

Hm, I have a patch here to refactor the pv-ops interface into smaller
pieces. It needs a respin against 32-bit, and obviously 64-bit should be
equivalent. It's mostly a moving-things-around patch rather than a big
semantic change, so changing the users of the interface is fairly
straightforward.

I'll post it shortly, but if everyone agrees that it's the way we want
to go, we should probably start out with it on 64-bit.

    J