Glauber de Oliveira Costa
2007-Oct-31 15:14 UTC
[PATCH 1/16] Wipe out traditional opt from x86_64 Makefile
Among other things, using -traditional as a gcc option stops us from
using macro token pasting, which is a feature we heavily rely on. There
was still a use of -traditional in arch/x86/kernel/Makefile_64, which
this patch removes. I don't see any problems building kernels on my
x86_64 box without -traditional.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 arch/x86/kernel/Makefile_64 |    1 -
 1 files changed, 0 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index 24671c3..74d3727 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -3,7 +3,6 @@
 #
 
 extra-y := head_64.o head64.o init_task.o vmlinux.lds
-EXTRA_AFLAGS := -traditional
 obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
 	ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
 	x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \
--
1.4.4.2
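The token pasting the message refers to is the preprocessor's ## operator,
which the old K&R preprocessor selected by -traditional does not implement.
A minimal sketch of the kind of macro that breaks under it (the names below
are made up for illustration, they are not from the tree):

/* Token pasting: "##" glues two tokens into one identifier.
 * gcc -traditional selects the pre-ANSI preprocessor, which has no "##",
 * so this macro cannot be expanded when that flag is set. */
#define PV_NATIVE(name)	native_##name

void native_flush_tlb(void);

static void example(void)
{
	PV_NATIVE(flush_tlb)();		/* expands to: native_flush_tlb(); */
}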
Glauber de Oliveira Costa
2007-Oct-31 15:14 UTC
[PATCH 5/16] report ring kernel is running without paravirt
When paravirtualization is disabled, the kernel is always running at
ring 0, so report that in the appropriate macro.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Jeremy Fitzhardinge <jeremy@xensource.com>
---
 include/asm-x86/segment_64.h |    4 ++++
 1 files changed, 4 insertions(+), 0 deletions(-)

diff --git a/include/asm-x86/segment_64.h b/include/asm-x86/segment_64.h
index 04b8ab2..240c1bf 100644
--- a/include/asm-x86/segment_64.h
+++ b/include/asm-x86/segment_64.h
@@ -50,4 +50,8 @@
 #define GDT_SIZE (GDT_ENTRIES * 8)
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
 
+#ifndef CONFIG_PARAVIRT
+#define get_kernel_rpl()  0
+#endif
+
 #endif
--
1.4.4.2
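For context on how the macro is meant to be consumed: code that needs to know
whether a saved segment selector belonged to the kernel can compare its RPL
bits against get_kernel_rpl() instead of hard-coding ring 0, so the same test
keeps working when a hypervisor runs the kernel in another ring. A rough
sketch under that assumption (the helper is hypothetical, not part of the
patch):

/* Hypothetical caller: with CONFIG_PARAVIRT off, get_kernel_rpl() is the
 * constant 0 defined above; a paravirt build can report the ring the
 * kernel actually runs in. */
static inline int selector_is_kernel(unsigned long cs)
{
	return (cs & 0x3) == get_kernel_rpl();	/* low two bits are the RPL */
}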
Glauber de Oliveira Costa
2007-Oct-31 15:14 UTC
[PATCH 0/7] (Re-)introducing pvops for x86_64 - Real pvops work part
Hey folks,

This is the part-of-pvops-implementation-that-is-not-exactly-a-merge.
Neat, huh? This is the majority of the work.

The first patch in the series does not really belong here. It was
already sent to lkml separately before, but I'm including it again for
a very simple reason: try to test the paravirt patches without it, and
you'll fail miserably ;-) (and it was not yet included).

Other than that, I thank you all in advance for the review.

Have fun!
Glauber de Oliveira Costa
2007-Oct-31 15:14 UTC
[PATCH 7/16] native versions for set pagetables
This patch turns the set_p{te,md,ud,gd} functions into their native_
versions. There is no need to patch any caller. Also, it adds
pte_update() and pte_update_defer() calls whenever we modify a page
table entry. This last part was coded to match i386 as closely as
possible.

Pieces of the header are moved below the #ifdef CONFIG_PARAVIRT site,
as they are users of the newly defined set_* macros.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Jeremy Fitzhardinge <jeremy@xensource.com>
---
 include/asm-x86/pgtable_64.h |  192 ++++++++++++++++++++++++++++--------------
 1 files changed, 128 insertions(+), 64 deletions(-)

diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 9b0ff47..592d613 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -57,56 +57,107 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
  */
 #define PTRS_PER_PTE	512
 
-#ifndef __ASSEMBLY__
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+
+#define set_pte native_set_pte
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+#define set_pmd native_set_pmd
+#define set_pud native_set_pud
+#define set_pgd native_set_pgd
+#define pte_clear(mm, addr, xp)				\
+do {							\
+	set_pte_at(mm, addr, xp, __pte(0));		\
+} while (0)
 
-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
-#define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pud_ERROR(e) \
-	printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
-#define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+#define pud_clear native_pud_clear
+#define pgd_clear native_pgd_clear
+#define pte_update(mm, addr, ptep)		do { } while (0)
+#define pte_update_defer(mm, addr, ptep)	do { } while (0)
 
-#define pgd_none(x)	(!pgd_val(x))
-#define pud_none(x)	(!pud_val(x))
+#endif
 
-static inline void set_pte(pte_t *dst, pte_t val)
+#ifndef __ASSEMBLY__
+
+static inline void native_set_pte(pte_t *dst, pte_t val)
 {
-	pte_val(*dst) = pte_val(val);
+	dst->pte = pte_val(val);
 }
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-static inline void set_pmd(pmd_t *dst, pmd_t val)
+static inline void native_set_pmd(pmd_t *dst, pmd_t val)
 {
-	pmd_val(*dst) = pmd_val(val);
+	dst->pmd = pmd_val(val);
 }
 
-static inline void set_pud(pud_t *dst, pud_t val)
+static inline void native_set_pud(pud_t *dst, pud_t val)
 {
-	pud_val(*dst) = pud_val(val);
+	dst->pud = pud_val(val);
 }
 
-static inline void pud_clear (pud_t *pud)
+static inline void native_set_pgd(pgd_t *dst, pgd_t val)
 {
-	set_pud(pud, __pud(0));
+	dst->pgd = pgd_val(val);
 }
 
-static inline void set_pgd(pgd_t *dst, pgd_t val)
+static inline void native_pud_clear(pud_t *pud)
 {
-	pgd_val(*dst) = pgd_val(val);
-}
+	set_pud(pud, __pud(0));
+}
 
-static inline void pgd_clear (pgd_t * pgd)
+static inline void native_pgd_clear(pgd_t *pgd)
 {
 	set_pgd(pgd, __pgd(0));
 }
 
-#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte, 0))
+static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
+				     pte_t *ptep, pte_t pteval)
+{
+	native_set_pte(ptep, pteval);
+}
+
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+				    pte_t *ptep)
+{
+	native_set_pte_at(mm, addr, ptep, __pte(0));
+}
+
+static inline void native_pmd_clear(pmd_t *pmd)
+{
+	native_set_pmd(pmd, __pmd(0));
+}
+
+
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %p(%016llx).\n", \
+	       __FILE__, __LINE__, &(e), (u64)pte_val(e))
+#define pmd_ERROR(e) \
+	printk("%s:%d: bad pmd %p(%016llx).\n", \
+	       __FILE__, __LINE__, &(e), (u64)pmd_val(e))
+#define pud_ERROR(e) \
+	printk("%s:%d: bad pud %p(%016llx).\n", \
+	       __FILE__, __LINE__, &(e), (u64)pud_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %p(%016llx).\n", \
+	       __FILE__, __LINE__, &(e), (u64)pgd_val(e))
+
+#define pgd_none(x)	(!pgd_val(x))
+#define pud_none(x)	(!pud_val(x))
 
 struct mm_struct;
 
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long addr, pte_t *ptep)
+{
+	pte_t pte = __pte(xchg(&ptep->pte, 0));
+	pte_update(mm, addr, ptep);
+	return pte;
+}
+
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep,
+					    int full)
 {
 	pte_t pte;
 	if (full) {
@@ -246,7 +297,6 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this right? */
 
@@ -255,11 +305,11 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
-	pte_t pte;
-	pte_val(pte) = (page_nr << PAGE_SHIFT);
-	pte_val(pte) |= pgprot_val(pgprot);
-	pte_val(pte) &= __supported_pte_mask;
-	return pte;
+	unsigned long pte;
+	pte = (page_nr << PAGE_SHIFT);
+	pte |= pgprot_val(pgprot);
+	pte &= __supported_pte_mask;
+	return __pte(pte);
 }
 
 /*
@@ -283,30 +333,6 @@ static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) |
 static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
 static inline pte_t pte_clrhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; }
 
-struct vm_area_struct;
-
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	if (!pte_young(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
-}
-
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	clear_bit(_PAGE_BIT_RW, &ptep->pte);
-}
-
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
-
-static inline int pmd_large(pmd_t pte) {
-	return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
-}
-
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -340,7 +366,6 @@ static inline int pmd_large(pmd_t pte) {
 			pmd_index(address))
 #define pmd_none(x)	(!pmd_val(x))
 #define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 #define	pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
 #define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
@@ -352,15 +377,53 @@ static inline int pmd_large(pmd_t pte) {
 /* page, protection -> pte */
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
-#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
- 
+
+static inline pte_t __mk_pte_huge(pte_t entry)
+{
+	unsigned long pte;
+	pte = pte_val(entry);
+	pte |= _PAGE_PRESENT | _PAGE_PSE;
+	return __pte(pte);
+}
+#define mk_pte_huge(entry) ((entry) = __mk_pte_huge(entry))
+
+#include <linux/mm_types.h>
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long addr, pte_t *ptep)
+{
+	int ret = 0;
+	if (!pte_young(*ptep))
+		return 0;
+	ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
+	pte_update(vma->vm_mm, addr, ptep);
+	return ret;
+}
+
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+				      unsigned long addr, pte_t *ptep)
+{
+	clear_bit(_PAGE_BIT_RW, &ptep->pte);
+	pte_update(mm, addr, ptep);
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
+
+static inline int pmd_large(pmd_t pte)
+{
+	return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
+}
+
 /* Change flags of a PTE */
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte_old, pgprot_t newprot)
 {
-	pte_val(pte) &= _PAGE_CHG_MASK;
-	pte_val(pte) |= pgprot_val(newprot);
-	pte_val(pte) &= __supported_pte_mask;
-	return pte;
+	unsigned long pte = pte_val(pte_old);
+	pte &= _PAGE_CHG_MASK;
+	pte |= pgprot_val(newprot);
+	pte &= __supported_pte_mask;
+	return __pte(pte);
 }
 
 #define pte_index(address) \
@@ -387,6 +450,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 		int __changed = !pte_same(*(__ptep), __entry);		\
 		if (__changed && __dirty) {				\
 			set_pte(__ptep, __entry);			\
+			pte_update_defer((__vma)->vm_mm, (__address), (__ptep)); \
 			flush_tlb_page(__vma, __address);		\
 		}							\
 		__changed;						\
--
1.4.4.2
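To see why no caller needs patching: with CONFIG_PARAVIRT enabled,
<asm/paravirt.h> is expected to supply set_pte() and friends as thin wrappers
that dispatch through replaceable hooks, with the native_* helpers above as
the defaults. A rough sketch of that shape (the struct and variable names are
assumptions made for illustration, not the actual paravirt interface):

/* Illustration only: routing set_pte()/pte_update() through hooks while
 * keeping the native versions as defaults.  The real <asm/paravirt.h>
 * may structure this differently. */
struct example_pv_mmu_ops {
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
};

extern struct example_pv_mmu_ops example_pv_ops;	/* .set_pte = native_set_pte, ... */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	example_pv_ops.set_pte(ptep, pteval);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	example_pv_ops.pte_update(mm, addr, ptep);	/* a no-op natively */
}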
Export the math_state_restore symbol, so it can be used by hypervisors.
They are commonly loaded as modules (lguest being an example).

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Jeremy Fitzhardinge <jeremy@xensource.com>
---
 arch/x86/kernel/traps_64.c |    1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index d0c2bc7..a533ecd 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -1077,6 +1077,7 @@ asmlinkage void math_state_restore(void)
 	task_thread_info(me)->status |= TS_USEDFPU;
 	me->fpu_counter++;
 }
+EXPORT_SYMBOL_GPL(math_state_restore);
 
 void __init trap_init(void)
 {
--
1.4.4.2
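The practical effect of the export is that a GPL-licensed module can now
resolve the symbol at load time. A sketch of what such a module fragment could
look like (illustrative only; math_state_restore() is the real exported
symbol, everything around it is invented for the example):

#include <linux/module.h>

asmlinkage void math_state_restore(void);

/* Hypothetical hook a hypervisor module might run when it decides the
 * current task should get the FPU back. */
static void example_give_fpu_back(void)
{
	math_state_restore();
}

static int __init example_init(void)
{
	example_give_fpu_back();
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");	/* required: the symbol is EXPORT_SYMBOL_GPL */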
Glauber de Oliveira Costa
2007-Oct-31 15:14 UTC
[PATCH 11/16] turn privileged operation into a macro in head_64.S
Under paravirt, a read of cr2 cannot be issued directly anymore. So
wrap it in a macro, defined to the operation itself when paravirt is
off, but to something else if we have paravirt in the game.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Jeremy Fitzhardinge <jeremy@xensource.com>
---
 arch/x86/kernel/head_64.S |    9 ++++++++-
 1 files changed, 8 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b6167fe..c31b1c9 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -19,6 +19,13 @@
 #include <asm/msr.h>
 #include <asm/cache.h>
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/asm-offsets.h>
+#include <asm/paravirt.h>
+#else
+#define GET_CR2_INTO_RCX movq %cr2, %rcx
+#endif
+
 /* we are not able to switch in one step to the final KERNEL ADRESS SPACE
  * because we need identity-mapped pages.
  *
@@ -267,7 +274,7 @@ ENTRY(early_idt_handler)
 	xorl %eax,%eax
 	movq 8(%rsp),%rsi	# get rip
 	movq (%rsp),%rdx
-	movq %cr2,%rcx
+	GET_CR2_INTO_RCX
 	leaq early_idt_msg(%rip),%rdi
 	call early_printk
 	cmpl $2,early_recursion_flag(%rip)
--
1.4.4.2
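For comparison, the C side of the tree handles the same privileged read with a
small inline that a paravirt hook can stand in for; GET_CR2_INTO_RCX is the
head_64.S analogue of that. A sketch of the native accessor, shown only as an
illustration of what the macro expands to in the non-paravirt case:

/* Native cr2 read as an inline-asm wrapper; under paravirt the access
 * would be routed through a hook instead of touching the register directly. */
static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("movq %%cr2, %0" : "=r" (val));
	return val;
}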