search for: mmu_cr4_featur

Displaying 20 results from an estimated 33 matches for "mmu_cr4_featur".

2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 03/17] paravirt_ops - system routines
...cache_leaves;

 #define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */

-/*
- * Save the cr4 feature set we're using (ie
- * Pentium 4MB enable and PPro Global page
- * enable), so that any CPU's that boot up
- * after us can get the correct flags.
- */
-extern unsigned long mmu_cr4_features;
-
-static inline void set_in_cr4 (unsigned long mask)
-{
-	mmu_cr4_features |= mask;
-	__asm__("movq %%cr4,%%rax\n\t"
-		"orq %0,%%rax\n\t"
-		"movq %%rax,%%cr4\n"
-		: : "irg" (mask)
-		:"ax");
-}
-
-static inline void clear_in_cr4 (unsigned lon...
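The helpers removed above are the classic open-coded CR4 read-modify-write, deleted so that paravirt_ops can route CR4 updates through a hook instead. As a rough orientation, here is a minimal standalone sketch of the pattern being replaced (generic names, not the kernel's exact helpers):

	/*
	 * Minimal sketch, x86-64: shadow CR4 in a variable so later-booting
	 * CPUs can copy it, and OR new feature bits into the live register.
	 */
	static unsigned long mmu_cr4_shadow;	/* stand-in for mmu_cr4_features */

	static inline unsigned long sketch_read_cr4(void)
	{
		unsigned long cr4;
		asm volatile("movq %%cr4, %0" : "=r" (cr4));
		return cr4;
	}

	static inline void sketch_write_cr4(unsigned long cr4)
	{
		asm volatile("movq %0, %%cr4" : : "r" (cr4));
	}

	static inline void sketch_set_in_cr4(unsigned long mask)
	{
		mmu_cr4_shadow |= mask;	/* remember for CPUs that boot later */
		sketch_write_cr4(sketch_read_cr4() | mask);
	}

Under paravirt_ops the two asm wrappers become indirect calls, so the same helper works natively or under a hypervisor.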
2007 Apr 18
0
[RFC, PATCH 18/24] i386 Vmi tlbflush header
...e__( \
+		"movl %1, %%cr4;  # turn off PGE \n" \
+		"movl %%cr3, %0; \n" \
+		"movl %0, %%cr3;  # flush TLB \n" \
+		"movl %2, %%cr4;  # turn PGE back on \n" \
+		: "=&r" (tmpreg) \
+		: "r" (mmu_cr4_features & ~X86_CR4_PGE), \
+		  "r" (mmu_cr4_features) \
+		: "memory"); \
+	} while (0)
+
+#define __flush_tlb_single(addr) \
+	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+
+#endif /* _MACH_TLBFLUSH_H */
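The macro above flushes global TLB entries by briefly clearing CR4.PGE and reloading CR3: clearing PGE invalidates global entries, and the CR3 write flushes the rest. The same trick as a standalone 32-bit function (a sketch; the constant and names mirror the snippet, not the kernel headers):

	#define SKETCH_X86_CR4_PGE	0x0080	/* CR4.PGE: global pages enable */

	static inline void sketch_flush_tlb_global(unsigned long cr4)
	{
		unsigned long tmpreg;

		asm volatile("movl %1, %%cr4\n\t"	/* turn off PGE */
			     "movl %%cr3, %0\n\t"
			     "movl %0, %%cr3\n\t"	/* flush TLB */
			     "movl %2, %%cr4"		/* turn PGE back on */
			     : "=&r" (tmpreg)
			     : "r" (cr4 & ~SKETCH_X86_CR4_PGE), "r" (cr4)
			     : "memory");
	}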
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 08/17] paravirt_ops - memory managment
...===================================================================
--- clean-start.orig/arch/x86_64/mm/init.c
+++ clean-start/arch/x86_64/mm/init.c
@@ -384,7 +384,7 @@ void __meminit init_memory_mapping(unsig
 	}
 	if (!after_bootmem)
-		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
+		mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
 }
@@ -398,7 +398,7 @@ void __cpuinit zap_low_mappings(int cpu)
 	 * For AP's, zap the low identity mappings by changing the cr3
 	 * to init_level4_pgt and doing local flush tlb all
 	 */
-	asm volatile("movq %0,%%cr3"...
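The first hunk is the whole point of the series: the raw movq becomes read_cr4(), which paravirt_ops can dispatch through a function pointer. A toy sketch of that indirection, with invented names (the real pv_ops structure is much larger):

	/* Sketch: the native backend reads CR4 directly; a hypervisor
	 * backend would install its own function in the ops table. */
	struct sketch_pv_ops {
		unsigned long (*read_cr4)(void);
	};

	static unsigned long sketch_native_read_cr4(void)
	{
		unsigned long cr4;
		asm volatile("movq %%cr4, %0" : "=r" (cr4));
		return cr4;
	}

	static struct sketch_pv_ops sketch_ops = {
		.read_cr4 = sketch_native_read_cr4,	/* swapped under a hypervisor */
	};

	#define sketch_read_cr4()	(sketch_ops.read_cr4())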
2013 Sep 23
57
[PATCH RFC v13 00/20] Introduce PVH domU support
This patch series is a reworking of a series developed by Mukesh Rathor at Oracle. The entirety of the design and development was done by him; I have only reworked, reorganized, and simplified things in a way that I think makes more sense. The vast majority of the credit for this effort therefore goes to him. This version is labelled v13 because it is based on his most recent series, v11.
2007 Apr 18
0
[PATCH 9/9] Vmi smp fixes.patch
...rt_esp)
 {
+	struct vmi_ap_state ap;
+
 	/* Default everything to zero.  This is fine for most GPRs. */
 	memset(&ap, 0, sizeof(struct vmi_ap_state));
@@ -570,7 +571,7 @@ vmi_startup_ipi_hook(int phys_apicid, un
 	/* Protected mode, paging, AM, WP, NE, MP. */
 	ap.cr0 = 0x80050023;
 	ap.cr4 = mmu_cr4_features;
-	vmi_ops.set_initial_ap_state(__pa(&ap), phys_apicid);
+	vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
 }
 #endif

diff -r baf2e278a482 arch/i386/kernel/vmitime.c
--- a/arch/i386/kernel/vmitime.c	Thu Mar 01 18:08:53 2007 -0800
+++ b/arch/i386/kernel/vmitime.c	Thu Mar 01 18:08:53...
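The 0x80050023 written to ap.cr0 is exactly the bit set named in the comment. Decomposed for reference (the bit positions are architectural; the constant comes from the snippet):

	#define CR0_PE	(1UL << 0)	/* protected mode enable */
	#define CR0_MP	(1UL << 1)	/* monitor coprocessor */
	#define CR0_NE	(1UL << 5)	/* native FPU error reporting */
	#define CR0_WP	(1UL << 16)	/* write protect in supervisor mode */
	#define CR0_AM	(1UL << 18)	/* alignment mask */
	#define CR0_PG	(1UL << 31)	/* paging enable */

	_Static_assert((CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_MP | CR0_PE)
		       == 0x80050023, "matches the ap.cr0 constant in the patch");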
2007 Aug 15
13
[PATCH 0/25][V3] pvops_64 last round (hopefully)
This is hopefully the last iteration of the pvops64 patch. From the last version, we have only one change, in include/asm-x86_64/processor.h: there was still one survivor in raw asm. Also, git screwed me up for some reason, and the 25th patch was missing the new files, paravirt.{c,h} (although I do remember having git-add'ed them, but who knows...). Andrew, could you please push it
2007 Jun 27
0
[PATCH 1/10] Provide basic Xen PM infrastructure
...$3, %rdi
+	xor	%eax, %eax
+
+#else /* !defined(__x86_64__) */
+
+	push	$3
+
+#endif
+
+	/* enter sleep state physically */
+	call	acpi_enter_sleep_state
+	jmp	__ret_point
+
+	.align 16
+	.globl __ret_point
+__ret_point:
+
+	/* mmu_cr4_features contains latest cr4 setting */
+	mov	REF(mmu_cr4_features), GREG(ax)
+	mov	GREG(ax), %cr4
+
+	mov	REF(saved_cr3), GREG(ax)
+	mov	GREG(ax), %cr3
+
+	mov	REF(saved_cr0), GREG(ax)
+	mov	GREG(ax), %cr0
+
+	lgdt	REF(saved_gdt)...
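The __ret_point stub restores cr4 from mmu_cr4_features and cr0/cr3 from saved slots on resume. The snippet only shows the restore side; a C sketch of the save side it implies, assuming saved_cr0/saved_cr3 are filled in before entering the sleep state:

	static unsigned long saved_cr0, saved_cr3;

	static void sketch_save_processor_state(void)
	{
		asm volatile("mov %%cr0, %0" : "=r" (saved_cr0));
		asm volatile("mov %%cr3, %0" : "=r" (saved_cr3));
		/* no separate cr4 save: per the snippet's comment,
		 * mmu_cr4_features already tracks the latest cr4 value */
	}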
2007 Apr 18
2
[RFC, PATCH 9/24] i386 Vmi smp support
...msr(MSR_EFER, l, h);
+		ap.efer = (unsigned long long) h << 32 | l;
+	}
+#endif
+
+	ap.cr3 = __pa(swapper_pg_dir);
+	/* Protected mode, paging, AM, WP, NE, MP. */
+	ap.cr0 = 0x80050023;
+	ap.cr4 = mmu_cr4_features;
+
+	vmi_set_initial_ap_state(__pa(&ap), phys_apicid);
+	}
+}
+
+void __init smpboot_pre_start_secondary_hook(void)
+{
+	if (vmi_hypervisor_found()) {
+		*(unsigned long *) trampoline_base = 0xa5a5a5a5;
+	}
+}
+
+static __init...
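The elided first line reads the EFER MSR with rdmsr, which returns the value in EDX:EAX; the two halves are then stitched into the 64-bit ap.efer. A self-contained sketch of that read (the MSR index is architectural):

	#define MSR_EFER	0xc0000080	/* extended feature enable register */

	static inline unsigned long long sketch_read_efer(void)
	{
		unsigned int lo, hi;

		/* rdmsr: %ecx selects the MSR, result arrives in %edx:%eax */
		asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (MSR_EFER));
		return (unsigned long long)hi << 32 | lo;
	}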
2020 Aug 24
0
[PATCH v6 69/76] x86/realmode: Setup AP jump table
...tive()) {
+		if (sev_es_setup_ap_jump_table(real_mode_header))
+			panic("Failed to update SEV-ES AP Jump Table");
+	}
+#endif
+}
+
 static void __init setup_real_mode(void)
 {
 	u16 real_mode_seg;
@@ -104,13 +118,13 @@ static void __init setup_real_mode(void)
 	*trampoline_cr4_features = mmu_cr4_features;
 	trampoline_header->flags = 0;
-	if (sme_active())
-		trampoline_header->flags |= TH_FLAGS_SME_ACTIVE;

 	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
 	trampoline_pgd[0] = trampoline_pgd_entry.pgd;
 	trampoline_pgd[511] = init_top_pgt[511].pgd;
 #endif
+
+	sme_s...
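Here setup_real_mode() copies mmu_cr4_features into the trampoline so each AP can load the correct CR4 before jumping into the kernel proper. A sketch of that hand-off with an illustrative layout (the real trampoline_header differs):

	/* Hypothetical header shape; only the cr4 hand-off matters here. */
	struct sketch_trampoline_header {
		unsigned long start;
		unsigned long cr4;	/* APs load this into %cr4 early in boot */
		unsigned long flags;	/* TH_FLAGS_* bits */
	};

	static void sketch_setup_real_mode(struct sketch_trampoline_header *th,
					   unsigned long cr4_features)
	{
		th->cr4 = cr4_features;	/* mirrors *trampoline_cr4_features = ... */
		th->flags = 0;		/* the SME flag now gets set elsewhere */
	}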
2013 Aug 26
5
[RFC PATCH 0/2] GLOBAL() macro for asm code.
Hello, This series has been split into two patches, one for arm and one for x86. I figured that this was easier than doing it as a single combined patch, especially as the changes are functionally independent. x86 has been boot tested, but arm has not even been compile tested, as I lack a suitable cross compiler. However, the changes are just text replacement, so I don't expect any issues. The
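The patches themselves are not quoted in this result, but a GLOBAL() macro for assembly files is plausibly along these lines (a hypothetical definition, shown only to make the "just text replacement" remark concrete):

	/* Hypothetical: declare a symbol global and open its definition in
	 * one step, replacing the two-line ".globl name" / "name:" pattern. */
	#define GLOBAL(name)	\
		.globl name;	\
		name:

	/* in a .S file:  GLOBAL(__ret_point)  instead of the two lines */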
2013 Oct 10
10
[PATCH 0/4] x86: XSA-67 follow-up
1: correct LDT checks
2: add address validity check to guest_map_l1e()
3: use {rd,wr}{fs,gs}base when available (see the sketch after this list)
4: check for canonical address before doing page walks
Signed-off-by: Jan Beulich <jbeulich@suse.com>
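For item 3, {rd,wr}{fs,gs}base are the FSGSBASE instructions, which read and write the FS/GS base registers directly instead of going through MSRs; they require 64-bit mode and CR4.FSGSBASE. A minimal sketch of such wrappers (illustrative names, not Xen's):

	static inline unsigned long sketch_rdfsbase(void)
	{
		unsigned long base;
		asm volatile("rdfsbase %0" : "=r" (base));
		return base;
	}

	static inline void sketch_wrgsbase(unsigned long base)
	{
		asm volatile("wrgsbase %0" : : "r" (base));
	}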
2012 Dec 10
26
[PATCH 00/11] Add virtual EPT support Xen.
From: Zhang Xiantao <xiantao.zhang@intel.com> With virtual EPT support, the L1 hypervisor can use EPT hardware for the L2 guest's memory virtualization. In this way, L2 guest performance can be improved sharply. According to our testing, some benchmarks show a > 5x performance gain. Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com> Zhang Xiantao (11):
2007 Apr 18
2
[PATCH] x86_64 paravirt_ops port
...======================================================
--- linux-2.6.19-quilt.orig/arch/x86_64/mm/init.c
+++ linux-2.6.19-quilt/arch/x86_64/mm/init.c
@@ -384,7 +384,7 @@ void __meminit init_memory_mapping(unsig
 	}
 	if (!after_bootmem)
-		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
+		mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
 }
@@ -398,7 +398,7 @@ void __cpuinit zap_low_mappings(int cpu)
 	 * For AP's, zap the low identity mappings by changing the cr3
 	 * to init_level4_pgt and doing local flush tlb all
 	 */
-	asm volatile("movq %0,%%cr3"...