search for: vmi_ireg1

Displaying 18 results from an estimated 18 matches for "vmi_ireg1".

2007 Apr 18
3
[RFC, PATCH 10/24] i386 Vmi descriptor changes
...h@vmware.com + * + */ + +#ifndef __MACH_DESC_H +#define __MACH_DESC_H + +#include <vmi.h> + +#if !defined(CONFIG_X86_VMI) +# error invalid sub-arch include +#endif + +static inline void load_gdt(VMI_DTR *const dtr) +{ + vmi_wrap_call( + SetGDT, "lgdt (%0)", + VMI_NO_OUTPUT, + 1, VMI_IREG1 (dtr), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void load_idt(VMI_DTR *const dtr) +{ + vmi_wrap_call( + SetIDT, "lidt (%0)", + VMI_NO_OUTPUT, + 1, VMI_IREG1 (dtr), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline voi...
2007 Apr 18
3
[RFC, PATCH 10/24] i386 Vmi descriptor changes
...h@vmware.com + * + */ + +#ifndef __MACH_DESC_H +#define __MACH_DESC_H + +#include <vmi.h> + +#if !defined(CONFIG_X86_VMI) +# error invalid sub-arch include +#endif + +static inline void load_gdt(VMI_DTR *const dtr) +{ + vmi_wrap_call( + SetGDT, "lgdt (%0)", + VMI_NO_OUTPUT, + 1, VMI_IREG1 (dtr), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void load_idt(VMI_DTR *const dtr) +{ + vmi_wrap_call( + SetIDT, "lidt (%0)", + VMI_NO_OUTPUT, + 1, VMI_IREG1 (dtr), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline voi...
2007 Apr 18
0
[RFC, PATCH 13/24] i386 Vmi system header
...., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to zach@vmware.com + * + */ + + +#ifndef _MACH_SYSTEM_H +#define _MACH_SYSTEM_H + +#include <vmi.h> + +static inline void write_cr0(const u32 val) +{ + vmi_wrap_call( + SetCR0, "mov %0, %%cr0", + VMI_NO_OUTPUT, + 1, VMI_IREG1(val), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void write_cr2(const u32 val) +{ + vmi_wrap_call( + SetCR2, "mov %0, %%cr2", + VMI_NO_OUTPUT, + 1, VMI_IREG1(val), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void...
2007 Apr 18
0
[RFC, PATCH 13/24] i386 Vmi system header
...., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to zach@vmware.com + * + */ + + +#ifndef _MACH_SYSTEM_H +#define _MACH_SYSTEM_H + +#include <vmi.h> + +static inline void write_cr0(const u32 val) +{ + vmi_wrap_call( + SetCR0, "mov %0, %%cr0", + VMI_NO_OUTPUT, + 1, VMI_IREG1(val), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void write_cr2(const u32 val) +{ + vmi_wrap_call( + SetCR2, "mov %0, %%cr2", + VMI_NO_OUTPUT, + 1, VMI_IREG1(val), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void...
2007 Apr 18
0
[RFC, PATCH 18/24] i386 Vmi tlbflush header
...bridge, MA 02139, USA. + * + * Send feedback to zach@vmware.com + * + */ + + +#ifndef _MACH_TLBFLUSH_H +#define _MACH_TLBFLUSH_H + +#include <vmi.h> + +static inline void __flush_tlb(void) +{ + vmi_wrap_call( + FlushTLB, "mov %%cr3, %%eax; mov %%eax, %%cr3", + VMI_NO_OUTPUT, + 1, VMI_IREG1(VMI_FLUSH_TLB), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "eax", "memory")); +} + +static inline void __flush_tlb_global(void) +{ + vmi_wrap_call( + FlushTLB, "mov %%cr4, %%eax; \n" + "andb $0x7f, %%al; \n" + "mov %%eax, %%cr4; \n" + "...
2007 Apr 18
0
[RFC, PATCH 18/24] i386 Vmi tlbflush header
...bridge, MA 02139, USA. + * + * Send feedback to zach@vmware.com + * + */ + + +#ifndef _MACH_TLBFLUSH_H +#define _MACH_TLBFLUSH_H + +#include <vmi.h> + +static inline void __flush_tlb(void) +{ + vmi_wrap_call( + FlushTLB, "mov %%cr3, %%eax; mov %%eax, %%cr3", + VMI_NO_OUTPUT, + 1, VMI_IREG1(VMI_FLUSH_TLB), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "eax", "memory")); +} + +static inline void __flush_tlb_global(void) +{ + vmi_wrap_call( + FlushTLB, "mov %%cr4, %%eax; \n" + "andb $0x7f, %%al; \n" + "mov %%eax, %%cr4; \n" + "...
2007 Apr 18
1
[RFC, PATCH 12/24] i386 Vmi processor header
...x; + + vmi_cpuid(op, &eax, &ebx, &ecx, &edx); + return edx; +} + +#define flush_deferred_cpu_state() vmi_flush_deferred_calls(0) + +static inline void arch_update_kernel_stack(void *tss, u32 stack) +{ + vmi_wrap_call( + UpdateKernelStack, "", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(tss), VMI_IREG2(stack)), + VMI_CLOBBER(ZERO_RETURNS)); +} + +static inline void set_debugreg(const u32 val, const int num) +{ + vmi_wrap_call( + SetDR, "movl %1, %%db%c2", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(num), VMI_IREG2(val), VMI_IMM (num)), + VMI_CLOBBER(ZERO_RETURNS)); +} + +...
2007 Apr 18
1
[RFC, PATCH 12/24] i386 Vmi processor header
...x; + + vmi_cpuid(op, &eax, &ebx, &ecx, &edx); + return edx; +} + +#define flush_deferred_cpu_state() vmi_flush_deferred_calls(0) + +static inline void arch_update_kernel_stack(void *tss, u32 stack) +{ + vmi_wrap_call( + UpdateKernelStack, "", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(tss), VMI_IREG2(stack)), + VMI_CLOBBER(ZERO_RETURNS)); +} + +static inline void set_debugreg(const u32 val, const int num) +{ + vmi_wrap_call( + SetDR, "movl %1, %%db%c2", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(num), VMI_IREG2(val), VMI_IMM (num)), + VMI_CLOBBER(ZERO_RETURNS)); +} + +...
2007 Apr 18
0
[RFC, PATCH 15/24] i386 Vmi apic header
...+#define __ASM_MACH_APICOPS_H + +#ifdef CONFIG_X86_LOCAL_APIC +#include <vmi.h> + +static inline void apic_write(unsigned long reg, unsigned long value) +{ + void *addr = (void *)(APIC_BASE + reg); + vmi_wrap_call( + APICWrite, "movl %1, (%0)", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(addr), VMI_IREG2(value)), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +#define apic_write_atomic(r,v) apic_write(r,v) + +static inline unsigned long apic_read(unsigned long reg) +{ + unsigned long value; + void *addr = (void *)(APIC_BASE + reg); + vmi_wrap_call( + APICR...
2007 Apr 18
0
[RFC, PATCH 15/24] i386 Vmi apic header
...+#define __ASM_MACH_APICOPS_H + +#ifdef CONFIG_X86_LOCAL_APIC +#include <vmi.h> + +static inline void apic_write(unsigned long reg, unsigned long value) +{ + void *addr = (void *)(APIC_BASE + reg); + vmi_wrap_call( + APICWrite, "movl %1, (%0)", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(addr), VMI_IREG2(value)), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +#define apic_write_atomic(r,v) apic_write(r,v) + +static inline unsigned long apic_read(unsigned long reg) +{ + unsigned long value; + void *addr = (void *)(APIC_BASE + reg); + vmi_wrap_call( + APICR...
2007 Apr 18
0
[RFC, PATCH 23/24] i386 Vmi timer patch
..., %%eax;" + "xor %%edx, %%edx;", + VMI_OREG64 (ret), + 0, VMI_NO_INPUT, + VMI_CLOBBER(TWO_RETURNS)); + return ret; +} + +static inline VMI_CYCLES vmi_get_real_cycles(void) +{ + VMI_CYCLES ret; + vmi_wrap_call( + GetCycleCounter, "rdtsc", + VMI_OREG64 (ret), + 1, VMI_IREG1(VMI_CYCLES_REAL), + VMI_CLOBBER(TWO_RETURNS)); + return ret; +} + +static inline VMI_CYCLES vmi_get_available_cycles(void) +{ + VMI_CYCLES ret; + vmi_wrap_call( + GetCycleCounter, "rdtsc", + VMI_OREG64 (ret), + 1, VMI_IREG1(VMI_CYCLES_AVAILABLE), + VMI_CLOBBER(TWO_RETURNS)); + return...
2007 Apr 18
0
[RFC, PATCH 23/24] i386 Vmi timer patch
..., %%eax;" + "xor %%edx, %%edx;", + VMI_OREG64 (ret), + 0, VMI_NO_INPUT, + VMI_CLOBBER(TWO_RETURNS)); + return ret; +} + +static inline VMI_CYCLES vmi_get_real_cycles(void) +{ + VMI_CYCLES ret; + vmi_wrap_call( + GetCycleCounter, "rdtsc", + VMI_OREG64 (ret), + 1, VMI_IREG1(VMI_CYCLES_REAL), + VMI_CLOBBER(TWO_RETURNS)); + return ret; +} + +static inline VMI_CYCLES vmi_get_available_cycles(void) +{ + VMI_CYCLES ret; + vmi_wrap_call( + GetCycleCounter, "rdtsc", + VMI_OREG64 (ret), + 1, VMI_IREG1(VMI_CYCLES_AVAILABLE), + VMI_CLOBBER(TWO_RETURNS)); + return...
2007 Apr 18
3
[RFC, PATCH 4/24] i386 Vmi inline implementation
...define VMI_CLOBBER_FOUR_RETURNS "cc" +#else +#error "No VMI calling convention defined" +#endif + +#define VMI_CLOBBER(saved) XCONC(VMI_CLOBBER_##saved) +#define VMI_CLOBBER_EXTENDED(saved, extras...) XCONC(VMI_CLOBBER_##saved, extras) + +#if defined(CONFIG_VMI_REGPARM) +#define VMI_IREG1 "a" +#define VMI_IREG2 "d" +#define VMI_IREG3 "c" +#else +#define VMI_IREG1 "ir" +#define VMI_IREG2 "ir" +#define VMI_IREG3 "ir" +#endif +#define VMI_IREG4 "ir" +#define VMI_IREG5 "ir" + +#if (__GNUC__ == 4) +#define V...
2007 Apr 18
3
[RFC, PATCH 4/24] i386 Vmi inline implementation
...define VMI_CLOBBER_FOUR_RETURNS "cc" +#else +#error "No VMI calling convention defined" +#endif + +#define VMI_CLOBBER(saved) XCONC(VMI_CLOBBER_##saved) +#define VMI_CLOBBER_EXTENDED(saved, extras...) XCONC(VMI_CLOBBER_##saved, extras) + +#if defined(CONFIG_VMI_REGPARM) +#define VMI_IREG1 "a" +#define VMI_IREG2 "d" +#define VMI_IREG3 "c" +#else +#define VMI_IREG1 "ir" +#define VMI_IREG2 "ir" +#define VMI_IREG3 "ir" +#endif +#define VMI_IREG4 "ir" +#define VMI_IREG5 "ir" + +#if (__GNUC__ == 4) +#define V...
2007 Apr 18
1
[RFC, PATCH 19/24] i386 Vmi mmu changes
...VMI_PAGE_PDP, root, base, count) +#define mach_release_pgd(pfn) vmi_release_page(pfn, VMI_PAGE_PDP) +#endif + +static inline void vmi_set_linear_mapping(const int slot, const u32 va, const u32 pages, const u32 ppn) +{ + vmi_wrap_call( + SetLinearMapping, "", + VMI_NO_OUTPUT, + 4, XCONC(VMI_IREG1(slot), VMI_IREG2(va), VMI_IREG3(pages), VMI_IREG4(ppn)), + VMI_CLOBBER(ZERO_RETURNS)); +} + +#define mach_map_linear_pt(num, ptep, pfn) \ + vmi_set_linear_mapping(num+1, (uint32_t)ptep, 1, pfn) +#define mach_map_linear_range(start, pages, pfn) \ + vmi_set_linear_mapping(0, start, pages, pfn) + +#e...
2007 Apr 18
1
[RFC, PATCH 19/24] i386 Vmi mmu changes
...VMI_PAGE_PDP, root, base, count) +#define mach_release_pgd(pfn) vmi_release_page(pfn, VMI_PAGE_PDP) +#endif + +static inline void vmi_set_linear_mapping(const int slot, const u32 va, const u32 pages, const u32 ppn) +{ + vmi_wrap_call( + SetLinearMapping, "", + VMI_NO_OUTPUT, + 4, XCONC(VMI_IREG1(slot), VMI_IREG2(va), VMI_IREG3(pages), VMI_IREG4(ppn)), + VMI_CLOBBER(ZERO_RETURNS)); +} + +#define mach_map_linear_pt(num, ptep, pfn) \ + vmi_set_linear_mapping(num+1, (uint32_t)ptep, 1, pfn) +#define mach_map_linear_range(start, pages, pfn) \ + vmi_set_linear_mapping(0, start, pages, pfn) + +#e...
2007 Apr 18
7
[RFC, PATCH 5/24] i386 Vmi code patching
The VMI ROM detection and code patching mechanism is illustrated in setup.c. The ROM is a binary block published by the hypervisor, and there are certainly implications of this. ROMs certainly have a history of being proprietary, very differently licensed pieces of software, and mostly under non-free licenses. Before jumping to the conclusion that this is a bad thing, let us consider more
2007 Apr 18
7
[RFC, PATCH 5/24] i386 Vmi code patching
The VMI ROM detection and code patching mechanism is illustrated in setup.c. The ROM is a binary block published by the hypervisor, and there are certainly implications of this. ROMs certainly have a history of being proprietary, very differently licensed pieces of software, and mostly under non-free licenses. Before jumping to the conclusion that this is a bad thing, let us consider more