search for: vmi_no_output

Displaying 20 results from an estimated 22 matches for "vmi_no_output".

2007 Apr 18
0
[RFC, PATCH 13/24] i386 Vmi system header
...e + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to zach@vmware.com + * + */ + + +#ifndef _MACH_SYSTEM_H +#define _MACH_SYSTEM_H + +#include <vmi.h> + +static inline void write_cr0(const u32 val) +{ + vmi_wrap_call( + SetCR0, "mov %0, %%cr0", + VMI_NO_OUTPUT, + 1, VMI_IREG1(val), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void write_cr2(const u32 val) +{ + vmi_wrap_call( + SetCR2, "mov %0, %%cr2", + VMI_NO_OUTPUT, + 1, VMI_IREG1(val), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +st...
2007 Apr 18
0
[RFC, PATCH 13/24] i386 Vmi system header
...e + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to zach@vmware.com + * + */ + + +#ifndef _MACH_SYSTEM_H +#define _MACH_SYSTEM_H + +#include <vmi.h> + +static inline void write_cr0(const u32 val) +{ + vmi_wrap_call( + SetCR0, "mov %0, %%cr0", + VMI_NO_OUTPUT, + 1, VMI_IREG1(val), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void write_cr2(const u32 val) +{ + vmi_wrap_call( + SetCR2, "mov %0, %%cr2", + VMI_NO_OUTPUT, + 1, VMI_IREG1(val), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +st...
2007 Apr 18
1
[RFC, PATCH 16/24] i386 Vmi io header
...s used to + * serialize memory writes by the compiler before an I/O + * instruction. In addition, even input operations may clobber + * hardware mapped memory. + */ + +static inline void vmi_outl(const VMI_UINT32 value, const VMI_UINT16 port) +{ + vmi_wrap_call( + OUT, "out %0, %w1", + VMI_NO_OUTPUT, + 2, XCONC("a"(value), "d"(port)), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void vmi_outb(const VMI_UINT8 value, const VMI_UINT16 port) +{ + vmi_wrap_call( + OUTB, "outb %b0, %w1", + VMI_NO_OUTPUT, + 2, XCONC("a"(va...
2007 Apr 18
1
[RFC, PATCH 16/24] i386 Vmi io header
...s used to + * serialize memory writes by the compiler before an I/O + * instruction. In addition, even input operations may clobber + * hardware mapped memory. + */ + +static inline void vmi_outl(const VMI_UINT32 value, const VMI_UINT16 port) +{ + vmi_wrap_call( + OUT, "out %0, %w1", + VMI_NO_OUTPUT, + 2, XCONC("a"(value), "d"(port)), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void vmi_outb(const VMI_UINT8 value, const VMI_UINT16 port) +{ + vmi_wrap_call( + OUTB, "outb %b0, %w1", + VMI_NO_OUTPUT, + 2, XCONC("a"(va...
2007 Apr 18
3
[RFC, PATCH 10/24] i386 Vmi descriptor changes
...Send feedback to zach@vmware.com + * + */ + +#ifndef __MACH_DESC_H +#define __MACH_DESC_H + +#include <vmi.h> + +#if !defined(CONFIG_X86_VMI) +# error invalid sub-arch include +#endif + +static inline void load_gdt(VMI_DTR *const dtr) +{ + vmi_wrap_call( + SetGDT, "lgdt (%0)", + VMI_NO_OUTPUT, + 1, VMI_IREG1 (dtr), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void load_idt(VMI_DTR *const dtr) +{ + vmi_wrap_call( + SetIDT, "lidt (%0)", + VMI_NO_OUTPUT, + 1, VMI_IREG1 (dtr), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +...
2007 Apr 18
3
[RFC, PATCH 10/24] i386 Vmi descriptor changes
...Send feedback to zach@vmware.com + * + */ + +#ifndef __MACH_DESC_H +#define __MACH_DESC_H + +#include <vmi.h> + +#if !defined(CONFIG_X86_VMI) +# error invalid sub-arch include +#endif + +static inline void load_gdt(VMI_DTR *const dtr) +{ + vmi_wrap_call( + SetGDT, "lgdt (%0)", + VMI_NO_OUTPUT, + 1, VMI_IREG1 (dtr), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline void load_idt(VMI_DTR *const dtr) +{ + vmi_wrap_call( + SetIDT, "lidt (%0)", + VMI_NO_OUTPUT, + 1, VMI_IREG1 (dtr), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +...
2007 Apr 18
0
[RFC, PATCH 18/24] i386 Vmi tlbflush header
...c., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to zach@vmware.com + * + */ + + +#ifndef _MACH_TLBFLUSH_H +#define _MACH_TLBFLUSH_H + +#include <vmi.h> + +static inline void __flush_tlb(void) +{ + vmi_wrap_call( + FlushTLB, "mov %%cr3, %%eax; mov %%eax, %%cr3", + VMI_NO_OUTPUT, + 1, VMI_IREG1(VMI_FLUSH_TLB), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "eax", "memory")); +} + +static inline void __flush_tlb_global(void) +{ + vmi_wrap_call( + FlushTLB, "mov %%cr4, %%eax; \n" + "andb $0x7f, %%al; \n" + "mov %%eax, %%cr4; \n"...
2007 Apr 18
0
[RFC, PATCH 18/24] i386 Vmi tlbflush header
...c., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to zach@vmware.com + * + */ + + +#ifndef _MACH_TLBFLUSH_H +#define _MACH_TLBFLUSH_H + +#include <vmi.h> + +static inline void __flush_tlb(void) +{ + vmi_wrap_call( + FlushTLB, "mov %%cr3, %%eax; mov %%eax, %%cr3", + VMI_NO_OUTPUT, + 1, VMI_IREG1(VMI_FLUSH_TLB), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "eax", "memory")); +} + +static inline void __flush_tlb_global(void) +{ + vmi_wrap_call( + FlushTLB, "mov %%cr4, %%eax; \n" + "andb $0x7f, %%al; \n" + "mov %%eax, %%cr4; \n"...
2007 Apr 18
1
[RFC, PATCH 12/24] i386 Vmi processor header
...igned int eax, ebx, ecx, edx; + + vmi_cpuid(op, &eax, &ebx, &ecx, &edx); + return edx; +} + +#define flush_deferred_cpu_state() vmi_flush_deferred_calls(0) + +static inline void arch_update_kernel_stack(void *tss, u32 stack) +{ + vmi_wrap_call( + UpdateKernelStack, "", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(tss), VMI_IREG2(stack)), + VMI_CLOBBER(ZERO_RETURNS)); +} + +static inline void set_debugreg(const u32 val, const int num) +{ + vmi_wrap_call( + SetDR, "movl %1, %%db%c2", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(num), VMI_IREG2(val), VMI_IMM (num)), + VMI_CLOBBER...
2007 Apr 18
1
[RFC, PATCH 12/24] i386 Vmi processor header
...igned int eax, ebx, ecx, edx; + + vmi_cpuid(op, &eax, &ebx, &ecx, &edx); + return edx; +} + +#define flush_deferred_cpu_state() vmi_flush_deferred_calls(0) + +static inline void arch_update_kernel_stack(void *tss, u32 stack) +{ + vmi_wrap_call( + UpdateKernelStack, "", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(tss), VMI_IREG2(stack)), + VMI_CLOBBER(ZERO_RETURNS)); +} + +static inline void set_debugreg(const u32 val, const int num) +{ + vmi_wrap_call( + SetDR, "movl %1, %%db%c2", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(num), VMI_IREG2(val), VMI_IMM (num)), + VMI_CLOBBER...
2007 Apr 18
0
[RFC, PATCH 15/24] i386 Vmi apic header
...ifndef __ASM_MACH_APICOPS_H +#define __ASM_MACH_APICOPS_H + +#ifdef CONFIG_X86_LOCAL_APIC +#include <vmi.h> + +static inline void apic_write(unsigned long reg, unsigned long value) +{ + void *addr = (void *)(APIC_BASE + reg); + vmi_wrap_call( + APICWrite, "movl %1, (%0)", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(addr), VMI_IREG2(value)), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +#define apic_write_atomic(r,v) apic_write(r,v) + +static inline unsigned long apic_read(unsigned long reg) +{ + unsigned long value; + void *addr = (void *)(APIC_BASE + reg); +...
2007 Apr 18
0
[RFC, PATCH 15/24] i386 Vmi apic header
...ifndef __ASM_MACH_APICOPS_H +#define __ASM_MACH_APICOPS_H + +#ifdef CONFIG_X86_LOCAL_APIC +#include <vmi.h> + +static inline void apic_write(unsigned long reg, unsigned long value) +{ + void *addr = (void *)(APIC_BASE + reg); + vmi_wrap_call( + APICWrite, "movl %1, (%0)", + VMI_NO_OUTPUT, + 2, XCONC(VMI_IREG1(addr), VMI_IREG2(value)), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +#define apic_write_atomic(r,v) apic_write(r,v) + +static inline unsigned long apic_read(unsigned long reg) +{ + unsigned long value; + void *addr = (void *)(APIC_BASE + reg); +...
2007 Apr 18
2
[RFC, PATCH 17/24] i386 Vmi msr patch
...ETURNS)); + return ret; +} + +#define rdmsr(msr,val1,val2) \ +do { \ + u64 _val = vmi_rdmsr(msr); \ + val1 = (u32)_val; \ + val2 = (u32)(_val >> 32); \ +} while (0) + +static inline void wrmsr(const u32 msr, const u32 valLo, const u32 valHi) +{ + vmi_wrap_call( + WRMSR, "wrmsr", + VMI_NO_OUTPUT, + 3, XCONC("a"(valLo), "d"(valHi), "c"(msr)), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline u64 vmi_rdtsc(void) +{ + u64 ret; + vmi_wrap_call( + RDTSC, "rdtsc", + VMI_OREG64 (ret), + 0, VMI_NO_INPUT, + VMI_CLOBBER(TWO_R...
2007 Apr 18
2
[RFC, PATCH 17/24] i386 Vmi msr patch
...ETURNS)); + return ret; +} + +#define rdmsr(msr,val1,val2) \ +do { \ + u64 _val = vmi_rdmsr(msr); \ + val1 = (u32)_val; \ + val2 = (u32)(_val >> 32); \ +} while (0) + +static inline void wrmsr(const u32 msr, const u32 valLo, const u32 valHi) +{ + vmi_wrap_call( + WRMSR, "wrmsr", + VMI_NO_OUTPUT, + 3, XCONC("a"(valLo), "d"(valHi), "c"(msr)), + VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); +} + +static inline u64 vmi_rdtsc(void) +{ + u64 ret; + vmi_wrap_call( + RDTSC, "rdtsc", + VMI_OREG64 (ret), + 0, VMI_NO_INPUT, + VMI_CLOBBER(TWO_R...
2007 Apr 18
0
[RFC, PATCH 23/24] i386 Vmi timer patch
...t; + vmi_wrap_call( + WallclockUpdated, "xor %%eax, %%eax;", + VMI_OREG1 (ret), + 0, VMI_NO_INPUT, + VMI_CLOBBER(ONE_RETURN)); + return ret; +} + +static inline void vmi_set_alarm(VMI_UINT32 flags, VMI_CYCLES expiry, VMI_CYCLES period) +{ + vmi_wrap_call( + SetAlarm, "", + VMI_NO_OUTPUT, + 5, XCONC(VMI_IREG1(flags), + VMI_IREG2((VMI_UINT32)expiry), VMI_IREG3((VMI_UINT32)(expiry >> 32)), + VMI_IREG4((VMI_UINT32)period), VMI_IREG5((VMI_UINT32)(period >> 32))), + VMI_CLOBBER(ZERO_RETURNS)); +} + +static inline void vmi_cancel_alarm(VMI_UINT32 flags) +{ + vmi_wrap_...
2007 Apr 18
0
[RFC, PATCH 23/24] i386 Vmi timer patch
...t; + vmi_wrap_call( + WallclockUpdated, "xor %%eax, %%eax;", + VMI_OREG1 (ret), + 0, VMI_NO_INPUT, + VMI_CLOBBER(ONE_RETURN)); + return ret; +} + +static inline void vmi_set_alarm(VMI_UINT32 flags, VMI_CYCLES expiry, VMI_CYCLES period) +{ + vmi_wrap_call( + SetAlarm, "", + VMI_NO_OUTPUT, + 5, XCONC(VMI_IREG1(flags), + VMI_IREG2((VMI_UINT32)expiry), VMI_IREG3((VMI_UINT32)(expiry >> 32)), + VMI_IREG4((VMI_UINT32)period), VMI_IREG5((VMI_UINT32)(period >> 32))), + VMI_CLOBBER(ZERO_RETURNS)); +} + +static inline void vmi_cancel_alarm(VMI_UINT32 flags) +{ + vmi_wrap_...
2007 Apr 18
3
[RFC, PATCH 4/24] i386 Vmi inline implementation
..."\n\t" \ + XCSTR(vmi_padded_finish) "\n\t" \ + \ + XCSTR(vmi_annotate(VMI_CALL_##call)) "\n\t" \ + \ + :: input ); \ + asm volatile ( "" : output :: clobber ); \ +} while (0) + +#define VMI_NO_INPUT +#define VMI_NO_OUTPUT + +struct vmi_annotation { + unsigned long vmi_call; + unsigned char *nativeEIP; + unsigned char *translationEIP; + unsigned char native_size; + unsigned char translation_size; + char nop_size; + unsigned char pad; +}; + +extern VMI_UINT8 hypervisor_found; +extern VMI_UINT8 hypervisor_timer_found...
2007 Apr 18
3
[RFC, PATCH 4/24] i386 Vmi inline implementation
..."\n\t" \ + XCSTR(vmi_padded_finish) "\n\t" \ + \ + XCSTR(vmi_annotate(VMI_CALL_##call)) "\n\t" \ + \ + :: input ); \ + asm volatile ( "" : output :: clobber ); \ +} while (0) + +#define VMI_NO_INPUT +#define VMI_NO_OUTPUT + +struct vmi_annotation { + unsigned long vmi_call; + unsigned char *nativeEIP; + unsigned char *translationEIP; + unsigned char native_size; + unsigned char translation_size; + char nop_size; + unsigned char pad; +}; + +extern VMI_UINT8 hypervisor_found; +extern VMI_UINT8 hypervisor_timer_found...
2007 Apr 18
1
[RFC, PATCH 19/24] i386 Vmi mmu changes
...vmi_allocate_page(pfn, VMI_PAGE_PDP, root, base, count) +#define mach_release_pgd(pfn) vmi_release_page(pfn, VMI_PAGE_PDP) +#endif + +static inline void vmi_set_linear_mapping(const int slot, const u32 va, const u32 pages, const u32 ppn) +{ + vmi_wrap_call( + SetLinearMapping, "", + VMI_NO_OUTPUT, + 4, XCONC(VMI_IREG1(slot), VMI_IREG2(va), VMI_IREG3(pages), VMI_IREG4(ppn)), + VMI_CLOBBER(ZERO_RETURNS)); +} + +#define mach_map_linear_pt(num, ptep, pfn) \ + vmi_set_linear_mapping(num+1, (uint32_t)ptep, 1, pfn) +#define mach_map_linear_range(start, pages, pfn) \ + vmi_set_linear_mapping(0, s...
2007 Apr 18
1
[RFC, PATCH 19/24] i386 Vmi mmu changes
...vmi_allocate_page(pfn, VMI_PAGE_PDP, root, base, count) +#define mach_release_pgd(pfn) vmi_release_page(pfn, VMI_PAGE_PDP) +#endif + +static inline void vmi_set_linear_mapping(const int slot, const u32 va, const u32 pages, const u32 ppn) +{ + vmi_wrap_call( + SetLinearMapping, "", + VMI_NO_OUTPUT, + 4, XCONC(VMI_IREG1(slot), VMI_IREG2(va), VMI_IREG3(pages), VMI_IREG4(ppn)), + VMI_CLOBBER(ZERO_RETURNS)); +} + +#define mach_map_linear_pt(num, ptep, pfn) \ + vmi_set_linear_mapping(num+1, (uint32_t)ptep, 1, pfn) +#define mach_map_linear_range(start, pages, pfn) \ + vmi_set_linear_mapping(0, s...