Mailing list archive search results for "vmi_get_funct":
2007 Apr 18
0
[PATCH 8/9] Vmi apic ops.diff
Use para_fill instead of directly setting the APIC ops to the result of the
vmi_get_function call; this allows a VMI ROM to be implemented without the
APIC functions, in which case the native APIC functions are used.
While doing this, I realized that there is a lot more cleanup that should
have been done. Basically, we should never assume that the ROM implements
a specific set of functio...
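
For reference, the para_fill pattern mentioned above queries the ROM for a
relocation and only overrides the native paravirt entry when the ROM actually
implements the call. A simplified sketch of that idea (a reconstruction for
illustration, not the exact macro from arch/i386/kernel/vmi.c; it assumes the
local reloc/rel variables seen in the vmi_get_function snippets below):

/*
 * Sketch: fill a paravirt_ops slot from the VMI ROM only when the ROM
 * provides a CALL relocation; otherwise keep the native implementation.
 */
#define para_fill(opname, vmicall)					\
do {									\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,			\
				    VMI_CALL_##vmicall);		\
	if (rel->type != VMI_RELOCATION_NONE) {				\
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);		\
		paravirt_ops.opname = (void *)rel->eip;			\
	}								\
	/* else: leave the native paravirt_ops.opname in place */	\
} while (0)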
2007 Apr 18
0
[PATCH 4/5] Vmi.patch
...urn 0;
+}
+
+/*
+ * VMI setup common to all processors
+ */
+void vmi_bringup(void)
+{
+	/* We must establish the lowmem mapping for MMU ops to work */
+	vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
+}
+
+/*
+ * Return a pointer to the VMI function or a NOP stub
+ */
+static void *vmi_get_function(int vmicall)
+{
+	u64 reloc;
+	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
+	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
+	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
+	if (rel->type == VMI_RELOCATION_CALL_REL)
+		return (void *)rel->...
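
The reloc value decoded above is a packed 64-bit relocation record returned by
the ROM's get_reloc call. Roughly, the overlay looks like this (field layout is
an assumption based on the decode in the snippet, not quoted from vmi.h):

/* Assumed layout of the record that vmi_get_function() overlays onto the
 * u64 returned by get_reloc. */
struct vmi_relocation_info {
	unsigned char *eip;		/* entry point supplied by the ROM */
	unsigned char type;		/* VMI_RELOCATION_{NONE,CALL_REL,JUMP_REL,...} */
	unsigned char reserved[3];
};

Callers such as activate_vmi() then wire an op from the result, e.g.
vmi_ops.halt = vmi_get_function(VMI_CALL_Halt). In this 2007 version an
unimplemented call yields a NOP stub, whereas the later version removed in
2.6.37 (below) returns NULL and the call sites check before use.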
2007 Apr 18
0
[PATCH 5/6] VMI backend for paravirt-ops
...urn 0;
+}
+
+/*
+ * VMI setup common to all processors
+ */
+void vmi_bringup(void)
+{
+	/* We must establish the lowmem mapping for MMU ops to work */
+	vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
+}
+
+/*
+ * Return a pointer to the VMI function or a NOP stub
+ */
+static void *vmi_get_function(int vmicall)
+{
+	u64 reloc;
+	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
+	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
+	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
+	if (rel->type == VMI_RELOCATION_CALL_REL)
+		return (void *)rel->...
2007 Apr 18
0
[PATCH 6/6] VMI timer patches
...G_X86_PAE
@@ -726,7 +741,12 @@ static inline int __init activate_vmi(vo
		     (char *)paravirt_ops.save_fl);
	patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
		     (char *)paravirt_ops.irq_disable);
+#ifndef CONFIG_NO_IDLE_HZ
	para_fill(safe_halt, Halt);
+#else
+	vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
+	paravirt_ops.safe_halt = vmi_safe_halt;
+#endif
	para_fill(wbinvd, WBINVD);
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
@@ -835,6 +855,31 @@ static inline int __init activate_vmi(vo
	paravirt_ops.apic_write = vmi_get_function(VMI_CALL_...
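
The CONFIG_NO_IDLE_HZ branch above exists because a plain Halt hypercall would
sleep only until the next periodic tick; with dynamic ticks the halt path first
stops the periodic timer and re-arms time accounting when it wakes. A rough
sketch of what a vmi_safe_halt wrapper of that kind does (vmi_ops.halt comes
from the snippet above; the two timer helpers are hypothetical stand-ins for
the VMI alarm calls):

static void vmi_safe_halt(void)
{
	/* hypothetical helper: switch off the periodic tick while idle */
	int idle = vmi_stop_hz_timer();

	/* ROM Halt call obtained via vmi_get_function(VMI_CALL_Halt);
	 * like native safe_halt it returns once an interrupt arrives */
	vmi_ops.halt();

	if (idle) {
		local_irq_disable();
		/* hypothetical helper: account missed ticks, re-arm the tick */
		vmi_account_time_restart_hz_timer();
		local_irq_enable();
	}
}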
2007 Apr 18
0
[PATCH 5/5] Vmi timer.patch
...G_X86_PAE
@@ -726,7 +741,12 @@ static inline int __init activate_vmi(vo
		     (char *)paravirt_ops.save_fl);
	patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
		     (char *)paravirt_ops.irq_disable);
+#ifndef CONFIG_NO_IDLE_HZ
	para_fill(safe_halt, Halt);
+#else
+	vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
+	paravirt_ops.safe_halt = vmi_safe_halt;
+#endif
	para_fill(wbinvd, WBINVD);
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
@@ -835,6 +855,31 @@ static inline int __init activate_vmi(vo
	paravirt_ops.apic_write = vmi_get_function(VMI_CALL_...
2010 Aug 23
1
Removing VMI kernel support from 2.6.37
...ors
- */
-void vmi_bringup(void)
-{
-	/* We must establish the lowmem mapping for MMU ops to work */
-	if (vmi_ops.set_linear_mapping)
-		vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
-}
-
-/*
- * Return a pointer to a VMI function or NULL if unimplemented
- */
-static void *vmi_get_function(int vmicall)
-{
-	u64 reloc;
-	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
-	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
-	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
-	if (rel->type == VMI_RELOCATION_CALL_REL)
-		return (void *)rel->...
2007 Apr 18
0
[PATCH 1/9] Vmi timer fixes round two.patch
...GE_TYPE
@@ -742,12 +741,7 @@ static inline int __init activate_vmi(vo
		     (char *)paravirt_ops.save_fl);
	patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
		     (char *)paravirt_ops.irq_disable);
-#ifndef CONFIG_NO_IDLE_HZ
-	para_fill(safe_halt, Halt);
-#else
-	vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
-	paravirt_ops.safe_halt = vmi_safe_halt;
-#endif
+
	para_fill(wbinvd, WBINVD);
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
@@ -881,6 +875,12 @@ static inline int __init activate_vmi(vo
#endif
		custom_sched_clock = vmi_sched_clock;
	}...
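
The custom_sched_clock assignment in the second hunk lets the scheduler read
time from the hypervisor's cycle counter instead of the raw TSC. As a sketch of
the idea (the cycles_2_ns conversion and the vmi_timer_ops.get_cycle_counter
call are assumptions about the timer backend, not quoted from the patch):

/* Sketch: scheduler clock backed by the hypervisor's count of available
 * (non-stolen) cycles, converted to nanoseconds. */
static unsigned long long vmi_sched_clock(void)
{
	return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
}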
2007 Apr 18
1
[PATCH 4/9] Vmi fix highpte
...+ */
+	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
+	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn);
+}
+
 static void vmi_allocate_pt(u32 pfn)
 {
	vmi_set_page_type(pfn, VMI_PAGE_L1);
@@ -813,6 +831,7 @@ static inline int __init activate_vmi(vo
	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
+	paravirt_ops.map_pt_hook = vmi_map_pt_hook;
	paravirt_ops.alloc_pt = vmi_allocate_pt;
	paravirt_ops.alloc_pd = vmi_allocate_pd;
	paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
diff -r 87bf6b2d33...
2007 Apr 18
0
[PATCH 8/10] Vmi kmap_atomic_pte fix.patch
...tic inline int __init activate_vmi(vo
		paravirt_ops.release_pt = vmi_release_pt;
		paravirt_ops.release_pd = vmi_release_pd;
	}
-#if 0
-	para_wrap(map_pt_hook, vmi_map_pt_hook, set_linear_mapping,
-		  SetLinearMapping);
+
+	/* Set linear is needed in all cases */
+	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
+#ifdef CONFIG_HIGHPTE
+	if (vmi_ops.set_linear_mapping)
+		paravirt_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
#endif
	/*
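
Here kmap_atomic_pte replaces the earlier map_pt_hook approach: rather than
hooking after the page table page has been mapped, the paravirt layer maps it
itself and immediately tells the ROM which linear address now maps that pfn,
reusing set_linear_mapping slots 1 and 2 for KM_PTE0/KM_PTE1 (slot 0 holds the
lowmem linear map established in vmi_bringup). Roughly, the wrapper wired up
above has this shape (a sketch, not the exact kernel function):

#ifdef CONFIG_HIGHPTE
/* Sketch: map a (possibly highmem) page table page and register the new
 * VA->PFN mapping with the ROM so it can decode subsequent MMU updates. */
static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
{
	void *va = kmap_atomic(page, type);

	/* slots 1 and 2 track KM_PTE0 and KM_PTE1; slot 0 is the linear map */
	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
	vmi_ops.set_linear_mapping((type - KM_PTE0) + 1, va, 1, page_to_pfn(page));

	return va;
}
#endif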
2007 Oct 09
2
[PATCH RFC REPOST 1/2] paravirt: refactor struct paravirt_ops into smaller pv_*_ops
...md_clear = vmi_pmd_clear;
#endif
	}
	if (vmi_ops.update_pte) {
-		paravirt_ops.pte_update = vmi_update_pte;
-		paravirt_ops.pte_update_defer = vmi_update_pte_defer;
+		pv_mmu_ops.pte_update = vmi_update_pte;
+		pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
	}
	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	if (vmi_ops.allocate_page) {
-		paravirt_ops.alloc_pt = vmi_allocate_pt;
-		paravirt_ops.alloc_pd = vmi_allocate_pd;
-		paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
+		pv_mmu_ops.alloc_pt = vmi_allocate_pt;
+		pv_mmu_ops.alloc_pd = vmi_allocate_pd;
+		pv_mmu_ops...
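
The refactor shown here splits the old monolithic paravirt_ops into smaller
per-area structures, so the MMU hooks that VMI fills in move to pv_mmu_ops
while the guarded wiring pattern stays the same. A minimal sketch of the
post-refactor shape (member list trimmed to the fields visible above,
signatures schematic):

/* Sketch: one small ops structure per area instead of a single
 * paravirt_ops; only the MMU fields seen in this snippet are shown. */
struct pv_mmu_ops {
	void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
	void (*alloc_pt)(u32 pfn);		/* signature schematic */
	void (*alloc_pd)(u32 pfn);		/* signature schematic */
	void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
	/* ... many more MMU hooks ... */
};

extern struct pv_mmu_ops pv_mmu_ops;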