Jan Beulich
2013-Apr-19 09:33 UTC
[PATCH] x86/HVM: move per-vendor function tables into .init.data
hvm_enable() copies the table contents rather than storing the pointer, so
there's no need to keep these tables post-boot.

Also constify the return values of the per-vendor initialization functions,
making clear that once the per-vendor initialization is complete, the
vendor specific tables won't get modified anymore.

Finally, in hvm_enable(), use the returned pointer for all read accesses,
as that is more efficient than global variable accesses. Writes of course
still need to go to the global variable.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -113,7 +113,7 @@ static struct notifier_block cpu_nfb = {

 static int __init hvm_enable(void)
 {
-    struct hvm_function_table *fns = NULL;
+    const struct hvm_function_table *fns = NULL;

     if ( cpu_has_vmx )
         fns = start_vmx();
@@ -126,8 +126,8 @@ static int __init hvm_enable(void)
     hvm_funcs = *fns;
     hvm_enabled = 1;

-    printk("HVM: %s enabled\n", hvm_funcs.name);
-    if ( !hvm_funcs.hap_supported )
+    printk("HVM: %s enabled\n", fns->name);
+    if ( !fns->hap_supported )
         printk("HVM: Hardware Assisted Paging (HAP) not detected\n");
     else if ( !opt_hap_enabled )
     {
@@ -138,9 +138,9 @@ static int __init hvm_enable(void)
     {
         printk("HVM: Hardware Assisted Paging (HAP) detected\n");
         printk("HVM: HAP page sizes: 4kB");
-        if ( hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB )
+        if ( fns->hap_capabilities & HVM_HAP_SUPERPAGE_2MB )
             printk(", 2MB%s", opt_hap_2mb ? "" : " [disabled]");
-        if ( hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB )
+        if ( fns->hap_capabilities & HVM_HAP_SUPERPAGE_1GB )
             printk(", 1GB%s", opt_hap_1gb ? "" : " [disabled]");
         printk("\n");
     }
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1236,7 +1236,7 @@ static int svm_cpu_up(void)
     return 0;
 }

-struct hvm_function_table * __init start_svm(void)
+const struct hvm_function_table * __init start_svm(void)
 {
     bool_t printed = 0;

@@ -1961,7 +1961,7 @@ static void svm_invlpg_intercept(unsigne
     svm_asid_g_invlpg(curr, vaddr);
 }

-static struct hvm_function_table __read_mostly svm_function_table = {
+static struct hvm_function_table __initdata svm_function_table = {
     .name                 = "SVM",
     .cpu_up_prepare       = svm_cpu_up_prepare,
     .cpu_dead             = svm_cpu_dead,
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1504,7 +1504,7 @@ static void vmx_sync_pir_to_irr(struct v
         vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]);
 }

-static struct hvm_function_table __read_mostly vmx_function_table = {
+static struct hvm_function_table __initdata vmx_function_table = {
     .name                 = "VMX",
     .cpu_up_prepare       = vmx_cpu_up_prepare,
     .cpu_dead             = vmx_cpu_dead,
@@ -1559,7 +1559,7 @@ static struct hvm_function_table __read_
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
 };

-struct hvm_function_table * __init start_vmx(void)
+const struct hvm_function_table * __init start_vmx(void)
 {
     set_in_cr4(X86_CR4_VMXE);

--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -199,8 +199,8 @@ extern bool_t hvm_enabled;
 extern bool_t cpu_has_lmsl;
 extern s8 hvm_port80_allowed;

-extern struct hvm_function_table *start_svm(void);
-extern struct hvm_function_table *start_vmx(void);
+extern const struct hvm_function_table *start_svm(void);
+extern const struct hvm_function_table *start_vmx(void);

 int hvm_domain_initialise(struct domain *d);
 void hvm_domain_relinquish_resources(struct domain *d);
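The reason the move to .init.data is safe is easiest to see in isolation.
Below is a minimal, self-contained C sketch of the copy-then-discard
pattern the patch relies on. It is not the Xen code itself: the *_demo
names are placeholders, and __init/__initdata are defined as empty
stand-ins for the hypervisor's real section attributes.

#include <stdio.h>

/* Stand-ins for the hypervisor's section attributes; in Xen these place
 * the object/function in .init.text / .init.data, which are freed after
 * boot.  Here they expand to nothing so the sketch builds anywhere. */
#define __init
#define __initdata

struct hvm_function_table_demo {
    const char *name;
    int hap_supported;
};

/* Long-lived copy, analogous to the global hvm_funcs. */
static struct hvm_function_table_demo hvm_funcs_demo;

/* Boot-time-only vendor table, analogous to vmx/svm_function_table. */
static struct hvm_function_table_demo __initdata vendor_table_demo = {
    .name = "DEMO",
    .hap_supported = 1,
};

/* Analogous to start_vmx()/start_svm(): the const return type documents
 * that callers may read, but never modify, the vendor table. */
static const struct hvm_function_table_demo *__init start_demo(void)
{
    return &vendor_table_demo;
}

int main(void)
{
    const struct hvm_function_table_demo *fns = start_demo();

    /* Structure assignment copies the contents, as hvm_enable() does with
     * "hvm_funcs = *fns;" -- nothing points into the source table after
     * this, which is what makes __initdata safe. */
    hvm_funcs_demo = *fns;

    /* Reads go through the local pointer, mirroring the patch; only the
     * copy above writes the global. */
    printf("HVM: %s enabled\n", fns->name);
    if ( !fns->hap_supported )
        printf("HVM: Hardware Assisted Paging (HAP) not detected\n");

    return 0;
}

Because hvm_enable() takes a full structure copy, nothing dereferences the
vendor table after boot, so it can be discarded together with the rest of
.init.data once initialization completes.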