- no need for calling nestedhvm_setup() explicitly (can be a normal
  init-call, and can be __init); a conceptual sketch of the initcall
  mechanism follows the diff
- calling _xmalloc() for multi-page, page-aligned memory regions is
  inefficient - use alloc_xenheap_pages() instead (a minimal
  allocation sketch follows the sign-off)
- although an allocation error is unlikely here, add error handling
  nevertheless (and have nestedhvm_vcpu_initialise() bail if an error
  occurred during setup)
- nestedhvm_enabled() must not access d->arch.hvm_domain without first
  checking that 'd' actually represents a HVM domain
Signed-off-by: Jan Beulich <JBeulich@suse.com>
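
As a minimal sketch (not part of the patch, and using a hypothetical
helper name), the allocation pattern the diff below adopts looks like
this; alloc_xenheap_pages() hands back naturally page-aligned memory
in power-of-two page counts, so no separate alignment argument is
needed:

/* Sketch only; assumes the usual xen/mm.h and xen/string.h interfaces. */
static unsigned long *alloc_io_bitmap(unsigned int nr /* pages */)
{
    unsigned int order = get_order_from_pages(nr); /* smallest covering order */
    unsigned long *map = alloc_xenheap_pages(order, 0);

    if ( map )                     /* allocation can fail; callers must check */
        memset(map, ~0, nr << PAGE_SHIFT); /* intercept all ports by default */

    return map;  /* release later with free_xenheap_pages(map, order) */
}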
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -25,16 +25,14 @@
#include <asm/event.h> /* for local_event_delivery_(en|dis)able */
#include <asm/paging.h> /* for paging_mode_hap() */
+static unsigned long *shadow_io_bitmap[3];
+
/* Nested HVM on/off per domain */
bool_t
nestedhvm_enabled(struct domain *d)
{
- bool_t enabled;
-
- enabled = !!(d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM]);
- BUG_ON(enabled && !is_hvm_domain(d));
-
- return enabled;
+ return is_hvm_domain(d) &&
+ d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM];
}
/* Nested VCPU */
@@ -76,6 +74,9 @@ nestedhvm_vcpu_initialise(struct vcpu *v
{
int rc = -EOPNOTSUPP;
+ if ( !shadow_io_bitmap[0] )
+ return -ENOMEM;
+
if ( !hvm_funcs.nhvm_vcpu_initialise ||
((rc = hvm_funcs.nhvm_vcpu_initialise(v)) != 0) )
return rc;
@@ -147,15 +148,15 @@ nestedhvm_is_n2(struct vcpu *v)
* iomap[2] set set
*/
-static unsigned long *shadow_io_bitmap[3];
-
-void
+static int __init
nestedhvm_setup(void)
{
/* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
- unsigned int sz = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- ? 2*PAGE_SIZE : 3*PAGE_SIZE;
- unsigned int i;
+ unsigned int nr = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ? 2 : 3;
+ unsigned int i, order = get_order_from_pages(nr);
+
+ if ( !hvm_funcs.name )
+ return 0;
/* shadow_io_bitmaps can't be declared static because
* they must fulfill hw requirements (page aligned section)
@@ -169,13 +170,25 @@ nestedhvm_setup(void)
for ( i = 0; i < ARRAY_SIZE(shadow_io_bitmap); i++ )
{
- shadow_io_bitmap[i] = _xmalloc(sz, PAGE_SIZE);
- memset(shadow_io_bitmap[i], ~0U, sz);
+ shadow_io_bitmap[i] = alloc_xenheap_pages(order, 0);
+ if ( !shadow_io_bitmap[i] )
+ {
+ while ( i-- )
+ {
+ free_xenheap_pages(shadow_io_bitmap[i], order);
+ shadow_io_bitmap[i] = NULL;
+ }
+ return -ENOMEM;
+ }
+ memset(shadow_io_bitmap[i], ~0U, nr << PAGE_SHIFT);
}
__clear_bit(0x80, shadow_io_bitmap[0]);
__clear_bit(0xed, shadow_io_bitmap[1]);
+
+ return 0;
}
+__initcall(nestedhvm_setup);
unsigned long *
nestedhvm_vcpu_iomap_get(bool_t port_80, bool_t port_ed)
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1312,8 +1312,6 @@ void __init __start_xen(unsigned long mb
if ( opt_watchdog )
watchdog_setup();
- nestedhvm_setup();
-
if ( !tboot_protect_mem_regions() )
panic("Could not protect TXT memory regions\n");
--- a/xen/include/asm-x86/setup.h
+++ b/xen/include/asm-x86/setup.h
@@ -21,7 +21,6 @@ void set_nr_cpu_ids(unsigned int max_cpu
void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
void arch_init_memory(void);
void subarch_init_memory(void);
-void nestedhvm_setup(void);
void init_IRQ(void);
void vesa_init(void);
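
For reference, the __initcall() registration used above works roughly
as sketched below. This is a simplified conceptual model, not the
actual Xen definitions (the real macros live in xen/include/xen/init.h);
the section name and symbols here are illustrative only:

/* Conceptual sketch: registration drops a function pointer into a
 * dedicated linker section; the boot path walks that section once, so
 * __start_xen() needs no explicit call site and the setup routine can
 * be __init (discarded after boot). */
typedef int (*initcall_t)(void);

/* Hypothetical stand-ins for the bounds the linker script provides. */
extern initcall_t __initcall_start[], __initcall_end[];

#define my_initcall(fn)                                          \
    static initcall_t __initcall_##fn                            \
        __attribute__((__used__, __section__(".initcall.init"))) = fn

static void run_initcalls(void)
{
    initcall_t *call;

    for ( call = __initcall_start; call < __initcall_end; call++ )
        (*call)(); /* return values may simply be ignored here: the
                    * nested HVM module records failure itself (its
                    * bitmap pointer stays NULL), which
                    * nestedhvm_vcpu_initialise() re-checks */
}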
On 05/24/12 14:14, Jan Beulich wrote:
> [full patch quoted above, trimmed]

Looks ok to me. How did you test it?

Christoph
>>> On 24.05.12 at 14:44, Christoph Egger <Christoph.Egger@amd.com> wrote:
> On 05/24/12 14:14, Jan Beulich wrote:
>> [commit message quoted above, trimmed]
>
> Looks ok to me. How did you test it?

Sorry, no, I did not test it at all. Should probably have said so...
I just noticed the brokenness while putting together the VIA enabling
patch sent soon afterwards.

Jan