Mukesh Rathor
2013-Jan-12 01:57 UTC
[RFC PATCH 8/16]: PVH xen: domain creation code changes
This patch mainly changes arch_domain_create() to allow for PVH creation.
Also, since PVH uses a lot of HVM data structures and code paths, a
sub-struct is added to hvm_vcpu {} to store PVH-specific info. Right now
it has only one field, but it will grow over time.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>

diff -r 956e77de73e4 -r e51e48fd1d20 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Jan 11 16:29:49 2013 -0800
+++ b/xen/arch/x86/domain.c     Fri Jan 11 16:31:33 2013 -0800
@@ -410,8 +410,11 @@ int vcpu_initialise(struct vcpu *v)
 
     vmce_init_vcpu(v);
 
-    if ( is_hvm_domain(d) )
+    if ( is_hvm_or_pvh_domain(d) )
     {
+        if ( is_pvh_domain(d) )
+            v->arch.hvm_vcpu.hvm_pvh.pvh_vcpu_info_mfn = INVALID_MFN;
+
         rc = hvm_vcpu_initialise(v);
         goto done;
     }
@@ -476,7 +479,7 @@ void vcpu_destroy(struct vcpu *v)
 
     vcpu_destroy_fpu(v);
 
-    if ( is_hvm_vcpu(v) )
+    if ( is_hvm_or_pvh_vcpu(v) )
         hvm_vcpu_destroy(v);
     else if ( standalone_trap_ctxt(v) )
         free_xenheap_page(v->arch.pv_vcpu.trap_ctxt);
@@ -489,7 +492,7 @@ int arch_domain_create(struct domain *d,
     int rc = -ENOMEM;
 
     d->arch.hvm_domain.hap_enabled =
-        is_hvm_domain(d) &&
+        is_hvm_or_pvh_domain(d) &&
         hvm_funcs.hap_supported &&
         (domcr_flags & DOMCRF_hap);
     d->arch.hvm_domain.mem_sharing_enabled = 0;
@@ -539,7 +542,7 @@ int arch_domain_create(struct domain *d,
                                   __PAGE_HYPERVISOR);
 
         HYPERVISOR_COMPAT_VIRT_START(d) =
-            is_hvm_domain(d) ? ~0u : __HYPERVISOR_COMPAT_VIRT_START;
+            is_hvm_or_pvh_domain(d) ? ~0u : __HYPERVISOR_COMPAT_VIRT_START;
 
         if ( (rc = paging_domain_init(d, domcr_flags)) != 0 )
             goto fail;
@@ -581,7 +584,7 @@ int arch_domain_create(struct domain *d,
             goto fail;
     }
 
-    if ( is_hvm_domain(d) )
+    if ( is_hvm_or_pvh_domain(d) )
     {
         if ( (rc = hvm_domain_initialise(d)) != 0 )
         {
@@ -593,9 +596,9 @@ int arch_domain_create(struct domain *d,
     {
         /* 64-bit PV guest by default. */
         d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
-
+    }
+    if ( !is_hvm_domain(d) )
         spin_lock_init(&d->arch.pv_domain.e820_lock);
-    }
 
     /* initialize default tsc behavior in case tools don't */
     tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0);
@@ -622,9 +625,10 @@ void arch_domain_destroy(struct domain *
 {
     unsigned int i;
 
-    if ( is_hvm_domain(d) )
+    if ( is_hvm_or_pvh_domain(d) )
         hvm_domain_destroy(d);
-    else
+
+    if ( !is_hvm_domain(d) )
         xfree(d->arch.pv_domain.e820);
 
     free_domain_pirqs(d);
@@ -683,6 +687,11 @@ int arch_set_info_guest(
     unsigned int i;
     int rc = 0, compat;
 
+#if 0
+    /* should we allow PV dom0 to create PVH domU's ???? */
+    if ( is_pvh_vcpu(v) && !is_pvh_vcpu(current) )
+        return -EINVAL;
+#endif
     /* The context is a compat-mode one if the target domain is compat-mode;
      * we expect the tools to DTRT even in compat-mode callers. */
     compat = is_pv_32on64_domain(d);
@@ -690,7 +699,7 @@ int arch_set_info_guest(
 #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
     flags = c(flags);
 
-    if ( !is_hvm_vcpu(v) )
+    if ( !is_hvm_or_pvh_vcpu(v) )
     {
         if ( !compat )
         {
@@ -743,7 +752,7 @@ int arch_set_info_guest(
     v->fpu_initialised = !!(flags & VGCF_I387_VALID);
 
     v->arch.flags &= ~TF_kernel_mode;
-    if ( (flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ )
+    if ( (flags & VGCF_in_kernel) || is_hvm_or_pvh_vcpu(v)/*???*/ )
         v->arch.flags |= TF_kernel_mode;
 
     v->arch.vgc_flags = flags;
@@ -752,7 +761,7 @@ int arch_set_info_guest(
     if ( !compat )
     {
         memcpy(&v->arch.user_regs, &c.nat->user_regs, sizeof(c.nat->user_regs));
-        if ( !is_hvm_vcpu(v) )
+        if ( !is_hvm_or_pvh_vcpu(v) )
             memcpy(v->arch.pv_vcpu.trap_ctxt, c.nat->trap_ctxt,
                    sizeof(c.nat->trap_ctxt));
     }
@@ -768,10 +777,13 @@ int arch_set_info_guest(
 
     v->arch.user_regs.eflags |= 2;
 
-    if ( is_hvm_vcpu(v) )
+    if ( is_hvm_or_pvh_vcpu(v) )
     {
         hvm_set_info_guest(v);
-        goto out;
+        if ( is_hvm_vcpu(v) || v->is_initialised )
+            goto out;
+        else
+            goto pvh_skip_pv;
     }
 
     init_int80_direct_trap(v);
@@ -780,7 +792,8 @@ int arch_set_info_guest(
     v->arch.pv_vcpu.iopl = (v->arch.user_regs.eflags >> 12) & 3;
     v->arch.user_regs.eflags &= ~X86_EFLAGS_IOPL;
 
-    /* Ensure real hardware interrupts are enabled. */
+    /* Ensure real hardware interrupts are enabled. Note: PVH may not have
+     * IDT set on all vcpus so don't enable IF for it yet. */
     v->arch.user_regs.eflags |= X86_EFLAGS_IF;
 
     if ( !v->is_initialised )
@@ -876,6 +889,7 @@ int arch_set_info_guest(
         if ( rc != 0 )
             return rc;
 
+pvh_skip_pv:
     if ( !compat )
     {
         cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[3]);
@@ -883,19 +897,26 @@ int arch_set_info_guest(
 
         if ( !cr3_page )
         {
-            destroy_gdt(v);
+            if ( !is_pvh_vcpu(v) )
+                destroy_gdt(v);
             return -EINVAL;
         }
         if ( !paging_mode_refcounts(d)
             && !get_page_type(cr3_page, PGT_base_page_table) )
        {
            put_page(cr3_page);
-            destroy_gdt(v);
+            if ( !is_pvh_vcpu(v) )
+                destroy_gdt(v);
            return -EINVAL;
        }

+        if ( is_pvh_vcpu(v) ) {
+            v->arch.cr3 = page_to_mfn(cr3_page);
+            v->arch.hvm_vcpu.guest_cr[3] = c.nat->ctrlreg[3];
+        }
+
        v->arch.guest_table = pagetable_from_page(cr3_page);
-        if ( c.nat->ctrlreg[1] )
+        if ( c.nat->ctrlreg[1] && !is_pvh_vcpu(v) )
        {
            cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[1]);
            cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
@@ -920,7 +941,8 @@ int arch_set_info_guest(
            }
            else if ( !(flags & VGCF_in_kernel) )
            {
-                destroy_gdt(v);
+                if ( !is_pvh_vcpu(v) )
+                    destroy_gdt(v);
                return -EINVAL;
            }
        }
@@ -961,6 +983,13 @@ int arch_set_info_guest(
 
     update_cr3(v);
 
+    if ( is_pvh_vcpu(v) )
+    {
+        /* guest is bringing up non-boot SMP vcpu */
+        if ( (rc=hvm_pvh_set_vcpu_info(v, c.nat)) != 0 )
+            return rc;
+    }
+
  out:
     if ( flags & VGCF_online )
         clear_bit(_VPF_down, &v->pause_flags);
@@ -991,16 +1020,21 @@ void arch_vcpu_reset(struct vcpu *v)
 static void
 unmap_vcpu_info(struct vcpu *v)
 {
-    unsigned long mfn;
+    unsigned long mfn, *mfnp;
 
-    if ( v->arch.pv_vcpu.vcpu_info_mfn == INVALID_MFN )
+    if ( is_pvh_vcpu(v) )
+        mfnp = &v->arch.hvm_vcpu.hvm_pvh.pvh_vcpu_info_mfn;
+    else
+        mfnp = &v->arch.pv_vcpu.vcpu_info_mfn;
+
+    mfn = *mfnp;
+    if ( mfn == INVALID_MFN )
         return;
 
-    mfn = v->arch.pv_vcpu.vcpu_info_mfn;
     unmap_domain_page_global(v->vcpu_info);
     v->vcpu_info = &dummy_vcpu_info;
-    v->arch.pv_vcpu.vcpu_info_mfn = INVALID_MFN;
+    *mfnp = INVALID_MFN;
 
     put_page_and_type(mfn_to_page(mfn));
 }
@@ -1011,7 +1045,7 @@ unmap_vcpu_info(struct vcpu *v)
  * of memory, and it sets a pending event to make sure that a pending
  * event doesn't get missed.
  */
-static int
+static noinline int
 map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
 {
     struct domain *d = v->domain;
@@ -1019,11 +1053,17 @@ map_vcpu_info(struct vcpu *v, unsigned l
     vcpu_info_t *new_info;
     struct page_info *page;
     int i;
+    unsigned long *mfnp;
+
+    if ( is_pvh_vcpu(v) )
+        mfnp = &v->arch.hvm_vcpu.hvm_pvh.pvh_vcpu_info_mfn;
+    else
+        mfnp = &v->arch.pv_vcpu.vcpu_info_mfn;
 
     if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) )
         return -EINVAL;
 
-    if ( v->arch.pv_vcpu.vcpu_info_mfn != INVALID_MFN )
+    if ( *mfnp != INVALID_MFN )
         return -EINVAL;
 
     /* Run this command on yourself or on other offline VCPUS. */
@@ -1060,7 +1100,7 @@ map_vcpu_info(struct vcpu *v, unsigned l
     }
 
     v->vcpu_info = new_info;
-    v->arch.pv_vcpu.vcpu_info_mfn = page_to_mfn(page);
+    *mfnp = page_to_mfn(page);
 
     /* Set new vcpu_info pointer /before/ setting pending flags. */
     wmb();
@@ -1466,7 +1506,7 @@ static void update_runstate_area(struct
 
 static inline int need_full_gdt(struct vcpu *v)
 {
-    return (!is_hvm_vcpu(v) && !is_idle_vcpu(v));
+    return (!is_hvm_or_pvh_vcpu(v) && !is_idle_vcpu(v));
 }
 
 static void __context_switch(void)
@@ -1593,7 +1633,7 @@ void context_switch(struct vcpu *prev, s
         /* Re-enable interrupts before restoring state which may fault. */
         local_irq_enable();
 
-        if ( !is_hvm_vcpu(next) )
+        if ( !is_hvm_or_pvh_vcpu(next) )
         {
             load_LDT(next);
             load_segments(next);
@@ -1717,7 +1757,7 @@ unsigned long hypercall_create_continuat
         else
             current->arch.hvm_vcpu.hcall_preempted = 1;
 
-        if ( !is_hvm_vcpu(current) ?
+        if ( !is_hvm_or_pvh_vcpu(current) ?
             !is_pv_32on64_vcpu(current) :
             (hvm_guest_x86_mode(current) == 8) )
        {
@@ -2034,7 +2074,7 @@ int domain_relinquish_resources(struct d
        for_each_vcpu ( d, v )
            vcpu_destroy_pagetables(v);

-        if ( !is_hvm_domain(d) )
+        if ( !is_hvm_or_pvh_domain(d) )
        {
            for_each_vcpu ( d, v )
            {
@@ -2109,7 +2149,7 @@ int domain_relinquish_resources(struct d
         BUG();
     }
 
-    if ( is_hvm_domain(d) )
+    if ( is_hvm_or_pvh_domain(d) )
         hvm_domain_relinquish_resources(d);
 
     return 0;
@@ -2190,7 +2230,7 @@ void vcpu_mark_events_pending(struct vcp
     if ( already_pending )
         return;
 
-    if ( is_hvm_vcpu(v) )
+    if ( is_hvm_or_pvh_vcpu(v) )
         hvm_assert_evtchn_irq(v);
     else
         vcpu_kick(v);
diff -r 956e77de73e4 -r e51e48fd1d20 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Fri Jan 11 16:29:49 2013 -0800
+++ b/xen/include/asm-x86/hvm/hvm.h     Fri Jan 11 16:31:33 2013 -0800
@@ -183,6 +183,12 @@ struct hvm_function_table {
     /* Virtual interrupt delivery */
     void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
     int (*virtual_intr_delivery_enabled)(void);
+
+    /* PVH functions */
+    int (*pvh_set_vcpu_info)(struct vcpu *v, struct vcpu_guest_context *ctxtp);
+    int (*pvh_read_descriptor)(unsigned int sel, const struct vcpu *v,
+                 const struct cpu_user_regs *regs, unsigned long *base,
+                 unsigned long *limit, unsigned int *ar);
 };
 
 extern struct hvm_function_table hvm_funcs;
@@ -316,6 +322,19 @@ static inline unsigned long hvm_get_shad
     return hvm_funcs.get_shadow_gs_base(v);
 }
 
+static inline int hvm_pvh_set_vcpu_info(struct vcpu *v,
+                                        struct vcpu_guest_context *ctxtp)
+{
+    return hvm_funcs.pvh_set_vcpu_info(v, ctxtp);
+}
+
+static inline int hvm_pvh_read_descriptor(unsigned int sel,
+              const struct vcpu *v, const struct cpu_user_regs *regs,
+              unsigned long *base, unsigned long *limit, unsigned int *ar)
+{
+    return hvm_funcs.pvh_read_descriptor(sel, v, regs, base, limit, ar);
+}
+
 #define is_viridian_domain(_d) \
     (is_hvm_domain(_d) && ((_d)->arch.hvm_domain.params[HVM_PARAM_VIRIDIAN]))
diff -r 956e77de73e4 -r e51e48fd1d20 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Fri Jan 11 16:29:49 2013 -0800
+++ b/xen/include/asm-x86/hvm/vcpu.h    Fri Jan 11 16:31:33 2013 -0800
@@ -104,6 +104,13 @@ struct nestedvcpu {
 
 #define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)
 
+/* add any PVH specific fields here */
+struct pvh_hvm_vcpu_ext
+{
+    /* Guest-specified relocation of vcpu_info. */
+    unsigned long pvh_vcpu_info_mfn;
+};
+
 struct hvm_vcpu {
     /* Guest control-register and EFER values, just as the guest sees them. */
     unsigned long       guest_cr[5];
@@ -170,6 +177,8 @@ struct hvm_vcpu {
     struct hvm_trap     inject_trap;
 
     struct viridian_vcpu viridian;
+
+    struct pvh_hvm_vcpu_ext hvm_pvh;
 };
 
 #endif /* __ASM_X86_HVM_VCPU_H__ */
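Note: the is_pvh_*/is_hvm_or_pvh_* predicates used throughout this patch are
introduced earlier in the series and are not part of this diff. A rough
sketch of the shape such helpers take, assuming a hypothetical per-domain
PVH flag for illustration (the series' actual definitions may differ):

    /* Sketch only -- d->is_pvh is an assumed flag, not the series' actual field. */
    #define is_pvh_domain(d)        ((d)->is_pvh)
    #define is_pvh_vcpu(v)          (is_pvh_domain((v)->domain))
    #define is_hvm_or_pvh_domain(d) (is_hvm_domain(d) || is_pvh_domain(d))
    #define is_hvm_or_pvh_vcpu(v)   (is_hvm_or_pvh_domain((v)->domain))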
Jan Beulich
2013-Jan-14 11:49 UTC
Re: [RFC PATCH 8/16]: PVH xen: domain creation code changes
>>> On 12.01.13 at 02:57, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> +#if 0
> +    /* should we allow PV dom0 to create PVH domU's ???? */
> +    if ( is_pvh_vcpu(v) && !is_pvh_vcpu(current) )
> +        return -EINVAL;
> +#endif

Any Dom0 ought to be able to construct any kind of guest imo.

> --- a/xen/include/asm-x86/hvm/hvm.h     Fri Jan 11 16:29:49 2013 -0800
> +++ b/xen/include/asm-x86/hvm/hvm.h     Fri Jan 11 16:31:33 2013 -0800
> @@ -183,6 +183,12 @@ struct hvm_function_table {
>      /* Virtual interrupt delivery */
>      void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
>      int (*virtual_intr_delivery_enabled)(void);
> +
> +    /* PVH functions */
> +    int (*pvh_set_vcpu_info)(struct vcpu *v, struct vcpu_guest_context *ctxtp);
> +    int (*pvh_read_descriptor)(unsigned int sel, const struct vcpu *v,
> +                 const struct cpu_user_regs *regs, unsigned long *base,
> +                 unsigned long *limit, unsigned int *ar);
>  };
> 
>  extern struct hvm_function_table hvm_funcs;
> @@ -316,6 +322,19 @@ static inline unsigned long hvm_get_shad
>      return hvm_funcs.get_shadow_gs_base(v);
>  }
> 
> +static inline int hvm_pvh_set_vcpu_info(struct vcpu *v,
> +                                        struct vcpu_guest_context *ctxtp)
> +{
> +    return hvm_funcs.pvh_set_vcpu_info(v, ctxtp);
> +}
> +
> +static inline int hvm_pvh_read_descriptor(unsigned int sel,
> +              const struct vcpu *v, const struct cpu_user_regs *regs,
> +              unsigned long *base, unsigned long *limit, unsigned int *ar)
> +{
> +    return hvm_funcs.pvh_read_descriptor(sel, v, regs, base, limit, ar);
> +}
> +

So you add these hooks, call them unconditionally, yet neither VMX
nor SVM implement them? What's the purpose? Series of patches
are expected to be consistent at each patch boundary.

> --- a/xen/include/asm-x86/hvm/vcpu.h    Fri Jan 11 16:29:49 2013 -0800
> +++ b/xen/include/asm-x86/hvm/vcpu.h    Fri Jan 11 16:31:33 2013 -0800
> @@ -104,6 +104,13 @@ struct nestedvcpu {
> 
>  #define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)
> 
> +/* add any PVH specific fields here */
> +struct pvh_hvm_vcpu_ext
> +{
> +    /* Guest-specified relocation of vcpu_info. */
> +    unsigned long pvh_vcpu_info_mfn;

Isn't that a field equivalent to v->arch.pv_vcpu.vcpu_info_mfn?
Preferably they would be shared then, or if not, having "pvh" in
the containing structure's field name and the field name here is
clearly one too much.

Jan
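One way to keep an intermediate patch like this consistent at its boundary
is to tolerate a not-yet-implemented hook in the wrapper rather than
dereferencing a NULL function pointer. A hedged sketch of such a guard
(illustration only, not what the series ultimately does):

    /* Sketch: fail cleanly if the vendor code has not filled in the hook yet. */
    static inline int hvm_pvh_set_vcpu_info(struct vcpu *v,
                                            struct vcpu_guest_context *ctxtp)
    {
        if ( !hvm_funcs.pvh_set_vcpu_info )
            return -ENOSYS;
        return hvm_funcs.pvh_set_vcpu_info(v, ctxtp);
    }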
Mukesh Rathor
2013-Jan-16 00:50 UTC
Re: [RFC PATCH 8/16]: PVH xen: domain creation code changes
On Mon, 14 Jan 2013 11:49:42 +0000
"Jan Beulich" <JBeulich@suse.com> wrote:

> >>> On 12.01.13 at 02:57, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> > +#if 0
> > +    /* should we allow PV dom0 to create PVH domU's ???? */
> > +    if ( is_pvh_vcpu(v) && !is_pvh_vcpu(current) )
> > +        return -EINVAL;
> > +#endif
> 
> Any Dom0 ought to be able to construct any kind of guest imo.

Agree. Removed the code.

> So you add these hooks, call them unconditionally, yet neither VMX
> nor SVM implement them? What's the purpose? Series of patches
> are expected to be consistent at each patch boundary.

I'm told to keep patch sizes small, so I try to group together
changes. The functions are small/generic enough I figured it would
be OK.

> > --- a/xen/include/asm-x86/hvm/vcpu.h    Fri Jan 11 16:29:49 2013 -0800
> > +++ b/xen/include/asm-x86/hvm/vcpu.h    Fri Jan 11 16:31:33 2013 -0800
> > @@ -104,6 +104,13 @@ struct nestedvcpu {
> > 
> >  #define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)
> > 
> > +/* add any PVH specific fields here */
> > +struct pvh_hvm_vcpu_ext
> > +{
> > +    /* Guest-specified relocation of vcpu_info. */
> > +    unsigned long pvh_vcpu_info_mfn;
> 
> Isn't that a field equivalent to v->arch.pv_vcpu.vcpu_info_mfn?
> Preferably they would be shared then, or if not, having "pvh" in
> the containing structure's field name and the field name here is
> clearly one too much.

No, it's a union, so can't use pv_vcpu.vcpu_info_mfn. I like the
3 char prefix to field name so grep/cscope can find it easily.

Thanks,
Mukesh
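For context on the union point: in struct arch_vcpu the PV and HVM per-vcpu
state share storage, roughly as sketched below (simplified, field lists
abbreviated). A PVH vcpu uses the hvm_vcpu arm, so pv_vcpu.vcpu_info_mfn is
not available to it:

    /* Simplified sketch of the relevant part of struct arch_vcpu. */
    struct arch_vcpu {
        /* ... other fields ... */
        union {
            struct pv_vcpu  pv_vcpu;   /* classic PV guests */
            struct hvm_vcpu hvm_vcpu;  /* HVM, and hence PVH, guests */
        };
        /* ... */
    };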
Jan Beulich
2013-Jan-16 09:57 UTC
Re: [RFC PATCH 8/16]: PVH xen: domain creation code changes
>>> On 16.01.13 at 01:50, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> On Mon, 14 Jan 2013 11:49:42 +0000 "Jan Beulich" <JBeulich@suse.com> wrote:
>> So you add these hooks, call them unconditionally, yet neither VMX
>> nor SVM implement them? What's the purpose? Series of patches
>> are expected to be consistent at each patch boundary.
> 
> I'm told to keep patch sizes small, so I try to group together
> changes. The functions are small/generic enough I figured it would
> be OK.

Sure, but the code needs to be correct at patch boundaries. And
a NULL pointer dereference doesn't count as correct to me, the
more that I don't think the patch set deals with SVM, and hence
there the NULL pointer dereference (at the end of your patch set)
likely has paths reaching it that cannot be easily shown to be
dead under SVM.

>> > --- a/xen/include/asm-x86/hvm/vcpu.h    Fri Jan 11 16:29:49 2013 -0800
>> > +++ b/xen/include/asm-x86/hvm/vcpu.h    Fri Jan 11 16:31:33 2013 -0800
>> > @@ -104,6 +104,13 @@ struct nestedvcpu {
>> > 
>> >  #define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)
>> > 
>> > +/* add any PVH specific fields here */
>> > +struct pvh_hvm_vcpu_ext
>> > +{
>> > +    /* Guest-specified relocation of vcpu_info. */
>> > +    unsigned long pvh_vcpu_info_mfn;
>> 
>> Isn't that a field equivalent to v->arch.pv_vcpu.vcpu_info_mfn?
>> Preferably they would be shared then, or if not, having "pvh" in
>> the containing structure's field name and the field name here is
>> clearly one too much.
> 
> No, it's a union, so can't use pv_vcpu.vcpu_info_mfn. I like the
> 3 char prefix to field name so grep/cscope can find it easily.

Sure, it's a matter of taste to some degree. But I personally
dislike that sort of redundancy (the expressions actually using
this look pretty odd), except when the untagged name is
really very generic (which isn't the case here).

Jan
Mukesh Rathor
2013-Jan-24 23:50 UTC
Re: [RFC PATCH 8/16]: PVH xen: domain creation code changes
On Wed, 16 Jan 2013 09:57:45 +0000
"Jan Beulich" <JBeulich@suse.com> wrote:

> >>> On 16.01.13 at 01:50, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> > On Mon, 14 Jan 2013 11:49:42 +0000 "Jan Beulich" <JBeulich@suse.com> wrote:
> >> So you add these hooks, call them unconditionally, yet neither VMX

No, both are conditional calls:

+    if ( is_pvh_vcpu(v) )
+    {
+        /* guest is bringing up non-boot SMP vcpu */
+        if ( (rc=hvm_pvh_set_vcpu_info(v, c.nat)) != 0 )
+            return rc;
+    }
+

So, if someone applies partial patches and then tries to boot a PVH
guest, it'll hit the NULL ptr.

> Sure, but the code needs to be correct at patch boundaries. And
> a NULL pointer dereference doesn't count as correct to me, the
> more that I don't think the patch set deals with SVM, and hence
> there the NULL pointer dereference (at the end of your patch
> set) likely has paths reaching it that cannot be easily shown to be
> dead under SVM.

Hmm.. I guess in this particular patch then I could just create null
functions and not call the vmx/svm ones, then change that later in
the patch that introduces the actual vmx/svm functions. For SVM,
they will be stubs since SVM is not implemented for PVH right now.
OK, I'll do that.

> >> >  #define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)
> >> > 
> >> > +/* add any PVH specific fields here */
> >> > +struct pvh_hvm_vcpu_ext
> >> > +{
> >> > +    /* Guest-specified relocation of vcpu_info. */
> >> > +    unsigned long pvh_vcpu_info_mfn;
> >> 
> >> Isn't that a field equivalent to v->arch.pv_vcpu.vcpu_info_mfn?
> >> Preferably they would be shared then, or if not, having "pvh" in
> >> the containing structure's field name and the field name here is
> >> clearly one too much.
> > 
> > No, it's a union, so can't use pv_vcpu.vcpu_info_mfn. I like the
> > 3 char prefix to field name so grep/cscope can find it easily.
> 
> Sure, it's a matter of taste to some degree. But I personally
> dislike that sort of redundancy (the expressions actually using
> this look pretty odd), except when the untagged name is
> really very generic (which isn't the case here).

Only if the ptr to struct is defined in a meaningful way, like

    struct pvh_hvm_vcpu_ext *pvhp;

But I rarely see that. Furthermore, some smart guy will add a field
"int size". Good luck grepping/cscoping that! So setting a precedent
to prefix pvh would be a good idea IMO. I changed it to vcpu_info_mfn
anyway.

Thanks,
Mukesh
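The SVM stubs mentioned above would presumably look something like the
following (hypothetical function names and wiring, shown only to illustrate
the idea; the actual later patch may differ):

    /* Sketch: PVH is not implemented for SVM, so the hooks fail cleanly
     * instead of being left as NULL function pointers. */
    static int svm_pvh_set_vcpu_info(struct vcpu *v,
                                     struct vcpu_guest_context *ctxtp)
    {
        return -ENOSYS;
    }

    static int svm_pvh_read_descriptor(unsigned int sel, const struct vcpu *v,
                                       const struct cpu_user_regs *regs,
                                       unsigned long *base,
                                       unsigned long *limit, unsigned int *ar)
    {
        return -ENOSYS;
    }

    /* ... and wired into the SVM hvm_function_table:
     *     .pvh_set_vcpu_info   = svm_pvh_set_vcpu_info,
     *     .pvh_read_descriptor = svm_pvh_read_descriptor,
     */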