Hi,
Really just basic preparation: switch over PAE builds to the new
shadow code, drop old dummy functions, add (fewer) new ones.
  Gerd
Index: xen/include/asm-x86/shadow_public.h
===================================================================
--- xen.orig/include/asm-x86/shadow_public.h	2005-07-28 11:31:39.000000000 +0200
+++ xen/include/asm-x86/shadow_public.h	2005-07-28 14:55:05.000000000 +0200
@@ -21,7 +21,7 @@
 
 #ifndef _XEN_SHADOW_PUBLIC_H
 #define _XEN_SHADOW_PUBLIC_H
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
 #define MFN_PINNED(_x) (frame_table[_x].u.inuse.type_info & PGT_pinned)
 
 extern int alloc_p2m_table(struct domain *d);
@@ -31,10 +31,6 @@ extern void shadow_sync_and_drop_referen
 extern void shadow_drop_references(
       struct domain *d, struct pfn_info *page);
 
-extern void shadow_l4_normal_pt_update(struct domain *d,
-                                       unsigned long pa, l4_pgentry_t l4e,
-                                       struct domain_mmap_cache *cache);
-
 extern int shadow_set_guest_paging_levels(struct domain *d, int levels);
 
 extern void release_out_of_sync_entry(
@@ -56,4 +52,10 @@ struct shadow_ops {
 };
 #endif
 
+#if CONFIG_PAGING_LEVELS >= 4
+extern void shadow_l4_normal_pt_update(struct domain *d,
+                                       unsigned long pa, l4_pgentry_t l4e,
+                                       struct domain_mmap_cache *cache);
+#endif
+
 #endif
Index: xen/include/asm-x86/shadow_64.h
===================================================================
--- xen.orig/include/asm-x86/shadow_64.h	2005-07-28 11:31:39.000000000 +0200
+++ xen/include/asm-x86/shadow_64.h	2005-07-28 14:49:35.000000000 +0200
@@ -85,8 +85,10 @@ static inline int  table_offset_64(unsig
             return  (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
         case 3:
             return  (((va) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1));
+#if CONFIG_PAGING_LEVELS >= 4
         case 4:
             return  (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
+#endif
         default:
             //printk("<table_offset_64> level %d is too big\n", level);
             return -1;
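For reference, table_offset_64() just extracts the page-table index for the
given level from the virtual address; the new guard around the level-4 case
is needed since the L4_PAGETABLE_* constants aren't available in a 3-level
(PAE) build. A minimal standalone sketch of the same decomposition for a
PAE address, using the usual PAE shifts and entry counts rather than the
Xen headers:

#include <stdio.h>

/* Illustrative only: standard PAE constants, not taken from Xen. */
#define L1_SHIFT        12
#define L2_SHIFT        21
#define L3_SHIFT        30
#define L12_ENTRIES     512     /* 9-bit index at L1 and L2 */
#define L3_ENTRIES      4       /* 2-bit index at L3 (PDPT) */

int main(void)
{
    unsigned long va = 0xb7501234UL;   /* arbitrary example address */

    printf("l3=%lu l2=%lu l1=%lu offset=0x%lx\n",
           (va >> L3_SHIFT) & (L3_ENTRIES - 1),
           (va >> L2_SHIFT) & (L12_ENTRIES - 1),
           (va >> L1_SHIFT) & (L12_ENTRIES - 1),
           va & 0xfff);
    return 0;
}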
Index: xen/include/asm-x86/shadow.h
===================================================================
--- xen.orig/include/asm-x86/shadow.h	2005-07-28 11:31:39.000000000 +0200
+++ xen/include/asm-x86/shadow.h	2005-07-28 15:00:49.000000000 +0200
@@ -131,12 +131,12 @@ extern void shadow_l2_normal_pt_update(s
                                        unsigned long pa, l2_pgentry_t l2e,
                                        struct domain_mmap_cache *cache);
 #if CONFIG_PAGING_LEVELS >= 3
+#include <asm/page-guest32.h>
 extern void shadow_l3_normal_pt_update(struct domain *d,
                                        unsigned long pa, l3_pgentry_t l3e,
                                        struct domain_mmap_cache *cache);
 #endif
 #if CONFIG_PAGING_LEVELS >= 4
-#include <asm/page-guest32.h>
 extern void shadow_l4_normal_pt_update(struct domain *d,
                                        unsigned long pa, l4_pgentry_t l4e,
                                        struct domain_mmap_cache *cache);
@@ -631,82 +631,6 @@ static inline void shadow_sync_and_drop_
 }
 #endif
 
-#if CONFIG_PAGING_LEVELS == 3
-/* dummy functions, PAE has no shadow support yet */
-
-static inline void
-__shadow_get_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t *psl2e)
-{
-    BUG();
-}
-
-static inline void
-__shadow_set_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t value)
-{
-    BUG();
-}
-
-static inline void
-__guest_get_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t *pl2e)
-{
-    BUG();
-}
-
-static inline void
-__guest_set_l2e(
-    struct vcpu *v, unsigned long va, l2_pgentry_t value)
-{
-    BUG();
-}
-
-static inline void shadow_drop_references(
-    struct domain *d, struct pfn_info *page)
-{
-    if ( likely(!shadow_mode_refcounts(d)) ||
-         ((page->u.inuse.type_info & PGT_count_mask) == 0) )
-        return;
-    BUG();
-}
-
-static inline void shadow_sync_and_drop_references(
-    struct domain *d, struct pfn_info *page)
-{
-    if ( likely(!shadow_mode_refcounts(d)) )
-        return;
-    BUG();
-}
-
-static inline int l1pte_write_fault(
-    struct vcpu *v, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p,
-    unsigned long va)
-{
-    BUG();
-    return 42;
-}
-
-static inline int l1pte_read_fault(
-    struct domain *d, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
-{
-    BUG();
-    return 42;
-}
-
-void static inline
-shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
-{
-    BUG();
-}
-
-static inline unsigned long gva_to_gpa(unsigned long gva)
-{
-    BUG();
-    return 42;
-}
-#endif
-    
 /************************************************************************/
 
 /*
Index: xen/arch/x86/vmx_platform.c
===================================================================
--- xen.orig/arch/x86/vmx_platform.c	2005-07-28 11:31:39.000000000 +0200
+++ xen/arch/x86/vmx_platform.c	2005-07-28 15:04:13.000000000 +0200
@@ -32,7 +32,7 @@
 #include <xen/lib.h>
 #include <xen/sched.h>
 #include <asm/current.h>
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
 #include <asm/shadow_64.h>
 #endif
 #ifdef CONFIG_VMX
Index: xen/arch/x86/Makefile
===================================================================
--- xen.orig/arch/x86/Makefile	2005-07-28 11:31:39.000000000 +0200
+++ xen/arch/x86/Makefile	2005-07-28 14:31:44.000000000 +0200
@@ -13,11 +13,18 @@ OBJS := $(subst cpu/centaur.o,,$(OBJS))
 OBJS := $(subst cpu/cyrix.o,,$(OBJS))
 OBJS := $(subst cpu/rise.o,,$(OBJS))
 OBJS := $(subst cpu/transmeta.o,,$(OBJS))
-OBJS := $(subst shadow32.o,,$(OBJS))
-else
-OBJS := $(subst shadow.o,,$(OBJS))
-OBJS := $(subst shadow_public.o,,$(OBJS))
-OBJS := $(subst shadow_xxx.o,,$(OBJS))
+endif
+
+OBJS := $(patsubst shadow%.o,,$(OBJS))	# drop all
+ifeq ($(TARGET_SUBARCH),x86_64) 
+ OBJS += shadow.o shadow_public.o	# x86_64: new code
+endif
+ifeq ($(TARGET_SUBARCH),x86_32) 
+ ifneq ($(pae),n)
+  OBJS += shadow.o shadow_public.o	# x86_32p: new code
+ else
+  OBJS += shadow32.o			# x86_32: old code
+ endif
 endif
 
 OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
Index: xen/arch/x86/shadow.c
===================================================================
--- xen.orig/arch/x86/shadow.c	2005-07-28 11:31:39.000000000 +0200
+++ xen/arch/x86/shadow.c	2005-07-28 15:06:59.000000000 +0200
@@ -41,7 +41,13 @@ extern void free_shadow_pages(struct dom
 static void mark_shadows_as_reflecting_snapshot(struct domain *d, unsigned long gpfn);
 #endif
 
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS == 3
+#include <asm/shadow_64.h>
+static unsigned long shadow_l3_table(
+    struct domain *d, unsigned long gpfn, unsigned long gmfn);
+#endif
+
+#if CONFIG_PAGING_LEVELS == 4
 #include <asm/shadow_64.h>
 static unsigned long shadow_l4_table(
     struct domain *d, unsigned long gpfn, unsigned long gmfn);
@@ -1833,7 +1839,7 @@ static void shadow_update_pagetables(str
     unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
     unsigned long smfn, old_smfn;
 
-#if defined (__i386__)
+#if CONFIG_PAGING_LEVELS == 2
     unsigned long hl2mfn;
 #endif
   
@@ -1890,7 +1896,7 @@ static void shadow_update_pagetables(str
         v->arch.shadow_vtable = map_domain_page(smfn);
     }
 
-#if defined (__i386__)
+#if CONFIG_PAGING_LEVELS == 2
     /*
      * arch.hl2_vtable
      */
@@ -1936,6 +1942,10 @@ static void shadow_update_pagetables(str
         local_flush_tlb();
     }
 #endif
+
+#if CONFIG_PAGING_LEVELS == 3
+    /* FIXME: PAE code to be written */
+#endif
 }
 
 struct shadow_ops MODE_A_HANDLER = {
@@ -2427,6 +2437,7 @@ static unsigned long shadow_l3_table(
   struct domain *d, unsigned long gpfn, unsigned long gmfn)
 {
     BUG();                      /* not implemenated yet */
+    return 42;
 }
 #endif
 
Index: xen/arch/x86/vmx.c
===================================================================
--- xen.orig/arch/x86/vmx.c	2005-07-28 15:03:18.000000000 +0200
+++ xen/arch/x86/vmx.c	2005-07-28 15:03:40.000000000 +0200
@@ -38,7 +38,7 @@
 #include <asm/vmx_vmcs.h>
 #include <asm/vmx_intercept.h>
 #include <asm/shadow.h>
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
 #include <asm/shadow_64.h>
 #endif
 
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Gerd Knorr wrote:
> Hi,
>
> Really just basic preparation: switch over PAE builds to the new
> shadow code, drop old dummy functions, add (fewer) new ones.

Looks good to me.

Jun
---
Intel Open Source Technology Center
> Gerd Knorr wrote:
> > Hi,
> >
> > Really just basic preparation: switch over PAE builds to the new
> > shadow code, drop old dummy functions, add (fewer) new ones.
>
> Looks good to me.

I think there's rather more opportunity for sharing code than we're
exposing there.

I guess we get it working then do another pass of refactoring...

Ian
On Thu, Jul 28, 2005 at 03:53:17PM +0100, Ian Pratt wrote:
> I think there's rather more opportunity for sharing code than we're
> exposing there.
>
> I guess we get it working then do another pass of refactoring...

I have to browse through the code anyway, if I find some opportunities
along the way while implementing shadow-pae I'll fix them up ;)

  Gerd

--
panic("it works"); /* avoid being flooded with debug messages */