"Ian Pratt" <m+Ian.Pratt@cl.cam.ac.uk> writes:
> * cleanup use of 'unsigned long' that exists in some
>   handling of PTEs.
> We've agreed a strategy of doing this with Gerd
Next version of this cleanup patch; comments are welcome.
Handle with care for now: it builds and boots domain0 fine for me,
but it will break the x86_64 build and isn't tested much otherwise.
enjoy,
Gerd
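
For anyone skimming the diff before reading it in full: the patch replaces raw
"unsigned long" PTE arithmetic with a small accessor API on the typed
l1_pgentry_t/l2_pgentry_t structs. The stand-alone sketch below is my own
illustration of that idea (not part of the patch, and the constants are
simplified stand-ins); the real macro names and definitions are in the
include/asm-x86/x86_32/page.h hunk further down.

/*
 * Sketch only: typed PTE accessors vs. open-coded shifts and masks.
 * PAGE_SHIFT and the _PAGE_* flags are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT    12
#define _PAGE_PRESENT 0x001u
#define _PAGE_RW      0x002u
#define FLAGS_MASK    ((1u << PAGE_SHIFT) - 1)

typedef struct { uint32_t l1_lo; } l1_pgentry_t;

/* accessor style modelled on the patch's l1_pg_* macros */
#define l1_pg_get_pfn(_x)         ((_x).l1_lo >> PAGE_SHIFT)
#define l1_pg_get_flags(_x)       ((_x).l1_lo & FLAGS_MASK)
#define l1_pg_init_pfn(_x,_p,_f)  { (_x).l1_lo = ((_p) << PAGE_SHIFT) | (uint32_t)(_f); }
#define l1_pg_bclr_flags(_x,_f)   { (_x).l1_lo &= ~(uint32_t)(_f); }

int main(void)
{
    /* old style: callers shift and mask an unsigned long by hand */
    unsigned long old_pte = (0x1234UL << PAGE_SHIFT) | _PAGE_PRESENT | _PAGE_RW;
    unsigned long old_pfn = old_pte >> PAGE_SHIFT;

    /* new style: the entry is opaque, only the accessors know the layout */
    l1_pgentry_t pte;
    l1_pg_init_pfn(pte, 0x1234u, _PAGE_PRESENT | _PAGE_RW);
    l1_pg_bclr_flags(pte, _PAGE_RW);   /* write-protect, as ptwr_flush() does */

    printf("old pfn=%lx  new pfn=%lx flags=%lx\n", old_pfn,
           (unsigned long)l1_pg_get_pfn(pte),
           (unsigned long)l1_pg_get_flags(pte));
    return 0;
}

The point of the exercise is that once every read and write goes through such
macros, the entry representation can change (e.g. wider entries for PAE, which
several FIXMEs in the patch already point at) without touching the callers.
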
Index: xen/arch/x86/shadow.c
===================================================================
--- xen.orig/arch/x86/shadow.c 2005-03-21 12:21:48.000000000 +0100
+++ xen/arch/x86/shadow.c 2005-03-22 13:15:05.956121831 +0100
@@ -512,14 +512,13 @@ unsigned long shadow_l2_table(
memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
&idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
-    spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry((gmfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
-    spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
-    spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry(__pa(page_get_owner(
-            &frame_table[gmfn])->arch.mm_perdomain_pt) |
-            __PAGE_HYPERVISOR);
+    l2_pg_init_pfn(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT],
+                   gmfn, __PAGE_HYPERVISOR);
+    l2_pg_init_pfn(spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT],
+                   spfn, __PAGE_HYPERVISOR);
+    l2_pg_init_phys(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT],
+                    __pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt),
+                    __PAGE_HYPERVISOR);
unmap_domain_mem(spl2e);
}
@@ -533,13 +532,15 @@ static void shadow_map_l1_into_current_l
{
struct exec_domain *ed = current;
struct domain *d = ed->domain;
- unsigned long *gpl1e, *spl1e, gl2e, sl2e, gl1pfn, sl1mfn, sl1ss;
+ l1_pgentry_t *gpl1e, *spl1e;
+ l2_pgentry_t gl2e, sl2e;
+ unsigned long gl1pfn, sl1mfn, sl1ss;
struct pfn_info *sl1mfn_info;
int i;
__guest_get_l2e(ed, va, &gl2e);
- gl1pfn = gl2e >> PAGE_SHIFT;
+ gl1pfn = l2_pg_get_pfn(gl2e);
sl1ss = __shadow_status(d, gl1pfn);
if ( !(sl1ss & PSH_shadowed) )
@@ -562,10 +563,10 @@ static void shadow_map_l1_into_current_l
__guest_set_l2e(ed, va, gl2e);
__shadow_set_l2e(ed, va, sl2e);
- gpl1e = (unsigned long *) &(linear_pg_table[
+ gpl1e = &(linear_pg_table[
(va>>L1_PAGETABLE_SHIFT) & ~(L1_PAGETABLE_ENTRIES-1)]);
- spl1e = (unsigned long *) &(shadow_linear_pg_table[
+ spl1e = &(shadow_linear_pg_table[
(va>>L1_PAGETABLE_SHIFT) & ~(L1_PAGETABLE_ENTRIES-1)]);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
@@ -585,7 +586,9 @@ static void shadow_map_l1_into_current_l
void shadow_invlpg(struct exec_domain *ed, unsigned long va)
{
- unsigned long gpte, spte;
+ l1_pgentry_t gpte, spte;
+ l1_pgentry_t zero;
+ l1_pg_init_0(zero);
ASSERT(shadow_mode_enabled(ed->domain));
@@ -593,31 +596,33 @@ void shadow_invlpg(struct exec_domain *e
* XXX KAF: Why is this set-to-zero required?
* Why, on failure, must we bin all our shadow state?
*/
- if (__put_user(0L, (unsigned long *)
- &shadow_linear_pg_table[va >> PAGE_SHIFT])) {
+ if (__copy_to_user(&shadow_linear_pg_table[va >> PAGE_SHIFT],
+ &zero, sizeof(zero))) {
vmx_shadow_clear_state(ed->domain);
return;
}
- if (__get_user(gpte, (unsigned long *)
- &linear_pg_table[va >> PAGE_SHIFT])) {
+ if (__copy_from_user(&gpte,
+ &linear_pg_table[va >> PAGE_SHIFT],
+ sizeof(gpte))) {
return;
}
l1pte_propagate_from_guest(ed->domain, &gpte, &spte);
- if (__put_user(spte, (unsigned long *)
- &shadow_linear_pg_table[va >> PAGE_SHIFT])) {
+ if (__copy_to_user(&shadow_linear_pg_table[va >> PAGE_SHIFT],
+ &spte, sizeof(spte))) {
return;
}
}
int shadow_fault(unsigned long va, struct xen_regs *regs)
{
- unsigned long gpte, spte = 0;
+ l1_pgentry_t gpte, spte;
struct exec_domain *ed = current;
struct domain *d = ed->domain;
+ l1_pg_init_0(spte);
SH_VVLOG("shadow_fault( va=%p, code=%lu )", va,
regs->error_code );
check_pagetable(d, ed->arch.guest_table, "pre-sf");
@@ -626,20 +631,21 @@ int shadow_fault(unsigned long va, struc
* STEP 1. A fast-reject set of checks with no locking.
*/
- if ( unlikely(__get_user(gpte, (unsigned long *)
- &linear_pg_table[va >> PAGE_SHIFT])) )
+ if ( unlikely(__copy_from_user(&gpte,
+ &linear_pg_table[va >>
PAGE_SHIFT],
+ sizeof(gpte))) )
{
SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
return 0;
}
- if ( !(gpte & _PAGE_PRESENT) )
+ if ( !(l1_pg_get_flags(gpte) & _PAGE_PRESENT) )
{
SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte
);
return 0;
}
- if ( (regs->error_code & 2) && !(gpte & _PAGE_RW) )
+ if ( (regs->error_code & 2) && !(l1_pg_get_flags(gpte)
& _PAGE_RW) )
{
/* Write fault on a read-only mapping. */
return 0;
@@ -651,15 +657,16 @@ int shadow_fault(unsigned long va, struc
shadow_lock(d);
- if ( unlikely(__get_user(gpte, (unsigned long *)
- &linear_pg_table[va >> PAGE_SHIFT])) )
+ if ( unlikely(__copy_from_user(&gpte,
+ &linear_pg_table[va >>
PAGE_SHIFT],
+ sizeof(gpte))) )
{
SH_VVLOG("shadow_fault - EXIT: read gpte faulted2" );
shadow_unlock(d);
return 0;
}
- if ( unlikely(!(gpte & _PAGE_PRESENT)) )
+ if ( unlikely(!(l1_pg_get_flags(gpte) & _PAGE_PRESENT)) )
{
SH_VVLOG("shadow_fault - EXIT: gpte not present2 (%lx)",gpte
);
shadow_unlock(d);
@@ -669,7 +676,7 @@ int shadow_fault(unsigned long va, struc
/* Write fault? */
if ( regs->error_code & 2 )
{
- if ( unlikely(!(gpte & _PAGE_RW)) )
+ if ( unlikely(!(l1_pg_get_flags(gpte) & _PAGE_RW)) )
{
/* Write fault on a read-only mapping. */
SH_VVLOG("shadow_fault - EXIT: wr fault on RO page
(%lx)", gpte);
@@ -689,20 +696,21 @@ int shadow_fault(unsigned long va, struc
*/
/* XXX Watch out for read-only L2 entries! (not used in Linux). */
- if ( unlikely(__put_user(gpte, (unsigned long *)
- &linear_pg_table[va >> PAGE_SHIFT])) )
+ if ( unlikely(__copy_to_user(&linear_pg_table[va >> PAGE_SHIFT],
+ &gpte, sizeof(gpte))) )
domain_crash();
/*
* Update of shadow PTE can fail because the L1 p.t. is not shadowed,
* or because the shadow isn't linked into this shadow L2 p.t.
*/
- if ( unlikely(__put_user(spte, (unsigned long *)
- &shadow_linear_pg_table[va >>
PAGE_SHIFT])) )
+ if ( unlikely(__copy_to_user(&shadow_linear_pg_table[va >>
PAGE_SHIFT],
+ &spte, sizeof(spte))) )
{
- SH_VVLOG("3: not shadowed/mapped gpte=%p spte=%p", gpte,
spte);
+ SH_VVLOG("3: not shadowed/mapped gpte=%p spte=%p",
+ l1_pg_get_value(hpte), l1_pg_get_value(spte));
shadow_map_l1_into_current_l2(va);
- shadow_linear_pg_table[va >> PAGE_SHIFT] = mk_l1_pgentry(spte);
+ shadow_linear_pg_table[va >> PAGE_SHIFT] = spte;
}
perfc_incrc(shadow_fixup_count);
@@ -716,17 +724,18 @@ int shadow_fault(unsigned long va, struc
void shadow_l1_normal_pt_update(
- unsigned long pa, unsigned long gpte,
+ unsigned long pa, l1_pgentry_t gpte,
unsigned long *prev_smfn_ptr,
l1_pgentry_t **prev_spl1e_ptr)
{
- unsigned long smfn, spte, prev_smfn = *prev_smfn_ptr;
+ l1_pgentry_t spte;
+ unsigned long smfn, prev_smfn = *prev_smfn_ptr;
l1_pgentry_t *spl1e, *prev_spl1e = *prev_spl1e_ptr;
/* N.B. To get here, we know the l1 page *must* be shadowed. */
SH_VVLOG("shadow_l1_normal_pt_update pa=%p, gpte=%p, "
"prev_smfn=%p, prev_spl1e=%p",
- pa, gpte, prev_smfn, prev_spl1e);
+ pa, l1_pgentry_val(gpte), prev_smfn, prev_spl1e);
smfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) &
PSH_pfn_mask;
@@ -738,23 +747,25 @@ void shadow_l1_normal_pt_update(
{
if ( prev_spl1e != NULL )
unmap_domain_mem( prev_spl1e );
- spl1e = (l1_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
+ spl1e = map_domain_mem(smfn << PAGE_SHIFT);
*prev_smfn_ptr = smfn;
*prev_spl1e_ptr = spl1e;
}
l1pte_propagate_from_guest(current->domain, &gpte, &spte);
- spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = mk_l1_pgentry(spte);
+ spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = spte;
}
-void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde)
+void shadow_l2_normal_pt_update(unsigned long pa, l2_pgentry_t gpde)
{
- unsigned long sl2mfn, spde = 0;
- l2_pgentry_t *spl2e;
+ unsigned long sl2mfn;
+ l2_pgentry_t *spl2e, spde;
unsigned long sl1mfn;
+ l2_pg_init_0(spde);
/* N.B. To get here, we know the l2 page *must* be shadowed. */
- SH_VVLOG("shadow_l2_normal_pt_update pa=%p, gpde=%p",pa,gpde);
+ SH_VVLOG("shadow_l2_normal_pt_update pa=%p, gpde=%p",pa,
+ l1_pgentry_t(gpde));
sl2mfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) &
PSH_pfn_mask;
@@ -762,15 +773,16 @@ void shadow_l2_normal_pt_update(unsigned
* Only propagate to shadow if _PAGE_ACCESSED is set in the guest.
* Otherwise, to ensure coherency, we blow away the existing shadow value.
*/
- if ( gpde & _PAGE_ACCESSED )
+ if ( l2_pg_get_flags(gpde) & _PAGE_ACCESSED )
{
- sl1mfn = (gpde & _PAGE_PRESENT) ?
- __shadow_status(current->domain, gpde >> PAGE_SHIFT) : 0;
+ sl1mfn = (l2_pg_get_flags(gpde) & _PAGE_PRESENT)
+ ? __shadow_status(current->domain, l2_pg_get_pfn(gpde))
+ : 0;
l2pde_general(current->domain, &gpde, &spde, sl1mfn);
}
- spl2e = (l2_pgentry_t *)map_domain_mem(sl2mfn << PAGE_SHIFT);
- spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = mk_l2_pgentry(spde);
+ spl2e = map_domain_mem(sl2mfn << PAGE_SHIFT);
+ spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = spde;
unmap_domain_mem(spl2e);
}
Index: xen/include/asm-x86/shadow.h
===================================================================
--- xen.orig/include/asm-x86/shadow.h 2005-03-21 12:21:48.000000000 +0100
+++ xen/include/asm-x86/shadow.h 2005-03-22 13:04:31.849138841 +0100
@@ -42,9 +42,9 @@ extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
extern int shadow_fault(unsigned long va, struct xen_regs *regs);
extern void shadow_l1_normal_pt_update(
- unsigned long pa, unsigned long gpte,
+ unsigned long pa, l1_pgentry_t gpte,
unsigned long *prev_spfn_ptr, l1_pgentry_t **prev_spl1e_ptr);
-extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde);
+extern void shadow_l2_normal_pt_update(unsigned long pa, l2_pgentry_t gpde);
extern void unshadow_table(unsigned long gpfn, unsigned int type);
extern int shadow_mode_enable(struct domain *p, unsigned int mode);
extern void free_shadow_state(struct domain *d);
@@ -100,10 +100,10 @@ translate_gpfn_to_mfn(struct domain *rd,
unmap_domain_mem( l2_table );
- if ( l2_pgentry_val(l2_entry) == 0 )
+ if ( l2_pg_get_value(l2_entry) == 0 )
goto unlock_out;
- ma_of_l1_table = l2_pgentry_to_phys( l2_entry );
+ ma_of_l1_table = l2_pg_get_phys( l2_entry );
l1_table = (l1_pgentry_t *) map_domain_mem( ma_of_l1_table );
pte = l1_table[ (gpfn >> (L1_PAGETABLE_SHIFT - PAGE_SHIFT))
&
@@ -111,7 +111,7 @@ translate_gpfn_to_mfn(struct domain *rd,
unmap_domain_mem( l1_table );
- mfn = l1_pgentry_to_pfn(pte);
+ mfn = l1_pg_get_pfn(pte);
unlock_out:
shadow_unlock(rd);
@@ -178,55 +178,53 @@ struct shadow_status {
// BUG: mafetter: this assumes ed == current, so why pass ed?
static inline void __shadow_get_l2e(
- struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
+ struct exec_domain *ed, unsigned long va, l2_pgentry_t *sl2e)
{
if ( !likely(shadow_mode_enabled(ed->domain)) )
BUG();
if ( shadow_mode_translate(ed->domain) )
- *sl2e = l2_pgentry_val(
- ed->arch.shadow_vtable[l2_table_offset(va)]);
+ *sl2e = ed->arch.shadow_vtable[l2_table_offset(va)];
else
- *sl2e = l2_pgentry_val(
- shadow_linear_l2_table[l2_table_offset(va)]);
+ *sl2e = shadow_linear_l2_table[l2_table_offset(va)];
}
static inline void __shadow_set_l2e(
- struct exec_domain *ed, unsigned long va, unsigned long value)
+ struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
{
if ( !likely(shadow_mode_enabled(ed->domain)) )
BUG();
if ( shadow_mode_translate(ed->domain) )
- ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
+ ed->arch.shadow_vtable[l2_table_offset(va)] = value;
else
- shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
+ shadow_linear_l2_table[l2_table_offset(va)] = value;
}
static inline void __guest_get_l2e(
- struct exec_domain *ed, unsigned long va, unsigned long *l2e)
+ struct exec_domain *ed, unsigned long va, l2_pgentry_t *l2e)
{
*l2e = ( shadow_mode_translate(ed->domain) ) ?
- l2_pgentry_val(ed->arch.guest_vtable[l2_table_offset(va)]) :
- l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
+ ed->arch.guest_vtable[l2_table_offset(va)] :
+ linear_l2_table[l2_table_offset(va)];
}
static inline void __guest_set_l2e(
- struct exec_domain *ed, unsigned long va, unsigned long value)
+ struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
{
if ( shadow_mode_translate(ed->domain) )
{
unsigned long pfn;
- pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
-        ed->arch.hl2_vtable[l2_table_offset(va)] =
-            mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ pfn = phys_to_machine_mapping(l2_pg_get_pfn(value));
+ l2_pg_init_pfn(ed->arch.hl2_vtable[l2_table_offset(va)],
+ pfn, __PAGE_HYPERVISOR);
- ed->arch.guest_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
+ ed->arch.guest_vtable[l2_table_offset(va)] = value;
}
else
{
- linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
+ linear_l2_table[l2_table_offset(va)] = value;
}
}
@@ -288,76 +286,79 @@ static inline int mark_dirty(struct doma
/************************************************************************/
static inline void l1pte_write_fault(
- struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
+ struct domain *d, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
{
- unsigned long gpte = *gpte_p;
- unsigned long spte = *spte_p;
- unsigned long pfn = gpte >> PAGE_SHIFT;
+ l1_pgentry_t gpte = *gpte_p;
+ l1_pgentry_t spte = *spte_p;
+ unsigned long pfn = l1_pg_get_pfn(gpte);
unsigned long mfn = __gpfn_to_mfn(d, pfn);
- ASSERT(gpte & _PAGE_RW);
- gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
+ ASSERT(l1_pg_get_flags(gpte) & _PAGE_RW);
+ l1_pg_bset_flags(gpte, _PAGE_DIRTY | _PAGE_ACCESSED);
if ( shadow_mode_log_dirty(d) )
__mark_dirty(d, pfn);
- spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
+ l1_pg_init_pfn(spte, mfn, l1_pg_get_flags(gpte));
- SH_VVLOG("l1pte_write_fault: updating spte=0x%p gpte=0x%p", spte,
gpte);
+ SH_VVLOG("l1pte_write_fault: updating spte=0x%p gpte=0x%p",
+ l1_pg_get_value(spte), l1_pg_get_value(gpte));
*gpte_p = gpte;
*spte_p = spte;
}
static inline void l1pte_read_fault(
- struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
+ struct domain *d, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
{
- unsigned long gpte = *gpte_p;
- unsigned long spte = *spte_p;
- unsigned long pfn = gpte >> PAGE_SHIFT;
+ l1_pgentry_t gpte = *gpte_p;
+ l1_pgentry_t spte = *spte_p;
+ unsigned long pfn = l1_pg_get_pfn(gpte);
unsigned long mfn = __gpfn_to_mfn(d, pfn);
- gpte |= _PAGE_ACCESSED;
- spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
+ l1_pg_bset_flags(gpte, _PAGE_ACCESSED);
+ l1_pg_init_pfn(spte, mfn, l1_pg_get_flags(gpte));
- if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
- spte &= ~_PAGE_RW;
+ if ( shadow_mode_log_dirty(d) || !(l1_pg_get_flags(gpte) & _PAGE_DIRTY)
)
+ l1_pg_bclr_flags(spte, _PAGE_RW);
- SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p", spte,
gpte);
+ SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p",
+ l1_pg_get_value(spte), l1_pg_get_value(gpte));
*gpte_p = gpte;
*spte_p = spte;
}
static inline void l1pte_propagate_from_guest(
- struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
+ struct domain *d, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
{
- unsigned long gpte = *gpte_p;
- unsigned long spte = *spte_p;
- unsigned long pfn = gpte >> PAGE_SHIFT;
+ l1_pgentry_t gpte = *gpte_p;
+ l1_pgentry_t spte = *spte_p;
+ unsigned long pfn = l1_pg_get_pfn(gpte);
unsigned long mfn = __gpfn_to_mfn(d, pfn);
#if SHADOW_VERBOSE_DEBUG
- unsigned long old_spte = spte;
+ l1_pgentry_t old_spte = spte;
#endif
/* Use 1:1 page table to identify MMIO address space */
- if ( shadow_mode_external(d) && mmio_space(gpte) ) {
- *spte_p = 0;
+ if ( shadow_mode_external(d) && mmio_space(l1_pg_get_phys(gpte)) )
{
+ l1_pg_init_0(*spte_p);
return;
}
- spte = 0;
- if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
+ l1_pg_init_0(spte);
+ if ( (l1_pg_get_flags(gpte) & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
(_PAGE_PRESENT|_PAGE_ACCESSED) ) {
- spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
+ l1_pg_init_pfn(spte, mfn, l1_pg_get_flags(gpte));
- if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
- spte &= ~_PAGE_RW;
+ if ( shadow_mode_log_dirty(d) || !(l1_pg_get_flags(gpte) &
_PAGE_DIRTY) )
+ l1_pg_bclr_flags(spte, _PAGE_RW);
}
#if SHADOW_VERBOSE_DEBUG
- if ( old_spte || spte || gpte )
- SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p,
new spte=0x%p ", gpte, old_spte, spte);
+ if ( l1_pg_get_value(old_spte) || l1_pg_get_value(spte) ||
l1_pg_get_value(gpte) )
+ SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p,
new spte=0x%p ",
+ l1_pg_get_value(gpte), l1_pg_get_value(old_spte), l1_pg_get_value(spte));
#endif
*gpte_p = gpte;
@@ -368,28 +369,29 @@ static inline void l1pte_propagate_from_
static inline void l2pde_general(
struct domain *d,
- unsigned long *gpde_p,
- unsigned long *spde_p,
+ l2_pgentry_t *gpde_p,
+ l2_pgentry_t *spde_p,
unsigned long sl1mfn)
{
- unsigned long gpde = *gpde_p;
- unsigned long spde = *spde_p;
+ l2_pgentry_t gpde = *gpde_p;
+ l2_pgentry_t spde = *spde_p;
- spde = 0;
+ l2_pg_init_0(spde);
if ( sl1mfn != 0 )
{
- spde = (gpde & ~PAGE_MASK) | (sl1mfn << PAGE_SHIFT) |
- _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
- gpde |= _PAGE_ACCESSED; /* N.B. PDEs do not have a dirty bit. */
+ l2_pg_init_pfn(spde, sl1mfn, l2_pg_get_flags(gpde)
+ | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY);
+ l2_pg_bset_flags(gpde, _PAGE_ACCESSED); /* N.B. PDEs do not have a
dirty bit. */
/* Detect linear p.t. mappings and write-protect them. */
        if ( (frame_table[sl1mfn].u.inuse.type_info & PGT_type_mask) ==
             PGT_l2_page_table )
{
- if ( !shadow_mode_translate(d) )
- spde = gpde & ~_PAGE_RW;
-
+ if ( !shadow_mode_translate(d) ) {
+ spde = gpde;
+ l2_pg_bclr_flags(spde,_PAGE_RW);
+ }
}
}
@@ -725,39 +727,44 @@ static inline void set_shadow_status(
shadow_audit(d, 0);
}
-static inline unsigned long gva_to_gpte(unsigned long gva)
+static inline l1_pgentry_t gva_to_gpte(unsigned long gva)
{
- unsigned long gpde, gpte, pfn, index;
+ l1_pgentry_t gpte;
+ l2_pgentry_t gpde;
+ unsigned long pfn, index;
struct exec_domain *ed = current;
__guest_get_l2e(ed, gva, &gpde);
- if (!(gpde & _PAGE_PRESENT))
- return 0;
+ if (!(l2_pg_get_flags(gpde) & _PAGE_PRESENT)) {
+ l1_pg_init_0(gpte);
+ return gpte;
+ }
index = l2_table_offset(gva);
- if (!l2_pgentry_val(ed->arch.hl2_vtable[index])) {
- pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
- ed->arch.hl2_vtable[index] =
- mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ if (!l2_pg_get_value(ed->arch.hl2_vtable[index])) {
+ pfn = phys_to_machine_mapping(l2_pg_get_pfn(gpde));
+ l2_pg_init_pfn(ed->arch.hl2_vtable[index], pfn,
+ __PAGE_HYPERVISOR);
}
- if ( unlikely(__get_user(gpte, (unsigned long *)
- &linear_pg_table[gva >> PAGE_SHIFT])) )
- return 0;
+ if (unlikely(__copy_from_user(&gpte,
+ &linear_pg_table[gva >> PAGE_SHIFT],
+ sizeof(gpte))))
+ l1_pg_init_0(gpte);
return gpte;
}
static inline unsigned long gva_to_gpa(unsigned long gva)
{
- unsigned long gpte;
+ l1_pgentry_t gpte;
gpte = gva_to_gpte(gva);
- if ( !(gpte & _PAGE_PRESENT) )
+ if ( !(l1_pg_get_flags(gpte) & _PAGE_PRESENT) )
return 0;
- return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK);
+ return (l1_pg_get_phys(gpte) + (gva & ~PAGE_MASK));
}
static inline void hl2_table_invalidate(struct exec_domain *ed)
@@ -809,20 +816,18 @@ static inline void __update_pagetables(s
ed->arch.hl2_vtable = map_domain_mem(hl2mfn << PAGE_SHIFT);
offset = l2_table_offset(LINEAR_PT_VIRT_START);
- if ( hl2mfn != (l2_pgentry_val(mpl2e[offset]) >> PAGE_SHIFT) )
+ if ( hl2mfn != (l2_pg_get_pfn(mpl2e[offset])) )
{
-            mpl2e[offset] =
-                mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ l2_pg_init_pfn(mpl2e[offset], hl2mfn, __PAGE_HYPERVISOR);
need_flush = 1;
}
if ( shadow_mode_external(d ) )
{
offset = l2_table_offset(SH_LINEAR_PT_VIRT_START);
- if ( smfn != (l2_pgentry_val(mpl2e[offset]) >> PAGE_SHIFT) )
+ if ( smfn != (l2_pg_get_pfn(mpl2e[offset])) )
{
-            mpl2e[offset] =
-                mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ l2_pg_init_pfn(mpl2e[offset], smfn, __PAGE_HYPERVISOR);
need_flush = 1;
}
}
Index: xen/arch/x86/vmx.c
===================================================================
--- xen.orig/arch/x86/vmx.c 2005-03-21 12:21:47.000000000 +0100
+++ xen/arch/x86/vmx.c 2005-03-22 13:15:39.480675433 +0100
@@ -107,7 +107,8 @@ static void inline __update_guest_eip(un
static int vmx_do_page_fault(unsigned long va, struct xen_regs *regs)
{
unsigned long eip;
- unsigned long gpte, gpa;
+ l1_pgentry_t gpte;
+ unsigned long gpa; /* FIXME: PAE */
int result;
#if VMX_DEBUG
@@ -130,9 +131,9 @@ static int vmx_do_page_fault(unsigned lo
}
gpte = gva_to_gpte(va);
- if (!(gpte & _PAGE_PRESENT) )
+ if (!(l1_pg_get_flags(gpte) & _PAGE_PRESENT) )
return 0;
- gpa = (gpte & PAGE_MASK) + (va & ~PAGE_MASK);
+ gpa = l1_pg_get_phys(gpte) + (va & ~PAGE_MASK);
/* Use 1:1 page table to identify MMIO address space */
if (mmio_space(gpa))
@@ -282,8 +283,7 @@ static void vmx_vmexit_do_invlpg(unsigne
*/
shadow_invlpg(ed, va);
index = l2_table_offset(va);
- ed->arch.hl2_vtable[index] =
- mk_l2_pgentry(0); /* invalidate pgd cache */
+ l2_pg_init_0(ed->arch.hl2_vtable[index]);
}
static void vmx_io_instruction(struct xen_regs *regs,
Index: xen/arch/x86/mm.c
===================================================================
--- xen.orig/arch/x86/mm.c 2005-03-21 12:21:49.000000000 +0100
+++ xen/arch/x86/mm.c 2005-03-22 13:13:31.872406670 +0100
@@ -209,9 +209,9 @@ static void __invalidate_shadow_ldt(stru
for ( i = 16; i < 32; i++ )
{
- pfn = l1_pgentry_to_pfn(d->arch.perdomain_ptes[i]);
+ pfn = l1_pg_get_pfn(d->arch.perdomain_ptes[i]);
if ( pfn == 0 ) continue;
- d->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
+ l1_pg_init_0(d->arch.perdomain_ptes[i]);
page = &frame_table[pfn];
ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
ASSERT_PAGE_IS_DOMAIN(page, d->domain);
@@ -255,7 +255,7 @@ int map_ldt_shadow_page(unsigned int off
{
struct exec_domain *ed = current;
struct domain *d = ed->domain;
- unsigned long l1e;
+ l1_pgentry_t l1e;
#if defined(__x86_64__)
/* If in user mode, switch to kernel mode just to read LDT mapping. */
@@ -269,17 +269,19 @@ int map_ldt_shadow_page(unsigned int off
BUG_ON(unlikely(in_irq()));
TOGGLE_MODE();
- __get_user(l1e, (unsigned long *)
- &linear_pg_table[l1_linear_offset(ed->arch.ldt_base) +
off]);
+ __copy_from_user(&l1e,
+
&linear_pg_table[l1_linear_offset(ed->arch.ldt_base) + off],
+ sizeof(l1e));
TOGGLE_MODE();
- if ( unlikely(!(l1e & _PAGE_PRESENT)) ||
+ if ( unlikely(!(l1_pg_get_flags(l1e) & _PAGE_PRESENT)) ||
unlikely(!get_page_and_type(
- &frame_table[l1_pgentry_to_pfn(mk_l1_pgentry(l1e))],
- d, PGT_ldt_page)) )
+ &frame_table[l1_pg_get_pfn(l1e)],
+ d, PGT_ldt_page)) )
return 0;
- ed->arch.perdomain_ptes[off + 16] = mk_l1_pgentry(l1e | _PAGE_RW);
+ ed->arch.perdomain_ptes[off + 16] = l1e;
+ l1_pg_bset_flags(ed->arch.perdomain_ptes[off + 16], _PAGE_RW);
ed->arch.shadow_ldt_mapcnt++;
return 1;
@@ -348,13 +350,13 @@ get_linear_pagetable(
struct pfn_info *page;
unsigned long pfn;
- if ( (root_pgentry_val(re) & _PAGE_RW) )
+ if ( (root_pg_get_flags(re) & _PAGE_RW) )
{
MEM_LOG("Attempt to create linear p.t. with write perms");
return 0;
}
- if ( (pfn = root_pgentry_to_pfn(re)) != re_pfn )
+ if ( (pfn = root_pg_get_pfn(re)) != re_pfn )
{
/* Make sure the mapped frame belongs to the correct domain. */
if ( unlikely(!get_page_from_pagenr(pfn, d)) )
@@ -387,17 +389,16 @@ static int
get_page_from_l1e(
l1_pgentry_t l1e, struct domain *d)
{
- unsigned long l1v = l1_pgentry_val(l1e);
- unsigned long pfn = l1_pgentry_to_pfn(l1e);
+ unsigned long pfn = l1_pg_get_pfn(l1e);
struct pfn_info *page = &frame_table[pfn];
extern int domain_iomem_in_pfn(struct domain *d, unsigned long pfn);
- if ( !(l1v & _PAGE_PRESENT) )
+ if ( !(l1_pg_get_flags(l1e) & _PAGE_PRESENT) )
return 1;
- if ( unlikely(l1v & L1_DISALLOW_MASK) )
+ if ( unlikely(l1_pg_get_flags(l1e) & L1_DISALLOW_MASK) )
{
- MEM_LOG("Bad L1 type settings %p", l1v &
L1_DISALLOW_MASK);
+ MEM_LOG("Bad L1 type settings %p", l1_pg_get_value(l1e) &
L1_DISALLOW_MASK);
return 0;
}
@@ -417,7 +418,7 @@ get_page_from_l1e(
return 0;
}
- return ((l1v & _PAGE_RW) ?
+ return ((l1_pg_get_flags(l1e) & _PAGE_RW) ?
get_page_and_type(page, d, PGT_writable_page) :
get_page(page, d));
}
@@ -431,18 +432,18 @@ get_page_from_l2e(
{
int rc;
- if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
+ if ( !(l2_pg_get_flags(l2e) & _PAGE_PRESENT) )
return 1;
- if ( unlikely((l2_pgentry_val(l2e) & L2_DISALLOW_MASK)) )
+ if ( unlikely((l2_pg_get_flags(l2e) & L2_DISALLOW_MASK)) )
{
MEM_LOG("Bad L2 page type settings %p",
- l2_pgentry_val(l2e) & L2_DISALLOW_MASK);
+ l2_pg_get_value(l2e) & L2_DISALLOW_MASK);
return 0;
}
rc = get_page_and_type_from_pagenr(
- l2_pgentry_to_pfn(l2e),
+ l2_pg_get_pfn(l2e),
PGT_l1_page_table | (va_idx<<PGT_va_shift), d);
#if defined(__i386__)
@@ -504,12 +505,11 @@ get_page_from_l4e(
static void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
{
- unsigned long l1v = l1_pgentry_val(l1e);
- unsigned long pfn = l1_pgentry_to_pfn(l1e);
+ unsigned long pfn = l1_pg_get_pfn(l1e);
struct pfn_info *page = &frame_table[pfn];
struct domain *e;
- if ( !(l1v & _PAGE_PRESENT) || !pfn_is_ram(pfn) )
+ if ( !(l1_pg_get_flags(l1e) & _PAGE_PRESENT) || !pfn_is_ram(pfn) )
return;
e = page_get_owner(page);
@@ -526,12 +526,13 @@ static void put_page_from_l1e(l1_pgentry
* mappings and which unmappings are counted via the grant entry, but
* really it doesn't matter as privileged domains have carte
blanche.
*/
- if ( likely(gnttab_check_unmap(e, d, pfn, !(l1v & _PAGE_RW))) )
+ if (likely(gnttab_check_unmap(e, d, pfn,
+ !(l1_pg_get_flags(l1e) & _PAGE_RW))))
return;
/* Assume this mapping was made via MMUEXT_SET_FOREIGNDOM... */
}
- if ( l1v & _PAGE_RW )
+ if ( l1_pg_get_flags(l1e) & _PAGE_RW )
{
put_page_and_type(page);
}
@@ -553,9 +554,9 @@ static void put_page_from_l1e(l1_pgentry
*/
static void put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn)
{
- if ( (l2_pgentry_val(l2e) & _PAGE_PRESENT) &&
- (l2_pgentry_to_pfn(l2e) != pfn) )
- put_page_and_type(&frame_table[l2_pgentry_to_pfn(l2e)]);
+ if ( (l2_pg_get_flags(l2e) & _PAGE_PRESENT) &&
+ (l2_pg_get_pfn(l2e) != pfn) )
+ put_page_and_type(&frame_table[l2_pg_get_pfn(l2e)]);
}
@@ -625,11 +626,11 @@ static int alloc_l2_table(struct pfn_inf
memcpy(&pl2e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
&idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
ROOT_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
-    pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
-        mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
-    pl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
-        mk_l2_pgentry(__pa(page_get_owner(page)->arch.mm_perdomain_pt) |
-                      __PAGE_HYPERVISOR);
+ l2_pg_init_pfn(pl2e[l2_table_offset(LINEAR_PT_VIRT_START)],
+ pfn, __PAGE_HYPERVISOR);
+ l2_pg_init_phys(pl2e[l2_table_offset(PERDOMAIN_VIRT_START)],
+ __pa(page_get_owner(page)->arch.mm_perdomain_pt),
+ __PAGE_HYPERVISOR);
#endif
unmap_domain_mem(pl2e);
@@ -770,14 +771,15 @@ static inline int update_l1e(l1_pgentry_
l1_pgentry_t ol1e,
l1_pgentry_t nl1e)
{
- unsigned long o = l1_pgentry_val(ol1e);
- unsigned long n = l1_pgentry_val(nl1e);
+ /* FIXME: breaks with PAE */
+ unsigned long o = l1_pg_get_value(ol1e);
+ unsigned long n = l1_pg_get_value(nl1e);
if ( unlikely(cmpxchg_user(pl1e, o, n) != 0) ||
- unlikely(o != l1_pgentry_val(ol1e)) )
+ unlikely(o != l1_pg_get_value(ol1e)) )
{
MEM_LOG("Failed to update %p -> %p: saw %p\n",
- l1_pgentry_val(ol1e), l1_pgentry_val(nl1e), o);
+ l1_pg_get_value(ol1e), l1_pg_get_value(nl1e), o);
return 0;
}
@@ -789,24 +791,22 @@ static inline int update_l1e(l1_pgentry_
static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e)
{
l1_pgentry_t ol1e;
- unsigned long _ol1e;
struct domain *d = current->domain;
- if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
+ if (unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
return 0;
- ol1e = mk_l1_pgentry(_ol1e);
- if ( l1_pgentry_val(nl1e) & _PAGE_PRESENT )
+ if ( l1_pg_get_flags(nl1e) & _PAGE_PRESENT )
{
- if ( unlikely(l1_pgentry_val(nl1e) & L1_DISALLOW_MASK) )
+ if ( unlikely(l1_pg_get_flags(nl1e) & L1_DISALLOW_MASK) )
{
MEM_LOG("Bad L1 type settings %p",
- l1_pgentry_val(nl1e) & L1_DISALLOW_MASK);
+ l1_pg_get_flags(nl1e) & L1_DISALLOW_MASK);
return 0;
}
/* Fast path for identical mapping, r/w and presence. */
- if ( ((l1_pgentry_val(ol1e) ^ l1_pgentry_val(nl1e)) &
+ if ( ((l1_pg_get_value(ol1e) ^ l1_pg_get_value(nl1e)) &
((PADDR_MASK & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT)) == 0 )
return update_l1e(pl1e, ol1e, nl1e);
@@ -830,15 +830,15 @@ static int mod_l1_entry(l1_pgentry_t *pl
return 1;
}
-
+/* FIXME: breaks with PAE */
#define UPDATE_ENTRY(_t,_p,_o,_n) ({ \
unsigned long __o = cmpxchg((unsigned long *)(_p), \
- _t ## _pgentry_val(_o), \
- _t ## _pgentry_val(_n)); \
- if ( __o != _t ## _pgentry_val(_o) ) \
+ _t ## _pg_get_value(_o), \
+ _t ## _pg_get_value(_n)); \
+ if ( __o != _t ## _pg_get_value(_o) ) \
MEM_LOG("Failed to update %p -> %p: saw %p\n",
\
- _t ## _pgentry_val(_o), _t ## _pgentry_val(_n), __o); \
- (__o == _t ## _pgentry_val(_o)); })
+ _t ## _pg_get_value(_o), _t ## _pg_get_value(_n), __o); \
+ (__o == _t ## _pg_get_value(_o)); })
/* Update the L2 entry at pl2e to new value nl2e. pl2e is within frame pfn. */
@@ -847,7 +847,6 @@ static int mod_l2_entry(l2_pgentry_t *pl
unsigned long pfn)
{
l2_pgentry_t ol2e;
- unsigned long _ol2e;
if ( unlikely(!is_guest_l2_slot(pgentry_ptr_to_slot(pl2e))) )
{
@@ -855,21 +854,20 @@ static int mod_l2_entry(l2_pgentry_t *pl
return 0;
}
- if ( unlikely(__get_user(_ol2e, (unsigned long *)pl2e) != 0) )
+ if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
return 0;
- ol2e = mk_l2_pgentry(_ol2e);
- if ( l2_pgentry_val(nl2e) & _PAGE_PRESENT )
+ if ( l2_pg_get_flags(nl2e) & _PAGE_PRESENT )
{
- if ( unlikely(l2_pgentry_val(nl2e) & L2_DISALLOW_MASK) )
+ if ( unlikely(l2_pg_get_flags(nl2e) & L2_DISALLOW_MASK) )
{
MEM_LOG("Bad L2 type settings %p",
- l2_pgentry_val(nl2e) & L2_DISALLOW_MASK);
+ l2_pg_get_value(nl2e) & L2_DISALLOW_MASK);
return 0;
}
/* Fast path for identical mapping and presence. */
- if ( ((l2_pgentry_val(ol2e) ^ l2_pgentry_val(nl2e)) &
+ if ( ((l2_pg_get_value(ol2e) ^ l2_pg_get_value(nl2e)) &
((PADDR_MASK & PAGE_MASK) | _PAGE_PRESENT)) == 0 )
return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e);
@@ -1761,15 +1759,18 @@ int do_mmu_update(
if ( likely(get_page_type(
page, type_info & (PGT_type_mask|PGT_va_mask))) )
{
- okay = mod_l1_entry((l1_pgentry_t *)va,
- mk_l1_pgentry(req.val));
+ l1_pgentry_t l1e;
+
+ /* FIXME: breaks with PAE & mem above 4GB */
+ l1_pg_init_phys(l1e, req.val, req.val & ~PAGE_MASK);
+ okay = mod_l1_entry((l1_pgentry_t *)va, l1e);
if ( unlikely(shadow_mode_enabled(d)) && okay
&&
(get_shadow_status(d, page-frame_table) &
PSH_shadowed) )
{
shadow_l1_normal_pt_update(
- req.ptr, req.val, &prev_smfn, &prev_spl1e);
+ req.ptr, l1e, &prev_smfn, &prev_spl1e);
put_shadow_status(d);
}
@@ -1779,15 +1780,17 @@ int do_mmu_update(
case PGT_l2_page_table:
if ( likely(get_page_type(page, PGT_l2_page_table)) )
{
- okay = mod_l2_entry((l2_pgentry_t *)va,
- mk_l2_pgentry(req.val),
- pfn);
+ l2_pgentry_t l2e;
+
+ /* FIXME: breaks with PAE & mem above 4GB */
+ l2_pg_init_phys(l2e, req.val, req.val & ~PAGE_MASK);
+ okay = mod_l2_entry((l2_pgentry_t *)va, l2e, pfn);
if ( unlikely(shadow_mode_enabled(d)) && okay
&&
(get_shadow_status(d, page-frame_table) &
PSH_shadowed) )
{
- shadow_l2_normal_pt_update(req.ptr, req.val);
+ shadow_l2_normal_pt_update(req.ptr, l2e);
put_shadow_status(d);
}
@@ -1923,20 +1926,20 @@ int do_mmu_update(
}
void update_shadow_va_mapping(unsigned long va,
- unsigned long val,
+ l1_pgentry_t val,
struct exec_domain *ed,
struct domain *d)
{
/* This function assumes the caller is holding the domain's
BIGLOCK
* and is running in a shadow mode
*/
+ l1_pgentry_t sval;
- unsigned long sval = 0;
-
+ l1_pg_init_0(sval);
l1pte_propagate_from_guest(d, &val, &sval);
- if ( unlikely(__put_user(sval, ((unsigned long *)(
- &shadow_linear_pg_table[l1_linear_offset(va)])))) )
+ if (
unlikely(__copy_to_user(&shadow_linear_pg_table[l1_linear_offset(va)],
+ &sval, sizeof(sval))))
{
/*
* Since L2's are guaranteed RW, failure indicates either that
the
@@ -1948,12 +1951,12 @@ void update_shadow_va_mapping(unsigned l
BUG_ON(shadow_mode_external(current->domain));
l2_pgentry_t gpde = linear_l2_table[l2_table_offset(va)];
- unsigned long gpfn = l2_pgentry_val(gpde) >> PAGE_SHIFT;
+ unsigned long gpfn = l2_pg_get_pfn(gpde);
if (get_shadow_status(d, gpfn))
{
unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
- unsigned long *gl1e = map_domain_mem(gmfn << PAGE_SHIFT);
+ l1_pgentry_t *gl1e = map_domain_mem(gmfn << PAGE_SHIFT);
unsigned l1_idx = l1_table_offset(va);
gl1e[l1_idx] = sval;
unmap_domain_mem(gl1e);
@@ -1977,7 +1980,7 @@ void update_shadow_va_mapping(unsigned l
}
int update_grant_va_mapping(unsigned long va,
- unsigned long _nl1e,
+ l1_pgentry_t _nl1e,
struct domain *d,
struct exec_domain *ed)
{
@@ -1990,19 +1993,17 @@ int update_grant_va_mapping(unsigned lon
int rc = 0;
l1_pgentry_t *pl1e;
- unsigned long _ol1e;
+ l1_pgentry_t ol1e;
cleanup_writable_pagetable(d);
pl1e = &linear_pg_table[l1_linear_offset(va)];
- if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
+ if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
rc = -EINVAL;
else
{
- l1_pgentry_t ol1e = mk_l1_pgentry(_ol1e);
-
- if ( update_l1e(pl1e, ol1e, mk_l1_pgentry(_nl1e)) )
+ if ( update_l1e(pl1e, ol1e, _nl1e) )
put_page_from_l1e(ol1e, d);
else
rc = -EINVAL;
@@ -2016,7 +2017,7 @@ int update_grant_va_mapping(unsigned lon
int do_update_va_mapping(unsigned long va,
- unsigned long val,
+ l1_pgentry_t val,
unsigned long flags)
{
struct exec_domain *ed = current;
@@ -2042,8 +2043,7 @@ int do_update_va_mapping(unsigned long v
* the case of updating L2 entries.
*/
- if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
- mk_l1_pgentry(val))) )
+ if (
unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],val)) )
rc = -EINVAL;
if ( unlikely(shadow_mode_enabled(d)) )
@@ -2067,7 +2067,7 @@ int do_update_va_mapping(unsigned long v
}
int do_update_va_mapping_otherdomain(unsigned long va,
- unsigned long val,
+ l1_pgentry_t val,
unsigned long flags,
domid_t domid)
{
@@ -2106,9 +2106,9 @@ void destroy_gdt(struct exec_domain *ed)
for ( i = 0; i < 16; i++ )
{
- if ( (pfn = l1_pgentry_to_pfn(ed->arch.perdomain_ptes[i])) != 0 )
+ if ( (pfn = l1_pg_get_pfn(ed->arch.perdomain_ptes[i])) != 0 )
put_page_and_type(&frame_table[pfn]);
- ed->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
+ l1_pg_init_0(ed->arch.perdomain_ptes[i]);
}
}
@@ -2162,8 +2162,8 @@ long set_gdt(struct exec_domain *ed,
/* Install the new GDT. */
for ( i = 0; i < nr_pages; i++ )
-        ed->arch.perdomain_ptes[i] =
-            mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ l1_pg_init_pfn(ed->arch.perdomain_ptes[i], frames[i],
+ __PAGE_HYPERVISOR);
SET_GDT_ADDRESS(ed, GDT_VIRT_START(ed));
SET_GDT_ENTRIES(ed, entries);
@@ -2234,7 +2234,7 @@ long do_update_descriptor(
case PGT_gdt_page:
/* Disallow updates of Xen-reserved descriptors in the current GDT. */
for_each_exec_domain(current->domain, ed) {
- if ( (l1_pgentry_to_pfn(ed->arch.perdomain_ptes[0]) == pfn)
&&
+ if ( (l1_pg_get_pfn(ed->arch.perdomain_ptes[0]) == pfn)
&&
(((pa&(PAGE_SIZE-1))>>3) >=
FIRST_RESERVED_GDT_ENTRY) &&
(((pa&(PAGE_SIZE-1))>>3) <=
LAST_RESERVED_GDT_ENTRY) )
goto out;
@@ -2289,22 +2289,22 @@ int ptwr_debug = 0x0;
/* Flush the given writable p.t. page and write-protect it again. */
void ptwr_flush(const int which)
{
- unsigned long sstat, spte, pte, *ptep, l1va;
- l1_pgentry_t *sl1e = NULL, *pl1e, ol1e, nl1e;
- l2_pgentry_t *pl2e;
+ unsigned long sstat, l1va;
+ l1_pgentry_t *sl1e = NULL, *pl1e, ol1e, nl1e, pte, spte, *ptep;
+ l2_pgentry_t *pl2e;
int i, cpu = smp_processor_id();
struct exec_domain *ed = current;
struct domain *d = ed->domain;
unsigned int modified = 0;
l1va = ptwr_info[cpu].ptinfo[which].l1va;
- ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
+ ptep = &linear_pg_table[l1_linear_offset(l1va)];
/*
* STEP 1. Write-protect the p.t. page so no more updates can occur.
*/
- if ( unlikely(__get_user(pte, ptep)) )
+ if ( unlikely(__copy_from_user(&pte, ptep, sizeof(pte))) )
{
MEM_LOG("ptwr: Could not read pte at %p\n", ptep);
/*
@@ -2315,17 +2315,17 @@ void ptwr_flush(const int which)
}
PTWR_PRINTK("[%c] disconnected_l1va at %p is %p\n",
PTWR_PRINT_WHICH, ptep, pte);
- pte &= ~_PAGE_RW;
+ l1_pg_bclr_flags(pte,_PAGE_RW);
if ( unlikely(shadow_mode_enabled(d)) )
{
/* Write-protect the p.t. page in the shadow page table. */
l1pte_propagate_from_guest(d, &pte, &spte);
- __put_user(spte, (unsigned long *)
- &shadow_linear_pg_table[l1_linear_offset(l1va)]);
+ __copy_to_user(&shadow_linear_pg_table[l1_linear_offset(l1va)],
+ &spte, sizeof(spte));
/* Is the p.t. page itself shadowed? Map it into Xen space if so. */
- sstat = get_shadow_status(d, pte >> PAGE_SHIFT);
+ sstat = get_shadow_status(d, l1_pg_get_pfn(pte));
if ( sstat & PSH_shadowed )
sl1e = map_domain_mem((sstat & PSH_pfn_mask) <<
PAGE_SHIFT);
}
@@ -2357,7 +2357,7 @@ void ptwr_flush(const int which)
ol1e = ptwr_info[cpu].ptinfo[which].page[i];
nl1e = pl1e[i];
- if ( likely(l1_pgentry_val(ol1e) == l1_pgentry_val(nl1e)) )
+ if ( likely(l1_pg_get_value(ol1e) == l1_pg_get_value(nl1e)) )
continue;
/* Update number of entries modified. */
@@ -2367,15 +2367,13 @@ void ptwr_flush(const int which)
* Fast path for PTEs that have merely been write-protected
* (e.g., during a Unix fork()). A strict reduction in privilege.
*/
- if ( likely(l1_pgentry_val(ol1e) == (l1_pgentry_val(nl1e)|_PAGE_RW)) )
+ if ( likely(l1_pg_get_value(ol1e) == (l1_pg_get_value(nl1e)|_PAGE_RW))
)
{
- if ( likely(l1_pgentry_val(nl1e) & _PAGE_PRESENT) )
+ if ( likely(l1_pg_get_flags(nl1e) & _PAGE_PRESENT) )
{
if ( unlikely(sl1e != NULL) )
- l1pte_propagate_from_guest(
- d, &l1_pgentry_val(nl1e),
- &l1_pgentry_val(sl1e[i]));
- put_page_type(&frame_table[l1_pgentry_to_pfn(nl1e)]);
+ l1pte_propagate_from_guest(d, &nl1e, &sl1e[i]);
+ put_page_type(&frame_table[l1_pg_get_pfn(nl1e)]);
}
continue;
}
@@ -2395,10 +2393,9 @@ void ptwr_flush(const int which)
}
if ( unlikely(sl1e != NULL) )
- l1pte_propagate_from_guest(
- d, &l1_pgentry_val(nl1e), &l1_pgentry_val(sl1e[i]));
+ l1pte_propagate_from_guest(d, &nl1e, &sl1e[i]);
- if ( unlikely(l1_pgentry_val(ol1e) & _PAGE_PRESENT) )
+ if ( unlikely(l1_pg_get_flags(ol1e) & _PAGE_PRESENT) )
put_page_from_l1e(ol1e, d);
}
unmap_domain_mem(pl1e);
@@ -2414,7 +2411,7 @@ void ptwr_flush(const int which)
if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode_enabled(d)) )
{
pl2e = &linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
- *pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
+ l2_pg_bset_flags(*pl2e, _PAGE_PRESENT);
}
/*
@@ -2437,9 +2434,9 @@ static int ptwr_emulated_update(
unsigned int bytes,
unsigned int do_cmpxchg)
{
- unsigned long sstat, pte, pfn;
+ unsigned long sstat, pfn;
struct pfn_info *page;
- l1_pgentry_t ol1e, nl1e, *pl1e, *sl1e;
+ l1_pgentry_t pte, ol1e, nl1e, *pl1e, *sl1e;
struct domain *d = current->domain;
/* Aligned access only, thank you. */
@@ -2451,6 +2448,7 @@ static int ptwr_emulated_update(
}
/* Turn a sub-word access into a full-word access. */
+ /* FIXME: needs tweaks for PAE */
if ( (addr & ((BITS_PER_LONG/8)-1)) != 0 )
{
int rc;
@@ -2469,18 +2467,18 @@ static int ptwr_emulated_update(
}
/* Read the PTE that maps the page being updated. */
- if ( __get_user(pte, (unsigned long *)
- &linear_pg_table[l1_linear_offset(addr)]) )
+ if ( __copy_from_user(&pte,
&linear_pg_table[l1_linear_offset(addr)],
+ sizeof(pte)) )
{
MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table\n");
return X86EMUL_UNHANDLEABLE;
}
- pfn = pte >> PAGE_SHIFT;
+ pfn = l1_pg_get_pfn(pte);
page = &frame_table[pfn];
/* We are looking only for read-only mappings of p.t. pages. */
- if ( ((pte & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
+ if ( ((l1_pg_get_flags(pte) & (_PAGE_RW | _PAGE_PRESENT)) !=
_PAGE_PRESENT) ||
((page->u.inuse.type_info & PGT_type_mask) !=
PGT_l1_page_table) )
{
MEM_LOG("ptwr_emulate: Page is mistyped or bad pte (%p,
%x)\n",
@@ -2489,7 +2487,7 @@ static int ptwr_emulated_update(
}
/* Check the new PTE. */
- nl1e = mk_l1_pgentry(val);
+ l1_pg_init_phys(nl1e, val, val & ~PAGE_MASK);
if ( unlikely(!get_page_from_l1e(nl1e, d)) )
return X86EMUL_UNHANDLEABLE;
@@ -2497,7 +2495,7 @@ static int ptwr_emulated_update(
pl1e = map_domain_mem(page_to_phys(page) + (addr & ~PAGE_MASK));
if ( do_cmpxchg )
{
- ol1e = mk_l1_pgentry(old);
+ l1_pg_init_phys(ol1e, old, old & ~PAGE_MASK);
if ( cmpxchg((unsigned long *)pl1e, old, val) != old )
{
unmap_domain_mem(pl1e);
@@ -2519,14 +2517,13 @@ static int ptwr_emulated_update(
{
sl1e = map_domain_mem(
((sstat & PSH_pfn_mask) << PAGE_SHIFT) + (addr &
~PAGE_MASK));
- l1pte_propagate_from_guest(
- d, &l1_pgentry_val(nl1e), &l1_pgentry_val(*sl1e));
+ l1pte_propagate_from_guest(d, &nl1e, sl1e);
unmap_domain_mem(sl1e);
}
}
/* Finally, drop the old PTE. */
- if ( unlikely(l1_pgentry_val(ol1e) & _PAGE_PRESENT) )
+ if ( unlikely(l1_pg_get_flags(ol1e) & _PAGE_PRESENT) )
put_page_from_l1e(ol1e, d);
return X86EMUL_CONTINUE;
@@ -2560,7 +2557,7 @@ static struct x86_mem_emulator ptwr_mem_
/* Write page fault handler: check if guest is trying to modify a PTE. */
int ptwr_do_page_fault(unsigned long addr)
{
- unsigned long pte, pfn, l2e;
+ unsigned long pte, pfn;
struct pfn_info *page;
l2_pgentry_t *pl2e;
int which, cpu = smp_processor_id();
@@ -2574,7 +2571,7 @@ int ptwr_do_page_fault(unsigned long add
* Attempt to read the PTE that maps the VA being accessed. By checking for
* PDE validity in the L2 we avoid many expensive fixups in __get_user().
*/
- if ( !(l2_pgentry_val(linear_l2_table[addr>>L2_PAGETABLE_SHIFT])
&
+ if ( !(l2_pg_get_flags(linear_l2_table[addr>>L2_PAGETABLE_SHIFT])
&
_PAGE_PRESENT) ||
__get_user(pte, (unsigned long *)
&linear_pg_table[l1_linear_offset(addr)]) )
@@ -2615,9 +2612,8 @@ int ptwr_do_page_fault(unsigned long add
* an ACTIVE p.t., otherwise it is INACTIVE.
*/
pl2e = &linear_l2_table[l2_idx];
- l2e = l2_pgentry_val(*pl2e);
which = PTWR_PT_INACTIVE;
- if ( (l2e >> PAGE_SHIFT) == pfn )
+ if ( (l2_pg_get_pfn(*pl2e)) == pfn )
{
/*
* Check the PRESENT bit to set ACTIVE mode.
@@ -2625,7 +2621,7 @@ int ptwr_do_page_fault(unsigned long add
* ACTIVE p.t. (it may be the same p.t. mapped at another virt addr).
* The ptwr_flush call below will restore the PRESENT bit.
*/
- if ( likely(l2e & _PAGE_PRESENT) ||
+ if ( likely(l2_pg_get_flags(*pl2e) & _PAGE_PRESENT) ||
(ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va &&
(l2_idx == ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l2_idx)) )
which = PTWR_PT_ACTIVE;
@@ -2661,7 +2657,7 @@ int ptwr_do_page_fault(unsigned long add
if ( (which == PTWR_PT_ACTIVE) &&
likely(!shadow_mode_enabled(ed->domain)) )
{
- *pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
+ l2_pg_bclr_flags(*pl2e, _PAGE_PRESENT);
flush_tlb(); /* XXX Multi-CPU guests? */
}
Index: xen/common/grant_table.c
===================================================================
--- xen.orig/common/grant_table.c 2005-03-21 12:21:52.000000000 +0100
+++ xen/common/grant_table.c 2005-03-22 13:05:04.945761964 +0100
@@ -283,19 +283,18 @@ __gnttab_map_grant_ref(
if ( (host_virt_addr != 0) && (flags & GNTMAP_host_map) )
{
+
/* Write update into the pagetable
*/
/* cwc22: TODO: check locking... */
+ l1_pgentry_t l1e;
spin_unlock(&rd->grant_table->lock);
- rc = update_grant_va_mapping( host_virt_addr,
- (frame << PAGE_SHIFT) | _PAGE_PRESENT |
- _PAGE_ACCESSED |
- _PAGE_DIRTY |
- ((flags & GNTMAP_readonly) ? 0 : _PAGE_RW),
- ld, led );
+ l1_pg_init_pfn(l1e, frame, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY
|
+ ((flags & GNTMAP_readonly) ? 0 : _PAGE_RW));
+ rc = update_grant_va_mapping( host_virt_addr, l1e, ld, led );
spin_lock(&rd->grant_table->lock);
Index: xen/arch/x86/vmx_platform.c
===================================================================
--- xen.orig/arch/x86/vmx_platform.c 2005-03-21 12:21:51.000000000 +0100
+++ xen/arch/x86/vmx_platform.c 2005-03-22 11:58:58.000000000 +0100
@@ -367,7 +367,7 @@ static int vmx_decode(const unsigned cha
static int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
int inst_len)
{
- unsigned long gpte;
+ l1_pgentry_t gpte;
unsigned long mfn;
unsigned long ma;
unsigned char * inst_start;
@@ -378,7 +378,7 @@ static int inst_copy_from_guest(unsigned
if ((guest_eip & PAGE_MASK) == ((guest_eip + inst_len) &
PAGE_MASK)) {
gpte = gva_to_gpte(guest_eip);
- mfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
+ mfn = phys_to_machine_mapping(l1_pg_get_pfn(gpte));
ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1));
inst_start = (unsigned char *)map_domain_mem(ma);
Index: xen/include/asm-x86/x86_32/page.h
===================================================================
--- xen.orig/include/asm-x86/x86_32/page.h 2005-03-21 12:21:49.000000000 +0100
+++ xen/include/asm-x86/x86_32/page.h 2005-03-22 12:58:10.364114891 +0100
@@ -21,34 +21,46 @@
#ifndef __ASSEMBLY__
#include <xen/config.h>
-typedef struct { unsigned long l1_lo; } l1_pgentry_t;
-typedef struct { unsigned long l2_lo; } l2_pgentry_t;
+#include <asm/types.h>
+typedef struct { u32 l1_lo; } l1_pgentry_t;
+typedef struct { u32 l2_lo; } l2_pgentry_t;
typedef l2_pgentry_t root_pgentry_t;
#endif /* !__ASSEMBLY__ */
-/* Strip type from a table entry. */
-#define l1_pgentry_val(_x) ((_x).l1_lo)
-#define l2_pgentry_val(_x) ((_x).l2_lo)
-#define root_pgentry_val(_x) (l2_pgentry_val(_x))
+/* read access (deprecated) */
+#define l1_pg_get_value(_x) ((_x).l1_lo)
+#define l2_pg_get_value(_x) ((_x).l2_lo)
-/* Add type to a table entry. */
-#define mk_l1_pgentry(_x) ( (l1_pgentry_t) { (_x) } )
-#define mk_l2_pgentry(_x) ( (l2_pgentry_t) { (_x) } )
-#define mk_root_pgentry(_x) (mk_l2_pgentry(_x))
+/* read access */
+#define l1_pg_get_pfn(_x) ((_x).l1_lo >> PAGE_SHIFT)
+#define l1_pg_get_phys(_x) ((_x).l1_lo & PAGE_MASK)
+#define l1_pg_get_flags(_x) ((_x).l1_lo & ~PAGE_MASK)
-/* Turn a typed table entry into a physical address. */
-#define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK)
-#define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK)
-#define root_pgentry_to_phys(_x) (l2_pgentry_to_phys(_x))
+#define l2_pg_get_pfn(_x) ((_x).l2_lo >> PAGE_SHIFT)
+#define l2_pg_get_phys(_x) ((_x).l2_lo & PAGE_MASK)
+#define l2_pg_get_flags(_x) ((_x).l2_lo & ~PAGE_MASK)
-/* Turn a typed table entry into a page index. */
-#define l1_pgentry_to_pfn(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT)
-#define l2_pgentry_to_pfn(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
-#define root_pgentry_to_pfn(_x) (l2_pgentry_to_pfn(_x))
+#define root_pg_get_pfn l2_pg_get_pfn
+#define root_pg_get_flags l2_pg_get_flags
+
+/* write access */
+#define l1_pg_init_0(_x) { (_x).l1_lo = 0; }
+#define l1_pg_init_pfn(_x,_p,_f) { (_x).l1_lo = (_p << PAGE_SHIFT) | ((u32)_f); }
+#define l1_pg_init_phys(_x,_p,_f) { (_x).l1_lo = (_p & PAGE_MASK) | ((u32)_f); }
+#define l1_pg_set_flags(_x, _f) { (_x).l1_lo = ((_x).l1_lo & PAGE_MASK) | ((u32)_f); }
+#define l1_pg_bset_flags(_x, _f) { (_x).l1_lo |= ((u32)_f); }
+#define l1_pg_bclr_flags(_x, _f) { (_x).l1_lo &= ~((u32)_f); }
+
+#define l2_pg_init_0(_x) { (_x).l2_lo = 0; }
+#define l2_pg_init_pfn(_x,_p,_f) { (_x).l2_lo = (_p << PAGE_SHIFT) | ((u32)_f); }
+#define l2_pg_init_phys(_x,_p,_f) { (_x).l2_lo = (_p & PAGE_MASK) | ((u32)_f); }
+#define l2_pg_set_flags(_x, _f) { (_x).l2_lo = ((_x).l2_lo & PAGE_MASK) | ((u32)_f); }
+#define l2_pg_bset_flags(_x, _f) { (_x).l2_lo |= ((u32)_f); }
+#define l2_pg_bclr_flags(_x, _f) { (_x).l2_lo &= ~((u32)_f); }
/* Pagetable walking. */
-#define l2_pgentry_to_l1(_x) \
- ((l1_pgentry_t *)__va(l2_pgentry_to_phys(_x)))
+#define l2_pg_to_l1(_x) \
+ ((l1_pgentry_t *)__va(l2_pg_get_phys(_x)))
/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(_a) \
Index: xen/include/asm-x86/mm.h
===================================================================
--- xen.orig/include/asm-x86/mm.h 2005-03-21 12:21:49.000000000 +0100
+++ xen/include/asm-x86/mm.h 2005-03-22 11:58:58.000000000 +0100
@@ -232,8 +232,8 @@ void synchronise_pagetables(unsigned lon
#define phys_to_machine_mapping(_pfn) \
({ l1_pgentry_t l1e; unsigned long mfn; \
- mfn = __get_user(l1_pgentry_val(l1e), &__phys_to_machine_mapping[_pfn])
\
- ? 0 : l1_pgentry_to_pfn(l1e); \
+ mfn = __copy_from_user(&l1e, &__phys_to_machine_mapping[_pfn],
sizeof(l1e)) \
+ ? 0 : l1_pg_get_pfn(l1e); \
mfn; })
#define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
@@ -325,7 +325,7 @@ void propagate_page_fault(unsigned long
* hold a reference to the page.
*/
int update_grant_va_mapping(unsigned long va,
- unsigned long val,
+ l1_pgentry_t _nl1e,
struct domain *d,
struct exec_domain *ed);
#endif /* __ASM_X86_MM_H__ */
Index: xen/arch/x86/domain_build.c
===================================================================
--- xen.orig/arch/x86/domain_build.c 2005-03-21 12:21:48.000000000 +0100
+++ xen/arch/x86/domain_build.c 2005-03-22 13:08:13.879067862 +0100
@@ -233,10 +233,10 @@ int construct_dom0(struct domain *d,
/* WARNING: The new domain must have its 'processor'
field filled in! */
l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
-    l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry((unsigned long)l2start | __PAGE_HYPERVISOR);
-    l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR);
+ l2_pg_init_phys(l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT],
+ (unsigned long)l2start, __PAGE_HYPERVISOR);
+ l2_pg_init_phys(l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT],
+ __pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
ed->arch.guest_table = mk_pagetable((unsigned long)l2start);
l2tab += l2_table_offset(dsi.v_start);
@@ -247,12 +247,14 @@ int construct_dom0(struct domain *d,
{
l1start = l1tab = (l1_pgentry_t *)mpt_alloc;
mpt_alloc += PAGE_SIZE;
- *l2tab++ = mk_l2_pgentry((unsigned long)l1start | L2_PROT);
+ l2_pg_init_phys(*l2tab, (unsigned long)l1start, L2_PROT);
+ l2tab++;
clear_page(l1tab);
if ( count == 0 )
l1tab += l1_table_offset(dsi.v_start);
}
- *l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);
+ l1_pg_init_pfn(*l1tab, mfn, L1_PROT);
+ l1tab++;
page = &frame_table[mfn];
if ( !get_page_and_type(page, d, PGT_writable_page) )
@@ -263,12 +265,12 @@ int construct_dom0(struct domain *d,
/* Pages that are part of page tables must be read only. */
l2tab = l2start + l2_table_offset(vpt_start);
- l1start = l1tab = (l1_pgentry_t *)l2_pgentry_to_phys(*l2tab);
+ l1start = l1tab = (l1_pgentry_t *)l2_pg_get_phys(*l2tab);
l1tab += l1_table_offset(vpt_start);
for ( count = 0; count < nr_pt_pages; count++ )
{
- *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
- page = &frame_table[l1_pgentry_to_pfn(*l1tab)];
+ l1_pg_bclr_flags(*l1tab, _PAGE_RW);
+ page = &frame_table[l1_pg_get_pfn(*l1tab)];
if ( count == 0 )
{
page->u.inuse.type_info &= ~PGT_type_mask;
@@ -302,7 +304,7 @@ int construct_dom0(struct domain *d,
get_page(page, d); /* an extra ref because of readable mapping */
}
if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
- l1start = l1tab = (l1_pgentry_t *)l2_pgentry_to_phys(*++l2tab);
+ l1start = l1tab = (l1_pgentry_t *)l2_pg_get_phys(*++l2tab);
}
#elif defined(__x86_64__)
@@ -497,8 +499,8 @@ int construct_dom0(struct domain *d,
#if defined(__i386__)
/* Destroy low mappings - they were only for our convenience. */
for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
- if ( l2_pgentry_val(l2start[i]) & _PAGE_PSE )
- l2start[i] = mk_l2_pgentry(0);
+ if ( l2_pg_get_flags(l2start[i]) & _PAGE_PSE )
+ l2_pg_init_0(l2start[i]);
zap_low_mappings(); /* Do the same for the idle page tables. */
#endif
Index: xen/arch/x86/dom0_ops.c
===================================================================
--- xen.orig/arch/x86/dom0_ops.c 2005-03-21 12:21:48.000000000 +0100
+++ xen/arch/x86/dom0_ops.c 2005-03-22 11:58:58.000000000 +0100
@@ -398,7 +398,7 @@ void arch_getdomaininfo_ctxt(
{
for ( i = 0; i < 16; i++ )
c->gdt_frames[i] =
- l1_pgentry_to_pfn(ed->arch.perdomain_ptes[i]);
+ l1_pg_get_pfn(ed->arch.perdomain_ptes[i]);
c->gdt_ents = GET_GDT_ENTRIES(ed);
}
c->kernel_ss = ed->arch.kernel_ss;
Index: xen/arch/x86/domain.c
===================================================================
--- xen.orig/arch/x86/domain.c 2005-03-21 12:21:48.000000000 +0100
+++ xen/arch/x86/domain.c 2005-03-22 13:06:36.138946721 +0100
@@ -316,15 +316,15 @@ unsigned long alloc_monitor_pagetable(st
&idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
-    mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
-        mk_l2_pgentry((__pa(d->arch.mm_perdomain_pt) & PAGE_MASK)
-                      | __PAGE_HYPERVISOR);
+ l2_pg_init_phys(mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)],
+ __pa(d->arch.mm_perdomain_pt),__PAGE_HYPERVISOR);
ed->arch.monitor_vtable = mpl2e;
// map the phys_to_machine map into the Read-Only MPT space for this domain
-    mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
-        mk_l2_pgentry(pagetable_val(ed->arch.phys_table) | __PAGE_HYPERVISOR);
+ l2_pg_init_phys(mpl2e[l2_table_offset(RO_MPT_VIRT_START)],
+ pagetable_val(ed->arch.phys_table),
+ __PAGE_HYPERVISOR);
return mmfn;
}
@@ -344,8 +344,7 @@ static void free_monitor_pagetable(struc
/*
* First get the mfn for hl2_table by looking at monitor_table
*/
-    mfn = l2_pgentry_val(mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])
-        >> PAGE_SHIFT;
+    mfn = l2_pg_get_pfn(mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]);
free_domheap_page(&frame_table[mfn]);
unmap_domain_mem(mpl2e);
Index: xen/include/asm-x86/page.h
===================================================================
--- xen.orig/include/asm-x86/page.h 2005-03-21 12:21:49.000000000 +0100
+++ xen/include/asm-x86/page.h 2005-03-22 11:58:58.000000000 +0100
@@ -72,7 +72,7 @@ typedef struct { unsigned long pt_lo; }
(LINEAR_PT_VIRT_START >>
(PAGETABLE_ORDER<<2))))
#define linear_pg_table linear_l1_table
#define va_to_l1mfn(_va) \
- (l2_pgentry_val(linear_l2_table[_va>>L2_PAGETABLE_SHIFT]) >>
PAGE_SHIFT)
+ l2_pg_get_pfn(linear_l2_table[_va>>L2_PAGETABLE_SHIFT])
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
Index: xen/arch/x86/x86_32/traps.c
===================================================================
--- xen.orig/arch/x86/x86_32/traps.c 2005-03-21 12:21:48.000000000 +0100
+++ xen/arch/x86/x86_32/traps.c 2005-03-22 11:58:59.000000000 +0100
@@ -161,7 +161,7 @@ void show_page_walk(unsigned long addr)
printk("Pagetable walk from %p:\n", addr);
- page = l2_pgentry_val(idle_pg_table[l2_table_offset(addr)]);
+ page = l2_pg_get_value(idle_pg_table[l2_table_offset(addr)]);
printk(" L2 = %p %s\n", page, (page & _PAGE_PSE) ?
"(4MB)" : "");
if ( !(page & _PAGE_PRESENT) || (page & _PAGE_PSE) )
return;
Index: xen/arch/x86/x86_32/mm.c
===================================================================
--- xen.orig/arch/x86/x86_32/mm.c 2005-03-21 12:21:51.000000000 +0100
+++ xen/arch/x86/x86_32/mm.c 2005-03-22 13:20:05.122519263 +0100
@@ -47,9 +47,9 @@ int map_pages(
if ( ((s|v|p) & ((1<<L2_PAGETABLE_SHIFT)-1)) == 0 )
{
/* Super-page mapping. */
- if ( (l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
+ if ( (l2_pg_get_flags(*pl2e) & _PAGE_PRESENT) )
__flush_tlb_pge();
- *pl2e = mk_l2_pgentry(p|flags|_PAGE_PSE);
+ l2_pg_init_phys(*pl2e, p, flags|_PAGE_PSE);
v += 1 << L2_PAGETABLE_SHIFT;
p += 1 << L2_PAGETABLE_SHIFT;
@@ -58,16 +58,16 @@ int map_pages(
else
{
/* Normal page mapping. */
- if ( !(l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
+ if ( !(l2_pg_get_flags(*pl2e) & _PAGE_PRESENT) )
{
newpg = (void *)alloc_xenheap_page();
clear_page(newpg);
- *pl2e = mk_l2_pgentry(__pa(newpg) | (flags & __PTE_MASK));
+ l2_pg_init_phys(*pl2e, __pa(newpg), flags & __PTE_MASK);
}
- pl1e = l2_pgentry_to_l1(*pl2e) + l1_table_offset(v);
- if ( (l1_pgentry_val(*pl1e) & _PAGE_PRESENT) )
+ pl1e = l2_pg_to_l1(*pl2e) + l1_table_offset(v);
+ if ( (l1_pg_get_flags(*pl1e) & _PAGE_PRESENT) )
__flush_tlb_one(v);
- *pl1e = mk_l1_pgentry(p|flags);
+ l1_pg_init_phys(*pl1e, p, flags);
v += 1 << L1_PAGETABLE_SHIFT;
p += 1 << L1_PAGETABLE_SHIFT;
@@ -90,14 +90,14 @@ void __set_fixmap(
void __init paging_init(void)
{
void *ioremap_pt;
- unsigned long v, l2e;
+ unsigned long v;
struct pfn_info *pg;
/* Allocate and map the machine-to-phys table. */
if ( (pg = alloc_domheap_pages(NULL, 10)) == NULL )
panic("Not enough memory to bootstrap Xen.\n");
-    idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)] =
-        mk_l2_pgentry(page_to_phys(pg) | __PAGE_HYPERVISOR | _PAGE_PSE);
+ l2_pg_init_phys(idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)],
+ page_to_phys(pg), __PAGE_HYPERVISOR | _PAGE_PSE);
memset((void *)RDWR_MPT_VIRT_START, 0x55, 4UL << 20);
/* Xen 4MB mappings can all be GLOBAL. */
@@ -105,44 +105,43 @@ void __init paging_init(void)
{
for ( v = HYPERVISOR_VIRT_START; v; v += (1 <<
L2_PAGETABLE_SHIFT) )
{
- l2e = l2_pgentry_val(idle_pg_table[l2_table_offset(v)]);
- if ( l2e & _PAGE_PSE )
- l2e |= _PAGE_GLOBAL;
- idle_pg_table[v >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(l2e);
+ if (l2_pg_get_flags(idle_pg_table[l2_table_offset(v)]) &
_PAGE_PSE)
+ l2_pg_bset_flags(idle_pg_table[v >> L2_PAGETABLE_SHIFT],
+ _PAGE_GLOBAL);
}
}
/* Create page table for ioremap(). */
ioremap_pt = (void *)alloc_xenheap_page();
clear_page(ioremap_pt);
-    idle_pg_table[l2_table_offset(IOREMAP_VIRT_START)] =
-        mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
+ l2_pg_init_phys(idle_pg_table[l2_table_offset(IOREMAP_VIRT_START)],
+ __pa(ioremap_pt), __PAGE_HYPERVISOR);
/* Create read-only mapping of MPT for guest-OS use.
* NB. Remove the global bit so that shadow_mode_translate()==true domains
* can reused this address space for their phys-to-machine mapping.
*/
-    idle_pg_table[l2_table_offset(RO_MPT_VIRT_START)] =
-        mk_l2_pgentry(l2_pgentry_val(
-            idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]) &
-            ~(_PAGE_RW | _PAGE_GLOBAL));
+ l2_pg_init_pfn(idle_pg_table[l2_table_offset(RO_MPT_VIRT_START)],
+
l2_pg_get_pfn(idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]),
+
l2_pg_get_flags(idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)])
+ & ~(_PAGE_RW | _PAGE_GLOBAL));
/* Set up mapping cache for domain pages. */
mapcache = (unsigned long *)alloc_xenheap_page();
clear_page(mapcache);
-    idle_pg_table[l2_table_offset(MAPCACHE_VIRT_START)] =
-        mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);
+ l2_pg_init_phys(idle_pg_table[l2_table_offset(MAPCACHE_VIRT_START)],
+ __pa(mapcache), __PAGE_HYPERVISOR);
/* Set up linear page table mapping. */
-    idle_pg_table[l2_table_offset(LINEAR_PT_VIRT_START)] =
-        mk_l2_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
+ l2_pg_init_phys(idle_pg_table[l2_table_offset(LINEAR_PT_VIRT_START)],
+ __pa(idle_pg_table), __PAGE_HYPERVISOR);
}
void __init zap_low_mappings(void)
{
int i;
for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
- idle_pg_table[i] = mk_l2_pgentry(0);
+ l2_pg_init_0(idle_pg_table[i]);
flush_tlb_all_pge();
}
@@ -168,7 +167,7 @@ void subarch_init_memory(struct domain *
}
/* M2P table is mappable read-only by privileged domains. */
- m2p_start_mfn = l2_pgentry_to_pfn(
+ m2p_start_mfn = l2_pg_get_pfn(
idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]);
for ( i = 0; i < 1024; i++ )
{
@@ -334,11 +333,9 @@ void *memguard_init(void *heap_start)
l1 = (l1_pgentry_t *)heap_start;
heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
for ( j = 0; j < L1_PAGETABLE_ENTRIES; j++ )
- l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
- (j << L1_PAGETABLE_SHIFT) |
- __PAGE_HYPERVISOR);
-        idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
-            mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
+        l1_pg_init_phys(l1[j], (i << L2_PAGETABLE_SHIFT) | (j << L1_PAGETABLE_SHIFT), __PAGE_HYPERVISOR);
+ l2_pg_init_phys(idle_pg_table[i + l2_table_offset(PAGE_OFFSET)],
+ virt_to_phys(l1), __PAGE_HYPERVISOR);
}
return heap_start;
@@ -360,11 +357,12 @@ static void __memguard_change_range(void
while ( _l != 0 )
{
l2 = &idle_pg_table[l2_table_offset(_p)];
- l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
- if ( guard )
- *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT);
- else
- *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) | _PAGE_PRESENT);
+ l1 = l2_pg_to_l1(*l2) + l1_table_offset(_p);
+ if ( guard ) {
+ l1_pg_bclr_flags(*l1, _PAGE_PRESENT);
+ } else {
+ l1_pg_bset_flags(*l1, _PAGE_PRESENT);
+ }
_p += PAGE_SIZE;
_l -= PAGE_SIZE;
}
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/xen-devel