If you're one of the people who have cvsup'd to 4.8-stable since August 8th
and have since begun to experience panics on a previously stable system,
please apply the attached patch and see whether your previous stability
has been restored.
Please tell me your results.
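For the curious: the patch below appears to back out the PAE pmap changes
recently merged to RELENG_4, restoring the classic 32-bit i386 page table
code in which every PTE is a plain 32-bit word reachable through the page
directory's recursive self-mapping. A minimal sketch of the lookup this
restores (the constants are hypothetical stand-ins for the real i386
pmap.h values, and the function name is mine, not the kernel's):

/*
 * Illustrative only -- not part of the patch.  The page directory maps
 * itself at slot PTDPTDI, so the PTE for any virtual address sits at a
 * fixed virtual location inside that recursive 4MB window.
 */
#define PAGE_SHIFT	12
#define PDRSHIFT	22
#define PTDPTDI		959	/* assumed recursive PDE slot */
#define PTmap		((unsigned *)(PTDPTDI << PDRSHIFT))

static unsigned *
vtopte_sketch(unsigned va)
{
	return (PTmap + (va >> PAGE_SHIFT));	/* one 4-byte pte per 4K page */
}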
Thanks,
Mike "Silby" Silbersack
-------------- next part --------------
diff -u -r /usr/src/sys.old/i386/i386/bios.c /usr/src/sys/i386/i386/bios.c
--- /usr/src/sys.old/i386/i386/bios.c Thu Aug 21 18:35:33 2003
+++ /usr/src/sys/i386/i386/bios.c Fri Aug 22 02:17:03 2003
@@ -312,8 +312,7 @@
va_list ap;
int flags = BIOSCODE_FLAG | BIOSDATA_FLAG;
u_int i, arg_start, arg_end;
- pt_entry_t *pte;
- pd_entry_t *ptd;
+ u_int *pte, *ptd;
arg_start = 0xffffffff;
arg_end = 0;
@@ -372,22 +371,19 @@
args->seg.code32.base = (u_int)&bios16_jmp & PG_FRAME;
args->seg.code32.limit = 0xffff;
- ptd = (pd_entry_t *)rcr3();
-#ifdef PAE
- ptd = (pd_entry_t *)(*ptd & PG_FRAME);
-#endif
+ ptd = (u_int *)rcr3();
if (ptd == IdlePTD) {
/*
* no page table, so create one and install it.
*/
- pte = (pt_entry_t *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
- ptd = (pd_entry_t *)(KERNBASE + IdlePTD);
+ pte = (u_int *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
+ ptd = (u_int *)((u_int)ptd + KERNBASE);
*ptd = vtophys(pte) | PG_RW | PG_V;
} else {
/*
* this is a user-level page table
*/
- pte = PTmap;
+ pte = (u_int *)&PTmap;
}
/*
* install pointer to page 0. we don't need to flush the tlb,
@@ -444,7 +440,7 @@
i = bios16_call(&args->r, stack_top);
- if (pte == PTmap) {
+ if (pte == (u_int *)&PTmap) {
*pte = 0; /* remove entry */
} else {
*ptd = 0; /* remove page table */
Only in /usr/src/sys/i386/i386: bios.c.orig
Only in /usr/src/sys/i386/i386: bios.c.rej
Only in /usr/src/sys/i386/i386: blah.patch
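An aside on the bios.c hunks above, not part of the patch: they revert the
PTE/PDE pointers from the pt_entry_t/pd_entry_t typedefs back to plain
u_int. Under the PAE merge those entry types grew to 64 bits, roughly as in
this simplified sketch of the two layouts (an assumption for illustration,
not quoted from the headers):

#ifdef PAE
typedef u_int64_t pt_entry_t;	/* PAE: 64-bit entries, three-level paging */
typedef u_int64_t pd_entry_t;
#else
typedef u_int32_t pt_entry_t;	/* classic i386: 32-bit entries */
typedef u_int32_t pd_entry_t;
#endif

The revert keeps all of the bios16 trampoline's pointer arithmetic in
plain 32-bit terms.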
diff -u -r /usr/src/sys.old/i386/i386/db_interface.c /usr/src/sys/i386/i386/db_interface.c
--- /usr/src/sys.old/i386/i386/db_interface.c Thu Aug 21 18:35:33 2003
+++ /usr/src/sys/i386/i386/db_interface.c Fri Aug 22 02:15:24 2003
@@ -251,7 +251,7 @@
if (addr > trunc_page((vm_offset_t)btext) - size &&
addr < round_page((vm_offset_t)etext)) {
- ptep0 = vtopte(addr);
+ ptep0 = pmap_pte(kernel_pmap, addr);
oldmap0 = *ptep0;
*ptep0 |= PG_RW;
@@ -259,14 +259,14 @@
if ((*ptep0 & PG_PS) == 0) {
addr1 = trunc_page(addr + size - 1);
if (trunc_page(addr) != addr1) {
- ptep1 = vtopte(addr1);
+ ptep1 = pmap_pte(kernel_pmap, addr1);
oldmap1 = *ptep1;
*ptep1 |= PG_RW;
}
} else {
addr1 = trunc_4mpage(addr + size - 1);
if (trunc_4mpage(addr) != addr1) {
- ptep1 = vtopte(addr1);
+ ptep1 = pmap_pte(kernel_pmap, addr1);
oldmap1 = *ptep1;
*ptep1 |= PG_RW;
}
Only in /usr/src/sys/i386/i386: db_interface.c.orig
Only in /usr/src/sys/i386/i386: db_interface.c.rej
diff -u -r /usr/src/sys.old/i386/i386/genassym.c /usr/src/sys/i386/i386/genassym.c
--- /usr/src/sys.old/i386/i386/genassym.c Thu Aug 21 18:35:33 2003
+++ /usr/src/sys/i386/i386/genassym.c Fri Aug 22 02:15:02 2003
@@ -96,10 +96,6 @@
ASSYM(NPDEPG, NPDEPG);
ASSYM(PDESIZE, PDESIZE);
ASSYM(PTESIZE, PTESIZE);
-ASSYM(PDESHIFT, PDESHIFT);
-ASSYM(PTESHIFT, PTESHIFT);
-ASSYM(NPGPTD, NPGPTD);
-ASSYM(NPDEPTD, NPDEPTD);
ASSYM(PAGE_SHIFT, PAGE_SHIFT);
ASSYM(PAGE_MASK, PAGE_MASK);
ASSYM(PDRSHIFT, PDRSHIFT);
Only in /usr/src/sys/i386/i386: genassym.c.orig
Only in /usr/src/sys/i386/i386: genassym.c.rej
diff -u -r /usr/src/sys.old/i386/i386/locore.s /usr/src/sys/i386/i386/locore.s
--- /usr/src/sys.old/i386/i386/locore.s Thu Aug 21 18:35:33 2003
+++ /usr/src/sys/i386/i386/locore.s Fri Aug 22 02:14:42 2003
@@ -75,6 +75,15 @@
.set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
/*
+ * APTmap, APTD is the alternate recursive pagemap.
+ * It's used when modifying another process's page tables.
+ */
+ .globl _APTmap,_APTD,_APTDpde
+ .set _APTmap,APTDPTDI << PDRSHIFT
+ .set _APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
+ .set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
+
+/*
* Compiled KERNBASE location
*/
.globl _kernbase
@@ -103,16 +112,15 @@
_cpu_vendor: .space 20 /* CPU origin code */
_bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */
- .globl _KERNend
_KERNend: .long 0 /* phys addr end of kernel (just after bss) */
physfree: .long 0 /* phys addr of next free page */
#ifdef SMP
- .globl _cpu0prvpage
+ .globl _cpu0prvpage
cpu0pp: .long 0 /* phys addr cpu0 private pg */
_cpu0prvpage: .long 0 /* relocated version */
- .globl _SMPpt
+ .globl _SMPpt
SMPptpa: .long 0 /* phys addr SMP page table */
_SMPpt: .long 0 /* relocated version */
#endif /* SMP */
@@ -120,12 +128,6 @@
.globl _IdlePTD
_IdlePTD: .long 0 /* phys addr of kernel PTD */
-#ifdef PAE
- .globl _IdlePDPT
- .p2align 5
-_IdlePDPT: .space 32
-#endif
-
#ifdef SMP
.globl _KPTphys
#endif
@@ -181,13 +183,13 @@
* prot = protection bits
*/
#define fillkpt(base, prot) \
- shll $PTESHIFT,%ebx ; \
+ shll $2,%ebx ; \
addl base,%ebx ; \
orl $PG_V,%eax ; \
orl prot,%eax ; \
1: movl %eax,(%ebx) ; \
addl $PAGE_SIZE,%eax ; /* increment physical address */ \
- addl $PTESIZE,%ebx ; /* next pte */ \
+ addl $4,%ebx ; /* next pte */ \
loop 1b
/*
@@ -328,14 +330,7 @@
#endif
/* Now enable paging */
-#ifdef PAE
- movl %cr4,%eax
- orl $CR4_PAE,%eax
- movl %eax,%cr4
- movl $R(_IdlePDPT),%eax
-#else
- movl R(_IdlePTD),%eax
-#endif
+ movl R(_IdlePTD), %eax
movl %eax,%cr3 /* load ptd addr into mmu */
movl %cr0,%eax /* get control word */
orl $CR0_PE|CR0_PG,%eax /* enable paging */
@@ -358,10 +353,10 @@
/* set up bootstrap stack */
movl _proc0paddr,%esp /* location of in-kernel pages */
addl $UPAGES*PAGE_SIZE,%esp /* bootstrap stack end location */
- xorl %eax,%eax /* mark end of frames */
+ xorl %eax,%eax /* mark end of frames */
movl %eax,%ebp
movl _proc0paddr,%eax
- movl %cr3,%esi
+ movl _IdlePTD, %esi
movl %esi,PCB_CR3(%eax)
testl $CPUID_PGE, R(_cpu_feature)
@@ -372,11 +367,11 @@
1:
movl physfree, %esi
- pushl %esi /* value of first for init386(first) */
- call _init386 /* wire 386 chip for unix operation */
+ pushl %esi /* value of first for init386(first) */
+ call _init386 /* wire 386 chip for unix operation */
popl %esi
- call _mi_startup /* autoconfiguration, mountroot etc */
+ call _mi_startup /* autoconfiguration, mountroot etc */
hlt /* never returns to here */
@@ -742,7 +737,7 @@
movl %esi,R(_KPTphys)
/* Allocate Page Table Directory */
- ALLOCPAGES(NPGPTD)
+ ALLOCPAGES(1)
movl %esi,R(_IdlePTD)
/* Allocate UPAGES */
@@ -812,7 +807,7 @@
/* Map page directory. */
movl R(_IdlePTD), %eax
- movl $NPGPTD, %ecx
+ movl $1, %ecx
fillkptphys($PG_RW)
/* Map proc0's UPAGES in the physical way ... */
@@ -891,15 +886,9 @@
/* install a pde recursively mapping page directory as a page table */
movl R(_IdlePTD), %eax
movl $PTDPTDI, %ebx
- movl $NPGPTD, %ecx
+ movl $1,%ecx
fillkpt(R(_IdlePTD), $PG_RW)
-#ifdef PAE
- movl R(_IdlePTD),%eax
- xorl %ebx,%ebx
- movl $NPGPTD,%ecx
- fillkpt($R(_IdlePDPT), $0)
-#endif
ret
#ifdef BDE_DEBUGGER
Only in /usr/src/sys/i386/i386: locore.s.orig
Only in /usr/src/sys/i386/i386: locore.s.rej
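A note on the fillkpt change above: with 32-bit PTEs the entry stride is
again a hard-coded 4 bytes (shift the index left by 2 rather than by the
PTESHIFT symbol that genassym.c no longer exports). A rough C rendering of
the macro, assuming PG_V and PAGE_SIZE from the i386 headers (the function
name is mine):

/* Approximate C equivalent of the non-PAE fillkpt assembler macro. */
static void
fillkpt_sketch(unsigned *base, unsigned index, unsigned pa, unsigned prot,
    unsigned count)
{
	unsigned *pte = base + index;	/* shll $2 + addl: 4 bytes per pte */

	while (count-- > 0) {
		*pte++ = pa | PG_V | prot;	/* write one 32-bit entry */
		pa += PAGE_SIZE;		/* advance physical address */
	}
}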
diff -u -r /usr/src/sys.old/i386/i386/machdep.c /usr/src/sys/i386/i386/machdep.c
--- /usr/src/sys.old/i386/i386/machdep.c Thu Aug 21 18:35:33 2003
+++ /usr/src/sys/i386/i386/machdep.c Fri Aug 22 02:37:16 2003
@@ -271,8 +271,7 @@
#ifdef PERFMON
perfmon_init();
#endif
- printf("real memory = %llu (%lluK bytes)\n",
- ptoa((u_int64_t)Maxmem), ptoa((u_int64_t)Maxmem) / 1024);
+ printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), ptoa(Maxmem)
/ 1024);
/*
* Display any holes after the first chunk of extended memory.
*/
@@ -281,14 +280,11 @@
printf("Physical memory chunk(s):\n");
for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
- vm_paddr_t size1;
+ unsigned int size1 = phys_avail[indx + 1] - phys_avail[indx];
- size1 = phys_avail[indx + 1] - phys_avail[indx];
- printf("0x%09llx - 0x%09llx, %llu bytes (%llu pages)\n",
- (u_int64_t)phys_avail[indx],
- (u_int64_t)phys_avail[indx + 1] - 1,
- (u_int64_t)size1,
- (u_int64_t)size1 / PAGE_SIZE);
+ printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
+ phys_avail[indx], phys_avail[indx + 1] - 1, size1,
+ size1 / PAGE_SIZE);
}
}
@@ -439,9 +435,8 @@
cninit(); /* the preferred console may have changed */
#endif
- printf("avail memory = %llu (%lluK bytes)\n",
- ptoa((u_int64_t)cnt.v_free_count),
- ptoa((u_int64_t)cnt.v_free_count) / 1024);
+ printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count),
+ ptoa(cnt.v_free_count) / 1024);
/*
* Set up buffers, so they can be used to read disk labels.
@@ -1422,8 +1417,6 @@
*
* Total memory size may be set by the kernel environment variable
* hw.physmem or the compile-time define MAXMEM.
- *
- * XXX first should be vm_paddr_t.
*/
static void
getmemsize(int first)
@@ -1433,8 +1426,8 @@
u_int basemem, extmem;
struct vm86frame vmf;
struct vm86context vmc;
- vm_paddr_t pa, physmap[PHYSMAP_SIZE];
- pt_entry_t *pte;
+ vm_offset_t pa, physmap[PHYSMAP_SIZE];
+ pt_entry_t pte;
const char *cp;
struct {
u_int64_t base;
@@ -1490,7 +1483,7 @@
*/
for (pa = trunc_page(basemem * 1024);
pa < ISA_HOLE_START; pa += PAGE_SIZE) {
- pte = vtopte(pa + KERNBASE);
+ pte = (pt_entry_t)vtopte(pa + KERNBASE);
*pte = pa | PG_RW | PG_V;
}
@@ -1498,7 +1491,7 @@
* if basemem != 640, map pages r/w into vm86 page table so
* that the bios can scribble on it.
*/
- pte = (pt_entry_t *)vm86paddr;
+ pte = (pt_entry_t)vm86paddr;
for (i = basemem / 4; i < 160; i++)
pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
@@ -1507,7 +1500,7 @@
* map page 1 R/W into the kernel page table so we can use it
* as a buffer. The kernel will unmap this page later.
*/
- pte = vtopte(KERNBASE + (1 << PAGE_SHIFT));
+ pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;
/*
@@ -1530,8 +1523,12 @@
if (i || vmf.vmf_eax != SMAP_SIG)
break;
if (boothowto & RB_VERBOSE)
- printf("SMAP type=%02x base=%016llx len=%016llx\n",
- smap->type, smap->base, smap->length);
+ printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
+ smap->type,
+ *(u_int32_t *)((char *)&smap->base + 4),
+ (u_int32_t)smap->base,
+ *(u_int32_t *)((char *)&smap->length + 4),
+ (u_int32_t)smap->length);
if (smap->type != 0x01)
goto next_run;
@@ -1539,13 +1536,11 @@
if (smap->length == 0)
goto next_run;
-#ifndef PAE
if (smap->base >= 0xffffffff) {
printf("%uK of memory above 4GB ignored\n",
(u_int)(smap->length / 1024));
goto next_run;
}
-#endif
for (i = 0; i <= physmap_idx; i += 2) {
if (smap->base < physmap[i + 1]) {
@@ -1595,11 +1590,11 @@
for (pa = trunc_page(basemem * 1024);
pa < ISA_HOLE_START; pa += PAGE_SIZE) {
- pte = vtopte(pa + KERNBASE);
+ pte = (pt_entry_t)vtopte(pa + KERNBASE);
*pte = pa | PG_RW | PG_V;
}
- pte = (pt_entry_t *)vm86paddr;
+ pte = (pt_entry_t)vm86paddr;
for (i = basemem / 4; i < 160; i++)
pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
}
@@ -1712,7 +1707,10 @@
* extend the last memory segment to the new limit.
*/
if (atop(physmap[physmap_idx + 1]) < Maxmem)
- physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
+ physmap[physmap_idx + 1] = ptoa(Maxmem);
+
+ /* call pmap initialization to make new kernel address space */
+ pmap_bootstrap(first, 0);
/*
* Size up each available chunk of physical memory.
@@ -1721,21 +1719,29 @@
pa_indx = 0;
phys_avail[pa_indx++] = physmap[0];
phys_avail[pa_indx] = physmap[0];
- pte = vtopte(KERNBASE + PAGE_SIZE);
+#if 0
+ pte = (pt_entry_t)vtopte(KERNBASE);
+#else
+ pte = (pt_entry_t)CMAP1;
+#endif
/*
* physmap is in bytes, so when converting to page boundaries,
* round up the start address and round down the end address.
*/
for (i = 0; i <= physmap_idx; i += 2) {
- vm_paddr_t end;
+ vm_offset_t end;
- end = ptoa((vm_paddr_t)Maxmem);
+ end = ptoa(Maxmem);
if (physmap[i + 1] < end)
end = trunc_page(physmap[i + 1]);
for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
int tmp, page_bad;
- volatile int *ptr = (int *)(KERNBASE + PAGE_SIZE);
+#if 0
+ int *ptr = 0;
+#else
+ int *ptr = (int *)CADDR1;
+#endif
/*
* block out kernel memory as not available.
@@ -1751,39 +1757,39 @@
*pte = pa | PG_V | PG_RW | PG_N;
invltlb();
- tmp = *ptr;
+ tmp = *(int *)ptr;
/*
* Test for alternating 1's and 0's
*/
- *ptr = 0xaaaaaaaa;
- if (*ptr != 0xaaaaaaaa) {
+ *(volatile int *)ptr = 0xaaaaaaaa;
+ if (*(volatile int *)ptr != 0xaaaaaaaa) {
page_bad = TRUE;
}
/*
* Test for alternating 0's and 1's
*/
- *ptr = 0x55555555;
- if (*ptr != 0x55555555) {
- page_bad = TRUE;
+ *(volatile int *)ptr = 0x55555555;
+ if (*(volatile int *)ptr != 0x55555555) {
+ page_bad = TRUE;
}
/*
* Test for all 1's
*/
- *ptr = 0xffffffff;
- if (*ptr != 0xffffffff) {
+ *(volatile int *)ptr = 0xffffffff;
+ if (*(volatile int *)ptr != 0xffffffff) {
page_bad = TRUE;
}
/*
* Test for all 0's
*/
- *ptr = 0x0;
- if (*ptr != 0x0) {
+ *(volatile int *)ptr = 0x0;
+ if (*(volatile int *)ptr != 0x0) {
page_bad = TRUE;
}
/*
* Restore original value.
*/
- *ptr = tmp;
+ *(int *)ptr = tmp;
/*
* Adjust array of valid/good pages.
@@ -1995,11 +2001,7 @@
dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
-#ifdef PAE
- dblfault_tss.tss_cr3 = (int)IdlePDPT - KERNBASE;
-#else
dblfault_tss.tss_cr3 = (int)IdlePTD;
-#endif
dblfault_tss.tss_eip = (int) dblfault_handler;
dblfault_tss.tss_eflags = PSL_KERNEL;
dblfault_tss.tss_ds = dblfault_tss.tss_es =
dblfault_tss.tss_fs = GSEL(GDATA_SEL, SEL_KPL);
@@ -2010,7 +2012,6 @@
vm86_initialize();
getmemsize(first);
- pmap_bootstrap(first, 0);
init_param2(physmem);
/* now running on new page tables, configured,and u/iom is accessible */
@@ -2044,11 +2045,7 @@
/* setup proc 0's pcb */
proc0.p_addr->u_pcb.pcb_flags = 0;
-#ifdef PAE
- proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePDPT - KERNBASE;
-#else
- proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
-#endif
+ proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
#ifdef SMP
proc0.p_addr->u_pcb.pcb_mpnest = 1;
#endif
Only in /usr/src/sys/i386/i386: machdep.c.orig
Only in /usr/src/sys/i386/i386: machdep.c.rej
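The getmemsize() hunks above also show the per-page RAM probe, which now
runs through the CMAP1/CADDR1 temporary mapping again. Condensed into C,
the test applied to each candidate page is roughly the following (the
function name is mine, not the kernel's):

/* Sketch of the pattern test getmemsize() performs on each page. */
static int
page_tests_ok(volatile int *ptr)
{
	int saved = *ptr;
	int ok = 1;

	*ptr = 0xaaaaaaaa; ok &= (*ptr == 0xaaaaaaaa);	/* alternating 1010 */
	*ptr = 0x55555555; ok &= (*ptr == 0x55555555);	/* alternating 0101 */
	*ptr = 0xffffffff; ok &= (*ptr == 0xffffffff);	/* all ones */
	*ptr = 0x00000000; ok &= (*ptr == 0x00000000);	/* all zeros */
	*ptr = saved;					/* restore contents */
	return (ok);
}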
diff -u -r /usr/src/sys.old/i386/i386/mem.c /usr/src/sys/i386/i386/mem.c
--- /usr/src/sys.old/i386/i386/mem.c Thu Aug 21 18:35:33 2003
+++ /usr/src/sys/i386/i386/mem.c Fri Aug 22 02:10:41 2003
@@ -159,7 +159,7 @@
int flags;
{
register int o;
- register u_int c;
+ register u_int c, v;
u_int poolsize;
register struct iovec *iov;
int error = 0;
@@ -178,8 +178,9 @@
/* minor device 0 is physical memory */
case 0:
- pmap_kenter((vm_offset_t)ptvmmap,
- uio->uio_offset & ~PAGE_MASK);
+ v = uio->uio_offset;
+ v &= ~PAGE_MASK;
+ pmap_kenter((vm_offset_t)ptvmmap, v);
o = (int)uio->uio_offset & PAGE_MASK;
c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
c = min(c, (u_int)(PAGE_SIZE - o));
@@ -201,6 +202,8 @@
eaddr = round_page(uio->uio_offset + c);
if (addr < (vm_offset_t)VADDR(PTDPTDI, 0))
+ return EFAULT;
+ if (eaddr >= (vm_offset_t)VADDR(APTDPTDI, 0))
return EFAULT;
for (; addr < eaddr; addr += PAGE_SIZE)
if (pmap_extract(kernel_pmap, addr) == 0)
Only in /usr/src/sys/i386/i386: mem.c.orig
Only in /usr/src/sys/i386/i386: mem.c.rej
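On the mem.c hunk: /dev/kmem reads are now clamped from above as well, so a
scan can't wander into the alternate page table window that the rest of the
patch brings back. VADDR is just the usual directory/table index to address
macro; if memory serves it amounts to something like this sketch
(illustrative, not quoted from the headers):

/* Assumed shape of the i386 VADDR() macro. */
#define VADDR(pdi, pti) \
	((vm_offset_t)(((pdi) << PDRSHIFT) | ((pti) << PAGE_SHIFT)))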
diff -u -r /usr/src/sys.old/i386/i386/mp_machdep.c /usr/src/sys/i386/i386/mp_machdep.c
--- /usr/src/sys.old/i386/i386/mp_machdep.c Thu Aug 21 18:35:33 2003
+++ /usr/src/sys/i386/i386/mp_machdep.c Fri Aug 22 02:10:16 2003
@@ -2147,11 +2147,11 @@
gd->gd_prv_CMAP1 = &SMPpt[pg + 1];
gd->gd_prv_CMAP2 = &SMPpt[pg + 2];
gd->gd_prv_CMAP3 = &SMPpt[pg + 3];
- gd->gd_prv_PMAP1 = (pd_entry_t *)&SMPpt[pg + 4];
+ gd->gd_prv_PMAP1 = &SMPpt[pg + 4];
gd->gd_prv_CADDR1 = SMP_prvspace[x].CPAGE1;
gd->gd_prv_CADDR2 = SMP_prvspace[x].CPAGE2;
gd->gd_prv_CADDR3 = SMP_prvspace[x].CPAGE3;
- gd->gd_prv_PADDR1 = (pt_entry_t *)SMP_prvspace[x].PPAGE1;
+ gd->gd_prv_PADDR1 = (unsigned *)SMP_prvspace[x].PPAGE1;
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
@@ -2536,7 +2536,7 @@
if (cpuid != apic_id) {
printf("SMP: cpuid = %d\n", cpuid);
printf("SMP: apic_id = %d\n", apic_id);
- printf("PTD[MPPTDI] = %llx\n", (u_int64_t)PTD[MPPTDI]);
+ printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
panic("cpuid mismatch! boom!!");
}
Only in /usr/src/sys/i386/i386: mp_machdep.c.orig
Only in /usr/src/sys/i386/i386: mp_machdep.c.rej
diff -u -r /usr/src/sys.old/i386/i386/mpboot.s /usr/src/sys/i386/i386/mpboot.s
--- /usr/src/sys.old/i386/i386/mpboot.s Thu Aug 21 18:35:33 2003
+++ /usr/src/sys/i386/i386/mpboot.s Fri Aug 22 02:09:28 2003
@@ -40,8 +40,6 @@
#include "assym.s"
-#define R(x) ((x)-KERNBASE)
-
/*
* this code MUST be enabled here and in mp_machdep.c
* it follows the very early stages of AP boot by placing values in CMOS ram.
@@ -76,14 +74,7 @@
NON_GPROF_ENTRY(MPentry)
CHECKPOINT(0x36, 3)
/* Now enable paging mode */
-#ifdef PAE
- movl %cr4,%eax
- orl $CR4_PAE,%eax
- movl %eax,%cr4
- movl $R(_IdlePDPT),%eax
-#else
- movl R(_IdlePTD),%eax
-#endif
+ movl _IdlePTD-KERNBASE, %eax
movl %eax,%cr3
movl %cr0,%eax
orl $CR0_PE|CR0_PG,%eax /* enable paging */
Only in /usr/src/sys/i386/i386: mpboot.s.orig
Only in /usr/src/sys/i386/i386: mpboot.s.rej
diff -u -r /usr/src/sys.old/i386/i386/pmap.c /usr/src/sys/i386/i386/pmap.c
--- /usr/src/sys.old/i386/i386/pmap.c Thu Aug 21 18:35:34 2003
+++ /usr/src/sys/i386/i386/pmap.c Wed Mar 6 16:48:53 2002
@@ -39,39 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.19 2003/08/09 16:21:18 luoqi Exp $
- */
-
-/*-
- * Copyright (c) 2003 Networks Associates Technology, Inc.
- * All rights reserved.
- *
- * This software was developed for the FreeBSD Project by Jake Burkholder,
- * Safeport Network Services, and Network Associates Laboratories, the
- * Security Research Division of Network Associates, Inc. under
- * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
- * CHATS research program.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * $FreeBSD: /repoman/r/ncvs/src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
*/
/*
@@ -112,7 +80,6 @@
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
-#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -129,7 +96,6 @@
#include <sys/user.h>
-#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
@@ -162,16 +128,16 @@
* Get PDEs and PTEs for user/kernel address space
*/
#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
-#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
+#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
-#define pmap_pde_v(pte) ((*pte & PG_V) != 0)
-#define pmap_pte_w(pte) ((*pte & PG_W) != 0)
-#define pmap_pte_m(pte) ((*pte & PG_M) != 0)
-#define pmap_pte_u(pte) ((*pte & PG_A) != 0)
-#define pmap_pte_v(pte) ((*pte & PG_V) != 0)
+#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0)
+#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0)
+#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0)
+#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0)
+#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0)
-#define pmap_pte_set_w(pte, v) ((v) ? (*pte |= PG_W) : (*pte &= ~PG_W))
-#define pmap_pte_set_prot(pte, v) (*pte = (*pte & ~PG_PROT) | (v))
+#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
+#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
/*
* Given a map and a machine independent protection code,
@@ -183,8 +149,8 @@
static struct pmap kernel_pmap_store;
pmap_t kernel_pmap;
-vm_paddr_t avail_start; /* PA of first available physical page */
-vm_paddr_t avail_end; /* PA of last available physical page */
+vm_offset_t avail_start; /* PA of first available physical page */
+vm_offset_t avail_end; /* PA of last available physical page */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
@@ -202,34 +168,19 @@
static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
-static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
+static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static int pmap_pagedaemon_waken = 0;
static struct pv_entry *pvinit;
/*
* All those kernel PT submaps that BSD is so fond of
*/
-#ifdef SMP
-extern pt_entry_t *SMPpt;
-#define CMAP1 prv_CMAP1
-#define CMAP2 prv_CMAP2
-#define CMAP3 prv_CMAP3
-#define PMAP1 prv_PMAP1
-#define CADDR1 prv_CADDR1
-#define CADDR2 prv_CADDR2
-#define CADDR3 prv_CADDR3
-#define PADDR1 prv_PADDR1
-#else
-static pt_entry_t *CMAP1, *CMAP2, *CMAP3;
-static caddr_t CADDR1, CADDR2, CADDR3;
-static pd_entry_t *PMAP1;
-static pt_entry_t *PADDR1;
-#endif
-
-static pt_entry_t *ptmmap;
-caddr_t ptvmmap = 0;
+pt_entry_t *CMAP1 = 0;
+static pt_entry_t *CMAP2, *ptmmap;
+caddr_t CADDR1 = 0, ptvmmap = 0;
+static caddr_t CADDR2;
static pt_entry_t *msgbufmap;
-struct msgbuf *msgbufp = 0;
+struct msgbuf *msgbufp=0;
/*
* Crashdump maps.
@@ -237,36 +188,66 @@
static pt_entry_t *pt_crashdumpmap;
static caddr_t crashdumpmap;
-static pd_entry_t pdir4mb;
+#ifdef SMP
+extern pt_entry_t *SMPpt;
+#else
+static pt_entry_t *PMAP1 = 0;
+static unsigned *PADDR1 = 0;
+#endif
static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
+static unsigned * get_ptbase __P((pmap_t pmap));
static pv_entry_t get_pv_entry __P((void));
static void i386_protection_init __P((void));
static __inline void pmap_changebit __P((vm_page_t m, int bit, boolean_t setem));
+
static void pmap_remove_all __P((vm_page_t m));
static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
vm_page_t m, vm_page_t mpte));
-static int pmap_remove_pte __P((pmap_t pmap, pt_entry_t* ptq,
+static int pmap_remove_pte __P((struct pmap *pmap, unsigned *ptq,
vm_offset_t sva));
-static void pmap_remove_page __P((pmap_t pmap, vm_offset_t va));
-static int pmap_remove_entry __P((pmap_t pmap, vm_page_t m,
+static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
+static int pmap_remove_entry __P((struct pmap *pmap, vm_page_t m,
vm_offset_t va));
static boolean_t pmap_testbit __P((vm_page_t m, int bit));
static void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
vm_page_t mpte, vm_page_t m));
static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));
+
+static int pmap_release_free_page __P((pmap_t pmap, vm_page_t p));
static vm_page_t _pmap_allocpte __P((pmap_t pmap, unsigned ptepindex));
+static unsigned * pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
static vm_page_t pmap_page_lookup __P((vm_object_t object, vm_pindex_t pindex));
static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
-static int pmap_is_current(pmap_t);
+static unsigned pdir4mb;
-#ifdef PAE
-static pdpt_entry_t *pmap_alloc_pdpt(void);
-static void pmap_free_pdpt(pdpt_entry_t *);
-#endif
+/*
+ * Routine: pmap_pte
+ * Function:
+ * Extract the page table entry associated
+ * with the given map/virtual_address pair.
+ */
+
+PMAP_INLINE unsigned *
+pmap_pte(pmap, va)
+ register pmap_t pmap;
+ vm_offset_t va;
+{
+ unsigned *pdeaddr;
+
+ if (pmap) {
+ pdeaddr = (unsigned *) pmap_pde(pmap, va);
+ if (*pdeaddr & PG_PS)
+ return pdeaddr;
+ if (*pdeaddr) {
+ return get_ptbase(pmap) + i386_btop(va);
+ }
+ }
+ return (0);
+}
/*
* Move the kernel virtual free pointer to the next
@@ -297,7 +278,9 @@
* (physical) address starting relative to 0]
*/
void
-pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr)
+pmap_bootstrap(firstaddr, loadaddr)
+ vm_offset_t firstaddr;
+ vm_offset_t loadaddr;
{
vm_offset_t va;
pt_entry_t *pte;
@@ -309,7 +292,7 @@
avail_start = firstaddr;
/*
- * The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
+ * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
* large. It should instead be correctly calculated in locore.s and
* not based on 'first' (which is a physical address, not a virtual
* address, for the start of unused physical memory). The kernel
@@ -333,11 +316,9 @@
*/
kernel_pmap = &kernel_pmap_store;
- kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD);
+ kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
+ kernel_pmap->pm_count = 1;
kernel_pmap->pm_active = -1; /* don't allow deactivation */
-#ifdef PAE
- kernel_pmap->pm_pdpt = IdlePDPT;
-#endif
TAILQ_INIT(&kernel_pmap->pm_pvlist);
nkpt = NKPT;
@@ -349,22 +330,13 @@
v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
va = virtual_avail;
- pte = vtopte(va);
+ pte = (pt_entry_t *) pmap_pte(kernel_pmap, va);
-#ifndef SMP
/*
- * CMAP1/CMAP2/CMAP3 are used for zeroing and copying pages.
+ * CMAP1/CMAP2 are used for zeroing and copying pages.
*/
SYSMAP(caddr_t, CMAP1, CADDR1, 1)
SYSMAP(caddr_t, CMAP2, CADDR2, 1)
- SYSMAP(caddr_t, CMAP3, CADDR3, 1)
- *CMAP1 = *CMAP2 = *CMAP3 = 0;
-
- /*
- * ptemap is used for pmap_pte
- */
- SYSMAP(pd_entry_t *, PMAP1, PADDR1, 1);
-#endif
/*
* Crashdump maps.
@@ -384,38 +356,44 @@
SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
atop(round_page(MSGBUF_SIZE)))
+#if !defined(SMP)
+ /*
+ * ptemap is used for pmap_pte_quick
+ */
+ SYSMAP(unsigned *, PMAP1, PADDR1, 1);
+#endif
+
virtual_avail = va;
+ *(int *) CMAP1 = *(int *) CMAP2 = 0;
for (i = 0; i < NKPT; i++)
PTD[i] = 0;
- /*
- * Initialize the global page flag
- */
pgeflag = 0;
#if !defined(SMP) /* XXX - see also mp_machdep.c */
- if (cpu_feature & CPUID_PGE)
- pgeflag = PG_G;
+ if (cpu_feature & CPUID_PGE) {
+ pgeflag = PG_G;
+ }
#endif
-
- /*
- * Initialize the 4MB page size flag
- */
+
+/*
+ * Initialize the 4MB page size flag
+ */
pseflag = 0;
-#ifndef DISABLE_PSE
- if (cpu_feature & CPUID_PSE)
- pseflag = PG_PS;
-#endif
- /*
- * The 4MB page version of the initial
- * kernel page mapping.
- */
- if (pseflag) {
- pd_entry_t ptditmp;
+/*
+ * The 4MB page version of the initial
+ * kernel page mapping.
+ */
+ pdir4mb = 0;
+
+#if !defined(DISABLE_PSE)
+ if (cpu_feature & CPUID_PSE) {
+ unsigned ptditmp;
/*
* Note that we have enabled PSE mode
*/
- ptditmp = *(PTmap + i386_btop(KERNBASE));
+ pseflag = PG_PS;
+ ptditmp = *((unsigned *)PTmap + i386_btop(KERNBASE));
ptditmp &= ~(NBPDR - 1);
ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
pdir4mb = ptditmp;
@@ -435,18 +413,20 @@
* For SMP, we still need 4K pages to bootstrap APs,
* PSE will be enabled as soon as all APs are up.
*/
- kernel_pmap->pm_pdir[KPTDI] = PTD[KPTDI] = pdir4mb;
+ PTD[KPTDI] = (pd_entry_t) ptditmp;
+ kernel_pmap->pm_pdir[KPTDI] = (pd_entry_t) ptditmp;
invltlb();
#endif
}
+#endif
#ifdef SMP
if (cpu_apic_address == 0)
panic("pmap_bootstrap: no local apic!");
/* local apic is mapped on last page */
- SMPpt[NPTEPG - 1] = PG_V | PG_RW | PG_N | pgeflag |
- (cpu_apic_address & PG_FRAME);
+ SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N | pgeflag |
+ (cpu_apic_address & PG_FRAME));
/* BSP does this itself, AP's get it pre-set */
gd = &SMP_prvspace[0].globaldata;
@@ -457,7 +437,7 @@
gd->gd_prv_CADDR1 = SMP_prvspace[0].CPAGE1;
gd->gd_prv_CADDR2 = SMP_prvspace[0].CPAGE2;
gd->gd_prv_CADDR3 = SMP_prvspace[0].CPAGE3;
- gd->gd_prv_PADDR1 = (pt_entry_t *)SMP_prvspace[0].PPAGE1;
+ gd->gd_prv_PADDR1 = (unsigned *)SMP_prvspace[0].PPAGE1;
#endif
invltlb();
@@ -473,7 +453,8 @@
if (pseflag && (cpu_feature & CPUID_PSE)) {
load_cr4(rcr4() | CR4_PSE);
if (pdir4mb && cpuid == 0) { /* only on BSP */
- kernel_pmap->pm_pdir[KPTDI] = PTD[KPTDI] = pdir4mb;
+ kernel_pmap->pm_pdir[KPTDI] = PTD[KPTDI] = (pd_entry_t)pdir4mb;
cpu_invltlb();
}
}
@@ -488,7 +469,8 @@
* way, discontiguous physical memory.
*/
void
-pmap_init(vm_paddr_t phys_start, vm_paddr_t phys_end)
+pmap_init(phys_start, phys_end)
+ vm_offset_t phys_start, phys_end;
{
int i;
int initial_pvs;
@@ -552,17 +534,26 @@
***************************************************/
#if defined(PMAP_DIAGNOSTIC)
+
/*
* This code checks for non-writeable/modified pages.
* This should be an invalid condition.
*/
static int
-pmap_nw_modified(pt_entry_t pte)
+pmap_nw_modified(pt_entry_t ptea)
{
- return ((pte & (PG_M|PG_RW)) == PG_M);
+ int pte;
+
+ pte = (int) ptea;
+
+ if ((pte & (PG_M|PG_RW)) == PG_M)
+ return 1;
+ else
+ return 0;
}
#endif
+
/*
* this routine defines the region(s) of memory that should
* not be tested for the modified bit.
@@ -570,7 +561,10 @@
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
- return (va < clean_sva) || (va >= clean_eva);
+ if ((va < clean_sva) || (va >= clean_eva))
+ return 1;
+ else
+ return 0;
}
static PMAP_INLINE void
@@ -614,78 +608,65 @@
#endif
}
-#ifdef PAE
-static __inline pt_entry_t
-pte_load(pt_entry_t *pte)
-{
- pt_entry_t rv = 0;
- __asm __volatile(MPLOCKED "cmpxchg8b %1"
- : "+A" (rv) : "m" (*pte), "b" (0),
"c" (0));
- return rv;
-}
-
-static __inline pt_entry_t
-pte_store(pt_entry_t *pte, pt_entry_t v)
-{
- pt_entry_t rv = *pte;
- __asm __volatile("1:;" MPLOCKED "cmpxchg8b %1; jnz 1b"
- : "+A" (rv)
- : "m" (*pte), "b" ((u_int32_t)v), "c"
((u_int32_t)(v >> 32)));
- return rv;
-}
-#else
-static __inline pt_entry_t
-pte_load(pt_entry_t *pte)
+static unsigned *
+get_ptbase(pmap)
+ pmap_t pmap;
{
- return *pte;
-}
+ unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
-static __inline pt_entry_t
-pte_store(pt_entry_t *pte, pt_entry_t v)
-{
- __asm __volatile("xchgl %1,%0" : "+r" (v) : "m"
(*pte));
- return v;
-}
+ /* are we current address space or kernel? */
+ if (pmap == kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
+ return (unsigned *) PTmap;
+ }
+ /* otherwise, we are alternate address space */
+ if (frame != (((unsigned) APTDpde) & PG_FRAME)) {
+ APTDpde = (pd_entry_t) (frame | PG_RW | PG_V);
+#if defined(SMP)
+ /* The page directory is not shared between CPUs */
+ cpu_invltlb();
+#else
+ invltlb();
#endif
-
-/*
- * Are we current address space or kernel?
- */
-static __inline int
-pmap_is_current(pmap_t pmap)
-{
- return (pmap == kernel_pmap ||
- (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME));
+ }
+ return (unsigned *) APTmap;
}
/*
- * Routine: pmap_pte
- * Function:
- * Extract the page table entry associated
- * with the given map/virtual_address pair.
+ * Super fast pmap_pte routine best used when scanning
+ * the pv lists. This eliminates many coarse-grained
+ * invltlb calls. Note that many of the pv list
+ * scans are across different pmaps. It is very wasteful
+ * to do an entire invltlb for checking a single mapping.
*/
-pt_entry_t *
-pmap_pte(pmap_t pmap, vm_offset_t va)
+static unsigned *
+pmap_pte_quick(pmap, va)
+ register pmap_t pmap;
+ vm_offset_t va;
{
- pd_entry_t *pde, newpf;
-
- pde = pmap_pde(pmap, va);
- if (*pde & PG_V) {
- if (*pde & PG_PS)
- return (pt_entry_t *)pde;
- if (pmap_is_current(pmap))
- return vtopte(va);
- newpf = *pde & PG_FRAME;
- if ((*PMAP1 & PG_FRAME) != newpf) {
- *PMAP1 = newpf | PG_RW | PG_V;
+ unsigned pde, newpf;
+ if ((pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) != 0) {
+ unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
+ unsigned index = i386_btop(va);
+ /* are we current address space or kernel? */
+ if ((pmap == kernel_pmap) ||
+ (frame == (((unsigned) PTDpde) & PG_FRAME))) {
+ return (unsigned *) PTmap + index;
+ }
+ newpf = pde & PG_FRAME;
#ifdef SMP
- cpu_invlpg(PADDR1);
+ if ( ((* (unsigned *) prv_PMAP1) & PG_FRAME) != newpf) {
+ * (unsigned *) prv_PMAP1 = newpf | PG_RW | PG_V;
+ cpu_invlpg(prv_PADDR1);
+ }
+ return prv_PADDR1 + ((unsigned) index & (NPTEPG - 1));
#else
+ if ( ((* (unsigned *) PMAP1) & PG_FRAME) != newpf) {
+ * (unsigned *) PMAP1 = newpf | PG_RW | PG_V;
invltlb_1pg((vm_offset_t) PADDR1);
-#endif
}
- return PADDR1 + (i386_btop(va) & (NPTEPG - 1));
+ return PADDR1 + ((unsigned) index & (NPTEPG - 1));
+#endif
}
return (0);
}
@@ -696,20 +677,27 @@
* Extract the physical page address associated
* with the given map/virtual_address pair.
*/
-vm_paddr_t
-pmap_extract(pmap_t pmap, vm_offset_t va)
+vm_offset_t
+pmap_extract(pmap, va)
+ register pmap_t pmap;
+ vm_offset_t va;
{
- pt_entry_t *pte;
-
- if (pmap == 0)
- return 0;
- pte = pmap_pte(pmap, va);
- if (pte) {
- if (*pte & PG_PS)
- return (*pte & ~PDRMASK) | (va & PDRMASK);
- return (*pte & PG_FRAME) | (va & PAGE_MASK);
+ vm_offset_t rtval;
+ vm_offset_t pdirindex;
+ pdirindex = va >> PDRSHIFT;
+ if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
+ unsigned *pte;
+ if ((rtval & PG_PS) != 0) {
+ rtval &= ~(NBPDR - 1);
+ rtval |= va & (NBPDR - 1);
+ return rtval;
+ }
+ pte = get_ptbase(pmap) + i386_btop(va);
+ rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
+ return rtval;
}
return 0;
+
}
/***************************************************
@@ -722,12 +710,17 @@
* should do a invltlb after doing the pmap_kenter...
*/
PMAP_INLINE void
-pmap_kenter(vm_offset_t va, vm_paddr_t pa)
+pmap_kenter(va, pa)
+ vm_offset_t va;
+ register vm_offset_t pa;
{
- pt_entry_t *pte;
+ register unsigned *pte;
+ unsigned npte, opte;
- pte = vtopte(va);
- *pte = pa | PG_RW | PG_V | pgeflag;
+ npte = pa | PG_RW | PG_V | pgeflag;
+ pte = (unsigned *)vtopte(va);
+ opte = *pte;
+ *pte = npte;
invltlb_1pg(va);
}
@@ -735,11 +728,12 @@
* remove a page from the kernel pagetables
*/
PMAP_INLINE void
-pmap_kremove(vm_offset_t va)
+pmap_kremove(va)
+ vm_offset_t va;
{
- pt_entry_t *pte;
+ register unsigned *pte;
- pte = vtopte(va);
+ pte = (unsigned *)vtopte(va);
*pte = 0;
invltlb_1pg(va);
}
@@ -752,7 +746,11 @@
* specified memory.
*/
vm_offset_t
-pmap_map(vm_offset_t virt, vm_paddr_t start, vm_paddr_t end, int prot)
+pmap_map(virt, start, end, prot)
+ vm_offset_t virt;
+ vm_offset_t start;
+ vm_offset_t end;
+ int prot;
{
while (start < end) {
pmap_kenter(virt, start);
@@ -772,10 +770,19 @@
* over. The page *must* be wired.
*/
void
-pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
+pmap_qenter(va, m, count)
+ vm_offset_t va;
+ vm_page_t *m;
+ int count;
{
- while (count-- > 0) {
- pt_entry_t *pte = vtopte(va);
+ vm_offset_t end_va;
+
+ end_va = va + count * PAGE_SIZE;
+
+ while (va < end_va) {
+ unsigned *pte;
+
+ pte = (unsigned *)vtopte(va);
*pte = VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag;
#ifdef SMP
cpu_invlpg((void *)va);
@@ -795,10 +802,18 @@
* kernel -- it is meant only for temporary mappings.
*/
void
-pmap_qremove(vm_offset_t va, int count)
+pmap_qremove(va, count)
+ vm_offset_t va;
+ int count;
{
- while (count-- > 0) {
- pt_entry_t *pte = vtopte(va);
+ vm_offset_t end_va;
+
+ end_va = va + count*PAGE_SIZE;
+
+ while (va < end_va) {
+ unsigned *pte;
+
+ pte = (unsigned *)vtopte(va);
*pte = 0;
#ifdef SMP
cpu_invlpg((void *)va);
@@ -813,7 +828,9 @@
}
static vm_page_t
-pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
+pmap_page_lookup(object, pindex)
+ vm_object_t object;
+ vm_pindex_t pindex;
{
vm_page_t m;
retry:
@@ -828,12 +845,14 @@
* This routine directly affects the fork perf for a process.
*/
void
-pmap_new_proc(struct proc *p)
+pmap_new_proc(p)
+ struct proc *p;
{
- int i;
+ int i, updateneeded;
vm_object_t upobj;
- vm_page_t m, ma[UPAGES];
- vm_offset_t up;
+ vm_page_t m;
+ struct user *up;
+ unsigned *ptek, oldpte;
/*
* allocate object for the upages
@@ -844,19 +863,22 @@
}
/* get a kernel virtual address for the UPAGES for this proc */
- if ((up = (vm_offset_t) p->p_addr) == 0) {
- up = kmem_alloc_nofault(kernel_map, UPAGES * PAGE_SIZE);
- if (up == 0)
+ if ((up = p->p_addr) == NULL) {
+ up = (struct user *) kmem_alloc_nofault(kernel_map,
+ UPAGES * PAGE_SIZE);
+ if (up == NULL)
panic("pmap_new_proc: u_map allocation failed");
- p->p_addr = (struct user *) up;
+ p->p_addr = up;
}
- for(i = 0; i < UPAGES; i++) {
+ ptek = (unsigned *) vtopte((vm_offset_t) up);
+
+ updateneeded = 0;
+ for(i=0;i<UPAGES;i++) {
/*
* Get a kernel stack page
*/
m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
- ma[i] = m;
/*
* Wire the page
@@ -864,12 +886,26 @@
m->wire_count++;
cnt.v_wire_count++;
+ oldpte = *(ptek + i);
+ /*
+ * Enter the page into the kernel address space.
+ */
+ *(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag;
+ if (oldpte) {
+ if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386)) {
+ invlpg((vm_offset_t) up + i * PAGE_SIZE);
+ } else {
+ updateneeded = 1;
+ }
+ }
+
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
m->valid = VM_PAGE_BITS_ALL;
}
- pmap_qenter(up, ma, UPAGES);
+ if (updateneeded)
+ invltlb();
}
/*
@@ -877,23 +913,35 @@
* This routine directly impacts the exit perf of a process.
*/
void
-pmap_dispose_proc(struct proc *p)
+pmap_dispose_proc(p)
+ struct proc *p;
{
int i;
vm_object_t upobj;
vm_page_t m;
+ unsigned *ptek, oldpte;
upobj = p->p_upages_obj;
- pmap_qremove((vm_offset_t) p->p_addr, UPAGES);
- for(i = 0; i < UPAGES; i++) {
+ ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr);
+ for(i=0;i<UPAGES;i++) {
+
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_dispose_proc: upage already missing???");
vm_page_busy(m);
+
+ oldpte = *(ptek + i);
+ *(ptek + i) = 0;
+ if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386))
+ invlpg((vm_offset_t) p->p_addr + i * PAGE_SIZE);
vm_page_unwire(m, 0);
vm_page_free(m);
}
+#if defined(I386_CPU)
+ if (cpu_class <= CPUCLASS_386)
+ invltlb();
+#endif
/*
* If the process got swapped out some of its UPAGES might have gotten
@@ -910,23 +958,23 @@
* Allow the UPAGES for a process to be prejudicially paged out.
*/
void
-pmap_swapout_proc(struct proc *p)
+pmap_swapout_proc(p)
+ struct proc *p;
{
int i;
vm_object_t upobj;
vm_page_t m;
upobj = p->p_upages_obj;
- pmap_qremove((vm_offset_t) p->p_addr, UPAGES);
-
/*
* let the upages be paged
*/
- for(i = 0; i < UPAGES; i++) {
+ for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_swapout_proc: upage already missing???");
vm_page_dirty(m);
vm_page_unwire(m, 0);
+ pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
}
}
@@ -934,17 +982,20 @@
* Bring the UPAGES for a specified process back in.
*/
void
-pmap_swapin_proc(struct proc *p)
+pmap_swapin_proc(p)
+ struct proc *p;
{
- int i, rv;
+ int i,rv;
vm_object_t upobj;
- vm_page_t m, ma[UPAGES];
+ vm_page_t m;
upobj = p->p_upages_obj;
+ for(i=0;i<UPAGES;i++) {
- for(i = 0; i < UPAGES; i++) {
m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
- ma[i] = m;
+
+ pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
+ VM_PAGE_TO_PHYS(m));
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(upobj, &m, 1, 0);
@@ -958,8 +1009,6 @@
vm_page_wakeup(m);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
}
-
- pmap_qenter((vm_offset_t) p->p_addr, ma, UPAGES);
}
/***************************************************
@@ -971,8 +1020,8 @@
* drops to zero, then it decrements the wire count.
*/
static int
-_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
-{
+_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
+
while (vm_page_sleep_busy(m, FALSE, "pmuwpt"))
;
@@ -983,7 +1032,8 @@
*/
pmap->pm_pdir[m->pindex] = 0;
--pmap->pm_stats.resident_count;
- if (pmap_is_current(pmap)) {
+ if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) == (((unsigned) PTDpde) & PG_FRAME)) {
/*
* Do a invltlb to make the invalidated mapping
* take effect immediately.
@@ -1000,6 +1050,7 @@
*/
--m->wire_count;
if (m->wire_count == 0) {
+
vm_page_flash(m);
vm_page_busy(m);
vm_page_free_zero(m);
@@ -1025,20 +1076,22 @@
* conditionally free the page, and manage the hold/wire counts.
*/
static int
-pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
+pmap_unuse_pt(pmap, va, mpte)
+ pmap_t pmap;
+ vm_offset_t va;
+ vm_page_t mpte;
{
unsigned ptepindex;
-
if (va >= UPT_MIN_ADDRESS)
return 0;
if (mpte == NULL) {
ptepindex = (va >> PDRSHIFT);
if (pmap->pm_ptphint &&
- (pmap->pm_ptphint->pindex == ptepindex)) {
+ (pmap->pm_ptphint->pindex == ptepindex)) {
mpte = pmap->pm_ptphint;
} else {
- mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
+ mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
pmap->pm_ptphint = mpte;
}
}
@@ -1047,16 +1100,17 @@
}
void
-pmap_pinit0(pmap_t pmap)
+pmap_pinit0(pmap)
+ struct pmap *pmap;
{
- pmap->pm_pdir = (pd_entry_t *)(KERNBASE + IdlePTD);
+ pmap->pm_pdir = (pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
+ pmap_kenter((vm_offset_t) pmap->pm_pdir, (vm_offset_t) IdlePTD);
+ pmap->pm_count = 1;
pmap->pm_active = 0;
pmap->pm_ptphint = NULL;
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
-#ifdef PAE
- pmap->pm_pdpt = IdlePDPT;
-#endif
}
/*
@@ -1064,66 +1118,51 @@
* such as one in a vmspace structure.
*/
void
-pmap_pinit(pmap_t pmap)
+pmap_pinit(pmap)
+ register struct pmap *pmap;
{
- vm_page_t m, ma[NPGPTD];
- vm_paddr_t pa;
- int i;
+ vm_page_t ptdpg;
/*
* No need to allocate page table space yet but we do need a valid
* page directory table.
*/
- if (pmap->pm_pdir == NULL) {
- pmap->pm_pdir = (pd_entry_t *)kmem_alloc_pageable(kernel_map,
- NPGPTD * PAGE_SIZE);
-#ifdef PAE
- pmap->pm_pdpt = pmap_alloc_pdpt();
-#endif
- }
+ if (pmap->pm_pdir == NULL)
+ pmap->pm_pdir = (pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
/*
* allocate object for the ptes
*/
if (pmap->pm_pteobj == NULL)
- pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT,
- PTDPTDI + NPGPTD);
+ pmap->pm_pteobj = vm_object_allocate( OBJT_DEFAULT, PTDPTDI + 1);
/*
* allocate the page directory page
*/
- for (i = 0; i < NPGPTD; i++) {
- m = vm_page_grab(pmap->pm_pteobj, PTDPTDI + i,
- VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
- ma[i] = m;
+ ptdpg = vm_page_grab( pmap->pm_pteobj, PTDPTDI,
+ VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
- m->wire_count = 1;
- ++cnt.v_wire_count;
+ ptdpg->wire_count = 1;
+ ++cnt.v_wire_count;
- vm_page_flag_clear(m, PG_MAPPED | PG_BUSY);
- m->valid = VM_PAGE_BITS_ALL;
- }
- pmap_qenter((vm_offset_t)pmap->pm_pdir, ma, NPGPTD);
+ vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY); /* not usually mapped*/
+ ptdpg->valid = VM_PAGE_BITS_ALL;
- for (i = 0; i < NPGPTD; i++) {
- if ((ma[i]->flags & PG_ZERO) == 0)
- bzero(pmap->pm_pdir + i * NPDEPG, PAGE_SIZE);
- }
+ pmap_kenter((vm_offset_t) pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
+ if ((ptdpg->flags & PG_ZERO) == 0)
+ bzero(pmap->pm_pdir, PAGE_SIZE);
#ifdef SMP
pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
#endif
/* install self-referential address mapping entry */
- for (i = 0; i < NPGPTD; i++) {
- pa = VM_PAGE_TO_PHYS(ma[i]);
- pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
-#ifdef PAE
- pmap->pm_pdpt[i] = pa | PG_V;
-#endif
- }
+ *(unsigned *) (pmap->pm_pdir + PTDPTDI) = VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;
+ pmap->pm_count = 1;
pmap->pm_active = 0;
pmap->pm_ptphint = NULL;
TAILQ_INIT(&pmap->pm_pvlist);
@@ -1137,10 +1176,58 @@
* but before this pmap is activated.
*/
void
-pmap_pinit2(pmap_t pmap)
+pmap_pinit2(pmap)
+ struct pmap *pmap;
{
/* XXX copies current process, does not fill in MPPTDI */
- bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PDESIZE);
+ bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
+}
+
+static int
+pmap_release_free_page(pmap, p)
+ struct pmap *pmap;
+ vm_page_t p;
+{
+ unsigned *pde = (unsigned *) pmap->pm_pdir;
+ /*
+ * This code optimizes the case of freeing non-busy
+ * page-table pages. Those pages are zero now, and
+ * might as well be placed directly into the zero queue.
+ */
+ if (vm_page_sleep_busy(p, FALSE, "pmaprl"))
+ return 0;
+
+ vm_page_busy(p);
+
+ /*
+ * Remove the page table page from the processes address space.
+ */
+ pde[p->pindex] = 0;
+ pmap->pm_stats.resident_count--;
+
+ if (p->hold_count) {
+ panic("pmap_release: freeing held page table page");
+ }
+ /*
+ * Page directory pages need to have the kernel
+ * stuff cleared, so they can go into the zero queue also.
+ */
+ if (p->pindex == PTDPTDI) {
+ bzero(pde + KPTDI, nkpt * PTESIZE);
+#ifdef SMP
+ pde[MPPTDI] = 0;
+#endif
+ pde[APTDPTDI] = 0;
+ pmap_kremove((vm_offset_t) pmap->pm_pdir);
+ }
+
+ if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
+ pmap->pm_ptphint = NULL;
+
+ p->wire_count--;
+ cnt.v_wire_count--;
+ vm_page_free_zero(p);
+ return 1;
}
/*
@@ -1148,10 +1235,11 @@
* mapped correctly.
*/
static vm_page_t
-_pmap_allocpte(pmap_t pmap, unsigned ptepindex)
+_pmap_allocpte(pmap, ptepindex)
+ pmap_t pmap;
+ unsigned ptepindex;
{
- vm_offset_t pteva;
- vm_paddr_t ptepa;
+ vm_offset_t pteva, ptepa;
vm_page_t m;
/*
@@ -1181,7 +1269,8 @@
pmap->pm_stats.resident_count++;
ptepa = VM_PAGE_TO_PHYS(m);
- pmap->pm_pdir[ptepindex] = ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M;
+ pmap->pm_pdir[ptepindex] = (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
/*
* Set the page table hint
@@ -1193,7 +1282,8 @@
* do it with the routine that maps the page explicitly.
*/
if ((m->flags & PG_ZERO) == 0) {
- if (pmap_is_current(pmap)) {
+ if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) == (((unsigned) PTDpde) & PG_FRAME)) {
pteva = UPT_MIN_ADDRESS + i386_ptob(ptepindex);
bzero((caddr_t) pteva, PAGE_SIZE);
} else {
@@ -1210,10 +1300,12 @@
}
static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va)
+pmap_allocpte(pmap, va)
+ pmap_t pmap;
+ vm_offset_t va;
{
unsigned ptepindex;
- pd_entry_t pde;
+ vm_offset_t ptepa;
vm_page_t m;
/*
@@ -1224,15 +1316,15 @@
/*
* Get the page directory entry
*/
- pde = pmap->pm_pdir[ptepindex];
+ ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
/*
* This supports switching from a 4MB page to a
* normal 4K page.
*/
- if (pde & PG_PS) {
+ if (ptepa & PG_PS) {
pmap->pm_pdir[ptepindex] = 0;
- pde = 0;
+ ptepa = 0;
invltlb();
}
@@ -1240,15 +1332,16 @@
* If the page table page is mapped, we just increment the
* hold count, and activate it.
*/
- if (pde & PG_V) {
+ if (ptepa) {
/*
* In order to get the page table page, try the
* hint first.
*/
- if (pmap->pm_ptphint && pmap->pm_ptphint->pindex == ptepindex) {
+ if (pmap->pm_ptphint &&
+ (pmap->pm_ptphint->pindex == ptepindex)) {
m = pmap->pm_ptphint;
} else {
- m = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
+ m = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
pmap->pm_ptphint = m;
}
m->hold_count++;
@@ -1271,27 +1364,37 @@
* Should only be called if the map contains no valid mappings.
*/
void
-pmap_release(pmap_t pmap)
+pmap_release(pmap)
+ register struct pmap *pmap;
{
- vm_page_t m;
+ vm_page_t p,n,ptdpg;
vm_object_t object = pmap->pm_pteobj;
+ int curgeneration;
- bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) * PDESIZE);
-#ifdef SMP
- pmap->pm_pdir[MPPTDI] = 0;
+#if defined(DIAGNOSTIC)
+ if (object->ref_count != 1)
+ panic("pmap_release: pteobj reference count != 1");
#endif
- pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
-
- while ((m = TAILQ_FIRST(&object->memq))) {
- if (m->pindex < PTDPTDI || m->pindex >= KPTDI)
- panic("pmap_release: non ptd page");
- m->wire_count--;
- cnt.v_wire_count--;
- vm_page_busy(m);
- vm_page_free_zero(m);
+
+ ptdpg = NULL;
+retry:
+ curgeneration = object->generation;
+ for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) {
+ n = TAILQ_NEXT(p, listq);
+ if (p->pindex == PTDPTDI) {
+ ptdpg = p;
+ continue;
+ }
+ while (1) {
+ if (!pmap_release_free_page(pmap, p) &&
+ (object->generation != curgeneration))
+ goto retry;
+ }
}
-}
+ if (ptdpg && !pmap_release_free_page(pmap, ptdpg))
+ goto retry;
+}
static int
kvm_size(SYSCTL_HANDLER_ARGS)
@@ -1322,7 +1425,7 @@
struct proc *p;
struct pmap *pmap;
int s;
- vm_paddr_t ptppaddr;
+ vm_offset_t ptppaddr;
vm_page_t nkpg;
pd_entry_t newpdir;
@@ -1330,17 +1433,15 @@
if (kernel_vm_end == 0) {
kernel_vm_end = KERNBASE;
nkpt = 0;
- while (pdir_pde(PTD, kernel_vm_end) & PG_V) {
- kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
- ~(PAGE_SIZE * NPTEPG - 1);
+ while (pdir_pde(PTD, kernel_vm_end)) {
+ kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
nkpt++;
}
}
addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
while (kernel_vm_end < addr) {
- if (pdir_pde(PTD, kernel_vm_end) & PG_V) {
- kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
- ~(PAGE_SIZE * NPTEPG - 1);
+ if (pdir_pde(PTD, kernel_vm_end)) {
+ kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
continue;
}
@@ -1356,7 +1457,7 @@
vm_page_wire(nkpg);
ptppaddr = VM_PAGE_TO_PHYS(nkpg);
pmap_zero_page(ptppaddr);
- newpdir = ptppaddr | PG_V | PG_RW | PG_A | PG_M;
+ newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
pdir_pde(PTD, kernel_vm_end) = newpdir;
LIST_FOREACH(p, &allproc, p_list) {
@@ -1366,12 +1467,44 @@
}
}
*pmap_pde(kernel_pmap, kernel_vm_end) = newpdir;
- kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
- ~(PAGE_SIZE * NPTEPG - 1);
+ kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
}
splx(s);
}
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains
+ * no valid mappings.
+ */
+void
+pmap_destroy(pmap)
+ register pmap_t pmap;
+{
+ int count;
+
+ if (pmap == NULL)
+ return;
+
+ count = --pmap->pm_count;
+ if (count == 0) {
+ pmap_release(pmap);
+ panic("destroying a pmap is not yet implemented");
+ }
+}
+
+/*
+ * Add a reference to the specified pmap.
+ */
+void
+pmap_reference(pmap)
+ pmap_t pmap;
+{
+ if (pmap != NULL) {
+ pmap->pm_count++;
+ }
+}
+
/***************************************************
* page management routines.
***************************************************/
@@ -1380,7 +1513,8 @@
* free the pv_entry back to the free list
*/
static PMAP_INLINE void
-free_pv_entry(pv_entry_t pv)
+free_pv_entry(pv)
+ pv_entry_t pv;
{
pv_entry_count--;
zfreei(pvzone, pv);
@@ -1396,8 +1530,9 @@
get_pv_entry(void)
{
pv_entry_count++;
- if (pv_entry_high_water && (pv_entry_count > pv_entry_high_water) &&
- (pmap_pagedaemon_waken == 0)) {
+ if (pv_entry_high_water &&
+ (pv_entry_count > pv_entry_high_water) &&
+ (pmap_pagedaemon_waken == 0)) {
pmap_pagedaemon_waken = 1;
wakeup (&vm_pages_needed);
}
@@ -1442,7 +1577,10 @@
*/
static int
-pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
+pmap_remove_entry(pmap, m, va)
+ struct pmap *pmap;
+ vm_page_t m;
+ vm_offset_t va;
{
pv_entry_t pv;
int rtval;
@@ -1463,6 +1601,7 @@
rtval = 0;
if (pv) {
+
rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count--;
@@ -1482,8 +1621,13 @@
* (pmap, va).
*/
static void
-pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
+pmap_insert_entry(pmap, va, mpte, m)
+ pmap_t pmap;
+ vm_offset_t va;
+ vm_page_t mpte;
+ vm_page_t m;
{
+
int s;
pv_entry_t pv;
@@ -1504,12 +1648,15 @@
* pmap_remove_pte: do the things to unmap a page in a process
*/
static int
-pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va)
+pmap_remove_pte(pmap, ptq, va)
+ struct pmap *pmap;
+ unsigned *ptq;
+ vm_offset_t va;
{
- pt_entry_t oldpte;
+ unsigned oldpte;
vm_page_t m;
- oldpte = pte_store(ptq, 0);
+ oldpte = loadandclear(ptq);
if (oldpte & PG_W)
pmap->pm_stats.wired_count -= 1;
/*
@@ -1523,7 +1670,7 @@
m = PHYS_TO_VM_PAGE(oldpte);
if (oldpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
- if (pmap_nw_modified(oldpte)) {
+ if (pmap_nw_modified((pt_entry_t) oldpte)) {
printf(
"pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
va, oldpte);
@@ -1546,18 +1693,25 @@
* Remove a single page from a process address space
*/
static void
-pmap_remove_page(pmap_t pmap, vm_offset_t va)
+pmap_remove_page(pmap, va)
+ struct pmap *pmap;
+ register vm_offset_t va;
{
- pt_entry_t *pte;
+ register unsigned *ptq;
/*
- * get a local va for mappings for this pmap.
+ * if there is no pte for this address, just skip it!!!
*/
- pte = pmap_pte(pmap, va);
- if (!pte)
+ if (*pmap_pde(pmap, va) == 0) {
return;
- if (*pte & PG_V) {
- (void) pmap_remove_pte(pmap, pte, va);
+ }
+
+ /*
+ * get a local va for mappings for this pmap.
+ */
+ ptq = get_ptbase(pmap) + i386_btop(va);
+ if (*ptq) {
+ (void) pmap_remove_pte(pmap, ptq, va);
pmap_TLB_invalidate(pmap, va);
}
return;
@@ -1570,11 +1724,15 @@
* rounded to the page size.
*/
void
-pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+pmap_remove(pmap, sva, eva)
+ struct pmap *pmap;
+ register vm_offset_t sva;
+ register vm_offset_t eva;
{
- pt_entry_t *pte;
- pd_entry_t pde;
- vm_offset_t nva;
+ register unsigned *ptbase;
+ vm_offset_t pdnxt;
+ vm_offset_t ptpaddr;
+ vm_offset_t sindex, eindex;
int anyvalid;
if (pmap == NULL)
@@ -1588,8 +1746,8 @@
* common operation and easy to short circuit some
* code.
*/
- if (sva + PAGE_SIZE == eva &&
- (pmap->pm_pdir[sva >> PDRSHIFT] & PG_PS) == 0) {
+ if (((sva + PAGE_SIZE) == eva) &&
+ (((unsigned) pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
pmap_remove_page(pmap, sva);
return;
}
@@ -1600,20 +1758,23 @@
* Get a local virtual address for the mappings that are being
* worked with.
*/
+ ptbase = get_ptbase(pmap);
+
+ sindex = i386_btop(sva);
+ eindex = i386_btop(eva);
- for (; sva < eva; sva = nva) {
+ for (; sindex < eindex; sindex = pdnxt) {
unsigned pdirindex;
/*
- * Calculate address for next page table.
+ * Calculate index for next page table.
*/
- nva = (sva + NBPDR) & ~PDRMASK;
-
+ pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
if (pmap->pm_stats.resident_count == 0)
break;
- pdirindex = sva >> PDRSHIFT;
- if (((pde = pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
+ pdirindex = sindex / NPDEPG;
+ if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
pmap->pm_pdir[pdirindex] = 0;
pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
anyvalid++;
@@ -1624,7 +1785,7 @@
* Weed out invalid mappings. Note: we assume that the page
* directory table is always allocated, and in kernel virtual.
*/
- if ((pde & PG_V) == 0)
+ if (ptpaddr == 0)
continue;
/*
@@ -1632,16 +1793,20 @@
* by the current page table page, or to the end of the
* range being removed.
*/
- if (nva > eva)
- nva = eva;
+ if (pdnxt > eindex) {
+ pdnxt = eindex;
+ }
- pte = pmap_pte(pmap, sva);
- for (; sva < nva; sva += PAGE_SIZE, pte++) {
- if ((*pte & PG_V) == 0)
+ for ( ;sindex != pdnxt; sindex++) {
+ vm_offset_t va;
+ if (ptbase[sindex] == 0) {
continue;
+ }
+ va = i386_ptob(sindex);
anyvalid++;
- if (pmap_remove_pte(pmap, pte, sva))
+ if (pmap_remove_pte(pmap,
+ ptbase + sindex, va))
break;
}
}
@@ -1664,10 +1829,11 @@
*/
static void
-pmap_remove_all(vm_page_t m)
+pmap_remove_all(m)
+ vm_page_t m;
{
- pv_entry_t pv;
- pt_entry_t *pte, tpte;
+ register pv_entry_t pv;
+ register unsigned *pte, tpte;
int s;
#if defined(PMAP_DIAGNOSTIC)
@@ -1684,9 +1850,9 @@
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pv->pv_pmap->pm_stats.resident_count--;
- pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
- tpte = pte_store(pte, 0);
+ tpte = loadandclear(pte);
if (tpte & PG_W)
pv->pv_pmap->pm_stats.wired_count--;
@@ -1698,7 +1864,7 @@
*/
if (tpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
- if (pmap_nw_modified(tpte)) {
+ if (pmap_nw_modified((pt_entry_t) tpte)) {
printf(
"pmap_remove_all: modified page not writable: va: 0x%x, pte:
0x%x\n",
pv->pv_va, tpte);
@@ -1728,9 +1894,9 @@
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
- pt_entry_t *pte;
- pd_entry_t pde;
- vm_offset_t nva;
+ register unsigned *ptbase;
+ vm_offset_t pdnxt, ptpaddr;
+ vm_pindex_t sindex, eindex;
int anychanged;
if (pmap == NULL)
@@ -1746,15 +1912,20 @@
anychanged = 0;
- for (; sva < eva; sva = nva) {
+ ptbase = get_ptbase(pmap);
+
+ sindex = i386_btop(sva);
+ eindex = i386_btop(eva);
+
+ for (; sindex < eindex; sindex = pdnxt) {
unsigned pdirindex;
- nva = (sva + NBPDR) & ~PDRMASK;
+ pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
- pdirindex = sva >> PDRSHIFT;
- if (((pde = pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
- pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
+ pdirindex = sindex / NPDEPG;
+ if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
+ (unsigned) pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
anychanged++;
continue;
@@ -1764,18 +1935,19 @@
* Weed out invalid mappings. Note: we assume that the page
* directory table is always allocated, and in kernel virtual.
*/
- if ((pde & PG_V) == 0)
+ if (ptpaddr == 0)
continue;
- if (nva > eva)
- nva = eva;
+ if (pdnxt > eindex) {
+ pdnxt = eindex;
+ }
+
+ for (; sindex != pdnxt; sindex++) {
- pte = pmap_pte(pmap, sva);
- for (; sva < nva; sva += PAGE_SIZE, pte++) {
- pt_entry_t pbits;
+ unsigned pbits;
vm_page_t m;
- pbits = *pte;
+ pbits = ptbase[sindex];
if (pbits & PG_MANAGED) {
m = NULL;
@@ -1785,7 +1957,7 @@
pbits &= ~PG_A;
}
if (pbits & PG_M) {
- if (pmap_track_modified(sva)) {
+ if (pmap_track_modified(i386_ptob(sindex))) {
if (m == NULL)
m = PHYS_TO_VM_PAGE(pbits);
vm_page_dirty(m);
@@ -1796,8 +1968,8 @@
pbits &= ~PG_RW;
- if (pbits != *pte) {
- *pte = pbits;
+ if (pbits != ptbase[sindex]) {
+ ptbase[sindex] = pbits;
anychanged = 1;
}
}
@@ -1822,8 +1994,10 @@
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
boolean_t wired)
{
- vm_paddr_t pa, opa;
- pt_entry_t *pte, origpte, newpte;
+ vm_offset_t pa;
+ register unsigned *pte;
+ vm_offset_t opa;
+ vm_offset_t origpte, newpte;
vm_page_t mpte;
if (pmap == NULL)
@@ -1847,15 +2021,15 @@
}
#if 0 && defined(PMAP_DIAGNOSTIC)
else {
- pd_entry_t *pdeaddr = pmap_pde(pmap, va);
- if (((origpte = *pdeaddr) & PG_V) == 0) {
+ vm_offset_t *pdeaddr = (vm_offset_t *)pmap_pde(pmap, va);
+ if (((origpte = (vm_offset_t) *pdeaddr) & PG_V) == 0) {
panic("pmap_enter: invalid kernel page table page(0), pdir=%p, pde=%p,
va=%p\n",
pmap->pm_pdir[PTDPTDI], origpte, va);
}
if (smp_active) {
- pdeaddr = IdlePTDS[cpuid];
+ pdeaddr = (vm_offset_t *) IdlePTDS[cpuid];
if (((newpte = pdeaddr[va >> PDRSHIFT]) & PG_V) == 0) {
- if (my_idlePTD != vtophys(pdeaddr))
+ if ((vm_offset_t) my_idlePTD != (vm_offset_t) vtophys(pdeaddr))
printf("pde mismatch: %x, %x\n", my_idlePTD, pdeaddr);
printf("cpuid: %d, pdeaddr: 0x%x\n", cpuid, pdeaddr);
panic("pmap_enter: invalid kernel page table page(1), pdir=%p,
npde=%p, pde=%p, va=%p\n",
@@ -1871,12 +2045,12 @@
* Page Directory table entry not valid, we need a new PT page
*/
if (pte == NULL) {
- panic("pmap_enter: invalid page directory pdir=%#llx, va=%#x\n",
- (u_int64_t)pmap->pm_pdir[PTDPTDI], va);
+ panic("pmap_enter: invalid page directory, pdir=%p, va=0x%x\n",
+ (void *)pmap->pm_pdir[PTDPTDI], va);
}
pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
- origpte = *pte;
+ origpte = *(vm_offset_t *)pte;
opa = origpte & PG_FRAME;
if (origpte & PG_PS)
@@ -1885,7 +2059,7 @@
/*
* Mapping has not changed, must be protection or wiring change.
*/
- if ((origpte & PG_V) && (opa == pa)) {
+ if (origpte && (opa == pa)) {
/*
* Wiring change, just update stats. We don't worry about
* wiring PT pages as they remain resident as long as there
@@ -1911,10 +2085,16 @@
if (mpte)
mpte->hold_count--;
- if ((prot & VM_PROT_WRITE)) {
+ if ((prot & VM_PROT_WRITE) && (origpte & PG_V)) {
if ((origpte & PG_RW) == 0) {
*pte |= PG_RW;
- pmap_TLB_invalidate(pmap, va);
+#ifdef SMP
+ cpu_invlpg((void *)va);
+ if (pmap->pm_active & other_cpus)
+ smp_invltlb();
+#else
+ invltlb_1pg(va);
+#endif
}
return;
}
@@ -1932,12 +2112,12 @@
pa |= PG_MANAGED;
}
goto validate;
- }
+ }
/*
* Mapping has changed, invalidate old range and fall through to
* handle validating new mapping.
*/
- if ((origpte & PG_V)) {
+ if (opa) {
int err;
err = pmap_remove_pte(pmap, pte, va);
if (err)
@@ -1949,7 +2129,8 @@
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
- if (pmap_initialized && !(m->flags & (PG_FICTITIOUS|PG_UNMANAGED))) {
+ if (pmap_initialized &&
+ (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, mpte, m);
pa |= PG_MANAGED;
}
@@ -1965,7 +2146,7 @@
/*
* Now validate mapping with desired protection/wiring.
*/
- newpte = pa | pte_prot(pmap, prot) | PG_V;
+ newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);
if (wired)
newpte |= PG_W;
@@ -1980,7 +2161,15 @@
*/
if ((origpte & ~(PG_M|PG_A)) != newpte) {
*pte = newpte | PG_A;
- pmap_TLB_invalidate(pmap, va);
+ /*if (origpte)*/ {
+#ifdef SMP
+ cpu_invlpg((void *)va);
+ if (pmap->pm_active & other_cpus)
+ smp_invltlb();
+#else
+ invltlb_1pg(va);
+#endif
+ }
}
}
@@ -1996,10 +2185,14 @@
*/
static vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap, va, m, mpte)
+ register pmap_t pmap;
+ vm_offset_t va;
+ vm_page_t m;
+ vm_page_t mpte;
{
- pt_entry_t *pte;
- vm_paddr_t pa;
+ unsigned *pte;
+ vm_offset_t pa;
/*
* In the case that a page table page is not
@@ -2007,7 +2200,7 @@
*/
if (va < UPT_MIN_ADDRESS) {
unsigned ptepindex;
- pd_entry_t pde;
+ vm_offset_t ptepa;
/*
* Calculate pagetable page index
@@ -2020,20 +2213,20 @@
/*
* Get the page directory entry
*/
- pde = pmap->pm_pdir[ptepindex];
+ ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
/*
* If the page table page is mapped, we just increment
* the hold count, and activate it.
*/
- if ((pde & PG_V)) {
- if (pde & PG_PS)
+ if (ptepa) {
+ if (ptepa & PG_PS)
panic("pmap_enter_quick: unexpected mapping into 4MB page");
if (pmap->pm_ptphint &&
- (pmap->pm_ptphint->pindex == ptepindex)) {
+ (pmap->pm_ptphint->pindex == ptepindex)) {
mpte = pmap->pm_ptphint;
} else {
- mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
+ mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
pmap->pm_ptphint = mpte;
}
if (mpte == NULL)
@@ -2050,10 +2243,10 @@
/*
* This call to vtopte makes the assumption that we are
* entering the page into the current pmap. In order to support
- * quick entry into any pmap, one would likely use pmap_pte.
+ * quick entry into any pmap, one would likely use pmap_pte_quick.
* But that isn't as quick as vtopte.
*/
- pte = vtopte(va);
+ pte = (unsigned *)vtopte(va);
if (*pte) {
if (mpte)
pmap_unwire_pte_hold(pmap, mpte);
@@ -2091,7 +2284,7 @@
* to be used for panic dumps.
*/
void *
-pmap_kenter_temporary(vm_paddr_t pa, int i)
+pmap_kenter_temporary(vm_offset_t pa, int i)
{
pmap_kenter((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
return ((void *)crashdumpmap);
@@ -2126,16 +2319,16 @@
* are taken, but the code works.
*/
if (pseflag &&
- (object->type == OBJT_DEVICE) &&
- ((addr & (NBPDR - 1)) == 0) &&
- ((size & (NBPDR - 1)) == 0) ) {
+ (object->type == OBJT_DEVICE) &&
+ ((addr & (NBPDR - 1)) == 0) &&
+ ((size & (NBPDR - 1)) == 0) ) {
int i;
vm_page_t m[1];
unsigned int ptepindex;
int npdes;
- pd_entry_t ptepa;
+ vm_offset_t ptepa;
- if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)] & PG_V)
+ if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
return;
retry:
@@ -2158,7 +2351,7 @@
vm_page_wakeup(p);
}
- ptepa = VM_PAGE_TO_PHYS(p);
+ ptepa = (vm_offset_t) VM_PAGE_TO_PHYS(p);
if (ptepa & (NBPDR - 1)) {
return;
}
@@ -2167,9 +2360,9 @@
pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
npdes = size >> PDRSHIFT;
- for(i = 0; i < npdes; i++) {
+ for(i=0;i<npdes;i++) {
- pmap->pm_pdir[ptepindex] = ptepa | PG_U | PG_RW | PG_V | PG_PS;
+ pmap->pm_pdir[ptepindex] =
+ (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_PS);
ptepa += NBPDR;
ptepindex += 1;
}
@@ -2220,7 +2413,7 @@
cnt.v_free_count < cnt.v_free_reserved) {
break;
}
- if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
+ if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if ((p->queue - p->pc) == PQ_CACHE)
@@ -2248,7 +2441,7 @@
}
p = vm_page_lookup(object, tmpidx + pindex);
if (p &&
- (p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
+ ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if ((p->queue - p->pc) == PQ_CACHE)
@@ -2261,6 +2454,7 @@
}
}
}
+ return;
}
/*
@@ -2308,7 +2502,7 @@
mpte = NULL;
for (i = 0; i < PAGEORDER_SIZE; i++) {
vm_object_t lobject;
- pt_entry_t *pte;
+ unsigned *pte;
addr = addra + pmap_prefault_pageorder[i];
if (addr > addra + (PFFOR * PAGE_SIZE))
@@ -2317,11 +2511,11 @@
if (addr < starta || addr >= entry->end)
continue;
- if ((*pmap_pde(pmap, addr) & PG_V) == 0)
+ if ((*pmap_pde(pmap, addr)) == NULL)
continue;
- pte = vtopte(addr);
- if ((*pte & PG_V))
+ pte = (unsigned *) vtopte(addr);
+ if (*pte)
continue;
pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
@@ -2341,7 +2535,7 @@
if (m == NULL)
break;
- if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
+ if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(m->busy == 0) &&
(m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
@@ -2364,9 +2558,12 @@
* The mapping must already exist in the pmap.
*/
void
-pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+pmap_change_wiring(pmap, va, wired)
+ register pmap_t pmap;
+ vm_offset_t va;
+ boolean_t wired;
{
- pt_entry_t *pte;
+ register unsigned *pte;
if (pmap == NULL)
return;
@@ -2405,21 +2602,32 @@
vm_offset_t addr;
vm_offset_t end_addr = src_addr + len;
vm_offset_t pdnxt;
- vm_paddr_t src_frame;
+ unsigned src_frame, dst_frame;
vm_page_t m;
if (dst_addr != src_addr)
return;
- src_frame = src_pmap->pm_pdir[PTDPTDI] & PG_FRAME;
- if (src_frame != (PTDpde[0] & PG_FRAME)) {
+ src_frame = ((unsigned) src_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
+ if (src_frame != (((unsigned) PTDpde) & PG_FRAME)) {
return;
}
+ dst_frame = ((unsigned) dst_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
+ if (dst_frame != (((unsigned) APTDpde) & PG_FRAME)) {
+ APTDpde = (pd_entry_t) (dst_frame | PG_RW | PG_V);
+#if defined(SMP)
+ /* The page directory is not shared between CPUs */
+ cpu_invltlb();
+#else
+ invltlb();
+#endif
+ }
+
for(addr = src_addr; addr < end_addr; addr = pdnxt) {
- pt_entry_t *src_pte, *dst_pte;
+ unsigned *src_pte, *dst_pte;
vm_page_t dstmpte, srcmpte;
- pd_entry_t srcptepaddr;
+ vm_offset_t srcptepaddr;
unsigned ptepindex;
if (addr >= UPT_MIN_ADDRESS)
@@ -2434,32 +2642,33 @@
pv_entry_count > pv_entry_high_water)
break;
- pdnxt = (addr + NBPDR) & ~(NBPDR - 1);
+ pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
ptepindex = addr >> PDRSHIFT;
- srcptepaddr = src_pmap->pm_pdir[ptepindex];
+ srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
if (srcptepaddr == 0)
continue;
if (srcptepaddr & PG_PS) {
if (dst_pmap->pm_pdir[ptepindex] == 0) {
- dst_pmap->pm_pdir[ptepindex] = srcptepaddr;
- dst_pmap->pm_stats.resident_count += NPDEPG;
+ dst_pmap->pm_pdir[ptepindex] = (pd_entry_t) srcptepaddr;
+ dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
}
continue;
}
srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
if ((srcmpte == NULL) ||
- (srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
+ (srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
continue;
if (pdnxt > end_addr)
pdnxt = end_addr;
- src_pte = vtopte(addr);
+ src_pte = (unsigned *) vtopte(addr);
+ dst_pte = (unsigned *) avtopte(addr);
while (addr < pdnxt) {
- pt_entry_t ptetemp;
+ unsigned ptetemp;
ptetemp = *src_pte;
/*
* we only virtual copy managed pages
@@ -2471,7 +2680,6 @@
* block.
*/
dstmpte = pmap_allocpte(dst_pmap, addr);
- dst_pte = pmap_pte(dst_pmap, addr);
if ((*dst_pte == 0) && (ptetemp = *src_pte)) {
/*
* Clear the modified and
@@ -2491,9 +2699,10 @@
}
addr += PAGE_SIZE;
src_pte++;
+ dst_pte++;
}
}
-}
+}
/*
* Routine: pmap_kernel
@@ -2511,25 +2720,39 @@
* the page into KVM and using bzero to clear its contents.
*/
void
-pmap_zero_page(vm_paddr_t phys)
+pmap_zero_page(phys)
+ vm_offset_t phys;
{
- if (*CMAP3)
- panic("pmap_zero_page: CMAP3 busy");
-
- *CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
#ifdef SMP
- cpu_invlpg(CADDR3);
-#else
- invltlb_1pg((vm_offset_t)CADDR3);
+ if (*(int *) prv_CMAP3)
+ panic("pmap_zero_page: prv_CMAP3 busy");
+
+ *(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
+ cpu_invlpg(prv_CADDR3);
+
+#if defined(I686_CPU)
+ if (cpu_class == CPUCLASS_686)
+ i686_pagezero(prv_CADDR3);
+ else
#endif
+ bzero(prv_CADDR3, PAGE_SIZE);
+
+ *(int *) prv_CMAP3 = 0;
+#else
+ if (*(int *) CMAP2)
+ panic("pmap_zero_page: CMAP2 busy");
+
+ *(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
+ invltlb_1pg((vm_offset_t)CADDR2);
#if defined(I686_CPU)
if (cpu_class == CPUCLASS_686)
- i686_pagezero(CADDR3);
+ i686_pagezero(CADDR2);
else
#endif
- bzero(CADDR3, PAGE_SIZE);
- *CMAP3 = 0;
+ bzero(CADDR2, PAGE_SIZE);
+ *(int *) CMAP2 = 0;
+#endif
}
/*
@@ -2539,25 +2762,41 @@
* off and size may not cover an area beyond a single hardware page.
*/
void
-pmap_zero_page_area(vm_paddr_t phys, int off, int size)
+pmap_zero_page_area(phys, off, size)
+ vm_offset_t phys;
+ int off;
+ int size;
{
- if (*CMAP3)
- panic("pmap_zero_page: CMAP3 busy");
-
- *CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
#ifdef SMP
- cpu_invlpg(CADDR3);
-#else
- invltlb_1pg((vm_offset_t)CADDR3);
+ if (*(int *) prv_CMAP3)
+ panic("pmap_zero_page: prv_CMAP3 busy");
+
+ *(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
+ cpu_invlpg(prv_CADDR3);
+
+#if defined(I686_CPU)
+ if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
+ i686_pagezero(prv_CADDR3);
+ else
#endif
+ bzero((char *)prv_CADDR3 + off, size);
+
+ *(int *) prv_CMAP3 = 0;
+#else
+ if (*(int *) CMAP2)
+ panic("pmap_zero_page: CMAP2 busy");
+
+ *(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
+ invltlb_1pg((vm_offset_t)CADDR2);
#if defined(I686_CPU)
if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
- i686_pagezero(CADDR3);
+ i686_pagezero(CADDR2);
else
#endif
- bzero(CADDR3 + off, size);
- *CMAP3 = 0;
+ bzero((char *)CADDR2 + off, size);
+ *(int *) CMAP2 = 0;
+#endif
}
/*
@@ -2567,28 +2806,47 @@
* time.
*/
void
-pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
+pmap_copy_page(src, dst)
+ vm_offset_t src;
+ vm_offset_t dst;
{
- if (*CMAP1)
- panic("pmap_copy_page: CMAP1 busy");
- if (*CMAP2)
- panic("pmap_copy_page: CMAP2 busy");
+#ifdef SMP
+ if (*(int *) prv_CMAP1)
+ panic("pmap_copy_page: prv_CMAP1 busy");
+ if (*(int *) prv_CMAP2)
+ panic("pmap_copy_page: prv_CMAP2 busy");
- *CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
- *CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
+ *(int *) prv_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
+ *(int *) prv_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
-#ifdef SMP
- cpu_invlpg(CADDR1);
- cpu_invlpg(CADDR2);
+ cpu_invlpg(prv_CADDR1);
+ cpu_invlpg(prv_CADDR2);
+
+ bcopy(prv_CADDR1, prv_CADDR2, PAGE_SIZE);
+
+ *(int *) prv_CMAP1 = 0;
+ *(int *) prv_CMAP2 = 0;
#else
- invltlb_1pg((vm_offset_t)CADDR1);
- invltlb_1pg((vm_offset_t)CADDR2);
+ if (*(int *) CMAP1 || *(int *) CMAP2)
+ panic("pmap_copy_page: CMAP busy");
+
+ *(int *) CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
+ *(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
+#if defined(I386_CPU)
+ if (cpu_class == CPUCLASS_386) {
+ invltlb();
+ } else
#endif
+ {
+ invlpg((u_int)CADDR1);
+ invlpg((u_int)CADDR2);
+ }
bcopy(CADDR1, CADDR2, PAGE_SIZE);
- *CMAP1 = 0;
- *CMAP2 = 0;
+ *(int *) CMAP1 = 0;
+ *(int *) CMAP2 = 0;
+#endif
}
@@ -2622,7 +2880,9 @@
* subset of pmaps for proper page aging.
*/
boolean_t
-pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
+pmap_page_exists_quick(pmap, m)
+ pmap_t pmap;
+ vm_page_t m;
{
pv_entry_t pv;
int loops = 0;
@@ -2656,9 +2916,11 @@
* in the case of running down an entire address space.
*/
void
-pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+pmap_remove_pages(pmap, sva, eva)
+ pmap_t pmap;
+ vm_offset_t sva, eva;
{
- pt_entry_t *pte, tpte;
+ unsigned *pte, tpte;
pv_entry_t pv, npv;
int s;
vm_page_t m;
@@ -2671,7 +2933,9 @@
#endif
s = splvm();
- for(pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
+ for(pv = TAILQ_FIRST(&pmap->pm_pvlist);
+ pv;
+ pv = npv) {
if (pv->pv_va >= eva || pv->pv_va < sva) {
npv = TAILQ_NEXT(pv, pv_plist);
@@ -2679,9 +2943,9 @@
}
#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
- pte = vtopte(pv->pv_va);
+ pte = (unsigned *)vtopte(pv->pv_va);
#else
- pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
#endif
tpte = *pte;
@@ -2731,10 +2995,12 @@
* and a lot of things compile-time evaluate.
*/
static boolean_t
-pmap_testbit(vm_page_t m, int bit)
+pmap_testbit(m, bit)
+ vm_page_t m;
+ int bit;
{
pv_entry_t pv;
- pt_entry_t *pte;
+ unsigned *pte;
int s;
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
@@ -2762,7 +3028,7 @@
continue;
}
#endif
- pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (*pte & bit) {
splx(s);
return TRUE;
@@ -2776,10 +3042,13 @@
* this routine is used to modify bits in ptes
*/
static __inline void
-pmap_changebit(vm_page_t m, int bit, boolean_t setem)
+pmap_changebit(m, bit, setem)
+ vm_page_t m;
+ int bit;
+ boolean_t setem;
{
- pv_entry_t pv;
- pt_entry_t *pte;
+ register pv_entry_t pv;
+ register unsigned *pte;
int s;
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
@@ -2807,21 +3076,21 @@
}
#endif
- pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (setem) {
- *pte |= bit;
+ *(int *)pte |= bit;
pmap_TLB_invalidate(pv->pv_pmap, pv->pv_va);
} else {
- pt_entry_t pbits = *pte;
+ vm_offset_t pbits = *(vm_offset_t *)pte;
if (pbits & bit) {
if (bit == PG_RW) {
if (pbits & PG_M) {
vm_page_dirty(m);
}
- *pte = pbits & ~(PG_M|PG_RW);
+ *(int *)pte = pbits & ~(PG_M|PG_RW);
} else {
- *pte = pbits & ~bit;
+ *(int *)pte = pbits & ~bit;
}
pmap_TLB_invalidate(pv->pv_pmap, pv->pv_va);
}
@@ -2847,11 +3116,11 @@
}
}
-vm_paddr_t
+vm_offset_t
pmap_phys_address(ppn)
int ppn;
{
- return (i386_ptob((vm_paddr_t)ppn));
+ return (i386_ptob(ppn));
}
/*
@@ -2869,8 +3138,8 @@
int
pmap_ts_referenced(vm_page_t m)
{
- pv_entry_t pv, pvf, pvn;
- pt_entry_t *pte;
+ register pv_entry_t pv, pvf, pvn;
+ unsigned *pte;
int s;
int rtval = 0;
@@ -2893,7 +3162,7 @@
if (!pmap_track_modified(pv->pv_va))
continue;
- pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (pte && (*pte & PG_A)) {
*pte &= ~PG_A;
@@ -2983,10 +3252,12 @@
* NOT real memory.
*/
void *
-pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+pmap_mapdev(pa, size)
+ vm_offset_t pa;
+ vm_size_t size;
{
vm_offset_t va, tmpva, offset;
- pt_entry_t *pte;
+ unsigned *pte;
offset = pa & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);
@@ -2997,7 +3268,7 @@
pa = pa & PG_FRAME;
for (tmpva = va; size > 0;) {
- pte = vtopte(tmpva);
+ pte = (unsigned *)vtopte(tmpva);
*pte = pa | PG_RW | PG_V | pgeflag;
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
@@ -3009,7 +3280,9 @@
}
void
-pmap_unmapdev(vm_offset_t va, vm_size_t size)
+pmap_unmapdev(va, size)
+ vm_offset_t va;
+ vm_size_t size;
{
vm_offset_t base, offset;
@@ -3023,9 +3296,12 @@
* perform the pmap work for mincore
*/
int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap, addr)
+ pmap_t pmap;
+ vm_offset_t addr;
{
- pt_entry_t *ptep, pte;
+
+ unsigned *ptep, pte;
vm_page_t m;
int val = 0;
@@ -3035,7 +3311,7 @@
}
if ((pte = *ptep) != 0) {
- vm_paddr_t pa;
+ vm_offset_t pa;
val = MINCORE_INCORE;
if ((pte & PG_MANAGED) == 0)
@@ -3086,11 +3362,7 @@
#if defined(SWTCH_OPTIM_STATS)
tlb_flush_count++;
#endif
-#ifdef PAE
- load_cr3(p->p_addr->u_pcb.pcb_cr3 = vtophys(pmap->pm_pdpt));
-#else
load_cr3(p->p_addr->u_pcb.pcb_cr3 = vtophys(pmap->pm_pdir));
-#endif
}
vm_offset_t
@@ -3105,73 +3377,6 @@
return addr;
}
-#ifdef PAE
-/*
- * Allocate PDPT in lower 32-bit pages
- */
-struct pdpt_page {
- SLIST_ENTRY(pdpt_page) link;
- u_int32_t avail;
- vm_paddr_t phys;
- u_int32_t bits[4];
-};
-
-SLIST_HEAD(,pdpt_page) pdpt_pages = SLIST_HEAD_INITIALIZER(&pdpt_pages);
-int pdpt_avail = 0;
-
-static pdpt_entry_t *
-pmap_alloc_pdpt()
-{
- struct pdpt_page *pp;
- pdpt_entry_t *pdpt = 0;
- int i;
-
- if (pdpt_avail == 0) {
- pp = (struct pdpt_page *)contigmalloc(PAGE_SIZE, M_DEVBUF,
- M_WAITOK, 0ull, 0xffffffffull, PAGE_SIZE, 0);
- if (!pp)
- panic("pmap_alloc_pdpt: alloc failed");
- pp->phys = vtophys(pp);
- pp->avail = PAGE_SIZE / 32 - 1;
- pp->bits[0] = 1;
- pp->bits[1] = pp->bits[2] = pp->bits[3] = 0;
- SLIST_INSERT_HEAD(&pdpt_pages, pp, link);
- pdpt_avail += pp->avail;
- } else {
- SLIST_FOREACH(pp, &pdpt_pages, link) {
- if (pp->avail > 0)
- break;
- }
- }
-
- for (i = 0; i < 4; i++) {
- int j = ffs(~pp->bits[i]);
- if (j == 0)
- continue;
- pp->bits[i] |= 1 << (j - 1);
- pp->avail--;
- pdpt_avail--;
- pdpt = (pdpt_entry_t *)pp + (32 * i + j - 1) * NPGPTD;
- }
-
- return pdpt;
-}
-
-#if 0
-static void
-pmap_free_pdpt(pdpt_entry_t *pdpt)
-{
- struct pdpt_page *pp;
- int i;
-
- pp = (struct pdpt_page *)((vm_offset_t)pdpt & ~PAGE_MASK);
- i = (pdpt - (pdpt_entry_t *)pp) / NPGPTD;
- pp->bits[i / 32] &= ~(1 << (i % 32));
- pp->avail++;
- pdpt_avail++;
-}
-#endif
-#endif
#if defined(PMAP_DEBUG)
pmap_pid_dump(int pid)
@@ -3204,7 +3409,7 @@
}
return npte;
}
- pte = pmap_pte( pmap, va);
+ pte = pmap_pte_quick( pmap, va);
if (pte && pmap_pte_v(pte)) {
vm_offset_t pa;
vm_page_t m;
@@ -3253,7 +3458,7 @@
continue;
if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
continue;
- ptep = pmap_pte(pm, va);
+ ptep = pmap_pte_quick(pm, va);
if (pmap_pte_v(ptep))
printf("%x:%x ", va, *(int *) ptep);
};
@@ -3262,7 +3467,7 @@
void
pmap_pvdump(pa)
- vm_paddr_t pa;
+ vm_offset_t pa;
{
register pv_entry_t pv;
vm_page_t m;
diff -u -r /usr/src/sys.old/i386/i386/vm86.c /usr/src/sys/i386/i386/vm86.c
--- /usr/src/sys.old/i386/i386/vm86.c Thu Aug 21 18:35:34 2003
+++ /usr/src/sys/i386/i386/vm86.c Fri Aug 22 02:09:00 2003
@@ -584,7 +584,7 @@
struct vm86frame *vmf;
struct vm86context *vmc;
{
- pt_entry_t *pte = (pt_entry_t *)vm86paddr;
+ pt_entry_t pte = (pt_entry_t)vm86paddr;
vm_paddr_t page;
int i, entry, retval;
diff -u -r /usr/src/sys.old/i386/include/asnames.h /usr/src/sys/i386/include/asnames.h
--- /usr/src/sys.old/i386/include/asnames.h Thu Aug 21 18:35:36 2003
+++ /usr/src/sys/i386/include/asnames.h Fri Aug 22 02:07:23 2003
@@ -52,10 +52,11 @@
#ifdef __ELF__
+#define _APTD APTD
+#define _APTDpde APTDpde
+#define _APTmap APTmap
#define _CONST_QNaN CONST_QNaN
-#define _IdlePDPT IdlePDPT
#define _IdlePTD IdlePTD
-#define _KERNend KERNend
#define _KPTphys KPTphys
#define _MP_GDT MP_GDT
#define _MPgetlock MPgetlock
Only in /usr/src/sys/i386/include: asnames.h.orig
Only in /usr/src/sys/i386/include: asnames.h.rej
diff -u -r /usr/src/sys.old/i386/include/cpu.h /usr/src/sys/i386/include/cpu.h
--- /usr/src/sys.old/i386/include/cpu.h Thu Aug 21 18:35:36 2003
+++ /usr/src/sys/i386/include/cpu.h Fri Aug 22 02:06:26 2003
@@ -135,7 +135,6 @@
#ifdef _KERNEL
extern char btext[];
extern char etext[];
-extern char KERNend[];
extern u_char intr_nesting_level;
void fork_trampoline __P((void));
diff -u -r /usr/src/sys.old/i386/include/globaldata.h /usr/src/sys/i386/include/globaldata.h
--- /usr/src/sys.old/i386/include/globaldata.h Thu Aug 21 18:35:36 2003
+++ /usr/src/sys/i386/include/globaldata.h Fri Aug 22 02:06:10 2003
@@ -60,11 +60,11 @@
pt_entry_t *gd_prv_CMAP1;
pt_entry_t *gd_prv_CMAP2;
pt_entry_t *gd_prv_CMAP3;
- pd_entry_t *gd_prv_PMAP1;
+ pt_entry_t *gd_prv_PMAP1;
caddr_t gd_prv_CADDR1;
caddr_t gd_prv_CADDR2;
caddr_t gd_prv_CADDR3;
- pt_entry_t *gd_prv_PADDR1;
+ unsigned *gd_prv_PADDR1;
#endif
u_int gd_astpending;
};
diff -u -r /usr/src/sys.old/i386/include/globals.h /usr/src/sys/i386/include/globals.h
--- /usr/src/sys.old/i386/include/globals.h Thu Aug 21 18:35:36 2003
+++ /usr/src/sys/i386/include/globals.h Fri Aug 22 02:05:40 2003
@@ -103,11 +103,11 @@
#define prv_CMAP1 GLOBAL_LVALUE(prv_CMAP1, pt_entry_t *)
#define prv_CMAP2 GLOBAL_LVALUE(prv_CMAP2, pt_entry_t *)
#define prv_CMAP3 GLOBAL_LVALUE(prv_CMAP3, pt_entry_t *)
-#define prv_PMAP1 GLOBAL_LVALUE(prv_PMAP1, pd_entry_t *)
+#define prv_PMAP1 GLOBAL_LVALUE(prv_PMAP1, pt_entry_t *)
#define prv_CADDR1 GLOBAL_RVALUE(prv_CADDR1, caddr_t)
#define prv_CADDR2 GLOBAL_RVALUE(prv_CADDR2, caddr_t)
#define prv_CADDR3 GLOBAL_RVALUE(prv_CADDR3, caddr_t)
-#define prv_PADDR1 GLOBAL_RVALUE(prv_PADDR1, pt_entry_t *)
+#define prv_PADDR1 GLOBAL_RVALUE(prv_PADDR1, unsigned *)
#endif
#endif /*UP kernel*/
Only in /usr/src/sys/i386/include: globals.h.orig
Only in /usr/src/sys/i386/include: globals.h.rej
diff -u -r /usr/src/sys.old/i386/include/md_var.h /usr/src/sys/i386/include/md_var.h
--- /usr/src/sys.old/i386/include/md_var.h Thu Aug 21 18:35:36 2003
+++ /usr/src/sys/i386/include/md_var.h Fri Aug 22 02:05:11 2003
@@ -72,7 +72,6 @@
void bcopyb __P((const void *from, void *to, size_t len));
void busdma_swi __P((void));
void cpu_halt __P((void));
-void cpu_idle __P((void));
void cpu_reset __P((void));
void cpu_setregs __P((void));
void cpu_switch_load_gs __P((void)) __asm(__STRING(cpu_switch_load_gs));
diff -u -r /usr/src/sys.old/i386/include/param.h /usr/src/sys/i386/include/param.h
--- /usr/src/sys.old/i386/include/param.h Thu Aug 21 18:35:36 2003
+++ /usr/src/sys/i386/include/param.h Fri Aug 22 02:04:43 2003
@@ -94,18 +94,10 @@
#define PAGE_MASK (PAGE_SIZE-1)
#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
-#ifdef PAE
-#define NPGPTD 4
-#define PDRSHIFT 21 /* LOG2(NBPDR) */
-#else
-#define NPGPTD 1
+#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t)))
#define PDRSHIFT 22 /* LOG2(NBPDR) */
-#endif
-
#define NBPDR (1<<PDRSHIFT) /* bytes/page dir */
#define PDRMASK (NBPDR-1)
-#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t)))
-#define NPDEPTD (NPDEPG*NPGPTD)
#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
#define DEV_BSIZE (1<<DEV_BSHIFT)
Only in /usr/src/sys/i386/include: param.h.orig
diff -u -r /usr/src/sys.old/i386/include/pmap.h /usr/src/sys/i386/include/pmap.h
--- /usr/src/sys.old/i386/include/pmap.h Thu Aug 21 18:35:36 2003
+++ /usr/src/sys/i386/include/pmap.h Fri Aug 22 02:03:52 2003
@@ -70,7 +70,7 @@
/* Our various interpretations of the above */
#define PG_W PG_AVAIL1 /* "Wired" pseudoflag */
#define PG_MANAGED PG_AVAIL2
-#define PG_FRAME (~((vm_paddr_t)PAGE_MASK))
+#define PG_FRAME (~PAGE_MASK)
#define PG_PROT (PG_RW|PG_U) /* all protection bits . */
#define PG_N (PG_NC_PWT|PG_NC_PCD) /* Non-cacheable */
@@ -88,7 +88,7 @@
* This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
*/
#ifndef KVA_PAGES
-#define KVA_PAGES (1 << (30 - PDRSHIFT))
+#define KVA_PAGES 256
#endif
/*
@@ -97,17 +97,13 @@
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
#ifndef NKPT
-#ifdef PAE
-#define NKPT 120 /* actual number of kernel page tables */
-#else
-#define NKPT 30 /* actual number of kernel page tables */
-#endif
+#define NKPT 30 /* actual number of kernel page tables */
#endif
#ifndef NKPDE
#ifdef SMP
-#define NKPDE (KVA_PAGES - 1) /* addressable number of page tables/pde's */
+#define NKPDE (KVA_PAGES - 2) /* addressable number of page tables/pde's */
#else
-#define NKPDE (KVA_PAGES) /* addressable number of page tables/pde's */
+#define NKPDE (KVA_PAGES - 1) /* addressable number of page tables/pde's */
#endif
#endif
@@ -119,13 +115,16 @@
*
* SMP_PRIVPAGES: The per-cpu address space is 0xff80000 -> 0xffbfffff
*/
+#define APTDPTDI (NPDEPG-1) /* alt ptd entry that points to APTD */
#ifdef SMP
-#define MPPTDI (NPDEPTD-1) /* per cpu ptd entry */
+#define MPPTDI (APTDPTDI-1) /* per cpu ptd entry */
#define KPTDI (MPPTDI-NKPDE) /* start of kernel virtual pde's */
#else
-#define KPTDI (NPDEPTD-NKPDE) /* start of kernel virtual pde's */
+#define KPTDI (APTDPTDI-NKPDE)/* start of kernel virtual pde's */
#endif /* SMP */
-#define PTDPTDI (KPTDI-NPGPTD) /* ptd entry that points to ptd! */
+#define PTDPTDI (KPTDI-1) /* ptd entry that points to ptd! */
+#define UMAXPTDI (PTDPTDI-1) /* ptd entry for user space end */
+#define UMAXPTEOFF (NPTEPG) /* pte entry for user space end */
/*
* XXX doesn't really belong here I guess...
@@ -137,18 +136,8 @@
#include <sys/queue.h>
-#ifdef PAE
-typedef u_int64_t pdpt_entry_t;
-typedef u_int64_t pd_entry_t;
-typedef u_int64_t pt_entry_t;
-#define PTESHIFT 3
-#define PDESHIFT 3
-#else
-typedef u_int32_t pd_entry_t;
-typedef u_int32_t pt_entry_t;
-#define PTESHIFT 2
-#define PDESHIFT 2
-#endif
+typedef unsigned int *pd_entry_t;
+typedef unsigned int *pt_entry_t;
#define PDESIZE sizeof(pd_entry_t) /* for assembly files */
#define PTESIZE sizeof(pt_entry_t) /* for assembly files */
@@ -158,13 +147,10 @@
* and directories.
*/
#ifdef _KERNEL
-extern pt_entry_t PTmap[];
-extern pd_entry_t PTD[], PTDpde[];
+extern pt_entry_t PTmap[], APTmap[], Upte;
+extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;
-extern u_int32_t IdlePTD; /* physical address of "Idle" state directory */
-#ifdef PAE
-extern pdpt_entry_t IdlePDPT[];
-#endif
+extern pd_entry_t IdlePTD; /* physical address of "Idle" state directory */
#endif
#ifdef _KERNEL
@@ -176,6 +162,8 @@
*/
#define vtopte(va) (PTmap + i386_btop(va))
+#define avtopte(va) (APTmap + i386_btop(va))
+
/*
* Routine: pmap_kextract
* Function:
@@ -186,17 +174,22 @@
pmap_kextract(vm_offset_t va)
{
vm_paddr_t pa;
-
- if ((pa = PTD[va >> PDRSHIFT]) & PG_PS) {
+ if ((pa = (vm_offset_t) PTD[va >> PDRSHIFT]) & PG_PS) {
pa = (pa & ~(NBPDR - 1)) | (va & (NBPDR - 1));
} else {
- pa = *vtopte(va);
+ pa = *(vm_offset_t *)vtopte(va);
pa = (pa & PG_FRAME) | (va & PAGE_MASK);
}
return pa;
}
-#define vtophys(va) pmap_kextract((vm_offset_t) (va))
+#if 0
+#define vtophys(va) (((vm_offset_t) (*vtopte(va))&PG_FRAME) | ((vm_offset_t)(va) & PAGE_MASK))
+#else
+#define vtophys(va) pmap_kextract(((vm_offset_t) (va)))
+#endif
+
+#define avtophys(va) (((vm_offset_t) (*avtopte(va))&PG_FRAME) | ((vm_offset_t)(va) & PAGE_MASK))
#endif
@@ -214,14 +207,10 @@
pd_entry_t *pm_pdir; /* KVA of page directory */
vm_object_t pm_pteobj; /* Container for pte's */
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
+ int pm_count; /* reference count */
int pm_active; /* active on cpus */
struct pmap_statistics pm_stats; /* pmap statistics */
struct vm_page *pm_ptphint; /* pmap ptp hint */
-#ifdef PAE
- pdpt_entry_t *pm_pdpt; /* KVA of page dir ptr table */
-#else
- void *pm_pdpt_dontuse;
-#endif
};
#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count
@@ -244,6 +233,11 @@
vm_page_t pv_ptem; /* VM page for pte */
} *pv_entry_t;
+#define PV_ENTRY_NULL ((pv_entry_t) 0)
+
+#define PV_CI 0x01 /* all entries must be cache inhibited */
+#define PV_PTPAGE 0x02 /* entry maps a page table page */
+
#ifdef _KERNEL
#define NPPROVMTRR 8
@@ -254,6 +248,8 @@
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];
+extern caddr_t CADDR1;
+extern pt_entry_t *CMAP1;
extern vm_paddr_t avail_end;
extern vm_paddr_t avail_start;
extern vm_offset_t clean_eva;
@@ -263,11 +259,12 @@
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
-void pmap_bootstrap __P((vm_paddr_t, vm_paddr_t));
+void pmap_bootstrap __P(( vm_paddr_t, vm_paddr_t));
pmap_t pmap_kernel __P((void));
void *pmap_mapdev __P((vm_paddr_t, vm_size_t));
void pmap_unmapdev __P((vm_offset_t, vm_size_t));
-pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
+unsigned *pmap_pte __P((pmap_t, vm_offset_t)) __pure2;
+vm_page_t pmap_use_pt __P((pmap_t, vm_offset_t));
#ifdef SMP
void pmap_set_opt __P((void));
#endif
Only in /usr/src/sys/i386/include: pmap.h.orig
diff -u -r /usr/src/sys.old/i386/include/vmparam.h /usr/src/sys/i386/include/vmparam.h
--- /usr/src/sys.old/i386/include/vmparam.h Thu Aug 21 18:35:37 2003
+++ /usr/src/sys/i386/include/vmparam.h Fri Aug 22 02:00:08 2003
@@ -90,7 +90,7 @@
* messy at times, but hey, we'll do anything to save a page :-)
*/
-#define VM_MAX_KERNEL_ADDRESS VADDR(KPTDI+NKPDE-1, NPTEPG-1)
+#define VM_MAX_KERNEL_ADDRESS VADDR(KPTDI+NKPDE, 0)
#define VM_MIN_KERNEL_ADDRESS VADDR(PTDPTDI, PTDPTDI)
#define KERNBASE VADDR(KPTDI, 0)
@@ -101,7 +101,7 @@
#define UPT_MAX_ADDRESS VADDR(PTDPTDI, PTDPTDI)
#define UPT_MIN_ADDRESS VADDR(PTDPTDI, 0)
-#define VM_MAXUSER_ADDRESS VADDR(PTDPTDI, 0)
+#define VM_MAXUSER_ADDRESS VADDR(UMAXPTDI, UMAXPTEOFF)
#define USRSTACK VM_MAXUSER_ADDRESS
Only in /usr/src/sys/i386/include: vmparam.h.orig
diff -u -r /usr/src/sys.old/i386/isa/wd.c /usr/src/sys/i386/isa/wd.c
--- /usr/src/sys.old/i386/isa/wd.c Thu Aug 21 18:35:40 2003
+++ /usr/src/sys/i386/isa/wd.c Fri Aug 22 01:59:24 2003
@@ -1927,14 +1927,12 @@
return (EIO);
}
while (blkcnt != 0) {
- caddr_t va;
-
if (is_physical_memory((vm_offset_t)addr))
- va = pmap_kenter_temporary(
- trunc_page((vm_offset_t)addr), 0);
+ pmap_kenter((vm_offset_t)CADDR1,
+ trunc_page((vm_offset_t)addr));
else
- va = pmap_kenter_temporary(
- trunc_page(0), 0);
+ pmap_kenter((vm_offset_t)CADDR1,
+ trunc_page(0));
/* Ready to send data? */
DELAY(5); /* ATA spec */
@@ -1946,11 +1944,11 @@
}
if (du->dk_flags & DKFL_32BIT)
outsl(du->dk_port + wd_data,
- va + ((int)addr & PAGE_MASK),
+ CADDR1 + ((int)addr & PAGE_MASK),
DEV_BSIZE / sizeof(long));
else
outsw(du->dk_port + wd_data,
- va + ((int)addr & PAGE_MASK),
+ CADDR1 + ((int)addr & PAGE_MASK),
DEV_BSIZE / sizeof(short));
addr += DEV_BSIZE;
/*
Only in /usr/src/sys/i386/isa: wd.c.orig
Only in /usr/src/sys/i386/isa: wd.c.rej
Only in /usr/src/sys/i386/isa: wd.c.rej.orig
diff -u -r /usr/src/sys.old/vm/pmap.h /usr/src/sys/vm/pmap.h
--- /usr/src/sys.old/vm/pmap.h Thu Aug 21 18:36:25 2003
+++ /usr/src/sys/vm/pmap.h Fri Aug 22 01:56:59 2003
@@ -125,6 +125,7 @@
vm_prot_t));
void pmap_qenter __P((vm_offset_t, vm_page_t *, int));
void pmap_qremove __P((vm_offset_t, int));
+void pmap_reference __P((pmap_t));
void pmap_release __P((pmap_t));
void pmap_remove __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_remove_pages __P((pmap_t, vm_offset_t, vm_offset_t));
diff -u -r /usr/src/sys.old/vm/vm_kern.c /usr/src/sys/vm/vm_kern.c
--- /usr/src/sys.old/vm/vm_kern.c Thu Aug 21 18:36:25 2003
+++ /usr/src/sys/vm/vm_kern.c Fri Aug 22 01:56:09 2003
@@ -265,6 +265,7 @@
panic("kmem_suballoc");
}
*max = *min + size;
+ pmap_reference(vm_map_pmap(parent));
result = vm_map_create(vm_map_pmap(parent), *min, *max);
if (result == NULL)
panic("kmem_suballoc: cannot create submap");