Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/hppa/conf/ld.script       |    3
-rw-r--r--  sys/arch/hppa/dev/mem.c            |    5
-rw-r--r--  sys/arch/hppa/gsc/if_ie_gsc.c      |   10
-rw-r--r--  sys/arch/hppa/hppa/genassym.cf     |   21
-rw-r--r--  sys/arch/hppa/hppa/locore.S        |  293
-rw-r--r--  sys/arch/hppa/hppa/machdep.c       |  117
-rw-r--r--  sys/arch/hppa/hppa/pmap.c          | 1855
-rw-r--r--  sys/arch/hppa/hppa/trap.c          |   10
-rw-r--r--  sys/arch/hppa/hppa/vm_machdep.c    |    7
-rw-r--r--  sys/arch/hppa/include/cpu.h        |    4
-rw-r--r--  sys/arch/hppa/include/cpufunc.h    |    7
-rw-r--r--  sys/arch/hppa/include/db_machdep.h |   30
-rw-r--r--  sys/arch/hppa/include/pmap.h       |  240
-rw-r--r--  sys/arch/hppa/include/pte.h        |   50
-rw-r--r--  sys/arch/hppa/include/vmparam.h    |    4
15 files changed, 1075 insertions(+), 1581 deletions(-)
diff --git a/sys/arch/hppa/conf/ld.script b/sys/arch/hppa/conf/ld.script
index a05233c4c5a..7134843aae7 100644
--- a/sys/arch/hppa/conf/ld.script
+++ b/sys/arch/hppa/conf/ld.script
@@ -1,4 +1,4 @@
-/* $OpenBSD: ld.script,v 1.8 2001/11/29 17:28:09 mickey Exp $ */
+/* $OpenBSD: ld.script,v 1.9 2002/03/15 21:44:14 mickey Exp $ */
OUTPUT_FORMAT("elf32-hppa")
OUTPUT_ARCH(hppa)
@@ -22,6 +22,7 @@ SECTIONS
__unwind_end = .;
. = ALIGN(4096);
} = 0 /* 0x08000240 nop filled, does not work */
+ . = 0x400000;
etext = ABSOLUTE(.);
.data :
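
The pmap.c hunk further down replaces the single boot-time block mapping of the whole kernel with two BTLB entries: text mapped read-only and executable, everything after etext mapped read-write. Since btlb_insert() rounds every length up to the next power of two (per the comment this diff removes from pmap_bootstrap), etext must sit on such a boundary for the read-only mapping to end exactly at the text/data split; that is what the new ". = 0x400000" accomplishes. A minimal sketch of the rounding, assuming only what that removed comment states:

	/* next power of two, as btlb_insert() is said to round */
	static unsigned int
	btlb_len_round(unsigned int len)
	{
		unsigned int sz;

		for (sz = 1; sz < len; sz <<= 1)
			continue;
		return (sz);
	}

With etext forced to 4MB, a single power-of-two entry can cover the text without spilling read-only protection into the data segment.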
diff --git a/sys/arch/hppa/dev/mem.c b/sys/arch/hppa/dev/mem.c
index 2032abbb72c..008b4fcc63d 100644
--- a/sys/arch/hppa/dev/mem.c
+++ b/sys/arch/hppa/dev/mem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mem.c,v 1.9 2002/03/14 01:26:31 millert Exp $ */
+/* $OpenBSD: mem.c,v 1.10 2002/03/15 21:44:18 mickey Exp $ */
/*
* Copyright (c) 1998,1999 Michael Shalayeff
@@ -164,14 +164,15 @@ memattach(parent, self, aux)
printf (" viper rev %x, ctrl %b",
sc->sc_vp->vi_status.hw_rev,
VI_CTRL, VIPER_BITS);
-
s = splhigh();
+#if 0
VI_CTRL |= VI_CTRL_ANYDEN;
((struct vi_ctrl *)&VI_CTRL)->core_den = 0;
((struct vi_ctrl *)&VI_CTRL)->sgc0_den = 0;
((struct vi_ctrl *)&VI_CTRL)->sgc1_den = 0;
((struct vi_ctrl *)&VI_CTRL)->core_prf = 1;
sc->sc_vp->vi_control = VI_CTRL;
+#endif
splx(s);
#ifdef DEBUG
printf (" >> %b", VI_CTRL, VIPER_BITS);
diff --git a/sys/arch/hppa/gsc/if_ie_gsc.c b/sys/arch/hppa/gsc/if_ie_gsc.c
index c37b285d2d7..a630796325f 100644
--- a/sys/arch/hppa/gsc/if_ie_gsc.c
+++ b/sys/arch/hppa/gsc/if_ie_gsc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_ie_gsc.c,v 1.10 2002/03/14 01:26:31 millert Exp $ */
+/* $OpenBSD: if_ie_gsc.c,v 1.11 2002/03/15 21:44:18 mickey Exp $ */
/*
* Copyright (c) 1998,1999 Michael Shalayeff
@@ -85,7 +85,7 @@ static int ie_gsc_media[] = {
};
#define IE_NMEDIA (sizeof(ie_gsc_media) / sizeof(ie_gsc_media[0]))
-static char mem[IE_SIZE+16];
+char *ie_mem;
void ie_gsc_reset(struct ie_softc *sc, int what);
void ie_gsc_attend(struct ie_softc *sc);
@@ -154,7 +154,7 @@ ie_gsc_attend(sc)
{
register volatile struct ie_gsc_regs *r = (struct ie_gsc_regs *)sc->ioh;
- fdcache(0, (vaddr_t)&mem, sizeof(mem));
+ fdcache(0, (vaddr_t)ie_mem, IE_SIZE);
r->ie_attn = 0;
}
@@ -328,8 +328,8 @@ ie_gsc_attach(parent, self, aux)
sc->sc_maddr = kvtop((caddr_t)sc->bh);
#else
- bzero(mem, sizeof(mem));
- sc->bh = ((u_int)&mem + 15) & ~0xf;
+ printf("%x ", ie_mem);
+ sc->bh = (u_int)ie_mem;
sc->sc_maddr = sc->bh;
#endif
sc->sysbus = 0x40 | IE_SYSBUS_82586 | IE_SYSBUS_INTLOW | IE_SYSBUS_TRG | IE_SYSBUS_BE;
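
The 82586's shared memory no longer lives in a static BSS array: pmap_bootstrap() in the pmap.c hunk below now sets ie_mem aside early ("ie_mem = (u_int *)addr; addr += 0x8000;"), giving the LAN coprocessor a physically contiguous, naturally aligned window inside the block-mapped kernel region instead of whatever 16-byte-rounded address the old mem[] happened to land on. A minimal sketch of that carve-out, assuming the 0x8000 reservation matches the driver's IE_SIZE:

	u_int *ie_mem;		/* i82586 shared-memory window */

	/* addr is pmap_bootstrap's physical allocation cursor */
	static vaddr_t
	ie_mem_carve(vaddr_t addr)
	{
		ie_mem = (u_int *)addr;
		return (addr + 0x8000);	/* assumed == IE_SIZE */
	}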
diff --git a/sys/arch/hppa/hppa/genassym.cf b/sys/arch/hppa/hppa/genassym.cf
index 53f2b449a17..6d3f4cb87e3 100644
--- a/sys/arch/hppa/hppa/genassym.cf
+++ b/sys/arch/hppa/hppa/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.19 2002/01/16 20:50:16 miod Exp $
+# $OpenBSD: genassym.cf,v 1.20 2002/03/15 21:44:18 mickey Exp $
#
# Copyright (c) 1982, 1990, 1993
@@ -69,24 +69,9 @@ export HPPA_BREAK_GET_PSW
export HPPA_BREAK_SET_PSW
# pte things
-export TLB_REF_POS
+#export TLB_REF_POS
export TLB_GATE_PROT
-export TLB_DIRTY_POS
-
-# hpt_table fields
-struct hpt_entry
-member hpt_tlbprot
-member hpt_tlbpage
-member hpt_entry
-define HPT_TAG 0
-
-# pv_entry fields
-struct pv_entry
-member pv_hash
-member pv_space
-member pv_va
-member pv_tlbpage
-member pv_tlbprot
+#export TLB_DIRTY_POS
# saved state fields
struct trapframe
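
The exports removed here had exactly one consumer: the hash-bucket walk this diff deletes from locore.S. A "member" line under a "struct" line expands to a byte-offset constant in the generated assym.h, which the old handlers used in sequences like "ldw PV_VA(r24), r25". A sketch of the expansion, with an illustrative (not the historical) layout:

	#include <stddef.h>

	/* illustrative subset of the old pv_entry */
	struct pv_entry_sketch {
		struct pv_entry_sketch	*pv_hash;
		int			 pv_space;
		unsigned long		 pv_va;
	};
	/* roughly what "member pv_va" generated: */
	#define PV_VA	offsetof(struct pv_entry_sketch, pv_va)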
diff --git a/sys/arch/hppa/hppa/locore.S b/sys/arch/hppa/hppa/locore.S
index a6dbee2a517..e4108956232 100644
--- a/sys/arch/hppa/hppa/locore.S
+++ b/sys/arch/hppa/hppa/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.59 2002/03/15 19:53:49 mickey Exp $ */
+/* $OpenBSD: locore.S,v 1.60 2002/03/15 21:44:18 mickey Exp $ */
/*
* Copyright (c) 1998-2002 Michael Shalayeff
@@ -117,6 +117,11 @@ fpu_curpcb
.comm 4
fpu_enable
.comm 4
+ .align 2048
+$trap_tmp_save /* XXX assumed to be aligned on 2048 */
+ .block TF_PHYS /* XXX must be aligned to 64 */
+ .align 64
+
.text
/*
@@ -517,10 +522,6 @@ $bsd_syscall
.export gateway_page_end, entry
gateway_page_end
-$trap_tmp_save /* XXX assumed to be aligned on 2048 */
- .block TF_PHYS /* XXX must be aligned to 64 */
- .align 64
-
.export $syscall,entry
.proc
.callinfo calls
@@ -898,6 +899,7 @@ $syscall_end
#ifdef HP7000_CPU
LDILDO(itlb_x)
+LDILDO(itlbna_x)
LDILDO(dtlb_x)
LDILDO(dtlbna_x)
LDILDO(tlbd_x)
@@ -905,6 +907,7 @@ LDILDO(tlbd_x)
#ifdef HP7100_CPU
LDILDO(itlb_s)
+LDILDO(itlbna_s)
LDILDO(dtlb_s)
LDILDO(dtlbna_s)
LDILDO(tlbd_s)
@@ -912,6 +915,7 @@ LDILDO(tlbd_s)
#ifdef HP7200_CPU
LDILDO(itlb_t)
+LDILDO(itlbna_t)
LDILDO(dtlb_t)
LDILDO(dtlbna_t)
LDILDO(tlbd_t)
@@ -919,6 +923,7 @@ LDILDO(tlbd_t)
#ifdef HP7100LC_CPU
LDILDO(itlb_l)
+LDILDO(itlbna_l)
LDILDO(dtlb_l)
LDILDO(dtlbna_l)
LDILDO(tlbd_l)
@@ -960,8 +965,8 @@ hpmc_v
ATRAP(excpt,T_EXCEPTION)
#endif
STRAP(dtlb,T_DTLBMISS,DTLBPRE) /* 15. data TLB miss fault */
- STRAP(itlb,T_ITLBMISSNA,ITLBPRE)/* 16. ITLB non-access miss fault */
- STRAP(dtlb,T_DTLBMISSNA,DTLBPRE)/* 17. DTLB non-access miss fault */
+ STRAP(itlbna,T_ITLBMISSNA,ITLBPRE)/* 16. ITLB non-access miss fault */
+ STRAP(dtlbna,T_DTLBMISSNA,DTLBPRE)/* 17. DTLB non-access miss fault */
ATRAP(dprot,T_DPROT) /* 18. data protection trap
unaligned data reference trap */
ATRAP(dbrk,T_DBREAK) /* 19. data break trap */
@@ -1303,48 +1308,54 @@ LEAF_ENTRY(desidhash_t)
EXIT(desidhash_t)
#endif
+#if defined(HP7000_CPU) || defined(HP7100_CPU) || defined(HP7200_CPU)
+#define TLB_PULL(bits) ! \
+ /* space:pgaddr -- r8:r9 */ ! \
+ mfctl vtop, r16 ! \
+ ldwax,s r8(r16), r17 /* space -> page directory */ ! \
+ extru r9, 9, 10, r25 ! \
+ combt,=,n r0, r17, TLABEL(all) ! \
+ ldwax,s r25(r17), r24 /* page -> page table */ ! \
+ extru r9, 19, 10, r16 ! \
+ combt,=,n r0, r24, TLABEL(all) ! \
+ ldwax,s r16(r24), r17 /* va -> pa:prot */ ! \
+ sh2addl r16, r24, r25 ! \
+ combt,=,n r0, r17, TLABEL(all) ! \
+ depi (bits), 21+bits, 1+bits, r17 ! \
+ mfctl tr7, r1 ! \
+ stwas r17, 0(r25) /* store back w/ the bits */ ! \
+ shd r17, r0, 13, r25 ! \
+ dep r8, 30, 15, r25 /* mix0r the pid from the sid */! \
+ dep r0, 31, 12, r17 /* needed ? */ ! \
+ addi 2, r25, r25 ! \
+ extru r17, 24, 25, r17
+
$tlbd_x
$tlbd_s
$tlbd_t
-#if 1
- HPTENT
- mtctl r24, cr28
-
- /*
- * Chase the list of entries for this hash bucket until we find
- * the correct mapping or NULL.
- */
- ldw HPT_ENTRY(r24), r24
-$hash_loop_tlbd_t
- comb,=,n r0, r24, TLABEL(all)
- ldw PV_VA(r24), r25
- ldw PV_SPACE(r24), r17
- comb,<>,n r9, r25, $hash_loop_tlbd_t
- ldw PV_HASH(r24), r24
- comb,<>,n r8, r17, $hash_loop_tlbd_t
- ldw PV_HASH(r24), r24
-
- VTAG /* (r8,r9) -> r16 */
- /* Set the dirty bit for this physical page. */
- ldw PV_TLBPROT(r24), r25
- b $tlb_inshpt_t
- depi 1, TLB_DIRTY_POS, 1, r25
-#else
-
- mfsp %sr1, %r25
- mtsp %r8, %sr1
- lpa %r0(%sr1, %r9), %r17
- mfctl %cr29, %r16
- mtsp %r25, %sr1
- extru %r17, 20, 21, %r24
- sh3add %r24, %r16, %r16
-
-#endif
+ TLB_PULL(1)
+ mfsp sr1, r16
+ mtsp r8, sr1
+ idtlba r17,(sr1, r9)
+ idtlbp r25,(sr1, r9)
+ mtsp r16, sr1
+ rfir
+ nop
$itlb_x
+$itlbna_x
$itlb_s
+$itlbna_s
$itlb_t
- depi 1, TFF_ITLB_POS, 1, r1 /* mark for ITLB insert */
+$itlbna_t
+ TLB_PULL(0)
+ mfsp sr1, r16
+ mtsp r8, sr1
+ iitlba r17,(sr1, r9)
+ iitlbp r25,(sr1, r9)
+ mtsp r16, sr1
+ rfir
+ nop
$dtlb_x
$dtlbna_x
@@ -1352,77 +1363,15 @@ $dtlb_s
$dtlbna_s
$dtlb_t
$dtlbna_t
- /*
- * r1 is the trap type
- * r8 is the space of the address that had the TLB miss
- * r9 is the offset of the address that had the TLB miss
- * r24 is the correspondent HPT entry pointer
- */
-
- HPTENT
- mtctl r24, cr28
-
- ldw HPT_TAG(r24),r17
- VTAG /* (r8,r9) -> r16 */
-
- /* Compare the tag against the HPT entry.
- If it matches, then do the TLB insertion. */
- comb,<>,n r16, r17, $tlb_gottalook_t
-
- ldw HPT_TLBPAGE(r24), r17
- b $tlb_gothpt_t
- ldw HPT_TLBPROT(r24), r25
-
-$tlb_gottalook_t
- /*
- * Chase the list of entries for this hash bucket until we find
- * the correct mapping or NULL.
- */
- ldw HPT_ENTRY(r24),r24
-$hash_loop_t
- comb,=,n r0, r24, TLABEL(all)
- ldw PV_VA(r24),r25
- ldw PV_SPACE(r24),r17
- comb,<>,n r9,r25,$hash_loop_t
- ldw PV_HASH(r24),r24
- comb,<>,n r8,r17,$hash_loop_t
- ldw PV_HASH(r24),r24
-
- /* Now set things up to enter the real mapping that we want */
- ldw PV_TLBPROT(r24),r25
- depi 1, TLB_REF_POS, 1, r25
-
- /*
- * Load the HPT cache with the miss information for the next time.
- */
-$tlb_inshpt_t
- stw r25, PV_TLBPROT(r24)
- ldw PV_TLBPAGE(r24),r17
- mfctl cr28, r24
-
- stw r16, HPT_TAG(r24)
- stw r25, HPT_TLBPROT(r24)
- stw r17, HPT_TLBPAGE(r24)
-
-$tlb_gothpt_t
+ TLB_PULL(0)
mfsp sr1, r16
- bb,< r1, TFF_ITLB_POS, $tlb_itlb_t
mtsp r8, sr1
-
idtlba r17,(sr1, r9)
idtlbp r25,(sr1, r9)
- nop ! nop
- mtsp r16, sr1
- rfir
- nop
-
-$tlb_itlb_t
- iitlba r17,(sr1, r9)
- iitlbp r25,(sr1, r9)
- nop ! nop
mtsp r16, sr1
rfir
nop
+#endif /* defined(HP7000_CPU) || defined(HP7100_CPU) || defined(HP7200_CPU) */
#ifdef HP7100LC_CPU
/*
@@ -1481,98 +1430,58 @@ EXIT(desidhash_l)
.align 32
-$tlbd_l
- mfctl cr28, r24
+#define IITLBAF(r) .word 0x04000440 | ((r) << 16)
+#define IITLBPF(r) .word 0x04000400 | ((r) << 16)
+#define IDTLBAF(r) .word 0x04001440 | ((r) << 16)
+#define IDTLBPF(r) .word 0x04001400 | ((r) << 16)
- /*
- * Chase the list of entries for this hash bucket until we find
- * the correct mapping or NULL.
- */
- ldw HPT_ENTRY(r24), r16
-$hash_loop_tlbd_l
- comb,=,n r0, r16, TLABEL(all)
- ldw PV_VA(r16), r25
- ldw PV_SPACE(r16), r17
- comb,<>,n r9, r25, $hash_loop_tlbd_l
- ldw PV_HASH(r16), r16
- comb,<>,n r8, r17, $hash_loop_tlbd_l
- ldw PV_HASH(r16), r16
-
- /* Set the dirty bit for this physical page. */
- ldw PV_TLBPAGE(r16), r17
- ldw PV_TLBPROT(r16), r25
- depi 1, TLB_DIRTY_POS, 1, r25
- depi 1, TLB_REF_POS, 1, r25
- stw r25, PV_TLBPROT(r16)
- VTAG /* (r8,r9) -> r16 */
-
- stw r16, HPT_TAG(r24)
- stw r25, HPT_TLBPROT(r24)
- stw r17, HPT_TLBPAGE(r24)
-
- .word 0x04111440 ; idtlbaf r17
- .word 0x04191400 ; idtlbpf r25
- nop ! nop
+/*
+ * possible optimizations:
+ * change pte to reduce number of shifts
+ * reorder to reduce stalls
+ * check if stwas is needed (if we changed the bits)
+ */
+#define TLB_PULL_L(bits) ! \
+ /* space:pgaddr -- r8:r9 */ ! \
+ mfctl vtop, r16 ! \
+ ldwax,s r8(r16), r17 /* space -> page directory */ ! \
+ extru r9, 9, 10, r25 ! \
+ combt,=,n r0, r17, TLABEL(all) ! \
+ ldwax,s r25(r17), r24 /* page -> page table */ ! \
+ extru r9, 19, 10, r16 ! \
+ combt,=,n r0, r24, TLABEL(all) ! \
+ ldwax,s r16(r24), r17 /* va -> pa:prot */ ! \
+ sh2addl r16, r24, r25 ! \
+ combt,=,n r0, r17, TLABEL(all) ! \
+ depi (bits), 21+bits, 1+bits, r17 ! \
+ mfctl tr7, r1 ! \
+ stwas r17, 0(r25) /* store back w/ the bits */ ! \
+ shd r17, r0, 13, r25 ! \
+ dep r8, 30, 15, r25 /* mix0r the pid from the sid */! \
+ dep r0, 31, 12, r17 /* needed ? */ ! \
+ addi 2, r25, r25 ! \
+ extru r17, 24, 25, r17
+
+$tlbd_l
+ TLB_PULL_L(1)
+ IDTLBAF(17)
+ IDTLBPF(25)
rfir
nop
-
- .align 8
+$itlbna_l
$itlb_l
- depi 1, TFF_ITLB_POS, 1, r1 /* mark for ITLB insert */
-$dtlbna_l
- HPTENT
- mtctl r24, cr28
-
-$dtlb_l
- mfctl cr28, r24
- /*
- * r1 is the trap type
- * r8 is the space of the address that had the TLB miss
- * r9 is the offset of the address that had the TLB miss
- * r24 is the correspondent HPT entry pointer
- */
-
- /*
- * Chase the list of entries for this hash bucket until we find
- * the correct mapping or NULL.
- */
- ldw HPT_ENTRY(r24), r16
-$hash_loop_l
- comb,=,n r0, r16, TLABEL(all)
- ldw PV_VA(r16), r25
- ldw PV_SPACE(r16), r17
- comb,<>,n r9, r25, $hash_loop_l
- ldw PV_HASH(r16), r16
- comb,<>,n r8, r17, $hash_loop_l
- ldw PV_HASH(r16), r16
-
- /* Now set things up to enter the real mapping that we want */
- ldw PV_TLBPAGE(r16), r17
- ldw PV_TLBPROT(r16), r25
-
- /*
- * Load the HPT cache with the miss information for the next time.
- * The HPT entry address was saved by the HPTENT
- */
- depi 1, TLB_REF_POS, 1, r25
- stw r25, PV_TLBPROT(r16)
- VTAG /* (r8,r9) -> r16 */
-
- stw r16, HPT_TAG(r24)
- stw r25, HPT_TLBPROT(r24)
- bb,< r1, TFF_ITLB_POS, $tlb_itlb_l
- stw r17, HPT_TLBPAGE(r24)
-
- .word 0x04111440 ; idtlbaf r17
- .word 0x04191400 ; idtlbpf r25
- nop ! nop
+#if 0 /* XXX assume combined TLB */
+ TLB_PULL_L(0)
+ IITLBAF(17)
+ IITLBPF(25)
rfir
nop
-
-$tlb_itlb_l
- .word 0x04110440 ; iitlbaf r17
- .word 0x04190400 ; iitlbpf r25
- nop ! nop
+#endif
+$dtlbna_l
+$dtlb_l
+ TLB_PULL_L(0)
+ IDTLBAF(17)
+ IDTLBPF(25)
rfir
nop
#endif /* HP7100LC_CPU */
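
TLB_PULL and TLB_PULL_L are the heart of this change: instead of probing an inverted hash table of pv_entry chains, a miss now walks a two-level page table in physical mode. The index widths match the pmap.c accessors below (va[31:22] selects the pde, va[21:12] the pte), and any empty level branches to TLABEL(all), the generic fault path. In C terms the walk looks roughly like the sketch below; the typedefs and bit masks are assumptions standing in for the machine headers (masks inferred from the depi operands), and the casts assume a 32-bit kernel where physical addresses fit in a word.

	typedef unsigned int u_int;
	typedef u_int pt_entry_t, vaddr_t, paddr_t, pa_space_t;

	#define PTE_PROT(x)	(x)		/* assumed passthrough */
	#define TLB_REFTRAP	0x00000400	/* assumed: depi pos 21 */
	#define TLB_DIRTY	0x00000200	/* assumed: depi pos 22 */

	pt_entry_t
	tlb_pull_sketch(paddr_t *sdir, pa_space_t sp, vaddr_t va, int dirty)
	{
		paddr_t *pdir;
		pt_entry_t *ptbl, pte;

		/* cr25 (vtop): space -> page directory */
		if ((pdir = (paddr_t *)sdir[sp]) == 0)
			return (0);		/* TLABEL(all) */
		/* va[31:22] -> page table */
		if ((ptbl = (pt_entry_t *)pdir[va >> 22]) == 0)
			return (0);		/* TLABEL(all) */
		/* va[21:12] -> pte */
		if ((pte = ptbl[(va >> 12) & 0x3ff]) == 0)
			return (0);		/* TLABEL(all) */

		pte &= ~PTE_PROT(TLB_REFTRAP);	/* disarm the ref trap */
		if (dirty)
			pte |= PTE_PROT(TLB_DIRTY); /* write: mark modified */
		ptbl[(va >> 12) & 0x3ff] = pte;	/* store back w/ the bits */
		return (pte);
	}

The remaining instructions only reformat: the page-frame part of the pte feeds i/dtlba, and the i/dtlbp protection word is the pte's access-rights field merged with the protection id derived from the space id (see the pid note after the pmap.c hunk).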
diff --git a/sys/arch/hppa/hppa/machdep.c b/sys/arch/hppa/hppa/machdep.c
index ebacbf7c8ab..48ace22fc02 100644
--- a/sys/arch/hppa/hppa/machdep.c
+++ b/sys/arch/hppa/hppa/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.61 2002/03/14 01:26:31 millert Exp $ */
+/* $OpenBSD: machdep.c,v 1.62 2002/03/15 21:44:18 mickey Exp $ */
/*
* Copyright (c) 1999-2002 Michael Shalayeff
@@ -184,7 +184,9 @@ void hppa_user2frame(struct trapframe *sf, struct trapframe *tf);
/*
* wide used hardware params
*/
+#if defined(HP7100LC_CPU) || defined(HP7300LC_CPU)
struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT;
+#endif
struct pdc_coproc pdc_coproc PDC_ALIGNMENT;
struct pdc_coherence pdc_coherence PDC_ALIGNMENT;
struct pdc_spidb pdc_spidbits PDC_ALIGNMENT;
@@ -198,10 +200,10 @@ pid_t sigpid = 0;
/*
* Whatever CPU types we support
*/
-extern const u_int itlb_x[], dtlb_x[], dtlbna_x[], tlbd_x[];
-extern const u_int itlb_s[], dtlb_s[], dtlbna_s[], tlbd_s[];
-extern const u_int itlb_t[], dtlb_t[], dtlbna_t[], tlbd_t[];
-extern const u_int itlb_l[], dtlb_l[], dtlbna_l[], tlbd_l[];
+extern const u_int itlb_x[], itlbna_x[], dtlb_x[], dtlbna_x[], tlbd_x[];
+extern const u_int itlb_s[], itlbna_s[], dtlb_s[], dtlbna_s[], tlbd_s[];
+extern const u_int itlb_t[], itlbna_t[], dtlb_t[], dtlbna_t[], tlbd_t[];
+extern const u_int itlb_l[], itlbna_l[], dtlb_l[], dtlbna_l[], tlbd_l[];
int iibtlb_s(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
vsize_t sz, u_int prot);
int idbtlb_s(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
@@ -226,7 +228,7 @@ const struct hppa_cpu_typed {
int arch;
int features;
int (*desidhash)(void);
- const u_int *itlbh, *dtlbh, *dtlbnah, *tlbdh;
+ const u_int *itlbh, *itlbnah, *dtlbh, *dtlbnah, *tlbdh;
int (*dbtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
vsize_t sz, u_int prot);
int (*ibtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
@@ -236,50 +238,50 @@ const struct hppa_cpu_typed {
} cpu_types[] = {
#ifdef HP7000_CPU
{ "PCX", hpcx, 0x10, 0,
- desidhash_x, itlb_x, dtlb_x, dtlbna_x, tlbd_x,
+ desidhash_x, itlb_x, itlbna_x, dtlb_x, dtlbna_x, tlbd_x,
ibtlb_g, NULL, pbtlb_g},
#endif
#ifdef HP7100_CPU
- { "PCXS", hpcxs, 0x11, HPPA_FTRS_BTLBS,
- desidhash_s, itlb_s, dtlb_s, dtlbna_s, tlbd_s,
+ { "PCXS", hpcxs, 0x11, 0,
+ desidhash_s, itlb_s, itlbna_s, dtlb_s, dtlbna_s, tlbd_s,
ibtlb_g, NULL, pbtlb_g},
#endif
#ifdef HP7200_CPU
{ "PCXT", hpcxt, 0x11, HPPA_FTRS_BTLBU,
- desidhash_t, itlb_t, dtlb_t, dtlbna_t, tlbd_t,
+ desidhash_t, itlb_t, itlbna_t, dtlb_t, dtlbna_t, tlbd_t,
ibtlb_g, NULL, pbtlb_g},
/* HOW? { "PCXT'", hpcxta,0x11, HPPA_FTRS_BTLBU,
- desidhash_t, itlb_t, dtlb_t, dtlbna_t, tlbd_t,
+ desidhash_t, itlb_t, itlbna_t, dtlb_t, dtlbna_t, tlbd_t,
ibtlb_g, NULL, pbtlb_g}, */
#endif
#ifdef HP7100LC_CPU
{ "PCXL", hpcxl, 0x11, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
- desidhash_l, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
+ desidhash_l, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
ibtlb_g, NULL, pbtlb_g, hpti_g},
#endif
#ifdef HP7300LC_CPU
/* HOW? { "PCXL2", hpcxl2,0x11, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
- desidhash_l, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
+ desidhash_l, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
ibtlb_g, NULL, pbtlb_g, hpti_g}, */
#endif
#ifdef HP8000_CPU
{ "PCXU", hpcxu, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
- desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
+ desidhash_g, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
ibtlb_g, NULL, pbtlb_g, hpti_g},
#endif
#ifdef HP8200_CPU
/* HOW? { "PCXU2", hpcxu2,0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
- desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
+ desidhash_g, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
ibtlb_g, NULL, pbtlb_g, hpti_g}, */
#endif
#ifdef HP8500_CPU
/* HOW? { "PCXW", hpcxw, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
- desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
+ desidhash_g, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
ibtlb_g, NULL, pbtlb_g, hpti_g}, */
#endif
#ifdef HP8600_CPU
/* HOW? { "PCXW+", hpcxw, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
- desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
+ desidhash_g, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
ibtlb_g, NULL, pbtlb_g, hpti_g}, */
#endif
{ "", 0 }
@@ -290,10 +292,8 @@ hppa_init(start)
paddr_t start;
{
extern int kernel_text;
- vaddr_t v, vstart, vend;
- register int error;
- int hptsize; /* size of HPT table if supported */
- int cpu_features = 0;
+ vaddr_t v, v1;
+ int error, cpu_features = 0;
boothowto |= RB_SINGLE; /* XXX always go into single-user while debug */
@@ -364,6 +364,9 @@ hppa_init(start)
PAGE0->ivec_mempflen = (hppa_pfr_end - hppa_pfr + 1) * 4;
}
+ /* may the scientific guessing begin */
+ cpu_features = 0;
+
/* BTLB params */
if ((error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
PDC_BTLB_DEFAULT, &pdc_btlb)) < 0) {
@@ -388,8 +391,8 @@ hppa_init(start)
PDC_BTLB_PURGE_ALL) < 0)
printf("WARNING: BTLB purge failed\n");
- cpu_features = pdc_btlb.finfo.num_c?
- HPPA_FTRS_BTLBU : HPPA_FTRS_BTLBS;
+ if (pdc_btlb.finfo.num_c)
+ cpu_features |= HPPA_FTRS_BTLBU;
}
ptlball();
@@ -398,24 +401,14 @@ hppa_init(start)
totalphysmem = PAGE0->imm_max_mem / NBPG;
resvmem = ((vaddr_t)&kernel_text) / NBPG;
- /* calculate HPT size */
- for (hptsize = 256; hptsize < totalphysmem; hptsize *= 2);
- hptsize *= 16; /* sizeof(hpt_entry) */
-
+#if defined(HP7100LC_CPU) || defined(HP7300LC_CPU)
if (pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) &&
!pdc_hwtlb.min_size && !pdc_hwtlb.max_size) {
printf("WARNING: no HPT support, fine!\n");
- mtctl(hptsize - 1, CR_HPTMASK);
- hptsize = 0;
- } else {
+ pmap_hptsize = 0;
+ } else
cpu_features |= HPPA_FTRS_HVT;
-
- if (hptsize > pdc_hwtlb.max_size)
- hptsize = pdc_hwtlb.max_size;
- else if (hptsize < pdc_hwtlb.min_size)
- hptsize = pdc_hwtlb.min_size;
- mtctl(hptsize - 1, CR_HPTMASK);
- }
+#endif
/*
* Deal w/ CPU now
@@ -452,8 +445,13 @@ hppa_init(start)
LDILDO(trap_ep_T_TLB_DIRTY , p->tlbdh);
LDILDO(trap_ep_T_DTLBMISS , p->dtlbh);
LDILDO(trap_ep_T_DTLBMISSNA, p->dtlbnah);
- LDILDO(trap_ep_T_ITLBMISS , p->itlbh);
- LDILDO(trap_ep_T_ITLBMISSNA, p->itlbh);
+ if (pdc_cache.dt_conf.tc_sh) {
+ LDILDO(trap_ep_T_ITLBMISS , p->dtlbh);
+ LDILDO(trap_ep_T_ITLBMISSNA, p->dtlbnah);
+ } else {
+ LDILDO(trap_ep_T_ITLBMISS , p->itlbh);
+ LDILDO(trap_ep_T_ITLBMISSNA, p->itlbnah);
+ }
#undef LDILDO
}
}
@@ -466,9 +464,6 @@ hppa_init(start)
EX_NOWAIT))
panic("cannot reserve main memory");
- vstart = hppa_round_page(start);
- vend = VM_MAX_KERNEL_ADDRESS;
-
/*
* Now allocate kernel dynamic variables
*/
@@ -491,7 +486,7 @@ hppa_init(start)
if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE)
bufpages = nbuf * MAXBSIZE / PAGE_SIZE;
- v = vstart;
+ v1 = v = hppa_round_page(start);
#define valloc(name, type, num) (name) = (type *)v; v = (vaddr_t)((name)+(num))
valloc(buf, struct buf, nbuf);
@@ -514,34 +509,16 @@ hppa_init(start)
#undef valloc
v = hppa_round_page(v);
- bzero ((void *)vstart, (v - vstart));
- vstart = v;
+ bzero ((void *)v1, (v - v1));
/* sets physmem */
- pmap_bootstrap(&vstart, &vend);
+ pmap_bootstrap(v);
/* alloc msgbuf */
if (!(msgbufp = (void *)pmap_steal_memory(MSGBUFSIZE, NULL, NULL)))
panic("cannot allocate msgbuf");
msgbufmapped = 1;
- /* Turn on the HW TLB assist */
- if (hptsize) {
- u_int hpt;
-
- mfctl(CR_VTOP, hpt);
- if ((error = (cpu_hpt_init)(hpt, hptsize)) < 0) {
-#ifdef DEBUG
- printf("WARNING: HPT init error %d\n", error);
-#endif
- } else {
-#ifdef PMAPDEBUG
- printf("HPT: %d entries @ 0x%x\n",
- hptsize / sizeof(struct hpt_entry), hpt);
-#endif
- }
- }
-
/* locate coprocessors and SFUs */
if ((error = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT,
&pdc_coproc)) < 0)
@@ -630,10 +607,6 @@ cpu_startup()
printf("real mem = %d (%d reserved for PROM, %d used by OpenBSD)\n",
ctob(totalphysmem), ctob(resvmem), ctob(physmem));
- /*
- * Now allocate buffers proper. They are different than the above
- * in that they usually occupy more virtual memory than physical.
- */
size = MAXBSIZE * nbuf;
if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
@@ -661,9 +634,8 @@ cpu_startup()
if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL)
panic("cpu_startup: not enough memory for "
"buffer cache");
- pmap_enter(kernel_map->pmap, curbuf,
- VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE,
- VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+ pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
+ VM_PROT_READ|VM_PROT_WRITE);
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
@@ -890,11 +862,12 @@ btlb_insert(space, va, pa, lenp, prot)
va >>= PGSHIFT;
/* check address alignment */
if (pa & (len - 1))
- printf("WARNING: BTLB address misaligned\n");
+ printf("WARNING: BTLB address misaligned pa=0x%x, len=0x%x\n",
+ pa, len);
/* ensure IO space is uncached */
if ((pa & 0xF0000) == 0xF0000)
- prot |= TLB_UNCACHEABLE;
+ prot |= TLB_UNCACHABLE;
#ifdef BTLBDEBUG
printf("btlb_insert(%d): %x:%x=%x[%x,%x]\n", i, space, va, pa, len, prot);
diff --git a/sys/arch/hppa/hppa/pmap.c b/sys/arch/hppa/hppa/pmap.c
index aeb1b1ff72e..30bb0d71213 100644
--- a/sys/arch/hppa/hppa/pmap.c
+++ b/sys/arch/hppa/hppa/pmap.c
@@ -1,7 +1,7 @@
-/* $OpenBSD: pmap.c,v 1.61 2002/03/14 01:26:31 millert Exp $ */
+/* $OpenBSD: pmap.c,v 1.62 2002/03/15 21:44:18 mickey Exp $ */
/*
- * Copyright (c) 1998-2001 Michael Shalayeff
+ * Copyright (c) 1998-2002 Michael Shalayeff
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -24,104 +24,12 @@
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
- * Copyright 1996 1995 by Open Software Foundation, Inc.
- * All Rights Reserved
- *
- * Permission to use, copy, modify, and distribute this software and
- * its documentation for any purpose and without fee is hereby granted,
- * provided that the above copyright notice appears in all copies and
- * that both the copyright notice and this permission notice appear in
- * supporting documentation.
- *
- * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE.
- *
- * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
- * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
- * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-/*
- * Mach Operating System
- * Copyright (c) 1990,1991,1992,1993,1994 The University of Utah and
- * the Computer Systems Laboratory (CSL).
- * Copyright (c) 1991,1987 Carnegie Mellon University.
- * All rights reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation,
- * and that all advertising materials mentioning features or use of
- * this software display the following acknowledgement: ``This product
- * includes software developed by the Computer Systems Laboratory at
- * the University of Utah.''
- *
- * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
- * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
- * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
- * THIS SOFTWARE.
- *
- * CSL requests users of this software to return to csl-dist@cs.utah.edu any
- * improvements that they make and grant CSL redistribution rights.
- *
- * Carnegie Mellon requests users of this software to return to
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- *
- * Utah $Hdr: pmap.c 1.49 94/12/15$
- * Author: Mike Hibler, Bob Wheeler, University of Utah CSL, 10/90
- */
-/*
- * Manages physical address maps for hppa.
- *
- * In addition to hardware address maps, this
- * module is called upon to provide software-use-only
- * maps which may or may not be stored in the same
- * form as hardware maps. These pseudo-maps are
- * used to store intermediate results from copy
- * operations to and from address spaces.
- *
- * Since the information managed by this module is
- * also stored by the logical address mapping module,
- * this module may throw away valid virtual-to-physical
- * mappings at almost any time. However, invalidations
- * of virtual-to-physical mappings must be done as
- * requested.
- *
- * In order to cope with hardware architectures which
- * make virtual-to-physical map invalidates expensive,
- * this module may delay invalidate or reduced protection
- * operations until such time as they are actually
- * necessary. This module is given full information to
- * when physical maps must be made correct.
- *
- */
-/*
- * CAVEATS:
- *
- * PAGE_SIZE must equal NBPG
- * Needs more work for MP support
- * page maps are stored as linear linked lists, some
- * improvement may be achieved should we use smth else
- * protection id (pid) allocation should be done in a pid_t fashion
- * (maybe just use the pid itself)
- * some ppl say, block tlb entries should be maintained somewhere in uvm
- * and be ready for reloads in the fault handler.
- *
* References:
* 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0
* 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0
@@ -133,6 +41,7 @@
#include <sys/lock.h>
#include <sys/user.h>
#include <sys/proc.h>
+#include <sys/pool.h>
#include <sys/malloc.h>
#include <uvm/uvm.h>
@@ -161,17 +70,18 @@
#define PDB_BITS 0x00000080
#define PDB_COLLECT 0x00000100
#define PDB_PROTECT 0x00000200
-#define PDB_PDRTAB 0x00000400
-#define PDB_VA 0x00000800
+#define PDB_EXTRACT 0x00000400
+#define PDB_VP 0x00000800
#define PDB_PV 0x00001000
#define PDB_PARANOIA 0x00002000
#define PDB_WIRING 0x00004000
-#define PDB_PVDUMP 0x00008000
+#define PDB_PMAP 0x00008000
#define PDB_STEAL 0x00010000
#define PDB_PHYS 0x00020000
+#define PDB_POOL 0x00040000
int pmapdebug = 0
/* | PDB_FOLLOW */
-/* | PDB_VA */
+/* | PDB_VP */
/* | PDB_PV */
/* | PDB_INIT */
/* | PDB_ENTER */
@@ -184,35 +94,35 @@ int pmapdebug = 0
#define DPRINTF(l,s) /* */
#endif
-vaddr_t virtual_steal, virtual_avail, virtual_end;
+vaddr_t virtual_steal, virtual_avail;
+
+#if defined(HP7100LC_CPU) || defined(HP7300LC_CPU)
+int pmap_hptsize = 256; /* patchable */
+#endif
struct pmap kernel_pmap_store;
-pmap_t kernel_pmap;
+int pmap_sid_counter, hppa_sid_max = HPPA_SID_MAX;
boolean_t pmap_initialized = FALSE;
+struct pool pmap_pmap_pool;
+struct pool pmap_pv_pool;
+struct simplelock pvalloc_lock;
-TAILQ_HEAD(, pmap) pmap_freelist; /* list of free pmaps */
-u_int pmap_nfree;
-struct simplelock pmap_freelock; /* and lock */
-
-struct simplelock pmap_lock; /* XXX this is all broken */
-struct simplelock sid_pid_lock; /* pids */
-
-u_int pages_per_vm_page;
-u_int sid_counter;
+void *pmap_pv_page_alloc(struct pool *, int);
+void pmap_pv_page_free(struct pool *, void *);
-TAILQ_HEAD(, pv_page) pv_page_freelist;
-u_int pv_nfree;
-
-#ifdef PMAPDEBUG
-void pmap_hptdump(int sp);
-#endif
+struct pool_allocator pmap_allocator_pv = {
+ pmap_pv_page_alloc, pmap_pv_page_free, 0
+};
-u_int kern_prot[8], user_prot[8];
+u_int hppa_prot[8];
-void pmap_pinit(pmap_t);
#define pmap_sid(pmap, va) \
(((va & 0xc0000000) != 0xc0000000)? pmap->pmap_space : HPPA_SID_KERNEL)
+#define pmap_pvh_attrs(a) \
+ (((a) & PTE_PROT(TLB_DIRTY)) | ((a) ^ PTE_PROT(TLB_REFTRAP)))
+
+#if defined(HP7100LC_CPU) || defined(HP7300LC_CPU)
/*
* This hash function is the one used by the hardware TLB walker on the 7100LC.
*/
@@ -231,497 +141,368 @@ pmap_hash(pa_space_t sp, vaddr_t va)
: "=r" (hpt) : "r" (sp), "r" (va) : "r22", "r23");
return hpt;
}
+#endif
-/*
- * pmap_enter_va(space, va, pv)
- * insert mapping entry into va->pa translation hash table.
- */
static __inline void
-pmap_enter_va(struct pv_entry *pv)
+pmap_sdir_set(pa_space_t space, paddr_t pa)
{
- struct hpt_entry *hpt = pmap_hash(pv->pv_space, pv->pv_va);
-#if defined(PMAPDEBUG) || defined(DIAGNOSTIC)
- struct pv_entry *pvp = hpt->hpt_entry;
-#endif
- DPRINTF(PDB_FOLLOW | PDB_VA,
- ("pmap_enter_va(%x,%x,%p): hpt=%p, pvp=%p\n",
- pv->pv_space, pv->pv_va, pv, hpt, pvp));
-#ifdef DIAGNOSTIC
- while(pvp && (pvp->pv_va != pv->pv_va || pvp->pv_space != pv->pv_space))
- pvp = pvp->pv_hash;
- if (pvp)
- panic("pmap_enter_va: pv_entry is already in hpt_table");
+ paddr_t vtop;
+
+ mfctl(CR_VTOP, vtop);
+#ifdef PMAPDEBUG
+ if (!vtop)
+ panic("pmap_sdir_set: zero vtop");
#endif
- /* we assume that normally there are no duplicate entries
- would be inserted (use DIAGNOSTIC should you want a proof) */
- pv->pv_hash = hpt->hpt_entry;
- hpt->hpt_entry = pv;
+ asm("stwas %0, 0(%1)":: "r" (pa), "r" (vtop + (space << 2)));
}
-/*
- * pmap_find_va(space, va)
- * returns address of the pv_entry correspondent to sp:va
- */
-static __inline struct pv_entry *
-pmap_find_va(pa_space_t space, vaddr_t va)
+static __inline paddr_t
+pmap_sdir_get(pa_space_t space)
{
- struct pv_entry *pvp = pmap_hash(space, va)->hpt_entry;
+ paddr_t vtop, pa;
- DPRINTF(PDB_FOLLOW | PDB_VA, ("pmap_find_va(%x,%x)\n", space, va));
+ mfctl(CR_VTOP, vtop);
+ asm("ldwax,s %2(%1), %0": "=&r" (pa) : "r" (vtop), "r" (space));
- while(pvp && (pvp->pv_va != va || pvp->pv_space != space))
- pvp = pvp->pv_hash;
-
- return pvp;
+ return (pa);
}
-/*
- * Clear the HPT table entry for the corresponding space/offset to reflect
- * the fact that we have possibly changed the mapping, and need to pick
- * up new values from the mapping structure on the next access.
- */
-static __inline void
-pmap_clear_va(pa_space_t space, vaddr_t va)
+static __inline pt_entry_t *
+pmap_pde_get(paddr_t pa, vaddr_t va)
{
- struct hpt_entry *hpt = pmap_hash(space, va);
+ pt_entry_t *pde;
+
+ asm("ldwax,s %2(%1), %0": "=&r" (pde) : "r" (pa), "r" (va >> 22));
- hpt->hpt_valid = 0;
- hpt->hpt_space = -1;
+ return (pde);
}
-/*
- * pmap_remove_va(pv)
- * removes pv_entry from the va->pa translation hash table
- */
static __inline void
-pmap_remove_va(struct pv_entry *pv)
+pmap_pde_set(struct pmap *pm, vaddr_t va, paddr_t ptp)
{
- struct hpt_entry *hpt = pmap_hash(pv->pv_space, pv->pv_va);
- struct pv_entry **pvp = (struct pv_entry **)&hpt->hpt_entry;
-
- DPRINTF(PDB_FOLLOW | PDB_VA,
- ("pmap_remove_va(%p), hpt=%p, pvp=%p\n", pv, hpt, pvp));
-
- while(*pvp && *pvp != pv)
- pvp = &(*pvp)->pv_hash;
- if (*pvp) {
- *pvp = (*pvp)->pv_hash;
- pv->pv_hash = NULL;
- if (hptbtop(pv->pv_va) == hpt->hpt_vpn &&
- pv->pv_space == hpt->hpt_space) {
- hpt->hpt_space = -1;
- hpt->hpt_valid = 0;
- }
- } else {
-#ifdef DIAGNOSTIC
- printf("pmap_remove_va: entry not found\n");
-#endif
- }
+ asm("stwas %0, 0(%1)"
+ :: "r" (ptp), "r" ((paddr_t)pm->pm_pdir + ((va >> 20) & 0xffc)));
}
-/*
- * pmap_insert_pvp(pvp, flag)
- * loads the passed page into pv_entries free list.
- * flag specifies how the page was allocated where possible
- * choices are (0)static, (1)malloc; (probably bogus, but see free_pv)
- */
-static __inline void
-pmap_insert_pvp(struct pv_page *pvp, u_int flag)
+static __inline pt_entry_t *
+pmap_pde_alloc(struct pmap *pm, vaddr_t va, struct vm_page **pdep)
{
- struct pv_entry *pv;
+ struct vm_page *pg;
+ paddr_t pa;
+
+ DPRINTF(PDB_FOLLOW|PDB_VP,
+ ("pmap_pde_alloc(%p, 0x%x, %p)\n", pm, va, pdep));
+
+ va &= PDE_MASK;
+ pg = uvm_pagealloc(&pm->pm_obj, va, NULL,
+ UVM_PGA_USERESERVE|UVM_PGA_ZERO);
+ if (pg == NULL) {
+ /* try to steal somewhere */
+ return (NULL);
+ }
+
+ pa = VM_PAGE_TO_PHYS(pg);
+
+ DPRINTF(PDB_FOLLOW|PDB_VP, ("pmap_pde_alloc: pde %x\n", pa));
- bzero(pvp, sizeof(*pvp));
- for (pv = &pvp->pvp_pv[0]; pv < &pvp->pvp_pv[NPVPPG - 1]; pv++)
- pv->pv_next = pv + 1;
- pvp->pvp_flag = flag;
- pvp->pvp_freelist = &pvp->pvp_pv[0];
- pv_nfree += pvp->pvp_nfree = NPVPPG;
- TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_list);
+ pg->flags &= ~PG_BUSY; /* never busy */
+ pg->wire_count = 1; /* no mappings yet */
+ pmap_pde_set(pm, va, pa);
+ pm->pm_stats.resident_count++; /* count PTP as resident */
+ pm->pm_ptphint = pg;
+ if (pdep)
+ *pdep = pg;
+ return ((pt_entry_t *)pa);
}
-/*
- * pmap_alloc_pv()
- * allocates the pv_entry from the pv_entries free list.
- * once we've ran out of preallocated pv_entries, nothing
- * can be done, since tlb fault handlers work in phys mode.
- */
-static __inline struct pv_entry *
-pmap_alloc_pv(void)
+static __inline struct vm_page *
+pmap_pde_ptp(struct pmap *pm, pt_entry_t *pde)
{
- struct pv_page *pvp;
- struct pv_entry *pv;
+ paddr_t pa = (paddr_t)pde;
- DPRINTF(PDB_FOLLOW | PDB_PV, ("pmap_alloc_pv()\n"));
+ DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pde_ptp(%p, %p)\n", pm, pde));
- if (pv_nfree == 0) {
-#if notyet
- MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
-
- if (!pvp)
- panic("pmap_alloc_pv: alloc failed");
- pmap_insert_pvp(pvp, 0);
-#else
- panic("out of pv_entries");
-#endif
- }
+ if (pm->pm_ptphint && VM_PAGE_TO_PHYS(pm->pm_ptphint) == pa)
+ return (pm->pm_ptphint);
- --pv_nfree;
- pvp = TAILQ_FIRST(&pv_page_freelist);
- if (--pvp->pvp_nfree == 0)
- TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_list);
- pv = pvp->pvp_freelist;
-#ifdef DIAGNOSTIC
- if (pv == 0)
- panic("pmap_alloc_pv: pgi_nfree inconsistent");
-#endif
- pvp->pvp_freelist = pv->pv_next;
- pv->pv_next = NULL;
- pv->pv_hash = NULL;
- pv->pv_pmap = NULL;
+ DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pde_ptp: lookup 0x%x\n", pa));
- return pv;
+ return (PHYS_TO_VM_PAGE(pa));
}
-/*
- * pmap_free_pv(pv)
- * return pv_entry back into free list.
- * once full page of entries has been freed and that page
- * was allocated dynamically, free the page.
- */
static __inline void
-pmap_free_pv(struct pv_entry *pv)
+pmap_pde_release(struct pmap *pmap, vaddr_t va, struct vm_page *ptp)
{
- struct pv_page *pvp;
-
- DPRINTF(PDB_FOLLOW | PDB_PV, ("pmap_free_pv(%p)\n", pv));
-
- pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
- switch (++pvp->pvp_nfree) {
- case 1:
- TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_list);
- default:
- pv->pv_next = pvp->pvp_freelist;
- pvp->pvp_freelist = pv;
- ++pv_nfree;
- break;
- case NPVPPG:
- if (!pvp->pvp_flag) {
-#ifdef notyet
- pv_nfree -= NPVPPG - 1;
- TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_list);
- FREE((vaddr_t) pvp, M_VMPVENT);
-#else
- panic("pmap_free_pv: mallocated pv page");
-#endif
- }
- break;
+ ptp->wire_count--;
+ if (ptp->wire_count <= 1) {
+ pmap_pde_set(pmap, va, 0);
+ pmap->pm_stats.resident_count--;
+ if (pmap->pm_ptphint == ptp)
+ pmap->pm_ptphint = TAILQ_FIRST(&pmap->pm_obj.memq);
+ ptp->wire_count = 0;
+ uvm_pagefree(ptp);
}
}
-/*
- * pmap_enter_pv(pmap, va, tlbprot, tlbpage, pv)
- * insert specified mapping into pa->va translation list,
- * where pv specifies the list head (for particular pa)
- */
-static __inline struct pv_entry *
-pmap_enter_pv(pmap_t pmap, vaddr_t va, u_int tlbprot, u_int tlbpage,
- struct pv_entry *pv)
+static __inline pt_entry_t
+pmap_pte_get(pt_entry_t *pde, vaddr_t va)
{
- struct pv_entry *npv, *hpv;
+ pt_entry_t pte;
- if (!pmap_initialized)
- return NULL;
+ asm("ldwax,s %2(%1),%0" : "=&r" (pte)
+ : "r" (pde), "r" ((va >> 12) & 0x3ff));
-#ifdef DEBUG
- if (pv == NULL)
- printf("pmap_enter_pv: zero pv\n");
-#endif
+ return (pte);
+}
- DPRINTF(PDB_FOLLOW | PDB_PV, ("pmap_enter_pv: pv %p: %lx/%p/%p\n",
- pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
+static __inline void
+pmap_pte_set(pt_entry_t *pde, vaddr_t va, pt_entry_t pte)
+{
+ DPRINTF(PDB_FOLLOW|PDB_VP, ("pmap_pte_set(%p, 0x%x, 0x%x)\n",
+ pde, va, pte));
- if (pv->pv_pmap == NULL) {
- /*
- * No entries yet, use header as the first entry
- */
- DPRINTF(PDB_ENTER, ("pmap_enter_pv: no entries yet\n"));
- hpv = npv = NULL;
- } else {
- /*
- * There is at least one other VA mapping this page.
- * Place this entry after the header.
- */
- DPRINTF(PDB_ENTER, ("pmap_enter_pv: adding to the list\n"));
#ifdef PMAPDEBUG
- for (npv = pv; npv; npv = npv->pv_next)
- if (pmap == npv->pv_pmap && va == npv->pv_va) {
- printf("pmap_enter_pv: %p already in pv_tab",
- npv);
- pmap_enter_va(npv); /* HACK UGLY HACK HACK */
- return (npv);
- }
+ if (!pde)
+ panic("pmap_pte_set: zero pde");
+ if (pte && pte < virtual_steal &&
+ hppa_trunc_page(pte) != (paddr_t)&gateway_page)
+ panic("pmap_pte_set: invalid pte");
#endif
- hpv = pv;
- npv = pv->pv_next;
- pv = pmap_alloc_pv();
- }
- pv->pv_va = va;
- pv->pv_pmap = pmap;
- pv->pv_space = pmap->pmap_space;
- pv->pv_tlbprot = tlbprot;
- pv->pv_tlbpage = tlbpage;
- pv->pv_next = npv;
- if (hpv)
- hpv->pv_next = pv;
- pmap_enter_va(pv);
-
- return pv;
+ asm("stwas %0, 0(%1)"
+ :: "r" (pte), "r" ((paddr_t)pde + ((va >> 10) & 0xffc)));
}
-/*
- * pmap_remove_pv(ppv, pv)
- * remove mapping for specified va and pmap, from
- * pa->va translation list, having pv as a list head.
- */
-static __inline void
-pmap_remove_pv(struct pv_entry *ppv, struct pv_entry *pv)
+static __inline pt_entry_t
+pmap_vp_find(struct pmap *pm, vaddr_t va)
{
+ pt_entry_t *pde;
- DPRINTF(PDB_FOLLOW | PDB_PV, ("pmap_remove_pv(%p,%p)\n", ppv, pv));
+ if (!(pde = pmap_pde_get(pm->pm_pdir, va)))
+ return (NULL);
- /*
- * Clear it from cache and TLB
- */
- ficache(ppv->pv_space, ppv->pv_va, PAGE_SIZE);
- pitlb(ppv->pv_space, ppv->pv_va);
+ return (pmap_pte_get(pde, va));
+}
- fdcache(ppv->pv_space, ppv->pv_va, PAGE_SIZE);
- pdtlb(ppv->pv_space, ppv->pv_va);
+void
+pmap_dump_table(pa_space_t space)
+{
+ pa_space_t sp;
- /*
- * If it is the first entry on the list, it is actually
- * in the header and we must copy the following entry up
- * to the header. Otherwise we must search the list for
- * the entry. In either case we free the now unused entry.
- */
- if (ppv == pv) {
- ppv = pv->pv_next;
- pmap_remove_va(pv);
- if (ppv) {
- ppv->pv_tlbprot |= pv->pv_tlbprot &
- (TLB_DIRTY | TLB_REF);
- *pv = *ppv;
- pmap_free_pv(ppv);
- } else
- pv->pv_pmap = NULL;
- } else {
- for (; pv && pv->pv_next != ppv; pv = pv->pv_next)
- ;
-
- if (pv) {
- pv->pv_tlbprot |= ppv->pv_tlbprot &
- (TLB_DIRTY | TLB_REF);
- pv->pv_next = ppv->pv_next;
- pmap_remove_va(ppv);
- pmap_free_pv(ppv);
- } else {
-#ifdef DEBUG
- panic("pmap_remove_pv: npv == NULL\n");
-#endif
+ for (sp = 0; sp <= hppa_sid_max; sp++) {
+ paddr_t pa;
+ pt_entry_t *pde, pte;
+ vaddr_t va, pdemask = virtual_avail + 1;
+
+ if (((int)space >= 0 && sp != space) ||
+ !(pa = pmap_sdir_get(sp)))
+ continue;
+
+ for (va = virtual_avail; va < VM_MAX_KERNEL_ADDRESS;
+ va += PAGE_SIZE) {
+ if (pdemask != (va & PDE_MASK)) {
+ pdemask = va & PDE_MASK;
+ if (!(pde = pmap_pde_get(pa, va))) {
+ va += ~PDE_MASK + 1 - PAGE_SIZE;
+ continue;
+ }
+ printf("%x:0x%08x:\n", sp, pde);
+ }
+
+ if (!(pte = pmap_pte_get(pde, va)))
+ continue;
+
+ printf("0x%08x-0x%08x\n", va, pte);
}
}
}
-/*
- * pmap_find_pv(pa)
- * returns head of the pa->va translation list for specified pa.
- */
static __inline struct pv_entry *
-pmap_find_pv(paddr_t pa)
+pmap_pv_alloc(void)
{
- int bank, off;
+ struct pv_entry *pv;
- if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
- DPRINTF(PDB_PV, ("pmap_find_pv(%x): %d:%d\n", pa, bank, off));
- return &vm_physmem[bank].pmseg.pvent[off];
- } else
- return NULL;
+ DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_alloc()\n"));
+
+ simple_lock(&pvalloc_lock);
+
+ pv = pool_get(&pmap_pv_pool, 0);
+
+ simple_unlock(&pvalloc_lock);
+
+ DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_alloc: %p\n", pv));
+
+ return (pv);
}
-/*
- * Flush caches and TLB entries refering to physical page pa. If cmp is
- * non-zero, we do not affect the cache or TLB entires for that mapping.
- */
static __inline void
-pmap_clear_pv(paddr_t pa, struct pv_entry *cpv)
+pmap_pv_free(struct pv_entry *pv)
{
- struct pv_entry *pv;
+ simple_lock(&pvalloc_lock);
- DPRINTF(PDB_FOLLOW | PDB_PV, ("pmap_clear_pv(%x,%p)\n", pa, cpv));
+ if (pv->pv_ptp)
+ pmap_pde_release(pv->pv_pmap, pv->pv_va, pv->pv_ptp);
- if (!(pv = pmap_find_pv(pa)) || !pv->pv_pmap)
- return;
+ pool_put(&pmap_pv_pool, pv);
- for (; pv; pv = pv->pv_next) {
- if (pv == cpv)
- continue;
- DPRINTF(PDB_PV,
- ("pmap_clear_pv: %x:%x\n", pv->pv_space, pv->pv_va));
- /*
- * have to clear the icache first since fic uses the dtlb.
- */
- ficache(pv->pv_space, pv->pv_va, NBPG);
- pitlb(pv->pv_space, pv->pv_va);
-
- fdcache(pv->pv_space, pv->pv_va, NBPG);
- pdtlb(pv->pv_space, pv->pv_va);
-
- pmap_clear_va(pv->pv_space, pv->pv_va);
- }
+ simple_unlock(&pvalloc_lock);
+}
+
+static __inline void
+pmap_pv_enter(struct pv_head *pvh, struct pv_entry *pve, struct pmap *pm,
+ vaddr_t va, struct vm_page *pdep)
+{
+ DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_enter(%p, %p, %p, 0x%x, %p)\n",
+ pvh, pve, pm, va, pdep));
+
+ pve->pv_pmap = pm;
+ pve->pv_va = va;
+ pve->pv_ptp = pdep;
+ simple_lock(&pvh->pvh_lock); /* lock pv_head */
+ pve->pv_next = pvh->pvh_list;
+ pvh->pvh_list = pve;
+ simple_unlock(&pvh->pvh_lock); /* unlock, done! */
+}
+
+static __inline struct pv_entry *
+pmap_pv_remove(struct pv_head *pvh, struct pmap *pmap, vaddr_t va)
+{
+ struct pv_entry **pve, *pv;
+
+ for(pv = *(pve = &pvh->pvh_list); pv; pv = *(pve = &(*pve)->pv_next))
+ if (pv->pv_pmap == pmap && pv->pv_va == va) {
+ *pve = pv->pv_next;
+ break;
+ }
+ return (pv);
}
-/*
- * Bootstrap the system enough to run with virtual memory.
- * Map the kernel's code and data, and allocate the system page table.
- * Called with mapping OFF.
- *
- * Parameters:
- * vstart PA of first available physical page
- * vend PA of last available physical page
- */
void
-pmap_bootstrap(vstart, vend)
- vaddr_t *vstart;
- vaddr_t *vend;
+pmap_bootstrap(vstart)
+ vaddr_t vstart;
{
- extern int maxproc; /* used to estimate pv_entries pool size */
- extern u_int totalphysmem;
- vaddr_t addr;
+ extern char etext;
+ extern u_int totalphysmem, *ie_mem;
+ vaddr_t addr = hppa_round_page(vstart), t;
vsize_t size;
- struct pv_page *pvp;
- struct hpt_entry *hptp;
+#if 0 && (defined(HP7100LC_CPU) || defined(HP7300LC_CPU))
+ struct vp_entry *hptp;
+#endif
+ struct pmap *kpm;
int i;
- DPRINTF(PDB_FOLLOW, ("pmap_bootstrap(%p, %p)\n", vstart, vend));
+ DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_bootstrap(0x%x)\n", vstart));
uvm_setpagesize();
- pages_per_vm_page = PAGE_SIZE / NBPG;
- /* XXX for now */
- if (pages_per_vm_page != 1)
- panic("HPPA page != VM page");
-
- kern_prot[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE] =TLB_AR_NA;
- kern_prot[VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE] =TLB_AR_KR;
- kern_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE] =TLB_AR_KRW;
- kern_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE] =TLB_AR_KRW;
- kern_prot[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE] =TLB_AR_KRX;
- kern_prot[VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE] =TLB_AR_KRX;
- kern_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_KRWX;
- kern_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_KRWX;
-
- user_prot[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE] =TLB_AR_NA;
- user_prot[VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE] =TLB_AR_UR;
- user_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE] =TLB_AR_URW;
- user_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE] =TLB_AR_URW;
- user_prot[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE] =TLB_AR_URX;
- user_prot[VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE] =TLB_AR_URX;
- user_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_URWX;
- user_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_URWX;
+ hppa_prot[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE] =TLB_AR_NA;
+ hppa_prot[VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE] =TLB_AR_R;
+ hppa_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE] =TLB_AR_RW;
+ hppa_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE] =TLB_AR_RW;
+ hppa_prot[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE] =TLB_AR_RX;
+ hppa_prot[VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE] =TLB_AR_RX;
+ hppa_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_RWX;
+ hppa_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_RWX;
/*
* Initialize kernel pmap
*/
- kernel_pmap = &kernel_pmap_store;
-#if NCPUS > 1
- lock_init(&pmap_lock, FALSE, ETAP_VM_PMAP_SYS, ETAP_VM_PMAP_SYS_I);
-#endif /* NCPUS > 1 */
- simple_lock_init(&kernel_pmap->pmap_lock);
- simple_lock_init(&pmap_freelock);
- simple_lock_init(&sid_pid_lock);
-
- kernel_pmap->pmap_refcnt = 1;
- kernel_pmap->pmap_space = HPPA_SID_KERNEL;
+ kpm = &kernel_pmap_store;
+ bzero(kpm, sizeof(*kpm));
+ simple_lock_init(&kpm->pm_obj.vmobjlock);
+ kpm->pm_obj.pgops = NULL;
+ TAILQ_INIT(&kpm->pm_obj.memq);
+ kpm->pm_obj.uo_npages = 0;
+ kpm->pm_obj.uo_refs = 1;
+ kpm->pm_space = HPPA_SID_KERNEL;
+ kpm->pm_pid = HPPA_PID_KERNEL;
+ kpm->pm_pdir_pg = NULL;
+ kpm->pm_pdir = addr;
+ addr += PAGE_SIZE;
+ bzero((void *)addr, PAGE_SIZE);
+ fdcache(HPPA_SID_KERNEL, addr, PAGE_SIZE);
/*
* Allocate various tables and structures.
*/
- addr = hppa_round_page(*vstart);
- virtual_end = *vend;
- pvp = (struct pv_page *)addr;
-
- mfctl(CR_HPTMASK, size);
- addr = (addr + size) & ~(size);
-
- DPRINTF(PDB_INIT, ("pmap_bootstrap: allocating %d pv_pages\n",
- (struct pv_page *)addr - pvp));
-
- TAILQ_INIT(&pv_page_freelist);
- for (; pvp + 1 <= (struct pv_page *)addr; pvp++)
- pmap_insert_pvp(pvp, 1);
-
- /* Allocate the HPT */
- for (hptp = (struct hpt_entry *)addr;
- ((u_int)hptp - addr) <= size; hptp++) {
- hptp->hpt_valid = 0;
- hptp->hpt_vpn = 0;
- hptp->hpt_space = -1;
- hptp->hpt_tlbpage = 0;
- hptp->hpt_tlbprot = 0;
- hptp->hpt_entry = NULL;
- }
-
- DPRINTF(PDB_INIT, ("hpt_table: 0x%x @ %p\n", size + 1, addr));
- /* load cr25 with the address of the HPT table
- NB: It sez CR_VTOP, but we (and the TLB handlers) know better ... */
mtctl(addr, CR_VTOP);
- addr += size + 1;
+ bzero((void *)addr, (hppa_sid_max + 1) * 4);
+ fdcache(HPPA_SID_KERNEL, addr, (hppa_sid_max + 1) * 4);
+ printf("vtop: 0x%x @ 0x%x\n", (hppa_sid_max + 1) * 4, addr);
+ addr += (hppa_sid_max + 1) * 4;
+ pmap_sdir_set(HPPA_SID_KERNEL, kpm->pm_pdir);
+
+ ie_mem = (u_int *)addr;
+ addr += 0x8000;
+
+#if 0 && (defined(HP7100LC_CPU) || defined(HP7300LC_CPU))
+ if (pmap_hptsize && (cpu_type == hpcxl || cpu_type == hpcxl2)) {
+ int error;
+
+ if (pmap_hptsize > pdc_hwtlb.max_size)
+ pmap_hptsize = pdc_hwtlb.max_size;
+ else if (pmap_hptsize < pdc_hwtlb.min_size)
+ pmap_hptsize = pdc_hwtlb.min_size;
+
+ size = pmap_hptsize * sizeof(*hptp);
+ bzero((void *)addr, size);
+ /* Allocate the HPT */
+ for (hptp = (struct vp_entry *)addr, i = pmap_hptsize; i--;)
+ hptp[i].vp_tag = 0xffff;
+
+ DPRINTF(PDB_INIT, ("hpt_table: 0x%x @ %p\n", size, addr));
+
+ if ((error = (cpu_hpt_init)(addr, size)) < 0) {
+ printf("WARNING: HPT init error %d\n", error);
+ } else {
+ printf("HPT: %d entries @ 0x%x\n",
+ pmap_hptsize / sizeof(struct vp_entry), addr);
+ }
- /*
- * we know that btlb_insert() will round it up to the next
- * power of two at least anyway
- */
- for (physmem = 1; physmem < btoc(addr); physmem *= 2);
+ /* TODO find a way to avoid using cr*, use cpu regs instead */
+ mtctl(addr, CR_VTOP);
+ mtctl(size - 1, CR_HPTMASK);
+ addr += size;
+ }
+#endif /* HP7100LC_CPU | HP7300LC_CPU */
/* map the kernel space, which will give us virtual_avail */
- *vstart = hppa_round_page(addr + (totalphysmem - physmem) *
- (sizeof(struct pv_entry) * maxproc / 8 +
- sizeof(struct vm_page)));
+ vstart = hppa_round_page(addr + (totalphysmem - (atop(addr))) *
+ (16 + sizeof(struct pv_head) + sizeof(struct vm_page)));
/* XXX PCXS needs two separate inserts in separate btlbs */
- if (btlb_insert(HPPA_SID_KERNEL, 0, 0, vstart,
+ t = (vaddr_t)&etext;
+ if (btlb_insert(HPPA_SID_KERNEL, 0, 0, &t,
pmap_sid2pid(HPPA_SID_KERNEL) |
- pmap_prot(kernel_pmap, VM_PROT_ALL)) < 0)
+ pmap_prot(pmap_kernel(), VM_PROT_READ|VM_PROT_EXECUTE)) < 0)
+ panic("pmap_bootstrap: cannot block map kernel text");
+ t = vstart - (vaddr_t)&etext;
+ if (btlb_insert(HPPA_SID_KERNEL, (vaddr_t)&etext, (vaddr_t)&etext, &t,
+ pmap_sid2pid(HPPA_SID_KERNEL) | TLB_UNCACHABLE |
+ pmap_prot(pmap_kernel(), VM_PROT_ALL)) < 0)
panic("pmap_bootstrap: cannot block map kernel");
- virtual_avail = *vstart;
+ vstart = (vaddr_t)&etext + t;
+ virtual_avail = vstart;
+ kpm->pm_stats.wired_count = kpm->pm_stats.resident_count =
+ physmem = atop(vstart);
/*
* NOTE: we no longer trash the BTLB w/ unused entries,
* lazy map only needed pieces (see bus_mem_add_mapping() for refs).
*/
- size = hppa_round_page(sizeof(struct pv_entry) * totalphysmem);
+ addr = hppa_round_page(addr);
+ size = hppa_round_page(sizeof(struct pv_head) * totalphysmem);
bzero ((caddr_t)addr, size);
- DPRINTF(PDB_INIT, ("pv_array: 0x%x @ 0x%x\n", size, addr));
+ DPRINTF(PDB_INIT, ("pmseg.pvent: 0x%x @ 0x%x\n", size, addr));
+ /* XXX we might need to split this for isa */
virtual_steal = addr + size;
i = atop(virtual_avail - virtual_steal);
uvm_page_physload(0, totalphysmem + i,
atop(virtual_avail), totalphysmem + i, VM_FREELIST_DEFAULT);
/* we have only one initial phys memory segment */
- vm_physmem[0].pmseg.pvent = (struct pv_entry *)addr;
- /* mtctl(addr, CR_PSEG); */
-
- /* here will be a hole due to the kernel memory alignment
- and we use it for pmap_steal_memory */
-}
-
-void
-pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
-{
- *vstartp = virtual_avail;
- *vendp = virtual_end;
+ vm_physmem[0].pmseg.pvhead = (struct pv_head *)addr;
}
/*
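
One invariant ties the asm and C sides of this hunk together: the protection id the miss handlers build ("dep r8, 30, 15, r25" then "addi 2, r25, r25" in TLB_PULL) and the pm_pid that pmap_create() below stores are the same value, pid = (sid + 1) << 1. The least significant protection-id bit is PA-RISC's write-disable bit, so usable pids are even, and the kernel's sid 0 yields pid 2. A one-line sketch:

	typedef unsigned int u_int, pa_space_t;

	static u_int
	sid_to_pid(pa_space_t sid)
	{
		/* low (write-disable) bit stays clear; sid 0 -> pid 2 */
		return ((sid + 1) << 1);
	}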
@@ -738,13 +519,13 @@ pmap_steal_memory(size, startp, endp)
{
vaddr_t va;
- DPRINTF(PDB_FOLLOW,
+ DPRINTF(PDB_FOLLOW|PDB_STEAL,
("pmap_steal_memory(%x, %x, %x)\n", size, startp, endp));
if (startp)
*startp = virtual_avail;
if (endp)
- *endp = virtual_end;
+ *endp = VM_MAX_KERNEL_ADDRESS;
size = hppa_round_page(size);
if (size <= virtual_avail - virtual_steal) {
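
pmap_steal_memory() hands out the slack the bootstrap left between virtual_steal and virtual_avail: pages there are physically backed and already covered by the boot BTLB mappings, so no mapping work is needed. A sketch of the presumed bookkeeping (the bump itself falls outside the hunks shown here):

	typedef unsigned long vaddr_t, vsize_t;
	extern vaddr_t virtual_steal, virtual_avail;

	static vaddr_t
	steal_sketch(vsize_t size)	/* size already page-rounded */
	{
		vaddr_t va = 0;

		if (size <= virtual_avail - virtual_steal) {
			va = virtual_steal;
			virtual_steal += size;
		}
		return (va);
	}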
@@ -765,38 +546,20 @@ pmap_steal_memory(size, startp, endp)
return va;
}
-/*
- * Finishes the initialization of the pmap module.
- * This procedure is called from vm_mem_init() in vm/vm_init.c
- * to initialize any remaining data structures that the pmap module
- * needs to map virtual memory (VM is already ON).
- */
void
pmap_init()
{
- struct pv_page *pvp;
+ DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_init()\n"));
-#ifdef PMAPDEBUG
- int opmapdebug = pmapdebug;
- DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
- pmapdebug = 0;
-#endif
+ pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
+ &pool_allocator_nointr);
+ pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pmappv",
+ &pmap_allocator_pv);
+ /* deplete the steal area */
+ pool_prime(&pmap_pv_pool, (virtual_avail - virtual_steal) / PAGE_SIZE *
+ pmap_pv_pool.pr_itemsperpage);
- /* allocate the rest of the steal area for pv_pages */
-#ifdef PMAPDEBUG
- printf("pmap_init: %d pv_pages @ %x allocated\n",
- (virtual_avail - virtual_steal) / sizeof(struct pv_page),
- virtual_steal);
-#endif
- while ((pvp = (struct pv_page *)
- pmap_steal_memory(sizeof(*pvp), NULL, NULL)))
- pmap_insert_pvp(pvp, 1);
-
-#ifdef PMAPDEBUG
- pmapdebug = opmapdebug /* | PDB_VA | PDB_PV */;
-#endif
- TAILQ_INIT(&pmap_freelist);
- sid_counter = HPPA_SID_KERNEL + 1;
+ simple_lock_init(&pvalloc_lock);
pmap_initialized = TRUE;
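
The pool_prime() count above is sized so the pv pool swallows the whole leftover steal area up front: the number of stealable pages times the pool's items per page. A sketch of the arithmetic, with names taken from the hunk:

	typedef unsigned long vaddr_t;
	extern vaddr_t virtual_steal, virtual_avail;

	static int
	pv_prime_count(unsigned int itemsperpage, unsigned int pagesize)
	{
		/* every steal-area page becomes pr_itemsperpage pv items */
		return ((virtual_avail - virtual_steal) / pagesize *
		    itemsperpage);
	}

Priming this way keeps pmap_pv_page_alloc() away from uvm until the bootstrap reserve is genuinely exhausted.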
@@ -807,465 +570,497 @@ pmap_init()
*
* no spls since no interrupts.
*/
- pmap_enter_pv(pmap_kernel(), SYSCALLGATE, TLB_GATE_PROT,
- tlbbtop((paddr_t)&gateway_page),
- pmap_find_pv((paddr_t)&gateway_page));
-}
+ {
+ pt_entry_t *pde;
-/*
- * Initialize a preallocated and zeroed pmap structure,
- * such as one in a vmspace structure.
- */
-void
-pmap_pinit(pmap)
- pmap_t pmap;
-{
- pa_space_t sid;
- int s;
-
- DPRINTF(PDB_FOLLOW, ("pmap_pinit(%p)\n", pmap));
-
- if (!(sid = pmap->pmap_space)) {
+ if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, SYSCALLGATE)) &&
+ !(pde = pmap_pde_alloc(pmap_kernel(), SYSCALLGATE, NULL)))
+ panic("pmap_init: cannot allocate pde");
- /*
- * Allocate space and protection IDs for the pmap.
- * If all are allocated, there is nothing we can do.
- */
- s = splimp();
- if (sid_counter < HPPA_SID_MAX) {
- sid = sid_counter;
- sid_counter++;
- } else
- sid = 0;
- splx(s);
-
- if (sid == 0)
- panic("no more space ids\n");
-
- simple_lock_init(&pmap->pmap_lock);
+ pmap_pte_set(pde, SYSCALLGATE, (paddr_t)&gateway_page |
+ PTE_PROT(TLB_GATE_PROT));
}
-
- s = splimp();
- pmap->pmap_space = sid;
- pmap->pmap_refcnt = 1;
- pmap->pmap_stats.resident_count = 0;
- pmap->pmap_stats.wired_count = 0;
- splx(s);
}
-/*
- * pmap_create()
- *
- * Create and return a physical map.
- * the map is an actual physical map, and may be referenced by the hardware.
- */
-pmap_t
+struct pmap *
pmap_create()
{
- register pmap_t pmap;
- int s;
+ struct pmap *pmap;
+ pa_space_t space;
- DPRINTF(PDB_FOLLOW, ("pmap_create()\n"));
+ DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_create()\n"));
+ pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
- /*
- * If there is a pmap in the pmap free list, reuse it.
- */
- s = splimp();
- if (pmap_nfree) {
- pmap = pmap_freelist.tqh_first;
- TAILQ_REMOVE(&pmap_freelist, pmap, pmap_list);
- pmap_nfree--;
- splx(s);
- } else {
- splx(s);
- MALLOC(pmap, struct pmap *, sizeof(*pmap), M_VMMAP, M_NOWAIT);
- if (pmap == NULL)
- return NULL;
- bzero(pmap, sizeof(*pmap));
- }
+ simple_lock_init(&pmap->pm_obj.vmobjlock);
+ pmap->pm_obj.pgops = NULL; /* currently not a mappable object */
+ TAILQ_INIT(&pmap->pm_obj.memq);
+ pmap->pm_obj.uo_npages = 0;
+ pmap->pm_obj.uo_refs = 1;
+ pmap->pm_stats.wired_count = 0;
+ pmap->pm_stats.resident_count = 1;
- pmap_pinit(pmap);
+ if (pmap_sid_counter >= hppa_sid_max) {
+ /* collect some */
+ panic("pmap_create: outer space");
+ } else
+ space = ++pmap_sid_counter;
+
+ pmap->pm_space = space;
+ pmap->pm_pid = (space + 1) << 1;
+ pmap->pm_pdir_pg = uvm_pagealloc(NULL, 0, NULL,
+ UVM_PGA_USERESERVE|UVM_PGA_ZERO);
+ if (!pmap->pm_pdir_pg)
+ panic("pmap_create: no pages");
+ pmap->pm_pdir = VM_PAGE_TO_PHYS(pmap->pm_pdir_pg);
+
+ pmap_sdir_set(space, pmap->pm_pdir);
return(pmap);
}
-/*
- * pmap_destroy(pmap)
- * Gives up a reference to the specified pmap. When the reference count
- * reaches zero the pmap structure is added to the pmap free list.
- * Should only be called if the map contains no valid mappings.
- */
void
pmap_destroy(pmap)
- pmap_t pmap;
+ struct pmap *pmap;
{
- int ref_count;
- int s;
-
- DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
-
- if (!pmap)
- return;
+ struct vm_page *pg;
+ int refs;
- s = splimp();
+ DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_destroy(%p)\n", pmap));
- ref_count = --pmap->pmap_refcnt;
+ simple_lock(&pmap->pm_obj.vmobjlock);
+ refs = --pmap->pm_obj.uo_refs;
+ simple_unlock(&pmap->pm_obj.vmobjlock);
- if (ref_count < 0)
- panic("pmap_destroy(): ref_count < 0");
- if (!ref_count) {
- assert(pmap->pmap_stats.resident_count == 0);
+ if (refs > 0)
+ return;
- /*
- * Add the pmap to the pmap free list
- * We cannot free() disposed pmaps because of
- * PID shortage of 2^16
- * (do some random pid allocation later)
- */
- TAILQ_INSERT_HEAD(&pmap_freelist, pmap, pmap_list);
- pmap_nfree++;
+ TAILQ_FOREACH(pg, &pmap->pm_obj.memq, listq) {
+#ifdef DIAGNOSTIC
+ if (pg->flags & PG_BUSY)
+ panic("pmap_release: busy page table page");
+#endif
+ pg->wire_count = 0;
+ uvm_pagefree(pg);
}
- splx(s);
+
+ uvm_pagefree(pmap->pm_pdir_pg);
+ pmap->pm_pdir_pg = NULL; /* XXX cache it? */
+	pmap_sdir_set(pmap->pm_space, 0);
+ pool_put(&pmap_pmap_pool, pmap);
}
+
/*
- * pmap_enter(pmap, va, pa, prot, flags)
- * Create a translation for the virtual address (va) to the physical
- * address (pa) in the pmap with the protection requested. If the
- * translation is wired then we can not allow a page fault to occur
- * for this mapping.
+ * Add a reference to the specified pmap.
*/
+void
+pmap_reference(pmap)
+ struct pmap *pmap;
+{
+ DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_reference(%p)\n", pmap));
+
+ simple_lock(&pmap->pm_obj.vmobjlock);
+ pmap->pm_obj.uo_refs++;
+ simple_unlock(&pmap->pm_obj.vmobjlock);
+}
+
+void
+pmap_collect(struct pmap *pmap)
+{
+ DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_collect(%p)\n", pmap));
+ /* nothing yet */
+}
+
int
pmap_enter(pmap, va, pa, prot, flags)
- pmap_t pmap;
+ struct pmap *pmap;
vaddr_t va;
paddr_t pa;
vm_prot_t prot;
int flags;
{
- register struct pv_entry *pv, *ppv;
- u_int tlbpage, tlbprot;
- pa_space_t space;
- boolean_t waswired;
+ pt_entry_t *pde, pte;
+ struct vm_page *ptp = NULL;
+ struct pv_head *pvh;
+ struct pv_entry *pve;
+ int bank, off;
boolean_t wired = (flags & PMAP_WIRED) != 0;
- int s;
- pa = hppa_trunc_page(pa);
- va = hppa_trunc_page(va);
- space = pmap_sid(pmap, va);
-#ifdef PMAPDEBUG
- if (pmapdebug & PDB_FOLLOW &&
- (!pmap_initialized || pmapdebug & PDB_ENTER))
- printf("pmap_enter(%p, %x, %x, %x, %swired)\n", pmap, va, pa,
- prot, wired? "" : "un");
-#endif
+ DPRINTF(PDB_FOLLOW|PDB_ENTER,
+ ("pmap_enter(%p, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+ pmap, va, pa, prot, flags));
- s = splimp(); /* are we already high enough? XXX */
+ simple_lock(&pmap->pm_obj.vmobjlock);
- if (!(pv = pmap_find_pv(pa)))
- panic("pmap_enter: pmap_find_pv failed");
+ if (!(pde = pmap_pde_get(pmap->pm_pdir, va)) &&
+ !(pde = pmap_pde_alloc(pmap, va, &ptp))) {
+		if (flags & PMAP_CANFAIL) {
+			simple_unlock(&pmap->pm_obj.vmobjlock);
+			return (KERN_RESOURCE_SHORTAGE);
+		}
- tlbpage = tlbbtop(pa);
- tlbprot = TLB_UNCACHEABLE | pmap_prot(pmap, prot) | pmap_sid2pid(space);
+		panic("pmap_enter: cannot allocate pde");
+ }
- if (!(ppv = pmap_find_va(space, va))) {
- /*
- * Mapping for this virtual address doesn't exist.
- * Enter a new mapping.
- */
- DPRINTF(PDB_ENTER, ("pmap_enter: new mapping\n"));
- pv = pmap_enter_pv(pmap, va, tlbprot, tlbpage, pv);
- pmap->pmap_stats.resident_count++;
+ if (!ptp)
+ ptp = pmap_pde_ptp(pmap, pde);
- } else {
+ if ((pte = pmap_pte_get(pde, va))) {
- /* see if we are remapping the page to another PA */
- if (ppv->pv_tlbpage != tlbpage) {
- DPRINTF(PDB_ENTER, ("pmap_enter: moving pa %x -> %x\n",
- ppv->pv_tlbpage, tlbpage));
- /* update tlbprot to avoid extra subsequent fault */
- pmap_remove_pv(ppv, pmap_find_pv(tlbptob(ppv->pv_tlbpage)));
- pv = pmap_enter_pv(pmap, va, tlbprot, tlbpage, pv);
- } else {
- /* We are just changing the protection. */
- DPRINTF(PDB_ENTER, ("pmap_enter: changing %b->%b\n",
- ppv->pv_tlbprot, TLB_BITS, tlbprot, TLB_BITS));
- pv = ppv;
- ppv->pv_tlbprot = (tlbprot & ~TLB_PID_MASK) |
- (ppv->pv_tlbprot & ~(TLB_AR_MASK|TLB_PID_MASK));
- pmap_clear_pv(pa, NULL);
+ DPRINTF(PDB_ENTER,
+ ("pmap_enter: remapping 0x%x -> 0x%x\n", pte, pa));
+
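+		/*
+		 * Flush the old translation: the icache only if the page
+		 * was executable, the dcache always, and purge both TLB
+		 * entries before the PTE is replaced.
+		 */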
+ if (pte & PTE_PROT(TLB_EXECUTE))
+ ficache(pmap->pm_space, va, NBPG);
+ pitlb(pmap->pm_space, va);
+ fdcache(pmap->pm_space, va, NBPG);
+ pdtlb(pmap->pm_space, va);
+
+		if (wired && (pte & PTE_PROT(TLB_WIRED)) == 0)
+ pmap->pm_stats.wired_count++;
+ else if (!wired && (pte & PTE_PROT(TLB_WIRED)) != 0)
+ pmap->pm_stats.wired_count--;
+
+ if (PTE_PAGE(pte) == pa) {
+ DPRINTF(PDB_FOLLOW|PDB_ENTER,
+ ("pmap_enter: same page\n"));
+ goto enter;
}
+
+ bank = vm_physseg_find(atop(PTE_PAGE(pte)), &off);
+ if (bank != -1) {
+ pvh = &vm_physmem[bank].pmseg.pvhead[off];
+ simple_lock(&pvh->pvh_lock);
+ pve = pmap_pv_remove(pvh, pmap, va);
+ pvh->pvh_attrs |= pmap_pvh_attrs(pte);
+ simple_unlock(&pvh->pvh_lock);
+ } else
+ pve = NULL;
+ } else {
+ DPRINTF(PDB_ENTER,
+ ("pmap_enter: new mapping 0x%x -> 0x%x\n", va, pa));
+ pte = PTE_PROT(TLB_REFTRAP);
+ pve = NULL;
+ pmap->pm_stats.resident_count++;
+ if (wired)
+ pmap->pm_stats.wired_count++;
+ if (ptp)
+ ptp->wire_count++;
}
- /*
- * Add in software bits and adjust statistics
- */
- waswired = pv->pv_tlbprot & TLB_WIRED;
- if (wired && !waswired) {
- pv->pv_tlbprot |= TLB_WIRED;
- pmap->pmap_stats.wired_count++;
- } else if (!wired && waswired) {
- pv->pv_tlbprot &= ~TLB_WIRED;
- pmap->pmap_stats.wired_count--;
+ bank = vm_physseg_find(atop(pa), &off);
+ if (pmap_initialized && bank != -1) {
+ if (!pve && !(pve = pmap_pv_alloc())) {
+ if (flags & PMAP_CANFAIL) {
+ simple_unlock(&pmap->pm_obj.vmobjlock);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ panic("pmap_enter: no pv entries available");
+ }
+ pvh = &vm_physmem[bank].pmseg.pvhead[off];
+ pmap_pv_enter(pvh, pve, pmap, va, ptp);
+ } else {
+ pvh = NULL;
+ if (pve)
+ pmap_pv_free(pve);
}
- splx(s);
- simple_unlock(&pmap->pmap_lock);
- DPRINTF(PDB_ENTER, ("pmap_enter: leaving\n"));
+enter:
+ /* preserve old ref & mod */
+ pte = pa | PTE_PROT(pmap_prot(pmap, prot)) |
+ (pte & PTE_PROT(TLB_UNCACHABLE|TLB_DIRTY|TLB_REFTRAP));
+ if (wired)
+ pte |= PTE_PROT(TLB_WIRED);
+ pmap_pte_set(pde, va, pte);
+
+ simple_unlock(&pmap->pm_obj.vmobjlock);
+
+ DPRINTF(PDB_FOLLOW, ("pmap_enter: leaving\n"));
return (0);
}
-/*
- * pmap_remove(pmap, sva, eva)
- * unmaps all virtual addresses v in the virtual address
- * range determined by [sva, eva) and pmap.
- * sva and eva must be on machine independent page boundaries and
- * sva must be less than or equal to eva.
- */
void
pmap_remove(pmap, sva, eva)
- register pmap_t pmap;
- register vaddr_t sva;
- register vaddr_t eva;
+ struct pmap *pmap;
+ vaddr_t sva;
+ vaddr_t eva;
{
- register struct pv_entry *pv;
- register pa_space_t space;
- int s;
+ struct pv_head *pvh;
+ struct pv_entry *pve;
+ pt_entry_t *pde, pte;
+ int bank, off;
+ u_int pdemask;
- DPRINTF(PDB_FOLLOW, ("pmap_remove(%p, %x, %x)\n", pmap, sva, eva));
+ DPRINTF(PDB_FOLLOW|PDB_REMOVE,
+	    ("pmap_remove(%p, 0x%x, 0x%x)\n", pmap, sva, eva));
- if(!pmap)
- return;
+ simple_lock(&pmap->pm_obj.vmobjlock);
- s = splimp();
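+	/*
+	 * Start with an impossible pdemask so the first iteration
+	 * always fetches a PDE; whenever a 4MB region has no PDE at
+	 * all, skip straight to the next PDE boundary.
+	 */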
+ for (pdemask = sva + 1; sva < eva; sva += PAGE_SIZE) {
+ if (pdemask != (sva & PDE_MASK)) {
+ pdemask = sva & PDE_MASK;
+ if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
+ sva += ~PDE_MASK + 1 - PAGE_SIZE;
+ continue;
+ }
+ }
- sva = hppa_trunc_page(sva);
- space = pmap_sid(pmap, sva);
-
- while (pmap->pmap_stats.resident_count && ((sva < eva))) {
- pv = pmap_find_va(space, sva);
-
- DPRINTF(PDB_REMOVE, ("pmap_remove: removing %p for 0x%x:0x%x\n",
- pv, space, sva));
- if (pv) {
- pmap->pmap_stats.resident_count--;
- if (pv->pv_tlbprot & TLB_WIRED)
- pmap->pmap_stats.wired_count--;
- pmap_remove_pv(pv,
- pmap_find_pv(tlbptob(pv->pv_tlbpage)));
+ if ((pte = pmap_pte_get(pde, sva))) {
+
+ if (pte & PTE_PROT(TLB_WIRED))
+ pmap->pm_stats.wired_count--;
+ pmap->pm_stats.resident_count--;
+
+			if (pte & PTE_PROT(TLB_EXECUTE))
+ ficache(pmap->pm_space, sva, PAGE_SIZE);
+ pitlb(pmap->pm_space, sva);
+ fdcache(pmap->pm_space, sva, PAGE_SIZE);
+ pdtlb(pmap->pm_space, sva);
+
+ pmap_pte_set(pde, sva, 0);
+
+ bank = vm_physseg_find(atop(pte), &off);
+ if (pmap_initialized && bank != -1) {
+ pvh = &vm_physmem[bank].pmseg.pvhead[off];
+ pve = pmap_pv_remove(pvh, pmap, sva);
+ if (pve) {
+ pvh->pvh_attrs |= pmap_pvh_attrs(pte);
+ pmap_pv_free(pve);
+ }
+ }
}
- sva += PAGE_SIZE;
}
- splx(s);
+ simple_unlock(&pmap->pm_obj.vmobjlock);
+
+ DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("pmap_remove: leaving\n"));
}
-/*
- * pmap_page_protect(pa, prot)
- *
- * Lower the permission for all mappings to a given page.
- */
void
-pmap_page_protect(pg, prot)
- struct vm_page *pg;
+pmap_write_protect(pmap, sva, eva, prot)
+ struct pmap *pmap;
+ vaddr_t sva;
+ vaddr_t eva;
vm_prot_t prot;
{
- register struct pv_entry *pv;
- register pmap_t pmap;
- register u_int tlbprot;
- paddr_t pa = VM_PAGE_TO_PHYS(pg);
- int s;
+ pt_entry_t *pde, pte;
+ u_int tlbprot, pdemask;
- DPRINTF(PDB_FOLLOW|PDB_PROTECT,
- ("pmap_page_protect(%x, %x)\n", pa, prot));
+ DPRINTF(PDB_FOLLOW|PDB_PMAP,
+ ("pmap_write_protect(%p, %x, %x, %x)\n", pmap, sva, eva, prot));
- switch (prot) {
- case VM_PROT_ALL:
- return;
- case VM_PROT_READ:
- case VM_PROT_READ|VM_PROT_EXECUTE:
- s = splimp();
- if (!(pv = pmap_find_pv(pa)) || !pv->pv_pmap) {
- splx(s);
- break;
+ sva = hppa_trunc_page(sva);
+ tlbprot = PTE_PROT(pmap_prot(pmap, prot));
+
+ simple_lock(&pmap->pm_obj.vmobjlock);
+
+	for (pdemask = sva + 1; sva < eva; sva += PAGE_SIZE) {
+ if (pdemask != (sva & PDE_MASK)) {
+ pdemask = sva & PDE_MASK;
+ if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
+ sva += ~PDE_MASK + 1 - PAGE_SIZE;
+ continue;
+ }
}
+ if ((pte = pmap_pte_get(pde, sva))) {
- for ( ; pv; pv = pv->pv_next) {
/*
- * Compare new protection with old to see if
- * anything needs to be changed.
+ * Determine if mapping is changing.
+ * If not, nothing to do.
*/
- tlbprot = pmap_prot(pv->pv_pmap, prot);
-
- if ((pv->pv_tlbprot & TLB_AR_MASK) != tlbprot) {
- pv->pv_tlbprot &= ~TLB_AR_MASK;
- pv->pv_tlbprot |= tlbprot;
-
- /*
- * Purge the current TLB entry (if any)
- * to force a fault and reload with the
- * new protection.
- */
- ficache(pv->pv_space, pv->pv_va, NBPG);
- pitlb(pv->pv_space, pv->pv_va);
- fdcache(pv->pv_space, pv->pv_va, NBPG);
- pdtlb(pv->pv_space, pv->pv_va);
- pmap_clear_va(pv->pv_space, pv->pv_va);
- }
- }
- splx(s);
- break;
- default:
- s = splimp();
- while ((pv = pmap_find_pv(pa)) && pv->pv_pmap) {
-
- DPRINTF(PDB_PROTECT, ("pv={%p,%x:%x,%b,%x}->%p\n",
- pv->pv_pmap, pv->pv_space, pv->pv_va,
- pv->pv_tlbprot, TLB_BITS,
- tlbptob(pv->pv_tlbpage), pv->pv_hash));
- pmap = pv->pv_pmap;
- pmap_remove_pv(pv, pv);
- pmap->pmap_stats.resident_count--;
+ if ((pte & PTE_PROT(TLB_AR_MASK)) == tlbprot)
+ continue;
+
+ if (pte & PTE_PROT(TLB_EXECUTE))
+ ficache(pmap->pm_space, sva, PAGE_SIZE);
+ pitlb(pmap->pm_space, sva);
+ fdcache(pmap->pm_space, sva, PAGE_SIZE);
+ pdtlb(pmap->pm_space, sva);
+
+ pte &= ~PTE_PROT(TLB_AR_MASK);
+ pte |= tlbprot;
+ pmap_pte_set(pde, sva, pte);
}
- splx(s);
- break;
}
+
+ simple_unlock(&pmap->pm_obj.vmobjlock);
}
-/*
- * pmap_protect(pmap, s, e, prot)
- * changes the protection on all virtual addresses v in the
- * virtual address range determined by [s, e) and pmap to prot.
- * s and e must be on machine independent page boundaries and
- * s must be less than or equal to e.
- */
void
-pmap_protect(pmap, sva, eva, prot)
- pmap_t pmap;
- vaddr_t sva;
- vaddr_t eva;
- vm_prot_t prot;
+pmap_page_remove(pg)
+ struct vm_page *pg;
{
- register struct pv_entry *pv;
- u_int tlbprot;
- pa_space_t space;
-
- DPRINTF(PDB_FOLLOW,
- ("pmap_protect(%p, %x, %x, %x)\n", pmap, sva, eva, prot));
+ struct pv_head *pvh;
+ struct pv_entry *pve, *ppve;
+ pt_entry_t *pde, pte;
+ int bank, off;
- if (!pmap)
- return;
+ DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_page_remove(%p)\n", pg));
- if (prot == VM_PROT_NONE) {
- pmap_remove(pmap, sva, eva);
+ bank = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), &off);
+ if (bank == -1) {
+ printf("pmap_page_remove: unmanaged page?\n");
return;
}
- if (prot & VM_PROT_WRITE)
+
+ pvh = &vm_physmem[bank].pmseg.pvhead[off];
+ if (pvh->pvh_list == NULL)
return;
- sva = hppa_trunc_page(sva);
- space = pmap_sid(pmap, sva);
- tlbprot = pmap_prot(pmap, prot);
+ simple_lock(&pvh->pvh_lock);
- for(; sva < eva; sva += PAGE_SIZE) {
- if((pv = pmap_find_va(space, sva))) {
- /*
- * Determine if mapping is changing.
- * If not, nothing to do.
- */
- if ((pv->pv_tlbprot & TLB_AR_MASK) == tlbprot)
- continue;
+ for (pve = pvh->pvh_list; pve; ) {
+ simple_lock(&pve->pv_pmap->pm_obj.vmobjlock);
- pv->pv_tlbprot &= ~TLB_AR_MASK;
- pv->pv_tlbprot |= tlbprot;
+ pde = pmap_pde_get(pve->pv_pmap->pm_pdir, pve->pv_va);
+ pte = pmap_pte_get(pde, pve->pv_va);
+ pmap_pte_set(pde, pve->pv_va, 0);
- /*
- * Purge the current TLB entry (if any) to force
- * a fault and reload with the new protection.
- */
- ficache(space, sva, NBPG);
- pitlb(space, sva);
- fdcache(space, sva, NBPG);
- pdtlb(space, sva);
- pmap_clear_va(space, sva);
- }
+ if (pte & PTE_PROT(TLB_WIRED))
+ pve->pv_pmap->pm_stats.wired_count--;
+ pve->pv_pmap->pm_stats.resident_count--;
+
+		simple_unlock(&pve->pv_pmap->pm_obj.vmobjlock);
+
+ pvh->pvh_attrs |= pmap_pvh_attrs(pte);
+ ppve = pve;
+ pve = pve->pv_next;
+ pmap_pv_free(ppve);
}
+	pvh->pvh_list = NULL;
+	simple_unlock(&pvh->pvh_lock);
+
+ DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_page_remove: leaving\n"));
+
}
-/*
- * Routine: pmap_unwire
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
- *
- * Change the wiring for a given virtual page. This routine currently is
- * only used to unwire pages and hence the mapping entry will exist.
- */
void
pmap_unwire(pmap, va)
- pmap_t pmap;
+ struct pmap *pmap;
vaddr_t va;
{
- struct pv_entry *pv;
- int s;
+ pt_entry_t *pde, pte = 0;
- va = hppa_trunc_page(va);
- DPRINTF(PDB_FOLLOW, ("pmap_unwire(%p, %x)\n", pmap, va));
+ DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_unwire(%p, 0x%x)\n", pmap, va));
- if (!pmap)
- return;
+ simple_lock(&pmap->pm_obj.vmobjlock);
+ if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
+ pte = pmap_pte_get(pde, va);
+
+ if (pte & PTE_PROT(TLB_WIRED)) {
+ pte &= ~PTE_PROT(TLB_WIRED);
+ pmap->pm_stats.wired_count--;
+ pmap_pte_set(pde, va, pte);
+ }
+ }
+ simple_unlock(&pmap->pm_obj.vmobjlock);
+
+ DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_unwire: leaving\n"));
- simple_lock(&pmap->pmap_lock);
+#ifdef DIAGNOSTIC
+ if (!pte)
+ panic("pmap_unwire: invalid va 0x%x", va);
+#endif
+}
+
+boolean_t
+pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
+{
+ struct pv_head *pvh;
+ struct pv_entry *pve;
+ pt_entry_t *pde, pte, res;
+ int bank, off;
- s = splimp();
- if ((pv = pmap_find_va(pmap_sid(pmap, va), va)) == NULL)
- panic("pmap_unwire: can't find mapping entry");
+ DPRINTF(PDB_FOLLOW|PDB_BITS,
+ ("pmap_changebit(%p, %x, %x)\n", pg, set, clear));
- if (pv->pv_tlbprot & TLB_WIRED) {
- pv->pv_tlbprot &= ~TLB_WIRED;
- pmap->pmap_stats.wired_count--;
+ bank = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), &off);
+ if (bank == -1) {
+		printf("pmap_changebit: unmanaged page?\n");
+ return(FALSE);
}
- splx(s);
- simple_unlock(&pmap->pmap_lock);
+
+ pvh = &vm_physmem[bank].pmseg.pvhead[off];
+ res = pvh->pvh_attrs = 0;
+
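+	/*
+	 * Walk every mapping of the page: accumulate the ref/mod
+	 * state into res, flush the translation and rewrite each
+	 * PTE with the requested bits set and cleared.
+	 */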
+ simple_lock(&pvh->pvh_lock);
+	for (pve = pvh->pvh_list; pve; pve = pve->pv_next) {
+ simple_lock(&pve->pv_pmap->pm_obj.vmobjlock);
+ if ((pde = pmap_pde_get(pve->pv_pmap->pm_pdir, pve->pv_va))) {
+ pte = pmap_pte_get(pde, pve->pv_va);
+ res |= pmap_pvh_attrs(pte);
+ pte &= ~clear;
+ pte |= set;
+
+ pitlb(pve->pv_pmap->pm_space, pve->pv_va);
+ /* XXX flush only if there was mod ? */
+ fdcache(pve->pv_pmap->pm_space, pve->pv_va, PAGE_SIZE);
+ pdtlb(pve->pv_pmap->pm_space, pve->pv_va);
+
+ pmap_pte_set(pde, pve->pv_va, pte);
+ }
+ simple_unlock(&pve->pv_pmap->pm_obj.vmobjlock);
+ }
+ pvh->pvh_attrs = res;
+ simple_unlock(&pvh->pvh_lock);
+
+ return ((res & clear) != 0);
+}
+
+boolean_t
+pmap_testbit(struct vm_page *pg, u_int bits)
+{
+ struct pv_head *pvh;
+ struct pv_entry *pve;
+ pt_entry_t pte;
+ int bank, off;
+
+ DPRINTF(PDB_FOLLOW|PDB_BITS, ("pmap_testbit(%p, %x)\n", pg, bits));
+
+ bank = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), &off);
+ if (bank == -1) {
+		printf("pmap_testbit: unmanaged page?\n");
+ return(FALSE);
+ }
+
+	pvh = &vm_physmem[bank].pmseg.pvhead[off];
+	simple_lock(&pvh->pvh_lock);
+	for (pve = pvh->pvh_list; !(pvh->pvh_attrs & bits) && pve;
+ pve = pve->pv_next) {
+ simple_lock(&pve->pv_pmap->pm_obj.vmobjlock);
+ pte = pmap_vp_find(pve->pv_pmap, pve->pv_va);
+ simple_unlock(&pve->pv_pmap->pm_obj.vmobjlock);
+ pvh->pvh_attrs |= pmap_pvh_attrs(pte);
+ }
+ simple_unlock(&pvh->pvh_lock);
+
+ return ((pvh->pvh_attrs & bits) != 0);
}
-/*
- * pmap_extract(pmap, va, pap)
- * fills in the physical address corrsponding to the
- * virtual address specified by pmap and va into the
- * storage pointed to by pap and returns TRUE if the
- * virtual address is mapped. returns FALSE in not mapped.
- */
boolean_t
pmap_extract(pmap, va, pap)
- pmap_t pmap;
+ struct pmap *pmap;
vaddr_t va;
paddr_t *pap;
{
- struct pv_entry *pv;
- int s;
+ pt_entry_t pte;
+
+ DPRINTF(PDB_FOLLOW|PDB_EXTRACT, ("pmap_extract(%p, %x)\n", pmap, va));
- DPRINTF(PDB_FOLLOW, ("pmap_extract(%p, %x)\n", pmap, va));
+ simple_lock(&pmap->pm_obj.vmobjlock);
+ pte = pmap_vp_find(pmap, va);
+ simple_unlock(&pmap->pm_obj.vmobjlock);
- s = splimp();
- if (!(pv = pmap_find_va(pmap_sid(pmap, va), hppa_trunc_page(va))))
- return (FALSE);
- else {
- *pap = tlbptob(pv->pv_tlbpage) + (va & PGOFSET);
+ if (pte) {
+ if (pap)
+ *pap = (pte & ~PGOFSET) | (va & PGOFSET);
return (TRUE);
}
- splx(s);
+
+ return (FALSE);
}
-/*
- * pmap_zero_page(pa)
- *
- * Zeros the specified page.
- */
void
pmap_zero_page(pa)
- register paddr_t pa;
+ paddr_t pa;
{
extern int dcache_line_mask;
register paddr_t pe = pa + PAGE_SIZE;
@@ -1281,10 +1076,9 @@ pmap_zero_page(pa)
* instead, keep 'em pending (or verify by the book).
*/
s = splhigh();
- pmap_clear_pv(pa, NULL);
while (pa < pe) {
- __asm volatile(
+ __asm volatile( /* can use ,bc */
"stwas,ma %%r0,4(%0)\n\t"
"stwas,ma %%r0,4(%0)\n\t"
"stwas,ma %%r0,4(%0)\n\t"
@@ -1302,14 +1096,6 @@ pmap_zero_page(pa)
splx(s);
}
-/*
- * pmap_copy_page(src, dst)
- *
- * pmap_copy_page copies the src page to the destination page. If a mapping
- * can be found for the source, we use that virtual address. Otherwise, a
- * slower physical page copy must be done. The destination is always a
- * physical address sivnce there is usually no mapping for it.
- */
void
pmap_copy_page(spa, dpa)
paddr_t spa;
@@ -1322,11 +1108,10 @@ pmap_copy_page(spa, dpa)
DPRINTF(PDB_FOLLOW|PDB_PHYS, ("pmap_copy_page(%x, %x)\n", spa, dpa));
s = splhigh();
- pmap_clear_pv(spa, NULL);
- pmap_clear_pv(dpa, NULL);
+ /* XXX flush cache for the spa ??? */
while (spa < spe) {
- __asm volatile(
+ __asm volatile( /* can use ,bc */
"ldwas,ma 4(%0),%%r22\n\t"
"ldwas,ma 4(%0),%%r21\n\t"
"stwas,ma %%r22,4(%1)\n\t"
@@ -1351,239 +1136,105 @@ pmap_copy_page(spa, dpa)
splx(s);
}
-/*
- * pmap_clear_modify(pa)
- * clears the hardware modified ("dirty") bit for one
- * machine independant page starting at the given
- * physical address. phys must be aligned on a machine
- * independant page boundary.
- */
-boolean_t
-pmap_clear_modify(pg)
- struct vm_page *pg;
-{
- register struct pv_entry *pv;
- register paddr_t pa = VM_PAGE_TO_PHYS(pg);
- int s, ret;
-
- DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%x)\n", pa));
-
- s = splimp();
- for (pv = pmap_find_pv(pa); pv; pv = pv->pv_next)
- if (pv->pv_tlbprot & TLB_DIRTY) {
- pitlb(pv->pv_space, pv->pv_va);
- pdtlb(pv->pv_space, pv->pv_va);
- pv->pv_tlbprot &= ~(TLB_DIRTY);
- pmap_clear_va(pv->pv_space, pv->pv_va);
- ret = TRUE;
- }
- splx(s);
-
- return (ret);
-}
-
-/*
- * pmap_is_modified(pa)
- * returns TRUE if the given physical page has been modified
- * since the last call to pmap_clear_modify().
- */
-boolean_t
-pmap_is_modified(pg)
- struct vm_page *pg;
-{
- register struct pv_entry *pv;
- register paddr_t pa = VM_PAGE_TO_PHYS(pg);
- int s, f = 0;
-
- DPRINTF(PDB_FOLLOW, ("pmap_is_modified(%x)\n", pa));
-
- s = splhigh();
- for (pv = pmap_find_pv(pa); pv && pv->pv_pmap && !f; pv = pv->pv_next)
- f |= pv->pv_tlbprot & TLB_DIRTY;
- splx(s);
-
- return f? TRUE : FALSE;
-}
-
-/*
- * pmap_clear_reference(pa)
- * clears the hardware referenced bit in the given machine
- * independant physical page.
- *
- * Currently, we treat a TLB miss as a reference; i.e. to clear
- * the reference bit we flush all mappings for pa from the TLBs.
- */
-boolean_t
-pmap_clear_reference(pg)
- struct vm_page *pg;
+void
+pmap_kenter_pa(va, pa, prot)
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
{
- register struct pv_entry *pv;
- register paddr_t pa = VM_PAGE_TO_PHYS(pg);
- int s, ret;
-
- DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%x)\n", pa));
-
- s = splimp();
- for (pv = pmap_find_pv(pa); pv; pv = pv->pv_next)
- if (pv->pv_tlbprot & TLB_REF) {
- pitlb(pv->pv_space, pv->pv_va);
- pdtlb(pv->pv_space, pv->pv_va);
- pv->pv_tlbprot &= ~(TLB_REF);
- pmap_clear_va(pv->pv_space, pv->pv_va);
- ret = TRUE;
- }
- splx(s);
+ pt_entry_t *pde, pte;
- return (ret);
-}
-
-/*
- * pmap_is_referenced(pa)
- * returns TRUE if the given physical page has been referenced
- * since the last call to pmap_clear_reference().
- */
-boolean_t
-pmap_is_referenced(pg)
- struct vm_page *pg;
-{
- register struct pv_entry *pv;
- register paddr_t pa = VM_PAGE_TO_PHYS(pg);
- int s, f;
+ DPRINTF(PDB_FOLLOW|PDB_ENTER,
+ ("pmap_kenter_pa(%x, %x, %x)\n", va, pa, prot));
- DPRINTF(PDB_FOLLOW, ("pmap_is_referenced(%x)\n", pa));
+ if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va)) &&
+ !(pde = pmap_pde_alloc(pmap_kernel(), va, NULL)))
+ panic("pmap_kenter_pa: cannot allocate pde");
+#ifdef DIAGNOSTIC
+ if ((pte = pmap_pte_get(pde, va)))
+ panic("pmap_kenter_pa: 0x%x is already mapped %p:0x%x",
+ va, pde, pte);
+#endif
- s = splhigh();
- for (pv = pmap_find_pv(pa); pv && pv->pv_pmap && !f; pv = pv->pv_next)
- f |= pv->pv_tlbprot & TLB_REF;
- splx(s);
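+	/* kernel mappings are wired, pre-dirtied and uncached */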
+ pte = pa | PTE_PROT(TLB_UNCACHABLE|TLB_WIRED|TLB_DIRTY|pmap_prot(pmap_kernel(), prot));
+ pmap_pte_set(pde, va, pte);
- return f? TRUE : FALSE;
+ DPRINTF(PDB_FOLLOW|PDB_ENTER, ("pmap_kenter_pa: leaving\n"));
}
-#ifdef notused
void
-pmap_changebit(va, set, reset)
+pmap_kremove(va, size)
vaddr_t va;
- u_int set, reset;
+ vsize_t size;
{
- register struct pv_entry *pv;
- int s;
-
- DPRINTF(PDB_FOLLOW, ("pmap_changebit(%x, %x, %x)\n", va, set, reset));
+ pt_entry_t *pde, pte;
+ vaddr_t eva = va + size, pdemask;
- s = splimp();
- if (!(pv = pmap_find_va(HPPA_SID_KERNEL, va))) {
- splx(s);
- return;
- }
+ DPRINTF(PDB_FOLLOW|PDB_REMOVE,
+ ("pmap_kremove(%x, %x)\n", va, size));
- pv->pv_tlbprot |= set;
- pv->pv_tlbprot &= ~reset;
- splx(s);
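+	/* same impossible-pdemask page walk as in pmap_remove() */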
+ for (pdemask = va + 1; va < eva; va += PAGE_SIZE) {
+ if (pdemask != (va & PDE_MASK)) {
+ pdemask = va & PDE_MASK;
+ if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va))) {
+ va += ~PDE_MASK + 1 - PAGE_SIZE;
+ continue;
+ }
+ }
+ if (!(pte = pmap_pte_get(pde, va))) {
+#ifdef DEBUG
+ printf("pmap_kremove: unmapping unmapped 0x%x\n", va);
+#endif
+ continue;
+ }
- ficache(HPPA_SID_KERNEL, va, NBPG);
- pitlb(HPPA_SID_KERNEL, va);
+ if (pte & PTE_PROT(TLB_EXECUTE))
+ ficache(HPPA_SID_KERNEL, va, NBPG);
+ pitlb(HPPA_SID_KERNEL, va);
+ fdcache(HPPA_SID_KERNEL, va, NBPG);
+ pdtlb(HPPA_SID_KERNEL, va);
- fdcache(HPPA_SID_KERNEL, va, NBPG);
- pdtlb(HPPA_SID_KERNEL, va);
+ pmap_pte_set(pde, va, 0);
+ }
- pmap_clear_va(HPPA_SID_KERNEL, va);
+ DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("pmap_kremove: leaving\n"));
}
-#endif
-void
-pmap_kenter_pa(va, pa, prot)
- vaddr_t va;
- paddr_t pa;
- vm_prot_t prot;
+void *
+pmap_pv_page_alloc(struct pool *pp, int flags)
{
- register struct pv_entry *pv;
+ vaddr_t va;
- DPRINTF(PDB_FOLLOW|PDB_ENTER,
- ("pmap_kenter_pa(%x, %x, %x)\n", va, pa, prot));
+ DPRINTF(PDB_FOLLOW|PDB_POOL,
+ ("pmap_pv_page_alloc(%p, %x)\n", pp, flags));
- va = hppa_trunc_page(va);
- pv = pmap_find_va(HPPA_SID_KERNEL, va);
- if (pv && (pa & HPPA_IOSPACE) == HPPA_IOSPACE)
- /* if already mapped i/o space, nothing to do */
- ;
- else {
- if (pv)
- panic("pmap_kenter_pa: mapped already %x", va);
- else
- pmap_kernel()->pmap_stats.resident_count++;
-
- pv = pmap_alloc_pv();
- pv->pv_va = va;
- pv->pv_pmap = pmap_kernel();
- pv->pv_space = HPPA_SID_KERNEL;
- pv->pv_tlbpage = tlbbtop(pa);
- pv->pv_tlbprot = TLB_WIRED | TLB_DIRTY | TLB_REF |
- HPPA_PID_KERNEL | pmap_prot(pmap_kernel(), prot) |
- ((pa & HPPA_IOSPACE) == HPPA_IOSPACE? TLB_UNCACHEABLE : 0);
- pmap_enter_va(pv);
+ if ((va = pmap_steal_memory(PAGE_SIZE, NULL, NULL)))
+ return (void *)va;
+
+ /*
+ TODO
+ if (list not empty) {
+ get from the list;
+ return (va);
}
+ */
- DPRINTF(PDB_ENTER, ("pmap_kenter_pa: leaving\n"));
-}
+ DPRINTF(PDB_FOLLOW|PDB_POOL,
+ ("pmap_pv_page_alloc: uvm_km_alloc_poolpage1\n"));
-void
-pmap_kremove(va, size)
- vaddr_t va;
- vsize_t size;
-{
- register struct pv_entry *pv;
-
- for (va = hppa_trunc_page(va); size > 0;
- size -= PAGE_SIZE, va += PAGE_SIZE) {
- pv = pmap_find_va(HPPA_SID_KERNEL, va);
- if (pv) {
- ficache(pv->pv_space, pv->pv_va, NBPG);
- pitlb(pv->pv_space, pv->pv_va);
- fdcache(pv->pv_space, pv->pv_va, NBPG);
- pdtlb(pv->pv_space, pv->pv_va);
- pmap_remove_va(pv);
- } else
- DPRINTF(PDB_REMOVE,
- ("pmap_kremove: no pv for 0x%x\n", va));
- }
+ return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
+ (flags & PR_WAITOK) ? TRUE : FALSE));
}
-#if defined(PMAPDEBUG) && defined(DDB)
-#include <ddb/db_output.h>
-/*
- * prints whole va->pa (aka HPT or HVT)
- */
void
-pmap_hptdump(sp)
- int sp;
+pmap_pv_page_free(struct pool *pp, void *v)
{
- register struct hpt_entry *hpt, *ehpt;
- register struct pv_entry *pv;
- register int hpthf;
-
- mfctl(CR_HPTMASK, ehpt);
- mfctl(CR_VTOP, hpt);
- ehpt = (struct hpt_entry *)((int)hpt + (int)ehpt + 1);
- db_printf("HPT dump %p-%p:\n", hpt, ehpt);
- for (hpthf = 0; hpt < ehpt; hpt++, hpthf = 0)
- for (pv = hpt->hpt_entry; pv; pv = pv->pv_hash)
- if (sp < 0 || sp == pv->pv_space) {
- if (!hpthf) {
- db_printf(
- "hpt@%p: %x{%sv=%x:%x},%b,%x\n",
- hpt, *(u_int *)hpt,
- (hpt->hpt_valid?"ok,":""),
- hpt->hpt_space, hpt->hpt_vpn << 9,
- hpt->hpt_tlbprot, TLB_BITS,
- tlbptob(hpt->hpt_tlbpage));
-
- hpthf++;
- }
- db_printf(" pv={%p,%x:%x,%b,%x}->%p\n",
- pv->pv_pmap, pv->pv_space, pv->pv_va,
- pv->pv_tlbprot, TLB_BITS,
- tlbptob(pv->pv_tlbpage), pv->pv_hash);
- }
+ vaddr_t va = (vaddr_t)v;
+
+ DPRINTF(PDB_FOLLOW|PDB_POOL, ("pmap_pv_page_free(%p, %p)\n", pp, v));
+
+ if (va < virtual_avail) {
+ /* TODO save on list somehow */
+ } else
+ uvm_km_free_poolpage1(kernel_map, va);
}
-#endif
diff --git a/sys/arch/hppa/hppa/trap.c b/sys/arch/hppa/hppa/trap.c
index e74257bedb6..a4d4322fd66 100644
--- a/sys/arch/hppa/hppa/trap.c
+++ b/sys/arch/hppa/hppa/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.39 2002/03/14 01:26:32 millert Exp $ */
+/* $OpenBSD: trap.c,v 1.40 2002/03/15 21:44:18 mickey Exp $ */
/*
* Copyright (c) 1998-2001 Michael Shalayeff
@@ -311,10 +311,10 @@ trap(type, frame)
else
map = &vm->vm_map;
- if (map->pmap->pmap_space != space) {
+ if (map->pmap->pm_space != space) {
#ifdef TRAPDEBUG
printf("trap: space missmatch %d != %d\n",
- space, map->pmap->pmap_space);
+ space, map->pmap->pm_space);
#endif
/* actually dump the user, crap the kernel */
goto dead_end;
@@ -371,7 +371,7 @@ trap(type, frame)
pcbp->pcb_onfault = 0;
break;
}
-#if 1
+#if 0
if (kdb_trap (type, va, frame))
return;
#else
@@ -434,7 +434,7 @@ return;
}
/* FALLTHROUGH to unimplemented */
default:
-#if 1
+#if 0
if (kdb_trap (type, va, frame))
return;
#endif
diff --git a/sys/arch/hppa/hppa/vm_machdep.c b/sys/arch/hppa/hppa/vm_machdep.c
index 90293feb08e..d0eb3c2be74 100644
--- a/sys/arch/hppa/hppa/vm_machdep.c
+++ b/sys/arch/hppa/hppa/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.33 2002/02/21 06:12:30 mickey Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.34 2002/03/15 21:44:18 mickey Exp $ */
/*
* Copyright (c) 1999-2002 Michael Shalayeff
@@ -198,7 +198,7 @@ cpu_fork(p1, p2, stack, stacksize, func, arg)
pcbp = &p2->p_addr->u_pcb;
bcopy(&p1->p_addr->u_pcb, pcbp, sizeof(*pcbp));
/* space is cached for the copy{in,out}'s pleasure */
- pcbp->pcb_space = p2->p_vmspace->vm_map.pmap->pmap_space;
+ pcbp->pcb_space = p2->p_vmspace->vm_map.pmap->pm_space;
pcbp->pcb_uva = (vaddr_t)p2->p_addr;
sp = (register_t)p2->p_addr + NBPG;
@@ -214,9 +214,8 @@ cpu_fork(p1, p2, stack, stacksize, func, arg)
tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 =
- p2->p_vmspace->vm_map.pmap->pmap_space;
tf->tf_iisq_head = tf->tf_iisq_tail =
- p2->p_vmspace->vm_map.pmap->pmap_space;
+ p2->p_vmspace->vm_map.pmap->pm_space;
tf->tf_pidr1 = tf->tf_pidr2 = pmap_sid2pid(tf->tf_sr0);
/*
diff --git a/sys/arch/hppa/include/cpu.h b/sys/arch/hppa/include/cpu.h
index 3dae3c6d2aa..46b0f66abd5 100644
--- a/sys/arch/hppa/include/cpu.h
+++ b/sys/arch/hppa/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.28 2002/03/14 01:26:32 millert Exp $ */
+/* $OpenBSD: cpu.h,v 1.29 2002/03/15 21:44:18 mickey Exp $ */
/*
* Copyright (c) 2000-2001 Michael Shalayeff
@@ -62,7 +62,7 @@
/*
* CPU types and features
*/
-#define HPPA_FTRS_BTLBS 0x00000001
+#define HPPA_FTRS_TLBU 0x00000001
#define HPPA_FTRS_BTLBU 0x00000002
#define HPPA_FTRS_HVT 0x00000004
#define HPPA_FTRS_W32B 0x00000008
diff --git a/sys/arch/hppa/include/cpufunc.h b/sys/arch/hppa/include/cpufunc.h
index b967a53614d..6b85c9e3aef 100644
--- a/sys/arch/hppa/include/cpufunc.h
+++ b/sys/arch/hppa/include/cpufunc.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpufunc.h,v 1.18 2002/03/14 01:26:32 millert Exp $ */
+/* $OpenBSD: cpufunc.h,v 1.19 2002/03/15 21:44:18 mickey Exp $ */
/*
* Copyright (c) 1998,2000 Michael Shalayeff
@@ -218,13 +218,14 @@ ledctl(int on, int off, int toggle)
#endif
#ifdef _KERNEL
+extern int (*cpu_hpt_init)(vaddr_t hpt, vsize_t hptsize);
+
void ficache(pa_space_t sp, vaddr_t va, vsize_t size);
void fdcache(pa_space_t sp, vaddr_t va, vsize_t size);
void pdcache(pa_space_t sp, vaddr_t va, vsize_t size);
void fcacheall(void);
void ptlball(void);
-int btlb_insert(pa_space_t space, vaddr_t va, paddr_t pa,
- vsize_t *lenp, u_int prot);
+int btlb_insert(pa_space_t space, vaddr_t va, paddr_t pa, vsize_t *lenp, u_int prot);
hppa_hpa_t cpu_gethpa(int n);
#endif
diff --git a/sys/arch/hppa/include/db_machdep.h b/sys/arch/hppa/include/db_machdep.h
index d4003aa932a..4951a23fb47 100644
--- a/sys/arch/hppa/include/db_machdep.h
+++ b/sys/arch/hppa/include/db_machdep.h
@@ -1,7 +1,7 @@
-/* $OpenBSD: db_machdep.h,v 1.7 2002/03/14 01:26:32 millert Exp $ */
+/* $OpenBSD: db_machdep.h,v 1.8 2002/03/15 21:44:18 mickey Exp $ */
/*
- * Copyright (c) 1998 Michael Shalayeff
+ * Copyright (c) 1998-2002 Michael Shalayeff
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@
#include <uvm/uvm_extern.h>
+#define DB_AOUT_SYMBOLS
#define DB_ELF_SYMBOLS
#define DB_ELFSIZE 32
@@ -53,8 +54,8 @@ extern db_regs_t ddb_regs;
#define BKPT_SIZE sizeof(int)
#define BKPT_SET(inst) BKPT_INST
-#define IS_BREAKPOINT_TRAP(type, code) 1
-#define IS_WATCHPOINT_TRAP(type, code) 0
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_IBREAK)
+#define IS_WATCHPOINT_TRAP(type, code) ((type) == T_DBREAK)
#define FIXUP_PC_AFTER_BREAK(regs) ((regs)->tf_iioq_head -= sizeof(int))
@@ -90,8 +91,25 @@ static __inline int inst_trap_return(u_int ins) {
return (ins & 0xfc001fc0) == 0x00000ca0;
}
-#define db_clear_single_step(r) ((r)->tf_flags |= 0)
-#define db_set_single_step(r) ((r)->tf_flags |= 0)
+#if 0
+#define db_clear_single_step(r) ((r)->tf_flags &= ~(PSW_Z))
+#define db_set_single_step(r) ((r)->tf_flags |= (PSW_Z))
+#else
+#define SOFTWARE_SSTEP 1
+#define SOFTWARE_SSTEP_EMUL 1
+
+static __inline db_addr_t
+next_instr_address(db_addr_t addr, int b) {
+ return (addr + 4);
+}
+
+#define branch_taken(ins,pc,f,regs) branch_taken1(ins, pc, regs)
+static __inline db_addr_t
+branch_taken1(int ins, db_addr_t pc, db_regs_t *regs) {
+ return (pc);
+}
+
+#endif
int db_valid_breakpoint(db_addr_t);
int kdb_trap(int, int, db_regs_t *);
diff --git a/sys/arch/hppa/include/pmap.h b/sys/arch/hppa/include/pmap.h
index 872c71e20d0..43c16c4a067 100644
--- a/sys/arch/hppa/include/pmap.h
+++ b/sys/arch/hppa/include/pmap.h
@@ -1,7 +1,7 @@
-/* $OpenBSD: pmap.h,v 1.18 2002/03/14 01:26:32 millert Exp $ */
+/* $OpenBSD: pmap.h,v 1.19 2002/03/15 21:44:18 mickey Exp $ */
/*
- * Copyright (c) 1998,1999 Michael Shalayeff
+ * Copyright (c) 2002 Michael Shalayeff
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,142 +14,84 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
- * This product includes software developed by Michael Shalayeff.
+ * This product includes software developed by Michael Shalayeff.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-/*
- * Copyright 1996 1995 by Open Software Foundation, Inc.
- * All Rights Reserved
- *
- * Permission to use, copy, modify, and distribute this software and
- * its documentation for any purpose and without fee is hereby granted,
- * provided that the above copyright notice appears in all copies and
- * that both the copyright notice and this permission notice appear in
- * supporting documentation.
- *
- * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE.
- *
- * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
- * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
- * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-/*
- * Copyright (c) 1990,1993,1994 The University of Utah and
- * the Computer Systems Laboratory at the University of Utah (CSL).
- * All rights reserved.
- *
- * Permission to use, copy, modify and distribute this software is hereby
- * granted provided that (1) source code retains these copyright, permission,
- * and disclaimer notices, and (2) redistributions including binaries
- * reproduce the notices in supporting documentation, and (3) all advertising
- * materials mentioning features or use of this software display the following
- * acknowledgement: ``This product includes software developed by the
- * Computer Systems Laboratory at the University of Utah.''
- *
- * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
- * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
- * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * CSL requests users of this software to return to csl-dist@cs.utah.edu any
- * improvements that they make and grant CSL redistribution rights.
- *
- * Utah $Hdr: pmap.h 1.24 94/12/14$
- * Author: Mike Hibler, Bob Wheeler, University of Utah CSL, 9/90
- */
-
-/*
- * Pmap header for hppa.
+ * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _MACHINE_PMAP_H_
-#define _MACHINE_PMAP_H_
+#ifndef _MACHINE_PMAP_H_
+#define _MACHINE_PMAP_H_
#include <machine/pte.h>
+#include <uvm/uvm_pglist.h>
+#include <uvm/uvm_object.h>
-typedef
struct pmap {
- TAILQ_ENTRY(pmap) pmap_list; /* pmap free list */
- struct simplelock pmap_lock; /* lock on map */
- int pmap_refcnt; /* reference count */
- pa_space_t pmap_space; /* space for this pmap */
- struct pmap_statistics pmap_stats; /* statistics */
-} *pmap_t;
-extern pmap_t kernel_pmap; /* The kernel's map */
-
-/*
- * If HPT is defined, we cache the last miss for each bucket using a
- * structure defined for the 7100 hardware TLB walker. On non-7100s, this
- * acts as a software cache that cuts down on the number of times we have
- * to search the hash chain. (thereby reducing the number of instructions
- * and cache misses incurred during the TLB miss).
- *
- * The pv_entry pointer is the address of the associated hash bucket
- * list for fast tlbmiss search.
- */
-struct hpt_entry {
- u_int hpt_valid:1, /* Valid bit */
- hpt_vpn:15, /* Virtual Page Number */
- hpt_space:16; /* Space ID */
- u_int hpt_tlbprot; /* prot/access rights (for TLB load) */
- u_int hpt_tlbpage; /* physical page (<<5 for TLB load) */
- void *hpt_entry; /* Pointer to associated hash list */
+ struct uvm_object pm_obj; /* object (lck by object lock) */
+#define pm_lock pm_obj.vmobjlock
+ struct vm_page *pm_ptphint;
+ struct vm_page *pm_pdir_pg; /* vm_page for pdir */
+ paddr_t pm_pdir; /* PA of PD (read-only after create) */
+ pa_space_t pm_space; /* space id (read-only after create) */
+ u_int pm_pid; /* prot id (read-only after create) */
+
+ struct pmap_statistics pm_stats;
};
-#ifdef _KERNEL
-extern struct hpt_entry *hpt_table;
-#endif /* _KERNEL */
+typedef struct pmap *pmap_t;
-/*
- * keep it at 32 bytes for the cache overall satisfaction
- * also, align commonly used pairs on double-word boundary
- */
-struct pv_entry {
- struct pv_entry *pv_next; /* list of mappings of a given PA */
- pmap_t pv_pmap; /* back link to pmap */
- u_int pv_va; /* virtual page number */
- u_int pv_space; /* copy of space id from pmap */
- u_int pv_tlbpage; /* physical page (for TLB load) */
- u_int pv_tlbprot; /* TLB format protection */
- struct pv_entry *pv_hash; /* VTOP hash bucket list */
- u_int pv_pad; /* pad to 32 bytes */
-};
+#define HPPA_MAX_PID 0xfffa
+#define HPPA_SID_MAX 0x7fff
+#define HPPA_SID_KERNEL 0
+#define HPPA_PID_KERNEL 2
-#define NPVPPG (NBPG/32-1)
-struct pv_page {
- TAILQ_ENTRY(pv_page) pvp_list; /* Chain of pages */
- u_int pvp_nfree;
- struct pv_entry *pvp_freelist;
- u_int pvp_flag; /* is it direct mapped (unused) */
- u_int pvp_pad[3]; /* align to 32 */
- struct pv_entry pvp_pv[NPVPPG];
+#define KERNEL_ACCESS_ID 1
+#define KERNEL_TEXT_PROT (TLB_AR_KRX | (KERNEL_ACCESS_ID << 1))
+#define KERNEL_DATA_PROT (TLB_AR_KRW | (KERNEL_ACCESS_ID << 1))
+
+struct pv_entry;
+
+struct pv_head {
+ struct simplelock pvh_lock; /* locks every pv on this list */
+ struct pv_entry *pvh_list; /* head of list (locked by pvh_lock) */
+ pt_entry_t pvh_attrs; /* to preserve ref/mod */
};
-#define HPPA_SID_MAX 0x7fff
-#define HPPA_SID_KERNEL 0
-#define HPPA_PID_KERNEL 2
+struct pv_entry { /* locked by its list's pvh_lock */
+ struct pv_entry *pv_next;
+ struct pmap *pv_pmap; /* the pmap */
+ vaddr_t pv_va; /* the virtual address */
+ struct vm_page *pv_ptp; /* the vm_page of the PTP */
+};
-#define KERNEL_ACCESS_ID 1
+/* also match the hardware tlb walker definition */
+struct vp_entry {
+ u_int vp_tag;
+ u_int vp_tlbprot;
+ u_int vp_tlbpage;
+ u_int vp_ptr;
+};
-#define KERNEL_TEXT_PROT (TLB_AR_KRX | (KERNEL_ACCESS_ID << 1))
-#define KERNEL_DATA_PROT (TLB_AR_KRW | (KERNEL_ACCESS_ID << 1))
+#ifdef _KERNEL
-#ifdef _KERNEL
extern void gateway_page(void);
+extern struct pmap kernel_pmap_store;
+
+#if defined(HP7100LC_CPU) || defined(HP7300LC_CPU)
+extern int pmap_hptsize;
+extern struct pdc_hwtlb pdc_hwtlb;
+#endif
#define PMAP_STEAL_MEMORY /* we have some memory to steal */
@@ -165,36 +107,56 @@ extern void gateway_page(void);
*(h) = pmap_prefer_hint; \
} while(0)
-#define pmap_kernel_va(VA) \
- (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))
-
#define pmap_sid2pid(s) (((s) + 1) << 1)
-#define pmap_kernel() (kernel_pmap)
-#define pmap_resident_count(pmap) ((pmap)->pmap_stats.resident_count)
-#define pmap_reference(pmap) \
-do { if (pmap) { \
- simple_lock(&pmap->pmap_lock); \
- pmap->pmap_refcnt++; \
- simple_unlock(&pmap->pmap_lock); \
-} } while (0)
-#define pmap_collect(pmap)
-#define pmap_release(pmap)
+#define pmap_kernel() (&kernel_pmap_store)
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_update(pm) (void)(pm)
+#define pmap_activate(pm) (void)(pm)
+#define pmap_deactivate(pm) (void)(pm)
#define pmap_copy(dpmap,spmap,da,len,sa)
-#define pmap_update(pm)
-#define pmap_activate(p)
-#define pmap_deactivate(p)
-#define pmap_phys_address(x) ((x) << PGSHIFT)
-#define pmap_phys_to_frame(x) ((x) >> PGSHIFT)
+#define pmap_clear_modify(pg) pmap_changebit(pg, 0, PTE_PROT(TLB_DIRTY))
+#define pmap_clear_reference(pg) pmap_changebit(pg, PTE_PROT(TLB_REFTRAP), 0)
+#define pmap_is_modified(pg) pmap_testbit(pg, PTE_PROT(TLB_DIRTY))
+#define pmap_is_referenced(pg) pmap_testbit(pg, PTE_PROT(TLB_REFTRAP))
+#define pmap_phys_address(ppn) ((ppn) << PAGE_SHIFT)
+
+void pmap_bootstrap(vaddr_t);
+boolean_t pmap_changebit(struct vm_page *, u_int, u_int);
+boolean_t pmap_testbit(struct vm_page *, u_int);
+void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
+void pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva);
+void pmap_page_remove(struct vm_page *pg);
static __inline int
pmap_prot(struct pmap *pmap, int prot)
{
- extern u_int kern_prot[], user_prot[];
- return (pmap == kernel_pmap? kern_prot: user_prot)[prot];
+ extern u_int hppa_prot[];
+ return (hppa_prot[prot] | (pmap == pmap_kernel()? 0 : TLB_USER));
}
-void pmap_bootstrap(vaddr_t *, vaddr_t *);
-#endif /* _KERNEL */
+static __inline void
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
+{
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if (prot & (VM_PROT_READ|VM_PROT_EXECUTE))
+ (void) pmap_changebit(pg, PTE_PROT(TLB_READ),
+ PTE_PROT(TLB_WRITE));
+ else
+ pmap_page_remove(pg);
+ }
+}
+static __inline void
+pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
+{
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if (prot & (VM_PROT_READ|VM_PROT_EXECUTE))
+ pmap_write_protect(pmap, sva, eva, prot);
+ else
+ pmap_remove(pmap, sva, eva);
+ }
+}
+
+#endif /* _KERNEL */
#endif /* _MACHINE_PMAP_H_ */
diff --git a/sys/arch/hppa/include/pte.h b/sys/arch/hppa/include/pte.h
index 9e550cf5f02..d6f5f7650e9 100644
--- a/sys/arch/hppa/include/pte.h
+++ b/sys/arch/hppa/include/pte.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pte.h,v 1.8 2001/01/12 23:37:49 mickey Exp $ */
+/* $OpenBSD: pte.h,v 1.9 2002/03/15 21:44:18 mickey Exp $ */
/*
* Copyright (c) 1990,1993,1994 The University of Utah and
@@ -27,42 +27,36 @@
#ifndef _MACHINE_PTE_H_
#define _MACHINE_PTE_H_
+typedef u_int32_t pt_entry_t;
+
+#define PTE_PROT_SHIFT 19
+#define PTE_PROT(tlb) ((tlb) >> PTE_PROT_SHIFT)
+#define TLB_PROT(pte) ((pte) << PTE_PROT_SHIFT)
+#define PDE_MASK (0xffc00000)
+#define PTE_MASK (0x003ff000)
+#define PTE_PAGE(pte) ((pte) & ~PGOFSET)
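+
+/*
+ * A 32-bit VA splits into a 10-bit PDE index (PDE_MASK), a 10-bit
+ * PTE index (PTE_MASK) and a 12-bit page offset, so each PDE maps
+ * a 4MB region through a single page of PTEs.  TLB protection bits
+ * occupy the top word of a TLB entry; PTE_PROT() packs them into
+ * the low 12 bits of a PTE, below the page frame address.
+ */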
+
/* TLB access/protection values */
-#define TLB_REF 0x80000000 /* software only */
-#define TLB_ALIGNED 0x40000000 /* software only */
-#define TLB_TRAP 0x20000000
+#define TLB_WIRED 0x40000000 /* software only */
+#define TLB_REFTRAP 0x20000000
#define TLB_DIRTY 0x10000000
#define TLB_BREAK 0x08000000
#define TLB_AR_MASK 0x07f00000
+#define TLB_READ 0x00000000
+#define TLB_WRITE 0x01000000
+#define TLB_EXECUTE 0x02000000
+#define TLB_GATEWAY 0x04000000
+#define TLB_USER 0x00f00000
#define TLB_AR_NA 0x07300000
-#define TLB_AR_KR 0x00000000
-#define TLB_AR_KRW 0x01000000
-#define TLB_AR_KRX 0x02000000
-#define TLB_AR_KRWX 0x03000000
-#define TLB_AR_UR 0x00f00000
-#define TLB_AR_URW 0x01f00000
-#define TLB_AR_URX 0x02f00000
-#define TLB_AR_URWX 0x03f00000
-#define TLB_UNCACHEABLE 0x00080000
-#define TLB_ICACHE 0x00040000 /* software only */
-#define TLB_NOTUSED 0x00020000 /* software only */
-#define TLB_DCACHE 0x00010000 /* software only */
+#define TLB_AR_R TLB_READ
+#define TLB_AR_RW	(TLB_READ|TLB_WRITE)
+#define TLB_AR_RX	(TLB_READ|TLB_EXECUTE)
+#define TLB_AR_RWX	(TLB_READ|TLB_WRITE|TLB_EXECUTE)
+#define TLB_UNCACHABLE 0x00080000
#define TLB_PID_MASK 0x0000fffe
-#define TLB_WIRED 0x00000001 /* software only */
#define TLB_BITS "\020\024U\031W\032X\033N\034B\035D\036T\037A\040R"
-#define TLB_REF_POS 0
-#define TLB_ALIGNED_POS 1
-#define TLB_TRAP_POS 2
-#define TLB_DIRTY_POS 3
-#define TLB_BREAK_POS 4
-#define TLB_ITLB_POS 12
-#define TLB_ICACHE_POS 13
-#define TLB_DTLB_POS 14
-#define TLB_DCACHE_POS 15
-#define TLB_WIRED_POS 31
-
/* protection for a gateway page */
#define TLB_GATE_PROT 0x04c00000
diff --git a/sys/arch/hppa/include/vmparam.h b/sys/arch/hppa/include/vmparam.h
index 22992486c53..3b6be168ea2 100644
--- a/sys/arch/hppa/include/vmparam.h
+++ b/sys/arch/hppa/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.21 2002/02/17 22:59:52 maja Exp $ */
+/* $OpenBSD: vmparam.h,v 1.22 2002/03/15 21:44:18 mickey Exp $ */
/*
* Copyright (c) 1988-1994, The University of Utah and
@@ -106,7 +106,7 @@
#ifndef _LOCORE
#define __HAVE_PMAP_PHYSSEG
struct pmap_physseg {
- struct pv_entry *pvent;
+ struct pv_head *pvhead;
};
#endif