diff options
author | Michael Shalayeff <mickey@cvs.openbsd.org> | 1998-10-30 22:16:43 +0000 |
---|---|---|
committer | Michael Shalayeff <mickey@cvs.openbsd.org> | 1998-10-30 22:16:43 +0000 |
commit | 774a90a035f055fa6dcf304c080c05bf13a095ca (patch) | |
tree | 3b800fdb2dbd0b81c06ee5fd547c128993ec7ea2 /sys | |
parent | 65962ad84b5691bc210500fc70add3e89e847983 (diff) |
remove all those "black magic" inspired routines,
use PDC calls instead, which is more MI.
we also don't need pmap_map to be defined any more, unless
kernel mapping is enforced through the FORCE_MAP_KERNEL definition.
Diffstat (limited to 'sys')
-rw-r--r-- | sys/arch/hppa/hppa/pmap.c | 95 | ||||
-rw-r--r-- | sys/arch/hppa/include/cpufunc.h | 248 |
2 files changed, 121 insertions, 222 deletions
diff --git a/sys/arch/hppa/hppa/pmap.c b/sys/arch/hppa/hppa/pmap.c index e59cc10de4b..88ecad9229c 100644 --- a/sys/arch/hppa/hppa/pmap.c +++ b/sys/arch/hppa/hppa/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.2 1998/09/12 03:14:49 mickey Exp $ */ +/* $OpenBSD: pmap.c,v 1.3 1998/10/30 22:16:42 mickey Exp $ */ /* * Copyright (c) 1998 Michael Shalayeff @@ -135,6 +135,7 @@ #include <machine/pmap.h> #include <machine/pte.h> #include <machine/cpufunc.h> +#include <machine/pdc.h> #ifdef DEBUG struct { @@ -231,6 +232,10 @@ static __inline void pmap_clear_va __P((pa_space_t, vm_offset_t)); #ifdef DEBUG void pmap_hptdump __P((void)); #endif +#if FORCE_MAP_KERNEL +vm_offset_t pmap_map __P((vm_offset_t va, vm_offset_t spa, vm_offset_t epa, + vm_prot_t prot, int wired)); +#endif u_int kern_prot[8], user_prot[8]; @@ -634,7 +639,7 @@ pmap_find_pv(pa) #endif return &vm_physmem[bank].pmseg.pvent[off]; } else - return NULL; + panic("pmap_find_pv: mapping unmappable"); } /* @@ -651,6 +656,7 @@ pmap_bootstrap(vstart, vend) vm_offset_t *vstart; vm_offset_t *vend; { + extern u_int totalphysmem; vm_offset_t addr; vm_size_t size; struct pv_page *pvp; @@ -740,29 +746,31 @@ pmap_bootstrap(vstart, vend) NB: It sez CR_VTOP, but we (and the TLB handlers) know better ... */ mtctl(hpt_table, CR_VTOP); - /* Allocate the physical to virtual table. 
*/ - addr = cache_align(addr); - size = sizeof(struct pv_entry) * atop(*vend - *vstart + 1); + addr = hppa_round_page(addr); + size = hppa_round_page(sizeof(struct pv_entry) * + (totalphysmem - atop(virtual_avail))); + bzero ((caddr_t)addr, size); + #ifdef PMAPDEBUG if (pmapdebug & PDB_INIT) printf("pv_array: 0x%x @ 0x%x\n", size, addr); #endif - bzero ((caddr_t)addr, size); - virtual_steal = hppa_round_page(addr + size); - /* align the virtual_avail at power of 2 - always keep some memory for steal */ - for (virtual_avail = 1; virtual_avail <= virtual_steal; - virtual_avail *= 2); - vm_page_physload(atop(virtual_steal), atop(virtual_end), - atop(virtual_avail), atop(virtual_end)); + + /* map the kernel space, which will give us virtual_avail */ + *vstart = hppa_round_page(addr + size + totalphysmem * 64); + btlb_insert(0, kernel_pmap->pmap_space, 0, 0, vstart, + kernel_pmap->pmap_pid | + pmap_prot(kernel_pmap, VM_PROT_ALL)); + virtual_avail = *vstart; + + vm_page_physload(atop(virtual_avail), totalphysmem, + atop(virtual_avail), totalphysmem); /* we have only one initial phys memory segment */ - vm_physmem[0].pmseg.pvent = (struct pv_entry *) addr; - addr = virtual_steal; + vm_physmem[0].pmseg.pvent = (struct pv_entry *)addr; + virtual_steal = addr += size; /* here will be a hole due to the kernel memory alignment and we use it for pmap_steal_memory */ - - *vstart = virtual_avail; } vm_offset_t @@ -776,17 +784,17 @@ pmap_steal_memory(size, startp, endp) if (pmapdebug & PDB_FOLLOW) printf("pmap_steal_memory(%x, %x, %x)\n", size, startp, endp); #endif - *startp = virtual_avail; - *endp = virtual_end; + if (startp) + *startp = virtual_avail; + if (endp) + *endp = virtual_end; size = hppa_round_page(size); if (size <= virtual_avail - virtual_steal) { #ifdef PMAPDEBUG printf("pmap_steal_memory: steal %d bytes (%x+%x,%x)\n", - size, vm_physmem[0].start, atop(size), - vm_physmem[0].avail_start); + size, virtual_steal, size, virtual_avail); #endif - vm_physmem[0].start += 
atop(size); va = virtual_steal; virtual_steal += size; } else @@ -804,10 +812,44 @@ pmap_steal_memory(size, startp, endp) void pmap_init(void) { +#ifdef FORCE_MAP_KERNEL + extern int kernel_text, etext; + vm_offset_t end_text, end_data; +#endif + register struct pv_page *pvp; + #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) printf("pmap_init()\n"); -/* pmapdebug |= PDB_ENTER | PDB_VA | PDB_PV; */ +/* pmapdebug |= PDB_VA | PDB_PV; */ +#endif + + /* alloc the rest of steal area for pv_pages */ + for (pvp = (struct pv_page *)virtual_steal; + pvp + 1 <= (struct pv_page *)virtual_avail; pvp++) + pmap_insert_pvp(pvp, 1); +#ifdef DEBUG + printf("pmap_init: allocate %d pv_pages @ %x\n", + (virtual_avail - virtual_steal) / sizeof(struct pv_page), + virtual_steal); +#endif + virtual_steal = virtual_avail; + +#if FORCE_MAP_KERNEL + end_text = round_page((vm_offset_t)&etext); + end_data = virtual_avail; + + /* pdc/iodc area; kernel_text is assumed to be page-aligned */ + pmap_map(0, 0, (vm_offset_t)&kernel_text, VM_PROT_ALL, TRUE); + /* .text */ + pmap_map((vm_offset_t)&kernel_text, (vm_offset_t)&kernel_text,end_text, +#ifdef DDB + VM_PROT_WRITE | +#endif + VM_PROT_READ | VM_PROT_EXECUTE, TRUE); + /* .data+.bss */ + pmap_map(end_text, end_text, end_data, + VM_PROT_READ | VM_PROT_WRITE, TRUE); #endif TAILQ_INIT(&pmap_freelist); @@ -1082,6 +1124,7 @@ pmap_remove(pmap, sva, eva) simple_unlock(&pmap->pmap_lock); } +#if FORCE_MAP_KERNEL /* * Used to map a range of physical addresses into kernel * virtual address space. @@ -1090,11 +1133,12 @@ pmap_remove(pmap, sva, eva) * specified memory. 
*/ vm_offset_t -pmap_map(va, spa, epa, prot) +pmap_map(va, spa, epa, prot, wired) vm_offset_t va; vm_offset_t spa; vm_offset_t epa; vm_prot_t prot; + int wired; { #ifdef DEBUG @@ -1103,12 +1147,13 @@ pmap_map(va, spa, epa, prot) #endif while (spa < epa) { - pmap_enter(pmap_kernel(), va, spa, prot, FALSE); + pmap_enter(pmap_kernel(), va, spa, prot, wired); va += NBPG; spa += NBPG; } return va; } +#endif /* FORCE_MAP_KERNEL */ /* * pmap_page_protect(pa, prot) diff --git a/sys/arch/hppa/include/cpufunc.h b/sys/arch/hppa/include/cpufunc.h index f6fd259b2d7..3694bc037ee 100644 --- a/sys/arch/hppa/include/cpufunc.h +++ b/sys/arch/hppa/include/cpufunc.h @@ -1,6 +1,35 @@ -/* $OpenBSD: cpufunc.h,v 1.2 1998/08/29 01:56:55 mickey Exp $ */ +/* $OpenBSD: cpufunc.h,v 1.3 1998/10/30 22:16:42 mickey Exp $ */ /* + * Copyright (c) 1998 Michael Shalayeff + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Michael Shalayeff. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* * (c) Copyright 1988 HEWLETT-PACKARD COMPANY * * To anyone who acknowledges that this file is provided "AS IS" @@ -29,15 +58,16 @@ * Author: Bob Wheeler, University of Utah CSL */ -#ifndef _HPPA_CPUFUNC_H_ -#define _HPPA_CPUFUNC_H_ +#ifndef _MACHINE_CPUFUNC_H_ +#define _MACHINE_CPUFUNC_H_ #include <machine/psl.h> #include <machine/pte.h> -#define tlbbtop(b) (((b) & ~PGOFSET) >> (PGSHIFT - 5)) +#define tlbbtop(b) ((b) >> (PGSHIFT - 5)) #define tlbptob(p) ((p) << (PGSHIFT - 5)) +#define hptbtop(b) ((b) >> 17) /* Get space register for an address */ static __inline u_int ldsid(vm_offset_t p) { @@ -46,71 +76,9 @@ static __inline u_int ldsid(vm_offset_t p) { return ret; } -/* Disable SID hashing and flush all caches for S-CHIP */ -static __inline u_int disable_S_sid_hashing(void) { - register u_int t, ret; - __asm ("mfcpu (0,%1)\n\t" /* get cpu diagnosic register */ - "mfcpu (0,%1)\n\t" /* black magic */ - "copy %1,%0\n\t" - "depi 0,20,3,%1\n\t" /* clear DHE, domain and IHE bits */ - "depi 1,16,1,%1\n\t" /* enable quad-word stores */ - "depi 0,10,1,%1\n\t" /* do not clear the DHPMC bit */ - "depi 0,14,1,%1\n\t" /* do not clear the ILPMC bit */ - "mtcpu (%1,0)\n\t" /* set the cpu disagnostic register */ - "mtcpu (%1,0)\n\t" /* black magic */ - : "=r" (ret) : "r" (t)); - return ret; -} - -/* Disable SID hashing and flush all caches for T-CHIP */ -static __inline u_int disable_T_sid_hashing(void) { - register u_int t, ret; - __asm("mfcpu (0,%1)\n\t" /* get 
cpu diagnosic register */ - "mfcpu (0,%1)\n\t" /* black magic */ - "copy %1,%0\n\t" - "depi 0,18,1,%1\n\t" /* clear DHE bit */ - "depi 0,20,1,%1\n\t" /* clear IHE bit */ - "depi 0,10,1,%1\n\t" /* do not clear the DHPMC bit */ - "depi 0,14,1,%1\n\t" /* do not clear the ILPMC bit */ - "mtcpu (%1,0)\n\t" /* set the cpu disagnostic register */ - "mtcpu (%1,0)\n\t" /* black magic */ - : "=r" (ret) : "r" (t)); - return ret; -} - -/* Disable SID hashing and flush all caches for L-CHIP */ -static __inline u_int disable_L_sid_hashing(void) { - register u_int t, ret; - __asm("mfcpu2 (0,%1)\n\t" /* get cpu diagnosic register */ -/* ".word 0x14160600\n\t" */ - "copy %1,%0\n\t" - "depi 0,27,1,%1\n\t" /* clear DHE bit */ - "depi 0,28,1,%1\n\t" /* clear IHE bit */ - "depi 0,6,1,%1\n\t" /* do not clear the L2IHPMC bit */ - "depi 0,8,1,%1\n\t" /* do not clear the L2DHPMC bit */ - "depi 0,10,1,%1\n\t" /* do not clear the L1IHPMC bit */ - "mtcpu2 (%1,0)" /* set the cpu disagnostic register */ -/* ".word 0x14160240\n\t" */ - : "=r" (ret) : "r" (t)); - return ret; -} - -static __inline u_int get_dcpu_reg(void) { - register u_int ret; - __asm("mfcpu (0,%0)\n\t" /* Get cpu diagnostic register */ - "mfcpu (0,%0)": "=r" (ret)); /* black magic */ - return ret; -} - #define mtctl(v,r) __asm __volatile("mtctl %0,%1":: "r" (v), "i" (r)) #define mfctl(r,v) __asm __volatile("mfctl %1,%0": "=r" (v): "i" (r)) -#define mtcpu(v,r) __asm __volatile("mtcpu %0,%1":: "r" (v), "i" (r)) -#define mfcpu(r,v) __asm __volatile("mfcpu %1,%0": "=r" (v): "i" (r)) - -#define mtcpu2(v,r) __asm __volatile("mtcpu2 %0,%1":: "r" (v), "i" (r)) -#define mfcpu2(r,v) __asm __volatile("mfcpu2 %1,%0": "=r" (v): "i" (r)) - #define mtsp(v,r) __asm __volatile("mtsp %0,%1":: "r" (v), "i" (r)) #define mfsp(r,v) __asm __volatile("mfsp %1,%0": "=r" (v): "i" (r)) @@ -125,19 +93,22 @@ static __inline u_int mtsm(u_int mask) { return ret; } +#if 0 static __inline void set_psw(u_int psw) { - __asm __volatile("mtctl %%r0, %%cr17\n\t" + 
__asm __volatile("mtctl %0, %%cr22\n\t" "mtctl %%r0, %%cr17\n\t" - "ldil L%%.+32, %%r21\n\t" - "ldo R%%.+28(%%r21), %%r21\n\t" - "mtctl %%r21, %%cr17\n\t" - "ldo 4(%%r21), %%r21\n\t" - "mtctl %%r21, %%cr17\n\t" - "mtctl %0, %%cr22\n\t" - "rfi\n\t" - "nop\n\tnop\n\tnop\n\tnop" - :: "r" (psw): "r21"); -} + "mtctl %%r0, %%cr17\n\t" + "ldil L%%., %0\n\t" + "ldo R%%.+24(%0), %0\n\t" + "mtctl %0, %%cr18\n\t" + "ldo 4(%0), %0\n\t" + "mtctl %0, %%cr18\n\t" + "rfi\n\tnop\n\tnop" + :: "r" (psw)); +} +#else +void set_psw __P((u_int psw)); +#endif #define fdce(sp,off) __asm __volatile("fdce 0(%0,%1)":: "i" (sp), "r" (off)) #define fice(sp,off) __asm __volatile("fdce 0(%0,%1)":: "i" (sp), "r" (off)) @@ -212,129 +183,12 @@ pdtlbe(pa_space_t sp, vm_offset_t off) __asm volatile("pdtlbe %%r0(%%sr1, %0)":: "r" (off)); } -static __inline void -ibitlb(int i, vm_offset_t pa, vm_offset_t va, vm_size_t sz, u_int prot) -{ - -} - -static __inline void -pbitlb(int i) -{ -} - -static __inline void -ibdtlb(int i, vm_offset_t pa, vm_offset_t va, vm_size_t sz, u_int prot) -{ - -} - -static __inline void -pbdtlb(int i) -{ -} - -static __inline void -ibctlb(int i, vm_offset_t pa, vm_offset_t va, vm_size_t sz, u_int prot) -{ - register u_int psw, t; - - rsm(PSW_R|PSW_I,psw); - - t = 0x7fc1|((i&15)<<1); /* index 127, lockin, override, mismatch */ - mtcpu(t,8); /* move to the dtlb diag reg */ - mtcpu(t,8); /* black magic */ - - prot |= TLB_DIRTY; - sz = (~sz >> 7) & 0x7f000; - pa = (tlbbtop(pa) & 0x7f000) | sz; - va = (va & 0x7f000) | sz; - - idtlba(pa, 0, va); - idtlbp(prot, 0, va); - - t |= 0x2000; /* no lockout, PE force-ins disable */ - mtcpu2(t,8); /* move to the dtlb diagnostic register */ - - mtsm(psw); -} - -static __inline void -pbctlb(int i) -{ - register u_int psw, t; - - rsm(PSW_R|PSW_I,psw); - - t = 0xffc1|((i&15)<<1); /* index 127, lockin, override, mismatch */ - mtcpu(t,8); /* move to the dtlb diag reg */ - mtcpu(t,8); /* black magic */ - - idtlba(0,0,0); /* address does not matter */ 
- idtlbp(0,0,0); - - t |= 0x7f << 7; - mtcpu(t,8); /* move to the dtlb diagnostic register */ - mtcpu(t,8); /* black magic */ - - mtsm(psw); -} - -static __inline void -iLbctlb(int i, vm_offset_t pa, vm_offset_t va, vm_offset_t sz, u_int prot) -{ - register u_int psw, t; - - rsm(PSW_R|PSW_I,psw); - - t = 0x6041| ((i&7)<<1); /* lockin, PE force-insert disable, - PE LRU-ins dis, BE force-ins enable - set the block enter select bit */ - mtcpu2(t, 8); /* move to the dtlb diagnostic register */ - - prot |= TLB_DIRTY; - sz = (~sz >> 7) & 0x7f000; - pa = (tlbbtop(pa) & 0x7f000) | sz; - va = (va & 0x7f000) | sz; - - /* we assume correct address/size alignment */ - idtlba(pa, 0, va); - idtlbp(prot, 0, va); - - t |= 0x2000; /* no lockin, PE force-ins disable */ - mtcpu2(t, 8); /* move to the dtlb diagnostic register */ - - mtsm(psw); -} - -static __inline void -pLbctlb(int i) -{ - register u_int psw, t; - - rsm(PSW_R|PSW_I,psw); - - t = 0xc041| ((i&7)<<1); /* lockout, PE force-insert disable, - PE LRU-ins dis, BE force-ins enable - set the block enter select bit */ - mtcpu2(t,8); /* move to the dtlb diagnostic register */ - - idtlba(0,0,0); /* address does not matter */ - idtlbp(0,0,0); - - t |= 0x2000; /* no lockout, PE force-ins disable */ - mtcpu2(t,8); /* move to the dtlb diagnostic register */ - - mtsm(psw); -} - #ifdef _KERNEL struct pdc_cache; void fcacheall __P((struct pdc_cache *)); void ptlball __P((struct pdc_cache *)); +int btlb_insert __P((int i, pa_space_t space, vm_offset_t va, vm_offset_t pa, + vm_size_t *lenp, u_int prot)); #endif -#endif /* _HPPA_CPUFUNC_H_ */ - - - +#endif /* _MACHINE_CPUFUNC_H_ */ |