Diffstat (limited to 'sys/arch')
-rw-r--r--	sys/arch/hppa/include/cpufunc.h      | 664
-rw-r--r--	sys/arch/hppa/stand/libsa/cache_c.c  | 130
-rw-r--r--	sys/arch/hppa/stand/libsa/cache_s.s  | 220
3 files changed, 246 insertions(+), 768 deletions(-)
diff --git a/sys/arch/hppa/include/cpufunc.h b/sys/arch/hppa/include/cpufunc.h
index 4caa651bd6a..f6fd259b2d7 100644
--- a/sys/arch/hppa/include/cpufunc.h
+++ b/sys/arch/hppa/include/cpufunc.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpufunc.h,v 1.1 1998/07/07 21:32:40 mickey Exp $ */
+/* $OpenBSD: cpufunc.h,v 1.2 1998/08/29 01:56:55 mickey Exp $ */
/*
* (c) Copyright 1988 HEWLETT-PACKARD COMPANY
@@ -14,7 +14,6 @@
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
-
/*
* Copyright (c) 1990,1994 The University of Utah and
* the Computer Systems Laboratory (CSL). All rights reserved.
@@ -30,483 +29,312 @@
* Author: Bob Wheeler, University of Utah CSL
*/
-/*
- * hppa routines to move to and from control registers from C
- */
+#ifndef _HPPA_CPUFUNC_H_
+#define _HPPA_CPUFUNC_H_
+#include <machine/psl.h>
+#include <machine/pte.h>
-/*
- * Get space register for an address
- */
+#define tlbbtop(b) (((b) & ~PGOFSET) >> (PGSHIFT - 5))
+#define tlbptob(p) ((p) << (PGSHIFT - 5))
+
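[Editor's note: for concreteness, a worked example of the conversion macros above, assuming the usual 4 KB hppa pages (PGSHIFT == 12, PGOFSET == 0xfff), so the shift is 7:]

/*
 *	tlbbtop(0x12345678) == (0x12345678 & ~0xfff) >> 7 == 0x2468a0
 *	tlbptob(0x2468a0)   ==  0x2468a0 << 7             == 0x12345000
 *
 * i.e. the byte offset within the page is dropped and the page frame
 * number is shifted into the form the idtlba()/iitlba() helpers below
 * take.
 */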
+/* Get space register for an address */
static __inline u_int ldsid(vm_offset_t p) {
register u_int ret;
- __asm __volatile("ldsid (%1),%0" : "=r" (p) : "r" (ret));
+ __asm __volatile("ldsid (%1),%0" : "=r" (ret) : "r" (p));
return ret;
}
-/*
- * Move the specified value into the control register. The register is taken
- * modulo 32. If the register is invalid the operation is ignored.
- */
-static __inline void mtctl(int reg, int value) {
- reg %= 32;
- if (reg == 0)
- __asm __volatile("mtctl %0, cr0" : : "r" (value));
- else if (reg > 7)
- ;
-#if 0
- bv 0(r2)
- mtctl arg1,cr8
- bv 0(r2)
- mtctl arg1,cr9
- bv 0(r2)
- mtctl arg1,cr10
- bv 0(r2)
- mtctl arg1,cr11
- bv 0(r2)
- mtctl arg1,cr12
- bv 0(r2)
- mtctl arg1,cr13
- bv 0(r2)
- mtctl arg1,cr14
- bv 0(r2)
- mtctl arg1,cr15
- bv 0(r2)
- mtctl arg1,cr16
- bv 0(r2)
- mtctl arg1,cr17
- bv 0(r2)
- mtctl arg1,cr18
- bv 0(r2)
- mtctl arg1,cr19
- bv 0(r2)
- mtctl arg1,cr20
- bv 0(r2)
- mtctl arg1,cr21
- bv 0(r2)
- mtctl arg1,cr22
- bv 0(r2)
- mtctl arg1,cr23
- bv 0(r2)
- mtctl arg1,cr24
- bv 0(r2)
- mtctl arg1,cr25
- bv 0(r2)
- mtctl arg1,cr26
- bv 0(r2)
- mtctl arg1,cr27
- bv 0(r2)
- mtctl arg1,cr28
- bv 0(r2)
- mtctl arg1,cr29
- bv 0(r2)
- mtctl arg1,cr30
- bv 0(r2)
- mtctl arg1,cr31
-#endif
+/* Disable SID hashing and flush all caches for S-CHIP */
+static __inline u_int disable_S_sid_hashing(void) {
+ register u_int t, ret;
+ __asm ("mfcpu (0,%1)\n\t" /* get cpu diagnostic register */
+ "mfcpu (0,%1)\n\t" /* black magic */
+ "copy %1,%0\n\t"
+ "depi 0,20,3,%1\n\t" /* clear DHE, domain and IHE bits */
+ "depi 1,16,1,%1\n\t" /* enable quad-word stores */
+ "depi 0,10,1,%1\n\t" /* do not clear the DHPMC bit */
+ "depi 0,14,1,%1\n\t" /* do not clear the ILPMC bit */
+ "mtcpu (%1,0)\n\t" /* set the cpu diagnostic register */
+ "mtcpu (%1,0)\n\t" /* black magic */
+ : "=r" (ret) : "r" (t));
+ return ret;
}
-/*
- * Return the contents of the specified control register. The register is taken
- * modulo 32. If the register is invalid the operation is ignored.
- */
+/* Disable SID hashing and flush all caches for T-CHIP */
+static __inline u_int disable_T_sid_hashing(void) {
+ register u_int t, ret;
+ __asm("mfcpu (0,%1)\n\t" /* get cpu diagnostic register */
+ "mfcpu (0,%1)\n\t" /* black magic */
+ "copy %1,%0\n\t"
+ "depi 0,18,1,%1\n\t" /* clear DHE bit */
+ "depi 0,20,1,%1\n\t" /* clear IHE bit */
+ "depi 0,10,1,%1\n\t" /* do not clear the DHPMC bit */
+ "depi 0,14,1,%1\n\t" /* do not clear the ILPMC bit */
+ "mtcpu (%1,0)\n\t" /* set the cpu diagnostic register */
+ "mtcpu (%1,0)\n\t" /* black magic */
+ : "=r" (ret) : "r" (t));
+ return ret;
+}
+
+/* Disable SID hashing and flush all caches for L-CHIP */
+static __inline u_int disable_L_sid_hashing(void) {
+ register u_int t, ret;
+ __asm("mfcpu2 (0,%1)\n\t" /* get cpu diagnostic register */
+/* ".word 0x14160600\n\t" */
+ "copy %1,%0\n\t"
+ "depi 0,27,1,%1\n\t" /* clear DHE bit */
+ "depi 0,28,1,%1\n\t" /* clear IHE bit */
+ "depi 0,6,1,%1\n\t" /* do not clear the L2IHPMC bit */
+ "depi 0,8,1,%1\n\t" /* do not clear the L2DHPMC bit */
+ "depi 0,10,1,%1\n\t" /* do not clear the L1IHPMC bit */
+ "mtcpu2 (%1,0)" /* set the cpu diagnostic register */
+/* ".word 0x14160240\n\t" */
+ : "=r" (ret) : "r" (t));
+ return ret;
+}
-static __inline u_int mfctl(int reg) {
+static __inline u_int get_dcpu_reg(void) {
register u_int ret;
- reg %= 32;
- if (reg == 0)
- __asm __volatile("mfctl cr0,%0" : "=r" (ret));
- else if (reg > 7)
- ;
-#if 0
- bv 0(r2)
- mfctl cr8,ret0
- bv 0(r2)
- mfctl cr9,ret0
- bv 0(r2)
- mfctl cr10,ret0
- bv 0(r2)
- mfctl cr11,ret0
- bv 0(r2)
- mfctl cr12,ret0
- bv 0(r2)
- mfctl cr13,ret0
- bv 0(r2)
- mfctl cr14,ret0
- bv 0(r2)
- mfctl cr15,ret0
- bv 0(r2)
- mfctl cr16,ret0
- bv 0(r2)
- mfctl cr17,ret0
- bv 0(r2)
- mfctl cr18,ret0
- bv 0(r2)
- mfctl cr19,ret0
- bv 0(r2)
- mfctl cr20,ret0
- bv 0(r2)
- mfctl cr21,ret0
- bv 0(r2)
- mfctl cr22,ret0
- bv 0(r2)
- mfctl cr23,ret0
- bv 0(r2)
- mfctl cr24,ret0
- bv 0(r2)
- mfctl cr25,ret0
- bv 0(r2)
- mfctl cr26,ret0
- bv 0(r2)
- mfctl cr27,ret0
- bv 0(r2)
- mfctl cr28,ret0
- bv 0(r2)
- mfctl cr29,ret0
- bv 0(r2)
- mfctl cr30,ret0
- bv 0(r2)
- mfctl cr31,ret0
-#endif
+ __asm("mfcpu (0,%0)\n\t" /* Get cpu diagnostic register */
+ "mfcpu (0,%0)": "=r" (ret)); /* black magic */
return ret;
}
-#if 0
-/*
- * int mtsp(sr, value)
- * int sr;
- * int value;
- *
- * Move the specified value into a space register. The space register is taken
- * modulo 8.
- */
+#define mtctl(v,r) __asm __volatile("mtctl %0,%1":: "r" (v), "i" (r))
+#define mfctl(r,v) __asm __volatile("mfctl %1,%0": "=r" (v): "i" (r))
- .export mtsp,entry
- .proc
- .callinfo
-mtsp
+#define mtcpu(v,r) __asm __volatile("mtcpu %0,%1":: "r" (v), "i" (r))
+#define mfcpu(r,v) __asm __volatile("mfcpu %1,%0": "=r" (v): "i" (r))
-/*
- * take the register number modulo 8
- */
- ldi 7,t1
- and t1,arg0,arg0
+#define mtcpu2(v,r) __asm __volatile("mtcpu2 %0,%1":: "r" (v), "i" (r))
+#define mfcpu2(r,v) __asm __volatile("mfcpu2 %1,%0": "=r" (v): "i" (r))
-/*
- * write the value to the specified register
- */
+#define mtsp(v,r) __asm __volatile("mtsp %0,%1":: "r" (v), "i" (r))
+#define mfsp(r,v) __asm __volatile("mfsp %1,%0": "=r" (v): "i" (r))
- blr,n arg0,r0
- nop
-
- bv 0(r2)
- mtsp arg1,sr0
- bv 0(r2)
- mtsp arg1,sr1
- bv 0(r2)
- mtsp arg1,sr2
- bv 0(r2)
- mtsp arg1,sr3
- bv 0(r2)
- mtsp arg1,sr4
- bv 0(r2)
- mtsp arg1,sr5
- bv 0(r2)
- mtsp arg1,sr6
- bv 0(r2)
- mtsp arg1,sr7
-
- .procend
+#define ssm(v,r) __asm __volatile("ssm %1,%0": "=r" (r): "i" (v))
+#define rsm(v,r) __asm __volatile("rsm %1,%0": "=r" (r): "i" (v))
+/* Move to system mask. Old value of system mask is returned. */
+static __inline u_int mtsm(u_int mask) {
+ register u_int ret;
+ __asm __volatile("ssm 0,%0\n\t"
+ "mtsm %1": "=r" (ret) : "r" (mask));
+ return ret;
+}
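[Editor's note: the block-TLB helpers further down bracket their work with rsm()/mtsm() to hold off interrupts. The pattern, pulled out as a stand-alone sketch (the function name is made up; PSW_R and PSW_I come from <machine/psl.h> above):]

static __inline void
tlb_critical_section_sketch(void)
{
	register u_int psw;

	rsm(PSW_R|PSW_I, psw);	/* clear recovery counter and interrupt
				   enables; old PSW bits land in psw */
	/* ... poke the TLB or cache here ... */
	mtsm(psw);		/* put the previous system mask back */
}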
-/*
- * int mfsr(reg)
- * int reg;
- *
- * Return the contents of the specified space register. The space register is
- * taken modulo 8.
- */
+static __inline void set_psw(u_int psw) {
+ __asm __volatile("mtctl %%r0, %%cr17\n\t"
+ "mtctl %%r0, %%cr17\n\t"
+ "ldil L%%.+32, %%r21\n\t"
+ "ldo R%%.+28(%%r21), %%r21\n\t"
+ "mtctl %%r21, %%cr17\n\t"
+ "ldo 4(%%r21), %%r21\n\t"
+ "mtctl %%r21, %%cr17\n\t"
+ "mtctl %0, %%cr22\n\t"
+ "rfi\n\t"
+ "nop\n\tnop\n\tnop\n\tnop"
+ :: "r" (psw): "r21");
+}
- .export mfsp,entry
- .proc
- .callinfo
-mfsp
+#define fdce(sp,off) __asm __volatile("fdce 0(%0,%1)":: "i" (sp), "r" (off))
+#define fice(sp,off) __asm __volatile("fice 0(%0,%1)":: "i" (sp), "r" (off))
+#define sync_caches() \
+ __asm __volatile("sync\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop")
-/*
- * take the register number modulo 8
- */
- ldi 7,t1
- and t1,arg0,arg0
+static __inline void
+ficache(pa_space_t space, vm_offset_t off, vm_size_t size)
+{
-/*
- * write the value to the specified register
- */
+}
- blr,n arg0,r0
- nop
-
- bv 0(r2)
- mfsp sr0,ret0
- bv 0(r2)
- mfsp sr1,ret0
- bv 0(r2)
- mfsp sr2,ret0
- bv 0(r2)
- mfsp sr3,ret0
- bv 0(r2)
- mfsp sr4,ret0
- bv 0(r2)
- mfsp sr5,ret0
- bv 0(r2)
- mfsp sr6,ret0
- bv 0(r2)
- mfsp sr7,ret0
-
- .procend
+static __inline void
+fdcache(pa_space_t space, vm_offset_t off, vm_size_t size)
+{
+}
-/*
- * int ssm(mask)
- * int mask;
- *
- * Set system mask. This call will not set the Q bit even if it is
- * specified.
- *
- * Returns the old system mask
- */
+static __inline void
+iitlba(u_int pg, pa_space_t sp, vm_offset_t off)
+{
+ mtsp(sp, 1);
+ __asm volatile("iitlba %0,(%%sr1, %1)":: "r" (pg), "r" (off));
+}
- .export ssm,entry
- .proc
- .callinfo
-ssm
+static __inline void
+idtlba(u_int pg, pa_space_t sp, vm_offset_t off)
+{
+ mtsp(sp, 1);
+ __asm volatile("idtlba %0,(%%sr1, %1)":: "r" (pg), "r" (off));
+}
-/*
- * look at only the lower 5 bits of the mask
- */
- ldi 31,t1
- and t1,arg0,arg0
+static __inline void
+iitlbp(u_int prot, pa_space_t sp, vm_offset_t off)
+{
+ mtsp(sp, 1);
+ __asm volatile("iitlbp %0,(%%sr1, %1)":: "r" (prot), "r" (off));
+}
+static __inline void
+idtlbp(u_int prot, pa_space_t sp, vm_offset_t off)
+{
+ mtsp(sp, 1);
+ __asm volatile("idtlbp %0,(%%sr1, %1)":: "r" (prot), "r" (off));
+}
-/*
- * Set System Mask and Return
- */
+static __inline void
+pitlb(pa_space_t sp, vm_offset_t off)
+{
+ mtsp(sp, 1);
+ __asm volatile("pitlb %%r0(%%sr1, %0)":: "r" (off));
+}
- blr,n arg0,r0
- nop
-
- bv 0(r2)
- ssm 0,ret0
- bv 0(r2)
- ssm 1,ret0
- bv 0(r2)
- ssm 2,ret0
- bv 0(r2)
- ssm 3,ret0
- bv 0(r2)
- ssm 4,ret0
- bv 0(r2)
- ssm 5,ret0
- bv 0(r2)
- ssm 6,ret0
- bv 0(r2)
- ssm 7,ret0
- bv 0(r2)
- ssm 0,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 1,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 2,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 3,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 4,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 5,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 6,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 7,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 16,ret0
- bv 0(r2)
- ssm 17,ret0
- bv 0(r2)
- ssm 18,ret0
- bv 0(r2)
- ssm 19,ret0
- bv 0(r2)
- ssm 20,ret0
- bv 0(r2)
- ssm 21,ret0
- bv 0(r2)
- ssm 22,ret0
- bv 0(r2)
- ssm 23,ret0
- bv 0(r2)
- ssm 16,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 17,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 18,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 19,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 20,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 21,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 22,ret0 /* can't set Q bit with ssm */
- bv 0(r2)
- ssm 23,ret0 /* can't set Q bit with ssm */
-
- .procend
+static __inline void
+pdtlb(pa_space_t sp, vm_offset_t off)
+{
+ mtsp(sp, 1);
+ __asm volatile("pdtlb %%r0(%%sr1, %0)":: "r" (off));
+}
+static __inline void
+pitlbe(pa_space_t sp, vm_offset_t off)
+{
+ mtsp(sp, 1);
+ __asm volatile("pitlbe %%r0(%%sr1, %0)":: "r" (off));
+}
-/*
- * int rsm(mask)
- * int mask;
- *
- * Reset system mask.
- *
- * Returns the old system mask
- */
+static __inline void
+pdtlbe(pa_space_t sp, vm_offset_t off)
+{
+ mtsp(sp, 1);
+ __asm volatile("pdtlbe %%r0(%%sr1, %0)":: "r" (off));
+}
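[Editor's note: the TLB insert helpers are used in address/protection pairs, exactly as ibctlb() does further down for the data TLB. A hypothetical wrapper, not part of this header, makes the pairing explicit:]

static __inline void
enter_dtlb_sketch(u_int pg, u_int prot, pa_space_t sp, vm_offset_t va)
{
	idtlba(pg, sp, va);	/* first insert the physical page number */
	idtlbp(prot, sp, va);	/* then the protection/access rights */
}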
- .export rsm,entry
- .proc
- .callinfo
-rsm
+static __inline void
+ibitlb(int i, vm_offset_t pa, vm_offset_t va, vm_size_t sz, u_int prot)
+{
-/*
- * look at only the lower 5 bits of the mask
- */
- ldi 31,t1
- and t1,arg0,arg0
+}
-/*
- * Set System Mask and Return
- */
+static __inline void
+pbitlb(int i)
+{
+}
- blr,n arg0,r0
- nop
-
- bv 0(r2)
- rsm 0,ret0
- bv 0(r2)
- rsm 1,ret0
- bv 0(r2)
- rsm 2,ret0
- bv 0(r2)
- rsm 3,ret0
- bv 0(r2)
- rsm 4,ret0
- bv 0(r2)
- rsm 5,ret0
- bv 0(r2)
- rsm 6,ret0
- bv 0(r2)
- rsm 7,ret0
- bv 0(r2)
- rsm 8,ret0
- bv 0(r2)
- rsm 9,ret0
- bv 0(r2)
- rsm 10,ret0
- bv 0(r2)
- rsm 11,ret0
- bv 0(r2)
- rsm 12,ret0
- bv 0(r2)
- rsm 13,ret0
- bv 0(r2)
- rsm 14,ret0
- bv 0(r2)
- rsm 15,ret0
- bv 0(r2)
- rsm 16,ret0
- bv 0(r2)
- rsm 17,ret0
- bv 0(r2)
- rsm 18,ret0
- bv 0(r2)
- rsm 19,ret0
- bv 0(r2)
- rsm 20,ret0
- bv 0(r2)
- rsm 21,ret0
- bv 0(r2)
- rsm 22,ret0
- bv 0(r2)
- rsm 23,ret0
- bv 0(r2)
- rsm 24,ret0
- bv 0(r2)
- rsm 25,ret0
- bv 0(r2)
- rsm 26,ret0
- bv 0(r2)
- rsm 27,ret0
- bv 0(r2)
- rsm 28,ret0
- bv 0(r2)
- rsm 29,ret0
- bv 0(r2)
- rsm 30,ret0
- bv 0(r2)
- rsm 31,ret0
-
- .procend
+static __inline void
+ibdtlb(int i, vm_offset_t pa, vm_offset_t va, vm_size_t sz, u_int prot)
+{
+}
-/*
- * int mtsm(mask)
- * int mask;
- *
- * Move to system mask. Old value of system mask is returned.
- */
+static __inline void
+pbdtlb(int i)
+{
+}
- .export mtsm,entry
- .proc
- .callinfo
-mtsm
+static __inline void
+ibctlb(int i, vm_offset_t pa, vm_offset_t va, vm_size_t sz, u_int prot)
+{
+ register u_int psw, t;
-/*
- * Move System Mask and Return
- */
- ssm 0,ret0
- bv 0(r2)
- mtsm arg0
+ rsm(PSW_R|PSW_I,psw);
- .procend
-#endif
+ t = 0x7fc1|((i&15)<<1); /* index 127, lockin, override, mismatch */
+ mtcpu(t,8); /* move to the dtlb diag reg */
+ mtcpu(t,8); /* black magic */
-static __inline void
-ficache(pa_space_t space, vm_offset_t off, vm_size_t size)
-{
+ prot |= TLB_DIRTY;
+ sz = (~sz >> 7) & 0x7f000;
+ pa = (tlbbtop(pa) & 0x7f000) | sz;
+ va = (va & 0x7f000) | sz;
+
+ idtlba(pa, 0, va);
+ idtlbp(prot, 0, va);
+ t |= 0x2000; /* no lockout, PE force-ins disable */
+ mtcpu2(t,8); /* move to the dtlb diagnostic register */
+
+ mtsm(psw);
}
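[Editor's note: purely as arithmetic, this is what the size-field computation in ibctlb() evaluates to for two hypothetical block sizes; mechanical trace only, the hardware meaning of the resulting bits is taken from the code as-is.]

/*
 *	sz == 0x40000 (256 KB):  ~sz == 0xfffbffff, >> 7 == 0x01fff7ff,
 *	                         & 0x7f000 == 0x7f000
 *	sz == 0x80000 (512 KB):  ~sz == 0xfff7ffff, >> 7 == 0x01ffefff,
 *	                         & 0x7f000 == 0x7e000
 */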
static __inline void
-fdcache(pa_space_t space, vm_offset_t off, vm_size_t size)
+pbctlb(int i)
{
+ register u_int psw, t;
+
+ rsm(PSW_R|PSW_I,psw);
+
+ t = 0xffc1|((i&15)<<1); /* index 127, lockin, override, mismatch */
+ mtcpu(t,8); /* move to the dtlb diag reg */
+ mtcpu(t,8); /* black magic */
+ idtlba(0,0,0); /* address does not matter */
+ idtlbp(0,0,0);
+
+ t |= 0x7f << 7;
+ mtcpu(t,8); /* move to the dtlb diagnostic register */
+ mtcpu(t,8); /* black magic */
+
+ mtsm(psw);
}
static __inline void
-pitlb(pa_space_t sp, vm_offset_t off)
+iLbctlb(int i, vm_offset_t pa, vm_offset_t va, vm_offset_t sz, u_int prot)
{
+ register u_int psw, t;
+
+ rsm(PSW_R|PSW_I,psw);
+
+ t = 0x6041| ((i&7)<<1); /* lockin, PE force-insert disable,
+ PE LRU-ins dis, BE force-ins enable
+ set the block enter select bit */
+ mtcpu2(t, 8); /* move to the dtlb diagnostic register */
+
+ prot |= TLB_DIRTY;
+ sz = (~sz >> 7) & 0x7f000;
+ pa = (tlbbtop(pa) & 0x7f000) | sz;
+ va = (va & 0x7f000) | sz;
+
+ /* we assume correct address/size alignment */
+ idtlba(pa, 0, va);
+ idtlbp(prot, 0, va);
+
+ t |= 0x2000; /* no lockin, PE force-ins disable */
+ mtcpu2(t, 8); /* move to the dtlb diagnostic register */
+ mtsm(psw);
}
static __inline void
-pdtlb(pa_space_t sp, vm_offset_t off)
+pLbctlb(int i)
{
+ register u_int psw, t;
+
+ rsm(PSW_R|PSW_I,psw);
+
+ t = 0xc041| ((i&7)<<1); /* lockout, PE force-insert disable,
+ PE LRU-ins dis, BE force-ins enable
+ set the block enter select bit */
+ mtcpu2(t,8); /* move to the dtlb diagnostic register */
+
+ idtlba(0,0,0); /* address does not matter */
+ idtlbp(0,0,0);
+ t |= 0x2000; /* no lockout, PE force-ins disable */
+ mtcpu2(t,8); /* move to the dtlb diagnostic register */
+
+ mtsm(psw);
}
-void phys_page_copy __P((vm_offset_t, vm_offset_t));
-void phys_bzero __P((vm_offset_t, vm_size_t));
-void lpage_copy __P((int, pa_space_t, vm_offset_t, vm_offset_t));
-void lpage_zero __P((int, vm_offset_t, pa_space_t));
+#ifdef _KERNEL
+struct pdc_cache;
+void fcacheall __P((struct pdc_cache *));
+void ptlball __P((struct pdc_cache *));
+#endif
+
+#endif /* _HPPA_CPUFUNC_H_ */
+
diff --git a/sys/arch/hppa/stand/libsa/cache_c.c b/sys/arch/hppa/stand/libsa/cache_c.c
deleted file mode 100644
index d8f90394496..00000000000
--- a/sys/arch/hppa/stand/libsa/cache_c.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/* $OpenBSD: cache_c.c,v 1.2 1998/07/08 21:34:31 mickey Exp $ */
-/* $NOWHERE: cache_c.c,v 2.1 1998/06/22 19:34:46 mickey Exp $ */
-
-/*
- * Copyright 1996 1995 by Open Software Foundation, Inc.
- * All Rights Reserved
- *
- * Permission to use, copy, modify, and distribute this software and
- * its documentation for any purpose and without fee is hereby granted,
- * provided that the above copyright notice appears in all copies and
- * that both the copyright notice and this permission notice appear in
- * supporting documentation.
- *
- * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE.
- *
- * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
- * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
- * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- */
-/*
- * pmk1.1
- */
-/*
- * (c) Copyright 1988 HEWLETT-PACKARD COMPANY
- *
- * To anyone who acknowledges that this file is provided "AS IS"
- * without any express or implied warranty:
- * permission to use, copy, modify, and distribute this file
- * for any purpose is hereby granted without fee, provided that
- * the above copyright notice and this notice appears in all
- * copies, and that the name of Hewlett-Packard Company not be
- * used in advertising or publicity pertaining to distribution
- * of the software without specific, written prior permission.
- * Hewlett-Packard Company makes no representations about the
- * suitability of this software for any purpose.
- */
-/*
- * HISTORY
- * $Log: cache_c.c,v $
- * Revision 1.2 1998/07/08 21:34:31 mickey
- * use those new pdc call types
- *
- * Revision 1.1.1.1 1998/06/23 18:46:41 mickey
- * ok, it boots, include and libkern to come
- *
- * Revision 2.1 1998/06/22 19:34:46 mickey
- * add cache manipulating routines
- *
- * Revision 1.1.2.1 1996/08/19 07:46:48 bruel
- * First revision
- * [1996/08/02 09:17:19 bruel]
- *
- * Revision 1.1.1.2 1996/08/02 09:17:19 bruel
- * First revision
- *
- * Revision 1.1.2.2 91/11/20 16:21:21 sharpe
- * Initial version from DSEE
- * [91/11/20 16:08:06 sharpe]
- *
- */
-
-/*
- * Stolen - Lock, stock, and barrel from tmm's pmap* .
- */
-
-#include "libsa.h"
-#include <machine/pdc.h>
-
-void
-fall(c_base, c_count, c_loop, c_stride, rot)
- int c_base, c_count, c_loop, c_stride;
- void (*rot)();
-{
- int addr, count, loop; /* Internal vars */
-
- addr = c_base;
- for (count = 0; count < c_count; count++) {
- for (loop = 0; loop < c_loop; loop++) {
- (*rot)(0, addr);
- }
- addr += c_stride;
- }
-
-}
-
-/*
- * fcacheall - Flush all caches.
- *
- * This routine is just a wrapper around the real cache flush routine.
- *
- * Parameters:
- * None.
- *
- * Returns:
- * Hopefully.
- */
-struct pdc_cache pdc_cacheinfo __attribute__ ((aligned(8)));
-
-void
-fcacheall()
-{
- extern int fice();
- extern int fdce();
- int err;
-
- err = (*pdc)(PDC_CACHE, PDC_CACHE_DFLT, &pdc_cacheinfo);
- if (err) {
-#ifdef DEBUG
- if (debug)
- printf("fcacheall: PDC_CACHE failed (%d).\n", err);
-#endif
- return;
- }
-
- /*
- * Flush the instruction, then data cache.
- */
- fall(pdc_cacheinfo.ic_base, pdc_cacheinfo.ic_count, pdc_cacheinfo.ic_loop,
- pdc_cacheinfo.ic_stride, fice);
- sync_caches();
- fall(pdc_cacheinfo.dc_base, pdc_cacheinfo.dc_count, pdc_cacheinfo.dc_loop,
- pdc_cacheinfo.dc_stride, fdce);
- sync_caches();
-}
-
diff --git a/sys/arch/hppa/stand/libsa/cache_s.s b/sys/arch/hppa/stand/libsa/cache_s.s
deleted file mode 100644
index 0812143d4a6..00000000000
--- a/sys/arch/hppa/stand/libsa/cache_s.s
+++ /dev/null
@@ -1,220 +0,0 @@
-/* $OpenBSD: cache_s.s,v 1.1 1998/06/23 18:46:42 mickey Exp $ */
-/* $NOWHERE: cache_s.s,v 2.1 1998/06/22 19:34:46 mickey Exp $ */
-
-/*
- * Copyright 1996 1995 by Open Software Foundation, Inc.
- * All Rights Reserved
- *
- * Permission to use, copy, modify, and distribute this software and
- * its documentation for any purpose and without fee is hereby granted,
- * provided that the above copyright notice appears in all copies and
- * that both the copyright notice and this permission notice appear in
- * supporting documentation.
- *
- * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE.
- *
- * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
- * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
- * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- */
-/*
- * pmk1.1
- */
-/*
- * (c) Copyright 1988 HEWLETT-PACKARD COMPANY
- *
- * To anyone who acknowledges that this file is provided "AS IS"
- * without any express or implied warranty:
- * permission to use, copy, modify, and distribute this file
- * for any purpose is hereby granted without fee, provided that
- * the above copyright notice and this notice appears in all
- * copies, and that the name of Hewlett-Packard Company not be
- * used in advertising or publicity pertaining to distribution
- * of the software without specific, written prior permission.
- * Hewlett-Packard Company makes no representations about the
- * suitability of this software for any purpose.
- */
-/*
- * HISTORY
- * $Log: cache_s.s,v $
- * Revision 1.1 1998/06/23 18:46:42 mickey
- * Initial revision
- *
- * Revision 2.1 1998/06/22 19:34:46 mickey
- * add cache manipulating routines
- *
- * Revision 1.1.2.1 1996/08/19 07:47:16 bruel
- * First revision
- * [1996/08/02 09:17:26 bruel]
- *
- * Revision 1.1.1.2 1996/08/02 09:17:26 bruel
- * First revision
- *
- * Revision 1.1.2.2 91/11/20 16:21:39 sharpe
- * Initial version from DSEE
- * [91/11/20 16:08:35 sharpe]
- *
- */
-
-/*
- * FlushDCache(start,size)
- * Stolen from pmap_fdcache.
- * void FlushDCache(space, start, end) - Flush the data cache.
- *
- * This routine flushes the given range of virtual addresses, from start (inclusive)
- * to end (exclusive) from the data cache.
- *
- */
- .space $TEXT$
- .subspa $CODE$
-
- .export FlushDCache,entry
- .proc
- .callinfo
-FlushDCache
- .entry
-
- comb,= %arg1,%r0,FDCdone /* If len=0, then done */
- ldi 0x10,%r21
- ldi 0x0f,%r22
- add %arg0,%arg1,%r20
- andcm %arg0,%r22,%arg0 /* Truncate lower bound to stridesize boundary */
- sub %r20,%arg0,%arg1
- ldi 0xff,%r22
- add %arg1,%r22,%arg1
- andcm %arg1,%r22,%arg1
- add %arg0,%arg1,%r20 /* Round up upper bound */
- fdc,m %r21(%arg0)
-FDCloop
- fdc,m %r21(%arg0) /* Flush block */
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- fdc,m %r21(%arg0)
- comb,<<,n %arg0,%r20,FDCloop /* Continue until fstart = fend */
- fdc,m %r21(%arg0)
-FDCdone
- bv 0(%rp) /* return */
- nop
- .exit
- .procend
-
- .export FlushICache,entry
- .proc
- .callinfo
-FlushICache
- .entry
-
- comb,= %arg1,%r0,PICdone /* If len=0, then done */
- ldi 0x10,%r21
- ldi 0x0f,%r22
- add %arg0,%arg1,%r20
- andcm %arg0,%r22,%arg0 /* Truncate lower bound to stridesize boundary */
- sub %r20,%arg0,%arg1
- ldi 0xff,%r22
- add %arg1,%r22,%arg1
- andcm %arg1,%r22,%arg1
- add %arg0,%arg1,%r20 /* Round up upper bound */
- fic,m %r21(%arg0)
-PICloop
- fic,m %r21(%arg0) /* Flush block */
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- fic,m %r21(%arg0)
- comb,<<,n %arg0,%r20,PICloop /* Continue until fstart = fend */
- fic,m %r21(%arg0)
-PICdone
- bv 0(%rp) /* return */
- nop
- .exit
- .procend
-
-/*
- * void sync_caches - Synchronize the cache.
- *
- * This routine executes a sync instruction and executes 7 nops.
- * Intended to be used with kdb when setting breakpoints.
- * Stolen from pmap_as.s.
- */
- .export sync_caches,entry
- .proc
- .callinfo
-sync_caches
- .entry
-
- sync /* Sync access */
- nop /* voodoo */
- nop
- nop
- nop
- nop
- nop
- bv 0(%rp)
- nop
- .exit
- .procend
-
-/*
- * void fdce(space, offset) - Perform fdce operation.
- *
- * This routine is called by pmap_fcacheall to whack the data cache. Must
- * be only used inside an architectured loop.
- */
- .export fdce,entry
- .proc
- .callinfo
-fdce
- .entry
-
- fdce 0(0,%arg1) /* Space does not make a difference */
- sync
- bv 0(%rp)
- nop
- .exit
- .procend
-
-/*
- * void fice(space, offset) - Perform fice operation.
- *
- * This routine is called by pmap_fcacheall to whack the instruction cache.
- * Must be only used inside an architectured loop
- */
- .export fice,entry
- .proc
- .callinfo
-fice
- .entry
- fice 0(0,%arg1) /* Space does not make a difference */
- sync
- bv 0(%rp)
- .exit
- .procend
-
- .end
-