summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--sys/arch/arm/arm/arm32_machdep.c614
-rw-r--r--sys/arch/arm/arm/arm_machdep.c272
-rw-r--r--sys/arch/arm/arm/ast.c132
-rw-r--r--sys/arch/arm/arm/atomic.S73
-rw-r--r--sys/arch/arm/arm/bcopy_page.S276
-rw-r--r--sys/arch/arm/arm/bcopyinout.S815
-rw-r--r--sys/arch/arm/arm/blockio.S588
-rw-r--r--sys/arch/arm/arm/bootconfig.c128
-rw-r--r--sys/arch/arm/arm/bus_dma.c1070
-rw-r--r--sys/arch/arm/arm/bus_space_asm_generic.S352
-rw-r--r--sys/arch/arm/arm/bus_space_notimpl.S160
-rw-r--r--sys/arch/arm/arm/conf.c525
-rw-r--r--sys/arch/arm/arm/copystr.S229
-rw-r--r--sys/arch/arm/arm/cpu.c587
-rw-r--r--sys/arch/arm/arm/cpufunc.c2168
-rw-r--r--sys/arch/arm/arm/cpufunc_asm.S151
-rw-r--r--sys/arch/arm/arm/cpufunc_asm_armv4.S67
-rw-r--r--sys/arch/arm/arm/cpufunc_asm_sa1.S314
-rw-r--r--sys/arch/arm/arm/cpuswitch.S1172
-rw-r--r--sys/arch/arm/arm/db_disasm.c77
-rw-r--r--sys/arch/arm/arm/db_interface.c476
-rw-r--r--sys/arch/arm/arm/db_machdep.c88
-rw-r--r--sys/arch/arm/arm/db_trace.c208
-rw-r--r--sys/arch/arm/arm/disassem.c682
-rw-r--r--sys/arch/arm/arm/disassem.h49
-rw-r--r--sys/arch/arm/arm/disksubr.c362
-rw-r--r--sys/arch/arm/arm/disksubr_mbr.c208
-rw-r--r--sys/arch/arm/arm/exception.S380
-rw-r--r--sys/arch/arm/arm/fault.c837
-rw-r--r--sys/arch/arm/arm/fiq.c177
-rw-r--r--sys/arch/arm/arm/fiq_subr.S116
-rw-r--r--sys/arch/arm/arm/fusu.S398
-rw-r--r--sys/arch/arm/arm/genassym.cf168
-rw-r--r--sys/arch/arm/arm/in_cksum_arm.S471
-rw-r--r--sys/arch/arm/arm/irq_dispatch.S155
-rw-r--r--sys/arch/arm/arm/locore.S215
-rw-r--r--sys/arch/arm/arm/mem.c249
-rw-r--r--sys/arch/arm/arm/pmap.c5131
-rw-r--r--sys/arch/arm/arm/process_machdep.c225
-rw-r--r--sys/arch/arm/arm/procfs_machdep.c23
-rw-r--r--sys/arch/arm/arm/setcpsr.S79
-rw-r--r--sys/arch/arm/arm/setstack.S93
-rw-r--r--sys/arch/arm/arm/sig_machdep.c384
-rw-r--r--sys/arch/arm/arm/sigcode.S62
-rw-r--r--sys/arch/arm/arm/softintr.c207
-rw-r--r--sys/arch/arm/arm/stubs.c215
-rw-r--r--sys/arch/arm/arm/sys_machdep.c119
-rw-r--r--sys/arch/arm/arm/syscall.c492
-rw-r--r--sys/arch/arm/arm/undefined.c329
-rw-r--r--sys/arch/arm/arm/vectors.S104
-rw-r--r--sys/arch/arm/arm/vm_machdep.c390
-rw-r--r--sys/arch/arm/arm/vm_machdep_arm.c100
-rw-r--r--sys/arch/arm/conf/files.arm141
-rw-r--r--sys/arch/arm/conf/files.footbridge22
-rw-r--r--sys/arch/arm/footbridge/dc21285mem.h90
-rw-r--r--sys/arch/arm/footbridge/dc21285reg.h393
-rw-r--r--sys/arch/arm/footbridge/footbridge.c293
-rw-r--r--sys/arch/arm/footbridge/footbridge.h17
-rw-r--r--sys/arch/arm/footbridge/footbridge_clock.c482
-rw-r--r--sys/arch/arm/footbridge/footbridge_com.c870
-rw-r--r--sys/arch/arm/footbridge/footbridge_com_io.c211
-rw-r--r--sys/arch/arm/footbridge/footbridge_intr.h215
-rw-r--r--sys/arch/arm/footbridge/footbridge_io.c321
-rw-r--r--sys/arch/arm/footbridge/footbridge_irqhandler.c482
-rw-r--r--sys/arch/arm/footbridge/footbridge_irqhandler.h60
-rw-r--r--sys/arch/arm/footbridge/footbridge_machdep.c66
-rw-r--r--sys/arch/arm/footbridge/footbridge_pci.c419
-rw-r--r--sys/arch/arm/footbridge/footbridgevar.h96
-rw-r--r--sys/arch/arm/footbridge/genassym.cf47
-rw-r--r--sys/arch/arm/footbridge/isa/ds1687reg.h129
-rw-r--r--sys/arch/arm/footbridge/isa/dsrtc.c279
-rw-r--r--sys/arch/arm/footbridge/isa/icu.h71
-rw-r--r--sys/arch/arm/footbridge/isa/isa_io.c321
-rw-r--r--sys/arch/arm/footbridge/isa/isa_io_asm.S342
-rw-r--r--sys/arch/arm/footbridge/isa/isa_machdep.c609
-rw-r--r--sys/arch/arm/footbridge/isa/sysbeep_isa.c90
-rw-r--r--sys/arch/arm/footbridge/todclock.c348
-rw-r--r--sys/arch/arm/footbridge/todclockvar.h56
-rw-r--r--sys/arch/arm/include/ansi.h109
-rw-r--r--sys/arch/arm/include/armreg.h353
-rw-r--r--sys/arch/arm/include/asm.h130
-rw-r--r--sys/arch/arm/include/atomic.h103
-rw-r--r--sys/arch/arm/include/blockio.h54
-rw-r--r--sys/arch/arm/include/bus.h1071
-rw-r--r--sys/arch/arm/include/cdefs.h13
-rw-r--r--sys/arch/arm/include/conf.h134
-rw-r--r--sys/arch/arm/include/cpu.h302
-rw-r--r--sys/arch/arm/include/cpuconf.h177
-rw-r--r--sys/arch/arm/include/cpufunc.h524
-rw-r--r--sys/arch/arm/include/db_machdep.h117
-rw-r--r--sys/arch/arm/include/disklabel.h151
-rw-r--r--sys/arch/arm/include/elf_abi.h7
-rw-r--r--sys/arch/arm/include/endian.h8
-rw-r--r--sys/arch/arm/include/fiq.h69
-rw-r--r--sys/arch/arm/include/float.h90
-rw-r--r--sys/arch/arm/include/fp.h87
-rw-r--r--sys/arch/arm/include/frame.h412
-rw-r--r--sys/arch/arm/include/ieee.h191
-rw-r--r--sys/arch/arm/include/ieeefp.h41
-rw-r--r--sys/arch/arm/include/internal_types.h6
-rw-r--r--sys/arch/arm/include/isa_machdep.h193
-rw-r--r--sys/arch/arm/include/katelib.h99
-rw-r--r--sys/arch/arm/include/limits.h54
-rw-r--r--sys/arch/arm/include/lock.h90
-rw-r--r--sys/arch/arm/include/machdep.h27
-rw-r--r--sys/arch/arm/include/math.h4
-rw-r--r--sys/arch/arm/include/param.h241
-rw-r--r--sys/arch/arm/include/pcb.h115
-rw-r--r--sys/arch/arm/include/pci_machdep.h103
-rw-r--r--sys/arch/arm/include/pio.h47
-rw-r--r--sys/arch/arm/include/pmap.h595
-rw-r--r--sys/arch/arm/include/proc.h51
-rw-r--r--sys/arch/arm/include/profile.h107
-rw-r--r--sys/arch/arm/include/pte.h246
-rw-r--r--sys/arch/arm/include/ptrace.h44
-rw-r--r--sys/arch/arm/include/reg.h55
-rw-r--r--sys/arch/arm/include/reloc.h53
-rw-r--r--sys/arch/arm/include/rtc.h84
-rw-r--r--sys/arch/arm/include/setjmp.h87
-rw-r--r--sys/arch/arm/include/signal.h134
-rw-r--r--sys/arch/arm/include/softintr.h106
-rw-r--r--sys/arch/arm/include/spinlock.h10
-rw-r--r--sys/arch/arm/include/swi.h23
-rw-r--r--sys/arch/arm/include/sysarch.h61
-rw-r--r--sys/arch/arm/include/trap.h71
-rw-r--r--sys/arch/arm/include/types.h119
-rw-r--r--sys/arch/arm/include/undefined.h89
-rw-r--r--sys/arch/arm/include/vmparam.h152
-rw-r--r--sys/arch/arm/mainbus/cpu_mainbus.c102
-rw-r--r--sys/arch/arm/mainbus/mainbus.c129
-rw-r--r--sys/arch/arm/mainbus/mainbus.h61
-rw-r--r--sys/arch/arm/mainbus/mainbus_io.c248
-rw-r--r--sys/arch/arm/mainbus/mainbus_io_asm.S113
133 files changed, 37294 insertions, 0 deletions
diff --git a/sys/arch/arm/arm/arm32_machdep.c b/sys/arch/arm/arm/arm32_machdep.c
new file mode 100644
index 00000000000..a045c0b81dc
--- /dev/null
+++ b/sys/arch/arm/arm/arm32_machdep.c
@@ -0,0 +1,614 @@
+/* $OpenBSD: arm32_machdep.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: arm32_machdep.c,v 1.42 2003/12/30 12:33:15 pk Exp $^I*/$
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Machine dependant functions for kernel setup
+ *
+ * Created : 17/09/94
+ * Updated : 18/04/01 updated for new wscons
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/reboot.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/mount.h>
+#include <sys/buf.h>
+#include <sys/msg.h>
+#include <sys/msgbuf.h>
+#include <sys/device.h>
+#include <uvm/uvm_extern.h>
+#include <sys/sysctl.h>
+
+#include <dev/cons.h>
+
+#include <arm/katelib.h>
+#include <arm/machdep.h>
+#include <machine/bootconfig.h>
+
+#include "rd.h"
+
+struct vm_map *exec_map = NULL;
+struct vm_map *phys_map = NULL;
+
+extern int physmem;
+caddr_t allocsys(caddr_t);
+
+#ifdef NBUF
+int nbuf = NBUF;
+#else
+int nbuf = 0;
+#endif
+
+#ifndef BUFCACHEPERCENT
+#define BUFCACHEPERCENT 5
+#endif
+
+#ifdef BUFPAGES
+int bufpages = BUFPAGES;
+#else
+int bufpages = 0;
+#endif
+int bufcachepercent = BUFCACHEPERCENT;
+
+int cold = 1;
+
+#if NMD > 0 && defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
+extern size_t md_root_size; /* Memory disc size */
+#endif /* NMD && MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */
+
+pv_addr_t kernelstack;
+
+/* the following is used externally (sysctl_hw) */
+char machine[] = MACHINE; /* from <machine/param.h> */
+char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */
+
+/* Our exported CPU info; we can have only one. */
+struct cpu_info cpu_info_store;
+
+caddr_t msgbufaddr;
+extern paddr_t msgbufphys;
+
+int kernel_debug = 0;
+
+struct user *proc0paddr;
+
+/* exported variable to be filled in by the bootloaders */
+char *booted_kernel;
+
+
+/* Prototypes */
+
+void data_abort_handler __P((trapframe_t *frame));
+void prefetch_abort_handler __P((trapframe_t *frame));
+extern void configure __P((void));
+
+/*
+ * arm32_vector_init:
+ *
+ * Initialize the vector page, and select whether or not to
+ * relocate the vectors.
+ *
+ * NOTE: We expect the vector page to be mapped at its expected
+ * destination.
+ */
+void
+arm32_vector_init(vaddr_t va, int which)
+{
+ extern unsigned int page0[], page0_data[];
+ unsigned int *vectors = (int *) va;
+ unsigned int *vectors_data = vectors + (page0_data - page0);
+ int vec;
+
+ /*
+ * Loop through the vectors we're taking over, and copy the
+ * vector's insn and data word.
+ */
+ for (vec = 0; vec < ARM_NVEC; vec++) {
+ if ((which & (1 << vec)) == 0) {
+ /* Don't want to take over this vector. */
+ continue;
+ }
+ vectors[vec] = page0[vec];
+ vectors_data[vec] = page0_data[vec];
+ }
+
+ /* Now sync the vectors. */
+ cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));
+
+ vector_page = va;
+
+ if (va == ARM_VECTORS_HIGH) {
+ /*
+ * Assume the MD caller knows what it's doing here, and
+ * really does want the vector page relocated.
+ *
+ * Note: This has to be done here (and not just in
+ * cpu_setup()) because the vector page needs to be
+ * accessible *before* cpu_startup() is called.
+ * Think ddb(9) ...
+ *
+ * NOTE: If the CPU control register is not readable,
+ * this will totally fail! We'll just assume that
+ * any system that has high vector support has a
+ * readable CPU control register, for now. If we
+ * ever encounter one that does not, we'll have to
+ * rethink this.
+ */
+ cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
+ }
+}
+
+/*
+ * Debug function just to park the CPU
+ */
+
+void
+halt()
+{
+ while (1)
+ cpu_sleep(0);
+}
+
+
+/* Sync the discs and unmount the filesystems */
+
+void
+bootsync(void)
+{
+ static int bootsyncdone = 0;
+
+ if (bootsyncdone) return;
+
+ bootsyncdone = 1;
+
+ /* Make sure we can still manage to do things */
+ if (GetCPSR() & I32_bit) {
+ /*
+ * If we get here then boot has been called without RB_NOSYNC
+ * and interrupts were disabled. This means the boot() call
+ * did not come from a user process e.g. shutdown, but must
+ * have come from somewhere in the kernel.
+ */
+ IRQenable;
+ printf("Warning IRQ's disabled during boot()\n");
+ }
+
+ vfs_shutdown();
+}
+
+/*
+ * void cpu_startup(void)
+ *
+ * Machine dependant startup code.
+ *
+ */
+void
+cpu_startup()
+{
+ u_int loop;
+ paddr_t minaddr;
+ paddr_t maxaddr;
+ caddr_t sysbase;
+ caddr_t size;
+ vsize_t bufsize;
+ int base, residual;
+
+ proc0paddr = (struct user *)kernelstack.pv_va;
+ proc0.p_addr = proc0paddr;
+
+ /* Set the cpu control register */
+ cpu_setup(boot_args);
+
+ /* Lock down zero page */
+ vector_page_setprot(VM_PROT_READ);
+
+ /*
+ * Give pmap a chance to set up a few more things now the vm
+ * is initialised
+ */
+ pmap_postinit();
+
+ /*
+ * Initialize error message buffer (at end of core).
+ */
+
+ /* msgbufphys was setup during the secondary boot strap */
+ for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop)
+ pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
+ msgbufphys + loop * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_update(pmap_kernel());
+ initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
+
+ /*
+ * Look at arguments passed to us and compute boothowto.
+ * Default to SINGLE and ASKNAME if no args or
+ * SINGLE and DFLTROOT if this is a ramdisk kernel.
+ */
+#ifdef RAMDISK_HOOKS
+ boothowto = RB_SINGLE | RB_DFLTROOT;
+#else
+ boothowto = RB_AUTOBOOT;
+#endif /* RAMDISK_HOOKS */
+
+ /*
+ * Identify ourselves for the msgbuf (everything printed earlier will
+ * not be buffered).
+ */
+ printf(version);
+
+ printf("real mem = %u (%uK)\n", ctob(physmem), ctob(physmem)/1024);
+
+
+ /*
+ * Find out how much space we need, allocate it,
+ * and then give everything true virtual addresses.
+ */
+ size = allocsys(NULL);
+ sysbase = (caddr_t)uvm_km_zalloc(kernel_map, round_page((vaddr_t)size));
+ if (sysbase == 0)
+ panic(
+ "cpu_startup: no room for system tables; %d bytes required",
+ (u_int)size);
+ if ((caddr_t)((allocsys(sysbase) - sysbase)) != size)
+ panic("cpu_startup: system table size inconsistency");
+
+ /*
+ * Now allocate buffers proper. They are different than the above
+ * in that they usually occupy more virtual memory than physical.
+ */
+ bufsize = MAXBSIZE * nbuf;
+ if (uvm_map(kernel_map, (vaddr_t *)&buffers, round_page(bufsize),
+ NULL, UVM_UNKNOWN_OFFSET, 0,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_NORMAL, 0)) != 0)
+ panic("cpu_startup: cannot allocate UVM space for buffers");
+ minaddr = (vaddr_t)buffers;
+ if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
+ /* don't want to alloc more physical mem than needed */
+ bufpages = btoc(MAXBSIZE) * nbuf;
+ }
+
+ base = bufpages / nbuf;
+ residual = bufpages % nbuf;
+ for (loop = 0; loop < nbuf; ++loop) {
+ vsize_t curbufsize;
+ vaddr_t curbuf;
+ struct vm_page *pg;
+
+ /*
+ * Each buffer has MAXBSIZE bytes of VM space allocated. Of
+ * that MAXBSIZE space, we allocate and map (base+1) pages
+ * for the first "residual" buffers, and then we allocate
+ * "base" pages for the rest.
+ */
+ curbuf = (vaddr_t) buffers + (loop * MAXBSIZE);
+ curbufsize = NBPG * ((loop < residual) ? (base+1) : base);
+
+ while (curbufsize) {
+ pg = uvm_pagealloc(NULL, 0, NULL, 0);
+ if (pg == NULL)
+ panic("cpu_startup: not enough memory for buffer cache");
+ pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
+ VM_PROT_READ|VM_PROT_WRITE);
+ curbuf += PAGE_SIZE;
+ curbufsize -= PAGE_SIZE;
+ }
+ }
+ pmap_update(pmap_kernel());
+
+ /*
+ * Allocate a submap for exec arguments. This map effectively
+ * limits the number of processes exec'ing at any time.
+ */
+ exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+ 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
+
+ /*
+ * Allocate a submap for physio
+ */
+ phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+ VM_PHYS_SIZE, 0, FALSE, NULL);
+
+ /*
+ * Set up buffers, so they can be used to read disk labels.
+ */
+ bufinit();
+
+ printf("avail mem = %lu (%uK)\n", ptoa(uvmexp.free),
+ ptoa(uvmexp.free)/1024);
+ printf("using %d buffers containing %u bytes (%uK) of memory\n",
+ nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024);
+
+ curpcb = &proc0.p_addr->u_pcb;
+ curpcb->pcb_flags = 0;
+ curpcb->pcb_un.un_32.pcb32_und_sp = (u_int)proc0.p_addr +
+ USPACE_UNDEF_STACK_TOP;
+ curpcb->pcb_un.un_32.pcb32_sp = (u_int)proc0.p_addr +
+ USPACE_SVC_STACK_TOP;
+ pmap_set_pcb_pagedir(pmap_kernel(), curpcb);
+
+ curpcb->pcb_tf = (struct trapframe *)curpcb->pcb_un.un_32.pcb32_sp - 1;
+}
+
+/*
+ * machine dependent system variables.
+ */
+
+int
+cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
+ int *name;
+ u_int namelen;
+ void *oldp;
+ size_t *oldlenp;
+ void *newp;
+ size_t newlen;
+ struct proc *p;
+{
+ /* all sysctl names at this level are terminal */
+ if (namelen != 1)
+ return (ENOTDIR); /* overloaded */
+
+ switch (name[0]) {
+ case CPU_DEBUG:
+ return(sysctl_int(oldp, oldlenp, newp, newlen, &kernel_debug));
+
+#if 0
+ case CPU_BOOTED_DEVICE:
+ if (booted_device != NULL)
+ return (sysctl_rdstring(oldp, oldlenp, newp,
+ booted_device->dv_xname));
+ return (EOPNOTSUPP);
+#endif
+
+ case CPU_CONSDEV: {
+ dev_t consdev;
+ if (cn_tab != NULL)
+ consdev = cn_tab->cn_dev;
+ else
+ consdev = NODEV;
+ return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
+ sizeof consdev));
+ }
+ case CPU_BOOTED_KERNEL: {
+ if (booted_kernel != NULL && booted_kernel[0] != '\0')
+ return sysctl_rdstring(oldp, oldlenp, newp,
+ booted_kernel);
+ return (EOPNOTSUPP);
+ }
+
+ default:
+ return (EOPNOTSUPP);
+ }
+ /* NOTREACHED */
+}
+
+#if 0
+/*
+ * machine dependent system variables.
+ */
+static int
+sysctl_machdep_booted_device(SYSCTLFN_ARGS)
+{
+ struct sysctlnode node;
+
+ if (booted_device == NULL)
+ return (EOPNOTSUPP);
+
+ node = *rnode;
+ node.sysctl_data = booted_device->dv_xname;
+ node.sysctl_size = strlen(booted_device->dv_xname) + 1;
+ return (sysctl_lookup(SYSCTLFN_CALL(&node)));
+}
+
+static int
+sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
+{
+ struct sysctlnode node;
+
+ if (booted_kernel == NULL || booted_kernel[0] == '\0')
+ return (EOPNOTSUPP);
+
+ node = *rnode;
+ node.sysctl_data = booted_kernel;
+ node.sysctl_size = strlen(booted_kernel) + 1;
+ return (sysctl_lookup(SYSCTLFN_CALL(&node)));
+}
+
+static int
+sysctl_machdep_powersave(SYSCTLFN_ARGS)
+{
+ struct sysctlnode node = *rnode;
+ int error, newval;
+
+ newval = cpu_do_powersave;
+ node.sysctl_data = &newval;
+ if (cpufuncs.cf_sleep == (void *) cpufunc_nullop)
+ node.sysctl_flags &= ~SYSCTL_READWRITE;
+ error = sysctl_lookup(SYSCTLFN_CALL(&node));
+ if (error || newp == NULL || newval == cpu_do_powersave)
+ return (error);
+
+ if (newval < 0 || newval > 1)
+ return (EINVAL);
+ cpu_do_powersave = newval;
+
+ return (0);
+}
+
+SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
+{
+
+ sysctl_createv(SYSCTL_PERMANENT,
+ CTLTYPE_NODE, "machdep", NULL,
+ NULL, 0, NULL, 0,
+ CTL_MACHDEP, CTL_EOL);
+
+ sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
+ CTLTYPE_INT, "debug", NULL,
+ NULL, 0, &kernel_debug, 0,
+ CTL_MACHDEP, CPU_DEBUG, CTL_EOL);
+ sysctl_createv(SYSCTL_PERMANENT,
+ CTLTYPE_STRING, "booted_device", NULL,
+ sysctl_machdep_booted_device, 0, NULL, 0,
+ CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
+ sysctl_createv(SYSCTL_PERMANENT,
+ CTLTYPE_STRING, "booted_kernel", NULL,
+ sysctl_machdep_booted_kernel, 0, NULL, 0,
+ CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
+ sysctl_createv(SYSCTL_PERMANENT,
+ CTLTYPE_STRUCT, "console_device", NULL,
+ sysctl_consdev, 0, NULL, sizeof(dev_t),
+ CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
+ sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
+ CTLTYPE_INT, "powersave", NULL,
+ sysctl_machdep_powersave, 0, &cpu_do_powersave, 0,
+ CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
+}
+#endif
+
+#if 0
+void
+parse_mi_bootargs(args)
+ char *args;
+{
+ int integer;
+
+ if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer)
+ || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer))
+ if (integer)
+ boothowto |= RB_SINGLE;
+ if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer)
+ || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer))
+ if (integer)
+ boothowto |= RB_KDB;
+ if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer)
+ || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer))
+ if (integer)
+ boothowto |= RB_ASKNAME;
+
+#ifdef PMAP_DEBUG
+ if (get_bootconf_option(args, "pmapdebug", BOOTOPT_TYPE_INT, &integer)) {
+ pmap_debug_level = integer;
+ pmap_debug(pmap_debug_level);
+ }
+#endif /* PMAP_DEBUG */
+
+/* if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer))
+ bufpages = integer;*/
+
+#if NMD > 0 && defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
+ if (get_bootconf_option(args, "memorydisc", BOOTOPT_TYPE_INT, &integer)
+ || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) {
+ md_root_size = integer;
+ md_root_size *= 1024;
+ if (md_root_size < 32*1024)
+ md_root_size = 32*1024;
+ if (md_root_size > 2048*1024)
+ md_root_size = 2048*1024;
+ }
+#endif /* NMD && MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */
+
+ if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer)
+ || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer))
+ if (integer)
+ boothowto |= AB_QUIET;
+ if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer)
+ || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer))
+ if (integer)
+ boothowto |= AB_VERBOSE;
+}
+#endif
+
+/*
+ * Allocate space for system data structures. We are given
+ * a starting virtual address and we return a final virtual
+ * address; along the way we set each data structure pointer.
+ *
+ * We call allocsys() with 0 to find out how much space we want,
+ * allocate that much and fill it with zeroes, and then call
+ * allocsys() again with the correct base virtual address.
+ */
+caddr_t
+allocsys(caddr_t v)
+{
+
+#define valloc(name, type, num) \
+ v = (caddr_t)(((name) = (type *)v) + (num))
+
+#ifdef SYSVMSG
+ valloc(msgpool, char, msginfo.msgmax);
+ valloc(msgmaps, struct msgmap, msginfo.msgseg);
+ valloc(msghdrs, struct msg, msginfo.msgtql);
+ valloc(msqids, struct msqid_ds, msginfo.msgmni);
+#endif
+ /*
+ * Determine how many buffers to allocate. We use 10% of the
+ * first 2MB of memory, and 5% of the rest, with a minimum of 16
+ * buffers. We allocate 1/2 as many swap buffer headers as file
+ * i/o buffers.
+ */
+ if (bufpages == 0) {
+ if (physmem < btoc(2 * 1024 * 1024))
+ bufpages = physmem / 10;
+ else
+ bufpages = (btoc(2 * 1024 * 1024) + physmem) *
+ bufcachepercent / 100;
+ }
+ if (nbuf == 0) {
+ nbuf = bufpages;
+ if (nbuf < 16)
+ nbuf = 16;
+ }
+
+ /* Restrict to at most 35% filled kvm */
+ /* XXX - This needs UBC... */
+ if (nbuf >
+ (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / MAXBSIZE * 35 / 100)
+ nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
+ MAXBSIZE * 35 / 100;
+
+ /* More buffer pages than fits into the buffers is senseless. */
+ if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE)
+ bufpages = nbuf * MAXBSIZE / PAGE_SIZE;
+
+ valloc(buf, struct buf, nbuf);
+ return v;
+}
diff --git a/sys/arch/arm/arm/arm_machdep.c b/sys/arch/arm/arm/arm_machdep.c
new file mode 100644
index 00000000000..c526411b1ec
--- /dev/null
+++ b/sys/arch/arm/arm/arm_machdep.c
@@ -0,0 +1,272 @@
+/* $OpenBSD: arm_machdep.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: arm_machdep.c,v 1.7 2003/10/25 19:44:42 scw Exp $ */
+
+/*
+ * Copyright (c) 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+
+#include <sys/exec.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/user.h>
+#include <sys/pool.h>
+
+#include <arm/cpufunc.h>
+
+#include <machine/pcb.h>
+#include <machine/vmparam.h>
+#include <machine/bus.h>
+
+static __inline struct trapframe *
+process_frame(struct proc *p)
+{
+
+ return (p->p_addr->u_pcb.pcb_tf);
+}
+
+/*
+ * The ARM architecture places the vector page at address 0.
+ * Later ARM architecture versions, however, allow it to be
+ * relocated to a high address (0xffff0000). This is primarily
+ * to support the Fast Context Switch Extension.
+ *
+ * This variable contains the address of the vector page. It
+ * defaults to 0; it only needs to be initialized if we enable
+ * relocated vectors.
+ */
+vaddr_t vector_page;
+
+/*
+ * Clear registers on exec
+ */
+
+void
+setregs(struct proc *p, struct exec_package *pack, u_long stack,
+ register_t *retval)
+{
+ struct trapframe *tf;
+
+ tf = p->p_addr->u_pcb.pcb_tf;
+
+ memset(tf, 0, sizeof(*tf));
+/* tf->tf_r0 = (u_int)p->p_proc->p_psstr; */
+ tf->tf_usr_sp = stack;
+ tf->tf_usr_lr = pack->ep_entry;
+ tf->tf_svc_lr = 0x77777777; /* Something we can see */
+ tf->tf_pc = pack->ep_entry;
+#ifdef __PROG32
+ tf->tf_spsr = PSR_USR32_MODE;
+#endif
+
+ p->p_addr->u_pcb.pcb_flags = 0;
+ retval[1] = 0;
+}
+
+#if 0
+/*
+ * startlwp:
+ *
+ * Start a new LWP.
+ */
+void
+startlwp(void *arg)
+{
+ int err;
+ ucontext_t *uc = arg;
+ struct lwp *l = curlwp;
+
+ err = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
+#ifdef DIAGNOSTIC
+ if (err)
+ printf("Error %d from cpu_setmcontext.", err);
+#endif
+ pool_put(&lwp_uc_pool, uc);
+
+ userret(l);
+}
+
+/*
+ * XXX This is a terrible name.
+ */
+void
+upcallret(struct lwp *l)
+{
+
+ userret(l);
+}
+
+/*
+ * cpu_upcall:
+ *
+ * Send an an upcall to userland.
+ */
+void
+cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted, void *sas,
+ void *ap, void *sp, sa_upcall_t upcall)
+{
+ struct trapframe *tf;
+ struct saframe *sf, frame;
+
+ tf = process_frame(l);
+
+ /* Finally, copy out the rest of the frame. */
+#if 0 /* First 4 args in regs (see below). */
+ frame.sa_type = type;
+ frame.sa_sas = sas;
+ frame.sa_events = nevents;
+ frame.sa_interrupted = ninterrupted;
+#endif
+ frame.sa_arg = ap;
+
+ sf = (struct saframe *)sp - 1;
+ if (copyout(&frame, sf, sizeof(frame)) != 0) {
+ /* Copying onto the stack didn't work. Die. */
+ sigexit(l, SIGILL);
+ /* NOTREACHED */
+ }
+
+ tf->tf_r0 = type;
+ tf->tf_r1 = (int) sas;
+ tf->tf_r2 = nevents;
+ tf->tf_r3 = ninterrupted;
+ tf->tf_pc = (int) upcall;
+ tf->tf_usr_sp = (int) sf;
+ tf->tf_usr_lr = 0; /* no return */
+}
+#endif
+
+
+#define _CONCAT(A,B) A ## B
+#define __C(A,B) _CONCAT(A,B)
+
+#define BUS_SPACE_COPY_N(BYTES,TYPE) \
+void \
+__C(bus_space_copy_,BYTES)(bus_space_tag_t bst, bus_space_handle_t h1, \
+ bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, \
+ bus_size_t c) \
+{ \
+ int i; \
+ \
+ if (h1 == h2 && o2 > o1) \
+ for (i = c-1; i >= 0; i--) \
+ __C(bus_space_write_,BYTES)(bst, h2, o2+(BYTES*i), \
+ __C(bus_space_read_,BYTES)(bst, h1, o1+(BYTES*i))); \
+ else \
+ for (i = 0; i < c; i++) \
+ __C(bus_space_write_,BYTES)(bst, h2, o2+(BYTES*i), \
+ __C(bus_space_read_,BYTES)(bst, h1, o1+(BYTES*i))); \
+}
+BUS_SPACE_COPY_N(1,u_int8_t)
+BUS_SPACE_COPY_N(2,u_int16_t)
+BUS_SPACE_COPY_N(4,u_int32_t)
+
+
+
+#if 0
+#define BUS_SPACE_READ_RAW_MULTI_N(BYTES,SHIFT,TYPE) \
+void \
+__C(bus_space_read_raw_multi_,BYTES)(bus_space_tag_t bst, \
+ bus_space_handle_t h, bus_addr_t o, u_int8_t *dst, bus_size_t size) \
+{ \
+ TYPE *src; \
+ TYPE *rdst = (TYPE *)dst; \
+ int i; \
+ int count = size >> SHIFT; \
+ \
+ src = (TYPE *)(h+o); \
+ for (i = 0; i < count; i++) { \
+ rdst[i] = *src; \
+ } \
+}
+BUS_SPACE_READ_RAW_MULTI_N(2,1,u_int16_t)
+BUS_SPACE_READ_RAW_MULTI_N(4,2,u_int32_t)
+
+#define BUS_SPACE_WRITE_RAW_MULTI_N(BYTES,SHIFT,TYPE) \
+void \
+__C(bus_space_write_raw_multi_,BYTES)( bus_space_tag_t bst, \
+ bus_space_handle_t h, bus_addr_t o, const u_int8_t *src, \
+ bus_size_t size) \
+{ \
+ int i; \
+ TYPE *dst; \
+ TYPE *rsrc = (TYPE *)src; \
+ int count = size >> SHIFT; \
+ \
+ dst = (TYPE *)(h+o); \
+ for (i = 0; i < count; i++) { \
+ *dst = rsrc[i]; \
+ } \
+}
+
+BUS_SPACE_WRITE_RAW_MULTI_N(2,1,u_int16_t)
+BUS_SPACE_WRITE_RAW_MULTI_N(4,2,u_int32_t)
+#endif
diff --git a/sys/arch/arm/arm/ast.c b/sys/arch/arm/arm/ast.c
new file mode 100644
index 00000000000..c10b0d006e9
--- /dev/null
+++ b/sys/arch/arm/arm/ast.c
@@ -0,0 +1,132 @@
+/* $OpenBSD: ast.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: ast.c,v 1.6 2003/10/31 16:44:34 cl Exp $ */
+
+/*
+ * Copyright (c) 1994,1995 Mark Brinicombe
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the RiscBSD team.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * ast.c
+ *
+ * Code to handle ast's and returns to user mode
+ *
+ * Created : 11/10/94
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/acct.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/signal.h>
+#include <sys/signalvar.h>
+#include <sys/vmmeter.h>
+
+#include <machine/cpu.h>
+#include <machine/frame.h>
+#include <machine/cpu.h>
+
+#include <arm/cpufunc.h>
+
+#include <uvm/uvm_extern.h>
+
+#ifdef acorn26
+#include <machine/machdep.h>
+#endif
+
+/*
+ * Prototypes
+ */
+void ast __P((struct trapframe *));
+
+int want_resched = 0;
+extern int astpending;
+
+/*
+ * userret(p): common return-to-user-mode path.
+ *
+ * Delivers every signal that became pending while the process was in
+ * the kernel, then forces a pmap update before user code resumes.
+ */
+void
+userret(struct proc *p)
+{
+ int sig;
+
+ /* Take pending signals. */
+ while ((sig = (CURSIG(p))) != 0)
+ postsig(sig);
+
+ #if 0
+ /* XXX */
+ curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
+ #endif
+ pmap_update(p->p_vmspace->vm_map.pmap); /* XXX DSR help stability */
+}
+
+
+/*
+ * Handle asynchronous system traps.
+ * This is called from the irq handler to deliver signals
+ * and switch processes if required.
+ */
+
+/*
+ * ast(tf): handle an asynchronous system trap on the way back to
+ * user mode.  Called from the exception return path to charge any
+ * owed profiling ticks, allow a forced reschedule, and finish with
+ * the common userret() signal delivery.
+ */
+void
+ast(struct trapframe *tf)
+{
+ struct proc *p = curproc; /* fixed stray ";;" empty statement */
+
+#ifdef acorn26
+ /* Enable interrupts if they were enabled before the trap. */
+ if ((tf->tf_r15 & R15_IRQ_DISABLE) == 0)
+ int_on();
+#else
+ /* Interrupts were restored by exception_exit. */
+#endif
+
+ uvmexp.traps++;
+ uvmexp.softs++;
+
+#ifdef DEBUG
+ if (p == NULL)
+ panic("ast: no curproc!");
+ /*
+ * Test the pointer itself: the old "&p->p_addr->u_pcb == 0"
+ * took the address of a member and so could never be NULL.
+ */
+ if (p->p_addr == NULL)
+ panic("ast: no pcb!");
+#endif
+
+ /* Charge owed profiling ticks accumulated while in the kernel. */
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ ADDUPROF(p);
+ }
+
+ /* Allow a forced task switch. */
+ if (want_resched)
+ preempt(0);
+
+ userret(p);
+}
+
+/* End of ast.c */
diff --git a/sys/arch/arm/arm/atomic.S b/sys/arch/arm/arm/atomic.S
new file mode 100644
index 00000000000..d5f64a656c0
--- /dev/null
+++ b/sys/arch/arm/arm/atomic.S
@@ -0,0 +1,73 @@
+/* $OpenBSD: atomic.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: atomic.S,v 1.1 2002/10/19 12:46:57 bsh Exp $ */
+
+
+/*
+ * Copyright (C) 1994-1997 Mark Brinicombe
+ * Copyright (C) 1994 Brini
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of Brini may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "assym.h"
+#include <machine/asm.h>
+#include <machine/atomic.h>
+#include <machine/cpu.h>
+
+#ifdef ATOMIC_SET_BIT_NONINLINE_REQUIRED
+/*
+ * Atomic bit set and clear functions
+ */
+
+#undef atomic_set_bit
+/*
+ * atomic_set_bit(ptr, mask): *ptr |= mask.
+ * In:  r0 = ptr, r1 = mask.  Clobbers r2, r3.
+ * Atomicity comes from masking IRQs (I32_bit) around the
+ * read-modify-write, then restoring the caller's CPSR.
+ */
+ENTRY(atomic_set_bit)
+ mrs r2, cpsr /* r2 = saved CPSR */
+ orr r3, r2, #(I32_bit)
+ msr cpsr_all, r3 /* disable IRQs */
+
+ ldr r3, [r0]
+ orr r3, r3, r1 /* set requested bits */
+ str r3, [r0]
+
+ msr cpsr_all, r2 /* restore interrupt state */
+ mov pc, lr
+
+#undef atomic_clear_bit
+/*
+ * atomic_clear_bit(ptr, mask): *ptr &= ~mask.
+ * In:  r0 = ptr, r1 = mask.  Clobbers r2, r3.
+ * Same IRQ-masking scheme as atomic_set_bit above.
+ */
+ENTRY(atomic_clear_bit)
+ mrs r2, cpsr /* r2 = saved CPSR */
+ orr r3, r2, #(I32_bit)
+ msr cpsr_all, r3 /* disable IRQs */
+
+ ldr r3, [r0]
+ bic r3, r3, r1 /* clear requested bits */
+ str r3, [r0]
+
+ msr cpsr_all, r2 /* restore interrupt state */
+ mov pc, lr
+
+#endif /* ATOMIC_SET_BIT_NONINLINE_REQUIRED */
diff --git a/sys/arch/arm/arm/bcopy_page.S b/sys/arch/arm/arm/bcopy_page.S
new file mode 100644
index 00000000000..3c8f1ae2ce6
--- /dev/null
+++ b/sys/arch/arm/arm/bcopy_page.S
@@ -0,0 +1,276 @@
+/* $OpenBSD: bcopy_page.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: bcopy_page.S,v 1.7 2003/10/13 21:03:13 scw Exp $ */
+
+
+/*
+ * Copyright (c) 1995 Scott Stevens
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Scott Stevens.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * bcopy_page.S
+ *
+ * page optimised bcopy and bzero routines
+ *
+ * Created : 08/04/95
+ */
+
+#include <machine/asm.h>
+
+#include "assym.h"
+
+#ifndef __XSCALE__
+
+/* #define BIG_LOOPS */
+
+/*
+ * bcopy_page(src, dest)
+ *
+ * Optimised copy page routine.
+ *
+ * On entry:
+ * r0 - src address
+ * r1 - dest address
+ *
+ * Requires:
+ * number of bytes per page (PAGE_SIZE) is a multiple of 512 (BIG_LOOPS), 128
+ * otherwise.
+ */
+
+#define CHUNK_SIZE 32
+
+#define PREFETCH_FIRST_CHUNK /* nothing */
+#define PREFETCH_NEXT_CHUNK /* nothing */
+
+#ifndef COPY_CHUNK
+#define COPY_CHUNK \
+ PREFETCH_NEXT_CHUNK ; \
+ ldmia r0!, {r3-r8,ip,lr} ; \
+ stmia r1!, {r3-r8,ip,lr}
+#endif /* ! COPY_CHUNK */
+
+#ifndef SAVE_REGS
+#define SAVE_REGS stmfd sp!, {r4-r8, lr}
+#define RESTORE_REGS ldmfd sp!, {r4-r8, pc}
+#endif
+
+ENTRY(bcopy_page)
+ PREFETCH_FIRST_CHUNK
+ SAVE_REGS
+#ifdef BIG_LOOPS
+ mov r2, #(PAGE_SIZE >> 9) /* r2 = passes; 512 bytes/pass */
+#else
+ mov r2, #(PAGE_SIZE >> 7) /* r2 = passes; 128 bytes/pass */
+#endif
+
+1:
+ /* Each COPY_CHUNK moves 32 bytes (8 registers: r3-r8,ip,lr). */
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+
+#ifdef BIG_LOOPS
+ /* There is little point making the loop any larger; unless we are
+ running with the cache off, the load/store overheads will
+ completely dominate this loop. */
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+ COPY_CHUNK
+#endif
+ subs r2, r2, #1 /* one pass done */
+ bne 1b
+
+ RESTORE_REGS /* ...and return (pops pc). */
+
+/*
+ * bzero_page(dest)
+ *
+ * Optimised zero page routine.
+ *
+ * On entry:
+ * r0 - dest address
+ *
+ * Requires:
+ * number of bytes per page (PAGE_SIZE) is a multiple of 512 (BIG_LOOPS), 128
+ * otherwise
+ */
+
+ENTRY(bzero_page)
+ stmfd sp!, {r4-r8, lr}
+#ifdef BIG_LOOPS
+ mov r2, #(PAGE_SIZE >> 9) /* r2 = passes; 512 bytes/pass */
+#else
+ mov r2, #(PAGE_SIZE >> 7) /* r2 = passes; 128 bytes/pass */
+#endif
+ /* Zero all eight store registers once, up front. */
+ mov r3, #0
+ mov r4, #0
+ mov r5, #0
+ mov r6, #0
+ mov r7, #0
+ mov r8, #0
+ mov ip, #0
+ mov lr, #0
+
+1:
+ /* Each stmia clears 32 bytes. */
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+
+#ifdef BIG_LOOPS
+ /* There is little point making the loop any larger; unless we are
+ running with the cache off, the load/store overheads will
+ completely dominate this loop. */
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+ stmia r0!, {r3-r8,ip,lr}
+
+#endif
+
+ subs r2, r2, #1 /* one pass done */
+ bne 1b
+
+ ldmfd sp!, {r4-r8, pc} /* restore and return */
+
+#else /* __XSCALE__ */
+
+/*
+ * XSCALE version of bcopy_page
+ */
+/*
+ * In: r0 = src, r1 = dst.  Copies one page in 128-byte passes
+ * (ip = 32 iterations).  Loads run two words ahead of the strd
+ * stores; strd requires an even/odd register pair (r2/r3, r4/r5)
+ * and a 64-bit-aligned destination.  pld keeps the read stream
+ * one cacheline ahead.
+ */
+ENTRY(bcopy_page)
+ pld [r0]
+ stmfd sp!, {r4, r5}
+ mov ip, #32
+ ldr r2, [r0], #0x04 /* 0x00 */
+ ldr r3, [r0], #0x04 /* 0x04 */
+1: pld [r0, #0x18] /* Prefetch 0x20 */
+ ldr r4, [r0], #0x04 /* 0x08 */
+ ldr r5, [r0], #0x04 /* 0x0c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x10 */
+ ldr r3, [r0], #0x04 /* 0x14 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x18 */
+ ldr r5, [r0], #0x04 /* 0x1c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x20 */
+ ldr r3, [r0], #0x04 /* 0x24 */
+ pld [r0, #0x18] /* Prefetch 0x40 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x28 */
+ ldr r5, [r0], #0x04 /* 0x2c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x30 */
+ ldr r3, [r0], #0x04 /* 0x34 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x38 */
+ ldr r5, [r0], #0x04 /* 0x3c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x40 */
+ ldr r3, [r0], #0x04 /* 0x44 */
+ pld [r0, #0x18] /* Prefetch 0x60 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x48 */
+ ldr r5, [r0], #0x04 /* 0x4c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x50 */
+ ldr r3, [r0], #0x04 /* 0x54 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x58 */
+ ldr r5, [r0], #0x04 /* 0x5c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x60 */
+ ldr r3, [r0], #0x04 /* 0x64 */
+ pld [r0, #0x18] /* Prefetch 0x80 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x68 */
+ ldr r5, [r0], #0x04 /* 0x6c */
+ strd r2, [r1], #0x08
+ ldr r2, [r0], #0x04 /* 0x70 */
+ ldr r3, [r0], #0x04 /* 0x74 */
+ strd r4, [r1], #0x08
+ ldr r4, [r0], #0x04 /* 0x78 */
+ ldr r5, [r0], #0x04 /* 0x7c */
+ strd r2, [r1], #0x08
+ subs ip, ip, #0x01
+ ldrgt r2, [r0], #0x04 /* 0x80 */
+ ldrgt r3, [r0], #0x04 /* 0x84 */
+ strd r4, [r1], #0x08
+ bgt 1b
+ ldmfd sp!, {r4, r5}
+ mov pc, lr
+
+/*
+ * XSCALE version of bzero_page
+ */
+/*
+ * In: r0 = dst.  Zeroes one page 128 bytes per pass using strd
+ * with the zeroed even/odd pair r2/r3 (8 bytes per store).
+ * r1 counts bytes remaining.
+ */
+ENTRY(bzero_page)
+ mov r1, #PAGE_SIZE
+ mov r2, #0
+ mov r3, #0
+1: strd r2, [r0], #8 /* 32 */
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8 /* 64 */
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8 /* 96 */
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8 /* 128 */
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ strd r2, [r0], #8
+ subs r1, r1, #128
+ bne 1b
+ mov pc, lr
+#endif /* __XSCALE__ */
diff --git a/sys/arch/arm/arm/bcopyinout.S b/sys/arch/arm/arm/bcopyinout.S
new file mode 100644
index 00000000000..684a4d3ac68
--- /dev/null
+++ b/sys/arch/arm/arm/bcopyinout.S
@@ -0,0 +1,815 @@
+/* $OpenBSD: bcopyinout.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: bcopyinout.S,v 1.13 2003/10/31 16:54:05 scw Exp $ */
+
+/*
+ * Copyright (c) 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Allen Briggs for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "assym.h"
+
+#include <machine/asm.h>
+
+#ifdef __XSCALE__
+#include "bcopyinout_xscale.S"
+#else
+
+ .text
+ .align 0
+
+#ifdef MULTIPROCESSOR
+.Lcpu_info:
+ .word _C_LABEL(cpu_info)
+#else
+.Lcurpcb:
+ .word _C_LABEL(curpcb)
+#endif
+
+#ifdef __PROG32
+#define SAVE_REGS stmfd sp!, {r4-r11}
+#define RESTORE_REGS ldmfd sp!, {r4-r11}
+#else
+/* Need to save R14_svc because it'll get trampled if we take a page fault. */
+#define SAVE_REGS stmfd sp!, {r4-r11, r14}
+#define RESTORE_REGS ldmfd sp!, {r4-r11, r14}
+#endif
+
+#if defined(__XSCALE__)
+#define HELLOCPP #
+#define PREFETCH(rx,o) pld [ rx , HELLOCPP (o) ]
+#else
+#define PREFETCH(rx,o)
+#endif
+
+/*
+ * r0 = user space address
+ * r1 = kernel space address
+ * r2 = length
+ *
+ * Copies bytes from user space to kernel space
+ *
+ * We save/restore r4-r11:
+ * r4-r11 are scratch
+ */
+/*
+ * Register roles throughout: r0 = src (user), r1 = dst (kernel),
+ * r2 = bytes remaining, r4 = curpcb, r5 = saved pcb_onfault,
+ * r6-r11 = data scratch.  User reads use ldrt (unprivileged);
+ * faults vector through pcb_onfault to .Lcopyfault.
+ */
+ENTRY(copyin)
+ /* Quick exit if length is zero */
+ teq r2, #0
+ moveq r0, #0
+ moveq pc, lr
+
+ SAVE_REGS
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r2, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r4, .Lcpu_info
+ ldr r4, [r4, r0, lsl #2]
+ ldr r4, [r4, #CI_CURPCB]
+ ldmfd sp!, {r0-r2, r14}
+#else
+ ldr r4, .Lcurpcb
+ ldr r4, [r4]
+#endif
+
+ /* Arm the fault handler; r5 holds the previous onfault to restore. */
+ ldr r5, [r4, #PCB_ONFAULT]
+ adr r3, .Lcopyfault
+ str r3, [r4, #PCB_ONFAULT]
+
+ PREFETCH(r0, 0)
+ PREFETCH(r1, 0)
+
+ /*
+ * If not too many bytes, take the slow path.
+ */
+ cmp r2, #0x08
+ blt .Licleanup
+
+ /*
+ * Align destination to word boundary.
+ */
+ and r6, r1, #0x3
+ /* Computed jump: in ARM state pc reads as .+8, i.e. the table base. */
+ ldr pc, [pc, r6, lsl #2]
+ b .Lialend
+ .word .Lialend
+ .word .Lial3
+ .word .Lial2
+ .word .Lial1
+.Lial3: ldrbt r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lial2: ldrbt r7, [r0], #1
+ sub r2, r2, #1
+ strb r7, [r1], #1
+.Lial1: ldrbt r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lialend:
+
+ /*
+ * If few bytes left, finish slow.
+ */
+ cmp r2, #0x08
+ blt .Licleanup
+
+ /*
+ * If source is not aligned, finish slow.
+ */
+ ands r3, r0, #0x03
+ bne .Licleanup
+
+ cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */
+ blt .Licleanup8
+
+ /*
+ * Align destination to cacheline boundary.
+ * If source and destination are nicely aligned, this can be a big
+ * win. If not, it's still cheaper to copy in groups of 32 even if
+ * we don't get the nice cacheline alignment.
+ */
+ and r6, r1, #0x1f
+ /* Computed jump keyed on dst mod 32 (word-aligned, so /4 entries). */
+ ldr pc, [pc, r6]
+ b .Licaligned
+ .word .Licaligned
+ .word .Lical28
+ .word .Lical24
+ .word .Lical20
+ .word .Lical16
+ .word .Lical12
+ .word .Lical8
+ .word .Lical4
+.Lical28:ldrt r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lical24:ldrt r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lical20:ldrt r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lical16:ldrt r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lical12:ldrt r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lical8:ldrt r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lical4:ldrt r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+
+ /*
+ * We start with > 0x40 bytes to copy (>= 0x60 got us into this
+ * part of the code, and we may have knocked that down by as much
+ * as 0x1c getting aligned).
+ *
+ * This loop basically works out to:
+ * do {
+ * prefetch-next-cacheline(s)
+ * bytes -= 0x20;
+ * copy cacheline
+ * } while (bytes >= 0x40);
+ * bytes -= 0x20;
+ * copy cacheline
+ */
+.Licaligned:
+ PREFETCH(r0, 32)
+ PREFETCH(r1, 32)
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldrt r10, [r0], #4
+ ldrt r11, [r0], #4
+ ldrt r6, [r0], #4
+ ldrt r7, [r0], #4
+ ldrt r8, [r0], #4
+ ldrt r9, [r0], #4
+ stmia r1!, {r10-r11}
+ ldrt r10, [r0], #4
+ ldrt r11, [r0], #4
+ stmia r1!, {r6-r11}
+
+ cmp r2, #0x40
+ bge .Licaligned
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldrt r10, [r0], #4
+ ldrt r11, [r0], #4
+ ldrt r6, [r0], #4
+ ldrt r7, [r0], #4
+ ldrt r8, [r0], #4
+ ldrt r9, [r0], #4
+ stmia r1!, {r10-r11}
+ ldrt r10, [r0], #4
+ ldrt r11, [r0], #4
+ stmia r1!, {r6-r11}
+
+ cmp r2, #0x08
+ blt .Liprecleanup
+
+.Licleanup8:
+ ldrt r8, [r0], #4
+ ldrt r9, [r0], #4
+ sub r2, r2, #8
+ stmia r1!, {r8, r9}
+ cmp r2, #8
+ bge .Licleanup8
+
+.Liprecleanup:
+ /*
+ * If we're done, bail.
+ */
+ cmp r2, #0
+ /*
+ * NOTE(review): this branches to copyout's .Lout, not .Liout below.
+ * Harmless only because both epilogues are identical (r4/r5 roles
+ * match); .Liout is reached via the byte-cleanup fallthrough.
+ */
+ beq .Lout
+
+.Licleanup:
+ and r6, r2, #0x3
+ ldr pc, [pc, r6, lsl #2]
+ b .Licend
+ .word .Lic4
+ .word .Lic1
+ .word .Lic2
+ .word .Lic3
+.Lic4: ldrbt r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lic3: ldrbt r7, [r0], #1
+ sub r2, r2, #1
+ strb r7, [r1], #1
+.Lic2: ldrbt r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lic1: ldrbt r7, [r0], #1
+ subs r2, r2, #1
+ strb r7, [r1], #1
+.Licend:
+ bne .Licleanup
+
+.Liout:
+ mov r0, #0
+
+ str r5, [r4, #PCB_ONFAULT]
+ RESTORE_REGS
+
+ mov pc, lr
+
+/*
+ * Common fault landing pad for copyin/copyout/kcopy: the abort
+ * handler redirects here via pcb_onfault.  Restores the previous
+ * onfault value; r0 carries the error set by the fault handler
+ * (presumably EFAULT -- set outside this file, confirm in fault.c).
+ */
+.Lcopyfault:
+ str r5, [r4, #PCB_ONFAULT]
+ RESTORE_REGS
+
+ mov pc, lr
+
+/*
+ * r0 = kernel space address
+ * r1 = user space address
+ * r2 = length
+ *
+ * Copies bytes from kernel space to user space
+ *
+ * We save/restore r4-r11:
+ * r4-r11 are scratch
+ */
+
+/*
+ * Register roles: r0 = src (kernel), r1 = dst (user), r2 = bytes
+ * remaining, r4 = curpcb, r5 = saved pcb_onfault, r6-r11 = scratch.
+ * User writes use strt (unprivileged); faults vector to .Lcopyfault.
+ */
+ENTRY(copyout)
+ /* Quick exit if length is zero */
+ teq r2, #0
+ moveq r0, #0
+ moveq pc, lr
+
+ SAVE_REGS
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r2, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r4, .Lcpu_info
+ ldr r4, [r4, r0, lsl #2]
+ ldr r4, [r4, #CI_CURPCB]
+ ldmfd sp!, {r0-r2, r14}
+#else
+ ldr r4, .Lcurpcb
+ ldr r4, [r4]
+#endif
+
+ /* Arm the fault handler; r5 holds the previous onfault to restore. */
+ ldr r5, [r4, #PCB_ONFAULT]
+ adr r3, .Lcopyfault
+ str r3, [r4, #PCB_ONFAULT]
+
+ PREFETCH(r0, 0)
+ PREFETCH(r1, 0)
+
+ /*
+ * If not too many bytes, take the slow path.
+ */
+ cmp r2, #0x08
+ blt .Lcleanup
+
+ /*
+ * Align destination to word boundary.
+ */
+ and r6, r1, #0x3
+ /* Computed jump: in ARM state pc reads as .+8, i.e. the table base. */
+ ldr pc, [pc, r6, lsl #2]
+ b .Lalend
+ .word .Lalend
+ .word .Lal3
+ .word .Lal2
+ .word .Lal1
+.Lal3: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strbt r6, [r1], #1
+.Lal2: ldrb r7, [r0], #1
+ sub r2, r2, #1
+ strbt r7, [r1], #1
+.Lal1: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strbt r6, [r1], #1
+.Lalend:
+
+ /*
+ * If few bytes left, finish slow.
+ */
+ cmp r2, #0x08
+ blt .Lcleanup
+
+ /*
+ * If source is not aligned, finish slow.
+ */
+ ands r3, r0, #0x03
+ bne .Lcleanup
+
+ cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */
+ blt .Lcleanup8
+
+ /*
+ * Align source & destination to cacheline boundary.
+ */
+ and r6, r1, #0x1f
+ /* Computed jump keyed on dst mod 32 (word-aligned, so /4 entries). */
+ ldr pc, [pc, r6]
+ b .Lcaligned
+ .word .Lcaligned
+ .word .Lcal28
+ .word .Lcal24
+ .word .Lcal20
+ .word .Lcal16
+ .word .Lcal12
+ .word .Lcal8
+ .word .Lcal4
+.Lcal28:ldr r6, [r0], #4
+ sub r2, r2, #4
+ strt r6, [r1], #4
+.Lcal24:ldr r7, [r0], #4
+ sub r2, r2, #4
+ strt r7, [r1], #4
+.Lcal20:ldr r6, [r0], #4
+ sub r2, r2, #4
+ strt r6, [r1], #4
+.Lcal16:ldr r7, [r0], #4
+ sub r2, r2, #4
+ strt r7, [r1], #4
+.Lcal12:ldr r6, [r0], #4
+ sub r2, r2, #4
+ strt r6, [r1], #4
+.Lcal8: ldr r7, [r0], #4
+ sub r2, r2, #4
+ strt r7, [r1], #4
+.Lcal4: ldr r6, [r0], #4
+ sub r2, r2, #4
+ strt r6, [r1], #4
+
+ /*
+ * We start with > 0x40 bytes to copy (>= 0x60 got us into this
+ * part of the code, and we may have knocked that down by as much
+ * as 0x1c getting aligned).
+ *
+ * This loop basically works out to:
+ * do {
+ * prefetch-next-cacheline(s)
+ * bytes -= 0x20;
+ * copy cacheline
+ * } while (bytes >= 0x40);
+ * bytes -= 0x20;
+ * copy cacheline
+ */
+.Lcaligned:
+ PREFETCH(r0, 32)
+ PREFETCH(r1, 32)
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldmia r0!, {r6-r11}
+ strt r6, [r1], #4
+ strt r7, [r1], #4
+ ldmia r0!, {r6-r7}
+ strt r8, [r1], #4
+ strt r9, [r1], #4
+ strt r10, [r1], #4
+ strt r11, [r1], #4
+ strt r6, [r1], #4
+ strt r7, [r1], #4
+
+ cmp r2, #0x40
+ bge .Lcaligned
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldmia r0!, {r6-r11}
+ strt r6, [r1], #4
+ strt r7, [r1], #4
+ ldmia r0!, {r6-r7}
+ strt r8, [r1], #4
+ strt r9, [r1], #4
+ strt r10, [r1], #4
+ strt r11, [r1], #4
+ strt r6, [r1], #4
+ strt r7, [r1], #4
+
+ cmp r2, #0x08
+ blt .Lprecleanup
+
+.Lcleanup8:
+ ldmia r0!, {r8-r9}
+ sub r2, r2, #8
+ strt r8, [r1], #4
+ strt r9, [r1], #4
+ cmp r2, #8
+ bge .Lcleanup8
+
+.Lprecleanup:
+ /*
+ * If we're done, bail.
+ */
+ cmp r2, #0
+ beq .Lout
+
+.Lcleanup:
+ and r6, r2, #0x3
+ ldr pc, [pc, r6, lsl #2]
+ b .Lcend
+ .word .Lc4
+ .word .Lc1
+ .word .Lc2
+ .word .Lc3
+.Lc4: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strbt r6, [r1], #1
+.Lc3: ldrb r7, [r0], #1
+ sub r2, r2, #1
+ strbt r7, [r1], #1
+.Lc2: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strbt r6, [r1], #1
+.Lc1: ldrb r7, [r0], #1
+ subs r2, r2, #1
+ strbt r7, [r1], #1
+.Lcend:
+ bne .Lcleanup
+
+.Lout:
+ mov r0, #0
+
+ /* Success: disarm fault handler and return 0. */
+ str r5, [r4, #PCB_ONFAULT]
+ RESTORE_REGS
+
+ mov pc, lr
+
+/*
+ * r0 = kernel space source address
+ * r1 = kernel space destination address
+ * r2 = length
+ *
+ * Copies bytes from kernel space to kernel space, aborting on page fault
+ *
+ * Copy of copyout, but without the ldrt/strt instructions.
+ */
+
+/*
+ * Register roles: r0 = src, r1 = dst (both kernel), r2 = bytes
+ * remaining, r4 = curpcb, r5 = saved pcb_onfault, r6-r11 = scratch.
+ * Identical structure to copyout but with privileged ldm/stm, so a
+ * fault on either side lands in .Lcopyfault.
+ */
+ENTRY(kcopy)
+ /* Quick exit if length is zero */
+ teq r2, #0
+ moveq r0, #0
+ moveq pc, lr
+
+ SAVE_REGS
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r2, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r4, .Lcpu_info
+ ldr r4, [r4, r0, lsl #2]
+ ldr r4, [r4, #CI_CURPCB]
+ ldmfd sp!, {r0-r2, r14}
+#else
+ ldr r4, .Lcurpcb
+ ldr r4, [r4]
+#endif
+
+ /* Arm the fault handler; r5 holds the previous onfault to restore. */
+ ldr r5, [r4, #PCB_ONFAULT]
+ adr r3, .Lcopyfault
+ str r3, [r4, #PCB_ONFAULT]
+
+ PREFETCH(r0, 0)
+ PREFETCH(r1, 0)
+
+ /*
+ * If not too many bytes, take the slow path.
+ */
+ cmp r2, #0x08
+ blt .Lkcleanup
+
+ /*
+ * Align destination to word boundary.
+ */
+ and r6, r1, #0x3
+ /* Computed jump: in ARM state pc reads as .+8, i.e. the table base. */
+ ldr pc, [pc, r6, lsl #2]
+ b .Lkalend
+ .word .Lkalend
+ .word .Lkal3
+ .word .Lkal2
+ .word .Lkal1
+.Lkal3: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lkal2: ldrb r7, [r0], #1
+ sub r2, r2, #1
+ strb r7, [r1], #1
+.Lkal1: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lkalend:
+
+ /*
+ * If few bytes left, finish slow.
+ */
+ cmp r2, #0x08
+ blt .Lkcleanup
+
+ /*
+ * If source is not aligned, finish slow.
+ */
+ ands r3, r0, #0x03
+ bne .Lkcleanup
+
+ cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */
+ blt .Lkcleanup8
+
+ /*
+ * Align source & destination to cacheline boundary.
+ */
+ and r6, r1, #0x1f
+ /* Computed jump keyed on dst mod 32 (word-aligned, so /4 entries). */
+ ldr pc, [pc, r6]
+ b .Lkcaligned
+ .word .Lkcaligned
+ .word .Lkcal28
+ .word .Lkcal24
+ .word .Lkcal20
+ .word .Lkcal16
+ .word .Lkcal12
+ .word .Lkcal8
+ .word .Lkcal4
+.Lkcal28:ldr r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lkcal24:ldr r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lkcal20:ldr r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lkcal16:ldr r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lkcal12:ldr r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+.Lkcal8:ldr r7, [r0], #4
+ sub r2, r2, #4
+ str r7, [r1], #4
+.Lkcal4:ldr r6, [r0], #4
+ sub r2, r2, #4
+ str r6, [r1], #4
+
+ /*
+ * We start with > 0x40 bytes to copy (>= 0x60 got us into this
+ * part of the code, and we may have knocked that down by as much
+ * as 0x1c getting aligned).
+ *
+ * This loop basically works out to:
+ * do {
+ * prefetch-next-cacheline(s)
+ * bytes -= 0x20;
+ * copy cacheline
+ * } while (bytes >= 0x40);
+ * bytes -= 0x20;
+ * copy cacheline
+ */
+.Lkcaligned:
+ PREFETCH(r0, 32)
+ PREFETCH(r1, 32)
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldmia r0!, {r6-r11}
+ stmia r1!, {r6, r7}
+ ldmia r0!, {r6, r7}
+ stmia r1!, {r8-r11}
+ stmia r1!, {r6, r7}
+
+ cmp r2, #0x40
+ bge .Lkcaligned
+
+ sub r2, r2, #0x20
+
+ /* Copy a cacheline */
+ ldmia r0!, {r6-r11}
+ stmia r1!, {r6-r7}
+ ldmia r0!, {r6-r7}
+ stmia r1!, {r8-r11}
+ stmia r1!, {r6-r7}
+
+ cmp r2, #0x08
+ blt .Lkprecleanup
+
+.Lkcleanup8:
+ ldmia r0!, {r8-r9}
+ sub r2, r2, #8
+ stmia r1!, {r8-r9}
+ cmp r2, #8
+ bge .Lkcleanup8
+
+.Lkprecleanup:
+ /*
+ * If we're done, bail.
+ */
+ cmp r2, #0
+ beq .Lkout
+
+.Lkcleanup:
+ and r6, r2, #0x3
+ ldr pc, [pc, r6, lsl #2]
+ b .Lkcend
+ .word .Lkc4
+ .word .Lkc1
+ .word .Lkc2
+ .word .Lkc3
+.Lkc4: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lkc3: ldrb r7, [r0], #1
+ sub r2, r2, #1
+ strb r7, [r1], #1
+.Lkc2: ldrb r6, [r0], #1
+ sub r2, r2, #1
+ strb r6, [r1], #1
+.Lkc1: ldrb r7, [r0], #1
+ subs r2, r2, #1
+ strb r7, [r1], #1
+.Lkcend:
+ bne .Lkcleanup
+
+.Lkout:
+ mov r0, #0
+
+ /* Success: disarm fault handler and return 0. */
+ str r5, [r4, #PCB_ONFAULT]
+ RESTORE_REGS
+
+ mov pc, lr
+#endif /* !__XSCALE__ */
+
+#ifdef __PROG32
+/*
+ * int badaddr_read_1(const uint8_t *src, uint8_t *dest)
+ *
+ * Copies a single 8-bit value from src to dest, returning 0 on success,
+ * else EFAULT if a page fault occurred.
+ */
+/*
+ * In: r0 = src, r1 = dst.  Out: r0 = 0 on success, else the fault
+ * handler's error via pcb_onfault.  Clobbers r2, r3, ip.
+ */
+ENTRY(badaddr_read_1)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0-r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+ /* Point pcb_onfault at the restore path below (label 1:). */
+ ldr ip, [r2, #PCB_ONFAULT]
+ adr r3, 1f
+ str r3, [r2, #PCB_ONFAULT]
+ /* nop padding around the probe -- presumably to keep the faulting
+ access clear of the onfault window; confirm against fault.c. */
+ nop
+ nop
+ nop
+ ldrb r3, [r0]
+ nop
+ nop
+ nop
+ strb r3, [r1]
+ mov r0, #0 /* No fault */
+1: str ip, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+/*
+ * int badaddr_read_2(const uint16_t *src, uint16_t *dest)
+ *
+ * Copies a single 16-bit value from src to dest, returning 0 on success,
+ * else EFAULT if a page fault occurred.
+ */
+/*
+ * In: r0 = src, r1 = dst.  Out: r0 = 0 on success, else the fault
+ * handler's error via pcb_onfault.  Clobbers r2, r3, ip.
+ */
+ENTRY(badaddr_read_2)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0-r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+ /* Point pcb_onfault at the restore path below (label 1:). */
+ ldr ip, [r2, #PCB_ONFAULT]
+ adr r3, 1f
+ str r3, [r2, #PCB_ONFAULT]
+ nop
+ nop
+ nop
+ ldrh r3, [r0]
+ nop
+ nop
+ nop
+ strh r3, [r1]
+ mov r0, #0 /* No fault */
+1: str ip, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+/*
+ * int badaddr_read_4(const uint32_t *src, uint32_t *dest)
+ *
+ * Copies a single 32-bit value from src to dest, returning 0 on success,
+ * else EFAULT if a page fault occurred.
+ */
+/*
+ * In: r0 = src, r1 = dst.  Out: r0 = 0 on success, else the fault
+ * handler's error via pcb_onfault.  Clobbers r2, r3, ip.
+ */
+ENTRY(badaddr_read_4)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0-r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+ /* Point pcb_onfault at the restore path below (label 1:). */
+ ldr ip, [r2, #PCB_ONFAULT]
+ adr r3, 1f
+ str r3, [r2, #PCB_ONFAULT]
+ nop
+ nop
+ nop
+ ldr r3, [r0]
+ nop
+ nop
+ nop
+ str r3, [r1]
+ mov r0, #0 /* No fault */
+1: str ip, [r2, #PCB_ONFAULT]
+ mov pc, lr
+#endif /* __PROG32 */
diff --git a/sys/arch/arm/arm/blockio.S b/sys/arch/arm/arm/blockio.S
new file mode 100644
index 00000000000..590c3e6a7d4
--- /dev/null
+++ b/sys/arch/arm/arm/blockio.S
@@ -0,0 +1,588 @@
+/* $OpenBSD: blockio.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: blockio.S,v 1.5 2002/08/15 01:38:16 briggs Exp $ */
+
+/*
+ * Copyright (c) 2001 Ben Harris.
+ * Copyright (c) 1994 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * blockio.S
+ *
+ * optimised block read/write from/to IO routines.
+ *
+ * Created : 08/10/94
+ * Modified : 22/01/99 -- R.Earnshaw
+ * Faster, and small tweaks for StrongARM
+ */
+
+#include <machine/asm.h>
+
+RCSID("$NetBSD: blockio.S,v 1.4 2001/06/02 11:15:56 bjh21 Exp $")
+
+/*
+ * Read bytes from an I/O address into a block of memory
+ *
+ * r0 = address to read from (IO)
+ * r1 = address to write to (memory)
+ * r2 = length
+ */
+
+/* This code will look very familiar if you've read _memcpy(). */
+ENTRY(read_multi_1)
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+ subs r2, r2, #4 /* r2 = length - 4 */
+ blt .Lrm1_l4 /* less than 4 bytes */
+ ands r12, r1, #3
+ beq .Lrm1_main /* aligned destination */
+ rsb r12, r12, #4
+ cmp r12, #2
+ ldrb r3, [r0]
+ strb r3, [r1], #1
+ ldrgeb r3, [r0]
+ strgeb r3, [r1], #1
+ ldrgtb r3, [r0]
+ strgtb r3, [r1], #1
+ subs r2, r2, r12
+ blt .Lrm1_l4
+.Lrm1_main:
+.Lrm1loop:
+ ldrb r3, [r0]
+ ldrb r12, [r0]
+ orr r3, r3, r12, lsl #8
+ ldrb r12, [r0]
+ orr r3, r3, r12, lsl #16
+ ldrb r12, [r0]
+ orr r3, r3, r12, lsl #24
+ str r3, [r1], #4
+ subs r2, r2, #4
+ bge .Lrm1loop
+.Lrm1_l4:
+ adds r2, r2, #4 /* r2 = length again */
+ ldmeqdb fp, {fp, sp, pc}
+ moveq pc, r14
+ cmp r2, #2
+ ldrb r3, [r0]
+ strb r3, [r1], #1
+ ldrgeb r3, [r0]
+ strgeb r3, [r1], #1
+ ldrgtb r3, [r0]
+ strgtb r3, [r1], #1
+ ldmdb fp, {fp, sp, pc}
+
+/*
+ * Write bytes to an I/O address from a block of memory
+ *
+ * r0 = address to write to (IO)
+ * r1 = address to read from (memory)
+ * r2 = length
+ */
+
+/* This code will look very familiar if you've read _memcpy(). */
+ENTRY(write_multi_1)
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+ subs r2, r2, #4 /* r2 = length - 4 */
+ blt .Lwm1_l4 /* less than 4 bytes */
+ ands r12, r1, #3
+ beq .Lwm1_main /* aligned source */
+ rsb r12, r12, #4
+ cmp r12, #2
+ ldrb r3, [r1], #1
+ strb r3, [r0]
+ ldrgeb r3, [r1], #1
+ strgeb r3, [r0]
+ ldrgtb r3, [r1], #1
+ strgtb r3, [r0]
+ subs r2, r2, r12
+ blt .Lwm1_l4
+.Lwm1_main:
+.Lwm1loop:
+ ldr r3, [r1], #4
+ strb r3, [r0]
+ mov r3, r3, lsr #8
+ strb r3, [r0]
+ mov r3, r3, lsr #8
+ strb r3, [r0]
+ mov r3, r3, lsr #8
+ strb r3, [r0]
+ subs r2, r2, #4
+ bge .Lwm1loop
+.Lwm1_l4:
+ adds r2, r2, #4 /* r2 = length again */
+ ldmeqdb fp, {fp, sp, pc}
+ cmp r2, #2
+ ldrb r3, [r1], #1
+ strb r3, [r0]
+ ldrgeb r3, [r1], #1
+ strgeb r3, [r0]
+ ldrgtb r3, [r1], #1
+ strgtb r3, [r0]
+ ldmdb fp, {fp, sp, pc}
+
+/*
+ * Reads short ints (16 bits) from an I/O address into a block of memory
+ *
+ * r0 = address to read from (IO)
+ * r1 = address to write to (memory)
+ * r2 = length
+ */
+
+ENTRY(insw)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the destination address and the size is word aligned, do it fast */
+
+ tst r2, #0x00000001
+ tsteq r1, #0x00000003
+ beq .Lfastinsw
+
+/* Non aligned insw */
+
+.Linswloop:
+ ldr r3, [r0]
+ subs r2, r2, #0x00000001 /* Loop test in load delay slot */
+ strb r3, [r1], #0x0001
+ mov r3, r3, lsr #8
+ strb r3, [r1], #0x0001
+ bgt .Linswloop
+
+ mov pc, lr
+
+/* Word aligned insw */
+
+.Lfastinsw:
+
+.Lfastinswloop:
+ ldr r3, [r0, #0x0002] /* take advantage of nonaligned
+ * word accesses */
+ ldr ip, [r0]
+ mov r3, r3, lsr #16 /* Put the two shorts together */
+ orr r3, r3, ip, lsl #16
+ str r3, [r1], #0x0004 /* Store */
+ subs r2, r2, #0x00000002 /* Next */
+ bgt .Lfastinswloop
+
+ mov pc, lr
+
+
+/*
+ * Writes short ints (16 bits) from a block of memory to an I/O address
+ *
+ * r0 = address to write to (IO)
+ * r1 = address to read from (memory)
+ * r2 = length
+ */
+
+ENTRY(outsw)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the destination address and the size is word aligned, do it fast */
+
+ tst r2, #0x00000001
+ tsteq r1, #0x00000003
+ beq .Lfastoutsw
+
+/* Non aligned outsw */
+
+.Loutswloop:
+ ldrb r3, [r1], #0x0001
+ ldrb ip, [r1], #0x0001
+ subs r2, r2, #0x00000001 /* Loop test in load delay slot */
+ orr r3, r3, ip, lsl #8
+ orr r3, r3, r3, lsl #16
+ str r3, [r0]
+ bgt .Loutswloop
+
+ mov pc, lr
+
+/* Word aligned outsw */
+
+.Lfastoutsw:
+
+.Lfastoutswloop:
+ ldr r3, [r1], #0x0004 /* r3 = (H)(L) */
+ subs r2, r2, #0x00000002 /* Loop test in load delay slot */
+
+ eor ip, r3, r3, lsr #16 /* ip = (H)(H^L) */
+ eor r3, r3, ip, lsl #16 /* r3 = (H^H^L)(L) = (L)(L) */
+ eor ip, ip, r3, lsr #16 /* ip = (H)(H^L^L) = (H)(H) */
+
+ str r3, [r0]
+ str ip, [r0]
+
+/* mov ip, r3, lsl #16
+ * orr ip, ip, ip, lsr #16
+ * str ip, [r0]
+ *
+ * mov ip, r3, lsr #16
+ * orr ip, ip, ip, lsl #16
+ * str ip, [r0]
+ */
+
+ bgt .Lfastoutswloop
+
+ mov pc, lr
+
+/*
+ * reads short ints (16 bits) from an I/O address into a block of memory
+ * with a length guaranteed to be a multiple of 16 bytes
+ * with a word aligned destination address
+ *
+ * r0 = address to read from (IO)
+ * r1 = address to write to (memory)
+ * r2 = length
+ */
+
+ENTRY(insw16)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the destination address is word aligned and the size suitably
+ aligned, do it fast */
+
+ tst r2, #0x00000007
+ tsteq r1, #0x00000003
+
+ bne _C_LABEL(insw)
+
+/* Word aligned insw */
+
+ stmfd sp!, {r4,r5,lr}
+
+.Linsw16loop:
+ ldr r3, [r0, #0x0002] /* take advantage of nonaligned
+ * word accesses */
+ ldr lr, [r0]
+ mov r3, r3, lsr #16 /* Put the two shorts together */
+ orr r3, r3, lr, lsl #16
+
+ ldr r4, [r0, #0x0002] /* take advantage of nonaligned
+ * word accesses */
+ ldr lr, [r0]
+ mov r4, r4, lsr #16 /* Put the two shorts together */
+ orr r4, r4, lr, lsl #16
+
+ ldr r5, [r0, #0x0002] /* take advantage of nonaligned
+ * word accesses */
+ ldr lr, [r0]
+ mov r5, r5, lsr #16 /* Put the two shorts together */
+ orr r5, r5, lr, lsl #16
+
+ ldr ip, [r0, #0x0002] /* take advantage of nonaligned
+ * word accesses */
+ ldr lr, [r0]
+ mov ip, ip, lsr #16 /* Put the two shorts together */
+ orr ip, ip, lr, lsl #16
+
+ stmia r1!, {r3-r5,ip}
+ subs r2, r2, #0x00000008 /* Next */
+ bgt .Linsw16loop
+
+ ldmfd sp!, {r4,r5,pc} /* Restore regs and go home */
+
+
+/*
+ * Writes short ints (16 bits) from a block of memory to an I/O address
+ *
+ * r0 = address to write to (IO)
+ * r1 = address to read from (memory)
+ * r2 = length
+ */
+
+ENTRY(outsw16)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the destination address is word aligned and the size suitably
+ aligned, do it fast */
+
+ tst r2, #0x00000007
+ tsteq r1, #0x00000003
+
+ bne _C_LABEL(outsw)
+
+/* Word aligned outsw */
+
+ stmfd sp!, {r4,r5,lr}
+
+.Loutsw16loop:
+ ldmia r1!, {r4,r5,ip,lr}
+
+ eor r3, r4, r4, lsl #16 /* r3 = (A^B)(B) */
+ eor r4, r4, r3, lsr #16 /* r4 = (A)(B^A^B) = (A)(A) */
+ eor r3, r3, r4, lsl #16 /* r3 = (A^B^A)(B) = (B)(B) */
+ str r3, [r0]
+ str r4, [r0]
+
+/* mov r3, r4, lsl #16
+ * orr r3, r3, r3, lsr #16
+ * str r3, [r0]
+ *
+ * mov r3, r4, lsr #16
+ * orr r3, r3, r3, lsl #16
+ * str r3, [r0]
+ */
+
+ eor r3, r5, r5, lsl #16 /* r3 = (A^B)(B) */
+	eor	r5, r5, r3, lsr #16	/* r5 = (A)(B^A^B) = (A)(A) */
+ eor r3, r3, r5, lsl #16 /* r3 = (A^B^A)(B) = (B)(B) */
+ str r3, [r0]
+ str r5, [r0]
+
+ eor r3, ip, ip, lsl #16 /* r3 = (A^B)(B) */
+	eor	ip, ip, r3, lsr #16	/* ip = (A)(B^A^B) = (A)(A) */
+ eor r3, r3, ip, lsl #16 /* r3 = (A^B^A)(B) = (B)(B) */
+ str r3, [r0]
+ str ip, [r0]
+
+ eor r3, lr, lr, lsl #16 /* r3 = (A^B)(B) */
+	eor	lr, lr, r3, lsr #16	/* lr = (A)(B^A^B) = (A)(A) */
+ eor r3, r3, lr, lsl #16 /* r3 = (A^B^A)(B) = (B)(B) */
+ str r3, [r0]
+ str lr, [r0]
+
+ subs r2, r2, #0x00000008
+ bgt .Loutsw16loop
+
+ ldmfd sp!, {r4,r5,pc} /* and go home */
+
+/*
+ * reads short ints (16 bits) from an I/O address into a block of memory
+ * The I/O address is assumed to be mapped multiple times in a block of
+ * 8 words.
+ * The destination address should be word aligned.
+ *
+ * r0 = address to read from (IO)
+ * r1 = address to write to (memory)
+ * r2 = length
+ */
+
+ENTRY(inswm8)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the destination address is word aligned and the size suitably
+ aligned, do it fast */
+
+ tst r1, #0x00000003
+
+ bne _C_LABEL(insw)
+
+/* Word aligned insw */
+
+ stmfd sp!, {r4-r9,lr}
+
+ mov lr, #0xff000000
+ orr lr, lr, #0x00ff0000
+
+.Linswm8_loop8:
+ cmp r2, #8
+ bcc .Linswm8_l8
+
+ ldmia r0, {r3-r9,ip}
+
+ bic r3, r3, lr
+ orr r3, r3, r4, lsl #16
+ bic r5, r5, lr
+ orr r4, r5, r6, lsl #16
+ bic r7, r7, lr
+ orr r5, r7, r8, lsl #16
+ bic r9, r9, lr
+ orr r6, r9, ip, lsl #16
+
+ stmia r1!, {r3-r6}
+
+ subs r2, r2, #0x00000008 /* Next */
+ bne .Linswm8_loop8
+ beq .Linswm8_l1
+
+.Linswm8_l8:
+ cmp r2, #4
+ bcc .Linswm8_l4
+
+ ldmia r0, {r3-r6}
+
+ bic r3, r3, lr
+ orr r3, r3, r4, lsl #16
+ bic r5, r5, lr
+ orr r4, r5, r6, lsl #16
+
+ stmia r1!, {r3-r4}
+
+ subs r2, r2, #0x00000004
+ beq .Linswm8_l1
+
+.Linswm8_l4:
+ cmp r2, #2
+ bcc .Linswm8_l2
+
+ ldmia r0, {r3-r4}
+
+ bic r3, r3, lr
+ orr r3, r3, r4, lsl #16
+ str r3, [r1], #0x0004
+
+ subs r2, r2, #0x00000002
+ beq .Linswm8_l1
+
+.Linswm8_l2:
+ cmp r2, #1
+ bcc .Linswm8_l1
+
+ ldr r3, [r0]
+ subs r2, r2, #0x00000001 /* Test in load delay slot */
+ /* XXX, why don't we use result? */
+
+ strb r3, [r1], #0x0001
+ mov r3, r3, lsr #8
+ strb r3, [r1], #0x0001
+
+
+.Linswm8_l1:
+ ldmfd sp!, {r4-r9,pc} /* And go home */
+
+/*
+ * write short ints (16 bits) to an I/O address from a block of memory
+ * The I/O address is assumed to be mapped multiple times in a block of
+ * 8 words.
+ * The source address should be word aligned.
+ *
+ * r0 = address to write to (IO)
+ * r1 = address to read from (memory)
+ * r2 = length
+ */
+
+ENTRY(outswm8)
+/* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+/* If the destination address is word aligned and the size suitably
+ aligned, do it fast */
+
+ tst r1, #0x00000003
+
+ bne _C_LABEL(outsw)
+
+/* Word aligned outsw */
+
+ stmfd sp!, {r4-r8,lr}
+
+.Loutswm8_loop8:
+ cmp r2, #8
+ bcc .Loutswm8_l8
+
+ ldmia r1!, {r3,r5,r7,ip}
+
+ eor r4, r3, r3, lsr #16 /* r4 = (A)(A^B) */
+ eor r3, r3, r4, lsl #16 /* r3 = (A^A^B)(B) = (B)(B) */
+ eor r4, r4, r3, lsr #16 /* r4 = (A)(B^A^B) = (A)(A) */
+
+ eor r6, r5, r5, lsr #16 /* r6 = (A)(A^B) */
+ eor r5, r5, r6, lsl #16 /* r5 = (A^A^B)(B) = (B)(B) */
+ eor r6, r6, r5, lsr #16 /* r6 = (A)(B^A^B) = (A)(A) */
+
+ eor r8, r7, r7, lsr #16 /* r8 = (A)(A^B) */
+ eor r7, r7, r8, lsl #16 /* r7 = (A^A^B)(B) = (B)(B) */
+ eor r8, r8, r7, lsr #16 /* r8 = (A)(B^A^B) = (A)(A) */
+
+ eor lr, ip, ip, lsr #16 /* lr = (A)(A^B) */
+ eor ip, ip, lr, lsl #16 /* ip = (A^A^B)(B) = (B)(B) */
+ eor lr, lr, ip, lsr #16 /* lr = (A)(B^A^B) = (A)(A) */
+
+ stmia r0, {r3-r8,ip,lr}
+
+ subs r2, r2, #0x00000008 /* Next */
+ bne .Loutswm8_loop8
+ beq .Loutswm8_l1
+
+.Loutswm8_l8:
+ cmp r2, #4
+ bcc .Loutswm8_l4
+
+ ldmia r1!, {r3-r4}
+
+ eor r6, r3, r3, lsr #16 /* r6 = (A)(A^B) */
+ eor r5, r3, r6, lsl #16 /* r5 = (A^A^B)(B) = (B)(B) */
+ eor r6, r6, r5, lsr #16 /* r6 = (A)(B^A^B) = (A)(A) */
+
+ eor r8, r4, r4, lsr #16 /* r8 = (A)(A^B) */
+ eor r7, r4, r8, lsl #16 /* r7 = (A^A^B)(B) = (B)(B) */
+ eor r8, r8, r7, lsr #16 /* r8 = (A)(B^A^B) = (A)(A) */
+
+ stmia r0, {r5-r8}
+
+ subs r2, r2, #0x00000004
+ beq .Loutswm8_l1
+
+.Loutswm8_l4:
+ cmp r2, #2
+ bcc .Loutswm8_l2
+
+ ldr r3, [r1], #0x0004 /* r3 = (A)(B) */
+ subs r2, r2, #0x00000002 /* Done test in Load delay slot */
+
+ eor r5, r3, r3, lsr #16 /* r5 = (A)(A^B)*/
+ eor r4, r3, r5, lsl #16 /* r4 = (A^A^B)(B) = (B)(B) */
+ eor r5, r5, r4, lsr #16 /* r5 = (A)(B^A^B) = (A)(A) */
+
+ stmia r0, {r4, r5}
+
+ beq .Loutswm8_l1
+
+.Loutswm8_l2:
+ cmp r2, #1
+ bcc .Loutswm8_l1
+
+ ldrb r3, [r1], #0x0001
+ ldrb r4, [r1], #0x0001
+ subs r2, r2, #0x00000001 /* Done test in load delay slot */
+ /* XXX This test isn't used? */
+ orr r3, r3, r4, lsl #8
+ orr r3, r3, r3, lsl #16
+ str r3, [r0]
+
+.Loutswm8_l1:
+ ldmfd sp!, {r4-r8,pc} /* And go home */
diff --git a/sys/arch/arm/arm/bootconfig.c b/sys/arch/arm/arm/bootconfig.c
new file mode 100644
index 00000000000..27ed4736fe6
--- /dev/null
+++ b/sys/arch/arm/arm/bootconfig.c
@@ -0,0 +1,128 @@
+/* $OpenBSD: bootconfig.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: bootconfig.c,v 1.2 2002/03/10 19:56:39 lukem Exp $ */
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+
+#include <sys/systm.h>
+
+#include <machine/bootconfig.h>
+
+#include "rd.h"
+
+/*
+ * Function to identify and process different types of boot argument
+ */
+
+int
+get_bootconf_option(opts, opt, type, result)
+ char *opts;
+ char *opt;
+ int type;
+ void *result;
+{
+ char *ptr;
+ char *optstart;
+ int not;
+
+ ptr = opts;
+
+ while (*ptr) {
+ /* Find start of option */
+ while (*ptr == ' ' || *ptr == '\t')
+ ++ptr;
+
+ if (*ptr == 0)
+ break;
+
+ not = 0;
+
+ /* Is it a negate option */
+ if ((type & BOOTOPT_TYPE_MASK) == BOOTOPT_TYPE_BOOLEAN && *ptr == '!') {
+ not = 1;
+ ++ptr;
+ }
+
+ /* Find the end of option */
+ optstart = ptr;
+ while (*ptr != 0 && *ptr != ' ' && *ptr != '\t' && *ptr != '=')
+ ++ptr;
+
+ if ((*ptr == '=')
+ || (*ptr != '=' && ((type & BOOTOPT_TYPE_MASK) == BOOTOPT_TYPE_BOOLEAN))) {
+ /* compare the option */
+ if (strncmp(optstart, opt, (ptr - optstart)) == 0) {
+ /* found */
+
+ if (*ptr == '=')
+ ++ptr;
+
+#if 0
+/* BELCH */
+ switch(type & BOOTOPT_TYPE_MASK) {
+ case BOOTOPT_TYPE_BOOLEAN :
+ if (*(ptr - 1) == '=')
+ *((int *)result) = ((u_int)strtoul(ptr, NULL, 10) != 0);
+ else
+ *((int *)result) = !not;
+ break;
+ case BOOTOPT_TYPE_STRING :
+ *((char **)result) = ptr;
+ break;
+ case BOOTOPT_TYPE_INT :
+ *((int *)result) = (u_int)strtoul(ptr, NULL, 10);
+ break;
+ case BOOTOPT_TYPE_BININT :
+ *((int *)result) = (u_int)strtoul(ptr, NULL, 2);
+ break;
+ case BOOTOPT_TYPE_HEXINT :
+ *((int *)result) = (u_int)strtoul(ptr, NULL, 16);
+ break;
+ default:
+ return(0);
+ }
+#endif
+ return(1);
+ }
+ }
+ /* skip to next option */
+ while (*ptr != ' ' && *ptr != '\t' && *ptr != 0)
+ ++ptr;
+ }
+ return(0);
+}
diff --git a/sys/arch/arm/arm/bus_dma.c b/sys/arch/arm/arm/bus_dma.c
new file mode 100644
index 00000000000..b5334213d37
--- /dev/null
+++ b/sys/arch/arm/arm/bus_dma.c
@@ -0,0 +1,1070 @@
+/* $OpenBSD: bus_dma.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*	$NetBSD: bus_dma.c,v 1.38 2003/10/30 08:44:13 scw Exp $	*/
+
+/*-
+ * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define _ARM32_BUS_DMA_PRIVATE
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/buf.h>
+#include <sys/reboot.h>
+#include <sys/conf.h>
+#include <sys/file.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/vnode.h>
+#include <sys/device.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/bus.h>
+#include <machine/cpu.h>
+
+#include <arm/cpufunc.h>
+
+int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int, paddr_t *, int *, int);
+struct arm32_dma_range *_bus_dma_inrange(struct arm32_dma_range *,
+ int, bus_addr_t);
+
+/*
+ * Check to see if the specified page is in an allowed DMA range.
+ */
+__inline struct arm32_dma_range *
+_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
+ bus_addr_t curaddr)
+{
+ struct arm32_dma_range *dr;
+ int i;
+
+ for (i = 0, dr = ranges; i < nranges; i++, dr++) {
+ if (curaddr >= dr->dr_sysbase &&
+ round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
+ return (dr);
+ }
+
+ return (NULL);
+}
+
+/*
+ * Common function for DMA map creation. May be called by bus-specific
+ * DMA map creation functions.
+ */
+int
+_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
+ bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
+{
+ struct arm32_bus_dmamap *map;
+ void *mapstore;
+ size_t mapsize;
+
+#ifdef DEBUG_DMA
+ printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
+ t, size, nsegments, maxsegsz, boundary, flags);
+#endif /* DEBUG_DMA */
+
+ /*
+ * Allocate and initialize the DMA map. The end of the map
+ * is a variable-sized array of segments, so we allocate enough
+ * room for them in one shot.
+ *
+ * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
+ * of ALLOCNOW notifies others that we've reserved these resources,
+ * and they are not to be freed.
+ *
+ * The bus_dmamap_t includes one bus_dma_segment_t, hence
+ * the (nsegments - 1).
+ */
+ mapsize = sizeof(struct arm32_bus_dmamap) +
+ (sizeof(bus_dma_segment_t) * (nsegments - 1));
+ if ((mapstore = malloc(mapsize, M_DEVBUF,
+ (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
+ return (ENOMEM);
+
+ memset(mapstore, 0, mapsize);
+ map = (struct arm32_bus_dmamap *)mapstore;
+ map->_dm_size = size;
+ map->_dm_segcnt = nsegments;
+ map->_dm_maxsegsz = maxsegsz;
+ map->_dm_boundary = boundary;
+ map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
+ map->_dm_origbuf = NULL;
+ map->_dm_buftype = ARM32_BUFTYPE_INVALID;
+ map->_dm_proc = NULL;
+ map->dm_mapsize = 0; /* no valid mappings */
+ map->dm_nsegs = 0;
+
+ *dmamp = map;
+#ifdef DEBUG_DMA
+ printf("dmamap_create:map=%p\n", map);
+#endif /* DEBUG_DMA */
+ return (0);
+}
+
+/*
+ * Common function for DMA map destruction. May be called by bus-specific
+ * DMA map destruction functions.
+ */
+void
+_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
+{
+
+#ifdef DEBUG_DMA
+ printf("dmamap_destroy: t=%p map=%p\n", t, map);
+#endif /* DEBUG_DMA */
+
+ /*
+ * Explicit unload.
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+ map->_dm_origbuf = NULL;
+ map->_dm_buftype = ARM32_BUFTYPE_INVALID;
+ map->_dm_proc = NULL;
+
+ free(map, M_DEVBUF);
+}
+
+/*
+ * Common function for loading a DMA map with a linear buffer. May
+ * be called by bus-specific DMA map load functions.
+ */
+int
+_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, struct proc *p, int flags)
+{
+ paddr_t lastaddr;
+ int seg, error;
+
+#ifdef DEBUG_DMA
+ printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
+ t, map, buf, buflen, p, flags);
+#endif /* DEBUG_DMA */
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+ if (buflen > map->_dm_size)
+ return (EINVAL);
+
+ /* _bus_dmamap_load_buffer() clears this if we're not... */
+ map->_dm_flags |= ARM32_DMAMAP_COHERENT;
+
+ seg = 0;
+ error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
+ &lastaddr, &seg, 1);
+ if (error == 0) {
+ map->dm_mapsize = buflen;
+ map->dm_nsegs = seg + 1;
+ map->_dm_origbuf = buf;
+ map->_dm_buftype = ARM32_BUFTYPE_LINEAR;
+ map->_dm_proc = p;
+ }
+#ifdef DEBUG_DMA
+ printf("dmamap_load: error=%d\n", error);
+#endif /* DEBUG_DMA */
+ return (error);
+}
+
+/*
+ * Like _bus_dmamap_load(), but for mbufs.
+ */
+int
+_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
+ int flags)
+{
+#if 0
+ struct arm32_dma_range *dr;
+#endif
+ paddr_t lastaddr;
+ int seg, error, first;
+ struct mbuf *m;
+
+#ifdef DEBUG_DMA
+ printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
+ t, map, m0, flags);
+#endif /* DEBUG_DMA */
+
+ /*
+ * Make sure that on error condition we return "no valid mappings."
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+#ifdef DIAGNOSTIC
+ if ((m0->m_flags & M_PKTHDR) == 0)
+ panic("_bus_dmamap_load_mbuf: no packet header");
+#endif /* DIAGNOSTIC */
+
+ if (m0->m_pkthdr.len > map->_dm_size)
+ return (EINVAL);
+
+ /*
+ * Mbuf chains should almost never have coherent (i.e.
+ * un-cached) mappings, so clear that flag now.
+ */
+ map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;
+
+ first = 1;
+ seg = 0;
+ error = 0;
+ for (m = m0; m != NULL && error == 0; m = m->m_next) {
+ error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
+ NULL, flags, &lastaddr, &seg, first);
+ first = 0;
+ }
+ if (error == 0) {
+ map->dm_mapsize = m0->m_pkthdr.len;
+ map->dm_nsegs = seg + 1;
+ map->_dm_origbuf = m0;
+ map->_dm_buftype = ARM32_BUFTYPE_MBUF;
+ map->_dm_proc = NULL; /* always kernel */
+ }
+#ifdef DEBUG_DMA
+ printf("dmamap_load_mbuf: error=%d\n", error);
+#endif /* DEBUG_DMA */
+ return (error);
+}
+
+/*
+ * Like _bus_dmamap_load(), but for uios.
+ */
+int
+_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
+ int flags)
+{
+ paddr_t lastaddr;
+ int seg, i, error, first;
+ bus_size_t minlen, resid;
+ struct proc *p = NULL;
+ struct iovec *iov;
+ caddr_t addr;
+
+ /*
+ * Make sure that on error condition we return "no valid mappings."
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+ resid = uio->uio_resid;
+ iov = uio->uio_iov;
+
+ if (uio->uio_segflg == UIO_USERSPACE) {
+ p = uio->uio_procp;
+#ifdef DIAGNOSTIC
+ if (p == NULL)
+ panic("_bus_dmamap_load_uio: USERSPACE but no proc");
+#endif
+ }
+
+ /* _bus_dmamap_load_buffer() clears this if we're not... */
+ map->_dm_flags |= ARM32_DMAMAP_COHERENT;
+
+ first = 1;
+ seg = 0;
+ error = 0;
+ for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
+ /*
+ * Now at the first iovec to load. Load each iovec
+ * until we have exhausted the residual count.
+ */
+ minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
+ addr = (caddr_t)iov[i].iov_base;
+
+ error = _bus_dmamap_load_buffer(t, map, addr, minlen,
+ p, flags, &lastaddr, &seg, first);
+ first = 0;
+
+ resid -= minlen;
+ }
+ if (error == 0) {
+ map->dm_mapsize = uio->uio_resid;
+ map->dm_nsegs = seg + 1;
+ map->_dm_origbuf = uio;
+ map->_dm_buftype = ARM32_BUFTYPE_UIO;
+ map->_dm_proc = p;
+ }
+ return (error);
+}
+
+/*
+ * Like _bus_dmamap_load(), but for raw memory allocated with
+ * bus_dmamem_alloc().
+ */
+int
+_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
+{
+
+ panic("_bus_dmamap_load_raw: not implemented");
+}
+
+/*
+ * Common function for unloading a DMA map. May be called by
+ * bus-specific DMA map unload functions.
+ */
+void
+_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
+{
+
+#ifdef DEBUG_DMA
+ printf("dmamap_unload: t=%p map=%p\n", t, map);
+#endif /* DEBUG_DMA */
+
+ /*
+ * No resources to free; just mark the mappings as
+ * invalid.
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+ map->_dm_origbuf = NULL;
+ map->_dm_buftype = ARM32_BUFTYPE_INVALID;
+ map->_dm_proc = NULL;
+}
+
+static __inline void
+_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
+ bus_size_t len, int ops)
+{
+ vaddr_t addr = (vaddr_t) map->_dm_origbuf;
+
+ addr += offset;
+
+ switch (ops) {
+ case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
+ cpu_dcache_wbinv_range(addr, len);
+ break;
+
+ case BUS_DMASYNC_PREREAD:
+ if (((addr | len) & arm_dcache_align_mask) == 0)
+ cpu_dcache_inv_range(addr, len);
+ else
+ cpu_dcache_wbinv_range(addr, len);
+ break;
+
+ case BUS_DMASYNC_PREWRITE:
+ cpu_dcache_wb_range(addr, len);
+ break;
+ }
+}
+
+static __inline void
+_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
+ bus_size_t len, int ops)
+{
+ struct mbuf *m, *m0 = map->_dm_origbuf;
+ bus_size_t minlen, moff;
+ vaddr_t maddr;
+
+ for (moff = offset, m = m0; m != NULL && len != 0;
+ m = m->m_next) {
+ /* Find the beginning mbuf. */
+ if (moff >= m->m_len) {
+ moff -= m->m_len;
+ continue;
+ }
+
+ /*
+ * Now at the first mbuf to sync; nail each one until
+ * we have exhausted the length.
+ */
+ minlen = m->m_len - moff;
+ if (len < minlen)
+ minlen = len;
+
+ maddr = mtod(m, vaddr_t);
+ maddr += moff;
+
+ /*
+ * We can save a lot of work here if we know the mapping
+ * is read-only at the MMU:
+ *
+ * If a mapping is read-only, no dirty cache blocks will
+ * exist for it. If a writable mapping was made read-only,
+ * we know any dirty cache lines for the range will have
+ * been cleaned for us already. Therefore, if the upper
+ * layer can tell us we have a read-only mapping, we can
+ * skip all cache cleaning.
+ *
+ * NOTE: This only works if we know the pmap cleans pages
+ * before making a read-write -> read-only transition. If
+ * this ever becomes non-true (e.g. Physically Indexed
+ * cache), this will have to be revisited.
+ */
+ switch (ops) {
+ case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
+ /* if (! M_ROMAP(m)) */{
+ cpu_dcache_wbinv_range(maddr, minlen);
+ break;
+ }
+ /* else FALLTHROUGH */
+
+ case BUS_DMASYNC_PREREAD:
+ if (((maddr | minlen) & arm_dcache_align_mask) == 0)
+ cpu_dcache_inv_range(maddr, minlen);
+ else
+ cpu_dcache_wbinv_range(maddr, minlen);
+ break;
+
+ case BUS_DMASYNC_PREWRITE:
+ /* if (! M_ROMAP(m)) */
+ cpu_dcache_wb_range(maddr, minlen);
+ break;
+ }
+ moff = 0;
+ len -= minlen;
+ }
+}
+
+static __inline void
+_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
+ bus_size_t len, int ops)
+{
+ struct uio *uio = map->_dm_origbuf;
+ struct iovec *iov;
+ bus_size_t minlen, ioff;
+ vaddr_t addr;
+
+ for (iov = uio->uio_iov, ioff = offset; len != 0; iov++) {
+ /* Find the beginning iovec. */
+ if (ioff >= iov->iov_len) {
+ ioff -= iov->iov_len;
+ continue;
+ }
+
+ /*
+ * Now at the first iovec to sync; nail each one until
+ * we have exhausted the length.
+ */
+ minlen = iov->iov_len - ioff;
+ if (len < minlen)
+ minlen = len;
+
+ addr = (vaddr_t) iov->iov_base;
+ addr += ioff;
+
+ switch (ops) {
+ case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
+ cpu_dcache_wbinv_range(addr, minlen);
+ break;
+
+ case BUS_DMASYNC_PREREAD:
+ if (((addr | minlen) & arm_dcache_align_mask) == 0)
+ cpu_dcache_inv_range(addr, minlen);
+ else
+ cpu_dcache_wbinv_range(addr, minlen);
+ break;
+
+ case BUS_DMASYNC_PREWRITE:
+ cpu_dcache_wb_range(addr, minlen);
+ break;
+ }
+ ioff = 0;
+ len -= minlen;
+ }
+}
+
+/*
+ * Common function for DMA map synchronization. May be called
+ * by bus-specific DMA map synchronization functions.
+ *
+ * This version works for the Virtually Indexed Virtually Tagged
+ * cache found on 32-bit ARM processors.
+ *
+ * XXX Should have separate versions for write-through vs.
+ * XXX write-back caches. We currently assume write-back
+ * XXX here, which is not as efficient as it could be for
+ * XXX the write-through case.
+ */
+void
+_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
+    bus_size_t len, int ops)
+{
+
+#ifdef DEBUG_DMA
+	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
+	    t, map, offset, len, ops);
+#endif	/* DEBUG_DMA */
+
+	/*
+	 * Mixing of PRE and POST operations is not allowed.
+	 */
+	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
+	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
+		panic("_bus_dmamap_sync: mix PRE and POST");
+
+#ifdef DIAGNOSTIC
+	if (offset >= map->dm_mapsize)
+		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
+		    offset, map->dm_mapsize);
+	if (len == 0 || (offset + len) > map->dm_mapsize)
+		panic("_bus_dmamap_sync: bad length");
+#endif
+
+	/*
+	 * For a virtually-indexed write-back cache, we need
+	 * to do the following things:
+	 *
+	 *	PREREAD -- Invalidate the D-cache.  We do this
+	 *	here in case a write-back is required by the back-end.
+	 *
+	 *	PREWRITE -- Write-back the D-cache.  Note that if
+	 *	we are doing a PREREAD|PREWRITE, we can collapse
+	 *	the whole thing into a single Wb-Inv.
+	 *
+	 *	POSTREAD -- Nothing.
+	 *
+	 *	POSTWRITE -- Nothing.
+	 */
+
+	/* POST ops are no-ops here (see above); keep only the PRE bits. */
+	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+	if (ops == 0)
+		return;
+
+	/* Skip cache frobbing if mapping was COHERENT. */
+	if (map->_dm_flags & ARM32_DMAMAP_COHERENT) {
+		/* Drain the write buffer. */
+		cpu_drain_writebuf();
+		return;
+	}
+
+	/*
+	 * If the mapping belongs to a non-kernel vmspace, and the
+	 * vmspace has not been active since the last time a full
+	 * cache flush was performed, we don't need to do anything.
+	 */
+	if (__predict_false(map->_dm_proc != NULL &&
+	    map->_dm_proc->p_vmspace->vm_map.pmap->pm_cstate.cs_cache_d == 0))
+		return;
+
+	/* Dispatch on how this map was loaded (set at load time). */
+	switch (map->_dm_buftype) {
+	case ARM32_BUFTYPE_LINEAR:
+		_bus_dmamap_sync_linear(t, map, offset, len, ops);
+		break;
+
+	case ARM32_BUFTYPE_MBUF:
+		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
+		break;
+
+	case ARM32_BUFTYPE_UIO:
+		_bus_dmamap_sync_uio(t, map, offset, len, ops);
+		break;
+
+	case ARM32_BUFTYPE_RAW:
+		panic("_bus_dmamap_sync: ARM32_BUFTYPE_RAW");
+		break;
+
+	case ARM32_BUFTYPE_INVALID:
+		panic("_bus_dmamap_sync: ARM32_BUFTYPE_INVALID");
+		break;
+
+	default:
+		printf("unknown buffer type %d\n", map->_dm_buftype);
+		panic("_bus_dmamap_sync");
+	}
+
+	/* Drain the write buffer. */
+	cpu_drain_writebuf();
+}
+
+/*
+ * Common function for DMA-safe memory allocation. May be called
+ * by bus-specific DMA memory allocation functions.
+ */
+
+extern paddr_t physical_start;
+extern paddr_t physical_end;
+
+int
+_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
+    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
+    int flags)
+{
+	struct arm32_dma_range *dr;
+	int error, i;
+
+#ifdef DEBUG_DMA
+	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
+	    "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
+	    boundary, segs, nsegs, rsegs, flags);
+#endif
+
+	/*
+	 * If the tag restricts DMA to specific physical ranges, try each
+	 * non-empty range in turn until an allocation succeeds; otherwise
+	 * allocate anywhere between physical_start and physical_end.
+	 */
+	if ((dr = t->_ranges) != NULL) {
+		error = ENOMEM;
+		for (i = 0; i < t->_nranges; i++, dr++) {
+			if (dr->dr_len == 0)
+				continue;
+			error = _bus_dmamem_alloc_range(t, size, alignment,
+			    boundary, segs, nsegs, rsegs, flags,
+			    trunc_page(dr->dr_sysbase),
+			    trunc_page(dr->dr_sysbase + dr->dr_len));
+			if (error == 0)
+				break;
+		}
+	} else {
+		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
+		    segs, nsegs, rsegs, flags, trunc_page(physical_start),
+		    trunc_page(physical_end));
+	}
+
+#ifdef DEBUG_DMA
+	printf("dmamem_alloc: =%d\n", error);
+#endif
+
+	return(error);
+}
+
+/*
+ * Common function for freeing DMA-safe memory. May be called by
+ * bus-specific DMA memory free functions.
+ */
+void
+_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
+{
+	struct vm_page *m;
+	bus_addr_t addr;
+	struct pglist mlist;
+	int curseg;
+
+#ifdef DEBUG_DMA
+	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
+#endif	/* DEBUG_DMA */
+
+	/*
+	 * Build a list of pages to free back to the VM system.
+	 */
+	TAILQ_INIT(&mlist);
+	for (curseg = 0; curseg < nsegs; curseg++) {
+		/* Each segment is physically contiguous; walk it by page. */
+		for (addr = segs[curseg].ds_addr;
+		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+		    addr += PAGE_SIZE) {
+			m = PHYS_TO_VM_PAGE(addr);
+			TAILQ_INSERT_TAIL(&mlist, m, pageq);
+		}
+	}
+	/* Return the whole list to UVM in a single call. */
+	uvm_pglistfree(&mlist);
+}
+
+/*
+ * Common function for mapping DMA-safe memory. May be called by
+ * bus-specific DMA memory map functions.
+ */
+int
+_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+    size_t size, caddr_t *kvap, int flags)
+{
+	vaddr_t va;
+	bus_addr_t addr;
+	int curseg;
+	pt_entry_t *ptep/*, pte*/;
+
+#ifdef DEBUG_DMA
+	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
+	    segs, nsegs, (unsigned long)size, flags);
+#endif	/* DEBUG_DMA */
+
+	/* Reserve a contiguous chunk of kernel virtual address space. */
+	size = round_page(size);
+	va = uvm_km_valloc(kernel_map, size);
+
+	if (va == 0)
+		return (ENOMEM);
+
+	*kvap = (caddr_t)va;
+
+	for (curseg = 0; curseg < nsegs; curseg++) {
+		for (addr = segs[curseg].ds_addr;
+		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
+#ifdef DEBUG_DMA
+			printf("wiring p%lx to v%lx", addr, va);
+#endif	/* DEBUG_DMA */
+			/*
+			 * 'size' tracks the remaining reserved VA; the
+			 * segments must never need more than was reserved.
+			 */
+			if (size == 0)
+				panic("_bus_dmamem_map: size botch");
+			pmap_enter(pmap_kernel(), va, addr,
+			    VM_PROT_READ | VM_PROT_WRITE,
+			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
+			/*
+			 * If the memory must remain coherent with the
+			 * cache then we must make the memory uncacheable
+			 * in order to maintain virtual cache coherency.
+			 * We must also guarantee the cache does not already
+			 * contain the virtual addresses we are making
+			 * uncacheable.
+			 */
+			if (flags & BUS_DMA_COHERENT) {
+				cpu_dcache_wbinv_range(va, PAGE_SIZE);
+				cpu_drain_writebuf();
+				ptep = vtopte(va);
+				/* Clear the cacheable bits in the PTE. */
+				*ptep &= ~L2_S_CACHE_MASK;
+				PTE_SYNC(ptep);
+				tlb_flush();
+			}
+#ifdef DEBUG_DMA
+			ptep = vtopte(va);
+			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
+#endif	/* DEBUG_DMA */
+		}
+	}
+	pmap_update(pmap_kernel());
+#ifdef DEBUG_DMA
+	printf("dmamem_map: =%p\n", *kvap);
+#endif	/* DEBUG_DMA */
+	return (0);
+}
+
+/*
+ * Common function for unmapping DMA-safe memory. May be called by
+ * bus-specific DMA memory unmapping functions.
+ */
+void
+_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
+{
+	vaddr_t va = (vaddr_t)kva;
+
+#ifdef DEBUG_DMA
+	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
+	    (unsigned long)size);
+#endif	/* DEBUG_DMA */
+#ifdef DIAGNOSTIC
+	/* The KVA must be the page-aligned value _bus_dmamem_map() gave. */
+	if ((u_long)kva & PGOFSET)
+		panic("_bus_dmamem_unmap");
+#endif	/* DIAGNOSTIC */
+
+	/* Return the page-rounded region to the kernel VA allocator. */
+	uvm_km_free(kernel_map, va, round_page(size));
+}
+
+/*
+ * Common function for mmap(2)'ing DMA-safe memory.  May be called by
+ * bus-specific DMA mmap(2)'ing functions.
+ */
+paddr_t
+_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+    off_t off, int prot, int flags)
+{
+	int seg;
+
+	/* Walk the segment list until 'off' falls inside a segment. */
+	for (seg = 0; seg < nsegs; seg++) {
+#ifdef DIAGNOSTIC
+		if (off & PGOFSET)
+			panic("_bus_dmamem_mmap: offset unaligned");
+		if (segs[seg].ds_addr & PGOFSET)
+			panic("_bus_dmamem_mmap: segment unaligned");
+		if (segs[seg].ds_len & PGOFSET)
+			panic("_bus_dmamem_mmap: segment size not multiple"
+			    " of page size");
+#endif	/* DIAGNOSTIC */
+		if (off < segs[seg].ds_len)
+			return (arm_btop((u_long)segs[seg].ds_addr + off));
+
+		off -= segs[seg].ds_len;
+	}
+
+	/* Page not found. */
+	return (-1);
+}
+
+/**********************************************************************
+ * DMA utility functions
+ **********************************************************************/
+
+/*
+ * Utility function to load a linear buffer.  lastaddrp holds state
+ * between invocations (for multiple-buffer loads).  segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+int
+_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
+    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
+    int *segp, int first)
+{
+	struct arm32_dma_range *dr;
+	bus_size_t sgsize;
+	bus_addr_t curaddr, lastaddr, baddr, bmask;
+	vaddr_t vaddr = (vaddr_t)buf;
+	pd_entry_t *pde;
+	pt_entry_t pte;
+	int seg;
+	pmap_t pmap;
+	pt_entry_t *ptep;
+
+#ifdef DEBUG_DMA
+	printf("_bus_dmamem_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
+	    buf, buflen, flags, first);
+#endif	/* DEBUG_DMA */
+
+	/* Translate through the owning process's pmap for user buffers. */
+	if (p != NULL)
+		pmap = p->p_vmspace->vm_map.pmap;
+	else
+		pmap = pmap_kernel();
+
+	lastaddr = *lastaddrp;
+	/* bmask is only meaningful when _dm_boundary != 0 (guarded below). */
+	bmask = ~(map->_dm_boundary - 1);
+
+	for (seg = *segp; buflen > 0; ) {
+		/*
+		 * Get the physical address for this segment.
+		 *
+		 * XXX Don't support checking for coherent mappings
+		 * XXX in user address space.
+		 */
+		if (__predict_true(pmap == pmap_kernel())) {
+			/*
+			 * Walk the page tables directly so we can also see
+			 * whether the page is mapped cacheable; any cacheable
+			 * mapping clears the map's COHERENT flag.
+			 */
+			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
+			if (__predict_false(pmap_pde_section(pde))) {
+				/* L1 section mapping. */
+				curaddr = (*pde & L1_S_FRAME) |
+				    (vaddr & L1_S_OFFSET);
+				if (*pde & L1_S_CACHE_MASK) {
+					map->_dm_flags &=
+					    ~ARM32_DMAMAP_COHERENT;
+				}
+			} else {
+				pte = *ptep;
+				KDASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV);
+				if (__predict_false((pte & L2_TYPE_MASK)
+				    == L2_TYPE_L)) {
+					/* L2 large page. */
+					curaddr = (pte & L2_L_FRAME) |
+					    (vaddr & L2_L_OFFSET);
+					if (pte & L2_L_CACHE_MASK) {
+						map->_dm_flags &=
+						    ~ARM32_DMAMAP_COHERENT;
+					}
+				} else {
+					/* L2 small page. */
+					curaddr = (pte & L2_S_FRAME) |
+					    (vaddr & L2_S_OFFSET);
+					if (pte & L2_S_CACHE_MASK) {
+						map->_dm_flags &=
+						    ~ARM32_DMAMAP_COHERENT;
+					}
+				}
+			}
+		} else {
+			/* User mapping: assume non-coherent (see XXX above). */
+			(void) pmap_extract(pmap, vaddr, &curaddr);
+			map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;
+		}
+
+		/*
+		 * Make sure we're in an allowed DMA range.
+		 */
+		if (t->_ranges != NULL) {
+			/* XXX cache last result? */
+			dr = _bus_dma_inrange(t->_ranges, t->_nranges,
+			    curaddr);
+			if (dr == NULL)
+				return (EINVAL);
+
+			/*
+			 * In a valid DMA range.  Translate the physical
+			 * memory address to an address in the DMA window.
+			 */
+			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
+		}
+
+		/*
+		 * Compute the segment size, and adjust counts.
+		 */
+		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
+		if (buflen < sgsize)
+			sgsize = buflen;
+
+		/*
+		 * Make sure we don't cross any boundaries.
+		 */
+		if (map->_dm_boundary > 0) {
+			baddr = (curaddr + map->_dm_boundary) & bmask;
+			if (sgsize > (baddr - curaddr))
+				sgsize = (baddr - curaddr);
+		}
+
+		/*
+		 * Insert chunk into a segment, coalescing with
+		 * previous segment if possible.
+		 */
+		if (first) {
+			map->dm_segs[seg].ds_addr = curaddr;
+			map->dm_segs[seg].ds_len = sgsize;
+			first = 0;
+		} else {
+			/*
+			 * Coalesce only if physically contiguous, within
+			 * the maximum segment size, and not crossing a
+			 * boundary line.
+			 */
+			if (curaddr == lastaddr &&
+			    (map->dm_segs[seg].ds_len + sgsize) <=
+			     map->_dm_maxsegsz &&
+			    (map->_dm_boundary == 0 ||
+			     (map->dm_segs[seg].ds_addr & bmask) ==
+			     (curaddr & bmask)))
+				map->dm_segs[seg].ds_len += sgsize;
+			else {
+				if (++seg >= map->_dm_segcnt)
+					break;
+				map->dm_segs[seg].ds_addr = curaddr;
+				map->dm_segs[seg].ds_len = sgsize;
+			}
+		}
+
+		lastaddr = curaddr + sgsize;
+		vaddr += sgsize;
+		buflen -= sgsize;
+	}
+
+	*segp = seg;
+	*lastaddrp = lastaddr;
+
+	/*
+	 * Did we fit?
+	 */
+	if (buflen != 0)
+		return (EFBIG);		/* XXX better return value here? */
+	return (0);
+}
+
+/*
+ * Allocate physical memory from the given physical address range.
+ * Called by DMA-safe memory allocation methods.
+ */
+/*
+ * _bus_dmamem_alloc_range:
+ *
+ *	Allocate physical memory from [low, high) and describe it as a
+ *	list of physically contiguous segments in 'segs'.  Uses the
+ *	TAILQ_* accessor macros rather than poking at the queue's
+ *	internal field names, matching the TAILQ_INIT usage here.
+ */
+int
+_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
+    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
+    int flags, paddr_t low, paddr_t high)
+{
+	paddr_t curaddr, lastaddr;
+	struct vm_page *m;
+	struct pglist mlist;
+	int curseg, error;
+
+#ifdef DEBUG_DMA
+	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
+	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
+#endif	/* DEBUG_DMA */
+
+	/* Always round the size. */
+	size = round_page(size);
+
+	TAILQ_INIT(&mlist);
+	/*
+	 * Allocate pages from the VM system.
+	 */
+	error = uvm_pglistalloc(size, low, high, alignment, boundary,
+	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
+	if (error)
+		return (error);
+
+	/*
+	 * Compute the location, size, and number of segments actually
+	 * returned by the VM code.
+	 */
+	m = TAILQ_FIRST(&mlist);
+	curseg = 0;
+	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
+	segs[curseg].ds_len = PAGE_SIZE;
+#ifdef DEBUG_DMA
+	printf("alloc: page %lx\n", lastaddr);
+#endif	/* DEBUG_DMA */
+
+	/* Merge physically contiguous pages into single segments. */
+	for (m = TAILQ_NEXT(m, pageq); m != NULL; m = TAILQ_NEXT(m, pageq)) {
+		curaddr = VM_PAGE_TO_PHYS(m);
+#ifdef DIAGNOSTIC
+		if (curaddr < low || curaddr >= high) {
+			printf("uvm_pglistalloc returned non-sensical"
+			    " address 0x%lx\n", curaddr);
+			panic("_bus_dmamem_alloc_range");
+		}
+#endif	/* DIAGNOSTIC */
+#ifdef DEBUG_DMA
+		printf("alloc: page %lx\n", curaddr);
+#endif	/* DEBUG_DMA */
+		if (curaddr == (lastaddr + PAGE_SIZE))
+			segs[curseg].ds_len += PAGE_SIZE;
+		else {
+			/* Discontiguity: start a new segment. */
+			curseg++;
+			segs[curseg].ds_addr = curaddr;
+			segs[curseg].ds_len = PAGE_SIZE;
+		}
+		lastaddr = curaddr;
+	}
+
+	*rsegs = curseg + 1;
+
+	return (0);
+}
+
+/*
+ * Check if a memory region intersects with a DMA range, and return the
+ * page-rounded intersection if it does.
+ */
+int
+arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
+    paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
+{
+	struct arm32_dma_range *r;
+	paddr_t rend;
+	int n;
+
+	if (ranges == NULL)
+		return (0);
+
+	/* Test the region [pa, pa+size) against each DMA range. */
+	for (n = 0; n < nranges; n++) {
+		r = &ranges[n];
+		rend = r->dr_sysbase + r->dr_len;
+
+		if (r->dr_sysbase <= pa && pa < rend) {
+			/*
+			 * Beginning of region intersects with this range.
+			 */
+			*pap = trunc_page(pa);
+			*sizep = round_page(min(pa + size, rend) - pa);
+			return (1);
+		}
+		if (pa < r->dr_sysbase && r->dr_sysbase < (pa + size)) {
+			/*
+			 * End of region intersects with this range.
+			 */
+			*pap = trunc_page(r->dr_sysbase);
+			*sizep = round_page(min((pa + size) - r->dr_sysbase,
+			    r->dr_len));
+			return (1);
+		}
+	}
+
+	/* No intersection found. */
+	return (0);
+}
diff --git a/sys/arch/arm/arm/bus_space_asm_generic.S b/sys/arch/arm/arm/bus_space_asm_generic.S
new file mode 100644
index 00000000000..80817e5008b
--- /dev/null
+++ b/sys/arch/arm/arm/bus_space_asm_generic.S
@@ -0,0 +1,352 @@
+/* $OpenBSD: bus_space_asm_generic.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: bus_space_asm_generic.S,v 1.3 2003/03/27 19:46:14 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1997 Causality Limited.
+ * Copyright (c) 1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <arm/asm.h>
+#include <arm/cpuconf.h>
+
+/*
+ * Generic bus_space functions.
+ */
+
+/*
+ * read single
+ */
+
+/*
+ * Single-datum reads: (tag, bsh, offset) -> value.
+ * In:  r1 = bus space handle, r2 = offset.  (r0 = tag, unused.)
+ * Out: r0 = value read.
+ */
+ENTRY(generic_bs_r_1)
+	ldrb	r0, [r1, r2]
+	mov	pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+/* Halfword read; ldrh requires an ARMv4 or later core. */
+ENTRY(generic_armv4_bs_r_2)
+	ldrh	r0, [r1, r2]
+	mov	pc, lr
+#endif
+
+/* Word read. */
+ENTRY(generic_bs_r_4)
+	ldr	r0, [r1, r2]
+	mov	pc, lr
+
+/*
+ * write single
+ */
+
+/*
+ * Single-datum writes: (tag, bsh, offset, value).
+ * In:  r1 = bus space handle, r2 = offset, r3 = value.
+ */
+ENTRY(generic_bs_w_1)
+	strb	r3, [r1, r2]
+	mov	pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+/* Halfword write; strh requires an ARMv4 or later core. */
+ENTRY(generic_armv4_bs_w_2)
+	strh	r3, [r1, r2]
+	mov	pc, lr
+#endif
+
+/* Word write. */
+ENTRY(generic_bs_w_4)
+	str	r3, [r1, r2]
+	mov	pc, lr
+
+/*
+ * read multiple
+ */
+
+/*
+ * Read multiple: (tag, bsh, offset, buf, count).
+ * In:  r1 = handle, r2 = offset, r3 = destination buffer, [sp] = count.
+ * Reads 'count' data from the one device address (r0 = bsh + offset is
+ * NOT incremented) into the memory buffer.  Returns at once if count
+ * is zero.
+ */
+ENTRY(generic_bs_rm_1)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldrb	r3, [r0]
+	strb	r3, [r1], #1
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+/* Halfword variant; ldrh requires ARMv4+. */
+ENTRY(generic_armv4_bs_rm_2)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldrh	r3, [r0]
+	strh	r3, [r1], #2
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+#endif
+
+/* Word variant. */
+ENTRY(generic_bs_rm_4)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldr	r3, [r0]
+	str	r3, [r1], #4
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+
+/*
+ * write multiple
+ */
+
+/*
+ * Write multiple: (tag, bsh, offset, buf, count).
+ * In:  r1 = handle, r2 = offset, r3 = source buffer, [sp] = count.
+ * Writes 'count' data from the memory buffer to the one device
+ * address (r0 = bsh + offset is NOT incremented).
+ */
+ENTRY(generic_bs_wm_1)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldrb	r3, [r1], #1
+	strb	r3, [r0]
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+/* Halfword variant; strh requires ARMv4+. */
+ENTRY(generic_armv4_bs_wm_2)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldrh	r3, [r1], #2
+	strh	r3, [r0]
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+#endif
+
+/* Word variant. */
+ENTRY(generic_bs_wm_4)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldr	r3, [r1], #4
+	str	r3, [r0]
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+
+/*
+ * read region
+ */
+
+/*
+ * Read region: (tag, bsh, offset, buf, count).
+ * In:  r1 = handle, r2 = offset, r3 = destination buffer, [sp] = count.
+ * Like read-multiple, but the device address (r0) advances with each
+ * transfer, copying a consecutive region of bus space into memory.
+ */
+ENTRY(generic_bs_rr_1)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldrb	r3, [r0], #1
+	strb	r3, [r1], #1
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+/* Halfword variant; ldrh requires ARMv4+. */
+ENTRY(generic_armv4_bs_rr_2)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldrh	r3, [r0], #2
+	strh	r3, [r1], #2
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+#endif
+
+/* Word variant. */
+ENTRY(generic_bs_rr_4)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldr	r3, [r0], #4
+	str	r3, [r1], #4
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+
+/*
+ * write region.
+ */
+
+/*
+ * Write region: (tag, bsh, offset, buf, count).
+ * In:  r1 = handle, r2 = offset, r3 = source buffer, [sp] = count.
+ * Like write-multiple, but the device address (r0) advances with each
+ * transfer, copying memory into a consecutive region of bus space.
+ */
+ENTRY(generic_bs_wr_1)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldrb	r3, [r1], #1
+	strb	r3, [r0], #1
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+/* Halfword variant; strh requires ARMv4+. */
+ENTRY(generic_armv4_bs_wr_2)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldrh	r3, [r1], #2
+	strh	r3, [r0], #2
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+#endif
+
+/* Word variant. */
+ENTRY(generic_bs_wr_4)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	ldr	r3, [r1], #4
+	str	r3, [r0], #4
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+
+/*
+ * set region
+ */
+
+/*
+ * Set region: (tag, bsh, offset, value, count).
+ * In:  r1 = handle, r2 = offset, r3 = value to store, [sp] = count.
+ * Stores 'value' (moved into r1) to 'count' consecutive bus-space
+ * locations starting at r0 = bsh + offset.
+ */
+ENTRY(generic_bs_sr_1)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	strb	r1, [r0], #1
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+/* Halfword variant; strh requires ARMv4+. */
+ENTRY(generic_armv4_bs_sr_2)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	strh	r1, [r0], #2
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+#endif
+
+/* Word variant. */
+ENTRY(generic_bs_sr_4)
+	add	r0, r1, r2
+	mov	r1, r3
+	ldr	r2, [sp, #0]
+	teq	r2, #0
+	moveq	pc, lr
+
+1:	str	r1, [r0], #4
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+
+/*
+ * copy region
+ */
+
+#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
+/*
+ * Copy region: (tag, bsh1, off1, bsh2, off2, count).
+ * In:  r1 = src handle, r2 = src offset, r3 = dst handle,
+ *      [sp] = dst offset, [sp, #4] = halfword count.
+ * Copies 'count' halfwords within bus space, choosing the copy
+ * direction so that overlapping regions are handled correctly.
+ * NOTE(review): the direction test uses a signed compare (blt); this
+ * only matters if the addresses straddle 0x80000000 -- confirm.
+ */
+ENTRY(generic_armv4_bs_c_2)
+	add	r0, r1, r2
+	ldr	r2, [sp, #0]
+	add	r1, r2, r3
+	ldr	r2, [sp, #4]
+	teq	r2, #0
+	moveq	pc, lr
+
+	cmp	r0, r1
+	blt	2f
+
+/* Forward copy (src >= dst). */
+1:	ldrh	r3, [r0], #2
+	strh	r3, [r1], #2
+	subs	r2, r2, #1
+	bne	1b
+
+	mov	pc, lr
+
+/* Backward copy (src < dst): start from the last halfword. */
+2:	add	r0, r0, r2, lsl #1
+	add	r1, r1, r2, lsl #1
+	sub	r0, r0, #2
+	sub	r1, r1, #2
+
+3:	ldrh	r3, [r0], #-2
+	strh	r3, [r1], #-2
+	subs	r2, r2, #1
+	bne	3b
+
+	mov	pc, lr
+#endif
diff --git a/sys/arch/arm/arm/bus_space_notimpl.S b/sys/arch/arm/arm/bus_space_notimpl.S
new file mode 100644
index 00000000000..a0a51be2d47
--- /dev/null
+++ b/sys/arch/arm/arm/bus_space_notimpl.S
@@ -0,0 +1,160 @@
+/* $OpenBSD: bus_space_notimpl.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: bus_space_notimpl.S,v 1.2 2001/09/10 02:20:19 reinoud Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+/*
+ * BUS_SPACE - name of this bus space
+ */
+
+#define BUS_SPACE	bs_notimpl
+
+/* Token-pasting / stringizing helpers used to build the stub names. */
+#define __C(x,y)	__CONCAT(x,y)
+#define __S(s)		__STRING(s)
+#define NAME(func)	__C(BUS_SPACE,__C(_bs_,func))
+#define LNAME(func)	__C(L,NAME(func))
+
+#define __L(x)	_C_LABEL(x)
+#define GLOBAL(func)	.global	__L(NAME(func))
+#define LABEL(func)	__L(NAME(func)):
+#define LLABEL(func)	LNAME(func):
+
+#define FTEXT(func,text)	__S(__C(NAME(func),text))
+
+
+/*
+ * NOT_IMPL(func) emits a global stub named <BUS_SPACE>_bs_<func> that
+ * saves r0-r3 on the stack, then calls panic() with a message naming
+ * the stub (r0) and the address of the saved arguments (r1).
+ */
+#define NOT_IMPL(func)							\
+	GLOBAL(func)						;	\
+LABEL(func)							;	\
+	stmfd	sp!, {r0-r3}					;	\
+	adr	r0, LNAME(__C(func,_text))			;	\
+	mov	r1, sp						;	\
+	b	_C_LABEL(panic)					;	\
+								;	\
+LLABEL(__C(func,_text))						;	\
+	.asciz	FTEXT(func,: args at 0x%08x\n)			;	\
+	.align	0	;
+
+
+/*
+ * misc functions
+ */
+
+/*
+ * Each NOT_IMPL() below expands (via the macro earlier in this file)
+ * to a panic()ing stub for one bus_space method.
+ */
+NOT_IMPL(mmap)
+
+
+/*
+ * Generic bus_space I/O functions
+ */
+
+/*
+ * read single
+ */
+
+NOT_IMPL(r_1)
+NOT_IMPL(r_2)
+NOT_IMPL(r_4)
+NOT_IMPL(r_8)
+
+/*
+ * write single
+ */
+
+NOT_IMPL(w_1)
+NOT_IMPL(w_2)
+NOT_IMPL(w_4)
+NOT_IMPL(w_8)
+
+/*
+ * read multiple
+ */
+
+NOT_IMPL(rm_1)
+NOT_IMPL(rm_2)
+NOT_IMPL(rm_4)
+NOT_IMPL(rm_8)
+
+/*
+ * write multiple
+ */
+
+NOT_IMPL(wm_1)
+NOT_IMPL(wm_2)
+NOT_IMPL(wm_4)
+NOT_IMPL(wm_8)
+
+/*
+ * read region
+ */
+
+NOT_IMPL(rr_1)
+NOT_IMPL(rr_2)
+NOT_IMPL(rr_4)
+NOT_IMPL(rr_8)
+
+/*
+ * write region
+ */
+
+NOT_IMPL(wr_1)
+NOT_IMPL(wr_2)
+NOT_IMPL(wr_4)
+NOT_IMPL(wr_8)
+
+/*
+ * set multiple
+ */
+
+NOT_IMPL(sm_1)
+NOT_IMPL(sm_2)
+NOT_IMPL(sm_4)
+NOT_IMPL(sm_8)
+
+/*
+ * set region
+ */
+
+NOT_IMPL(sr_1)
+NOT_IMPL(sr_2)
+NOT_IMPL(sr_4)
+NOT_IMPL(sr_8)
+
+/*
+ * copy
+ */
+
+NOT_IMPL(c_1)
+NOT_IMPL(c_2)
+NOT_IMPL(c_4)
+NOT_IMPL(c_8)
diff --git a/sys/arch/arm/arm/conf.c b/sys/arch/arm/arm/conf.c
new file mode 100644
index 00000000000..dd8a21bcf46
--- /dev/null
+++ b/sys/arch/arm/arm/conf.c
@@ -0,0 +1,525 @@
+/* $OpenBSD: conf.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: conf.c,v 1.10 2002/04/19 01:04:38 wiz Exp $ */
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * conf.c
+ *
+ * Character and Block Device configuration
+ * Console configuration
+ *
+ * Defines the structures cdevsw and constab
+ *
+ * Created : 17/09/94
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/conf.h>
+#include <sys/vnode.h>
+
+#include <machine/conf.h>
+
+/*
+ * From this point, these need to be MI foo.h files.
+ */
+
+/*
+ * Standard MI devices (e.g. ones in dev/ic)
+ */
+#include "com.h" /* NS164x0 serial ports */
+
+/*
+ * Standard pseudo-devices
+ */
+#include "bpfilter.h"
+#include "pf.h"
+#include "pty.h"
+#include "tun.h"
+
+/*
+ * Disk/Filesystem pseudo-devices
+ */
+#include "ccd.h" /* concatenated disk driver */
+#include "rd.h" /* memory disk driver */
+#include "raid.h" /* RAIDframe */
+#include "vnd.h" /* vnode disk driver */
+
+/*
+ * WD/ATA devices
+ */
+#include "wd.h"
+bdev_decl(wd);
+bdev_decl(sw);
+
+/*
+ * ISDN devices
+ */
+#ifdef CONF_HAVE_ISDN
+#include "isdn.h"
+#include "isdnctl.h"
+#include "isdntrc.h"
+#include "isdnbchan.h"
+#include "isdntel.h"
+#else
+#define NISDN 0
+#define NISDNCTL 0
+#define NISDNTRC 0
+#define NISDNBCHAN 0
+#define NISDNTEL 0
+#endif
+
+#ifdef CONF_HAVE_PCI
+#include "iop.h"
+#include "pci.h"
+#else
+#define NIOP 0
+#define NMLX 0
+#define NMLY 0
+#define NPCI 0
+#endif
+#define NAGP 0
+/*
+ * SCSI/ATAPI devices
+ */
+#include "sd.h"
+#include "st.h"
+#include "cd.h"
+#include "ch.h"
+#include "uk.h"
+#include "ss.h"
+
+/*
+ * Audio devices
+ */
+#include "audio.h"
+#include "midi.h"
+#include "sequencer.h"
+
+/*
+ * USB devices
+ */
+#include "usb.h"
+#include "ucom.h"
+#include "ugen.h"
+#include "uhid.h"
+#include "ulpt.h"
+#include "urio.h"
+#include "uscanner.h"
+
+/*
+ * WSCONS devices
+ */
+#include "wsdisplay.h"
+/*
+#include "wsfont.h"
+*/
+#include "wskbd.h"
+#include "wsmouse.h"
+#include "wsmux.h"
+cdev_decl(wskbd);
+cdev_decl(wsmouse);
+
+#include "lpt.h"
+
+#include "radio.h"
+cdev_decl(radio);
+
+#include <arm/conf.h>
+
+/* Block devices */
+
+/*
+ * Block device switch: index = major number.  Dummy entries reserve
+ * unassigned majors for loadable modules.  (Comment labels after
+ * major 91 corrected: the original labels skipped 92.)
+ */
+struct bdevsw bdevsw[] = {
+	bdev_lkm_dummy(),		/* 0: */
+	bdev_swap_init(1, sw),		/* 1: swap pseudo-device */
+	bdev_lkm_dummy(),		/* 2: */
+	bdev_lkm_dummy(),		/* 3: */
+	bdev_lkm_dummy(),		/* 4: */
+	bdev_lkm_dummy(),		/* 5: */
+	bdev_lkm_dummy(),		/* 6: */
+	bdev_lkm_dummy(),		/* 7: */
+	bdev_lkm_dummy(),		/* 8: */
+	bdev_lkm_dummy(),		/* 9: */
+	bdev_lkm_dummy(),		/* 10: */
+	bdev_lkm_dummy(),		/* 11: */
+	bdev_lkm_dummy(),		/* 12: */
+	bdev_lkm_dummy(),		/* 13: */
+	bdev_lkm_dummy(),		/* 14: */
+	bdev_lkm_dummy(),		/* 15: */
+	bdev_disk_init(NWD,wd),		/* 16: Internal IDE disk */
+	bdev_lkm_dummy(),		/* 17: */
+	bdev_disk_init(NRD,rd),		/* 18: memory disk */
+	bdev_disk_init(NVND,vnd),	/* 19: vnode disk driver */
+	bdev_lkm_dummy(),		/* 20: */
+	bdev_disk_init(NCCD,ccd),	/* 21: concatenated disk driver */
+	bdev_lkm_dummy(),		/* 22: */
+	bdev_lkm_dummy(),		/* 23: */
+	bdev_disk_init(NSD,sd),		/* 24: SCSI disk */
+	bdev_tape_init(NST,st),		/* 25: SCSI tape */
+	bdev_disk_init(NCD,cd),		/* 26: SCSI cdrom */
+	bdev_lkm_dummy(),		/* 27: */
+	bdev_lkm_dummy(),		/* 28: */
+	bdev_lkm_dummy(),		/* 29: */
+	bdev_lkm_dummy(),		/* 30: */
+	bdev_lkm_dummy(),		/* 31: */
+	bdev_lkm_dummy(),		/* 32: */
+	bdev_lkm_dummy(),		/* 33: */
+	bdev_lkm_dummy(),		/* 34: */
+	bdev_lkm_dummy(),		/* 35: */
+	bdev_lkm_dummy(),		/* 36: */
+	bdev_lkm_dummy(),		/* 37: */
+	bdev_lkm_dummy(),		/* 38: */
+	bdev_lkm_dummy(),		/* 39: */
+	bdev_lkm_dummy(),		/* 40: */
+	bdev_lkm_dummy(),		/* 41: */
+	bdev_lkm_dummy(),		/* 42: */
+	bdev_lkm_dummy(),		/* 43: */
+	bdev_lkm_dummy(),		/* 44: */
+	bdev_lkm_dummy(),		/* 45: */
+	bdev_lkm_dummy(),		/* 46: */
+	bdev_lkm_dummy(),		/* 47: */
+	bdev_lkm_dummy(),		/* 48: */
+	bdev_lkm_dummy(),		/* 49: */
+	bdev_lkm_dummy(),		/* 50: */
+	bdev_lkm_dummy(),		/* 51: */
+	bdev_lkm_dummy(),		/* 52: */
+	bdev_lkm_dummy(),		/* 53: */
+	bdev_lkm_dummy(),		/* 54: */
+	bdev_lkm_dummy(),		/* 55: */
+	bdev_lkm_dummy(),		/* 56: */
+	bdev_lkm_dummy(),		/* 57: */
+	bdev_lkm_dummy(),		/* 58: */
+	bdev_lkm_dummy(),		/* 59: */
+	bdev_lkm_dummy(),		/* 60: */
+	bdev_lkm_dummy(),		/* 61: */
+	bdev_lkm_dummy(),		/* 62: */
+	bdev_lkm_dummy(),		/* 63: */
+	bdev_lkm_dummy(),		/* 64: */
+	bdev_lkm_dummy(),		/* 65: */
+	bdev_lkm_dummy(),		/* 66: */
+	bdev_lkm_dummy(),		/* 67: */
+	bdev_lkm_dummy(),		/* 68: */
+	bdev_lkm_dummy(),		/* 69: */
+	bdev_lkm_dummy(),		/* 70: */
+	bdev_disk_init(NRAID,raid),	/* 71: RAIDframe disk driver */
+	bdev_lkm_dummy(),		/* 72: */
+	bdev_lkm_dummy(),		/* 73: */
+	bdev_lkm_dummy(),		/* 74: */
+	bdev_lkm_dummy(),		/* 75: */
+	bdev_lkm_dummy(),		/* 76: */
+	bdev_lkm_dummy(),		/* 77: */
+	bdev_lkm_dummy(),		/* 78: */
+	bdev_lkm_dummy(),		/* 79: */
+	bdev_lkm_dummy(),		/* 80: */
+	bdev_lkm_dummy(),		/* 81: */
+	bdev_lkm_dummy(),		/* 82: */
+	bdev_lkm_dummy(),		/* 83: */
+	bdev_lkm_dummy(),		/* 84: */
+	bdev_lkm_dummy(),		/* 85: */
+	bdev_lkm_dummy(),		/* 86: */
+	bdev_lkm_dummy(),		/* 87: */
+	bdev_lkm_dummy(),		/* 88: */
+	bdev_lkm_dummy(),		/* 89: */
+	bdev_lkm_dummy(),		/* 90: */
+	bdev_lkm_dummy(),		/* 91: */
+	bdev_lkm_dummy(),		/* 92: */
+	bdev_lkm_dummy(),		/* 93: */
+	bdev_lkm_dummy(),		/* 94: */
+	bdev_lkm_dummy(),		/* 95: */
+	bdev_lkm_dummy(),		/* 96: */
+};
+
+/* Character devices */
+cdev_decl(isdn);
+cdev_decl(isdnctl);
+cdev_decl(isdntrc);
+cdev_decl(isdnbchan);
+cdev_decl(isdntel);
+#define ptstty ptytty
+#define ptsioctl ptyioctl
+#define ptctty ptytty
+#define ptcioctl ptyioctl
+
+struct cdevsw cdevsw[] = {
+ cdev_cn_init(1,cn), /* 0: virtual console */
+ cdev_ctty_init(1,ctty), /* 1: controlling terminal */
+ cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
+ cdev_swap_init(1,sw), /* 3: /dev/drum (swap pseudo-device) */
+ cdev_tty_init(NPTY,pts), /* 4: pseudo-tty slave */
+ cdev_ptc_init(NPTY,ptc), /* 5: pseudo-tty master */
+ cdev_log_init(1,log), /* 6: /dev/klog */
+ cdev_fd_init(1,filedesc), /* 7: file descriptor pseudo-device */
+ cdev_lkm_dummy(), /* 8: */
+ cdev_lpt_init(NLPT,lpt), /* 9: parallel printer */
+ cdev_lkm_dummy(), /* 10: */
+ cdev_lkm_dummy(), /* 11: */
+ cdev_tty_init(NCOM,com), /* 12: serial port */
+ cdev_lkm_dummy(), /* 13: */
+ cdev_lkm_dummy(), /* 14: */
+ cdev_lkm_dummy(), /* 15: */
+ cdev_disk_init(NWD,wd), /* 16: ST506/ESDI/IDE disk */
+ cdev_lkm_dummy(), /* 17: */
+ cdev_disk_init(NRD,rd), /* 18: ram disk driver */
+ cdev_disk_init(NVND,vnd), /* 19: vnode disk driver */
+ cdev_lkm_dummy(), /* 20: */
+ cdev_disk_init(NCCD,ccd), /* 21: concatenated disk driver */
+ cdev_bpftun_init(NBPFILTER,bpf), /* 22: Berkeley packet filter */
+ cdev_lkm_dummy(), /* 23: */
+ cdev_disk_init(NSD,sd), /* 24: SCSI disk */
+ cdev_tape_init(NST,st), /* 25: SCSI tape */
+ cdev_disk_init(NCD,cd), /* 26: SCSI CD-ROM */
+ cdev_ch_init(NCH,ch), /* 27: SCSI autochanger */
+ cdev_uk_init(NUK,uk), /* 28: SCSI unknown */
+ cdev_scanner_init(NSS,ss), /* 29: SCSI scanner */
+ cdev_lkm_dummy(), /* 30: */
+ cdev_lkm_dummy(), /* 31: */
+ cdev_lkm_dummy(), /* 32: */
+ cdev_bpftun_init(NTUN,tun), /* 33: network tunnel */
+ cdev_lkm_dummy(), /* 34: */
+ cdev_lkm_init(NLKM,lkm), /* 35: loadable module driver */
+ cdev_audio_init(NAUDIO,audio), /* 36: generic audio I/O */
+ cdev_notdef(), /* 37: removed cpu device */
+ cdev_notdef(), /* 38: removed cpu device */
+ cdev_lkm_dummy(), /* 39: reserved */
+ cdev_lkm_dummy(), /* 40: reserved */
+ cdev_lkm_dummy(), /* 41: reserved */
+ cdev_lkm_dummy(), /* 42: reserved */
+ cdev_lkm_dummy(), /* 43: reserved */
+ cdev_lkm_dummy(), /* 44: reserved */
+ cdev_lkm_dummy(), /* 45: reserved */
+ cdev_pf_init(NPF,pf), /* 46: packet filter */
+ cdev_lkm_dummy(), /* 47: reserved */
+ cdev_lkm_dummy(), /* 48: reserved */
+ cdev_lkm_dummy(), /* 49: reserved */
+ cdev_lkm_dummy(), /* 50: reserved */
+ cdev_notdef(), /* 51: reserved */
+ cdev_notdef(), /* 52: reserved */
+ cdev_notdef(), /* 53: reserved */
+ cdev_tty_init(NFCOM,fcom), /* 54: FOOTBRIDGE console */
+ cdev_lkm_dummy(), /* 55: Reserved for bypass device */
+ cdev_notdef(), /* 56: reserved */
+ cdev_midi_init(NMIDI,midi), /* 57: MIDI I/O */
+ cdev_midi_init(NSEQUENCER,sequencer), /* 58: sequencer I/O */
+ cdev_notdef(), /* 59: reserved */
+ cdev_wsdisplay_init(NWSDISPLAY,wsdisplay), /* 60: frame buffers, etc.*/
+ cdev_mouse_init(NWSKBD,wskbd), /* 61: keyboards */
+ cdev_mouse_init(NWSMOUSE,wsmouse), /* 62: mice */
+ cdev_mouse_init(NWSMUX,wsmux), /* 63: ws multiplexor */
+ cdev_usb_init(NUSB,usb), /* 64: USB controller */
+ cdev_usbdev_init(NUHID,uhid), /* 65: USB generic HID */
+ cdev_lpt_init(NULPT,ulpt), /* 66: USB printer */
+ cdev_urio_init(NURIO,urio), /* 67: Diamond Rio 500 */
+ cdev_tty_init(NUCOM,ucom), /* 68: USB tty */
+ cdev_usbdev_init(NUSCANNER,uscanner), /* 69: USB scanner */
+ cdev_usbdev_init(NUGEN,ugen), /* 70: USB generic driver */
+ cdev_disk_init(NRAID,raid), /* 71: RAIDframe disk driver */
+ cdev_lkm_dummy(), /* 72: reserved */
+ cdev_lkm_dummy(), /* 73: reserved */
+ cdev_lkm_dummy(), /* 74: reserved */
+ cdev_lkm_dummy(), /* 75: reserved */
+ cdev_lkm_dummy(), /* 76: reserved */
+ cdev_notdef(), /* 77: removed device */
+ cdev_notdef(), /* 78: removed device */
+ cdev_notdef(), /* 79: removed device */
+ cdev_notdef(), /* 80: removed device */
+ cdev_notdef(), /* 81: removed device */
+ cdev_notdef(), /* 82: removed device */
+ cdev_notdef(), /* 83: removed device */
+ cdev_notdef(), /* 84: removed device */
+ cdev_notdef(), /* 85: removed device */
+ cdev_notdef(), /* 86: removed device */
+ cdev_notdef(), /* 87: removed device */
+ cdev_notdef(), /* 88: removed device */
+ cdev_notdef(), /* 89: removed device */
+ cdev_notdef(), /* 90: removed device */
+ cdev_notdef(), /* 91: removed device */
+ cdev_notdef(), /* 92: removed device */
+ cdev_notdef(), /* 93: removed device */
+ cdev_notdef(), /* 94: removed device */
+ cdev_notdef(), /* 95: removed device */
+ cdev_notdef(), /* 96: removed device */
+ cdev_radio_init(NRADIO,radio), /* 97: generic radio I/O */
+};
+
+int nblkdev = sizeof(bdevsw) / sizeof(bdevsw[0]);
+int nchrdev = sizeof(cdevsw) / sizeof(cdevsw[0]);
+
+int mem_no = 2; /* major device number of memory special file */
+
+/*
+ * Swapdev is a fake device implemented
+ * in sw.c used only internally to get to swstrategy.
+ * It cannot be provided to the users, because the
+ * swstrategy routine munches the b_dev and b_blkno entries
+ * before calling the appropriate driver. This would horribly
+ * confuse, e.g. the hashing routines. Instead, /dev/drum is
+ * provided as a character (raw) device.
+ */
+dev_t swapdev = makedev(1, 0);
+
+/*
+ * Returns true if dev is /dev/mem or /dev/kmem.
+ */
+int
+iskmemdev(dev)
+ dev_t dev;
+{
+ return (major(dev) == mem_no && minor(dev) < 2);
+}
+
+/*
+ * Returns true if dev is /dev/zero.
+ */
+int
+iszerodev(dev)
+ dev_t dev;
+{
+ return (major(dev) == mem_no && minor(dev) == 3);
+}
+
+
+int chrtoblktbl[] = {
+/* XXXX This needs to be dynamic for LKMs. */
+ /*VCHR*/ /*VBLK*/
+ /* 0 */ NODEV,
+ /* 1 */ 1,
+ /* 2 */ NODEV,
+ /* 3 */ NODEV,
+ /* 4 */ NODEV,
+ /* 5 */ NODEV,
+ /* 6 */ NODEV,
+ /* 7 */ NODEV,
+ /* 8 */ NODEV,
+ /* 9 */ NODEV,
+ /* 10 */ NODEV,
+ /* 11 */ NODEV,
+ /* 12 */ NODEV,
+ /* 13 */ NODEV,
+ /* 14 */ NODEV,
+ /* 15 */ NODEV,
+ /* 16 */ 16,
+ /* 17 */ 17,
+ /* 18 */ 18,
+ /* 19 */ 19,
+ /* 20 */ NODEV,
+ /* 21 */ 21,
+ /* 22 */ NODEV,
+ /* 23 */ NODEV,
+ /* 24 */ 24,
+ /* 25 */ 25,
+ /* 26 */ 26,
+ /* 27 */ NODEV,
+ /* 28 */ NODEV,
+ /* 29 */ NODEV,
+ /* 30 */ NODEV,
+ /* 31 */ NODEV,
+ /* 32 */ NODEV,
+ /* 33 */ NODEV,
+ /* 34 */ NODEV,
+ /* 35 */ NODEV,
+ /* 36 */ NODEV,
+ /* 37 */ NODEV,
+ /* 38 */ NODEV,
+ /* 39 */ NODEV,
+ /* 40 */ NODEV,
+ /* 41 */ NODEV,
+ /* 42 */ NODEV,
+ /* 43 */ NODEV,
+ /* 44 */ NODEV,
+ /* 45 */ NODEV,
+ /* 46 */ NODEV,
+ /* 47 */ NODEV,
+ /* 48 */ NODEV,
+ /* 49 */ NODEV,
+ /* 50 */ NODEV,
+ /* 51 */ NODEV,
+ /* 52 */ NODEV,
+ /* 53 */ NODEV,
+ /* 54 */ NODEV,
+ /* 55 */ NODEV,
+ /* 56 */ NODEV,
+ /* 57 */ NODEV,
+ /* 58 */ NODEV,
+ /* 59 */ NODEV,
+ /* 60 */ NODEV,
+ /* 61 */ NODEV,
+ /* 62 */ NODEV,
+ /* 63 */ NODEV,
+ /* 64 */ NODEV,
+ /* 65 */ NODEV,
+ /* 66 */ NODEV,
+ /* 67 */ NODEV,
+ /* 68 */ NODEV,
+ /* 69 */ NODEV,
+ /* 70 */ NODEV,
+ /* 71 */ 71,
+ /* 72 */ NODEV,
+ /* 73 */ NODEV,
+ /* 74 */ NODEV,
+ /* 75 */ NODEV,
+ /* 76 */ NODEV,
+ /* 77 */ NODEV,
+ /* 78 */ NODEV,
+ /* 79 */ NODEV,
+ /* 80 */ NODEV,
+ /* 81 */ NODEV,
+ /* 82 */ NODEV,
+ /* 83 */ NODEV,
+ /* 84 */ NODEV,
+ /* 85 */ NODEV,
+ /* 86 */ NODEV,
+ /* 87 */ NODEV,
+ /* 88 */ NODEV,
+ /* 89 */ NODEV,
+ /* 90 */ NODEV,
+ /* 91 */ NODEV,
+ /* 92 */ 92,
+ /* 93 */ NODEV,
+ /* 94 */ NODEV,
+ /* 95 */ NODEV,
+ /* 96 */ NODEV,
+ /* 97 */ NODEV,
+};
+int nchrtoblktbl = sizeof(chrtoblktbl) / sizeof(chrtoblktbl[0]);
+
+
+dev_t
+getnulldev()
+{
+ return makedev(mem_no, 2);
+}
diff --git a/sys/arch/arm/arm/copystr.S b/sys/arch/arm/arm/copystr.S
new file mode 100644
index 00000000000..c7d0e87696e
--- /dev/null
+++ b/sys/arch/arm/arm/copystr.S
@@ -0,0 +1,229 @@
+/* $OpenBSD: copystr.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: copystr.S,v 1.8 2002/10/13 14:54:48 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * copystr.S
+ *
+ * optimised and fault protected copystr functions
+ *
+ * Created : 16/05/95
+ */
+
+#include "assym.h"
+#include <machine/asm.h>
+#include <sys/errno.h>
+
+ .text
+ .align 0
+#ifdef MULTIPROCESSOR
+.Lcpu_info:
+ .word _C_LABEL(cpu_info)
+#else
+.Lcurpcb:
+ .word _C_LABEL(curpcb)
+#endif
+
+/*
+ * r0 - from
+ * r1 - to
+ * r2 - maxlens
+ * r3 - lencopied
+ *
+ * Copy string from r0 to r1
+ */
+ENTRY(copystr)
+ stmfd sp!, {r4-r5} /* stack is 8 byte aligned */
+ teq r2, #0x00000000
+ mov r5, #0x00000000
+ moveq r0, #ENAMETOOLONG
+ beq 2f
+
+1: ldrb r4, [r0], #0x0001
+ add r5, r5, #0x00000001
+ teq r4, #0x00000000
+ strb r4, [r1], #0x0001
+ teqne r5, r2
+ bne 1b
+
+ teq r4, #0x00000000
+ moveq r0, #0x00000000
+ movne r0, #ENAMETOOLONG
+
+2: teq r3, #0x00000000
+ strne r5, [r3]
+
+ ldmfd sp!, {r4-r5} /* stack is 8 byte aligned */
+ mov pc, lr
+
+#ifdef __PROG32
+#define SAVE_REGS stmfd sp!, {r4-r6}
+#define RESTORE_REGS ldmfd sp!, {r4-r6}
+#else
+/* Need to save R14_svc because it'll get trampled if we take a page fault. */
+#define SAVE_REGS stmfd sp!, {r4-r6, r14}
+#define RESTORE_REGS ldmfd sp!, {r4-r6, r14}
+#endif
+
+/*
+ * r0 - user space address
+ * r1 - kernel space address
+ * r2 - maxlens
+ * r3 - lencopied
+ *
+ * Copy string from user space to kernel space
+ */
+ENTRY(copyinstr)
+ SAVE_REGS
+
+ teq r2, #0x00000000
+ mov r6, #0x00000000
+ moveq r0, #ENAMETOOLONG
+ beq 2f
+
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r3, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r4, .Lcpu_info
+ ldr r4, [r4, r0, lsl #2]
+ ldr r4, [r4, #CI_CURPCB]
+ ldmfd sp!, {r0-r3, r14}
+#else
+ ldr r4, .Lcurpcb
+ ldr r4, [r4]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r4, #0x00000000
+ beq .Lcopystrpcbfault
+#endif
+
+ adr r5, .Lcopystrfault
+ str r5, [r4, #PCB_ONFAULT]
+
+1: ldrbt r5, [r0], #0x0001
+ add r6, r6, #0x00000001
+ teq r5, #0x00000000
+ strb r5, [r1], #0x0001
+ teqne r6, r2
+ bne 1b
+
+ mov r0, #0x00000000
+ str r0, [r4, #PCB_ONFAULT]
+
+ teq r5, #0x00000000
+ moveq r0, #0x00000000
+ movne r0, #ENAMETOOLONG
+
+2: teq r3, #0x00000000
+ strne r6, [r3]
+
+ RESTORE_REGS
+ mov pc, lr
+
+/*
+ * r0 - kernel space address
+ * r1 - user space address
+ * r2 - maxlens
+ * r3 - lencopied
+ *
+ * Copy string from kernel space to user space
+ */
+ENTRY(copyoutstr)
+ SAVE_REGS
+
+ teq r2, #0x00000000
+ mov r6, #0x00000000
+ moveq r0, #ENAMETOOLONG
+ beq 2f
+
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0-r3, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r4, .Lcpu_info
+ ldr r4, [r4, r0, lsl #2]
+ ldr r4, [r4, #CI_CURPCB]
+ ldmfd sp!, {r0-r3, r14}
+#else
+ ldr r4, .Lcurpcb
+ ldr r4, [r4]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r4, #0x00000000
+ beq .Lcopystrpcbfault
+#endif
+
+ adr r5, .Lcopystrfault
+ str r5, [r4, #PCB_ONFAULT]
+
+1: ldrb r5, [r0], #0x0001
+ add r6, r6, #0x00000001
+ teq r5, #0x00000000
+ strbt r5, [r1], #0x0001
+ teqne r6, r2
+ bne 1b
+
+ mov r0, #0x00000000
+ str r0, [r4, #PCB_ONFAULT]
+
+ teq r5, #0x00000000
+ moveq r0, #0x00000000
+ movne r0, #ENAMETOOLONG
+
+2: teq r3, #0x00000000
+ strne r6, [r3]
+
+ RESTORE_REGS
+ mov pc, lr
+
+/* A fault occurred during the copy */
+.Lcopystrfault:
+ mov r1, #0x00000000
+ str r1, [r4, #PCB_ONFAULT]
+ RESTORE_REGS
+ mov pc, lr
+
+#ifdef DIAGNOSTIC
+.Lcopystrpcbfault:
+ mov r2, r1
+ mov r1, r0
+ adr r0, Lcopystrpcbfaulttext
+ bic sp, sp, #7 /* align stack to 8 bytes */
+ b _C_LABEL(panic)
+
+Lcopystrpcbfaulttext:
+ .asciz "No valid PCB during copyinoutstr() addr1=%08x addr2=%08x\n"
+ .align 0
+#endif
diff --git a/sys/arch/arm/arm/cpu.c b/sys/arch/arm/arm/cpu.c
new file mode 100644
index 00000000000..8bfe63a3256
--- /dev/null
+++ b/sys/arch/arm/arm/cpu.c
@@ -0,0 +1,587 @@
+/* $OpenBSD: cpu.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*	$NetBSD: cpu.c,v 1.54 2003/10/26 23:11:15 chris Exp $	*/
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * Copyright (c) 1995 Brini.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpu.c
+ *
+ * Probing and configuration for the master cpu
+ *
+ * Created : 10/10/95
+ */
+
+#include <sys/param.h>
+
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#include <sys/proc.h>
+#include <sys/conf.h>
+#include <uvm/uvm_extern.h>
+#include <machine/cpu.h>
+
+#include <arm/cpuconf.h>
+#include <arm/undefined.h>
+
+#ifdef ARMFPE
+#include <machine/bootconfig.h> /* For boot args */
+#include <arm/fpe-arm/armfpe.h>
+#endif
+
+char cpu_model[256];
+
+/* Prototypes */
+void identify_arm_cpu(struct device *dv, struct cpu_info *);
+
+/*
+ * Identify the master (boot) CPU
+ */
+
+void
+cpu_attach(struct device *dv)
+{
+ int usearmfpe;
+
+ usearmfpe = 1; /* when compiled in, it's enabled by default */
+
+ curcpu()->ci_dev = dv;
+
+ /* Get the cpu ID from coprocessor 15 */
+
+ curcpu()->ci_arm_cpuid = cpu_id();
+ curcpu()->ci_arm_cputype = curcpu()->ci_arm_cpuid & CPU_ID_CPU_MASK;
+ curcpu()->ci_arm_cpurev =
+ curcpu()->ci_arm_cpuid & CPU_ID_REVISION_MASK;
+
+ identify_arm_cpu(dv, curcpu());
+
+ if (curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
+ curcpu()->ci_arm_cpurev < 3) {
+ printf("%s: SA-110 with bugged STM^ instruction\n",
+ dv->dv_xname);
+ }
+
+#ifdef CPU_ARM8
+ if ((curcpu()->ci_arm_cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM810) {
+ int clock = arm8_clock_config(0, 0);
+ char *fclk;
+ aprint_normal("%s: ARM810 cp15=%02x", dv->dv_xname, clock);
+ aprint_normal(" clock:%s", (clock & 1) ? " dynamic" : "");
+ aprint_normal("%s", (clock & 2) ? " sync" : "");
+ switch ((clock >> 2) & 3) {
+ case 0:
+ fclk = "bus clock";
+ break;
+ case 1:
+ fclk = "ref clock";
+ break;
+ case 3:
+ fclk = "pll";
+ break;
+ default:
+ fclk = "illegal";
+ break;
+ }
+ aprint_normal(" fclk source=%s\n", fclk);
+ }
+#endif
+
+#ifdef ARMFPE
+ /*
+ * Ok now we test for an FPA
+ * At this point no floating point emulator has been installed.
+ * This means any FP instruction will cause undefined exception.
+ * We install a temporary coproc 1 handler which will modify
+ * undefined_test if it is called.
+ * We then try to read the FP status register. If undefined_test
+ * has been decremented then the instruction was not handled by
+ * an FPA so we know the FPA is missing. If undefined_test is
+ * still 1 then we know the instruction was handled by an FPA.
+ * We then remove our test handler and look at the
+ * FP status register for identification.
+ */
+
+ /*
+ * Ok if ARMFPE is defined and the boot options request the
+ * ARM FPE then it will be installed as the FPE.
+ * This is just while I work on integrating the new FPE.
+ * It means the new FPE gets installed if compiled in (ARMFPE
+ * defined) and also gives me an on/off option when I boot in
+ * case the new FPE is causing panics.
+ */
+
+
+ if (boot_args)
+ get_bootconf_option(boot_args, "armfpe",
+ BOOTOPT_TYPE_BOOLEAN, &usearmfpe);
+ if (usearmfpe)
+ initialise_arm_fpe();
+#endif
+}
+
+enum cpu_class {
+ CPU_CLASS_NONE,
+ CPU_CLASS_ARM2,
+ CPU_CLASS_ARM2AS,
+ CPU_CLASS_ARM3,
+ CPU_CLASS_ARM6,
+ CPU_CLASS_ARM7,
+ CPU_CLASS_ARM7TDMI,
+ CPU_CLASS_ARM8,
+ CPU_CLASS_ARM9TDMI,
+ CPU_CLASS_ARM9ES,
+ CPU_CLASS_ARM10E,
+ CPU_CLASS_SA1,
+ CPU_CLASS_XSCALE
+};
+
+static const char * const generic_steppings[16] = {
+ "rev 0", "rev 1", "rev 2", "rev 3",
+ "rev 4", "rev 5", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const sa110_steppings[16] = {
+ "rev 0", "step J", "step K", "step S",
+ "step T", "rev 5", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const sa1100_steppings[16] = {
+ "rev 0", "step B", "step C", "rev 3",
+ "rev 4", "rev 5", "rev 6", "rev 7",
+ "step D", "step E", "rev 10", "step G",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const sa1110_steppings[16] = {
+ "step A-0", "rev 1", "rev 2", "rev 3",
+ "step B-0", "step B-1", "step B-2", "step B-3",
+ "step B-4", "step B-5", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const ixp12x0_steppings[16] = {
+ "(IXP1200 step A)", "(IXP1200 step B)",
+ "rev 2", "(IXP1200 step C)",
+ "(IXP1200 step D)", "(IXP1240/1250 step A)",
+ "(IXP1240 step B)", "(IXP1250 step B)",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const xscale_steppings[16] = {
+ "step A-0", "step A-1", "step B-0", "step C-0",
+ "step D-0", "rev 5", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const i80321_steppings[16] = {
+ "step A-0", "step B-0", "rev 2", "rev 3",
+ "rev 4", "rev 5", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const pxa2x0_steppings[16] = {
+ "step A-0", "step A-1", "step B-0", "step B-1",
+ "step B-2", "step C-0", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+static const char * const ixp425_steppings[16] = {
+ "step 0", "rev 1", "rev 2", "rev 3",
+ "rev 4", "rev 5", "rev 6", "rev 7",
+ "rev 8", "rev 9", "rev 10", "rev 11",
+ "rev 12", "rev 13", "rev 14", "rev 15",
+};
+
+struct cpuidtab {
+ u_int32_t cpuid;
+ enum cpu_class cpu_class;
+ const char *cpu_name;
+ const char * const *cpu_steppings;
+};
+
+const struct cpuidtab cpuids[] = {
+ { CPU_ID_ARM2, CPU_CLASS_ARM2, "ARM2",
+ generic_steppings },
+ { CPU_ID_ARM250, CPU_CLASS_ARM2AS, "ARM250",
+ generic_steppings },
+
+ { CPU_ID_ARM3, CPU_CLASS_ARM3, "ARM3",
+ generic_steppings },
+
+ { CPU_ID_ARM600, CPU_CLASS_ARM6, "ARM600",
+ generic_steppings },
+ { CPU_ID_ARM610, CPU_CLASS_ARM6, "ARM610",
+ generic_steppings },
+ { CPU_ID_ARM620, CPU_CLASS_ARM6, "ARM620",
+ generic_steppings },
+
+ { CPU_ID_ARM700, CPU_CLASS_ARM7, "ARM700",
+ generic_steppings },
+ { CPU_ID_ARM710, CPU_CLASS_ARM7, "ARM710",
+ generic_steppings },
+ { CPU_ID_ARM7500, CPU_CLASS_ARM7, "ARM7500",
+ generic_steppings },
+ { CPU_ID_ARM710A, CPU_CLASS_ARM7, "ARM710a",
+ generic_steppings },
+ { CPU_ID_ARM7500FE, CPU_CLASS_ARM7, "ARM7500FE",
+ generic_steppings },
+ { CPU_ID_ARM710T, CPU_CLASS_ARM7TDMI, "ARM710T",
+ generic_steppings },
+ { CPU_ID_ARM720T, CPU_CLASS_ARM7TDMI, "ARM720T",
+ generic_steppings },
+ { CPU_ID_ARM740T8K, CPU_CLASS_ARM7TDMI, "ARM740T (8 KB cache)",
+ generic_steppings },
+ { CPU_ID_ARM740T4K, CPU_CLASS_ARM7TDMI, "ARM740T (4 KB cache)",
+ generic_steppings },
+
+ { CPU_ID_ARM810, CPU_CLASS_ARM8, "ARM810",
+ generic_steppings },
+
+ { CPU_ID_ARM920T, CPU_CLASS_ARM9TDMI, "ARM920T",
+ generic_steppings },
+ { CPU_ID_ARM922T, CPU_CLASS_ARM9TDMI, "ARM922T",
+ generic_steppings },
+ { CPU_ID_ARM940T, CPU_CLASS_ARM9TDMI, "ARM940T",
+ generic_steppings },
+ { CPU_ID_ARM946ES, CPU_CLASS_ARM9ES, "ARM946E-S",
+ generic_steppings },
+ { CPU_ID_ARM966ES, CPU_CLASS_ARM9ES, "ARM966E-S",
+ generic_steppings },
+ { CPU_ID_ARM966ESR1, CPU_CLASS_ARM9ES, "ARM966E-S",
+ generic_steppings },
+ { CPU_ID_TI925T, CPU_CLASS_ARM9TDMI, "TI ARM925T",
+ generic_steppings },
+
+ { CPU_ID_ARM1020E, CPU_CLASS_ARM10E, "ARM1020E",
+ generic_steppings },
+ { CPU_ID_ARM1022ES, CPU_CLASS_ARM10E, "ARM1022E-S",
+ generic_steppings },
+
+ { CPU_ID_SA110, CPU_CLASS_SA1, "SA-110",
+ sa110_steppings },
+ { CPU_ID_SA1100, CPU_CLASS_SA1, "SA-1100",
+ sa1100_steppings },
+ { CPU_ID_SA1110, CPU_CLASS_SA1, "SA-1110",
+ sa1110_steppings },
+
+ { CPU_ID_IXP1200, CPU_CLASS_SA1, "IXP1200",
+ ixp12x0_steppings },
+
+ { CPU_ID_80200, CPU_CLASS_XSCALE, "i80200",
+ xscale_steppings },
+
+ { CPU_ID_80321_400, CPU_CLASS_XSCALE, "i80321 400MHz",
+ i80321_steppings },
+ { CPU_ID_80321_600, CPU_CLASS_XSCALE, "i80321 600MHz",
+ i80321_steppings },
+ { CPU_ID_80321_400_B0, CPU_CLASS_XSCALE, "i80321 400MHz",
+ i80321_steppings },
+ { CPU_ID_80321_600_B0, CPU_CLASS_XSCALE, "i80321 600MHz",
+ i80321_steppings },
+
+ { CPU_ID_PXA250A, CPU_CLASS_XSCALE, "PXA250",
+ pxa2x0_steppings },
+ { CPU_ID_PXA210A, CPU_CLASS_XSCALE, "PXA210",
+ pxa2x0_steppings },
+ { CPU_ID_PXA250B, CPU_CLASS_XSCALE, "PXA250",
+ pxa2x0_steppings },
+ { CPU_ID_PXA210B, CPU_CLASS_XSCALE, "PXA210",
+ pxa2x0_steppings },
+ { CPU_ID_PXA250C, CPU_CLASS_XSCALE, "PXA250",
+ pxa2x0_steppings },
+ { CPU_ID_PXA210C, CPU_CLASS_XSCALE, "PXA210",
+ pxa2x0_steppings },
+
+ { CPU_ID_IXP425_533, CPU_CLASS_XSCALE, "IXP425 533MHz",
+ ixp425_steppings },
+ { CPU_ID_IXP425_400, CPU_CLASS_XSCALE, "IXP425 400MHz",
+ ixp425_steppings },
+ { CPU_ID_IXP425_266, CPU_CLASS_XSCALE, "IXP425 266MHz",
+ ixp425_steppings },
+
+ { 0, CPU_CLASS_NONE, NULL, NULL }
+};
+
+struct cpu_classtab {
+ const char *class_name;
+ const char *class_option;
+};
+
+const struct cpu_classtab cpu_classes[] = {
+ { "unknown", NULL }, /* CPU_CLASS_NONE */
+ { "ARM2", "CPU_ARM2" }, /* CPU_CLASS_ARM2 */
+ { "ARM2as", "CPU_ARM250" }, /* CPU_CLASS_ARM2AS */
+ { "ARM3", "CPU_ARM3" }, /* CPU_CLASS_ARM3 */
+ { "ARM6", "CPU_ARM6" }, /* CPU_CLASS_ARM6 */
+ { "ARM7", "CPU_ARM7" }, /* CPU_CLASS_ARM7 */
+ { "ARM7TDMI", "CPU_ARM7TDMI" }, /* CPU_CLASS_ARM7TDMI */
+ { "ARM8", "CPU_ARM8" }, /* CPU_CLASS_ARM8 */
+ { "ARM9TDMI", NULL }, /* CPU_CLASS_ARM9TDMI */
+ { "ARM9E-S", NULL }, /* CPU_CLASS_ARM9ES */
+ { "ARM10E", "CPU_ARM10" }, /* CPU_CLASS_ARM10E */
+ { "SA-1", "CPU_SA110" }, /* CPU_CLASS_SA1 */
+ { "XScale", "CPU_XSCALE_..." }, /* CPU_CLASS_XSCALE */
+};
+
+/*
+ * Report the type of the specified arm processor. This uses the generic and
+ * arm specific information in the cpu structure to identify the processor.
+ * The remaining fields in the cpu structure are filled in appropriately.
+ */
+
+static const char * const wtnames[] = {
+ "write-through",
+ "write-back",
+ "write-back",
+ "**unknown 3**",
+ "**unknown 4**",
+ "write-back-locking", /* XXX XScale-specific? */
+ "write-back-locking-A",
+ "write-back-locking-B",
+ "**unknown 8**",
+ "**unknown 9**",
+ "**unknown 10**",
+ "**unknown 11**",
+ "**unknown 12**",
+ "**unknown 13**",
+ "**unknown 14**",
+ "**unknown 15**",
+};
+
+void
+identify_arm_cpu(struct device *dv, struct cpu_info *ci)
+{
+ u_int cpuid;
+ enum cpu_class cpu_class = CPU_CLASS_NONE;
+ int i;
+
+ cpuid = ci->ci_arm_cpuid;
+
+ if (cpuid == 0) {
+ printf("Processor failed probe - no CPU ID\n");
+ return;
+ }
+
+ for (i = 0; cpuids[i].cpuid != 0; i++)
+ if (cpuids[i].cpuid == (cpuid & CPU_ID_CPU_MASK)) {
+ cpu_class = cpuids[i].cpu_class;
+ snprintf(cpu_model, sizeof(cpu_model),
+ "%s %s (%s core)", cpuids[i].cpu_name,
+ cpuids[i].cpu_steppings[cpuid &
+ CPU_ID_REVISION_MASK],
+ cpu_classes[cpu_class].class_name);
+ break;
+ }
+
+ if (cpuids[i].cpuid == 0)
+ snprintf(cpu_model, sizeof(cpu_model),
+ "unknown CPU (ID = 0x%x)", cpuid);
+
+ printf(": %s\n", cpu_model);
+
+ printf("%s:", dv->dv_xname);
+
+ switch (cpu_class) {
+ case CPU_CLASS_ARM6:
+ case CPU_CLASS_ARM7:
+ case CPU_CLASS_ARM7TDMI:
+ case CPU_CLASS_ARM8:
+ if ((ci->ci_ctrl & CPU_CONTROL_IDC_ENABLE) == 0)
+ printf(" IDC disabled");
+ else
+ printf(" IDC enabled");
+ break;
+ case CPU_CLASS_ARM9TDMI:
+ case CPU_CLASS_ARM10E:
+ case CPU_CLASS_SA1:
+ case CPU_CLASS_XSCALE:
+ if ((ci->ci_ctrl & CPU_CONTROL_DC_ENABLE) == 0)
+ printf(" DC disabled");
+ else
+ printf(" DC enabled");
+ if ((ci->ci_ctrl & CPU_CONTROL_IC_ENABLE) == 0)
+ printf(" IC disabled");
+ else
+ printf(" IC enabled");
+ break;
+ default:
+ break;
+ }
+ if ((ci->ci_ctrl & CPU_CONTROL_WBUF_ENABLE) == 0)
+ printf(" WB disabled");
+ else
+ printf(" WB enabled");
+
+ if (ci->ci_ctrl & CPU_CONTROL_LABT_ENABLE)
+ printf(" LABT");
+ else
+ printf(" EABT");
+
+ if (ci->ci_ctrl & CPU_CONTROL_BPRD_ENABLE)
+ printf(" branch prediction enabled");
+
+ printf("\n");
+
+ /* Print cache info. */
+ if (arm_picache_line_size == 0 && arm_pdcache_line_size == 0)
+ goto skip_pcache;
+
+ if (arm_pcache_unified) {
+ printf("%s: %dKB/%dB %d-way %s unified cache\n",
+ dv->dv_xname, arm_pdcache_size / 1024,
+ arm_pdcache_line_size, arm_pdcache_ways,
+ wtnames[arm_pcache_type]);
+ } else {
+ printf("%s: %dKB/%dB %d-way Instruction cache\n",
+ dv->dv_xname, arm_picache_size / 1024,
+ arm_picache_line_size, arm_picache_ways);
+ printf("%s: %dKB/%dB %d-way %s Data cache\n",
+ dv->dv_xname, arm_pdcache_size / 1024,
+ arm_pdcache_line_size, arm_pdcache_ways,
+ wtnames[arm_pcache_type]);
+ }
+
+ skip_pcache:
+
+ switch (cpu_class) {
+#ifdef CPU_ARM2
+ case CPU_CLASS_ARM2:
+#endif
+#ifdef CPU_ARM250
+ case CPU_CLASS_ARM2AS:
+#endif
+#ifdef CPU_ARM3
+ case CPU_CLASS_ARM3:
+#endif
+#ifdef CPU_ARM6
+ case CPU_CLASS_ARM6:
+#endif
+#ifdef CPU_ARM7
+ case CPU_CLASS_ARM7:
+#endif
+#ifdef CPU_ARM7TDMI
+ case CPU_CLASS_ARM7TDMI:
+#endif
+#ifdef CPU_ARM8
+ case CPU_CLASS_ARM8:
+#endif
+#ifdef CPU_ARM9
+ case CPU_CLASS_ARM9TDMI:
+#endif
+#ifdef CPU_ARM10
+ case CPU_CLASS_ARM10E:
+#endif
+#if defined(CPU_SA110) || defined(CPU_SA1100) || \
+ defined(CPU_SA1110) || defined(CPU_IXP12X0)
+ case CPU_CLASS_SA1:
+#endif
+#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+ case CPU_CLASS_XSCALE:
+#endif
+ break;
+ default:
+ if (cpu_classes[cpu_class].class_option == NULL)
+ printf("%s: %s does not fully support this CPU."
+ "\n", dv->dv_xname, ostype);
+ else {
+ printf("%s: This kernel does not fully support "
+ "this CPU.\n", dv->dv_xname);
+ printf("%s: Recompile with \"options %s\" to "
+ "correct this.\n", dv->dv_xname,
+ cpu_classes[cpu_class].class_option);
+ }
+ break;
+ }
+
+}
+#ifdef MULTIPROCESSOR
+int
+cpu_alloc_idlepcb(struct cpu_info *ci)
+{
+ vaddr_t uaddr;
+ struct pcb *pcb;
+ struct trapframe *tf;
+ int error;
+
+ /*
+ * Generate a kernel stack and PCB (in essence, a u-area) for the
+ * new CPU.
+ */
+ if (uvm_uarea_alloc(&uaddr)) {
+ error = uvm_fault_wire(kernel_map, uaddr, uaddr + USPACE,
+ VM_FAULT_WIRE, VM_PROT_READ | VM_PROT_WRITE);
+ if (error)
+ return error;
+ }
+ ci->ci_idlepcb = pcb = (struct pcb *)uaddr;
+
+ /*
+ * This code is largely derived from cpu_fork(), with which it
+ * should perhaps be shared.
+ */
+
+ /* Copy the pcb */
+ *pcb = proc0.p_addr->u_pcb;
+
+ /* Set up the undefined stack for the process. */
+ pcb->pcb_un.un_32.pcb32_und_sp = uaddr + USPACE_UNDEF_STACK_TOP;
+ pcb->pcb_un.un_32.pcb32_sp = uaddr + USPACE_SVC_STACK_TOP;
+
+#ifdef STACKCHECKS
+ /* Fill the undefined stack with a known pattern */
+ memset(((u_char *)uaddr) + USPACE_UNDEF_STACK_BOTTOM, 0xdd,
+ (USPACE_UNDEF_STACK_TOP - USPACE_UNDEF_STACK_BOTTOM));
+ /* Fill the kernel stack with a known pattern */
+ memset(((u_char *)uaddr) + USPACE_SVC_STACK_BOTTOM, 0xdd,
+ (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM));
+#endif /* STACKCHECKS */
+
+ pcb->pcb_tf = tf =
+ (struct trapframe *)pcb->pcb_un.un_32.pcb32_sp - 1;
+ *tf = *proc0.p_addr->u_pcb.pcb_tf;
+ return 0;
+}
+#endif /* MULTIPROCESSOR */
+
+/* End of cpu.c */
diff --git a/sys/arch/arm/arm/cpufunc.c b/sys/arch/arm/arm/cpufunc.c
new file mode 100644
index 00000000000..04e41f5dbeb
--- /dev/null
+++ b/sys/arch/arm/arm/cpufunc.c
@@ -0,0 +1,2168 @@
+/* $OpenBSD: cpufunc.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
+
+/*
+ * arm7tdmi support code Copyright (c) 2001 John Fremlin
+ * arm8 support code Copyright (c) 1997 ARM Limited
+ * arm8 support code Copyright (c) 1997 Causality Limited
+ * arm9 support code Copyright (C) 2001 ARM Ltd
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpufuncs.c
+ *
+ * C functions for supporting CPU / MMU / TLB specific operations.
+ *
+ * Created : 30/01/97
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <machine/cpu.h>
+#include <machine/bootconfig.h>
+
+#include <uvm/uvm.h>
+
+#include <arm/cpuconf.h>
+#include <arm/cpufunc.h>
+
+#ifdef CPU_XSCALE_80200
+#include <arm/xscale/i80200reg.h>
+#include <arm/xscale/i80200var.h>
+#endif
+
+#ifdef CPU_XSCALE_80321
+#include <arm/xscale/i80321reg.h>
+#include <arm/xscale/i80321var.h>
+#endif
+
+#ifdef CPU_XSCALE_IXP425
+#include <arm/xscale/ixp425reg.h>
+#include <arm/xscale/ixp425var.h>
+#endif
+
+#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
+#include <arm/xscale/xscalereg.h>
+#endif
+
+#if defined(PERFCTRS)
+struct arm_pmc_funcs *arm_pmc;
+#endif
+
+/* PRIMARY CACHE VARIABLES */
+int arm_picache_size;
+int arm_picache_line_size;
+int arm_picache_ways;
+
+int arm_pdcache_size; /* and unified */
+int arm_pdcache_line_size;
+int arm_pdcache_ways;
+
+int arm_pcache_type;
+int arm_pcache_unified;
+
+int arm_dcache_align;
+int arm_dcache_align_mask;
+
+/* 1 == use cpu_sleep(), 0 == don't */
+int cpu_do_powersave;
+
+#ifdef CPU_ARM3
+struct cpu_functions arm3_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ arm3_control, /* control */
+ NULL, /* domain */
+ NULL, /* setttb */
+ NULL, /* faultstatus */
+ NULL, /* faultaddress */
+
+ /* TLB functions */
+
+ cpufunc_nullop, /* tlb_flushID */
+ (void *)cpufunc_nullop, /* tlb_flushID_SE */
+ cpufunc_nullop, /* tlb_flushI */
+ (void *)cpufunc_nullop, /* tlb_flushI_SE */
+ cpufunc_nullop, /* tlb_flushD */
+ (void *)cpufunc_nullop, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ cpufunc_nullop, /* icache_sync_all */
+ (void *) cpufunc_nullop, /* icache_sync_range */
+
+ arm3_cache_flush, /* dcache_wbinv_all */
+ (void *)arm3_cache_flush, /* dcache_wbinv_range */
+ (void *)arm3_cache_flush, /* dcache_inv_range */
+ (void *)cpufunc_nullop, /* dcache_wb_range */
+
+ arm3_cache_flush, /* idcache_wbinv_all */
+ (void *)arm3_cache_flush, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ cpufunc_nullop, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ early_abort_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ NULL, /* context_switch */
+
+ (void *)cpufunc_nullop /* cpu setup */
+
+};
+#endif /* CPU_ARM3 */
+
+#ifdef CPU_ARM6
+struct cpu_functions arm6_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ arm67_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ arm67_tlb_flush, /* tlb_flushID */
+ arm67_tlb_purge, /* tlb_flushID_SE */
+ arm67_tlb_flush, /* tlb_flushI */
+ arm67_tlb_purge, /* tlb_flushI_SE */
+ arm67_tlb_flush, /* tlb_flushD */
+ arm67_tlb_purge, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ cpufunc_nullop, /* icache_sync_all */
+ (void *) cpufunc_nullop, /* icache_sync_range */
+
+ arm67_cache_flush, /* dcache_wbinv_all */
+ (void *)arm67_cache_flush, /* dcache_wbinv_range */
+ (void *)arm67_cache_flush, /* dcache_inv_range */
+ (void *)cpufunc_nullop, /* dcache_wb_range */
+
+ arm67_cache_flush, /* idcache_wbinv_all */
+ (void *)arm67_cache_flush, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ cpufunc_nullop, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+#ifdef ARM6_LATE_ABORT
+ late_abort_fixup, /* dataabt_fixup */
+#else
+ early_abort_fixup, /* dataabt_fixup */
+#endif
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm67_context_switch, /* context_switch */
+
+ arm6_setup /* cpu setup */
+
+};
+#endif /* CPU_ARM6 */
+
+#ifdef CPU_ARM7
+struct cpu_functions arm7_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ arm67_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ arm67_tlb_flush, /* tlb_flushID */
+ arm67_tlb_purge, /* tlb_flushID_SE */
+ arm67_tlb_flush, /* tlb_flushI */
+ arm67_tlb_purge, /* tlb_flushI_SE */
+ arm67_tlb_flush, /* tlb_flushD */
+ arm67_tlb_purge, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ cpufunc_nullop, /* icache_sync_all */
+ (void *)cpufunc_nullop, /* icache_sync_range */
+
+ arm67_cache_flush, /* dcache_wbinv_all */
+ (void *)arm67_cache_flush, /* dcache_wbinv_range */
+ (void *)arm67_cache_flush, /* dcache_inv_range */
+ (void *)cpufunc_nullop, /* dcache_wb_range */
+
+ arm67_cache_flush, /* idcache_wbinv_all */
+ (void *)arm67_cache_flush, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ cpufunc_nullop, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ late_abort_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm67_context_switch, /* context_switch */
+
+ arm7_setup /* cpu setup */
+
+};
+#endif /* CPU_ARM7 */
+
+#ifdef CPU_ARM7TDMI
+struct cpu_functions arm7tdmi_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ arm7tdmi_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ arm7tdmi_tlb_flushID, /* tlb_flushID */
+ arm7tdmi_tlb_flushID_SE, /* tlb_flushID_SE */
+ arm7tdmi_tlb_flushID, /* tlb_flushI */
+ arm7tdmi_tlb_flushID_SE, /* tlb_flushI_SE */
+ arm7tdmi_tlb_flushID, /* tlb_flushD */
+ arm7tdmi_tlb_flushID_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ cpufunc_nullop, /* icache_sync_all */
+ (void *)cpufunc_nullop, /* icache_sync_range */
+
+ arm7tdmi_cache_flushID, /* dcache_wbinv_all */
+ (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range */
+ (void *)arm7tdmi_cache_flushID, /* dcache_inv_range */
+ (void *)cpufunc_nullop, /* dcache_wb_range */
+
+ arm7tdmi_cache_flushID, /* idcache_wbinv_all */
+ (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ cpufunc_nullop, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ late_abort_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm7tdmi_context_switch, /* context_switch */
+
+ arm7tdmi_setup /* cpu setup */
+
+};
+#endif /* CPU_ARM7TDMI */
+
+#ifdef CPU_ARM8
+struct cpu_functions arm8_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ arm8_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ arm8_tlb_flushID, /* tlb_flushID */
+ arm8_tlb_flushID_SE, /* tlb_flushID_SE */
+ arm8_tlb_flushID, /* tlb_flushI */
+ arm8_tlb_flushID_SE, /* tlb_flushI_SE */
+ arm8_tlb_flushID, /* tlb_flushD */
+ arm8_tlb_flushID_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ cpufunc_nullop, /* icache_sync_all */
+ (void *)cpufunc_nullop, /* icache_sync_range */
+
+ arm8_cache_purgeID, /* dcache_wbinv_all */
+ (void *)arm8_cache_purgeID, /* dcache_wbinv_range */
+/*XXX*/ (void *)arm8_cache_purgeID, /* dcache_inv_range */
+ (void *)arm8_cache_cleanID, /* dcache_wb_range */
+
+ arm8_cache_purgeID, /* idcache_wbinv_all */
+ (void *)arm8_cache_purgeID, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ cpufunc_nullop, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm8_context_switch, /* context_switch */
+
+ arm8_setup /* cpu setup */
+};
+#endif /* CPU_ARM8 */
+
+#ifdef CPU_ARM9
+struct cpu_functions arm9_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* Domain */
+ arm9_setttb, /* Setttb */
+ cpufunc_faultstatus, /* Faultstatus */
+ cpufunc_faultaddress, /* Faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ arm9_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ arm9_cache_syncI, /* icache_sync_all */
+ arm9_cache_syncI_rng, /* icache_sync_range */
+
+	/* ...cache in write-through mode... */
+ arm9_cache_flushD, /* dcache_wbinv_all */
+ arm9_cache_flushD_rng, /* dcache_wbinv_range */
+ arm9_cache_flushD_rng, /* dcache_inv_range */
+ (void *)cpufunc_nullop, /* dcache_wb_range */
+
+ arm9_cache_flushID, /* idcache_wbinv_all */
+ arm9_cache_flushID_rng, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm9_context_switch, /* context_switch */
+
+ arm9_setup /* cpu setup */
+
+};
+#endif /* CPU_ARM9 */
+
+#ifdef CPU_ARM10
+struct cpu_functions arm10_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* Domain */
+ arm10_setttb, /* Setttb */
+ cpufunc_faultstatus, /* Faultstatus */
+ cpufunc_faultaddress, /* Faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ arm10_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ arm10_tlb_flushI_SE, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ arm10_icache_sync_all, /* icache_sync_all */
+ arm10_icache_sync_range, /* icache_sync_range */
+
+ arm10_dcache_wbinv_all, /* dcache_wbinv_all */
+ arm10_dcache_wbinv_range, /* dcache_wbinv_range */
+ arm10_dcache_inv_range, /* dcache_inv_range */
+ arm10_dcache_wb_range, /* dcache_wb_range */
+
+ arm10_idcache_wbinv_all, /* idcache_wbinv_all */
+ arm10_idcache_wbinv_range, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm10_context_switch, /* context_switch */
+
+ arm10_setup /* cpu setup */
+
+};
+#endif /* CPU_ARM10 */
+
+#ifdef CPU_SA110
+struct cpu_functions sa110_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ sa1_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ sa1_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ sa1_cache_syncI, /* icache_sync_all */
+ sa1_cache_syncI_rng, /* icache_sync_range */
+
+ sa1_cache_purgeD, /* dcache_wbinv_all */
+ sa1_cache_purgeD_rng, /* dcache_wbinv_range */
+/*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
+ sa1_cache_cleanD_rng, /* dcache_wb_range */
+
+ sa1_cache_purgeID, /* idcache_wbinv_all */
+ sa1_cache_purgeID_rng, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ sa110_context_switch, /* context_switch */
+
+ sa110_setup /* cpu setup */
+};
+#endif /* CPU_SA110 */
+
+#if defined(CPU_SA1100) || defined(CPU_SA1110)
+struct cpu_functions sa11x0_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ sa1_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ sa1_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ sa1_cache_syncI, /* icache_sync_all */
+ sa1_cache_syncI_rng, /* icache_sync_range */
+
+ sa1_cache_purgeD, /* dcache_wbinv_all */
+ sa1_cache_purgeD_rng, /* dcache_wbinv_range */
+/*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
+ sa1_cache_cleanD_rng, /* dcache_wb_range */
+
+ sa1_cache_purgeID, /* idcache_wbinv_all */
+ sa1_cache_purgeID_rng, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ sa11x0_drain_readbuf, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ sa11x0_cpu_sleep, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ sa11x0_context_switch, /* context_switch */
+
+ sa11x0_setup /* cpu setup */
+};
+#endif /* CPU_SA1100 || CPU_SA1110 */
+
+#ifdef CPU_IXP12X0
+struct cpu_functions ixp12x0_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* domain */
+ sa1_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ sa1_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ sa1_cache_syncI, /* icache_sync_all */
+ sa1_cache_syncI_rng, /* icache_sync_range */
+
+ sa1_cache_purgeD, /* dcache_wbinv_all */
+ sa1_cache_purgeD_rng, /* dcache_wbinv_range */
+/*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
+ sa1_cache_cleanD_rng, /* dcache_wb_range */
+
+ sa1_cache_purgeID, /* idcache_wbinv_all */
+ sa1_cache_purgeID_rng, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ ixp12x0_drain_readbuf, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ ixp12x0_context_switch, /* context_switch */
+
+ ixp12x0_setup /* cpu setup */
+};
+#endif /* CPU_IXP12X0 */
+
+#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+struct cpu_functions xscale_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ xscale_cpwait, /* cpwait */
+
+ /* MMU functions */
+
+ xscale_control, /* control */
+ cpufunc_domains, /* domain */
+ xscale_setttb, /* setttb */
+ cpufunc_faultstatus, /* faultstatus */
+ cpufunc_faultaddress, /* faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ xscale_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ xscale_cache_syncI, /* icache_sync_all */
+ xscale_cache_syncI_rng, /* icache_sync_range */
+
+ xscale_cache_purgeD, /* dcache_wbinv_all */
+ xscale_cache_purgeD_rng, /* dcache_wbinv_range */
+ xscale_cache_flushD_rng, /* dcache_inv_range */
+ xscale_cache_cleanD_rng, /* dcache_wb_range */
+
+ xscale_cache_purgeID, /* idcache_wbinv_all */
+ xscale_cache_purgeID_rng, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ xscale_cpu_sleep, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ xscale_context_switch, /* context_switch */
+
+ xscale_setup /* cpu setup */
+};
+#endif
+/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
+
+/*
+ * Global constants also used by locore.s
+ */
+
+struct cpu_functions cpufuncs;
+u_int cputype;
+u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
+
+#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
+ defined (CPU_ARM10) || \
+ defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+static void get_cachetype_cp15 __P((void));
+
+/* Additional cache information local to this file. Log2 of some of the
+ above numbers. */
+static int arm_dcache_l2_nsets;
+static int arm_dcache_l2_assoc;
+static int arm_dcache_l2_linesize;
+
+static void
+get_cachetype_cp15()
+{
+ u_int ctype, isize, dsize;
+ u_int multiplier;
+
+ __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
+ : "=r" (ctype));
+
+ /*
+ * ...and thus spake the ARM ARM:
+ *
+ * If an <opcode2> value corresponding to an unimplemented or
+ * reserved ID register is encountered, the System Control
+ * processor returns the value of the main ID register.
+ */
+ if (ctype == cpufunc_id())
+ goto out;
+
+ if ((ctype & CPU_CT_S) == 0)
+ arm_pcache_unified = 1;
+
+ /*
+ * If you want to know how this code works, go read the ARM ARM.
+ */
+
+ arm_pcache_type = CPU_CT_CTYPE(ctype);
+
+ if (arm_pcache_unified == 0) {
+ isize = CPU_CT_ISIZE(ctype);
+ multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
+ arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
+ if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
+ if (isize & CPU_CT_xSIZE_M)
+ arm_picache_line_size = 0; /* not present */
+ else
+ arm_picache_ways = 1;
+ } else {
+ arm_picache_ways = multiplier <<
+ (CPU_CT_xSIZE_ASSOC(isize) - 1);
+ }
+ arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
+ }
+
+ dsize = CPU_CT_DSIZE(ctype);
+ multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
+ arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
+ if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
+ if (dsize & CPU_CT_xSIZE_M)
+ arm_pdcache_line_size = 0; /* not present */
+ else
+ arm_pdcache_ways = 1;
+ } else {
+ arm_pdcache_ways = multiplier <<
+ (CPU_CT_xSIZE_ASSOC(dsize) - 1);
+ }
+ arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
+
+ arm_dcache_align = arm_pdcache_line_size;
+
+ arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
+ arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
+ arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
+ CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
+
+ out:
+ arm_dcache_align_mask = arm_dcache_align - 1;
+}
+#endif /* ARM7TDMI || ARM8 || ARM9 || ARM10 || XSCALE */
+
+#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
+ defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
+ defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
+/* Cache information for CPUs without cache type registers. */
+struct cachetab {
+ u_int32_t ct_cpuid;
+ int ct_pcache_type;
+ int ct_pcache_unified;
+ int ct_pdcache_size;
+ int ct_pdcache_line_size;
+ int ct_pdcache_ways;
+ int ct_picache_size;
+ int ct_picache_line_size;
+ int ct_picache_ways;
+};
+
+struct cachetab cachetab[] = {
+ /* cpuid, cache type, u, dsiz, ls, wy, isiz, ls, wy */
+ { CPU_ID_ARM2, 0, 1, 0, 0, 0, 0, 0, 0 },
+ { CPU_ID_ARM250, 0, 1, 0, 0, 0, 0, 0, 0 },
+ { CPU_ID_ARM3, CPU_CT_CTYPE_WT, 1, 4096, 16, 64, 0, 0, 0 },
+ { CPU_ID_ARM610, CPU_CT_CTYPE_WT, 1, 4096, 16, 64, 0, 0, 0 },
+ { CPU_ID_ARM710, CPU_CT_CTYPE_WT, 1, 8192, 32, 4, 0, 0, 0 },
+ { CPU_ID_ARM7500, CPU_CT_CTYPE_WT, 1, 4096, 16, 4, 0, 0, 0 },
+ { CPU_ID_ARM710A, CPU_CT_CTYPE_WT, 1, 8192, 16, 4, 0, 0, 0 },
+ { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT, 1, 4096, 16, 4, 0, 0, 0 },
+ /* XXX is this type right for SA-1? */
+ { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
+ { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
+ { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
+ { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
+ { 0, 0, 0, 0, 0, 0, 0, 0}
+};
+
+static void get_cachetype_table __P((void));
+
+static void
+get_cachetype_table()
+{
+ int i;
+ u_int32_t cpuid = cpufunc_id();
+
+ for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
+ if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
+ arm_pcache_type = cachetab[i].ct_pcache_type;
+ arm_pcache_unified = cachetab[i].ct_pcache_unified;
+ arm_pdcache_size = cachetab[i].ct_pdcache_size;
+ arm_pdcache_line_size =
+ cachetab[i].ct_pdcache_line_size;
+ arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
+ arm_picache_size = cachetab[i].ct_picache_size;
+ arm_picache_line_size =
+ cachetab[i].ct_picache_line_size;
+ arm_picache_ways = cachetab[i].ct_picache_ways;
+ }
+ }
+ arm_dcache_align = arm_pdcache_line_size;
+
+ arm_dcache_align_mask = arm_dcache_align - 1;
+}
+
+#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */
+
+/*
+ * Cannot panic here as we may not have a console yet ...
+ */
+
+int
+set_cpufuncs()
+{
+ cputype = cpufunc_id();
+ cputype &= CPU_ID_CPU_MASK;
+
+ /*
+ * NOTE: cpu_do_powersave defaults to off. If we encounter a
+ * CPU type where we want to use it by default, then we set it.
+ */
+
+#ifdef CPU_ARM3
+ if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
+ (cputype & 0x00000f00) == 0x00000300) {
+ cpufuncs = arm3_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 0;
+ get_cachetype_table();
+ return 0;
+ }
+#endif /* CPU_ARM3 */
+#ifdef CPU_ARM6
+ if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
+ (cputype & 0x00000f00) == 0x00000600) {
+ cpufuncs = arm6_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 0;
+ get_cachetype_table();
+ pmap_pte_init_generic();
+ return 0;
+ }
+#endif /* CPU_ARM6 */
+#ifdef CPU_ARM7
+ if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
+ CPU_ID_IS7(cputype) &&
+ (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
+ cpufuncs = arm7_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 0;
+ get_cachetype_table();
+ pmap_pte_init_generic();
+ return 0;
+ }
+#endif /* CPU_ARM7 */
+#ifdef CPU_ARM7TDMI
+ if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
+ CPU_ID_IS7(cputype) &&
+ (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
+ cpufuncs = arm7tdmi_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 0;
+ get_cachetype_cp15();
+ pmap_pte_init_generic();
+ return 0;
+ }
+#endif
+#ifdef CPU_ARM8
+ if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
+ (cputype & 0x0000f000) == 0x00008000) {
+ cpufuncs = arm8_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
+ get_cachetype_cp15();
+ pmap_pte_init_arm8();
+ return 0;
+ }
+#endif /* CPU_ARM8 */
+#ifdef CPU_ARM9
+ if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
+ (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
+ (cputype & 0x0000f000) == 0x00009000) {
+ cpufuncs = arm9_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
+ get_cachetype_cp15();
+ pmap_pte_init_arm9();
+ return 0;
+ }
+#endif /* CPU_ARM9 */
+#ifdef CPU_ARM10
+ if (/* cputype == CPU_ID_ARM1020T || */
+ cputype == CPU_ID_ARM1020E) {
+ /*
+	 * Select write-through caching (this isn't really an
+ * option on ARM1020T).
+ */
+ cpufuncs = arm10_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
+ get_cachetype_cp15();
+ arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
+ arm10_dcache_sets_max =
+ (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
+ arm10_dcache_sets_inc;
+ arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
+ arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
+ pmap_pte_init_generic();
+ return 0;
+ }
+#endif /* CPU_ARM10 */
+#ifdef CPU_SA110
+ if (cputype == CPU_ID_SA110) {
+ cpufuncs = sa110_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
+ get_cachetype_table();
+ pmap_pte_init_sa1();
+ return 0;
+ }
+#endif /* CPU_SA110 */
+#ifdef CPU_SA1100
+ if (cputype == CPU_ID_SA1100) {
+ cpufuncs = sa11x0_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
+ get_cachetype_table();
+ pmap_pte_init_sa1();
+
+ /* Use powersave on this CPU. */
+ cpu_do_powersave = 1;
+
+ return 0;
+ }
+#endif /* CPU_SA1100 */
+#ifdef CPU_SA1110
+ if (cputype == CPU_ID_SA1110) {
+ cpufuncs = sa11x0_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
+ get_cachetype_table();
+ pmap_pte_init_sa1();
+
+ /* Use powersave on this CPU. */
+ cpu_do_powersave = 1;
+
+ return 0;
+ }
+#endif /* CPU_SA1110 */
+#ifdef CPU_IXP12X0
+ if (cputype == CPU_ID_IXP1200) {
+ cpufuncs = ixp12x0_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1;
+ get_cachetype_table();
+ pmap_pte_init_sa1();
+ return 0;
+ }
+#endif /* CPU_IXP12X0 */
+#ifdef CPU_XSCALE_80200
+ if (cputype == CPU_ID_80200) {
+ int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
+
+ i80200_icu_init();
+
+ /*
+ * Reset the Performance Monitoring Unit to a
+ * pristine state:
+ * - CCNT, PMN0, PMN1 reset to 0
+ * - overflow indications cleared
+ * - all counters disabled
+ */
+ __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
+ :
+ : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
+ PMNC_CC_IF));
+
+#if defined(XSCALE_CCLKCFG)
+ /*
+ * Crank CCLKCFG to maximum legal value.
+ */
+ __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
+ :
+ : "r" (XSCALE_CCLKCFG));
+#endif
+
+ /*
+ * XXX Disable ECC in the Bus Controller Unit; we
+ * don't really support it, yet. Clear any pending
+ * error indications.
+ */
+ __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
+ :
+ : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
+
+ cpufuncs = xscale_cpufuncs;
+#if defined(PERFCTRS)
+ xscale_pmu_init();
+#endif
+
+ /*
+ * i80200 errata: Step-A0 and A1 have a bug where
+ * D$ dirty bits are not cleared on "invalidate by
+ * address".
+ *
+ * Workaround: Clean cache line before invalidating.
+ */
+ if (rev == 0 || rev == 1)
+ cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
+
+ cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
+ get_cachetype_cp15();
+ pmap_pte_init_xscale();
+ return 0;
+ }
+#endif /* CPU_XSCALE_80200 */
+#ifdef CPU_XSCALE_80321
+ if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
+ cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0) {
+ i80321_icu_init();
+
+ /*
+ * Reset the Performance Monitoring Unit to a
+ * pristine state:
+ * - CCNT, PMN0, PMN1 reset to 0
+ * - overflow indications cleared
+ * - all counters disabled
+ */
+ __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
+ :
+ : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
+ PMNC_CC_IF));
+
+ cpufuncs = xscale_cpufuncs;
+#if defined(PERFCTRS)
+ xscale_pmu_init();
+#endif
+
+ cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
+ get_cachetype_cp15();
+ pmap_pte_init_xscale();
+ return 0;
+ }
+#endif /* CPU_XSCALE_80321 */
+#ifdef CPU_XSCALE_PXA2X0
+ /* ignore core revision to test PXA2xx CPUs */
+ if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
+ (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
+
+ cpufuncs = xscale_cpufuncs;
+#if defined(PERFCTRS)
+ xscale_pmu_init();
+#endif
+
+ cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
+ get_cachetype_cp15();
+ pmap_pte_init_xscale();
+
+ /* Use powersave on this CPU. */
+ cpu_do_powersave = 1;
+
+ return 0;
+ }
+#endif /* CPU_XSCALE_PXA2X0 */
+#ifdef CPU_XSCALE_IXP425
+ if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
+ cputype == CPU_ID_IXP425_266) {
+ ixp425_icu_init();
+
+ cpufuncs = xscale_cpufuncs;
+#if defined(PERFCTRS)
+ xscale_pmu_init();
+#endif
+
+ cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
+ get_cachetype_cp15();
+ pmap_pte_init_xscale();
+
+ return 0;
+ }
+#endif /* CPU_XSCALE_IXP425 */
+ /*
+ * Bzzzz. And the answer was ...
+ */
+ panic("No support for this CPU type (%08x) in kernel", cputype);
+ return(ARCHITECTURE_NOT_PRESENT);
+}
+
+/*
+ * Fixup routines for data and prefetch aborts.
+ *
+ * Several compile time symbols are used
+ *
+ * DEBUG_FAULT_CORRECTION - Print debugging information during the
+ * correction of registers after a fault.
+ * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
+ * when defined should use late aborts
+ */
+
+
+/*
+ * Null abort fixup routine.
+ * For use when no fixup is required.
+ */
+int
+cpufunc_null_fixup(arg)
+ void *arg; /* trapframe pointer; unused */
+{
+ /* Nothing to correct on CPUs without base-writeback aborts. */
+ return(ABORT_FIXUP_OK);
+}
+
+
+#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
+ defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
+
+#ifdef DEBUG_FAULT_CORRECTION
+#define DFC_PRINTF(x) printf x
+#define DFC_DISASSEMBLE(x) disassemble(x)
+#else
+#define DFC_PRINTF(x) /* nothing */
+#define DFC_DISASSEMBLE(x) /* nothing */
+#endif
+
+/*
+ * "Early" data abort fixup.
+ *
+ * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
+ * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
+ *
+ * In early aborts, we may have to fix up LDM, STM, LDC and STC.
+ */
+int
+early_abort_fixup(arg)
+ void *arg; /* trapframe_t * from the abort handler */
+{
+ trapframe_t *frame = arg;
+ u_int fault_pc; /* address of the faulting instruction */
+ u_int fault_instruction; /* the instruction word itself */
+ int saved_lr = 0;
+
+ if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
+
+ /* Ok an abort in SVC mode */
+
+ /*
+ * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
+ * as the fault happened in svc mode but we need it in the
+ * usr slot so we can treat the registers as an array of ints
+ * during fixing.
+ * NOTE: This PC is in the position but writeback is not
+ * allowed on r15.
+ * Doing it like this is more efficient than trapping this
+ * case in all possible locations in the following fixup code.
+ */
+
+ saved_lr = frame->tf_usr_lr;
+ frame->tf_usr_lr = frame->tf_svc_lr;
+
+ /*
+ * Note the trapframe does not have the SVC r13 so a fault
+ * from an instruction with writeback to r13 in SVC mode is
+ * not allowed. This should not happen as the kstack is
+ * always valid.
+ */
+ }
+
+ /* Get fault address and status from the CPU */
+
+ fault_pc = frame->tf_pc;
+ fault_instruction = *((volatile unsigned int *)fault_pc);
+
+ /* Decode the fault instruction and fix the registers as needed */
+
+ /* LDM/STM (block data transfer): bits 27-25 == 100 */
+ if ((fault_instruction & 0x0e000000) == 0x08000000) {
+ int base;
+ int loop;
+ int count;
+ int *registers = &frame->tf_r0;
+
+ DFC_PRINTF(("LDM/STM\n"));
+ DFC_DISASSEMBLE(fault_pc);
+ /* W bit (21) set: the base register was updated and must be restored */
+ if (fault_instruction & (1 << 21)) {
+ DFC_PRINTF(("This instruction must be corrected\n"));
+ base = (fault_instruction >> 16) & 0x0f;
+ if (base == 15)
+ return ABORT_FIXUP_FAILED;
+ /* Count registers transferred */
+ count = 0;
+ for (loop = 0; loop < 16; ++loop) {
+ if (fault_instruction & (1<<loop))
+ ++count;
+ }
+ DFC_PRINTF(("%d registers used\n", count));
+ DFC_PRINTF(("Corrected r%d by %d bytes ",
+ base, count * 4));
+ /* U bit (23): undo the writeback in the opposite direction */
+ if (fault_instruction & (1 << 23)) {
+ DFC_PRINTF(("down\n"));
+ registers[base] -= count * 4;
+ } else {
+ DFC_PRINTF(("up\n"));
+ registers[base] += count * 4;
+ }
+ }
+ } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
+ /* LDC/STC (coprocessor data transfer): bits 27-25 == 110 */
+ int base;
+ int offset;
+ int *registers = &frame->tf_r0;
+
+ /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
+
+ DFC_DISASSEMBLE(fault_pc);
+
+ /* Only need to fix registers if write back is turned on */
+
+ if ((fault_instruction & (1 << 21)) != 0) {
+ base = (fault_instruction >> 16) & 0x0f;
+ if (base == 13 &&
+ (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
+ return ABORT_FIXUP_FAILED;
+ if (base == 15)
+ return ABORT_FIXUP_FAILED;
+
+ /* 8-bit word offset, scaled by 4 */
+ offset = (fault_instruction & 0xff) << 2;
+ DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
+ if ((fault_instruction & (1 << 23)) != 0)
+ offset = -offset;
+ registers[base] += offset;
+ DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
+ }
+ } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
+ /*
+ * NOTE(review): this condition duplicates the branch above,
+ * so this arm is unreachable dead code; kept as in the
+ * original sources.
+ */
+ return ABORT_FIXUP_FAILED;
+
+ if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
+
+ /* Ok an abort in SVC mode */
+
+ /*
+ * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
+ * as the fault happened in svc mode but we need it in the
+ * usr slot so we can treat the registers as an array of ints
+ * during fixing.
+ * NOTE: This PC is in the position but writeback is not
+ * allowed on r15.
+ * Doing it like this is more efficient than trapping this
+ * case in all possible locations in the prior fixup code.
+ */
+
+ frame->tf_svc_lr = frame->tf_usr_lr;
+ frame->tf_usr_lr = saved_lr;
+
+ /*
+ * Note the trapframe does not have the SVC r13 so a fault
+ * from an instruction with writeback to r13 in SVC mode is
+ * not allowed. This should not happen as the kstack is
+ * always valid.
+ */
+ }
+
+ return(ABORT_FIXUP_OK);
+}
+#endif /* CPU_ARM2/250/3/6/7 */
+
+
+#if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
+ defined(CPU_ARM7TDMI)
+/*
+ * "Late" (base updated) data abort fixup
+ *
+ * For ARM6 (in late-abort mode) and ARM7.
+ *
+ * In this model, all data-transfer instructions need fixing up. We defer
+ * LDM, STM, LDC and STC fixup to the early-abort handler.
+ */
+int
+late_abort_fixup(arg)
+ void *arg; /* trapframe_t * from the abort handler */
+{
+ trapframe_t *frame = arg;
+ u_int fault_pc; /* address of the faulting instruction */
+ u_int fault_instruction; /* the instruction word itself */
+ int saved_lr = 0;
+
+ if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
+
+ /* Ok an abort in SVC mode */
+
+ /*
+ * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
+ * as the fault happened in svc mode but we need it in the
+ * usr slot so we can treat the registers as an array of ints
+ * during fixing.
+ * NOTE: This PC is in the position but writeback is not
+ * allowed on r15.
+ * Doing it like this is more efficient than trapping this
+ * case in all possible locations in the following fixup code.
+ */
+
+ saved_lr = frame->tf_usr_lr;
+ frame->tf_usr_lr = frame->tf_svc_lr;
+
+ /*
+ * Note the trapframe does not have the SVC r13 so a fault
+ * from an instruction with writeback to r13 in SVC mode is
+ * not allowed. This should not happen as the kstack is
+ * always valid.
+ */
+ }
+
+ /* Get fault address and status from the CPU */
+
+ fault_pc = frame->tf_pc;
+ fault_instruction = *((volatile unsigned int *)fault_pc);
+
+ /* Decode the fault instruction and fix the registers as needed */
+
+ /* Was it a swap instruction ? */
+
+ if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
+ /* SWP/SWPB: no base writeback, nothing to restore. */
+ DFC_DISASSEMBLE(fault_pc);
+ } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
+
+ /* Was it a ldr/str instruction */
+ /* This is for late abort only */
+
+ int base;
+ int offset;
+ int *registers = &frame->tf_r0;
+
+ DFC_DISASSEMBLE(fault_pc);
+
+ /* This is for late abort only */
+
+ /* post-indexed (P bit clear), or writeback (W bit set) */
+ if ((fault_instruction & (1 << 24)) == 0
+ || (fault_instruction & (1 << 21)) != 0) {
+ /* postindexed ldr/str with no writeback */
+
+ base = (fault_instruction >> 16) & 0x0f;
+ if (base == 13 &&
+ (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
+ return ABORT_FIXUP_FAILED;
+ if (base == 15)
+ return ABORT_FIXUP_FAILED;
+ DFC_PRINTF(("late abt fix: r%d=%08x : ",
+ base, registers[base]));
+ if ((fault_instruction & (1 << 25)) == 0) {
+ /* Immediate offset - easy */
+
+ offset = fault_instruction & 0xfff;
+ if ((fault_instruction & (1 << 23)))
+ offset = -offset;
+ registers[base] += offset;
+ DFC_PRINTF(("imm=%08x ", offset));
+ } else {
+ /* offset is a shifted register */
+ int shift;
+
+ offset = fault_instruction & 0x0f;
+ if (offset == base)
+ return ABORT_FIXUP_FAILED;
+
+ /*
+ * Register offset - hard we have to
+ * cope with shifts !
+ */
+ offset = registers[offset];
+
+ if ((fault_instruction & (1 << 4)) == 0)
+ /* shift with amount */
+ shift = (fault_instruction >> 7) & 0x1f;
+ else {
+ /* shift with register */
+ if ((fault_instruction & (1 << 7)) != 0)
+ /* undefined for now so bail out */
+ return ABORT_FIXUP_FAILED;
+ shift = ((fault_instruction >> 8) & 0xf);
+ if (base == shift)
+ return ABORT_FIXUP_FAILED;
+ DFC_PRINTF(("shift reg=%d ", shift));
+ shift = registers[shift];
+ }
+ DFC_PRINTF(("shift=%08x ", shift));
+ /* Apply the shift type from bits 6:5 */
+ switch (((fault_instruction >> 5) & 0x3)) {
+ case 0 : /* Logical left */
+ offset = (int)(((u_int)offset) << shift);
+ break;
+ case 1 : /* Logical Right */
+ if (shift == 0) shift = 32;
+ offset = (int)(((u_int)offset) >> shift);
+ break;
+ case 2 : /* Arithmetic Right */
+ if (shift == 0) shift = 32;
+ offset = (int)(((int)offset) >> shift);
+ break;
+ case 3 : /* Rotate right (rol or rxx) */
+ return ABORT_FIXUP_FAILED;
+ break;
+ }
+
+ DFC_PRINTF(("abt: fixed LDR/STR with "
+ "register offset\n"));
+ if ((fault_instruction & (1 << 23)))
+ offset = -offset;
+ DFC_PRINTF(("offset=%08x ", offset));
+ registers[base] += offset;
+ }
+ DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
+ }
+ }
+
+ if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
+
+ /* Ok an abort in SVC mode */
+
+ /*
+ * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
+ * as the fault happened in svc mode but we need it in the
+ * usr slot so we can treat the registers as an array of ints
+ * during fixing.
+ * NOTE: This PC is in the position but writeback is not
+ * allowed on r15.
+ * Doing it like this is more efficient than trapping this
+ * case in all possible locations in the prior fixup code.
+ */
+
+ frame->tf_svc_lr = frame->tf_usr_lr;
+ frame->tf_usr_lr = saved_lr;
+
+ /*
+ * Note the trapframe does not have the SVC r13 so a fault
+ * from an instruction with writeback to r13 in SVC mode is
+ * not allowed. This should not happen as the kstack is
+ * always valid.
+ */
+ }
+
+ /*
+ * Now let the early-abort fixup routine have a go, in case it
+ * was an LDM, STM, LDC or STC that faulted.
+ */
+
+ return early_abort_fixup(arg);
+}
+#endif /* CPU_ARM6(LATE)/7/7TDMI */
+
+/*
+ * CPU Setup code
+ */
+
+#if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
+ defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
+ defined(CPU_SA1100) || defined(CPU_SA1110) || \
+ defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+
+/* Actions applied to the control-register word by a boot option. */
+#define IGN 0 /* leave bits unchanged */
+#define OR 1 /* set the bits */
+#define BIC 2 /* clear the bits */
+
+/* One boot-argument toggle: name plus the false/true actions on co_value. */
+struct cpu_option {
+ char *co_name; /* boot option name, e.g. "cpu.cache" */
+ int co_falseop; /* action (IGN/OR/BIC) when option is false */
+ int co_trueop; /* action (IGN/OR/BIC) when option is true */
+ int co_value; /* CPU_CONTROL_* bit mask operated on */
+};
+
+static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int));
+
+/*
+ * Walk a NULL-terminated option list, look each name up in the boot
+ * arguments as a boolean, and apply the matching OR/BIC action to the
+ * control-register word.  Returns the updated word; unmodified if
+ * args is NULL or no option matches.
+ */
+static u_int
+parse_cpu_options(args, optlist, cpuctrl)
+ char *args; /* kernel boot argument string (may be NULL) */
+ struct cpu_option *optlist; /* table terminated by co_name == NULL */
+ u_int cpuctrl; /* current control-register word */
+{
+ int integer;
+
+ if (args == NULL)
+ return(cpuctrl);
+
+ while (optlist->co_name) {
+ if (get_bootconf_option(args, optlist->co_name,
+ BOOTOPT_TYPE_BOOLEAN, &integer)) {
+ if (integer) {
+ if (optlist->co_trueop == OR)
+ cpuctrl |= optlist->co_value;
+ else if (optlist->co_trueop == BIC)
+ cpuctrl &= ~optlist->co_value;
+ } else {
+ if (optlist->co_falseop == OR)
+ cpuctrl |= optlist->co_value;
+ else if (optlist->co_falseop == BIC)
+ cpuctrl &= ~optlist->co_value;
+ }
+ }
+ ++optlist;
+ }
+ return(cpuctrl);
+}
+#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
+
+#if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
+ || defined(CPU_ARM8)
+/* Boot options shared by the ARM6/ARM7/ARM7TDMI/ARM8 setup routines. */
+struct cpu_option arm678_options[] = {
+#ifdef COMPAT_12
+ { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
+#endif /* COMPAT_12 */
+ { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
+ { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
+
+#ifdef CPU_ARM6
+/* ARM6-specific boot option toggles. */
+struct cpu_option arm6_options[] = {
+ { "arm6.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
+ { "arm6.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "arm6.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "arm6.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+/*
+ * Configure the ARM6 control register from defaults plus boot options,
+ * flush the caches, and write the result to the CPU.
+ */
+void
+arm6_setup(args)
+ char *args; /* kernel boot argument string (may be NULL) */
+{
+ int cpuctrl, cpuctrlmask;
+
+ /* Set up default control registers bits */
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
+ | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
+ | CPU_CONTROL_AFLT_ENABLE;
+
+#ifdef ARM6_LATE_ABORT
+ cpuctrl |= CPU_CONTROL_LABT_ENABLE;
+#endif /* ARM6_LATE_ABORT */
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
+ cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ curcpu()->ci_ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+}
+#endif /* CPU_ARM6 */
+
+#ifdef CPU_ARM7
+/* ARM7-specific boot option toggles (cache, write buffer, FPA clock). */
+struct cpu_option arm7_options[] = {
+ { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
+ { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+#ifdef COMPAT_12
+ { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
+#endif /* COMPAT_12 */
+ { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
+ { NULL, IGN, IGN, 0 }
+};
+
+/*
+ * Configure the ARM7 control register from defaults plus boot options,
+ * flush the caches, and write the result to the CPU.
+ */
+void
+arm7_setup(args)
+ char *args; /* kernel boot argument string (may be NULL) */
+{
+ int cpuctrl, cpuctrlmask;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
+ | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
+ | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
+ | CPU_CONTROL_AFLT_ENABLE;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
+ cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ curcpu()->ci_ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+}
+#endif /* CPU_ARM7 */
+
+#ifdef CPU_ARM7TDMI
+/* ARM7TDMI boot option toggles (shares the "arm7." prefix). */
+struct cpu_option arm7tdmi_options[] = {
+ { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
+ { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+#ifdef COMPAT_12
+ { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
+#endif /* COMPAT_12 */
+ { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
+ { NULL, IGN, IGN, 0 }
+};
+
+/*
+ * Configure the ARM7TDMI control register from defaults plus boot
+ * options, flush the caches, and write the result to the CPU.
+ */
+void
+arm7tdmi_setup(args)
+ char *args; /* kernel boot argument string (may be NULL) */
+{
+ int cpuctrl;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
+
+ cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
+ cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ curcpu()->ci_ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+}
+#endif /* CPU_ARM7TDMI */
+
+#ifdef CPU_ARM8
+/* ARM8 boot option toggles, including branch prediction. */
+struct cpu_option arm8_options[] = {
+ { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
+ { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
+ { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+#ifdef COMPAT_12
+ { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+#endif /* COMPAT_12 */
+ { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+ { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+/*
+ * Configure the ARM8 control register and the ARM8-specific clock/test
+ * register from defaults plus boot options, then write both to the CPU.
+ */
+void
+arm8_setup(args)
+ char *args; /* kernel boot argument string (may be NULL) */
+{
+ int integer;
+ int cpuctrl, cpuctrlmask;
+ int clocktest; /* working copy of the clock/test register */
+ int setclock = 0; /* nonzero if any clock option was given */
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
+ | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
+ cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Get clock configuration */
+ clocktest = arm8_clock_config(0, 0) & 0x0f;
+
+ /* Special ARM8 clock and test configuration */
+ if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
+ clocktest = 0;
+ setclock = 1;
+ }
+ if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
+ if (integer)
+ clocktest |= 0x01;
+ else
+ clocktest &= ~(0x01);
+ setclock = 1;
+ }
+ if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
+ if (integer)
+ clocktest |= 0x02;
+ else
+ clocktest &= ~(0x02);
+ setclock = 1;
+ }
+ if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
+ /*
+ * NOTE(review): the mask ~0xc0 clears bits 6-7 but the value
+ * is shifted into bits 2-3; these look inconsistent.  Kept
+ * as in the original sources -- verify against the ARM810
+ * clock/test register layout before changing.
+ */
+ clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
+ setclock = 1;
+ }
+ if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
+ clocktest |= (integer & 7) << 5;
+ setclock = 1;
+ }
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ curcpu()->ci_ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+
+ /* Set the clock/test register */
+ if (setclock)
+ arm8_clock_config(0x7f, clocktest);
+}
+#endif /* CPU_ARM8 */
+
+#ifdef CPU_ARM9
+/* ARM9 boot option toggles (separate I- and D-cache enables). */
+struct cpu_option arm9_options[] = {
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+/*
+ * Configure the ARM9 control register from defaults plus boot options,
+ * flush the caches, and write the result to the CPU.
+ */
+void
+arm9_setup(args)
+ char *args; /* kernel boot argument string (may be NULL) */
+{
+ int cpuctrl, cpuctrlmask;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
+ | CPU_CONTROL_CPCLK;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ curcpu()->ci_ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+
+}
+#endif /* CPU_ARM9 */
+
+#ifdef CPU_ARM10
+/* ARM10 boot option toggles (separate I- and D-cache enables). */
+struct cpu_option arm10_options[] = {
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+/*
+ * Configure the ARM10 control register from defaults plus boot options,
+ * with extra cache flushes around the control-register write.
+ */
+void
+arm10_setup(args)
+ char *args; /* kernel boot argument string (may be NULL) */
+{
+ int cpuctrl, cpuctrlmask;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_BPRD_ENABLE
+ | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Now really make sure they are clean. */
+ /* CP15 c7/c7/0: invalidate both I and D caches directly */
+ asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
+
+ /* Set the control register */
+ curcpu()->ci_ctrl = cpuctrl;
+ cpu_control(0xffffffff, cpuctrl);
+
+ /* And again. */
+ cpu_idcache_wbinv_all();
+}
+#endif /* CPU_ARM10 */
+
+#ifdef CPU_SA110
+/* StrongARM SA-110 boot option toggles. */
+struct cpu_option sa110_options[] = {
+#ifdef COMPAT_12
+ { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
+#endif /* COMPAT_12 */
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+/*
+ * Configure the SA-110 control register from defaults plus boot options,
+ * write it, and enable CPU clock switching.
+ */
+void
+sa110_setup(args)
+ char *args; /* kernel boot argument string (may be NULL) */
+{
+ int cpuctrl, cpuctrlmask;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
+ | CPU_CONTROL_CPCLK;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ curcpu()->ci_ctrl = cpuctrl;
+/* cpu_control(cpuctrlmask, cpuctrl);*/
+ cpu_control(0xffffffff, cpuctrl);
+
+ /*
+ * enable clockswitching, note that this doesn't read or write to r0,
+ * r0 is just to make it valid asm
+ */
+ __asm ("mcr 15, 0, r0, c15, c1, 2");
+}
+#endif /* CPU_SA110 */
+
+#if defined(CPU_SA1100) || defined(CPU_SA1110)
+/* StrongARM SA-1100/SA-1110 boot option toggles. */
+struct cpu_option sa11x0_options[] = {
+#ifdef COMPAT_12
+ { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
+#endif /* COMPAT_12 */
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+/*
+ * Configure the SA-1100/SA-1110 control register from defaults plus
+ * boot options, flush the caches, and write the result to the CPU.
+ */
+void
+sa11x0_setup(args)
+ char *args; /* kernel boot argument string (may be NULL) */
+{
+ int cpuctrl, cpuctrlmask;
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
+ | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ if (vector_page == ARM_VECTORS_HIGH)
+ cpuctrl |= CPU_CONTROL_VECRELOC;
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ curcpu()->ci_ctrl = cpuctrl; /* record value, as the other setups do */
+ cpu_control(0xffffffff, cpuctrl);
+}
+#endif /* CPU_SA1100 || CPU_SA1110 */
+
+#if defined(CPU_IXP12X0)
+/* Intel IXP12x0 boot option toggles. */
+struct cpu_option ixp12x0_options[] = {
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
+ { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+/*
+ * Configure the IXP12x0 control register from defaults plus boot
+ * options, flush the caches, and write the result to the CPU.
+ */
+void
+ixp12x0_setup(args)
+ char *args; /* kernel boot argument string (may be NULL) */
+{
+ int cpuctrl, cpuctrlmask;
+
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE;
+
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
+ | CPU_CONTROL_VECRELOC;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ if (vector_page == ARM_VECTORS_HIGH)
+ cpuctrl |= CPU_CONTROL_VECRELOC;
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /* Set the control register */
+ curcpu()->ci_ctrl = cpuctrl;
+ /* cpu_control(0xffffffff, cpuctrl); */
+ /* Unlike the other setups, only bits in cpuctrlmask are written. */
+ cpu_control(cpuctrlmask, cpuctrl);
+}
+#endif /* CPU_IXP12X0 */
+
+#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+/* XScale boot option toggles (cache, branch prediction). */
+struct cpu_option xscale_options[] = {
+#ifdef COMPAT_12
+ { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+ { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+#endif /* COMPAT_12 */
+ { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
+ { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
+ { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
+ { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
+ { NULL, IGN, IGN, 0 }
+};
+
+/*
+ * Configure the XScale control register from defaults plus boot
+ * options, write it, then set write-coalescing policy in the
+ * auxiliary control register (CP15 c1, opcode2 1).
+ */
+void
+xscale_setup(args)
+ char *args; /* kernel boot argument string (may be NULL) */
+{
+ uint32_t auxctl; /* CP15 auxiliary control register value */
+ int cpuctrl, cpuctrlmask;
+
+ /*
+ * The XScale Write Buffer is always enabled. Our option
+ * is to enable/disable coalescing. Note that bits 6:3
+ * must always be enabled.
+ */
+
+ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
+ | CPU_CONTROL_BPRD_ENABLE;
+ cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
+ | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
+ | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+ | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
+ | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
+ | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
+ | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
+
+#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
+ cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
+#endif
+
+ cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
+
+#ifdef __ARMEB__
+ cpuctrl |= CPU_CONTROL_BEND_ENABLE;
+#endif
+
+ if (vector_page == ARM_VECTORS_HIGH)
+ cpuctrl |= CPU_CONTROL_VECRELOC;
+
+ /* Clear out the cache */
+ cpu_idcache_wbinv_all();
+
+ /*
+ * Set the control register. Note that bits 6:3 must always
+ * be set to 1.
+ */
+ curcpu()->ci_ctrl = cpuctrl;
+/* cpu_control(cpuctrlmask, cpuctrl);*/
+ cpu_control(0xffffffff, cpuctrl);
+
+ /* Make sure write coalescing is turned on */
+ __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
+ : "=r" (auxctl));
+#ifdef XSCALE_NO_COALESCE_WRITES
+ /* K bit set: disable write-buffer coalescing */
+ auxctl |= XSCALE_AUXCTL_K;
+#else
+ /* K bit clear: allow write-buffer coalescing */
+ auxctl &= ~XSCALE_AUXCTL_K;
+#endif
+ __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
+ : : "r" (auxctl));
+}
+#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
diff --git a/sys/arch/arm/arm/cpufunc_asm.S b/sys/arch/arm/arm/cpufunc_asm.S
new file mode 100644
index 00000000000..2e36bada941
--- /dev/null
+++ b/sys/arch/arm/arm/cpufunc_asm.S
@@ -0,0 +1,151 @@
+/* $OpenBSD: cpufunc_asm.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: cpufunc_asm.S,v 1.12 2003/09/06 09:14:52 rearnsha Exp $ */
+
+/*
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpufunc.S
+ *
+ * Assembly functions for CPU / MMU / TLB specific operations
+ *
+ * Created : 30/01/97
+ */
+
+#include <machine/cpu.h>
+#include <machine/asm.h>
+
+ .text
+ .align 0
+
+ENTRY(cpufunc_nullop)
+ mov pc, lr
+
+/*
+ * Generic functions to read the internal coprocessor registers
+ *
+ * Currently these registers are :
+ * c0 - CPU ID
+ * c5 - Fault status
+ * c6 - Fault address
+ *
+ */
+
+ENTRY(cpufunc_id)
+ mrc p15, 0, r0, c0, c0, 0
+ mov pc, lr
+
+ENTRY(cpu_get_control)
+ mrc p15, 0, r0, c1, c0, 0
+ mov pc, lr
+
+ENTRY(cpu_read_cache_config)
+ mrc p15, 0, r0, c0, c0, 1
+ mov pc, lr
+
+ENTRY(cpufunc_faultstatus)
+ mrc p15, 0, r0, c5, c0, 0
+ mov pc, lr
+
+ENTRY(cpufunc_faultaddress)
+ mrc p15, 0, r0, c6, c0, 0
+ mov pc, lr
+
+
+/*
+ * Generic functions to write the internal coprocessor registers
+ *
+ *
+ * Currently these registers are
+ * c1 - CPU Control
+ * c3 - Domain Access Control
+ *
+ * All other registers are CPU architecture specific
+ */
+
+#if 0 /* See below. */
+ENTRY(cpufunc_control)
+ mcr p15, 0, r0, c1, c0, 0
+ mov pc, lr
+#endif
+
+ENTRY(cpufunc_domains)
+ mcr p15, 0, r0, c3, c0, 0
+ mov pc, lr
+
+/*
+ * Generic functions to read/modify/write the internal coprocessor registers
+ *
+ *
+ * Currently these registers are
+ * c1 - CPU Control
+ *
+ * All other registers are CPU architecture specific
+ */
+
+ENTRY(cpufunc_control)
+ mrc p15, 0, r3, c1, c0, 0 /* Read the control register */
+ bic r2, r3, r0 /* Clear bits */
+ eor r2, r2, r1 /* XOR bits */
+
+ teq r2, r3 /* Only write if there is a change */
+ mcrne p15, 0, r2, c1, c0, 0 /* Write new control register */
+ mov r0, r3 /* Return old value */
+ mov pc, lr
+
+/*
+ * other potentially useful software functions are:
+ * clean D cache entry and flush I cache entry
+ * for the moment use cache_purgeID_E
+ */
+
+/* Random odd functions */
+
+/*
+ * Function to get the offset of a stored program counter from the
+ * instruction doing the store. This offset is defined to be the same
+ * for all STRs and STMs on a given implementation. Code based on
+ * section 2.4.3 of the ARM ARM (2nd Ed.), with modifications to work
+ * in 26-bit modes as well.
+ */
+ENTRY(get_pc_str_offset)
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+ sub sp, sp, #4
+ mov r1, pc /* R1 = addr of following STR */
+ mov r0, r0
+ str pc, [sp] /* [SP] = . + offset */
+ ldr r0, [sp]
+ sub r0, r0, r1
+ ldmdb fp, {fp, sp, pc}
diff --git a/sys/arch/arm/arm/cpufunc_asm_armv4.S b/sys/arch/arm/arm/cpufunc_asm_armv4.S
new file mode 100644
index 00000000000..35089398b4c
--- /dev/null
+++ b/sys/arch/arm/arm/cpufunc_asm_armv4.S
@@ -0,0 +1,67 @@
+/* $OpenBSD: cpufunc_asm_armv4.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: cpufunc_asm_armv4.S,v 1.1 2001/11/10 23:14:09 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001 ARM Limited
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * ARM9 assembly functions for CPU / MMU / TLB specific operations
+ */
+
+#include <machine/cpu.h>
+#include <machine/asm.h>
+
+/*
+ * TLB functions
+ */
+ENTRY(armv4_tlb_flushID)
+ mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
+ mov pc, lr
+
+ENTRY(armv4_tlb_flushI)
+ mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
+ mov pc, lr
+
+ENTRY(armv4_tlb_flushD)
+ mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
+ mov pc, lr
+
+ENTRY(armv4_tlb_flushD_SE)
+ mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
+ mov pc, lr
+
+/*
+ * Other functions
+ */
+ENTRY(armv4_drain_writebuf)
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mov pc, lr
diff --git a/sys/arch/arm/arm/cpufunc_asm_sa1.S b/sys/arch/arm/arm/cpufunc_asm_sa1.S
new file mode 100644
index 00000000000..01db708e61c
--- /dev/null
+++ b/sys/arch/arm/arm/cpufunc_asm_sa1.S
@@ -0,0 +1,314 @@
+/* $OpenBSD: cpufunc_asm_sa1.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: cpufunc_asm_sa1.S,v 1.8 2002/08/17 16:36:32 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * SA-1 assembly functions for CPU / MMU / TLB specific operations
+ */
+
+#include <machine/cpu.h>
+#include <machine/asm.h>
+
+.Lblock_userspace_access:
+ .word _C_LABEL(block_userspace_access)
+
+/*
+ * Functions to set the MMU Translation Table Base register
+ *
+ * We need to clean and flush the cache as it uses virtual
+ * addresses that are about to change.
+ */
+ENTRY(sa1_setttb)
+#ifdef CACHE_CLEAN_BLOCK_INTR
+ mrs r3, cpsr_all
+ orr r1, r3, #(I32_bit | F32_bit)
+ msr cpsr_all, r1
+#else
+ ldr r3, .Lblock_userspace_access
+ ldr r2, [r3]
+ orr r1, r2, #1
+ str r1, [r3]
+#endif
+ stmfd sp!, {r0-r3, lr}
+ bl _C_LABEL(sa1_cache_cleanID)
+ ldmfd sp!, {r0-r3, lr}
+ mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
+ mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */
+
+ /* The cleanID above means we only need to flush the I cache here */
+ mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */
+
+ /* Make sure that pipeline is emptied */
+ mov r0, r0
+ mov r0, r0
+#ifdef CACHE_CLEAN_BLOCK_INTR
+ msr cpsr_all, r3
+#else
+ str r2, [r3]
+#endif
+ mov pc, lr
+
+/*
+ * TLB functions
+ */
+ENTRY(sa1_tlb_flushID_SE)
+ mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
+ mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
+ mov pc, lr
+
+/*
+ * Cache functions
+ */
+ENTRY(sa1_cache_flushID)
+ mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
+ mov pc, lr
+
+ENTRY(sa1_cache_flushI)
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
+ mov pc, lr
+
+ENTRY(sa1_cache_flushD)
+ mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
+ mov pc, lr
+
+ENTRY(sa1_cache_flushD_SE)
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ mov pc, lr
+
+ENTRY(sa1_cache_cleanD_E)
+ mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ mov pc, lr
+
+/*
+ * Information for the SA-1 cache clean/purge functions:
+ *
+ * * Virtual address of the memory region to use
+ * * Size of memory region
+ */
+ .data
+
+ .global _C_LABEL(sa1_cache_clean_addr)
+_C_LABEL(sa1_cache_clean_addr):
+ .word 0xf0000000
+
+ .global _C_LABEL(sa1_cache_clean_size)
+_C_LABEL(sa1_cache_clean_size):
+#if defined(CPU_SA1100) || defined(CPU_SA1110)
+ .word 0x00004000
+#else
+ .word 0x00008000
+#endif
+
+ .text
+
+.Lsa1_cache_clean_addr:
+ .word _C_LABEL(sa1_cache_clean_addr)
+.Lsa1_cache_clean_size:
+ .word _C_LABEL(sa1_cache_clean_size)
+
+#ifdef CACHE_CLEAN_BLOCK_INTR
+#define SA1_CACHE_CLEAN_BLOCK \
+ mrs r3, cpsr_all ; \
+ orr r0, r3, #(I32_bit | F32_bit) ; \
+ msr cpsr_all, r0
+
+#define SA1_CACHE_CLEAN_UNBLOCK \
+ msr cpsr_all, r3
+#else
+#define SA1_CACHE_CLEAN_BLOCK \
+ ldr r3, .Lblock_userspace_access ; \
+ ldr ip, [r3] ; \
+ orr r0, ip, #1 ; \
+ str r0, [r3]
+
+#define SA1_CACHE_CLEAN_UNBLOCK \
+ str ip, [r3]
+#endif /* CACHE_CLEAN_BLOCK_INTR */
+
+#ifdef DOUBLE_CACHE_CLEAN_BANK
+#define SA1_DOUBLE_CACHE_CLEAN_BANK \
+ eor r0, r0, r1 ; \
+ str r0, [r2]
+#else
+#define SA1_DOUBLE_CACHE_CLEAN_BANK /* nothing */
+#endif /* DOUBLE_CACHE_CLEAN_BANK */
+
+#define SA1_CACHE_CLEAN_PROLOGUE \
+ SA1_CACHE_CLEAN_BLOCK ; \
+ ldr r2, .Lsa1_cache_clean_addr ; \
+ ldmia r2, {r0, r1} ; \
+ SA1_DOUBLE_CACHE_CLEAN_BANK
+
+#define SA1_CACHE_CLEAN_EPILOGUE \
+ SA1_CACHE_CLEAN_UNBLOCK
+
+ENTRY_NP(sa1_cache_syncI)
+ENTRY_NP(sa1_cache_purgeID)
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
+ENTRY_NP(sa1_cache_cleanID)
+ENTRY_NP(sa1_cache_purgeD)
+ENTRY(sa1_cache_cleanD)
+ SA1_CACHE_CLEAN_PROLOGUE
+
+1: ldr r2, [r0], #32
+ subs r1, r1, #32
+ bne 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+
+ SA1_CACHE_CLEAN_EPILOGUE
+ mov pc, lr
+
+ENTRY(sa1_cache_purgeID_E)
+ mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ mov pc, lr
+
+ENTRY(sa1_cache_purgeD_E)
+ mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ mov pc, lr
+
+/*
+ * Soft functions
+ */
+/* sa1_cache_syncI is identical to sa1_cache_purgeID */
+
+ENTRY(sa1_cache_cleanID_rng)
+ENTRY(sa1_cache_cleanD_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(sa1_cache_cleanID)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mov pc, lr
+
+ENTRY(sa1_cache_purgeID_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(sa1_cache_purgeID)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
+ mov pc, lr
+
+ENTRY(sa1_cache_purgeD_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(sa1_cache_purgeD)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mov pc, lr
+
+ENTRY(sa1_cache_syncI_rng)
+ cmp r1, #0x4000
+ bcs _C_LABEL(sa1_cache_syncI)
+
+ and r2, r0, #0x1f
+ add r1, r1, r2
+ bic r0, r0, #0x1f
+
+1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
+
+ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
+
+ mov pc, lr
+
+/*
+ * Context switch.
+ *
+ * These are the CPU-specific parts of the context switcher cpu_switch()
+ * These functions actually perform the TTB reload.
+ *
+ * NOTE: Special calling convention
+ * r1, r4-r13 must be preserved
+ */
+#if defined(CPU_SA110)
+ENTRY(sa110_context_switch)
+ /*
+ * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
+ * Thus the data cache will contain only kernel data and the
+ * instruction cache will contain only kernel code, and all
+ * kernel mappings are shared by all processes.
+ */
+
+ /* Write the TTB */
+ mcr p15, 0, r0, c2, c0, 0
+
+ /* If we have updated the TTB we must flush the TLB */
+ mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
+
+ /* Make sure that pipeline is emptied */
+ mov r0, r0
+ mov r0, r0
+ mov pc, lr
+#endif
diff --git a/sys/arch/arm/arm/cpuswitch.S b/sys/arch/arm/arm/cpuswitch.S
new file mode 100644
index 00000000000..743540537d4
--- /dev/null
+++ b/sys/arch/arm/arm/cpuswitch.S
@@ -0,0 +1,1172 @@
+/* $OpenBSD: cpuswitch.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */
+
+/*
+ * Copyright 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpuswitch.S
+ *
+ * cpu switching functions
+ *
+ * Created : 15/10/94
+ */
+
+#include "assym.h"
+#include <machine/cpu.h>
+#include <machine/frame.h>
+#include <machine/asm.h>
+
+/* LINTSTUB: include <sys/param.h> */
+
+#undef IRQdisable
+#undef IRQenable
+
+/*
+ * New experimental definitions of IRQdisable and IRQenable
+ * These keep FIQ's enabled since FIQ's are special.
+ */
+
+#define IRQdisable \
+ mrs r14, cpsr ; \
+ orr r14, r14, #(I32_bit) ; \
+ msr cpsr_c, r14 ; \
+
+#define IRQenable \
+ mrs r14, cpsr ; \
+ bic r14, r14, #(I32_bit) ; \
+ msr cpsr_c, r14 ; \
+
+/*
+ * These are used for switching the translation table/DACR.
+ * Since the vector page can be invalid for a short time, we must
+ * disable both regular IRQs *and* FIQs.
+ *
+ * XXX: This is not necessary if the vector table is relocated.
+ */
+#define IRQdisableALL \
+ mrs r14, cpsr ; \
+ orr r14, r14, #(I32_bit | F32_bit) ; \
+ msr cpsr_c, r14
+
+#define IRQenableALL \
+ mrs r14, cpsr ; \
+ bic r14, r14, #(I32_bit | F32_bit) ; \
+ msr cpsr_c, r14
+
+ .text
+
+.Lwhichqs:
+ .word _C_LABEL(whichqs)
+
+.Lqs:
+ .word _C_LABEL(qs)
+
+/*
+ * On entry
+ * r0 = process
+ */
+
+ENTRY(setrunqueue)
+ /*
+ * Local register usage
+ * r0 = process
+ * r1 = queue
+ * r2 = &qs[queue] and temp
+ * r3 = temp
+ * r12 = whichqs
+ */
+#ifdef DIAGNOSTIC
+ ldr r1, [r0, #(P_BACK)]
+ teq r1, #0x00000000
+ bne Lsetrunqueue_erg
+
+ ldr r1, [r0, #(P_WCHAN)]
+ teq r1, #0x00000000
+ bne Lsetrunqueue_erg
+#endif
+
+ /* Get the priority of the queue */
+ ldrb r1, [r0, #(P_PRIORITY)]
+ mov r1, r1, lsr #2
+
+ /* Indicate that there is a process on this queue */
+ ldr r12, .Lwhichqs
+ ldr r2, [r12]
+ mov r3, #0x00000001
+ mov r3, r3, lsl r1
+ orr r2, r2, r3
+ str r2, [r12]
+
+ /* Get the address of the queue */
+ ldr r2, .Lqs
+ add r1, r2, r1, lsl # 3
+
+ /* Hook the process in */
+ str r1, [r0, #(P_FORW)]
+ ldr r2, [r1, #(P_BACK)]
+
+ str r0, [r1, #(P_BACK)]
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq Lsetrunqueue_erg
+#endif
+ str r0, [r2, #(P_FORW)]
+ str r2, [r0, #(P_BACK)]
+
+ mov pc, lr
+
+#ifdef DIAGNOSTIC
+Lsetrunqueue_erg:
+ mov r2, r1
+ mov r1, r0
+ add r0, pc, #Ltext1 - . - 8
+ bl _C_LABEL(printf)
+
+ ldr r2, .Lqs
+ ldr r1, [r2]
+ add r0, pc, #Ltext2 - . - 8
+ b _C_LABEL(panic)
+
+Ltext1:
+ .asciz "setrunqueue : %08x %08x\n"
+Ltext2:
+ .asciz "setrunqueue : [qs]=%08x qs=%08x\n"
+ .align 0
+#endif
+
+/*
+ * On entry
+ * r0 = process
+ */
+
+ENTRY(remrunqueue)
+ /*
+ * Local register usage
+ * r0 = oldproc
+ * r1 = queue
+ * r2 = &qs[queue] and scratch
+ * r3 = scratch
+ * r12 = whichqs
+ */
+
+ /* Get the priority of the queue */
+ ldrb r1, [r0, #(P_PRIORITY)]
+ mov r1, r1, lsr #2
+
+ /* Unhook the process */
+ ldr r2, [r0, #(P_FORW)]
+ ldr r3, [r0, #(P_BACK)]
+
+ str r3, [r2, #(P_BACK)]
+ str r2, [r3, #(P_FORW)]
+
+ /* If the queue is now empty clear the queue not empty flag */
+ teq r2, r3
+
+ /* This could be reworked to avoid the use of r4 */
+ ldreq r12, .Lwhichqs
+ ldreq r2, [r12]
+ moveq r3, #0x00000001
+ moveq r3, r3, lsl r1
+ biceq r2, r2, r3
+ streq r2, [r12]
+
+ /* Remove the back pointer for the process */
+ mov r1, #0x00000000
+ str r1, [r0, #(P_BACK)]
+
+ mov pc, lr
+
+
+/*
+ * cpuswitch()
+ *
+ * performs a process context switch.
+ * This function has several entry points
+ */
+
+#ifdef MULTIPROCESSOR
+.Lcpu_info_store:
+ .word _C_LABEL(cpu_info_store)
+.Lcurproc:
+ /* FIXME: This is bogus in the general case. */
+ .word _C_LABEL(cpu_info_store) + CI_CURLWP
+
+.Lcurpcb:
+ .word _C_LABEL(cpu_info_store) + CI_CURPCB
+#else
+.Lcurproc:
+ .word _C_LABEL(curproc)
+
+.Lcurpcb:
+ .word _C_LABEL(curpcb)
+#endif
+
+.Lwant_resched:
+ .word _C_LABEL(want_resched)
+
+.Lcpufuncs:
+ .word _C_LABEL(cpufuncs)
+
+#ifndef MULTIPROCESSOR
+ .data
+ .global _C_LABEL(curpcb)
+_C_LABEL(curpcb):
+ .word 0x00000000
+ .text
+#endif
+
+.Lblock_userspace_access:
+ .word _C_LABEL(block_userspace_access)
+
+.Lcpu_do_powersave:
+ .word _C_LABEL(cpu_do_powersave)
+
+.Lpmap_kernel_cstate:
+ .word (kernel_pmap_store + PMAP_CSTATE)
+
+.Llast_cache_state_ptr:
+ .word _C_LABEL(pmap_cache_state)
+
+/*
+ * Idle loop, exercised while waiting for a process to wake up.
+ *
+ * NOTE: When we jump back to .Lswitch_search, we must have a
+ * pointer to whichqs in r7, which is what it is when we arrive
+ * here.
+ */
+/* LINTSTUB: Ignore */
+ASENTRY_NP(idle)
+ ldr r6, .Lcpu_do_powersave
+ IRQenable /* Enable interrupts */
+ ldr r6, [r6] /* r6 = cpu_do_powersave */
+
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+ bl _C_LABEL(sched_unlock_idle)
+#endif
+
+ /* Drop to spl0 (returns the current spl level in r0). */
+#ifdef __NEWINTR
+ mov r0, #(IPL_NONE)
+ bl _C_LABEL(_spllower)
+#else /* ! __NEWINTR */
+ mov r0, #(_SPL_0)
+ bl _C_LABEL(splx)
+#endif /* __NEWINTR */
+
+ teq r6, #0 /* cpu_do_powersave non zero? */
+ ldrne r6, .Lcpufuncs
+ mov r4, r0 /* Old interrupt level to r4 */
+ ldrne r6, [r6, #(CF_SLEEP)]
+
+ /*
+ * Main idle loop.
+ * r6 points to power-save idle function if required, else NULL.
+ */
+1: ldr r3, [r7] /* r3 = sched_whichqs */
+ teq r3, #0
+ bne 2f /* We have work to do */
+ teq r6, #0 /* Powersave idle? */
+ beq 1b /* Nope. Just sit-n-spin. */
+
+ /*
+ * Before going into powersave idle mode, disable interrupts
+ * and check sched_whichqs one more time.
+ */
+ IRQdisableALL
+ ldr r3, [r7]
+ mov r0, #0
+ teq r3, #0 /* sched_whichqs still zero? */
+ moveq lr, pc
+ moveq pc, r6 /* If so, do powersave idle */
+ IRQenableALL
+ b 1b /* Back around */
+
+ /*
+ * sched_whichqs indicates that at least one proc is ready to run.
+ * Restore the original interrupt priority level, grab the
+ * scheduler lock if necessary, and jump back into cpu_switch.
+ */
+2: mov r0, r4
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+ bl _C_LABEL(splx)
+ adr lr, .Lswitch_search
+ b _C_LABEL(sched_lock_idle)
+#else
+ adr lr, .Lswitch_search
+ b _C_LABEL(splx)
+#endif
+
+
+/*
+ * Find a new lwp to run, save the current context and
+ * load the new context
+ *
+ * Arguments:
+ * r0 'struct proc *' of the current LWP
+ */
+
+ENTRY(cpu_switch)
+/*
+ * Local register usage. Some of these registers are out of date.
+ * r1 = oldproc
+ * r2 = spl level
+ * r3 = whichqs
+ * r4 = queue
+ * r5 = &qs[queue]
+ * r6 = newlwp
+ * r7 = scratch
+ */
+ stmfd sp!, {r4-r7, lr}
+
+ /*
+ * Indicate that there is no longer a valid process (curproc = NULL).
+ * Zero the current PCB pointer while we're at it.
+ */
+ ldr r7, .Lcurproc
+ ldr r6, .Lcurpcb
+ mov r2, #0x00000000
+ str r2, [r7] /* curproc = NULL */
+ str r2, [r6] /* curpcb = NULL */
+
+ /* stash the old proc while we call functions */
+ mov r5, r0
+
+ /* First phase : find a new proc */
+ ldr r7, .Lwhichqs
+
+ /* rem: r5 = old proc */
+ /* rem: r7 = &whichqs */
+
+.Lswitch_search:
+ IRQdisable
+
+ /* Do we have any active queues */
+ ldr r3, [r7]
+
+ /* If not we must idle until we do. */
+ teq r3, #0x00000000
+ beq _ASM_LABEL(idle)
+
+ /* put old proc back in r1 */
+ mov r1, r5
+
+ /* rem: r1 = old proc */
+ /* rem: r3 = whichqs */
+ /* rem: interrupts are disabled */
+
+ /* used further down, saves SA stall */
+ ldr r6, .Lqs
+
+ /*
+ * We have found an active queue. Currently we do not know which queue
+ * is active just that one of them is.
+ */
+ /* Non-Xscale version of the ffs algorithm devised by d.seal and
+ * posted to comp.sys.arm on 16 Feb 1994.
+ */
+ rsb r5, r3, #0
+ ands r0, r3, r5
+
+#ifndef __XSCALE__
+ adr r5, .Lcpu_switch_ffs_table
+
+ /* X = R0 */
+ orr r4, r0, r0, lsl #4 /* r4 = X * 0x11 */
+ orr r4, r4, r4, lsl #6 /* r4 = X * 0x451 */
+ rsb r4, r4, r4, lsl #16 /* r4 = X * 0x0450fbaf */
+
+ /* now lookup in table indexed on top 6 bits of a4 */
+ ldrb r4, [ r5, r4, lsr #26 ]
+
+#else /* __XSCALE__ */
+ clz r4, r0
+ rsb r4, r4, #31
+#endif /* __XSCALE__ */
+
+ /* rem: r0 = bit mask of chosen queue (1 << r4) */
+ /* rem: r1 = old proc */
+ /* rem: r3 = whichqs */
+ /* rem: r4 = queue number */
+ /* rem: interrupts are disabled */
+
+ /* Get the address of the queue (&qs[queue]) */
+ add r5, r6, r4, lsl #3
+
+ /*
+ * Get the proc from the queue and place the next process in
+ * the queue at the head. This basically unlinks the proc at
+ * the head of the queue.
+ */
+ ldr r6, [r5, #(P_FORW)]
+
+#ifdef DIAGNOSTIC
+ cmp r6, r5
+ beq .Lswitch_bogons
+#endif
+
+ /* rem: r6 = new proc */
+ ldr r7, [r6, #(P_FORW)]
+ str r7, [r5, #(P_FORW)]
+
+ /*
+ * Test to see if the queue is now empty. If the head of the queue
+ * points to the queue itself then there are no more procs in
+ * the queue. We can therefore clear the queue not empty flag held
+ * in r3.
+ */
+
+ teq r5, r7
+ biceq r3, r3, r0
+
+	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED ANY MORE */
+
+	/* Fix the back pointer for the proc now at the head of the queue. */
+ ldr r0, [r6, #(P_BACK)]
+ str r0, [r7, #(P_BACK)]
+
+ /* Update the RAM copy of the queue not empty flags word. */
+ ldreq r7, .Lwhichqs
+ streq r3, [r7]
+
+ /* rem: r1 = old proc */
+ /* rem: r3 = whichqs - NOT NEEDED ANY MORE */
+ /* rem: r4 = queue number - NOT NEEDED ANY MORE */
+ /* rem: r6 = new proc */
+ /* rem: interrupts are disabled */
+
+ /* Clear the want_resched flag */
+ ldr r7, .Lwant_resched
+ mov r0, #0x00000000
+ str r0, [r7]
+
+ /*
+ * Clear the back pointer of the proc we have removed from
+ * the head of the queue. The new proc is isolated now.
+ */
+ str r0, [r6, #(P_BACK)]
+
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+ /*
+ * unlock the sched_lock, but leave interrupts off, for now.
+ */
+ mov r7, r1
+ bl _C_LABEL(sched_unlock_idle)
+ mov r1, r7
+#endif
+
+
+.Lswitch_resume:
+ /* rem: r1 = old proc */
+ /* rem: r4 = return value [not used if came from cpu_switchto()] */
+ /* rem: r6 = new process */
+ /* rem: interrupts are disabled */
+
+#ifdef MULTIPROCESSOR
+ /* XXX use curcpu() */
+ ldr r0, .Lcpu_info_store
+ str r0, [r6, #(P_CPU)]
+#else
+ /* l->l_cpu initialized in fork1() for single-processor */
+#endif
+
+#if 0
+ /* Process is now on a processor. */
+ mov r0, #LSONPROC /* l->l_stat = LSONPROC */
+ str r0, [r6, #(P_STAT)]
+#endif
+
+ /* We have a new curproc now so make a note it */
+ ldr r7, .Lcurproc
+ str r6, [r7]
+
+ /* Hook in a new pcb */
+ ldr r7, .Lcurpcb
+ ldr r0, [r6, #(P_ADDR)]
+ str r0, [r7]
+
+ /* At this point we can allow IRQ's again. */
+ IRQenable
+
+ /* rem: r1 = old proc */
+ /* rem: r4 = return value */
+ /* rem: r6 = new process */
+ /* rem: interrupts are enabled */
+
+ /*
+ * If the new process is the same as the process that called
+ * cpu_switch() then we do not need to save and restore any
+ * contexts. This means we can make a quick exit.
+ * The test is simple if curproc on entry (now in r1) is the
+ * same as the proc removed from the queue we can jump to the exit.
+ */
+ teq r1, r6
+ moveq r4, #0x00000000 /* default to "didn't switch" */
+ beq .Lswitch_return
+
+ /*
+ * At this point, we are guaranteed to be switching to
+ * a new proc.
+ */
+ mov r4, #0x00000001
+
+ /* Remember the old proc in r0 */
+ mov r0, r1
+
+ /*
+ * If the old proc on entry to cpu_switch was zero then the
+ * process that called it was exiting. This means that we do
+ * not need to save the current context. Instead we can jump
+ * straight to restoring the context for the new process.
+ */
+ teq r0, #0x00000000
+ beq .Lswitch_exited
+
+ /* rem: r0 = old proc */
+ /* rem: r4 = return value */
+ /* rem: r6 = new process */
+ /* rem: interrupts are enabled */
+
+ /* Stage two : Save old context */
+
+ /* Get the user structure for the old proc. */
+ ldr r1, [r0, #(P_ADDR)]
+
+ /* Save all the registers in the old proc's pcb */
+#ifndef __XSCALE__
+ add r7, r1, #(PCB_R8)
+ stmia r7, {r8-r13}
+#else
+ strd r8, [r1, #(PCB_R8)]
+ strd r10, [r1, #(PCB_R10)]
+ strd r12, [r1, #(PCB_R12)]
+#endif
+
+ /*
+ * NOTE: We can now use r8-r13 until it is time to restore
+ * them for the new process.
+ */
+
+ /* Remember the old PCB. */
+ mov r8, r1
+
+ /* r1 now free! */
+
+ /* Get the user structure for the new process in r9 */
+ ldr r9, [r6, #(P_ADDR)]
+
+ /*
+ * This can be optimised... We know we want to go from SVC32
+ * mode to UND32 mode
+ */
+ mrs r3, cpsr
+ bic r2, r3, #(PSR_MODE)
+ orr r2, r2, #(PSR_UND32_MODE | I32_bit)
+ msr cpsr_c, r2
+
+ str sp, [r8, #(PCB_UND_SP)]
+
+ msr cpsr_c, r3 /* Restore the old mode */
+
+ /* rem: r0 = old proc */
+ /* rem: r4 = return value */
+ /* rem: r6 = new process */
+ /* rem: r8 = old PCB */
+ /* rem: r9 = new PCB */
+ /* rem: interrupts are enabled */
+
+ /* What else needs to be saved? Only FPA stuff, when that is supported */
+
+ /* Third phase : restore saved context */
+
+ /* rem: r0 = old proc */
+ /* rem: r4 = return value */
+ /* rem: r6 = new proc */
+ /* rem: r8 = old PCB */
+ /* rem: r9 = new PCB */
+ /* rem: interrupts are enabled */
+
+ /*
+ * Get the new L1 table pointer into r11. If we're switching to
+ * an LWP with the same address space as the outgoing one, we can
+ * skip the cache purge and the TTB load.
+ *
+ * To avoid data dep stalls that would happen anyway, we try
+ * to get some useful work done in the meantime.
+ */
+ ldr r10, [r8, #(PCB_PAGEDIR)] /* r10 = old L1 */
+ ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */
+
+ ldr r0, [r8, #(PCB_DACR)] /* r0 = old DACR */
+ ldr r1, [r9, #(PCB_DACR)] /* r1 = new DACR */
+ ldr r8, [r9, #(PCB_CSTATE)] /* r8 = &new_pmap->pm_cstate */
+ ldr r5, .Llast_cache_state_ptr /* Previous thread's cstate */
+
+ teq r10, r11 /* Same L1? */
+ ldr r5, [r5]
+ cmpeq r0, r1 /* Same DACR? */
+ beq .Lcs_context_switched /* yes! */
+
+ ldr r3, .Lblock_userspace_access
+ mov r12, #0
+ cmp r5, #0 /* No last vm? (switch_exit) */
+ beq .Lcs_cache_purge_skipped /* No, we can skip cache flush */
+
+ mov r2, #DOMAIN_CLIENT
+ cmp r1, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
+ beq .Lcs_cache_purge_skipped /* Yup. Don't flush cache */
+
+ cmp r5, r8 /* Same userland VM space? */
+ ldrneb r12, [r5, #(CS_CACHE_ID)] /* Last VM space cache state */
+
+ /*
+ * We're definitely switching to a new userland VM space,
+ * and the previous userland VM space has yet to be flushed
+ * from the cache/tlb.
+ *
+ * r12 holds the previous VM space's cs_cache_id state
+ */
+ tst r12, #0xff /* Test cs_cache_id */
+ beq .Lcs_cache_purge_skipped /* VM space is not in cache */
+
+ /*
+ * Definitely need to flush the cache.
+ * Mark the old VM space as NOT being resident in the cache.
+ */
+ mov r2, #0x00000000
+ strb r2, [r5, #(CS_CACHE_ID)]
+ strb r2, [r5, #(CS_CACHE_D)]
+
+ /*
+ * Don't allow user space access between the purge and the switch.
+ */
+ mov r2, #0x00000001
+ str r2, [r3]
+
+ stmfd sp!, {r0-r3}
+ ldr r1, .Lcpufuncs
+ mov lr, pc
+ ldr pc, [r1, #CF_IDCACHE_WBINV_ALL]
+ ldmfd sp!, {r0-r3}
+
+.Lcs_cache_purge_skipped:
+ /* rem: r1 = new DACR */
+ /* rem: r3 = &block_userspace_access */
+ /* rem: r4 = return value */
+ /* rem: r5 = &old_pmap->pm_cstate (or NULL) */
+ /* rem: r6 = new proc */
+ /* rem: r8 = &new_pmap->pm_cstate */
+ /* rem: r9 = new PCB */
+ /* rem: r10 = old L1 */
+ /* rem: r11 = new L1 */
+
+ mov r2, #0x00000000
+ ldr r7, [r9, #(PCB_PL1VEC)]
+
+ /*
+ * At this point we need to kill IRQ's again.
+ *
+ * XXXSCW: Don't need to block FIQs if vectors have been relocated
+ */
+ IRQdisableALL
+
+ /*
+ * Interrupts are disabled so we can allow user space accesses again
+ * as none will occur until interrupts are re-enabled after the
+ * switch.
+ */
+ str r2, [r3]
+
+ /*
+ * Ensure the vector table is accessible by fixing up the L1
+ */
+ cmp r7, #0 /* No need to fixup vector table? */
+ ldrne r2, [r7] /* But if yes, fetch current value */
+ ldrne r0, [r9, #(PCB_L1VEC)] /* Fetch new vector_page value */
+ mcr p15, 0, r1, c3, c0, 0 /* Update DACR for new context */
+ cmpne r2, r0 /* Stuffing the same value? */
+#ifndef PMAP_INCLUDE_PTE_SYNC
+ strne r0, [r7] /* Nope, update it */
+#else
+ beq .Lcs_same_vector
+ str r0, [r7] /* Otherwise, update it */
+
+ /*
+ * Need to sync the cache to make sure that last store is
+ * visible to the MMU.
+ */
+ ldr r2, .Lcpufuncs
+ mov r0, r7
+ mov r1, #4
+ mov lr, pc
+ ldr pc, [r2, #CF_DCACHE_WB_RANGE]
+
+.Lcs_same_vector:
+#endif /* PMAP_INCLUDE_PTE_SYNC */
+
+ cmp r10, r11 /* Switching to the same L1? */
+ ldr r10, .Lcpufuncs
+ beq .Lcs_same_l1 /* Yup. */
+
+ /*
+ * Do a full context switch, including full TLB flush.
+ */
+ mov r0, r11
+ mov lr, pc
+ ldr pc, [r10, #CF_CONTEXT_SWITCH]
+
+ /*
+ * Mark the old VM space as NOT being resident in the TLB
+ */
+ mov r2, #0x00000000
+ cmp r5, #0
+ strneh r2, [r5, #(CS_TLB_ID)]
+ b .Lcs_context_switched
+
+ /*
+ * We're switching to a different process in the same L1.
+ * In this situation, we only need to flush the TLB for the
+ * vector_page mapping, and even then only if r7 is non-NULL.
+ */
+.Lcs_same_l1:
+ cmp r7, #0
+ movne r0, #0 /* We *know* vector_page's VA is 0x0 */
+ movne lr, pc
+ ldrne pc, [r10, #CF_TLB_FLUSHID_SE]
+
+.Lcs_context_switched:
+ /* rem: r8 = &new_pmap->pm_cstate */
+
+ /* XXXSCW: Safe to re-enable FIQs here */
+
+ /*
+ * The new VM space is live in the cache and TLB.
+ * Update its cache/tlb state, and if it's not the kernel
+ * pmap, update the 'last cache state' pointer.
+ */
+ mov r2, #-1
+ ldr r5, .Lpmap_kernel_cstate
+ ldr r0, .Llast_cache_state_ptr
+ str r2, [r8, #(CS_ALL)]
+ cmp r5, r8
+ strne r8, [r0]
+
+ /* rem: r4 = return value */
+ /* rem: r6 = new proc */
+ /* rem: r9 = new PCB */
+
+ /*
+ * This can be optimised... We know we want to go from SVC32
+ * mode to UND32 mode
+ */
+ mrs r3, cpsr
+ bic r2, r3, #(PSR_MODE)
+ orr r2, r2, #(PSR_UND32_MODE)
+ msr cpsr_c, r2
+
+ ldr sp, [r9, #(PCB_UND_SP)]
+
+ msr cpsr_c, r3 /* Restore the old mode */
+
+ /* Restore all the save registers */
+#ifndef __XSCALE__
+ add r7, r9, #PCB_R8
+ ldmia r7, {r8-r13}
+
+ sub r7, r7, #PCB_R8 /* restore PCB pointer */
+#else
+ mov r7, r9
+ ldr r8, [r7, #(PCB_R8)]
+ ldr r9, [r7, #(PCB_R9)]
+ ldr r10, [r7, #(PCB_R10)]
+ ldr r11, [r7, #(PCB_R11)]
+ ldr r12, [r7, #(PCB_R12)]
+ ldr r13, [r7, #(PCB_SP)]
+#endif
+
+#if 0
+ ldr r5, [r6, #(L_PROC)] /* fetch the proc for below */
+#else
+ mov r5, r6
+#endif
+
+ /* rem: r4 = return value */
+ /* rem: r5 = new proc's proc */
+ /* rem: r6 = new proc */
+ /* rem: r7 = new pcb */
+
+#ifdef ARMFPE
+ add r0, r7, #(USER_SIZE) & 0x00ff
+ add r0, r0, #(USER_SIZE) & 0xff00
+ bl _C_LABEL(arm_fpe_core_changecontext)
+#endif
+
+ /* We can enable interrupts again */
+ IRQenableALL
+
+ /* rem: r4 = return value */
+ /* rem: r5 = new proc's proc */
+ /* rem: r6 = new proc */
+ /* rem: r7 = new PCB */
+
+#if 0
+ /*
+ * Check for restartable atomic sequences (RAS).
+ */
+
+ ldr r2, [r5, #(P_RASLIST)]
+ ldr r1, [r7, #(PCB_TF)] /* r1 = trapframe (used below) */
+ teq r2, #0 /* p->p_nras == 0? */
+ bne .Lswitch_do_ras /* no, check for one */
+#endif
+
+.Lswitch_return:
+ /* cpu_switch returns 1 == switched, 0 == didn't switch */
+ mov r0, r4
+
+ /*
+ * Pull the registers that got pushed when either savectx() or
+ * cpu_switch() was called and return.
+ */
+ ldmfd sp!, {r4-r7, pc}
+
+#if 0
+.Lswitch_do_ras:
+ ldr r1, [r1, #(TF_PC)] /* second ras_lookup() arg */
+ mov r0, r5 /* first ras_lookup() arg */
+ bl _C_LABEL(ras_lookup)
+ cmn r0, #1 /* -1 means "not in a RAS" */
+ ldrne r1, [r7, #(PCB_TF)]
+ strne r0, [r1, #(TF_PC)]
+ b .Lswitch_return
+#endif
+
+.Lswitch_exited:
+ /*
+ * We skip the cache purge because switch_exit() already did it.
+ * Load up registers the way .Lcs_cache_purge_skipped expects.
+ * Userspace access is already blocked by switch_exit().
+ */
+ ldr r9, [r6, #(P_ADDR)] /* r9 = new PCB */
+ ldr r3, .Lblock_userspace_access
+ mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */
+ mov r5, #0 /* No previous cache state */
+ ldr r1, [r9, #(PCB_DACR)] /* r1 = new DACR */
+ ldr r8, [r9, #(PCB_CSTATE)] /* r8 = new cache state */
+ ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */
+ b .Lcs_cache_purge_skipped
+
+
+#ifdef DIAGNOSTIC
+.Lswitch_bogons:
+ adr r0, .Lswitch_panic_str
+ bl _C_LABEL(panic)
+1: nop
+ b 1b
+
+.Lswitch_panic_str:
+ .asciz "cpu_switch: sched_qs empty with non-zero sched_whichqs!\n"
+#endif
+
+/*
+ * cpu_switchto(struct proc *current, struct proc *next)
+ * Switch to the specified next LWP
+ * Arguments:
+ *
+ * r0 'struct proc *' of the current LWP
+ * r1 'struct proc *' of the LWP to switch to
+ */
+ENTRY(cpu_switchto)
+ stmfd sp!, {r4-r7, lr}
+
+ mov r6, r1 /* save new proc */
+
+#if defined(LOCKDEBUG)
+ mov r5, r0 /* save old proc */
+ bl _C_LABEL(sched_unlock_idle)
+ mov r1, r5
+#else
+ mov r1, r0
+#endif
+
+ IRQdisable
+
+ /*
+ * Okay, set up registers the way cpu_switch() wants them,
+ * and jump into the middle of it (where we bring up the
+ * new process).
+ *
+ * r1 = old proc (r6 = new proc)
+ */
+ b .Lswitch_resume
+
+/*
+ * void switch_exit(struct proc *l, struct proc *l0,
+ * void (*exit)(struct proc *));
+ * Switch to proc0's saved context and deallocate the address space and kernel
+ * stack for l. Then jump into cpu_switch(), as if we were in proc0 all along.
+ */
+
+/* LINTSTUB: Func: void switch_exit(struct proc *l, struct proc *l0,
+ void (*func)(struct proc *)) */
+ENTRY(switch_exit)
+ /*
+ * The process is going away, so we can use callee-saved
+ * registers here without having to save them.
+ */
+
+ mov r4, r0
+ ldr r0, .Lcurproc
+
+ mov r5, r1
+ ldr r1, .Lblock_userspace_access
+
+ mov r6, r2
+
+ /*
+ * r4 = proc
+ * r5 = proc0
+ * r6 = exit func
+ */
+
+ mov r2, #0x00000000 /* curproc = NULL */
+ str r2, [r0]
+
+ /*
+ * We're about to clear both the cache and the TLB.
+ * Make sure to zap the 'last cache state' pointer since the
+ * pmap might be about to go away. Also ensure the outgoing
+ * VM space's cache state is marked as NOT resident in the
+ * cache, and that proc0's cache state IS resident.
+ */
+ ldr r7, [r4, #(P_ADDR)] /* r7 = old proc's PCB */
+ ldr r0, .Llast_cache_state_ptr /* Last userland cache state */
+ ldr r9, [r7, #(PCB_CSTATE)] /* Fetch cache state pointer */
+ ldr r3, [r5, #(P_ADDR)] /* r3 = proc0's PCB */
+ str r2, [r0] /* No previous cache state */
+ str r2, [r9, #(CS_ALL)] /* Zap old proc's cache state */
+ ldr r3, [r3, #(PCB_CSTATE)] /* proc0's cache state */
+ mov r2, #-1
+ str r2, [r3, #(CS_ALL)] /* proc0 is in da cache! */
+
+ /*
+ * Don't allow user space access between the purge and the switch.
+ */
+ mov r2, #0x00000001
+ str r2, [r1]
+
+ /* Switch to proc0 context */
+
+ ldr r9, .Lcpufuncs
+ mov lr, pc
+ ldr pc, [r9, #CF_IDCACHE_WBINV_ALL]
+
+ ldr r0, [r7, #(PCB_PL1VEC)]
+ ldr r1, [r7, #(PCB_DACR)]
+
+ /*
+ * r0 = Pointer to L1 slot for vector_page (or NULL)
+ * r1 = proc0's DACR
+ * r4 = proc we're switching from
+ * r5 = proc0
+ * r6 = exit func
+ * r7 = proc0's PCB
+ * r9 = cpufuncs
+ */
+
+ IRQdisableALL
+
+ /*
+ * Ensure the vector table is accessible by fixing up proc0's L1
+ */
+ cmp r0, #0 /* No need to fixup vector table? */
+ ldrne r3, [r0] /* But if yes, fetch current value */
+ ldrne r2, [r7, #(PCB_L1VEC)] /* Fetch new vector_page value */
+ mcr p15, 0, r1, c3, c0, 0 /* Update DACR for proc0's context */
+ cmpne r3, r2 /* Stuffing the same value? */
+ strne r2, [r0] /* Store if not. */
+
+#ifdef PMAP_INCLUDE_PTE_SYNC
+ /*
+ * Need to sync the cache to make sure that last store is
+ * visible to the MMU.
+ */
+ movne r1, #4
+ movne lr, pc
+ ldrne pc, [r9, #CF_DCACHE_WB_RANGE]
+#endif /* PMAP_INCLUDE_PTE_SYNC */
+
+ /*
+ * Note: We don't do the same optimisation as cpu_switch() with
+ * respect to avoiding flushing the TLB if we're switching to
+ * the same L1 since this process' VM space may be about to go
+ * away, so we don't want *any* turds left in the TLB.
+ */
+
+ /* Switch the memory to the new process */
+ ldr r0, [r7, #(PCB_PAGEDIR)]
+ mov lr, pc
+ ldr pc, [r9, #CF_CONTEXT_SWITCH]
+
+ ldr r0, .Lcurpcb
+
+ /* Restore all the save registers */
+#ifndef __XSCALE__
+ add r1, r7, #PCB_R8
+ ldmia r1, {r8-r13}
+#else
+ ldr r8, [r7, #(PCB_R8)]
+ ldr r9, [r7, #(PCB_R9)]
+ ldr r10, [r7, #(PCB_R10)]
+ ldr r11, [r7, #(PCB_R11)]
+ ldr r12, [r7, #(PCB_R12)]
+ ldr r13, [r7, #(PCB_SP)]
+#endif
+ str r7, [r0] /* curpcb = proc0's PCB */
+
+ IRQenableALL
+
+ /*
+ * Schedule the vmspace and stack to be freed.
+ */
+ mov r0, r4 /* {proc_}exit2(l) */
+ mov lr, pc
+ mov pc, r6
+
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+ bl _C_LABEL(sched_lock_idle)
+#endif
+
+ ldr r7, .Lwhichqs /* r7 = &whichqs */
+ mov r5, #0x00000000 /* r5 = old proc = NULL */
+ b .Lswitch_search
+
+/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
+ENTRY(savectx)
+ /*
+ * r0 = pcb
+ */
+
+ /* Push registers.*/
+ stmfd sp!, {r4-r7, lr}
+
+ /* Store all the registers in the process's pcb */
+#ifndef __XSCALE__
+ add r2, r0, #(PCB_R8)
+ stmia r2, {r8-r13}
+#else
+ strd r8, [r0, #(PCB_R8)]
+ strd r10, [r0, #(PCB_R10)]
+ strd r12, [r0, #(PCB_R12)]
+#endif
+
+ /* Pull the regs of the stack */
+ ldmfd sp!, {r4-r7, pc}
+
+ENTRY(proc_trampoline)
+#ifdef __NEWINTR
+ mov r0, #(IPL_NONE)
+ bl _C_LABEL(_spllower)
+#else /* ! __NEWINTR */
+ mov r0, #(_SPL_0)
+ bl _C_LABEL(splx)
+#endif /* __NEWINTR */
+
+#ifdef MULTIPROCESSOR
+ bl _C_LABEL(proc_trampoline_mp)
+#endif
+ mov r0, r5
+ mov r1, sp
+ mov lr, pc
+ mov pc, r4
+
+ /* Kill irq's */
+ mrs r0, cpsr
+ orr r0, r0, #(I32_bit)
+ msr cpsr_c, r0
+
+ PULLFRAME
+
+ movs pc, lr /* Exit */
+
+#ifndef __XSCALE__
+ .type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
+.Lcpu_switch_ffs_table:
+/* same as ffs table but all nums are -1 from that */
+/* 0 1 2 3 4 5 6 7 */
+ .byte 0, 0, 1, 12, 2, 6, 0, 13 /* 0- 7 */
+ .byte 3, 0, 7, 0, 0, 0, 0, 14 /* 8-15 */
+ .byte 10, 4, 0, 0, 8, 0, 0, 25 /* 16-23 */
+ .byte 0, 0, 0, 0, 0, 21, 27, 15 /* 24-31 */
+ .byte 31, 11, 5, 0, 0, 0, 0, 0 /* 32-39 */
+ .byte 9, 0, 0, 24, 0, 0, 20, 26 /* 40-47 */
+ .byte 30, 0, 0, 0, 0, 23, 0, 19 /* 48-55 */
+ .byte 29, 0, 22, 18, 28, 17, 16, 0 /* 56-63 */
+#endif /* !__XSCALE__ */
diff --git a/sys/arch/arm/arm/db_disasm.c b/sys/arch/arm/arm/db_disasm.c
new file mode 100644
index 00000000000..071fab0d269
--- /dev/null
+++ b/sys/arch/arm/arm/db_disasm.c
@@ -0,0 +1,77 @@
+/* $OpenBSD: db_disasm.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: db_disasm.c,v 1.4 2003/07/15 00:24:38 lukem Exp $ */
+
+/*
+ * Copyright (c) 1996 Mark Brinicombe.
+ * Copyright (c) 1996 Brini.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_interface.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_output.h>
+#include <ddb/db_access.h>
+
+#include <arch/arm/arm/disassem.h>
+
+/* Glue code to interface db_disasm to the generic ARM disassembler */
+
+static db_expr_t db_disasm_read_word(db_expr_t);
+static void db_disasm_printaddr(db_expr_t);
+
+static const disasm_interface_t db_disasm_interface = {
+ db_disasm_read_word, db_disasm_printaddr, db_printf
+};
+
+static db_expr_t
+db_disasm_read_word(db_expr_t address)
+{
+
+ return db_get_value(address, 4, 0);
+}
+
+static void
+db_disasm_printaddr(db_expr_t address)
+{
+
+ db_printsym((db_addr_t)address, DB_STGY_ANY, db_printf);
+}
+
+vaddr_t
+db_disasm(vaddr_t loc, boolean_t altfmt)
+{
+
+ return disasm(&db_disasm_interface, loc, altfmt);
+}
+
+/* End of db_disasm.c */
diff --git a/sys/arch/arm/arm/db_interface.c b/sys/arch/arm/arm/db_interface.c
new file mode 100644
index 00000000000..3580ca72658
--- /dev/null
+++ b/sys/arch/arm/arm/db_interface.c
@@ -0,0 +1,476 @@
+/* $OpenBSD: db_interface.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: db_interface.c,v 1.34 2003/10/26 23:11:15 chris Exp $^I*/$
+
+/*
+ * Copyright (c) 1996 Scott K. Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * From: db_interface.c,v 2.4 1991/02/05 17:11:13 mrt (CMU)
+ */
+
+/*
+ * Interface to new debugger.
+ */
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
+#include <sys/systm.h> /* just for boothowto */
+#include <sys/exec.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <arm/db_machdep.h>
+#include <arm/katelib.h>
+#include <arm/undefined.h>
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_output.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_extern.h>
+#include <ddb/db_interface.h>
+#include <dev/cons.h>
+
+static long nil;
+
+int db_access_und_sp (struct db_variable *, db_expr_t *, int);
+int db_access_abt_sp (struct db_variable *, db_expr_t *, int);
+int db_access_irq_sp (struct db_variable *, db_expr_t *, int);
+u_int db_fetch_reg (int, db_regs_t *);
+
+int db_trapper (u_int, u_int, trapframe_t *, int);
+
+struct db_variable db_regs[] = {
+ { "spsr", (long *)&DDB_REGS->tf_spsr, FCN_NULL, },
+ { "r0", (long *)&DDB_REGS->tf_r0, FCN_NULL, },
+ { "r1", (long *)&DDB_REGS->tf_r1, FCN_NULL, },
+ { "r2", (long *)&DDB_REGS->tf_r2, FCN_NULL, },
+ { "r3", (long *)&DDB_REGS->tf_r3, FCN_NULL, },
+ { "r4", (long *)&DDB_REGS->tf_r4, FCN_NULL, },
+ { "r5", (long *)&DDB_REGS->tf_r5, FCN_NULL, },
+ { "r6", (long *)&DDB_REGS->tf_r6, FCN_NULL, },
+ { "r7", (long *)&DDB_REGS->tf_r7, FCN_NULL, },
+ { "r8", (long *)&DDB_REGS->tf_r8, FCN_NULL, },
+ { "r9", (long *)&DDB_REGS->tf_r9, FCN_NULL, },
+ { "r10", (long *)&DDB_REGS->tf_r10, FCN_NULL, },
+ { "r11", (long *)&DDB_REGS->tf_r11, FCN_NULL, },
+ { "r12", (long *)&DDB_REGS->tf_r12, FCN_NULL, },
+ { "usr_sp", (long *)&DDB_REGS->tf_usr_sp, FCN_NULL, },
+ { "usr_lr", (long *)&DDB_REGS->tf_usr_lr, FCN_NULL, },
+ { "svc_sp", (long *)&DDB_REGS->tf_svc_sp, FCN_NULL, },
+ { "svc_lr", (long *)&DDB_REGS->tf_svc_lr, FCN_NULL, },
+ { "pc", (long *)&DDB_REGS->tf_pc, FCN_NULL, },
+ { "und_sp", (long *)&nil, db_access_und_sp, },
+ { "abt_sp", (long *)&nil, db_access_abt_sp, },
+ { "irq_sp", (long *)&nil, db_access_irq_sp, },
+};
+
+extern label_t *db_recover;
+
+struct db_variable * db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+int db_active = 0;
+
+int
+db_access_und_sp(struct db_variable *vp, db_expr_t *valp, int rw)
+{
+
+ if (rw == DB_VAR_GET)
+ *valp = get_stackptr(PSR_UND32_MODE);
+ return(0);
+}
+
+int
+db_access_abt_sp(struct db_variable *vp, db_expr_t *valp, int rw)
+{
+
+ if (rw == DB_VAR_GET)
+ *valp = get_stackptr(PSR_ABT32_MODE);
+ return(0);
+}
+
+int
+db_access_irq_sp(struct db_variable *vp, db_expr_t *valp, int rw)
+{
+
+ if (rw == DB_VAR_GET)
+ *valp = get_stackptr(PSR_IRQ32_MODE);
+ return(0);
+}
+
+#ifdef DDB
+/*
+ * kdb_trap - field a TRACE or BPT trap
+ */
+int
+kdb_trap(int type, db_regs_t *regs)
+{
+ int s;
+
+ switch (type) {
+ case T_BREAKPOINT: /* breakpoint */
+ case -1: /* keyboard interrupt */
+ break;
+ default:
+ if (db_recover != 0) {
+ /* This will longjmp back into db_command_loop() */
+ db_error("Faulted in DDB; continuing...\n");
+ /*NOTREACHED*/
+ }
+ }
+
+ /* Should switch to kdb's own stack here. */
+
+ ddb_regs = *regs;
+
+ s = splhigh();
+ db_active++;
+ cnpollc(TRUE);
+ db_trap(type, 0/*code*/);
+ cnpollc(FALSE);
+ db_active--;
+ splx(s);
+
+ *regs = ddb_regs;
+
+ return (1);
+}
+#endif
+
+
+static int db_validate_address(vaddr_t addr);
+
+static int
+db_validate_address(vaddr_t addr)
+{
+ struct proc *p = curproc;
+ struct pmap *pmap;
+
+ if (!p || !p->p_vmspace || !p->p_vmspace->vm_map.pmap ||
+#ifndef ARM32_NEW_VM_LAYOUT
+ addr >= VM_MAXUSER_ADDRESS
+#else
+ addr >= VM_MIN_KERNEL_ADDRESS
+#endif
+ )
+ pmap = pmap_kernel();
+ else
+ pmap = p->p_vmspace->vm_map.pmap;
+
+ return (pmap_extract(pmap, addr, NULL) == FALSE);
+}
+
+/*
+ * Read bytes from kernel address space for debugger.
+ */
+void
+db_read_bytes(addr, size, data)
+ vaddr_t addr;
+ size_t size;
+ char *data;
+{
+ char *src = (char *)addr;
+
+ if (db_validate_address((u_int)src)) {
+ db_printf("address %p is invalid\n", src);
+ return;
+ }
+
+ if (size == 4 && (addr & 3) == 0 && ((u_int32_t)data & 3) == 0) {
+ *((int*)data) = *((int*)src);
+ return;
+ }
+
+ if (size == 2 && (addr & 1) == 0 && ((u_int32_t)data & 1) == 0) {
+ *((short*)data) = *((short*)src);
+ return;
+ }
+
+ while (size-- > 0) {
+ if (db_validate_address((u_int)src)) {
+ db_printf("address %p is invalid\n", src);
+ return;
+ }
+ *data++ = *src++;
+ }
+}
+
+static void
+db_write_text(vaddr_t addr, size_t size, char *data)
+{
+ struct pmap *pmap = pmap_kernel();
+ pd_entry_t *pde, oldpde, tmppde;
+ pt_entry_t *pte, oldpte, tmppte;
+ vaddr_t pgva;
+ size_t limit, savesize;
+ char *dst;
+
+ /* XXX: gcc */
+ oldpte = 0;
+
+ if ((savesize = size) == 0)
+ return;
+
+ dst = (char *) addr;
+
+ do {
+ /* Get the PDE of the current VA. */
+ if (pmap_get_pde_pte(pmap, (vaddr_t) dst, &pde, &pte) == FALSE)
+ goto no_mapping;
+ switch ((oldpde = *pde) & L1_TYPE_MASK) {
+ case L1_TYPE_S:
+ pgva = (vaddr_t)dst & L1_S_FRAME;
+ limit = L1_S_SIZE - ((vaddr_t)dst & L1_S_OFFSET);
+
+ tmppde = oldpde | L1_S_PROT_W;
+ *pde = tmppde;
+ PTE_SYNC(pde);
+ break;
+
+ case L1_TYPE_C:
+ pgva = (vaddr_t)dst & L2_S_FRAME;
+ limit = L2_S_SIZE - ((vaddr_t)dst & L2_S_OFFSET);
+
+ if (pte == NULL)
+ goto no_mapping;
+ oldpte = *pte;
+ tmppte = oldpte | L2_S_PROT_W;
+ *pte = tmppte;
+ PTE_SYNC(pte);
+ break;
+
+ default:
+ no_mapping:
+ printf(" address 0x%08lx not a valid page\n",
+ (vaddr_t) dst);
+ return;
+ }
+ cpu_tlb_flushD_SE(pgva);
+ cpu_cpwait();
+
+ if (limit > size)
+ limit = size;
+ size -= limit;
+
+ /*
+ * Page is now writable. Do as much access as we
+ * can in this page.
+ */
+ for (; limit > 0; limit--)
+ *dst++ = *data++;
+
+ /*
+ * Restore old mapping permissions.
+ */
+ switch (oldpde & L1_TYPE_MASK) {
+ case L1_TYPE_S:
+ *pde = oldpde;
+ PTE_SYNC(pde);
+ break;
+
+ case L1_TYPE_C:
+ *pte = oldpte;
+ PTE_SYNC(pte);
+ break;
+ }
+ cpu_tlb_flushD_SE(pgva);
+ cpu_cpwait();
+
+ } while (size != 0);
+
+ /* Sync the I-cache. */
+ cpu_icache_sync_range(addr, savesize);
+}
+
+/*
+ * Write bytes to kernel address space for debugger.
+ */
+void
+db_write_bytes(vaddr_t addr, size_t size, char *data)
+{
+ extern char etext[];
+ extern char kernel_text[];
+ char *dst;
+ size_t loop;
+
+ /* If any part is in kernel text, use db_write_text() */
+ if (addr >= (vaddr_t) kernel_text && addr < (vaddr_t) etext) {
+ db_write_text(addr, size, data);
+ return;
+ }
+
+ dst = (char *)addr;
+ loop = size;
+ while (loop-- > 0) {
+ if (db_validate_address((u_int)dst)) {
+ db_printf("address %p is invalid\n", dst);
+ return;
+ }
+ *dst++ = *data++;
+ }
+ /* make sure the caches and memory are in sync */
+ cpu_icache_sync_range(addr, size);
+
+ /* In case the current page tables have been modified ... */
+ cpu_tlb_flushID();
+ cpu_cpwait();
+}
+
+void
+Debugger(void)
+{
+ asm(".word 0xe7ffffff");
+}
+
+const struct db_command db_machine_command_table[] = {
+ { "frame", db_show_frame_cmd, 0, NULL },
+ { "panic", db_show_panic_cmd, 0, NULL },
+#ifdef ARM32_DB_COMMANDS
+ ARM32_DB_COMMANDS,
+#endif
+ { NULL, NULL, 0, NULL }
+};
+
+int
+db_trapper(u_int addr, u_int inst, trapframe_t *frame, int fault_code)
+{
+
+ if (fault_code == 0) {
+ if ((inst & ~INSN_COND_MASK) == (BKPT_INST & ~INSN_COND_MASK)) {
+ kdb_trap(T_BREAKPOINT, frame);
+ frame->tf_pc += INSN_SIZE;
+ } else
+ kdb_trap(-1, frame);
+ } else
+ return (1);
+ return (0);
+}
+
+extern u_int esym;
+extern u_int end;
+
+static struct undefined_handler db_uh;
+
+void
+db_machine_init(void)
+{
+ /*
+ * We get called before malloc() is available, so supply a static
+ * struct undefined_handler.
+ */
+ db_uh.uh_handler = db_trapper;
+ install_coproc_handler_static(0, &db_uh);
+}
+
+u_int
+db_fetch_reg(int reg, db_regs_t *db_regs)
+{
+
+ switch (reg) {
+ case 0:
+ return (db_regs->tf_r0);
+ case 1:
+ return (db_regs->tf_r1);
+ case 2:
+ return (db_regs->tf_r2);
+ case 3:
+ return (db_regs->tf_r3);
+ case 4:
+ return (db_regs->tf_r4);
+ case 5:
+ return (db_regs->tf_r5);
+ case 6:
+ return (db_regs->tf_r6);
+ case 7:
+ return (db_regs->tf_r7);
+ case 8:
+ return (db_regs->tf_r8);
+ case 9:
+ return (db_regs->tf_r9);
+ case 10:
+ return (db_regs->tf_r10);
+ case 11:
+ return (db_regs->tf_r11);
+ case 12:
+ return (db_regs->tf_r12);
+ case 13:
+ return (db_regs->tf_svc_sp);
+ case 14:
+ return (db_regs->tf_svc_lr);
+ case 15:
+ return (db_regs->tf_pc);
+ default:
+ panic("db_fetch_reg: botch");
+ }
+}
+
+db_addr_t
+db_branch_taken(u_int insn, db_addr_t pc, db_regs_t *db_regs)
+{
+ u_int addr, nregs;
+
+ switch ((insn >> 24) & 0xf) {
+ case 0xa: /* b ... */
+ case 0xb: /* bl ... */
+ addr = ((insn << 2) & 0x03ffffff);
+ if (addr & 0x02000000)
+ addr |= 0xfc000000;
+ return (pc + 8 + addr);
+ case 0x7: /* ldr pc, [pc, reg, lsl #2] */
+ addr = db_fetch_reg(insn & 0xf, db_regs);
+ addr = pc + 8 + (addr << 2);
+ db_read_bytes(addr, 4, (char *)&addr);
+ return (addr);
+ case 0x1: /* mov pc, reg */
+ addr = db_fetch_reg(insn & 0xf, db_regs);
+ return (addr);
+ case 0x8: /* ldmxx reg, {..., pc} */
+ case 0x9:
+ addr = db_fetch_reg((insn >> 16) & 0xf, db_regs);
+ nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555);
+ nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333);
+ nregs = (nregs + (nregs >> 4)) & 0x0f0f;
+ nregs = (nregs + (nregs >> 8)) & 0x001f;
+ switch ((insn >> 23) & 0x3) {
+ case 0x0: /* ldmda */
+ addr = addr - 0;
+ break;
+ case 0x1: /* ldmia */
+ addr = addr + 0 + ((nregs - 1) << 2);
+ break;
+ case 0x2: /* ldmdb */
+ addr = addr - 4;
+ break;
+ case 0x3: /* ldmib */
+ addr = addr + 4 + ((nregs - 1) << 2);
+ break;
+ }
+ db_read_bytes(addr, 4, (char *)&addr);
+ return (addr);
+ default:
+ panic("branch_taken: botch");
+ }
+}
diff --git a/sys/arch/arm/arm/db_machdep.c b/sys/arch/arm/arm/db_machdep.c
new file mode 100644
index 00000000000..781ea3c387d
--- /dev/null
+++ b/sys/arch/arm/arm/db_machdep.c
@@ -0,0 +1,88 @@
+/* $OpenBSD: db_machdep.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: db_machdep.c,v 1.8 2003/07/15 00:24:41 lukem Exp $^I*/$
+
+/*
+ * Copyright (c) 1996 Mark Brinicombe
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/vnode.h>
+#include <sys/systm.h>
+
+#include <arm/db_machdep.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_output.h>
+
+
+void
+db_show_panic_cmd(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char *modif;
+{
+ int s;
+
+ s = splhigh();
+
+ db_printf("Panic string: %s\n", panicstr);
+
+ (void)splx(s);
+}
+
+
+void
+db_show_frame_cmd(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char *modif;
+{
+ struct trapframe *frame;
+
+ if (!have_addr) {
+ db_printf("frame address must be specified\n");
+ return;
+ }
+
+ frame = (struct trapframe *)addr;
+
+ db_printf("frame address = %08x ", (u_int)frame);
+ db_printf("spsr=%08x\n", frame->tf_spsr);
+ db_printf("r0 =%08x r1 =%08x r2 =%08x r3 =%08x\n",
+ frame->tf_r0, frame->tf_r1, frame->tf_r2, frame->tf_r3);
+ db_printf("r4 =%08x r5 =%08x r6 =%08x r7 =%08x\n",
+ frame->tf_r4, frame->tf_r5, frame->tf_r6, frame->tf_r7);
+ db_printf("r8 =%08x r9 =%08x r10=%08x r11=%08x\n",
+ frame->tf_r8, frame->tf_r9, frame->tf_r10, frame->tf_r11);
+ db_printf("r12=%08x r13=%08x r14=%08x r15=%08x\n",
+ frame->tf_r12, frame->tf_usr_sp, frame->tf_usr_lr, frame->tf_pc);
+ db_printf("slr=%08x\n", frame->tf_svc_lr);
+}
diff --git a/sys/arch/arm/arm/db_trace.c b/sys/arch/arm/arm/db_trace.c
new file mode 100644
index 00000000000..e43d464ee19
--- /dev/null
+++ b/sys/arch/arm/arm/db_trace.c
@@ -0,0 +1,208 @@
+/* $OpenBSD: db_trace.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: db_trace.c,v 1.8 2003/01/17 22:28:48 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2000, 2001 Ben Harris
+ * Copyright (c) 1996 Scott K. Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/param.h>
+
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <arm/armreg.h>
+#include <arm/cpufunc.h>
+#include <machine/db_machdep.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_interface.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_output.h>
+
+db_regs_t ddb_regs;
+
+#define INKERNEL(va) (((vaddr_t)(va)) >= VM_MIN_KERNEL_ADDRESS)
+
+/*
+ * APCS stack frames are awkward beasts, so I don't think even trying to use
+ * a structure to represent them is a good idea.
+ *
+ * Here's the diagram from the APCS. Increasing address is _up_ the page.
+ *
+ * save code pointer [fp] <- fp points to here
+ * return link value [fp, #-4]
+ * return sp value [fp, #-8]
+ * return fp value [fp, #-12]
+ * [saved v7 value]
+ * [saved v6 value]
+ * [saved v5 value]
+ * [saved v4 value]
+ * [saved v3 value]
+ * [saved v2 value]
+ * [saved v1 value]
+ * [saved a4 value]
+ * [saved a3 value]
+ * [saved a2 value]
+ * [saved a1 value]
+ *
+ * The save code pointer points twelve bytes beyond the start of the
+ * code sequence (usually a single STM) that created the stack frame.
+ * We have to disassemble it if we want to know which of the optional
+ * fields are actually present.
+ */
+
+#define FR_SCP (0)
+#define FR_RLV (-1)
+#define FR_RSP (-2)
+#define FR_RFP (-3)
+
+void
+db_stack_trace_print(addr, have_addr, count, modif, pr)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char *modif;
+ int (*pr) __P((const char *, ...));
+{
+ u_int32_t *frame, *lastframe;
+ char c, *cp = modif;
+ boolean_t kernel_only = TRUE;
+ boolean_t trace_thread = FALSE;
+ int scp_offset;
+
+ while ((c = *cp++) != 0) {
+ if (c == 'u')
+ kernel_only = FALSE;
+ if (c == 't')
+ trace_thread = TRUE;
+ }
+
+ if (!have_addr)
+ frame = (u_int32_t *)(DDB_REGS->tf_r11);
+ else {
+ if (trace_thread) {
+ struct proc *p;
+ struct user *u;
+ (*pr) ("trace: pid %d ", (int)addr);
+ p = pfind(addr);
+ if (p == NULL) {
+ (*pr)("not found\n");
+ return;
+ }
+ if (!(p->p_flag & P_INMEM)) {
+ (*pr)("swapped out\n");
+ return;
+ }
+ u = p->p_addr;
+#ifdef acorn26
+ frame = (u_int32_t *)(u->u_pcb.pcb_sf->sf_r11);
+#else
+ frame = (u_int32_t *)(u->u_pcb.pcb_un.un_32.pcb32_r11);
+#endif
+ (*pr)("at %p\n", frame);
+ } else
+ frame = (u_int32_t *)(addr);
+ }
+ lastframe = NULL;
+ scp_offset = -(get_pc_str_offset() >> 2);
+
+ while (count-- && frame != NULL) {
+ db_addr_t scp;
+ u_int32_t savecode;
+ int r;
+ u_int32_t *rp;
+ const char *sep;
+
+ /*
+ * In theory, the SCP isn't guaranteed to be in the function
+ * that generated the stack frame. We hope for the best.
+ */
+#ifdef __PROG26
+ scp = frame[FR_SCP] & R15_PC;
+#else
+ scp = frame[FR_SCP];
+#endif
+
+ db_printsym(scp, DB_STGY_PROC, pr);
+ (*pr)("\n\t");
+#ifdef __PROG26
+ (*pr)("scp=0x%08x rlv=0x%08x (", scp, frame[FR_RLV] & R15_PC);
+ db_printsym(frame[FR_RLV] & R15_PC, DB_STGY_PROC, pr);
+ (*pr)(")\n");
+#else
+ (*pr)("scp=0x%08x rlv=0x%08x (", scp, frame[FR_RLV]);
+ db_printsym(frame[FR_RLV], DB_STGY_PROC, pr);
+ (*pr)(")\n");
+#endif
+ (*pr)("\trsp=0x%08x rfp=0x%08x", frame[FR_RSP], frame[FR_RFP]);
+
+ savecode = ((u_int32_t *)scp)[scp_offset];
+ if ((savecode & 0x0e100000) == 0x08000000) {
+ /* Looks like an STM */
+ rp = frame - 4;
+ sep = "\n\t";
+ for (r = 10; r >= 0; r--) {
+ if (savecode & (1 << r)) {
+ (*pr)("%sr%d=0x%08x",
+ sep, r, *rp--);
+ sep = (frame - rp) % 4 == 2 ?
+ "\n\t" : " ";
+ }
+ }
+ }
+
+ (*pr)("\n");
+
+ /*
+ * Switch to next frame up
+ */
+ if (frame[FR_RFP] == 0)
+ break; /* Top of stack */
+
+ lastframe = frame;
+ frame = (u_int32_t *)(frame[FR_RFP]);
+
+ if (INKERNEL((int)frame)) {
+ /* staying in kernel */
+ if (frame <= lastframe) {
+ (*pr)("Bad frame pointer: %p\n", frame);
+ break;
+ }
+ } else if (INKERNEL((int)lastframe)) {
+ /* switch from user to kernel */
+ if (kernel_only)
+ break; /* kernel stack only */
+ } else {
+ /* in user */
+ if (frame <= lastframe) {
+ (*pr)("Bad user frame pointer: %p\n",
+ frame);
+ break;
+ }
+ }
+ }
+}
diff --git a/sys/arch/arm/arm/disassem.c b/sys/arch/arm/arm/disassem.c
new file mode 100644
index 00000000000..14a3d54ac94
--- /dev/null
+++ b/sys/arch/arm/arm/disassem.c
@@ -0,0 +1,682 @@
+/* $OpenBSD: disassem.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: disassem.c,v 1.14 2003/03/27 16:58:36 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1996 Mark Brinicombe.
+ * Copyright (c) 1996 Brini.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * db_disasm.c
+ *
+ * Kernel disassembler
+ *
+ * Created : 10/02/96
+ *
+ * Structured after the sparc/sparc/db_disasm.c by David S. Miller &
+ * Paul Kranenburg
+ *
+ * This code is not complete. Not all instructions are disassembled.
+ */
+
+#include <sys/param.h>
+
+#include <sys/systm.h>
+#include <machine/db_machdep.h>
+#include <ddb/db_access.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_interface.h>
+#include <arch/arm/arm/disassem.h>
+
+/*
+ * General instruction format
+ *
+ * insn[cc][mod] [operands]
+ *
+ * Those fields with an uppercase format code indicate that the field
+ * follows directly after the instruction before the separator i.e.
+ * they modify the instruction rather than just being an operand to
+ * the instruction. The only exception is the writeback flag which
+ * follows an operand.
+ *
+ *
+ * 2 - print Operand 2 of a data processing instruction
+ * d - destination register (bits 12-15)
+ * n - n register (bits 16-19)
+ * s - s register (bits 8-11)
+ * o - indirect register rn (bits 16-19) (used by swap)
+ * m - m register (bits 0-3)
+ * a - address operand of ldr/str instruction
+ * l - register list for ldm/stm instruction
+ * f - 1st fp operand (register) (bits 12-14)
+ * g - 2nd fp operand (register) (bits 16-18)
+ * h - 3rd fp operand (register/immediate) (bits 0-4)
+ * b - branch address
+ * t - thumb branch address (bits 24, 0-23)
+ * k - breakpoint comment (bits 0-3, 8-19)
+ * X - block transfer type
+ * Y - block transfer type (r13 base)
+ * c - comment field bits(0-23)
+ * p - saved or current status register
+ * F - PSR transfer fields
+ * D - destination-is-r15 (P) flag on TST, TEQ, CMP, CMN
+ * L - co-processor transfer size
+ * S - set status flag
+ * P - fp precision
+ * Q - fp precision (for ldf/stf)
+ * R - fp rounding
+ * v - co-processor data transfer registers + addressing mode
+ * W - writeback flag
+ * x - instruction in hex
+ * # - co-processor number
+ * y - co-processor data processing registers
+ * z - co-processor register transfer registers
+ */
+
+struct arm32_insn {
+ u_int mask;
+ u_int pattern;
+ char* name;
+ char* format;
+};
+
+static const struct arm32_insn arm32_i[] = {
+ { 0x0fffffff, 0x0ff00000, "imb", "c" }, /* Before swi */
+ { 0x0fffffff, 0x0ff00001, "imbrange", "c" }, /* Before swi */
+ { 0x0f000000, 0x0f000000, "swi", "c" },
+ { 0xfe000000, 0xfa000000, "blx", "t" }, /* Before b and bl */
+ { 0x0f000000, 0x0a000000, "b", "b" },
+ { 0x0f000000, 0x0b000000, "bl", "b" },
+ { 0x0fe000f0, 0x00000090, "mul", "Snms" },
+ { 0x0fe000f0, 0x00200090, "mla", "Snmsd" },
+ { 0x0fe000f0, 0x00800090, "umull", "Sdnms" },
+ { 0x0fe000f0, 0x00c00090, "smull", "Sdnms" },
+ { 0x0fe000f0, 0x00a00090, "umlal", "Sdnms" },
+ { 0x0fe000f0, 0x00e00090, "smlal", "Sdnms" },
+ { 0x0d700000, 0x04200000, "strt", "daW" },
+ { 0x0d700000, 0x04300000, "ldrt", "daW" },
+ { 0x0d700000, 0x04600000, "strbt", "daW" },
+ { 0x0d700000, 0x04700000, "ldrbt", "daW" },
+ { 0x0c500000, 0x04000000, "str", "daW" },
+ { 0x0c500000, 0x04100000, "ldr", "daW" },
+ { 0x0c500000, 0x04400000, "strb", "daW" },
+ { 0x0c500000, 0x04500000, "ldrb", "daW" },
+ { 0x0e1f0000, 0x080d0000, "stm", "YnWl" },/* separate out r13 base */
+ { 0x0e1f0000, 0x081d0000, "ldm", "YnWl" },/* separate out r13 base */
+ { 0x0e100000, 0x08000000, "stm", "XnWl" },
+ { 0x0e100000, 0x08100000, "ldm", "XnWl" },
+ { 0x0e1000f0, 0x00100090, "ldrb", "de" },
+ { 0x0e1000f0, 0x00000090, "strb", "de" },
+ { 0x0e1000f0, 0x001000d0, "ldrsb", "de" },
+ { 0x0e1000f0, 0x001000b0, "ldrh", "de" },
+ { 0x0e1000f0, 0x000000b0, "strh", "de" },
+ { 0x0e1000f0, 0x001000f0, "ldrsh", "de" },
+ { 0x0f200090, 0x00200090, "und", "x" }, /* Before data processing */
+ { 0x0e1000d0, 0x000000d0, "und", "x" }, /* Before data processing */
+ { 0x0ff00ff0, 0x01000090, "swp", "dmo" },
+ { 0x0ff00ff0, 0x01400090, "swpb", "dmo" },
+ { 0x0fbf0fff, 0x010f0000, "mrs", "dp" }, /* Before data processing */
+ { 0x0fb0fff0, 0x0120f000, "msr", "pFm" },/* Before data processing */
+ { 0x0fb0f000, 0x0320f000, "msr", "pF2" },/* Before data processing */
+ { 0x0ffffff0, 0x012fff10, "bx", "m" },
+ { 0x0fff0ff0, 0x016f0f10, "clz", "dm" },
+ { 0x0ffffff0, 0x012fff30, "blx", "m" },
+ { 0xfff000f0, 0xe1200070, "bkpt", "k" },
+ { 0x0de00000, 0x00000000, "and", "Sdn2" },
+ { 0x0de00000, 0x00200000, "eor", "Sdn2" },
+ { 0x0de00000, 0x00400000, "sub", "Sdn2" },
+ { 0x0de00000, 0x00600000, "rsb", "Sdn2" },
+ { 0x0de00000, 0x00800000, "add", "Sdn2" },
+ { 0x0de00000, 0x00a00000, "adc", "Sdn2" },
+ { 0x0de00000, 0x00c00000, "sbc", "Sdn2" },
+ { 0x0de00000, 0x00e00000, "rsc", "Sdn2" },
+ { 0x0df00000, 0x01100000, "tst", "Dn2" },
+ { 0x0df00000, 0x01300000, "teq", "Dn2" },
+ { 0x0de00000, 0x01400000, "cmp", "Dn2" },
+ { 0x0de00000, 0x01600000, "cmn", "Dn2" },
+ { 0x0de00000, 0x01800000, "orr", "Sdn2" },
+ { 0x0de00000, 0x01a00000, "mov", "Sd2" },
+ { 0x0de00000, 0x01c00000, "bic", "Sdn2" },
+ { 0x0de00000, 0x01e00000, "mvn", "Sd2" },
+ { 0x0ff08f10, 0x0e000100, "adf", "PRfgh" },
+ { 0x0ff08f10, 0x0e100100, "muf", "PRfgh" },
+ { 0x0ff08f10, 0x0e200100, "suf", "PRfgh" },
+ { 0x0ff08f10, 0x0e300100, "rsf", "PRfgh" },
+ { 0x0ff08f10, 0x0e400100, "dvf", "PRfgh" },
+ { 0x0ff08f10, 0x0e500100, "rdf", "PRfgh" },
+ { 0x0ff08f10, 0x0e600100, "pow", "PRfgh" },
+ { 0x0ff08f10, 0x0e700100, "rpw", "PRfgh" },
+ { 0x0ff08f10, 0x0e800100, "rmf", "PRfgh" },
+ { 0x0ff08f10, 0x0e900100, "fml", "PRfgh" },
+ { 0x0ff08f10, 0x0ea00100, "fdv", "PRfgh" },
+ { 0x0ff08f10, 0x0eb00100, "frd", "PRfgh" },
+ { 0x0ff08f10, 0x0ec00100, "pol", "PRfgh" },
+ { 0x0f008f10, 0x0e000100, "fpbop", "PRfgh" },
+ { 0x0ff08f10, 0x0e008100, "mvf", "PRfh" },
+ { 0x0ff08f10, 0x0e108100, "mnf", "PRfh" },
+ { 0x0ff08f10, 0x0e208100, "abs", "PRfh" },
+ { 0x0ff08f10, 0x0e308100, "rnd", "PRfh" },
+ { 0x0ff08f10, 0x0e408100, "sqt", "PRfh" },
+ { 0x0ff08f10, 0x0e508100, "log", "PRfh" },
+ { 0x0ff08f10, 0x0e608100, "lgn", "PRfh" },
+ { 0x0ff08f10, 0x0e708100, "exp", "PRfh" },
+ { 0x0ff08f10, 0x0e808100, "sin", "PRfh" },
+ { 0x0ff08f10, 0x0e908100, "cos", "PRfh" },
+ { 0x0ff08f10, 0x0ea08100, "tan", "PRfh" },
+ { 0x0ff08f10, 0x0eb08100, "asn", "PRfh" },
+ { 0x0ff08f10, 0x0ec08100, "acs", "PRfh" },
+ { 0x0ff08f10, 0x0ed08100, "atn", "PRfh" },
+ { 0x0f008f10, 0x0e008100, "fpuop", "PRfh" },
+ { 0x0e100f00, 0x0c000100, "stf", "QLv" },
+ { 0x0e100f00, 0x0c100100, "ldf", "QLv" },
+ { 0x0ff00f10, 0x0e000110, "flt", "PRgd" },
+ { 0x0ff00f10, 0x0e100110, "fix", "PRdh" },
+ { 0x0ff00f10, 0x0e200110, "wfs", "d" },
+ { 0x0ff00f10, 0x0e300110, "rfs", "d" },
+ { 0x0ff00f10, 0x0e400110, "wfc", "d" },
+ { 0x0ff00f10, 0x0e500110, "rfc", "d" },
+ { 0x0ff0ff10, 0x0e90f110, "cmf", "PRgh" },
+ { 0x0ff0ff10, 0x0eb0f110, "cnf", "PRgh" },
+ { 0x0ff0ff10, 0x0ed0f110, "cmfe", "PRgh" },
+ { 0x0ff0ff10, 0x0ef0f110, "cnfe", "PRgh" },
+ { 0xff100010, 0xfe000010, "mcr2", "#z" },
+ { 0x0f100010, 0x0e000010, "mcr", "#z" },
+ { 0xff100010, 0xfe100010, "mrc2", "#z" },
+ { 0x0f100010, 0x0e100010, "mrc", "#z" },
+ { 0xff000010, 0xfe000000, "cdp2", "#y" },
+ { 0x0f000010, 0x0e000000, "cdp", "#y" },
+ { 0xfe100090, 0xfc100000, "ldc2", "L#v" },
+ { 0x0e100090, 0x0c100000, "ldc", "L#v" },
+ { 0xfe100090, 0xfc000000, "stc2", "L#v" },
+ { 0x0e100090, 0x0c000000, "stc", "L#v" },
+ { 0x00000000, 0x00000000, NULL, NULL }
+};
+
+static char const arm32_insn_conditions[][4] = {
+ "eq", "ne", "cs", "cc",
+ "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt",
+ "gt", "le", "", "nv"
+};
+
+static char const insn_block_transfers[][4] = {
+ "da", "ia", "db", "ib"
+};
+
+static char const insn_stack_block_transfers[][4] = {
+ "ed", "ea", "fd", "fa"
+};
+
+static char const op_shifts[][4] = {
+ "lsl", "lsr", "asr", "ror"
+};
+
+static char const insn_fpa_rounding[][2] = {
+ "", "p", "m", "z"
+};
+
+static char const insn_fpa_precision[][2] = {
+ "s", "d", "e", "p"
+};
+
+static char const insn_fpaconstants[][8] = {
+ "0.0", "1.0", "2.0", "3.0",
+ "4.0", "5.0", "0.5", "10.0"
+};
+
+#define insn_condition(x) arm32_insn_conditions[(x >> 28) & 0x0f]
+#define insn_blktrans(x) insn_block_transfers[(x >> 23) & 3]
+#define insn_stkblktrans(x) insn_stack_block_transfers[(x >> 23) & 3]
+#define op2_shift(x) op_shifts[(x >> 5) & 3]
+#define insn_fparnd(x) insn_fpa_rounding[(x >> 5) & 0x03]
+#define insn_fpaprec(x) insn_fpa_precision[(((x >> 18) & 2)|(x >> 7)) & 1]
+#define insn_fpaprect(x) insn_fpa_precision[(((x >> 21) & 2)|(x >> 15)) & 1]
+#define insn_fpaimm(x) insn_fpaconstants[x & 0x07]
+
+/* Local prototypes */
+static void disasm_register_shift(const disasm_interface_t *di, u_int insn);
+static void disasm_print_reglist(const disasm_interface_t *di, u_int insn);
+static void disasm_insn_ldrstr(const disasm_interface_t *di, u_int insn,
+ u_int loc);
+static void disasm_insn_ldrhstrh(const disasm_interface_t *di, u_int insn,
+ u_int loc);
+static void disasm_insn_ldcstc(const disasm_interface_t *di, u_int insn,
+ u_int loc);
+static db_expr_t disassemble_readword(db_expr_t address);
+static void disassemble_printaddr(db_expr_t address);
+
+vaddr_t
+disasm(const disasm_interface_t *di, vaddr_t loc, int altfmt)
+{
+ struct arm32_insn *i_ptr = (struct arm32_insn *)&arm32_i;
+
+ u_int insn;
+ int matchp;
+ int branch;
+ char* f_ptr;
+ int fmt;
+
+ fmt = 0;
+ matchp = 0;
+ insn = di->di_readword(loc);
+
+/* di->di_printf("loc=%08x insn=%08x : ", loc, insn);*/
+
+ while (i_ptr->name) {
+ if ((insn & i_ptr->mask) == i_ptr->pattern) {
+ matchp = 1;
+ break;
+ }
+ i_ptr++;
+ }
+
+ if (!matchp) {
+ di->di_printf("und%s\t%08x\n", insn_condition(insn), insn);
+ return(loc + INSN_SIZE);
+ }
+
+ /* If instruction forces condition code, don't print it. */
+ if ((i_ptr->mask & 0xf0000000) == 0xf0000000)
+ di->di_printf("%s", i_ptr->name);
+ else
+ di->di_printf("%s%s", i_ptr->name, insn_condition(insn));
+
+ f_ptr = i_ptr->format;
+
+ /* Insert tab if there are no instruction modifiers */
+
+ if (*(f_ptr) < 'A' || *(f_ptr) > 'Z') {
+ ++fmt;
+ di->di_printf("\t");
+ }
+
+ while (*f_ptr) {
+ switch (*f_ptr) {
+ /* 2 - print Operand 2 of a data processing instruction */
+ case '2':
+ if (insn & 0x02000000) {
+ int rotate= ((insn >> 7) & 0x1e);
+
+ di->di_printf("#0x%08x",
+ (insn & 0xff) << (32 - rotate) |
+ (insn & 0xff) >> rotate);
+ } else {
+ disasm_register_shift(di, insn);
+ }
+ break;
+ /* d - destination register (bits 12-15) */
+ case 'd':
+ di->di_printf("r%d", ((insn >> 12) & 0x0f));
+ break;
+ /* D - insert 'p' if Rd is R15 */
+ case 'D':
+ if (((insn >> 12) & 0x0f) == 15)
+ di->di_printf("p");
+ break;
+ /* n - n register (bits 16-19) */
+ case 'n':
+ di->di_printf("r%d", ((insn >> 16) & 0x0f));
+ break;
+ /* s - s register (bits 8-11) */
+ case 's':
+ di->di_printf("r%d", ((insn >> 8) & 0x0f));
+ break;
+ /* o - indirect register rn (bits 16-19) (used by swap) */
+ case 'o':
+ di->di_printf("[r%d]", ((insn >> 16) & 0x0f));
+ break;
+		/* m - m register (bits 0-3) */
+ case 'm':
+ di->di_printf("r%d", ((insn >> 0) & 0x0f));
+ break;
+ /* a - address operand of ldr/str instruction */
+ case 'a':
+ disasm_insn_ldrstr(di, insn, loc);
+ break;
+ /* e - address operand of ldrh/strh instruction */
+ case 'e':
+ disasm_insn_ldrhstrh(di, insn, loc);
+ break;
+ /* l - register list for ldm/stm instruction */
+ case 'l':
+ disasm_print_reglist(di, insn);
+ break;
+ /* f - 1st fp operand (register) (bits 12-14) */
+ case 'f':
+ di->di_printf("f%d", (insn >> 12) & 7);
+ break;
+ /* g - 2nd fp operand (register) (bits 16-18) */
+ case 'g':
+ di->di_printf("f%d", (insn >> 16) & 7);
+ break;
+ /* h - 3rd fp operand (register/immediate) (bits 0-4) */
+ case 'h':
+ if (insn & (1 << 3))
+ di->di_printf("#%s", insn_fpaimm(insn));
+ else
+ di->di_printf("f%d", insn & 7);
+ break;
+ /* b - branch address */
+ case 'b':
+ branch = ((insn << 2) & 0x03ffffff);
+ if (branch & 0x02000000)
+ branch |= 0xfc000000;
+ di->di_printaddr(loc + 8 + branch);
+ break;
+ /* t - blx address */
+ case 't':
+ branch = ((insn << 2) & 0x03ffffff) |
+ (insn >> 23 & 0x00000002);
+ if (branch & 0x02000000)
+ branch |= 0xfc000000;
+ di->di_printaddr(loc + 8 + branch);
+ break;
+ /* X - block transfer type */
+ case 'X':
+ di->di_printf("%s", insn_blktrans(insn));
+ break;
+ /* Y - block transfer type (r13 base) */
+ case 'Y':
+ di->di_printf("%s", insn_stkblktrans(insn));
+ break;
+ /* c - comment field bits(0-23) */
+ case 'c':
+ di->di_printf("0x%08x", (insn & 0x00ffffff));
+ break;
+ /* k - breakpoint comment (bits 0-3, 8-19) */
+ case 'k':
+ di->di_printf("0x%04x",
+ (insn & 0x000fff00) >> 4 | (insn & 0x0000000f));
+ break;
+ /* p - saved or current status register */
+ case 'p':
+ if (insn & 0x00400000)
+ di->di_printf("spsr");
+ else
+ di->di_printf("cpsr");
+ break;
+ /* F - PSR transfer fields */
+ case 'F':
+ di->di_printf("_");
+ if (insn & (1 << 16))
+ di->di_printf("c");
+ if (insn & (1 << 17))
+ di->di_printf("x");
+ if (insn & (1 << 18))
+ di->di_printf("s");
+ if (insn & (1 << 19))
+ di->di_printf("f");
+ break;
+ /* B - byte transfer flag */
+ case 'B':
+ if (insn & 0x00400000)
+ di->di_printf("b");
+ break;
+ /* L - co-processor transfer size */
+ case 'L':
+ if (insn & (1 << 22))
+ di->di_printf("l");
+ break;
+ /* S - set status flag */
+ case 'S':
+ if (insn & 0x00100000)
+ di->di_printf("s");
+ break;
+ /* P - fp precision */
+ case 'P':
+ di->di_printf("%s", insn_fpaprec(insn));
+ break;
+ /* Q - fp precision (for ldf/stf) */
+ case 'Q':
+ break;
+ /* R - fp rounding */
+ case 'R':
+ di->di_printf("%s", insn_fparnd(insn));
+ break;
+ /* W - writeback flag */
+ case 'W':
+ if (insn & (1 << 21))
+ di->di_printf("!");
+ break;
+ /* # - co-processor number */
+ case '#':
+ di->di_printf("p%d", (insn >> 8) & 0x0f);
+ break;
+ /* v - co-processor data transfer registers+addressing mode */
+ case 'v':
+ disasm_insn_ldcstc(di, insn, loc);
+ break;
+ /* x - instruction in hex */
+ case 'x':
+ di->di_printf("0x%08x", insn);
+ break;
+ /* y - co-processor data processing registers */
+ case 'y':
+ di->di_printf("%d, ", (insn >> 20) & 0x0f);
+
+ di->di_printf("c%d, c%d, c%d", (insn >> 12) & 0x0f,
+ (insn >> 16) & 0x0f, insn & 0x0f);
+
+ di->di_printf(", %d", (insn >> 5) & 0x07);
+ break;
+ /* z - co-processor register transfer registers */
+ case 'z':
+ di->di_printf("%d, ", (insn >> 21) & 0x07);
+ di->di_printf("r%d, c%d, c%d, %d",
+ (insn >> 12) & 0x0f, (insn >> 16) & 0x0f,
+ insn & 0x0f, (insn >> 5) & 0x07);
+
+/* if (((insn >> 5) & 0x07) != 0)
+ di->di_printf(", %d", (insn >> 5) & 0x07);*/
+ break;
+ default:
+ di->di_printf("[%c - unknown]", *f_ptr);
+ break;
+ }
+ if (*(f_ptr+1) >= 'A' && *(f_ptr+1) <= 'Z')
+ ++f_ptr;
+ else if (*(++f_ptr)) {
+ ++fmt;
+ if (fmt == 1)
+ di->di_printf("\t");
+ else
+ di->di_printf(", ");
+ }
+ };
+
+ di->di_printf("\n");
+
+ return(loc + INSN_SIZE);
+}
+
+
+static void
+disasm_register_shift(const disasm_interface_t *di, u_int insn)
+{
+ di->di_printf("r%d", (insn & 0x0f));
+ if ((insn & 0x00000ff0) == 0)
+ ;
+ else if ((insn & 0x00000ff0) == 0x00000060)
+ di->di_printf(", rrx");
+ else {
+ if (insn & 0x10)
+ di->di_printf(", %s r%d", op2_shift(insn),
+ (insn >> 8) & 0x0f);
+ else
+ di->di_printf(", %s #%d", op2_shift(insn),
+ (insn >> 7) & 0x1f);
+ }
+}
+
+
+static void
+disasm_print_reglist(const disasm_interface_t *di, u_int insn)
+{
+ int loop;
+ int start;
+ int comma;
+
+ di->di_printf("{");
+ start = -1;
+ comma = 0;
+
+ for (loop = 0; loop < 17; ++loop) {
+ if (start != -1) {
+ if (loop == 16 || !(insn & (1 << loop))) {
+ if (comma)
+ di->di_printf(", ");
+ else
+ comma = 1;
+ if (start == loop - 1)
+ di->di_printf("r%d", start);
+ else
+ di->di_printf("r%d-r%d", start, loop - 1);
+ start = -1;
+ }
+ } else {
+ if (insn & (1 << loop))
+ start = loop;
+ }
+ }
+ di->di_printf("}");
+
+ if (insn & (1 << 22))
+ di->di_printf("^");
+}
+
+static void
+disasm_insn_ldrstr(const disasm_interface_t *di, u_int insn, u_int loc)
+{
+ int offset;
+
+ offset = insn & 0xfff;
+ if ((insn & 0x032f0000) == 0x010f0000) {
+ /* rA = pc, immediate index */
+ if (insn & 0x00800000)
+ loc += offset;
+ else
+ loc -= offset;
+ di->di_printaddr(loc + 8);
+ } else {
+ di->di_printf("[r%d", (insn >> 16) & 0x0f);
+ if ((insn & 0x03000fff) != 0x01000000) {
+ di->di_printf("%s, ", (insn & (1 << 24)) ? "" : "]");
+ if (!(insn & 0x00800000))
+ di->di_printf("-");
+ if (insn & (1 << 25))
+ disasm_register_shift(di, insn);
+ else
+ di->di_printf("#0x%03x", offset);
+ }
+ if (insn & (1 << 24))
+ di->di_printf("]");
+ }
+}
+
+static void
+disasm_insn_ldrhstrh(const disasm_interface_t *di, u_int insn, u_int loc)
+{
+ int offset;
+
+ offset = ((insn & 0xf00) >> 4) | (insn & 0xf);
+ if ((insn & 0x004f0000) == 0x004f0000) {
+ /* rA = pc, immediate index */
+ if (insn & 0x00800000)
+ loc += offset;
+ else
+ loc -= offset;
+ di->di_printaddr(loc + 8);
+ } else {
+ di->di_printf("[r%d", (insn >> 16) & 0x0f);
+ if ((insn & 0x01400f0f) != 0x01400000) {
+ di->di_printf("%s, ", (insn & (1 << 24)) ? "" : "]");
+ if (!(insn & 0x00800000))
+ di->di_printf("-");
+ if (insn & (1 << 22))
+ di->di_printf("#0x%02x", offset);
+ else
+ di->di_printf("r%d", (insn & 0x0f));
+ }
+ if (insn & (1 << 24))
+ di->di_printf("]");
+ }
+}
+
+static void
+disasm_insn_ldcstc(const disasm_interface_t *di, u_int insn, u_int loc)
+{
+ if (((insn >> 8) & 0xf) == 1)
+ di->di_printf("f%d, ", (insn >> 12) & 0x07);
+ else
+ di->di_printf("c%d, ", (insn >> 12) & 0x0f);
+
+ di->di_printf("[r%d", (insn >> 16) & 0x0f);
+
+ di->di_printf("%s, ", (insn & (1 << 24)) ? "" : "]");
+
+ if (!(insn & (1 << 23)))
+ di->di_printf("-");
+
+ di->di_printf("#0x%03x", (insn & 0xff) << 2);
+
+ if (insn & (1 << 24))
+ di->di_printf("]");
+
+ if (insn & (1 << 21))
+ di->di_printf("!");
+}
+
+static db_expr_t
+disassemble_readword(db_expr_t address)
+{
+ return(*((u_int *)address));
+}
+
+static void
+disassemble_printaddr(db_expr_t address)
+{
+ printf("0x%08x", address);
+}
+
+static const disasm_interface_t disassemble_di = {
+ disassemble_readword, disassemble_printaddr, printf
+};
+
+void
+disassemble(u_int address)
+{
+
+ (void)disasm(&disassemble_di, address, 0);
+}
+
+/* End of disassem.c */
diff --git a/sys/arch/arm/arm/disassem.h b/sys/arch/arm/arm/disassem.h
new file mode 100644
index 00000000000..1352864fd50
--- /dev/null
+++ b/sys/arch/arm/arm/disassem.h
@@ -0,0 +1,49 @@
+/* $OpenBSD: disassem.h,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: disassem.h,v 1.4 2001/03/04 04:15:58 matt Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Define the interface structure required by the disassembler.
+ */
+
+typedef struct {
+ db_expr_t (*di_readword)(db_expr_t);
+ void (*di_printaddr)(db_expr_t);
+ int (*di_printf)(const char *, ...);
+} disasm_interface_t;
+
+/* Prototypes for callable functions */
+
+vaddr_t disasm(const disasm_interface_t *, vaddr_t, int);
+void disassemble(u_int);
diff --git a/sys/arch/arm/arm/disksubr.c b/sys/arch/arm/arm/disksubr.c
new file mode 100644
index 00000000000..150dc5bca43
--- /dev/null
+++ b/sys/arch/arm/arm/disksubr.c
@@ -0,0 +1,362 @@
+/* $OpenBSD: disksubr.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: disksubr.c,v 1.21 1996/05/03 19:42:03 christos Exp $ */
+
+/*
+ * Copyright (c) 1996 Theo de Raadt
+ * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ufs_disksubr.c 7.16 (Berkeley) 5/4/91
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/device.h>
+#include <sys/disklabel.h>
+#include <sys/syslog.h>
+#include <sys/disk.h>
+
+#define b_cylin b_resid
+
+void
+dk_establish(struct disk *dk, struct device *dev)
+{
+}
+
+int
+try_mbr_label(dev_t dev, void (*strat)(struct buf *), struct buf *bp,
+ struct disklabel *lp, struct cpu_disklabel *osdep, char **pmsg,
+ int *bsdpartoff);
+
+
+/*
+ * Attempt to read a disk label from a device
+ * using the indicated strategy routine.
+ * The label must be partly set up before this:
+ * secpercyl, secsize and anything required for a block i/o read
+ * operation in the driver's strategy/start routines
+ * must be filled in before calling us.
+ *
+ * Returns null on success and an error string on failure.
+ */
+char *
+readdisklabel(dev_t dev, void (*strat)(struct buf *),
+ struct disklabel *lp, struct cpu_disklabel *osdep, int spoofonly)
+{
+ struct buf *bp;
+ struct disklabel *dlp;
+ char *msg = NULL;
+ int partoff, i, found;
+
+	/* minimal requirements for archetypal disk label */
+ if (lp->d_secsize == 0)
+ lp->d_secsize = DEV_BSIZE;
+ if (lp->d_secperunit == 0)
+ lp->d_secperunit = 0x1fffffff;
+ lp->d_npartitions = RAW_PART + 1;
+ for (i = 0; i < RAW_PART; i++) {
+ lp->d_partitions[i].p_size = 0;
+ lp->d_partitions[i].p_offset = 0;
+ }
+ if (lp->d_partitions[i].p_size == 0)
+ lp->d_partitions[i].p_size = 0x1fffffff;
+ lp->d_partitions[i].p_offset = 0;
+
+ /* get a buffer and initialize it */
+ bp = geteblk((int)lp->d_secsize);
+ bp->b_dev = dev;
+
+ partoff = -1;
+
+ found = try_mbr_label(dev, strat, bp, lp, osdep, &msg,
+ &partoff);
+	/* if no usable partition table was found, fall back to a raw label */
+ if (found == 0 || partoff == -1) {
+ /* no special partition table found try raw labeled disk. */
+ partoff = LABELSECTOR;
+ }
+
+ /* don't read the on-disk label if we are in spoofed-only mode */
+ if (spoofonly)
+ goto done;
+
+ /* next, dig out disk label */
+ bp->b_blkno = partoff;
+ bp->b_cylin = partoff/lp->d_secpercyl; /* XXX */
+ bp->b_bcount = lp->d_secsize;
+ bp->b_flags = B_BUSY | B_READ;
+ (*strat)(bp);
+
+ /* if successful, locate disk label within block and validate */
+ if (biowait(bp)) {
+ /* XXX we return the faked label built so far */
+ msg = "disk label I/O error";
+ goto done;
+ }
+
+ for (dlp = (struct disklabel *)bp->b_data;
+ dlp <= (struct disklabel *)(bp->b_data + lp->d_secsize -
+ sizeof(*dlp));
+ dlp = (struct disklabel *)((char *)dlp + sizeof(long))) {
+ if (dlp->d_magic != DISKMAGIC || dlp->d_magic2 != DISKMAGIC) {
+ if (msg == NULL)
+ msg = "no disk label";
+ } else if (dlp->d_npartitions > MAXPARTITIONS ||
+ dkcksum(dlp) != 0)
+ msg = "disk label corrupted";
+ else {
+ *lp = *dlp;
+ msg = NULL;
+ break;
+ }
+ }
+
+ if (msg) {
+#if defined(CD9660)
+ if (iso_disklabelspoof(dev, strat, lp) == 0)
+ msg = NULL;
+#endif
+ goto done;
+ }
+
+done:
+ bp->b_flags |= B_INVAL;
+ brelse(bp);
+ return (msg);
+}
+
+/*
+ * Check new disk label for sensibility
+ * before setting it.
+ */
+int
+setdisklabel(struct disklabel *olp, struct disklabel *nlp, u_long openmask,
+ struct cpu_disklabel *osdep)
+{
+ int i;
+ struct partition *opp, *npp;
+
+ /* sanity clause */
+ if (nlp->d_secpercyl == 0 || nlp->d_secsize == 0 ||
+ (nlp->d_secsize % DEV_BSIZE) != 0)
+ return(EINVAL);
+
+ /* special case to allow disklabel to be invalidated */
+ if (nlp->d_magic == 0xffffffff) {
+ *olp = *nlp;
+ return (0);
+ }
+
+ if (nlp->d_magic != DISKMAGIC || nlp->d_magic2 != DISKMAGIC ||
+ dkcksum(nlp) != 0)
+ return (EINVAL);
+
+ /* XXX missing check if other dos partitions will be overwritten */
+
+ while (openmask != 0) {
+ i = ffs(openmask) - 1;
+ openmask &= ~(1 << i);
+ if (nlp->d_npartitions <= i)
+ return (EBUSY);
+ opp = &olp->d_partitions[i];
+ npp = &nlp->d_partitions[i];
+ if (npp->p_offset != opp->p_offset || npp->p_size < opp->p_size)
+ return (EBUSY);
+ /*
+ * Copy internally-set partition information
+ * if new label doesn't include it. XXX
+ */
+ if (npp->p_fstype == FS_UNUSED && opp->p_fstype != FS_UNUSED) {
+ npp->p_fstype = opp->p_fstype;
+ npp->p_fsize = opp->p_fsize;
+ npp->p_frag = opp->p_frag;
+ npp->p_cpg = opp->p_cpg;
+ }
+ }
+ nlp->d_checksum = 0;
+ nlp->d_checksum = dkcksum(nlp);
+ *olp = *nlp;
+ return (0);
+}
+
+
+/*
+ * Write disk label back to device after modification.
+ * XXX cannot handle OpenBSD partitions in extended partitions!
+ */
+int
+writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
+ struct cpu_disklabel *osdep)
+{
+ struct dos_partition *dp = osdep->dosparts, *dp2;
+ struct buf *bp;
+ struct disklabel *dlp;
+ int error, dospartoff, cyl, i;
+ int ourpart = -1;
+
+ /* get a buffer and initialize it */
+ bp = geteblk((int)lp->d_secsize);
+ bp->b_dev = dev;
+
+ /* do dos partitions in the process of getting disklabel? */
+ dospartoff = 0;
+ cyl = LABELSECTOR / lp->d_secpercyl;
+ if (dp) {
+ /* read master boot record */
+ bp->b_blkno = DOSBBSECTOR;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_flags = B_BUSY | B_READ;
+ bp->b_cylin = DOSBBSECTOR / lp->d_secpercyl;
+ (*strat)(bp);
+
+ if ((error = biowait(bp)) != 0)
+ goto done;
+
+ /* XXX how do we check veracity/bounds of this? */
+ bcopy(bp->b_data + DOSPARTOFF, dp,
+ NDOSPART * sizeof(*dp));
+
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1; i++, dp2++)
+ if (get_le(&dp2->dp_size) && dp2->dp_typ ==
+ DOSPTYP_OPENBSD)
+ ourpart = i;
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1; i++, dp2++)
+ if (get_le(&dp2->dp_size) && dp2->dp_typ ==
+ DOSPTYP_FREEBSD)
+ ourpart = i;
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1; i++, dp2++)
+ if (get_le(&dp2->dp_size) && dp2->dp_typ ==
+ DOSPTYP_NETBSD)
+ ourpart = i;
+
+ if (ourpart != -1) {
+ dp2 = &dp[ourpart];
+
+ /*
+ * need sector address for SCSI/IDE,
+ * cylinder for ESDI/ST506/RLL
+ */
+ dospartoff = get_le(&dp2->dp_start);
+ cyl = DPCYL(dp2->dp_scyl, dp2->dp_ssect);
+ }
+ }
+
+ /* next, dig out disk label */
+ bp->b_blkno = dospartoff + LABELSECTOR;
+ bp->b_cylin = cyl;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_flags = B_BUSY | B_READ;
+ (*strat)(bp);
+
+ /* if successful, locate disk label within block and validate */
+ if ((error = biowait(bp)) != 0)
+ goto done;
+ for (dlp = (struct disklabel *)bp->b_data;
+ dlp <= (struct disklabel *)(bp->b_data + lp->d_secsize -
+ sizeof(*dlp));
+ dlp = (struct disklabel *)((char *)dlp + sizeof(long))) {
+ if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
+ dkcksum(dlp) == 0) {
+ *dlp = *lp;
+ bp->b_flags = B_BUSY | B_WRITE;
+ (*strat)(bp);
+ error = biowait(bp);
+ goto done;
+ }
+ }
+
+ /* Write it in the regular place. */
+ *(struct disklabel *)bp->b_data = *lp;
+ bp->b_flags = B_BUSY | B_WRITE;
+ (*strat)(bp);
+ error = biowait(bp);
+ goto done;
+
+done:
+ bp->b_flags |= B_INVAL;
+ brelse(bp);
+ return (error);
+}
+
+/*
+ * Determine the size of the transfer, and make sure it is
+ * within the boundaries of the partition. Adjust transfer
+ * if needed, and signal errors or early completion.
+ */
+int
+bounds_check_with_label(struct buf *bp, struct disklabel *lp,
+ struct cpu_disklabel *osdep, int wlabel)
+{
+#define blockpersec(count, lp) ((count) * (((lp)->d_secsize) / DEV_BSIZE))
+ struct partition *p = lp->d_partitions + DISKPART(bp->b_dev);
+ int labelsector = blockpersec(lp->d_partitions[RAW_PART].p_offset, lp) +
+ LABELSECTOR;
+ int sz = howmany(bp->b_bcount, DEV_BSIZE);
+
+ /* avoid division by zero */
+ if (lp->d_secpercyl == 0) {
+ bp->b_error = EINVAL;
+ goto bad;
+ }
+
+ if (bp->b_blkno + sz > blockpersec(p->p_size, lp)) {
+ sz = blockpersec(p->p_size, lp) - bp->b_blkno;
+ if (sz == 0) {
+ /* If exactly at end of disk, return EOF. */
+ bp->b_resid = bp->b_bcount;
+ goto done;
+ }
+ if (sz < 0) {
+ /* If past end of disk, return EINVAL. */
+ bp->b_error = EINVAL;
+ goto bad;
+ }
+ /* Otherwise, truncate request. */
+ bp->b_bcount = sz << DEV_BSHIFT;
+ }
+
+ /* Overwriting disk label? */
+ if (bp->b_blkno + blockpersec(p->p_offset, lp) <= labelsector &&
+#if LABELSECTOR != 0
+ bp->b_blkno + blockpersec(p->p_offset, lp) + sz > labelsector &&
+#endif
+ (bp->b_flags & B_READ) == 0 && !wlabel) {
+ bp->b_error = EROFS;
+ goto bad;
+ }
+
+ /* calculate cylinder for disksort to order transfers with */
+ bp->b_cylin = (bp->b_blkno + blockpersec(p->p_offset, lp)) /
+ lp->d_secpercyl;
+ return (1);
+
+bad:
+ bp->b_flags |= B_ERROR;
+done:
+ return (0);
+}
diff --git a/sys/arch/arm/arm/disksubr_mbr.c b/sys/arch/arm/arm/disksubr_mbr.c
new file mode 100644
index 00000000000..f300eb25edb
--- /dev/null
+++ b/sys/arch/arm/arm/disksubr_mbr.c
@@ -0,0 +1,208 @@
+/* $OpenBSD: disksubr_mbr.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: disksubr.c,v 1.21 1996/05/03 19:42:03 christos Exp $ */
+
+/*
+ * Copyright (c) 1996 Theo de Raadt
+ * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ufs_disksubr.c 7.16 (Berkeley) 5/4/91
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/device.h>
+#include <sys/disklabel.h>
+#include <sys/syslog.h>
+#include <sys/disk.h>
+
+#define b_cylin b_resid
+
+#define BOOT_MAGIC 0xAA55
+#define BOOT_MAGIC_OFF (DOSPARTOFF+NDOSPART*sizeof(struct dos_partition))
+
+int
+try_mbr_label(dev_t dev, void (*strat)(struct buf *), struct buf *bp,
+ struct disklabel *lp, struct cpu_disklabel *osdep, char **pmsg,
+ int *bsdpartoff);
+int
+try_mbr_label(dev_t dev, void (*strat)(struct buf *), struct buf *bp,
+ struct disklabel *lp, struct cpu_disklabel *osdep, char **pmsg,
+ int *bsdpartoff)
+{
+ struct dos_partition *dp = osdep->dosparts, *dp2;
+ char *cp;
+ int cyl, n = 0, i, ourpart = -1;
+ int dospartoff = -1;
+
+ /* MBR type disklabel */
+ /* do dos partitions in the process of getting disklabel? */
+ cyl = LABELSECTOR / lp->d_secpercyl;
+ if (dp) {
+ daddr_t part_blkno = DOSBBSECTOR;
+ unsigned long extoff = 0;
+ int wander = 1, loop = 0;
+
+ /*
+ * Read dos partition table, follow extended partitions.
+ * Map the partitions to disklabel entries i-p
+ */
+ while (wander && n < 8 && loop < 8) {
+ loop++;
+ wander = 0;
+ if (part_blkno < extoff)
+ part_blkno = extoff;
+
+ /* read boot record */
+ bp->b_blkno = part_blkno;
+ bp->b_bcount = lp->d_secsize;
+ bp->b_flags = B_BUSY | B_READ;
+ bp->b_cylin = part_blkno / lp->d_secpercyl;
+ (*strat)(bp);
+
+ /* if successful, wander through dos partition table */
+ if (biowait(bp)) {
+ *pmsg = "dos partition I/O error";
+ return 0;
+ }
+ bcopy(bp->b_data + DOSPARTOFF, dp, NDOSPART * sizeof(*dp));
+
+ if (ourpart == -1) {
+ /* Search for our MBR partition */
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1;
+ i++, dp2++)
+ if (get_le(&dp2->dp_size) &&
+ dp2->dp_typ == DOSPTYP_OPENBSD)
+ ourpart = i;
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1;
+ i++, dp2++)
+ if (get_le(&dp2->dp_size) &&
+ dp2->dp_typ == DOSPTYP_FREEBSD)
+ ourpart = i;
+ for (dp2=dp, i=0; i < NDOSPART && ourpart == -1;
+ i++, dp2++)
+ if (get_le(&dp2->dp_size) &&
+ dp2->dp_typ == DOSPTYP_NETBSD)
+ ourpart = i;
+ if (ourpart == -1)
+ goto donot;
+ /*
+ * This is our MBR partition. need sector address
+ * for SCSI/IDE, cylinder for ESDI/ST506/RLL
+ */
+ dp2 = &dp[ourpart];
+ dospartoff = get_le(&dp2->dp_start) + part_blkno;
+ cyl = DPCYL(dp2->dp_scyl, dp2->dp_ssect);
+
+ /* XXX build a temporary disklabel */
+ lp->d_partitions[0].p_size = get_le(&dp2->dp_size);
+ lp->d_partitions[0].p_offset =
+ get_le(&dp2->dp_start) + part_blkno;
+ if (lp->d_ntracks == 0)
+ lp->d_ntracks = dp2->dp_ehd + 1;
+ if (lp->d_nsectors == 0)
+ lp->d_nsectors = DPSECT(dp2->dp_esect);
+ if (lp->d_secpercyl == 0)
+ lp->d_secpercyl = lp->d_ntracks *
+ lp->d_nsectors;
+ }
+donot:
+ /*
+ * In case the disklabel read below fails, we want to
+ * provide a fake label in i-p.
+ */
+ for (dp2=dp, i=0; i < NDOSPART && n < 8; i++, dp2++) {
+ struct partition *pp = &lp->d_partitions[8+n];
+
+ if (dp2->dp_typ == DOSPTYP_OPENBSD)
+ continue;
+ if (get_le(&dp2->dp_size) > lp->d_secperunit)
+ continue;
+ if (get_le(&dp2->dp_size))
+ pp->p_size = get_le(&dp2->dp_size);
+ if (get_le(&dp2->dp_start))
+ pp->p_offset =
+ get_le(&dp2->dp_start) + part_blkno;
+
+ switch (dp2->dp_typ) {
+ case DOSPTYP_UNUSED:
+ for (cp = (char *)dp2;
+ cp < (char *)(dp2 + 1); cp++)
+ if (*cp)
+ break;
+ /*
+ * Was it all zeroes? If so, it is
+ * an unused entry that we don't
+ * want to show.
+ */
+ if (cp == (char *)(dp2 + 1))
+ continue;
+ lp->d_partitions[8 + n++].p_fstype =
+ FS_UNUSED;
+ break;
+
+ case DOSPTYP_LINUX:
+ pp->p_fstype = FS_EXT2FS;
+ n++;
+ break;
+
+ case DOSPTYP_FAT12:
+ case DOSPTYP_FAT16S:
+ case DOSPTYP_FAT16B:
+ case DOSPTYP_FAT16C:
+ case DOSPTYP_FAT32:
+ pp->p_fstype = FS_MSDOS;
+ n++;
+ break;
+ case DOSPTYP_EXTEND:
+ case DOSPTYP_EXTENDL:
+ part_blkno = get_le(&dp2->dp_start) + extoff;
+ if (!extoff) {
+ extoff = get_le(&dp2->dp_start);
+ part_blkno = 0;
+ }
+ wander = 1;
+ break;
+ default:
+ pp->p_fstype = FS_OTHER;
+ n++;
+ break;
+ }
+ }
+ }
+ lp->d_bbsize = 8192;
+ lp->d_sbsize = 64*1024; /* XXX ? */
+ lp->d_npartitions = MAXPARTITIONS;
+ }
+
+	/* if no partitions found, return failure */
+ if (n == 0 && dospartoff == -1)
+ return 0;
+ *bsdpartoff = dospartoff + LABELSECTOR;
+ return 1;
+}
diff --git a/sys/arch/arm/arm/exception.S b/sys/arch/arm/arm/exception.S
new file mode 100644
index 00000000000..d0fedb6da45
--- /dev/null
+++ b/sys/arch/arm/arm/exception.S
@@ -0,0 +1,380 @@
+/* $OpenBSD: exception.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: exception.S,v 1.13 2003/10/31 16:30:15 scw Exp $^I*/$
+
+/*
+ * Copyright (c) 1994-1997 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * exception.S
+ *
+ * Low level handlers for exception vectors
+ *
+ * Created : 24/09/94
+ *
+ * Based on kate/display/abort.s
+ */
+
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/frame.h>
+#include "assym.h"
+
+ .text
+ .align 0
+
+AST_ALIGNMENT_FAULT_LOCALS
+
+/*
+ * reset_entry:
+ *
+ * Handler for Reset exception.
+ */
+ASENTRY_NP(reset_entry)
+ adr r0, Lreset_panicmsg
+ mov r1, lr
+ bl _C_LABEL(panic)
+ /* NOTREACHED */
+Lreset_panicmsg:
+ .asciz "Reset vector called, LR = 0x%08x"
+ .balign 4
+
+/*
+ * swi_entry
+ *
+ * Handler for the Software Interrupt exception.
+ */
+ASENTRY_NP(swi_entry)
+ PUSHFRAME
+ ENABLE_ALIGNMENT_FAULTS
+
+ mov r0, sp /* Pass the frame to any function */
+ bl _C_LABEL(swi_handler) /* It's a SWI ! */
+
+ DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
+ PULLFRAME
+ movs pc, lr /* Exit */
+
+/*
+ * prefetch_abort_entry:
+ *
+ * Handler for the Prefetch Abort exception.
+ */
+ASENTRY_NP(prefetch_abort_entry)
+#ifdef __XSCALE__
+ nop /* Make absolutely sure any pending */
+ nop /* imprecise aborts have occurred. */
+#endif
+ sub lr, lr, #0x00000004 /* Adjust the lr */
+
+ PUSHFRAMEINSVC
+ ENABLE_ALIGNMENT_FAULTS
+
+ ldr r1, Lprefetch_abort_handler_address
+ adr lr, exception_exit
+ mov r0, sp /* pass the stack pointer as r0 */
+ ldr pc, [r1]
+
+Lprefetch_abort_handler_address:
+ .word _C_LABEL(prefetch_abort_handler_address)
+
+ .data
+ .global _C_LABEL(prefetch_abort_handler_address)
+
+_C_LABEL(prefetch_abort_handler_address):
+ .word abortprefetch
+
+ .text
+abortprefetch:
+ adr r0, abortprefetchmsg
+ b _C_LABEL(panic)
+
+abortprefetchmsg:
+ .asciz "abortprefetch"
+ .align 0
+
+/*
+ * data_abort_entry:
+ *
+ * Handler for the Data Abort exception.
+ */
+ASENTRY_NP(data_abort_entry)
+#ifdef __XSCALE__
+ nop /* Make absolutely sure any pending */
+ nop /* imprecise aborts have occurred. */
+#endif
+ sub lr, lr, #0x00000008 /* Adjust the lr */
+
+ PUSHFRAMEINSVC /* Push trap frame and switch */
+ /* to SVC32 mode */
+ ENABLE_ALIGNMENT_FAULTS
+
+ ldr r1, Ldata_abort_handler_address
+ adr lr, exception_exit
+ mov r0, sp /* pass the stack pointer as r0 */
+ ldr pc, [r1]
+
+Ldata_abort_handler_address:
+ .word _C_LABEL(data_abort_handler_address)
+
+ .data
+ .global _C_LABEL(data_abort_handler_address)
+_C_LABEL(data_abort_handler_address):
+ .word abortdata
+
+ .text
+abortdata:
+ adr r0, abortdatamsg
+ b _C_LABEL(panic)
+
+abortdatamsg:
+ .asciz "abortdata"
+ .align 0
+
+/*
+ * address_exception_entry:
+ *
+ * Handler for the Address Exception exception.
+ *
+ * NOTE: This exception isn't really used on arm32. We
+ * print a warning message to the console and then treat
+ * it like a Data Abort.
+ */
+ASENTRY_NP(address_exception_entry)
+ mrs r1, cpsr_all
+ mrs r2, spsr_all
+ mov r3, lr
+ adr r0, Laddress_exception_msg
+ bl _C_LABEL(printf) /* XXX CLOBBERS LR!! */
+ b data_abort_entry
+Laddress_exception_msg:
+ .asciz "Address Exception CPSR=0x%08x SPSR=0x%08x LR=0x%08x\n"
+ .balign 4
+
+/*
+ * General exception exit handler
+ * (Placed here to be within range of all the references to it)
+ *
+ * It exits straight away if not returning to USR mode.
+ * This loops around delivering any pending ASTs.
+ * Interrupts are disabled at suitable points to avoid ASTs
+ * being posted between testing and exit to user mode.
+ *
+ * This function uses PULLFRAMEFROMSVCANDEXIT and
+ * DO_AST_AND_RESTORE_ALIGNMENT_FAULTS thus should
+ * only be called if the exception handler used PUSHFRAMEINSVC
+ * followed by ENABLE_ALIGNMENT_FAULTS.
+ */
+
+exception_exit:
+ DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
+ PULLFRAMEFROMSVCANDEXIT
+
+/*
+ * undefined_entry:
+ *
+ * Handler for the Undefined Instruction exception.
+ *
+ * We indirect the undefined vector via the handler address
+ * in the data area. Entry to the undefined handler must
+ * look like direct entry from the vector.
+ */
+ASENTRY_NP(undefined_entry)
+#ifdef IPKDB
+/*
+ * IPKDB must be hooked in at the earliest possible entry point.
+ *
+ */
+/*
+ * Make room for all registers saving real r0-r7 and r15.
+ * The remaining registers are updated later.
+ */
+ stmfd sp!, {r0,r1} /* psr & spsr */
+ stmfd sp!, {lr} /* pc */
+ stmfd sp!, {r0-r14} /* r0-r7, r8-r14 */
+/*
+ * Get previous psr.
+ */
+ mrs r7, cpsr_all
+ mrs r0, spsr_all
+ str r0, [sp, #(16*4)]
+/*
+ * Test for user mode.
+ */
+ tst r0, #0xf
+ bne .Lprenotuser_push
+ add r1, sp, #(8*4)
+ stmia r1,{r8-r14}^ /* store user mode r8-r14*/
+ b .Lgoipkdb
+/*
+ * Switch to previous mode to get r8-r13.
+ */
+.Lprenotuser_push:
+ orr r0, r0, #(I32_bit) /* disable interrupts */
+ msr cpsr_all, r0
+ mov r1, r8
+ mov r2, r9
+ mov r3, r10
+ mov r4, r11
+ mov r5, r12
+ mov r6, r13
+ msr cpsr_all, r7 /* back to undefined mode */
+ add r8, sp, #(8*4)
+ stmia r8, {r1-r6} /* r8-r13 */
+/*
+ * Now back to previous mode to get r14 and spsr.
+ */
+ msr cpsr_all, r0
+ mov r1, r14
+ mrs r2, spsr
+ msr cpsr_all, r7 /* back to undefined mode */
+ str r1, [sp, #(14*4)] /* r14 */
+ str r2, [sp, #(17*4)] /* spsr */
+/*
+ * Now to IPKDB.
+ */
+.Lgoipkdb:
+ mov r0, sp
+ bl _C_LABEL(ipkdb_trap_glue)
+ ldr r1, .Lipkdb_trap_return
+ str r0,[r1]
+
+/*
+ * Have to load all registers from the stack.
+ *
+ * Start with spsr and pc.
+ */
+ ldr r0, [sp, #(16*4)] /* spsr */
+ ldr r1, [sp, #(15*4)] /* r15 */
+ msr spsr_all, r0
+ mov r14, r1
+/*
+ * Test for user mode.
+ */
+ tst r0, #0xf
+ bne .Lprenotuser_pull
+ add r1, sp, #(8*4)
+ ldmia r1, {r8-r14}^ /* load user mode r8-r14 */
+ b .Lpull_r0r7
+.Lprenotuser_pull:
+/*
+ * Now previous mode spsr and r14.
+ */
+ ldr r1, [sp, #(17*4)] /* spsr */
+ ldr r2, [sp, #(14*4)] /* r14 */
+ orr r0, r0, #(I32_bit)
+ msr cpsr_all, r0 /* switch to previous mode */
+ msr spsr_all, r1
+ mov r14, r2
+ msr cpsr_all, r7 /* back to undefined mode */
+/*
+ * Now r8-r13.
+ */
+ add r8, sp, #(8*4)
+ ldmia r8, {r1-r6} /* r8-r13 */
+ msr cpsr_all, r0
+ mov r8, r1
+ mov r9, r2
+ mov r10, r3
+ mov r11, r4
+ mov r12, r5
+ mov r13, r6
+ msr cpsr_all, r7
+.Lpull_r0r7:
+/*
+ * Now the rest of the registers.
+ */
+ ldr r1,Lipkdb_trap_return
+ ldr r0,[r1]
+ tst r0,r0
+ ldmfd sp!, {r0-r7} /* r0-r7 */
+ add sp, sp, #(10*4) /* adjust sp */
+
+/*
+ * Did IPKDB handle it?
+ */
+ movnes pc, lr /* return */
+
+#endif
+ stmfd sp!, {r0, r1}
+ ldr r0, Lundefined_handler_indirection
+ ldr r1, [sp], #0x0004
+ str r1, [r0, #0x0000]
+ ldr r1, [sp], #0x0004
+ str r1, [r0, #0x0004]
+ ldmia r0, {r0, r1, pc}
+
+#ifdef IPKDB
+Lipkdb_trap_return:
+ .word Lipkdb_trap_return_data
+#endif
+
+Lundefined_handler_indirection:
+ .word Lundefined_handler_indirection_data
+
+/*
+ * assembly bounce code for calling the kernel
+ * undefined instruction handler. This uses
+ * a standard trap frame and is called in SVC mode.
+ */
+
+ENTRY_NP(undefinedinstruction_bounce)
+ PUSHFRAMEINSVC
+ ENABLE_ALIGNMENT_FAULTS
+
+ mov r0, sp
+ adr lr, exception_exit
+ b _C_LABEL(undefinedinstruction)
+
+ .data
+ .align 0
+
+#ifdef IPKDB
+Lipkdb_trap_return_data:
+ .word 0
+#endif
+
+/*
+ * Indirection data
+ * 2 words use for preserving r0 and r1
+ * 3rd word contains the undefined handler address.
+ */
+
+Lundefined_handler_indirection_data:
+ .word 0
+ .word 0
+
+ .global _C_LABEL(undefined_handler_address)
+_C_LABEL(undefined_handler_address):
+ .word _C_LABEL(undefinedinstruction_bounce)
diff --git a/sys/arch/arm/arm/fault.c b/sys/arch/arm/arm/fault.c
new file mode 100644
index 00000000000..0841d4d66f4
--- /dev/null
+++ b/sys/arch/arm/arm/fault.c
@@ -0,0 +1,837 @@
+/* $OpenBSD: fault.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: fault.c,v 1.46 2004/01/21 15:39:21 skrll Exp $^I*/$
+
+/*
+ * Copyright 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1994-1997 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * fault.c
+ *
+ * Fault handlers
+ *
+ * Created : 28/11/94
+ */
+
+#include <sys/types.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/kernel.h>
+#include <sys/signalvar.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <arm/cpuconf.h>
+
+#include <machine/frame.h>
+#include <arm/katelib.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+#if defined(DDB) || defined(KGDB)
+#include <machine/db_machdep.h>
+#ifdef KGDB
+#include <sys/kgdb.h>
+#endif
+#if !defined(DDB)
+#define kdb_trap kgdb_trap
+#endif
+#endif
+
+#include <arch/arm/arm/disassem.h>
+#include <arm/machdep.h>
+
+/* In fusu.S: special onfault target used by [fs]uswintr (no page faulting). */
+extern char fusubailout[];
+
+#ifdef DEBUG
+int last_fault_code;	/* For the benefit of pmap_fault_fixup() */
+#endif
+
+#if defined(CPU_ARM3) || defined(CPU_ARM6) || \
+    defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
+/* These CPUs may need data/prefetch abort fixups */
+#define CPU_ABORT_FIXUP_REQUIRED
+#endif
+
+/*
+ * Signal description filled in by the individual data abort handlers
+ * and delivered by data_abort_handler() through its do_trapsignal path.
+ */
+struct sigdata {
+	int signo;	/* signal number (SIGBUS, SIGSEGV, ...) */
+	int code;	/* siginfo code (BUS_ADRALN, SEGV_MAPERR, ...) */
+	vaddr_t addr;	/* faulting address */
+	int trap;	/* fault status register (FSR) value */
+};
+
+/*
+ * Per-fault-type dispatch entry: handler function (NULL means the fault
+ * is a normal VM fault handled inline in data_abort_handler()) plus a
+ * human-readable description used in fatal-abort diagnostics.
+ */
+struct data_abort {
+	int (*func)(trapframe_t *, u_int, u_int, struct proc *,
+	    struct sigdata *);
+	const char *desc;
+};
+
+static int dab_fatal(trapframe_t *, u_int, u_int, struct proc *,
+    struct sigdata *sd);
+static int dab_align(trapframe_t *, u_int, u_int, struct proc *,
+    struct sigdata *sd);
+static int dab_buserr(trapframe_t *, u_int, u_int, struct proc *,
+    struct sigdata *sd);
+
+/* Indexed by (fsr & FAULT_TYPE_MASK); order follows the FSR encoding. */
+static const struct data_abort data_aborts[] = {
+	{dab_fatal,	"Vector Exception"},
+	{dab_align,	"Alignment Fault 1"},
+	{dab_fatal,	"Terminal Exception"},
+	{dab_align,	"Alignment Fault 3"},
+	{dab_buserr,	"External Linefetch Abort (S)"},
+	{NULL,		"Translation Fault (S)"},
+	{dab_buserr,	"External Linefetch Abort (P)"},
+	{NULL,		"Translation Fault (P)"},
+	{dab_buserr,	"External Non-Linefetch Abort (S)"},
+	{NULL,		"Domain Fault (S)"},
+	{dab_buserr,	"External Non-Linefetch Abort (P)"},
+	{NULL,		"Domain Fault (P)"},
+	{dab_buserr,	"External Translation Abort (L1)"},
+	{NULL,		"Permission Fault (S)"},
+	{dab_buserr,	"External Translation Abort (L2)"},
+	{NULL,		"Permission Fault (P)"}
+};
+
+/* Determine if a fault came from user mode */
+#define TRAP_USERMODE(tf)	((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE)
+
+/* Determine if 'x' is a permission fault */
+#define IS_PERMISSION_FAULT(x)					\
+	(((1 << ((x) & FAULT_TYPE_MASK)) &			\
+	  ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0)
+
+/*
+ * Run the CPU-specific data abort fixup routine, if this CPU family
+ * needs one (ARM3/ARM6/ARM7/ARM7TDMI).  On fixup failure the failure is
+ * reported, and a kernel-mode failure is fatal.  On CPUs that need no
+ * fixup this is a constant ABORT_FIXUP_OK.
+ *
+ * Note: the last parameter is named 'l' (NetBSD lwp heritage) but is
+ * the current proc; it is only forwarded to dab_fatal().
+ */
+static __inline int
+data_abort_fixup(trapframe_t *tf, u_int fsr, u_int far, struct proc *l)
+{
+#ifdef CPU_ABORT_FIXUP_REQUIRED
+	int error;
+
+	/* Call the cpu specific data abort fixup routine */
+	error = cpu_dataabt_fixup(tf);
+	if (__predict_true(error != ABORT_FIXUP_FAILED))
+		return (error);
+
+	/*
+	 * Oops, couldn't fix up the instruction
+	 */
+	printf("data_abort_fixup: fixup for %s mode data abort failed.\n",
+	    TRAP_USERMODE(tf) ? "user" : "kernel");
+	printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc,
+	    *((u_int *)tf->tf_pc));
+	disassemble(tf->tf_pc);
+
+	/* Die now if this happened in kernel mode */
+	if (!TRAP_USERMODE(tf))
+		dab_fatal(tf, fsr, far, l, NULL);
+
+	return (error);
+#else
+	return (ABORT_FIXUP_OK);
+#endif /* CPU_ABORT_FIXUP_REQUIRED */
+}
+
+/*
+ * data_abort_handler:
+ *
+ *	Entry point (from exception.S) for all data aborts, user or
+ *	kernel mode.  Reads FAR/FSR, dispatches fatal/alignment/bus-error
+ *	faults through the data_aborts[] table, and resolves the ordinary
+ *	MMU faults via pmap_fault_fixup() or uvm_fault(), delivering a
+ *	signal to the process when the fault cannot be repaired.
+ */
+void
+data_abort_handler(trapframe_t *tf)
+{
+	struct vm_map *map;
+	struct pcb *pcb;
+	struct proc *p;
+	u_int user, far, fsr;
+	vm_prot_t ftype;
+	void *onfault;
+	vaddr_t va;
+	int error;
+	union sigval sv;
+	struct sigdata sd;
+
+	/* Grab FAR/FSR before enabling interrupts */
+	far = cpu_faultaddress();
+	fsr = cpu_faultstatus();
+
+	/* Update vmmeter statistics */
+	uvmexp.traps++;
+
+	/* Re-enable interrupts if they were enabled previously */
+	if (__predict_true((tf->tf_spsr & I32_bit) == 0))
+		enable_interrupts(I32_bit);
+
+	/* Get the current proc structure or proc0 if there is none */
+	p = (curproc != NULL) ? curproc : &proc0;
+
+	/* Data abort came from user mode? */
+	user = TRAP_USERMODE(tf);
+
+	/* Grab the current pcb */
+	pcb = &p->p_addr->u_pcb;
+
+	/* Invoke the appropriate handler, if necessary */
+	if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) {
+		if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far, p,
+		    &sd)) {
+			printf("data abort trap fsr %x far %x pc %x\n",
+			    fsr, far, tf->tf_pc);
+			goto do_trapsignal;
+		}
+		goto out;
+	}
+
+	/*
+	 * At this point, we're dealing with one of the following data aborts:
+	 *
+	 *  FAULT_TRANS_S  - Translation -- Section
+	 *  FAULT_TRANS_P  - Translation -- Page
+	 *  FAULT_DOMAIN_S - Domain -- Section
+	 *  FAULT_DOMAIN_P - Domain -- Page
+	 *  FAULT_PERM_S   - Permission -- Section
+	 *  FAULT_PERM_P   - Permission -- Page
+	 *
+	 * These are the main virtual memory-related faults signalled by
+	 * the MMU.
+	 */
+
+	/* fusubailout is used by [fs]uswintr to avoid page faulting */
+	if (__predict_false(pcb->pcb_onfault == fusubailout)) {
+		tf->tf_r0 = EFAULT;
+		tf->tf_pc = (register_t)pcb->pcb_onfault;
+		return;
+	}
+
+	if (user)
+		p->p_addr->u_pcb.pcb_tf = tf;
+
+	/*
+	 * Make sure the Program Counter is sane. We could fall foul of
+	 * someone executing Thumb code, in which case the PC might not
+	 * be word-aligned. This would cause a kernel alignment fault
+	 * further down if we have to decode the current instruction.
+	 * XXX: It would be nice to be able to support Thumb at some point.
+	 */
+	if (__predict_false((tf->tf_pc & 3) != 0)) {
+		if (user) {
+			/*
+			 * Give the user an illegal instruction signal.
+			 */
+			/* Deliver a SIGILL to the process */
+			sd.signo = SIGILL;
+			sd.code = ILL_ILLOPC;
+			sd.addr = far;
+			sd.trap = fsr;
+			goto do_trapsignal;
+		}
+
+		/*
+		 * The kernel never executes Thumb code.
+		 */
+		printf("\ndata_abort_fault: Misaligned Kernel-mode "
+		    "Program Counter\n");
+		dab_fatal(tf, fsr, far, p, NULL);
+	}
+
+	/* See if the cpu state needs to be fixed up */
+	switch (data_abort_fixup(tf, fsr, far, p)) {
+	case ABORT_FIXUP_RETURN:
+		return;
+	case ABORT_FIXUP_FAILED:
+		/* Deliver a SIGILL to the process */
+		sd.signo = SIGILL;
+		sd.code = ILL_ILLOPC;
+		sd.addr = far;
+		sd.trap = fsr;
+		goto do_trapsignal;
+	default:
+		break;
+	}
+
+	va = trunc_page((vaddr_t)far);
+
+	/*
+	 * It is only a kernel address space fault iff:
+	 *	1. user == 0  and
+	 *	2. pcb_onfault not set or
+	 *	3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction.
+	 */
+	if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS ||
+	    (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) &&
+	    __predict_true((pcb->pcb_onfault == NULL ||
+	     (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) {
+		map = kernel_map;
+
+		/* Was the fault due to the FPE/IPKDB ? */
+		if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) {
+			sd.signo = SIGSEGV;
+			sd.code = SEGV_ACCERR;
+			sd.addr = far;
+			sd.trap = fsr;
+
+			/*
+			 * Force exit via userret()
+			 * This is necessary as the FPE is an extension to
+			 * userland that actually runs in a privileged mode
+			 * but uses USR mode permissions for its accesses.
+			 */
+			user = 1;
+			goto do_trapsignal;
+		}
+	} else {
+		map = &p->p_vmspace->vm_map;
+#if 0
+		if (l->l_flag & L_SA) {
+			KDASSERT(l->l_proc->p_sa != NULL);
+			l->l_proc->p_sa->sa_vp_faultaddr = (vaddr_t)far;
+			l->l_flag |= L_SA_PAGEFAULT;
+		}
+#endif
+	}
+
+	/*
+	 * We need to know whether the page should be mapped
+	 * as R or R/W. The MMU does not give us the info as
+	 * to whether the fault was caused by a read or a write.
+	 *
+	 * However, we know that a permission fault can only be
+	 * the result of a write to a read-only location, so
+	 * we can deal with those quickly.
+	 *
+	 * Otherwise we need to disassemble the instruction
+	 * responsible to determine if it was a write.
+	 */
+	if (IS_PERMISSION_FAULT(fsr))
+		ftype = VM_PROT_WRITE;
+	else {
+		u_int insn = ReadWord(tf->tf_pc);
+
+		if (((insn & 0x0c100000) == 0x04000000) ||	/* STR/STRB */
+		    ((insn & 0x0e1000b0) == 0x000000b0) ||	/* STRH/STRD */
+		    ((insn & 0x0a100000) == 0x08000000))	/* STM/CDT */
+			ftype = VM_PROT_WRITE;
+		else
+		if ((insn & 0x0fb00ff0) == 0x01000090)		/* SWP */
+			ftype = VM_PROT_READ | VM_PROT_WRITE;
+		else
+			ftype = VM_PROT_READ;
+	}
+
+	/*
+	 * See if the fault is as a result of ref/mod emulation,
+	 * or domain mismatch.
+	 */
+#ifdef DEBUG
+	last_fault_code = fsr;
+#endif
+	if (pmap_fault_fixup(map->pmap, va, ftype, user)) {
+#if 0
+		if (map != kernel_map)
+			p->p_flag &= ~L_SA_PAGEFAULT;
+#endif
+		goto out;
+	}
+
+	/* Page faults cannot be serviced from interrupt context. */
+	if (__predict_false(current_intr_depth > 0)) {
+		if (pcb->pcb_onfault) {
+			tf->tf_r0 = EINVAL;
+			tf->tf_pc = (register_t) pcb->pcb_onfault;
+			return;
+		}
+		printf("\nNon-emulated page fault with intr_depth > 0\n");
+		dab_fatal(tf, fsr, far, p, NULL);
+	}
+
+	/* Clear onfault so a nested fault inside uvm_fault() is fatal. */
+	onfault = pcb->pcb_onfault;
+	pcb->pcb_onfault = NULL;
+	error = uvm_fault(map, va, 0, ftype);
+	pcb->pcb_onfault = onfault;
+
+#if 0
+	if (map != kernel_map)
+		p->p_flag &= ~L_SA_PAGEFAULT;
+#endif
+
+	if (__predict_true(error == 0)) {
+		if (user)
+			uvm_grow(p, va); /* Record any stack growth */
+		goto out;
+	}
+
+	/* Kernel-mode failure: bounce to onfault handler or die. */
+	if (user == 0) {
+		if (pcb->pcb_onfault) {
+			tf->tf_r0 = error;
+			tf->tf_pc = (register_t) pcb->pcb_onfault;
+			return;
+		}
+
+		printf("\nuvm_fault(%p, %lx, %x, 0) -> %x\n", map, va, ftype,
+		    error);
+		dab_fatal(tf, fsr, far, p, NULL);
+	}
+
+
+	sv.sival_ptr = (u_int32_t *)far;
+	if (error == ENOMEM) {
+		printf("UVM: pid %d (%s), uid %d killed: "
+		    "out of swap\n", p->p_pid, p->p_comm,
+		    (p->p_cred && p->p_ucred) ?
+		     p->p_ucred->cr_uid : -1);
+		sd.signo = SIGKILL;
+	} else
+		sd.signo = SIGSEGV;
+
+	sd.code = (error == EACCES) ? SEGV_ACCERR : SEGV_MAPERR;
+	sd.addr = far;
+	sd.trap = fsr;
+do_trapsignal:
+	sv.sival_int = sd.addr;
+	trapsignal(p, sd.signo, sd.trap, sd.code, sv);
+out:
+	/* If returning to user mode, make sure to invoke userret() */
+	if (user)
+		userret(p);
+}
+
+/*
+ * dab_fatal() handles the following data aborts:
+ *
+ *  FAULT_WRTBUF_0 - Vector Exception
+ *  FAULT_WRTBUF_1 - Terminal Exception
+ *
+ * We should never see these on a properly functioning system.
+ *
+ * This function is also called by the other handlers if they
+ * detect a fatal problem.
+ *
+ * Note: If 'p' is NULL, we assume we're dealing with a prefetch abort
+ * (in that case fsr is unused and tf_pc is the faulting address).
+ * This function does not return: it ends in panic().
+ */
+static int
+dab_fatal(trapframe_t *tf, u_int fsr, u_int far, struct proc *p,
+    struct sigdata *sd)
+{
+	const char *mode;
+
+	mode = TRAP_USERMODE(tf) ? "user" : "kernel";
+
+	if (p != NULL) {
+		printf("Fatal %s mode data abort: '%s'\n", mode,
+		    data_aborts[fsr & FAULT_TYPE_MASK].desc);
+		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
+		if ((fsr & FAULT_IMPRECISE) == 0)
+			printf("%08x, ", far);
+		else
+			printf("Invalid,  ");
+		printf("spsr=%08x\n", tf->tf_spsr);
+	} else {
+		printf("Fatal %s mode prefetch abort at 0x%08x\n",
+		    mode, tf->tf_pc);
+		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
+	}
+
+	/* Dump the full register state for post-mortem analysis. */
+	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
+	    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
+	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
+	    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
+	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
+	    tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
+	printf("r12=%08x, ", tf->tf_r12);
+
+	if (TRAP_USERMODE(tf))
+		printf("usp=%08x, ulr=%08x",
+		    tf->tf_usr_sp, tf->tf_usr_lr);
+	else
+		printf("ssp=%08x, slr=%08x",
+		    tf->tf_svc_sp, tf->tf_svc_lr);
+	printf(", pc =%08x\n\n", tf->tf_pc);
+
+#if defined(DDB) || defined(KGDB)
+	kdb_trap(T_FAULT, tf);
+#endif
+	panic("Fatal abort");
+	/*NOTREACHED*/
+}
+
+/*
+ * dab_align() handles the following data aborts:
+ *
+ *  FAULT_ALIGN_0 - Alignment fault
+ *  FAULT_ALIGN_1 - Alignment fault
+ *
+ * These faults are fatal if they happen in kernel mode. Otherwise, we
+ * deliver a bus error to the process.
+ */
+static int
+dab_align(trapframe_t *tf, u_int fsr, u_int far, struct proc *p,
+    struct sigdata *sd)
+{
+
+	/* Alignment faults are always fatal if they occur in kernel mode */
+	if (!TRAP_USERMODE(tf))
+		dab_fatal(tf, fsr, far, p, NULL);
+
+	/* pcb_onfault *must* be NULL at this point */
+	KDASSERT(p->p_addr->u_pcb.pcb_onfault == NULL);
+
+	/* See if the cpu state needs to be fixed up */
+	(void) data_abort_fixup(tf, fsr, far, p);
+
+	/*
+	 * Describe the bus error signal in *sd and return non-zero;
+	 * our caller (data_abort_handler) delivers it via trapsignal()
+	 * on the do_trapsignal path.  Do NOT call trapsignal() here as
+	 * well: doing so posted the signal twice.  This matches the
+	 * single-delivery convention used by dab_buserr().
+	 */
+	sd->signo = SIGBUS;
+	sd->code = BUS_ADRALN;
+	sd->addr = far;
+	sd->trap = fsr;
+
+	p->p_addr->u_pcb.pcb_tf = tf;
+
+	return (1);
+}
+
+/*
+ * dab_buserr() handles the following data aborts:
+ *
+ *  FAULT_BUSERR_0 - External Abort on Linefetch -- Section
+ *  FAULT_BUSERR_1 - External Abort on Linefetch -- Page
+ *  FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section
+ *  FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page
+ *  FAULT_BUSTRNL1 - External abort on Translation -- Level 1
+ *  FAULT_BUSTRNL2 - External abort on Translation -- Level 2
+ *
+ * If pcb_onfault is set, flag the fault and return to the handler.
+ * If the fault occurred in user mode, give the process a SIGBUS.
+ *
+ * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2
+ * can be flagged as imprecise in the FSR. This causes a real headache
+ * since some of the machine state is lost. In this case, tf->tf_pc
+ * may not actually point to the offending instruction. In fact, if
+ * we've taken a double abort fault, it generally points somewhere near
+ * the top of "data_abort_entry" in exception.S.
+ *
+ * In all other cases, these data aborts are considered fatal.
+ */
+static int
+dab_buserr(trapframe_t *tf, u_int fsr, u_int far, struct proc *p,
+    struct sigdata *sd)
+{
+	struct pcb *pcb = &p->p_addr->u_pcb;
+
+#ifdef __XSCALE__
+	if ((fsr & FAULT_IMPRECISE) != 0 &&
+	    (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) {
+		/*
+		 * Oops, an imprecise, double abort fault. We've lost the
+		 * r14_abt/spsr_abt values corresponding to the original
+		 * abort, and the spsr saved in the trapframe indicates
+		 * ABT mode.
+		 */
+		tf->tf_spsr &= ~PSR_MODE;
+
+		/*
+		 * We use a simple heuristic to determine if the double abort
+		 * happened as a result of a kernel or user mode access.
+		 * If the current trapframe is at the top of the kernel stack,
+		 * the fault _must_ have come from user mode.
+		 */
+		if (tf != ((trapframe_t *)pcb->pcb_un.un_32.pcb32_sp) - 1) {
+			/*
+			 * Kernel mode. We're either about to die a
+			 * spectacular death, or pcb_onfault will come
+			 * to our rescue. Either way, the current value
+			 * of tf->tf_pc is irrelevant.
+			 */
+			tf->tf_spsr |= PSR_SVC32_MODE;
+			if (pcb->pcb_onfault == NULL)
+				printf("\nKernel mode double abort!\n");
+		} else {
+			/*
+			 * User mode. We've lost the program counter at the
+			 * time of the fault (not that it was accurate anyway;
+			 * it's not called an imprecise fault for nothing).
+			 * About all we can do is copy r14_usr to tf_pc and
+			 * hope for the best. The process is about to get a
+			 * SIGBUS, so it's probably history anyway.
+			 */
+			tf->tf_spsr |= PSR_USR32_MODE;
+			tf->tf_pc = tf->tf_usr_lr;
+		}
+	}
+
+	/* FAR is invalid for imprecise exceptions */
+	if ((fsr & FAULT_IMPRECISE) != 0)
+		far = 0;
+#endif /* __XSCALE__ */
+
+	/* Let a pending copyin/copyout recover; no signal in that case. */
+	if (pcb->pcb_onfault) {
+		KDASSERT(TRAP_USERMODE(tf) == 0);
+		tf->tf_r0 = EFAULT;
+		tf->tf_pc = (register_t) pcb->pcb_onfault;
+		return (0);
+	}
+
+	/* See if the cpu state needs to be fixed up */
+	(void) data_abort_fixup(tf, fsr, far, p);
+
+	/*
+	 * At this point, if the fault happened in kernel mode, we're toast
+	 */
+	if (!TRAP_USERMODE(tf))
+		dab_fatal(tf, fsr, far, p, NULL);
+
+	/* Deliver a bus error signal to the process (caller delivers it) */
+	sd->signo = SIGBUS;
+	sd->code = BUS_ADRERR;
+	sd->addr = far;
+	sd->trap = fsr;
+
+	p->p_addr->u_pcb.pcb_tf = tf;
+
+	return (1);
+}
+
+/*
+ * Run the CPU-specific prefetch abort fixup routine, if this CPU family
+ * needs one.  A kernel-mode fixup failure is fatal (note that dab_fatal
+ * is called with a NULL proc, i.e. the prefetch-abort reporting path).
+ * On CPUs that need no fixup this is a constant ABORT_FIXUP_OK.
+ */
+static __inline int
+prefetch_abort_fixup(trapframe_t *tf)
+{
+#ifdef CPU_ABORT_FIXUP_REQUIRED
+	int error;
+
+	/* Call the cpu specific prefetch abort fixup routine */
+	error = cpu_prefetchabt_fixup(tf);
+	if (__predict_true(error != ABORT_FIXUP_FAILED))
+		return (error);
+
+	/*
+	 * Oops, couldn't fix up the instruction
+	 */
+	printf(
+	    "prefetch_abort_fixup: fixup for %s mode prefetch abort failed.\n",
+	    TRAP_USERMODE(tf) ? "user" : "kernel");
+	printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc,
+	    *((u_int *)tf->tf_pc));
+	disassemble(tf->tf_pc);
+
+	/* Die now if this happened in kernel mode */
+	if (!TRAP_USERMODE(tf))
+		dab_fatal(tf, 0, tf->tf_pc, NULL, NULL);
+
+	return (error);
+#else
+	return (ABORT_FIXUP_OK);
+#endif /* CPU_ABORT_FIXUP_REQUIRED */
+}
+
+/*
+ * void prefetch_abort_handler(trapframe_t *tf)
+ *
+ * Abort handler called when instruction execution occurs at
+ * a non existent or restricted (access permissions) memory page.
+ * If the address is invalid and we were in SVC mode then panic as
+ * the kernel should never prefetch abort.
+ * If the address is invalid and the page is mapped then the user process
+ * does not have read permission so send it a signal.
+ * Otherwise fault the page in and try again.
+ */
+void
+prefetch_abort_handler(trapframe_t *tf)
+{
+	struct proc *p;
+	struct vm_map *map;
+	vaddr_t fault_pc, va;
+	int error;
+	union sigval sv;
+
+	/* Update vmmeter statistics */
+	uvmexp.traps++;
+
+	/*
+	 * Enable IRQ's (disabled by the abort) This always comes
+	 * from user mode so we know interrupts were not disabled.
+	 * But we check anyway.
+	 */
+	if (__predict_true((tf->tf_spsr & I32_bit) == 0))
+		enable_interrupts(I32_bit);
+
+	/* See if the cpu state needs to be fixed up */
+	switch (prefetch_abort_fixup(tf)) {
+	case ABORT_FIXUP_RETURN:
+		return;
+	case ABORT_FIXUP_FAILED:
+		/*
+		 * Deliver a SIGILL to the process.  'p' must be
+		 * initialized before it is passed to trapsignal();
+		 * previously it was still uninitialized at that call.
+		 */
+		p = curproc;
+		p->p_addr->u_pcb.pcb_tf = tf;
+
+		sv.sival_ptr = (u_int32_t *) tf->tf_pc;
+		trapsignal(p, SIGILL, BUS_ADRERR, ILL_ILLOPC, sv);
+		goto do_trapsignal;
+	default:
+		break;
+	}
+
+	/* Prefetch aborts cannot happen in kernel mode */
+	if (__predict_false(!TRAP_USERMODE(tf)))
+		dab_fatal(tf, 0, tf->tf_pc, NULL, NULL);
+
+	/* Get fault address */
+	fault_pc = tf->tf_pc;
+	p = curproc;
+	p->p_addr->u_pcb.pcb_tf = tf;
+
+	/* Ok validate the address, can only execute in USER space */
+	if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS ||
+	    (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) {
+		sv.sival_ptr = (u_int32_t *)fault_pc;
+		trapsignal(p, SIGSEGV, 0, SEGV_ACCERR, sv);
+		goto do_trapsignal;
+	}
+
+	map = &p->p_vmspace->vm_map;
+	va = trunc_page(fault_pc);
+
+	/*
+	 * See if the pmap can handle this fault on its own...
+	 */
+#ifdef DEBUG
+	last_fault_code = -1;
+#endif
+	if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1))
+		goto out;
+
+#ifdef DIAGNOSTIC
+	if (__predict_false(current_intr_depth > 0)) {
+		printf("\nNon-emulated prefetch abort with intr_depth > 0\n");
+		dab_fatal(tf, 0, tf->tf_pc, NULL, NULL);
+	}
+#endif
+
+	error = uvm_fault(map, va, 0, VM_PROT_READ);
+	if (__predict_true(error == 0))
+		goto out;
+
+	/* Fault could not be serviced: signal the process. */
+	sv.sival_ptr = (u_int32_t *) fault_pc;
+	if (error == ENOMEM) {
+		printf("UVM: pid %d (%s), uid %d killed: "
+		    "out of swap\n", p->p_pid, p->p_comm,
+		    (p->p_cred && p->p_ucred) ?
+		     p->p_ucred->cr_uid : -1);
+		trapsignal(p, SIGKILL, 0, SEGV_MAPERR, sv);
+	} else
+		trapsignal(p, SIGSEGV, 0, SEGV_MAPERR, sv);
+
+do_trapsignal:
+
+out:
+	/* Always came from user mode, so deliver pending work via userret */
+	userret(p);
+}
+
+/*
+ * Tentatively read an 8, 16, or 32-bit value from 'addr'.
+ * If the read succeeds, the value is written to 'rptr' and zero is returned.
+ * Else, return EFAULT.
+ *
+ * The actual guarded accesses are done by the assembly helpers
+ * badaddr_read_{1,2,4}, which are expected to return non-zero on fault.
+ * 'rptr' may be NULL if the caller only wants the probe result.
+ */
+int
+badaddr_read(void *addr, size_t size, void *rptr)
+{
+	extern int badaddr_read_1(const uint8_t *, uint8_t *);
+	extern int badaddr_read_2(const uint16_t *, uint16_t *);
+	extern int badaddr_read_4(const uint32_t *, uint32_t *);
+	union {
+		uint8_t v1;
+		uint16_t v2;
+		uint32_t v4;
+	} u;
+	int rv;
+
+	/* Make sure any pending stores have hit memory before probing. */
+	cpu_drain_writebuf();
+
+	/* Read from the test address. */
+	switch (size) {
+	case sizeof(uint8_t):
+		rv = badaddr_read_1(addr, &u.v1);
+		if (rv == 0 && rptr)
+			*(uint8_t *) rptr = u.v1;
+		break;
+
+	case sizeof(uint16_t):
+		rv = badaddr_read_2(addr, &u.v2);
+		if (rv == 0 && rptr)
+			*(uint16_t *) rptr = u.v2;
+		break;
+
+	case sizeof(uint32_t):
+		rv = badaddr_read_4(addr, &u.v4);
+		if (rv == 0 && rptr)
+			*(uint32_t *) rptr = u.v4;
+		break;
+
+	default:
+		panic("badaddr: invalid size (%lu)", (u_long) size);
+	}
+
+	/* Return EFAULT if the address was invalid, else zero */
+	return (rv);
+}
diff --git a/sys/arch/arm/arm/fiq.c b/sys/arch/arm/arm/fiq.c
new file mode 100644
index 00000000000..076675aa641
--- /dev/null
+++ b/sys/arch/arm/arm/fiq.c
@@ -0,0 +1,177 @@
+/* $OpenBSD: fiq.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: fiq.c,v 1.5 2002/04/03 23:33:27 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <arm/cpufunc.h>
+#include <arm/fiq.h>
+
+#ifdef __PROG32
+#include <uvm/uvm.h>
+#endif
+
+/* Stack of claimed FIQ handlers; the head of the list is the active one. */
+TAILQ_HEAD(, fiqhandler) fiqhandler_stack =
+    TAILQ_HEAD_INITIALIZER(fiqhandler_stack);
+
+/* FIQ vector destination and the null handler, both defined in assembly. */
+extern char fiqvector[];
+extern char fiq_nullhandler[], fiq_nullhandler_end[];
+
+#ifdef __PROG32
+#define	IRQ_BIT		I32_bit
+#define	FIQ_BIT		F32_bit
+#else
+#define	IRQ_BIT		R15_IRQ_DISABLE
+#define	FIQ_BIT		R15_FIQ_DISABLE
+#endif /* __PROG32 */
+
+/*
+ * fiq_installhandler:
+ *
+ *	Actually install the FIQ handler down at the FIQ vector.
+ *
+ *	The handler code at 'func' ('size' bytes) is copied to fiqvector
+ *	and, on 32-bit-mode kernels, the icache is synced for the range.
+ *
+ *	Note: If the FIQ is invoked via an extra layer of
+ *	indirection, the actual FIQ code store lives in the
+ *	data segment, so there is no need to manipulate
+ *	the vector page's protection.
+ */
+static void
+fiq_installhandler(void *func, size_t size)
+{
+#if defined(__PROG32) && !defined(__ARM_FIQ_INDIRECT)
+	/* Vector page is normally read-only; open it up for the copy. */
+	vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE);
+#endif
+
+	memcpy(fiqvector, func, size);
+
+#ifdef __PROG32
+#if !defined(__ARM_FIQ_INDIRECT)
+	vector_page_setprot(VM_PROT_READ);
+#endif
+	cpu_icache_sync_range((vaddr_t) fiqvector, size);
+#endif
+}
+
+/*
+ * fiq_claim:
+ *
+ *	Claim the FIQ vector for handler 'fh', pushing any current
+ *	handler (its banked registers are saved first).
+ *
+ *	Returns 0 on success and re-enables FIQs; EFBIG if the handler
+ *	code exceeds the 0x100-byte vector space; EBUSY if the current
+ *	handler does not allow itself to be pushed (FH_CANPUSH clear).
+ */
+int
+fiq_claim(struct fiqhandler *fh)
+{
+	struct fiqhandler *ofh;
+	u_int oldirqstate;
+	int error = 0;
+
+	if (fh->fh_size > 0x100)
+		return (EFBIG);
+
+	/* Block FIQs while we manipulate the handler stack and vector. */
+	oldirqstate = disable_interrupts(FIQ_BIT);
+
+	if ((ofh = TAILQ_FIRST(&fiqhandler_stack)) != NULL) {
+		if ((ofh->fh_flags & FH_CANPUSH) == 0) {
+			error = EBUSY;
+			goto out;
+		}
+
+		/* Save the previous FIQ handler's registers. */
+		if (ofh->fh_regs != NULL)
+			fiq_getregs(ofh->fh_regs);
+	}
+
+	/* Set FIQ mode registers to ours. */
+	if (fh->fh_regs != NULL)
+		fiq_setregs(fh->fh_regs);
+
+	TAILQ_INSERT_HEAD(&fiqhandler_stack, fh, fh_list);
+
+	/* Now copy the actual handler into place. */
+	fiq_installhandler(fh->fh_func, fh->fh_size);
+
+	/* Make sure FIQs are enabled when we return. */
+	oldirqstate &= ~FIQ_BIT;
+
+ out:
+	restore_interrupts(oldirqstate);
+	return (error);
+}
+
+/*
+ * fiq_release:
+ *
+ *	Release the FIQ vector held by handler 'fh'.  If 'fh' was the
+ *	active handler, the next handler on the stack (if any) is
+ *	reinstalled; with no handlers left, the null handler is copied
+ *	back in and FIQs are left disabled on return.
+ */
+void
+fiq_release(struct fiqhandler *fh)
+{
+	u_int oldirqstate;
+	struct fiqhandler *ofh;
+
+	/* Block FIQs while we manipulate the handler stack and vector. */
+	oldirqstate = disable_interrupts(FIQ_BIT);
+
+	/*
+	 * If we are the currently active FIQ handler, then we
+	 * need to save our registers and pop the next one back
+	 * into the vector.
+	 */
+	if (fh == TAILQ_FIRST(&fiqhandler_stack)) {
+		if (fh->fh_regs != NULL)
+			fiq_getregs(fh->fh_regs);
+		TAILQ_REMOVE(&fiqhandler_stack, fh, fh_list);
+		if ((ofh = TAILQ_FIRST(&fiqhandler_stack)) != NULL) {
+			if (ofh->fh_regs != NULL)
+				fiq_setregs(ofh->fh_regs);
+			fiq_installhandler(ofh->fh_func, ofh->fh_size);
+		}
+	} else
+		TAILQ_REMOVE(&fiqhandler_stack, fh, fh_list);
+
+	if (TAILQ_FIRST(&fiqhandler_stack) == NULL) {
+		/* Copy the NULL handler back down into the vector. */
+		fiq_installhandler(fiq_nullhandler,
+		    (size_t)(fiq_nullhandler_end - fiq_nullhandler));
+
+		/* Make sure FIQs are disabled when we return. */
+		oldirqstate |= FIQ_BIT;
+	}
+
+	restore_interrupts(oldirqstate);
+}
diff --git a/sys/arch/arm/arm/fiq_subr.S b/sys/arch/arm/arm/fiq_subr.S
new file mode 100644
index 00000000000..dfdc543c5ff
--- /dev/null
+++ b/sys/arch/arm/arm/fiq_subr.S
@@ -0,0 +1,116 @@
+/* $OpenBSD: fiq_subr.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: fiq_subr.S,v 1.3 2002/04/12 18:50:31 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "assym.h"
+
+#include <arm/armreg.h>
+#include <arm/asm.h>
+#include <arm/cpuconf.h>
+
+/*
+ * MODE_CHANGE_NOP should be inserted between a mode change and a
+ * banked register (R8--R15) access.
+ */
+#if defined(CPU_ARM2) || defined(CPU_ARM250)
+#define	MODE_CHANGE_NOP	mov	r0, r0
+#else
+#define	MODE_CHANGE_NOP	/* Data sheet says ARM3 doesn't need it */
+#endif
+
+#ifdef __PROG32
+/*
+ * Switch the CPU to FIQ mode so the FIQ-banked registers (r8-r13)
+ * become visible; the caller's PSR is preserved in r3 so
+ * BACK_TO_SVC_MODE can restore it.  Clobbers r2 and r3.
+ */
+#define SWITCH_TO_FIQ_MODE						\
+	mrs	r2, cpsr_all						; \
+	mov	r3, r2							; \
+	bic	r2, r2, #(PSR_MODE)					; \
+	orr	r2, r2, #(PSR_FIQ32_MODE)				; \
+	msr	cpsr_all, r2
+#else
+/* 26-bit variant: PSR lives in r15; old PSR kept in r1. Clobbers r1, r2. */
+#define SWITCH_TO_FIQ_MODE						; \
+	mov	r1, r15							; \
+	bic	r2, r1, #(R15_MODE)					; \
+	teqp	r2, #(R15_MODE_FIQ)					; \
+	MODE_CHANGE_NOP
+#endif /* __PROG32 */
+
+#ifdef __PROG32
+/* Restore the PSR saved in r3 by SWITCH_TO_FIQ_MODE. */
+#define BACK_TO_SVC_MODE						\
+	msr	cpsr_all, r3
+#else
+/* 26-bit variant: restore the PSR saved in r1. */
+#define BACK_TO_SVC_MODE						; \
+	teqp	r1, #0							; \
+	MODE_CHANGE_NOP
+#endif /* __PROG32 */
+
+/*
+ * fiq_getregs:
+ *
+ *	Fetch the FIQ mode banked registers into the fiqhandler
+ *	structure.
+ *
+ *	In:  r0 = pointer to a 6-word save area (fh_regs).
+ *	Clobbers the scratch registers used by SWITCH_TO_FIQ_MODE
+ *	(r2/r3 on __PROG32, r1/r2 otherwise).
+ */
+ENTRY(fiq_getregs)
+	SWITCH_TO_FIQ_MODE
+
+	stmia	r0, {r8-r13}		/* store FIQ-banked r8-r13 */
+
+	BACK_TO_SVC_MODE
+	mov	pc, lr
+
+/*
+ * fiq_setregs:
+ *
+ *	Load the FIQ mode banked registers from the fiqhandler
+ *	structure.
+ *
+ *	In:  r0 = pointer to a 6-word save area (fh_regs).
+ *	Clobbers the scratch registers used by SWITCH_TO_FIQ_MODE
+ *	(r2/r3 on __PROG32, r1/r2 otherwise).
+ */
+ENTRY(fiq_setregs)
+	SWITCH_TO_FIQ_MODE
+
+	ldmia	r0, {r8-r13}		/* load FIQ-banked r8-r13 */
+
+	BACK_TO_SVC_MODE
+	mov	pc, lr
+
+/*
+ * fiq_nullhandler:
+ *
+ *	Null handler copied down to the FIQ vector when the last
+ *	FIQ handler is removed.  It simply returns from the FIQ.
+ */
+	.global	_C_LABEL(fiq_nullhandler), _C_LABEL(fiq_nullhandler_end)
+_C_LABEL(fiq_nullhandler):
+	subs	pc, lr, #4		/* return from FIQ, restore CPSR */
+_C_LABEL(fiq_nullhandler_end):
diff --git a/sys/arch/arm/arm/fusu.S b/sys/arch/arm/arm/fusu.S
new file mode 100644
index 00000000000..300219843d7
--- /dev/null
+++ b/sys/arch/arm/arm/fusu.S
@@ -0,0 +1,398 @@
+/* $OpenBSD: fusu.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: exception.S,v 1.13 2003/10/31 16:30:15 scw Exp $^I*/$
+
+
+/*
+ * Copyright (c) 1996-1998 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "assym.h"
+#include <machine/asm.h>
+
+#ifdef MULTIPROCESSOR
+.Lcpu_info:
+ .word _C_LABEL(cpu_info)
+#else
+.Lcurpcb:
+ .word _C_LABEL(curpcb)
+#endif
+
+/*
+ * fuword(caddr_t uaddr);
+ * Fetch an int from the user's address space.
+ */
+
+ENTRY(fuword)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r1, .Lfusufault
+ str r1, [r2, #PCB_ONFAULT]
+
+ ldrt r3, [r0]
+
+ mov r1, #0x00000000
+ str r1, [r2, #PCB_ONFAULT]
+ mov r0, r3
+ mov pc, lr
+
+/*
+ * fusword(caddr_t uaddr);
+ * Fetch a short from the user's address space.
+ */
+
+ENTRY(fusword)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r1, .Lfusufault
+ str r1, [r2, #PCB_ONFAULT]
+
+ ldrbt r3, [r0], #1
+ ldrbt ip, [r0]
+#ifdef __ARMEB__
+ orr r0, ip, r3, asl #8
+#else
+ orr r0, r3, ip, asl #8
+#endif
+ mov r1, #0x00000000
+ str r1, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+/*
+ * fuswintr(caddr_t uaddr);
+ * Fetch a short from the user's address space. Can be called during an
+ * interrupt.
+ */
+
+ENTRY(fuswintr)
+ ldr r2, Lblock_userspace_access
+ ldr r2, [r2]
+ teq r2, #0
+ mvnne r0, #0x00000000
+ movne pc, lr
+
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r1, _C_LABEL(fusubailout)
+ str r1, [r2, #PCB_ONFAULT]
+
+ ldrbt r3, [r0], #1
+ ldrbt ip, [r0]
+#ifdef __ARMEB__
+ orr r0, ip, r3, asl #8
+#else
+ orr r0, r3, ip, asl #8
+#endif
+
+ mov r1, #0x00000000
+ str r1, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+Lblock_userspace_access:
+ .word _C_LABEL(block_userspace_access)
+
+ .data
+ .align 0
+ .global _C_LABEL(block_userspace_access)
+_C_LABEL(block_userspace_access):
+ .word 0
+ .text
+
+/*
+ * fubyte(caddr_t uaddr);
+ * Fetch a byte from the user's address space.
+ */
+
+ENTRY(fubyte)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r1, .Lfusufault
+ str r1, [r2, #PCB_ONFAULT]
+
+ ldrbt r3, [r0]
+
+ mov r1, #0x00000000
+ str r1, [r2, #PCB_ONFAULT]
+ mov r0, r3
+ mov pc, lr
+
+/*
+ * Handle faults from [fs]u*(). Clean up and return -1.
+ */
+
+.Lfusufault:
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mvn r0, #0x00000000
+ mov pc, lr
+
+/*
+ * Handle faults from [fs]u*(). Clean up and return -1. This differs from
+ * fusufault() in that trap() will recognise it and return immediately rather
+ * than trying to page fault.
+ */
+
+/* label must be global as fault.c references it */
+ .global _C_LABEL(fusubailout)
+_C_LABEL(fusubailout):
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mvn r0, #0x00000000
+ mov pc, lr
+
+#ifdef DIAGNOSTIC
+/*
+ * Handle earlier faults from [fs]u*(), due to no pcb
+ */
+
+.Lfusupcbfault:
+ mov r1, r0
+ adr r0, fusupcbfaulttext
+ b _C_LABEL(panic)
+
+fusupcbfaulttext:
+ .asciz "Yikes - no valid PCB during fusuxxx() addr=%08x\n"
+ .align 0
+#endif
+
+/*
+ * suword(caddr_t uaddr, int x);
+ * Store an int in the user's address space.
+ */
+
+ENTRY(suword)
+#ifdef MULTIPROCESSOR
+ /* XXX Probably not appropriate for non-Hydra SMPs */
+ stmfd sp!, {r0, r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r3, .Lfusufault
+ str r3, [r2, #PCB_ONFAULT]
+
+ strt r1, [r0]
+
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+/*
+ * suswintr(caddr_t uaddr, short x);
+ * Store a short in the user's address space. Can be called during an
+ * interrupt.
+ */
+
+ENTRY(suswintr)
+ ldr r2, Lblock_userspace_access
+ ldr r2, [r2]
+ teq r2, #0
+ mvnne r0, #0x00000000
+ movne pc, lr
+
+#ifdef MULTIPROCESSOR
+ stmfd sp!, {r0, r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r3, _C_LABEL(fusubailout)
+ str r3, [r2, #PCB_ONFAULT]
+
+#ifdef __ARMEB__
+ mov ip, r1, lsr #8
+ strbt ip, [r0], #1
+#else
+ strbt r1, [r0], #1
+ mov r1, r1, lsr #8
+#endif
+ strbt r1, [r0]
+
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+/*
+ * susword(caddr_t uaddr, short x);
+ * Store a short in the user's address space.
+ */
+
+ENTRY(susword)
+#ifdef MULTIPROCESSOR
+ stmfd sp!, {r0, r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r3, .Lfusufault
+ str r3, [r2, #PCB_ONFAULT]
+
+#ifdef __ARMEB__
+ mov ip, r1, lsr #8
+ strbt ip, [r0], #1
+#else
+ strbt r1, [r0], #1
+ mov r1, r1, lsr #8
+#endif
+ strbt r1, [r0]
+
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mov pc, lr
+
+/*
+ * subyte(caddr_t uaddr, char x);
+ * Store a byte in the user's address space.
+ */
+
+ENTRY(subyte)
+#ifdef MULTIPROCESSOR
+ stmfd sp!, {r0, r1, r14}
+ bl _C_LABEL(cpu_number)
+ ldr r2, .Lcpu_info
+ ldr r2, [r2, r0, lsl #2]
+ ldr r2, [r2, #CI_CURPCB]
+ ldmfd sp!, {r0, r1, r14}
+#else
+ ldr r2, .Lcurpcb
+ ldr r2, [r2]
+#endif
+
+
+#ifdef DIAGNOSTIC
+ teq r2, #0x00000000
+ beq .Lfusupcbfault
+#endif
+
+ adr r3, .Lfusufault
+ str r3, [r2, #PCB_ONFAULT]
+
+ strbt r1, [r0]
+
+ mov r0, #0x00000000
+ str r0, [r2, #PCB_ONFAULT]
+ mov pc, lr
diff --git a/sys/arch/arm/arm/genassym.cf b/sys/arch/arm/arm/genassym.cf
new file mode 100644
index 00000000000..0a4dff53536
--- /dev/null
+++ b/sys/arch/arm/arm/genassym.cf
@@ -0,0 +1,168 @@
+# $OpenBSD: genassym.cf,v 1.1 2004/02/01 05:09:48 drahn Exp $
+# $NetBSD: genassym.cf,v 1.27 2003/11/04 10:33:16 dsl Exp$
+
+# Copyright (c) 1982, 1990 The Regents of the University of California.
+# All rights reserved.
+#
+# This code is derived from software contributed to Berkeley by
+# William Jolitz.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. All advertising materials mentioning features or use of this software
+# must display the following acknowledgement:
+# This product includes software developed by the University of
+# California, Berkeley and its contributors.
+# 4. Neither the name of the University nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+include <sys/param.h>
+include <sys/proc.h>
+include <sys/systm.h>
+include <sys/mbuf.h>
+include <sys/resourcevar.h>
+include <sys/device.h>
+include <sys/user.h>
+include <sys/signal.h>
+include <sys/mbuf.h>
+include <sys/socketvar.h>
+include <netinet/in.h>
+include <netinet/in_systm.h>
+include <netinet/ip.h>
+include <netinet/ip6.h>
+include <netinet/ip_var.h>
+
+include <machine/cpu.h>
+include <uvm/uvm_extern.h>
+
+include <arm/fiq.h>
+
+include <machine/pmap.h>
+include <machine/frame.h>
+include <machine/vmparam.h>
+
+define __PROG32 1
+ifdef __ARM_FIQ_INDIRECT
+define __ARM_FIQ_INDIRECT 1
+endif
+
+define VM_MIN_ADDRESS VM_MIN_ADDRESS
+define VM_MAXUSER_ADDRESS VM_MAXUSER_ADDRESS
+
+define DOMAIN_CLIENT DOMAIN_CLIENT
+define PMAP_DOMAIN_KERNEL PMAP_DOMAIN_KERNEL
+
+ifdef PMAP_INCLUDE_PTE_SYNC
+define PMAP_INCLUDE_PTE_SYNC 1
+endif
+
+define PAGE_SIZE PAGE_SIZE
+define UPAGES UPAGES
+define PAGE_SHIFT PAGE_SHIFT
+
+# Important offsets into the lwp and proc structs & associated constants
+define P_TRACED P_TRACED
+define P_PROFIL P_PROFIL
+
+define P_FORW offsetof(struct proc, p_forw)
+define P_BACK offsetof(struct proc, p_back)
+define P_ADDR offsetof(struct proc, p_addr)
+define P_PRIORITY offsetof(struct proc, p_priority)
+define P_WCHAN offsetof(struct proc, p_wchan)
+define P_STAT offsetof(struct proc, p_stat)
+
+define PCB_TF offsetof(struct pcb, pcb_tf)
+define PCB_PAGEDIR offsetof(struct pcb, pcb_pagedir)
+define PCB_PL1VEC offsetof(struct pcb, pcb_pl1vec)
+define PCB_L1VEC offsetof(struct pcb, pcb_l1vec)
+define PCB_DACR offsetof(struct pcb, pcb_dacr)
+define PCB_CSTATE offsetof(struct pcb, pcb_cstate)
+define PCB_FLAGS offsetof(struct pcb, pcb_flags)
+define PCB_R8 offsetof(struct pcb, pcb_un.un_32.pcb32_r8)
+define PCB_R9 offsetof(struct pcb, pcb_un.un_32.pcb32_r9)
+define PCB_R10 offsetof(struct pcb, pcb_un.un_32.pcb32_r10)
+define PCB_R11 offsetof(struct pcb, pcb_un.un_32.pcb32_r11)
+define PCB_R12 offsetof(struct pcb, pcb_un.un_32.pcb32_r12)
+define PCB_SP offsetof(struct pcb, pcb_un.un_32.pcb32_sp)
+define PCB_LR offsetof(struct pcb, pcb_un.un_32.pcb32_lr)
+define PCB_PC offsetof(struct pcb, pcb_un.un_32.pcb32_pc)
+define PCB_UND_SP offsetof(struct pcb, pcb_un.un_32.pcb32_und_sp)
+define PCB_ONFAULT offsetof(struct pcb, pcb_onfault)
+define PCB_NOALIGNFLT PCB_NOALIGNFLT
+
+define USER_SIZE sizeof(struct user)
+
+define V_TRAP offsetof(struct uvmexp, traps)
+define V_INTR offsetof(struct uvmexp, intrs)
+define V_SOFT offsetof(struct uvmexp, softs)
+
+define VM_MAP offsetof(struct vmspace, vm_map)
+define VM_PMAP offsetof(struct vmspace, vm_map.pmap)
+
+define CS_TLB_ID offsetof(union pmap_cache_state, cs_tlb_id)
+define CS_TLB_D offsetof(union pmap_cache_state, cs_tlb_d)
+define CS_TLB offsetof(union pmap_cache_state, cs_tlb)
+define CS_CACHE_ID offsetof(union pmap_cache_state, cs_cache_id)
+define CS_CACHE_D offsetof(union pmap_cache_state, cs_cache_d)
+define CS_CACHE offsetof(union pmap_cache_state, cs_cache)
+define CS_ALL offsetof(union pmap_cache_state, cs_all)
+define PMAP_CSTATE offsetof(struct pmap, pm_cstate)
+
+define PR_BASE offsetof(struct uprof, pr_base)
+define PR_SIZE offsetof(struct uprof, pr_size)
+define PR_OFF offsetof(struct uprof, pr_off)
+define PR_SCALE offsetof(struct uprof, pr_scale)
+
+define SIGTRAP SIGTRAP
+define SIGEMT SIGEMT
+
+define SIGF_SC offsetof(struct sigframe, sf_sc)
+
+define TF_R0 offsetof(struct trapframe, tf_r0)
+define TF_R10 offsetof(struct trapframe, tf_r10)
+define TF_PC offsetof(struct trapframe, tf_pc)
+
+define PROCSIZE sizeof(struct proc)
+define TRAPFRAMESIZE sizeof(struct trapframe)
+
+define CF_IDCACHE_WBINV_ALL offsetof(struct cpu_functions, cf_idcache_wbinv_all)
+define CF_DCACHE_WB_RANGE offsetof(struct cpu_functions, cf_dcache_wb_range)
+define CF_TLB_FLUSHID_SE offsetof(struct cpu_functions, cf_tlb_flushID_SE)
+define CF_CONTEXT_SWITCH offsetof(struct cpu_functions, cf_context_switch)
+define CF_SLEEP offsetof(struct cpu_functions, cf_sleep)
+define CF_CONTROL offsetof(struct cpu_functions, cf_control)
+
+#define CI_CURPRIORITY offsetof(struct cpu_info, ci_schedstate.spc_curpriority)
+ifdef MULTIPROCESSOR
+define CI_CURLWP offsetof(struct cpu_info, ci_curlwp)
+define CI_CURPCB offsetof(struct cpu_info, ci_curpcb)
+endif
+if defined(COMPAT_15) && defined(EXEC_AOUT)
+define CI_CTRL offsetof(struct cpu_info, ci_ctrl)
+endif
+
+# Constants required for in_cksum() and friends.
+define M_LEN offsetof(struct mbuf, m_len)
+define M_DATA offsetof(struct mbuf, m_data)
+define M_NEXT offsetof(struct mbuf, m_next)
+define IP_SRC offsetof(struct ip, ip_src)
+define IP_DST offsetof(struct ip, ip_dst)
diff --git a/sys/arch/arm/arm/in_cksum_arm.S b/sys/arch/arm/arm/in_cksum_arm.S
new file mode 100644
index 00000000000..cde73dc4d36
--- /dev/null
+++ b/sys/arch/arm/arm/in_cksum_arm.S
@@ -0,0 +1,471 @@
+/* $OpenBSD: in_cksum_arm.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: in_cksum_arm.S,v 1.3 2003/11/26 10:31:53 rearnsha Exp $ */
+
+/*
+ * Copyright 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Hand-optimised in_cksum() and in4_cksum() implementations for ARM/Xscale
+ */
+
+#include <machine/asm.h>
+#include "assym.h"
+
+
+/*
+ * int in_cksum(struct mbuf *m, int len)
+ *
+ * Entry:
+ * r0 m
+ * r1 len
+ *
+ * NOTE: Assumes 'm' is *never* NULL.
+ */
+/* LINTSTUB: Func: int in_cksum(struct mbuf *, int) */
+ENTRY(in_cksum)
+ stmfd sp!, {r4-r11,lr}
+ mov r8, #0x00
+ mov r9, r1
+ mov r10, #0x00
+ mov ip, r0
+
+.Lin_cksum_loop:
+ ldr r1, [ip, #(M_LEN)]
+ ldr r0, [ip, #(M_DATA)]
+ ldr ip, [ip, #(M_NEXT)]
+.Lin_cksum_entry4:
+ cmp r9, r1
+ movlt r1, r9
+ sub r9, r9, r1
+ eor r11, r10, r0
+ add r10, r10, r1
+ adds r2, r1, #0x00
+ blne _ASM_LABEL(L_cksumdata)
+ tst r11, #0x01
+ movne r2, r2, ror #8
+ adds r8, r8, r2
+ adc r8, r8, #0x00
+ cmp ip, #0x00
+ bne .Lin_cksum_loop
+
+ mov r1, #0xff
+ orr r1, r1, #0xff00
+ and r0, r8, r1
+ add r0, r0, r8, lsr #16
+ add r0, r0, r0, lsr #16
+ and r0, r0, r1
+ eor r0, r0, r1
+ ldmfd sp!, {r4-r11,pc}
+
+
+#if 0
+ /* ALSO IN in4_cksum.c */
+#ifdef INET
+/*
+ * int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len)
+ *
+ * Entry:
+ * r0 m
+ * r1 nxt
+ * r2 off
+ * r3 len
+ */
+/* LINTSTUB: Func: int in4_cksum(struct mbuf *, u_int8_t, int, int) */
+ENTRY(in4_cksum)
+ stmfd sp!, {r4-r11,lr}
+ mov r8, #0x00 /* Accumulate sum in r8 */
+
+ /*
+ * First, deal with a pseudo header, if present
+ */
+ ldr r6, [r0, #(M_DATA)]
+ cmp r1, #0x00
+ beq .Lin4_cksum_skip_entry
+
+#ifdef __XSCALE__
+ pld [r6, #(IP_SRC)]
+#endif
+ add r4, r6, #(IP_SRC)
+ ands r4, r4, #0x03
+ add r8, r1, r3 /* sum = nxt + len */
+ addne pc, pc, r4, lsl #5 /* Handle alignment of pseudo header */
+
+ /* 0x00: Data 32-bit aligned */
+ ldr r5, [r6, #(IP_SRC)]
+ ldr r4, [r6, #(IP_DST)]
+ b .Lin4_cksum_add_ips
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ /* 0x01: Data 8-bit aligned */
+ ldr r4, [r6, #(IP_SRC - 1)] /* BE:r4 = x012 LE:r4 = 210x */
+ ldr r5, [r6, #(IP_SRC + 3)] /* BE:r5 = 3456 LE:r5 = 6543 */
+ ldrb r7, [r6, #(IP_SRC + 7)] /* r7 = ...7 */
+#ifdef __ARMEB__
+ mov r4, r4, lsl #8 /* r4 = 012. */
+ orr r4, r4, r5, lsr #24 /* r4 = 0123 */
+ orr r5, r7, r5, lsl #8 /* r5 = 4567 */
+ b .Lin4_cksum_add_ips
+ nop
+#else
+ mov r4, r4, lsr #8 /* r4 = .210 */
+ orr r4, r4, r5, lsl #24 /* r4 = 3210 */
+ mov r5, r5, lsr #8 /* r5 = .654 */
+ orr r5, r5, r7, lsl #24 /* r5 = 7654 */
+ b .Lin4_cksum_add_ips
+#endif
+
+ /* 0x02: Data 16-bit aligned */
+#ifdef __XSCALE__
+ ldrh r5, [r6, #(IP_SRC)] /* BE:r5 = ..01 LE:r5 = ..10 */
+ ldrh r7, [r6, #(IP_DST + 2)] /* BE:r7 = ..67 LE:r7 = ..76 */
+ ldr r4, [r6, #(IP_SRC + 2)] /* BE:r4 = 2345 LE:r4 = 5432 */
+ orr r5, r7, r5, lsl #16 /* BE:r5 = 0167 LE:r5 = 1076 */
+ b .Lin4_cksum_add_ips
+ nop
+ nop
+ nop
+#else
+ ldr r4, [r6, #(IP_SRC - 2)] /* r4 = 10xx */
+ ldr r7, [r6, #(IP_DST - 2)] /* r7 = xx76 */
+ ldr r5, [r6, #(IP_SRC + 2)] /* r5 = 5432 */
+ mov r4, r4, lsr #16 /* r4 = ..10 */
+ orr r4, r4, r7, lsl #16 /* r4 = 7610 */
+ b .Lin4_cksum_add_ips
+ nop
+ nop
+#endif
+
+ /* 0x03: Data 8-bit aligned */
+ ldrb r4, [r6, #(IP_SRC)] /* r4 = ...0 */
+ ldr r5, [r6, #(IP_SRC + 1)] /* BE:r5 = 1234 LE:r5 = 4321 */
+ ldr r7, [r6, #(IP_SRC + 5)] /* BE:r7 = 567x LE:r7 = x765 */
+#ifdef __ARMEB__
+ mov r4, r4, lsl #24 /* r4 = 0... */
+ orr r4, r4, r5, lsr #8 /* r4 = 0123 */
+ mov r5, r5, lsl #24 /* r5 = 4... */
+ orr r5, r5, r7, lsr #8 /* r5 = 4567 */
+#else
+ orr r4, r4, r5, lsl #8 /* r4 = 3210 */
+	mov	r5, r5, lsr #24		/* r5 = ...4 */
+ orr r5, r5, r7, lsl #8 /* r5 = 7654 */
+#endif
+ /* FALLTHROUGH */
+
+.Lin4_cksum_add_ips:
+ adds r5, r5, r4
+#ifndef __ARMEB__
+ adcs r8, r5, r8, lsl #8
+#else
+ adcs r8, r5, r8
+#endif
+ adc r8, r8, #0x00
+ mov r1, #0x00
+ b .Lin4_cksum_skip_entry
+
+.Lin4_cksum_skip_loop:
+ ldr r1, [r0, #(M_LEN)]
+ ldr r6, [r0, #(M_DATA)]
+ ldr r0, [r0, #(M_NEXT)]
+.Lin4_cksum_skip_entry:
+ subs r2, r2, r1
+ blt .Lin4_cksum_skip_done
+ cmp r0, #0x00
+ bne .Lin4_cksum_skip_loop
+ b .Lin4_cksum_whoops
+
+.Lin4_cksum_skip_done:
+ mov ip, r0
+ add r0, r2, r6
+ add r0, r0, r1
+ rsb r1, r2, #0x00
+ mov r9, r3
+ mov r10, #0x00
+ b .Lin_cksum_entry4
+
+.Lin4_cksum_whoops:
+ adr r0, .Lin4_cksum_whoops_str
+ bl _C_LABEL(panic)
+.Lin4_cksum_whoops_str:
+ .asciz "in4_cksum: out of mbufs\n"
+ .align 5
+#endif /* INET */
+#endif
+
+
+/*
+ * The main in*_cksum() workhorse...
+ *
+ * Entry parameters:
+ * r0 Pointer to buffer
+ * r1 Buffer length
+ * lr Return address
+ *
+ * Returns:
+ * r2 Accumulated 32-bit sum
+ *
+ * Clobbers:
+ * r0-r7
+ */
+/* LINTSTUB: Ignore */
+ASENTRY_NP(L_cksumdata)
+#ifdef __XSCALE__
+ pld [r0] /* Pre-fetch the start of the buffer */
+#endif
+ mov r2, #0
+
+ /* We first have to word-align the buffer. */
+ ands r7, r0, #0x03
+ beq .Lcksumdata_wordaligned
+ rsb r7, r7, #0x04
+ cmp r1, r7 /* Enough bytes left to make it? */
+ blt .Lcksumdata_endgame
+ cmp r7, #0x02
+ ldrb r4, [r0], #0x01 /* Fetch 1st byte */
+ ldrgeb r5, [r0], #0x01 /* Fetch 2nd byte */
+ movlt r5, #0x00
+ ldrgtb r6, [r0], #0x01 /* Fetch 3rd byte */
+ movle r6, #0x00
+ /* Combine the three bytes depending on endianness and alignment */
+#ifdef __ARMEB__
+ orreq r2, r5, r4, lsl #8
+ orreq r2, r2, r6, lsl #24
+ orrne r2, r4, r5, lsl #8
+ orrne r2, r2, r6, lsl #16
+#else
+ orreq r2, r4, r5, lsl #8
+ orreq r2, r2, r6, lsl #16
+ orrne r2, r5, r4, lsl #8
+ orrne r2, r2, r6, lsl #24
+#endif
+ subs r1, r1, r7 /* Update length */
+ moveq pc, lr /* All done? */
+
+ /* Buffer is now word aligned */
+.Lcksumdata_wordaligned:
+#ifdef __XSCALE__
+ cmp r1, #0x04 /* Less than 4 bytes left? */
+ blt .Lcksumdata_endgame /* Yup */
+
+ /* Now quad-align, if necessary */
+ ands r7, r0, #0x04
+ ldrne r7, [r0], #0x04
+ subne r1, r1, #0x04
+ subs r1, r1, #0x40
+ blt .Lcksumdata_bigloop_end /* Note: C flag clear if branch taken */
+
+ /*
+ * Buffer is now quad aligned. Sum 64 bytes at a time.
+ * Note: First ldrd is hoisted above the loop, together with
+ * setting r6 to zero to avoid stalling for results in the
+ * loop. (r7 is live, from above).
+ */
+ ldrd r4, [r0], #0x08
+ mov r6, #0x00
+.Lcksumdata_bigloop:
+ pld [r0, #0x18]
+ adds r2, r2, r6
+ adcs r2, r2, r7
+ ldrd r6, [r0], #0x08
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldrd r4, [r0], #0x08
+ adcs r2, r2, r6
+ adcs r2, r2, r7
+ ldrd r6, [r0], #0x08
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldrd r4, [r0], #0x08
+ adcs r2, r2, r6
+ adcs r2, r2, r7
+ pld [r0, #0x18]
+ ldrd r6, [r0], #0x08
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldrd r4, [r0], #0x08
+ adcs r2, r2, r6
+ adcs r2, r2, r7
+ ldrd r6, [r0], #0x08
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adc r2, r2, #0x00
+ subs r1, r1, #0x40
+ ldrged r4, [r0], #0x08
+ bge .Lcksumdata_bigloop
+
+ adds r2, r2, r6 /* r6/r7 still need summing */
+.Lcksumdata_bigloop_end:
+ adcs r2, r2, r7
+ adc r2, r2, #0x00
+
+#else /* !__XSCALE__ */
+
+ subs r1, r1, #0x40
+ blt .Lcksumdata_bigloop_end
+
+.Lcksumdata_bigloop:
+ ldmia r0!, {r3, r4, r5, r6}
+ adds r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldmia r0!, {r3, r4, r5, r7}
+ adcs r2, r2, r6
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldmia r0!, {r3, r4, r5, r6}
+ adcs r2, r2, r7
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldmia r0!, {r3, r4, r5, r7}
+ adcs r2, r2, r6
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adcs r2, r2, r7
+ adc r2, r2, #0x00
+ subs r1, r1, #0x40
+ bge .Lcksumdata_bigloop
+.Lcksumdata_bigloop_end:
+#endif
+
+ adds r1, r1, #0x40
+ moveq pc, lr
+ cmp r1, #0x20
+
+#ifdef __XSCALE__
+ ldrged r4, [r0], #0x08 /* Avoid stalling pld and result */
+ blt .Lcksumdata_less_than_32
+ pld [r0, #0x18]
+ ldrd r6, [r0], #0x08
+ adds r2, r2, r4
+ adcs r2, r2, r5
+ ldrd r4, [r0], #0x08
+ adcs r2, r2, r6
+ adcs r2, r2, r7
+ ldrd r6, [r0], #0x08
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adcs r2, r2, r6 /* XXX: Unavoidable result stall */
+ adcs r2, r2, r7
+#else
+ blt .Lcksumdata_less_than_32
+ ldmia r0!, {r3, r4, r5, r6}
+ adds r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ ldmia r0!, {r3, r4, r5, r7}
+ adcs r2, r2, r6
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adcs r2, r2, r7
+#endif
+ adc r2, r2, #0x00
+ subs r1, r1, #0x20
+ moveq pc, lr
+
+.Lcksumdata_less_than_32:
+ /* There are less than 32 bytes left */
+ and r3, r1, #0x18
+ rsb r4, r3, #0x18
+ sub r1, r1, r3
+ adds r4, r4, r4, lsr #1 /* Side effect: Clear carry flag */
+ addne pc, pc, r4
+
+/*
+ * Note: We use ldm here, even on Xscale, since the combined issue/result
+ * latencies for ldm and ldrd are the same. Using ldm avoids needless #ifdefs.
+ */
+ /* At least 24 bytes remaining... */
+ ldmia r0!, {r4, r5}
+ nop
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+
+ /* At least 16 bytes remaining... */
+ ldmia r0!, {r4, r5}
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+
+ /* At least 8 bytes remaining... */
+ ldmia r0!, {r4, r5}
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+
+ /* Less than 8 bytes remaining... */
+ adc r2, r2, #0x00
+ subs r1, r1, #0x04
+ blt .Lcksumdata_lessthan4
+
+ ldr r4, [r0], #0x04
+ sub r1, r1, #0x04
+ adds r2, r2, r4
+ adc r2, r2, #0x00
+
+ /* Deal with < 4 bytes remaining */
+.Lcksumdata_lessthan4:
+ adds r1, r1, #0x04
+ moveq pc, lr
+
+ /* Deal with 1 to 3 remaining bytes, possibly misaligned */
+.Lcksumdata_endgame:
+ ldrb r3, [r0] /* Fetch first byte */
+ cmp r1, #0x02
+ ldrgeb r4, [r0, #0x01] /* Fetch 2nd and 3rd as necessary */
+ movlt r4, #0x00
+ ldrgtb r5, [r0, #0x02]
+ movle r5, #0x00
+ /* Combine the three bytes depending on endianness and alignment */
+ tst r0, #0x01
+#ifdef __ARMEB__
+ orreq r3, r4, r3, lsl #8
+ orreq r3, r3, r5, lsl #24
+ orrne r3, r3, r4, lsl #8
+ orrne r3, r3, r5, lsl #16
+#else
+ orreq r3, r3, r4, lsl #8
+ orreq r3, r3, r5, lsl #16
+ orrne r3, r4, r3, lsl #8
+ orrne r3, r3, r5, lsl #24
+#endif
+ adds r2, r2, r3
+ adc r2, r2, #0x00
+ mov pc, lr
diff --git a/sys/arch/arm/arm/irq_dispatch.S b/sys/arch/arm/arm/irq_dispatch.S
new file mode 100644
index 00000000000..72c0dcb4879
--- /dev/null
+++ b/sys/arch/arm/arm/irq_dispatch.S
@@ -0,0 +1,155 @@
+/* $OpenBSD: irq_dispatch.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: irq_dispatch.S,v 1.5 2003/10/30 08:57:24 scw Exp $^I*/$
+
+/*
+ * Copyright (c) 2002 Fujitsu Component Limited
+ * Copyright (c) 2002 Genetec Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of The Fujitsu Component Limited nor the name of
+ * Genetec corporation may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY FUJITSU COMPONENT LIMITED AND GENETEC
+ * CORPORATION ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL FUJITSU COMPONENT LIMITED OR GENETEC
+ * CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "assym.h"
+
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/frame.h>
+
+#ifdef FOOTBRIDGE_INTR
+#include <arm/footbridge/footbridge_intr.h>
+#else
+#error ARM_INTR_IMPL not defined
+#endif
+
+#ifndef ARM_IRQ_HANDLER
+#error ARM_IRQ_HANDLER not defined
+#endif
+
+/*
+ * irq_entry:
+ * Main entry point for the IRQ vector. This is a generic version
+ * which can be used by different platforms.
+ */
+ .text
+ .align 0
+.Lcurrent_intr_depth:
+ .word _C_LABEL(current_intr_depth)
+
+AST_ALIGNMENT_FAULT_LOCALS
+
+ASENTRY_NP(irq_entry)
+ sub lr, lr, #0x00000004 /* Adjust the lr */
+
+ PUSHFRAMEINSVC /* Push an interrupt frame */
+ ENABLE_ALIGNMENT_FAULTS
+
+ /*
+ * Increment the interrupt nesting depth and call the interrupt
+ * dispatch routine. We've pushed a frame, so we can safely use
+ * callee-saved regs here. We use the following registers, which
+ * we expect to persist:
+ *
+ * r5 address of `current_intr_depth' variable
+ * r6 old value of `current_intr_depth'
+ */
+ ldr r5, .Lcurrent_intr_depth
+ mov r0, sp /* arg for dispatcher */
+ ldr r6, [r5]
+ add r1, r6, #1
+ str r1, [r5]
+
+ bl ARM_IRQ_HANDLER
+
+ /*
+ * Restore the old interrupt depth value (which should be the
+ * same as decrementing it at this point).
+ */
+ str r6, [r5]
+
+ DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
+ PULLFRAMEFROMSVCANDEXIT
+ movs pc, lr /* Exit */
+
+ .bss
+ .align 0
+
+ .global _C_LABEL(astpending)
+_C_LABEL(astpending):
+ .word 0
+
+ .global _C_LABEL(current_intr_depth)
+_C_LABEL(current_intr_depth):
+ .word 0
+
+ /*
+ * XXX Provide intrnames/intrcnt for legacy code, but
+ * don't actually use them.
+ */
+
+ .global _C_LABEL(intrnames), _C_LABEL(eintrnames)
+ .global _C_LABEL(intrcnt), _C_LABEL(eintrcnt)
+_C_LABEL(intrnames):
+_C_LABEL(eintrnames):
+
+ .global _C_LABEL(intrcnt), _C_LABEL(sintrcnt), _C_LABEL(eintrcnt)
+_C_LABEL(intrcnt):
+_C_LABEL(eintrcnt):
diff --git a/sys/arch/arm/arm/locore.S b/sys/arch/arm/arm/locore.S
new file mode 100644
index 00000000000..7e7cb356301
--- /dev/null
+++ b/sys/arch/arm/arm/locore.S
@@ -0,0 +1,215 @@
+/* $OpenBSD: locore.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $^I*/$
+
+/*
+ * Copyright (C) 1994-1997 Mark Brinicombe
+ * Copyright (C) 1994 Brini
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of Brini may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "assym.h"
+#include <sys/syscall.h>
+#include <sys/errno.h>
+#include <machine/asm.h>
+#include <machine/cpu.h>
+#include <machine/frame.h>
+
+/* What size should this really be ? It is only used by init_arm() */
+#define INIT_ARM_STACK_SIZE 2048
+
+/*
+ * This is for kvm_mkdb, and should be the address of the beginning
+ * of the kernel text segment (not necessarily the same as kernbase).
+ */
+
+ .text
+ .align 0
+
+ENTRY_NP(kernel_text)
+
+ASENTRY_NP(start)
+ adr r1, .Lstart
+ ldmia r1, {r1, r2, sp} /* Set initial stack and */
+ sub r2, r2, r1 /* get zero init data */
+ mov r3, #0
+
+.L1:
+ str r3, [r1], #0x0004 /* Zero the bss */
+ subs r2, r2, #4
+ bgt .L1
+
+ mov fp, #0x00000000 /* trace back starts here */
+ bl _C_LABEL(initarm) /* Off we go */
+
+ /* init arm will return the new stack pointer. */
+ mov sp, r0
+
+ mov fp, #0x00000000 /* trace back starts here */
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+
+ bl _C_LABEL(main) /* call main()! */
+
+ adr r0, .Lmainreturned
+ b _C_LABEL(panic)
+	/* NOTREACHED */
+
+.Lstart:
+ .word _edata
+ .word _end
+ .word svcstk + INIT_ARM_STACK_SIZE
+
+.Lmainreturned:
+ .asciz "main() returned"
+ .align 0
+
+ .bss
+svcstk:
+ .space INIT_ARM_STACK_SIZE
+
+ .text
+ .align 0
+
+#ifndef OFW
+	/* OFW based systems will use OF_boot() */
+
+.Lcpufuncs:
+ .word _C_LABEL(cpufuncs)
+
+ENTRY_NP(cpu_reset)
+ mrs r2, cpsr
+ bic r2, r2, #(PSR_MODE)
+ orr r2, r2, #(PSR_SVC32_MODE)
+ orr r2, r2, #(I32_bit | F32_bit)
+ msr cpsr_all, r2
+
+ ldr r4, .Lcpu_reset_address
+ ldr r4, [r4]
+
+ ldr r0, .Lcpufuncs
+ mov lr, pc
+ ldr pc, [r0, #CF_IDCACHE_WBINV_ALL]
+
+ /*
+ * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
+ * necessary.
+ */
+
+ ldr r1, .Lcpu_reset_needs_v4_MMU_disable
+ ldr r1, [r1]
+ cmp r1, #0
+ mov r2, #0
+
+ /*
+ * MMU & IDC off, 32 bit program & data space
+ * Hurl ourselves into the ROM
+ */
+ mov r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
+ mcr 15, 0, r0, c1, c0, 0
+ mcrne 15, 0, r2, c8, c7, 0 /* nail I+D TLB on ARMv4 and greater */
+ mov pc, r4
+
+ /*
+ * _cpu_reset_address contains the address to branch to, to complete
+ * the cpu reset after turning the MMU off
+ * This variable is provided by the hardware specific code
+ */
+.Lcpu_reset_address:
+ .word _C_LABEL(cpu_reset_address)
+
+ /*
+ * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
+ * v4 MMU disable instruction needs executing... it is an illegal instruction
+ * on f.e. ARM6/7 that locks up the computer in an endless illegal
+ * instruction / data-abort / reset loop.
+ */
+.Lcpu_reset_needs_v4_MMU_disable:
+ .word _C_LABEL(cpu_reset_needs_v4_MMU_disable)
+
+#endif /* OFW */
+
+#ifdef IPKDB
+/*
+ * Execute(inst, psr, args, sp)
+ *
+ * Execute INSTruction with PSR and ARGS[0] - ARGS[3] making
+ * available stack at SP for next undefined instruction trap.
+ *
+ * Move the instruction onto the stack and jump to it.
+ */
+ENTRY_NP(Execute)
+ mov ip, sp
+ stmfd sp!, {r2, r4-r7, fp, ip, lr, pc}
+ sub fp, ip, #4
+ mov ip, r3
+ ldr r7, .Lreturn
+ stmfd sp!, {r0, r7}
+ adr r7, #.LExec
+ mov r5, r1
+ mrs r4, cpsr
+ ldmia r2, {r0-r3}
+ mov r6, sp
+ mov sp, ip
+ msr cpsr_all, r5
+ mov pc, r6
+.LExec:
+ mrs r5, cpsr
+/* XXX Cannot switch thus easily back from user mode */
+ msr cpsr_all, r4
+ add sp, r6, #8
+ ldmfd sp!, {r6}
+ stmia r6, {r0-r3}
+ mov r0, r5
+ ldmdb fp, {r4-r7, fp, sp, pc}
+.Lreturn:
+ mov pc, r7
+#endif
+
+/*
+ * setjmp + longjmp
+ */
+ENTRY(setjmp)
+ stmia r0, {r4-r14}
+ mov r0, #0x00000000
+ mov pc, lr
+
+ENTRY(longjmp)
+ ldmia r0, {r4-r14}
+ mov r0, #0x00000001
+ mov pc, lr
+
+ .data
+ .global _C_LABEL(esym)
+_C_LABEL(esym): .word _C_LABEL(end)
+
+ENTRY_NP(abort)
+ b _C_LABEL(abort)
+
+
+/* End of locore.S */
diff --git a/sys/arch/arm/arm/mem.c b/sys/arch/arm/arm/mem.c
new file mode 100644
index 00000000000..3e0936d7d3b
--- /dev/null
+++ b/sys/arch/arm/arm/mem.c
@@ -0,0 +1,249 @@
+/* $OpenBSD: mem.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: mem.c,v 1.11 2003/10/16 12:02:58 jdolecek Exp $^I*/$
+
+/*
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1988 University of Utah.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Memory special file
+ */
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/buf.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/fcntl.h>
+
+#include <machine/cpu.h>
+#include <arm/conf.h>
+
+#include <uvm/uvm_extern.h>
+
+extern char *memhook; /* poor name! */
+caddr_t zeropage;
+int physlock;
+
+/*ARGSUSED*/
+int
+mmopen(dev, flag, mode, p)
+ dev_t dev;
+ int flag, mode;
+ struct proc *p;
+{
+ switch (minor(dev)) {
+ default:
+ break;
+ }
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+mmclose(dev, flag, mode, p)
+ dev_t dev;
+ int flag, mode;
+ struct proc *p;
+{
+
+ return (0);
+}
+#define DEV_MEM 0
+#define DEV_KMEM 1
+#define DEV_NULL 2
+#define DEV_ZERO 12
+
+/*ARGSUSED*/
+int
+mmrw(dev, uio, flags)
+ dev_t dev;
+ struct uio *uio;
+ int flags;
+{
+ register vaddr_t o, v;
+ register int c;
+ register struct iovec *iov;
+ int error = 0;
+ vm_prot_t prot;
+
+ if (minor(dev) == DEV_MEM) {
+ /* lock against other uses of shared vmmap */
+ while (physlock > 0) {
+ physlock++;
+ error = tsleep((caddr_t)&physlock, PZERO | PCATCH,
+ "mmrw", 0);
+ if (error)
+ return (error);
+ }
+ physlock = 1;
+ }
+ while (uio->uio_resid > 0 && error == 0) {
+ iov = uio->uio_iov;
+ if (iov->iov_len == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ if (uio->uio_iovcnt < 0)
+ panic("mmrw");
+ continue;
+ }
+ switch (minor(dev)) {
+
+ case DEV_MEM:
+ v = uio->uio_offset;
+ prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
+ VM_PROT_WRITE;
+ pmap_enter(pmap_kernel(), (vaddr_t)memhook,
+ trunc_page(v), prot, prot|PMAP_WIRED);
+ pmap_update(pmap_kernel());
+ o = uio->uio_offset & PGOFSET;
+ c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
+ error = uiomove((caddr_t)memhook + o, c, uio);
+ pmap_remove(pmap_kernel(), (vaddr_t)memhook,
+ (vaddr_t)memhook + PAGE_SIZE);
+ pmap_update(pmap_kernel());
+ break;
+
+ case DEV_KMEM:
+ v = uio->uio_offset;
+ c = min(iov->iov_len, MAXPHYS);
+ if (!uvm_kernacc((caddr_t)v, c,
+ uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
+ return (EFAULT);
+ error = uiomove((caddr_t)v, c, uio);
+ break;
+
+ case DEV_NULL:
+ if (uio->uio_rw == UIO_WRITE)
+ uio->uio_resid = 0;
+ return (0);
+
+#ifdef COMPAT_16
+ case _DEV_ZERO_oARM:
+#endif
+ case DEV_ZERO:
+ if (uio->uio_rw == UIO_WRITE) {
+ uio->uio_resid = 0;
+ return (0);
+ }
+ if (zeropage == NULL) {
+ zeropage = (caddr_t)
+ malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
+ memset(zeropage, 0, PAGE_SIZE);
+ }
+ c = min(iov->iov_len, PAGE_SIZE);
+ error = uiomove(zeropage, c, uio);
+ break;
+
+ default:
+ return (ENXIO);
+ }
+ }
+ if (minor(dev) == DEV_MEM) {
+/*unlock:*/
+ if (physlock > 1)
+ wakeup((caddr_t)&physlock);
+ physlock = 0;
+ }
+ return (error);
+}
+
+paddr_t
+mmmmap(dev, off, prot)
+ dev_t dev;
+ off_t off;
+ int prot;
+{
+ struct proc *p = curproc; /* XXX */
+
+ /*
+ * /dev/mem is the only one that makes sense through this
+ * interface. For /dev/kmem any physaddr we return here
+ * could be transient and hence incorrect or invalid at
+ * a later time. /dev/null just doesn't make any sense
+ * and /dev/zero is a hack that is handled via the default
+ * pager in mmap().
+ */
+ if (minor(dev) != DEV_MEM)
+ return (-1);
+
+ /* minor device 0 is physical memory */
+
+ if (off >= ctob(physmem) &&
+ suser(p, 0) != 0)
+ return -1;
+ return arm_btop(off);
+}
+/*ARGSUSED*/
+int
+mmioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ return (EOPNOTSUPP);
+}
+
diff --git a/sys/arch/arm/arm/pmap.c b/sys/arch/arm/arm/pmap.c
new file mode 100644
index 00000000000..6c1c4fc23b8
--- /dev/null
+++ b/sys/arch/arm/arm/pmap.c
@@ -0,0 +1,5131 @@
+/* $OpenBSD: pmap.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $^I*/$
+
+/*
+ * Copyright 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2002-2003 Wasabi Systems, Inc.
+ * Copyright (c) 2001 Richard Earnshaw
+ * Copyright (c) 2001-2002 Christopher Gilbert
+ * All rights reserved.
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Charles M. Hannum.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *
+ * RiscBSD kernel project
+ *
+ * pmap.c
+ *
+ * Machine dependent vm stuff
+ *
+ * Created : 20/09/94
+ */
+
+/*
+ * Performance improvements, UVM changes, overhauls and part-rewrites
+ * were contributed by Neil A. Carson <neil@causality.com>.
+ */
+
+/*
+ * Overhauled again to speedup the pmap, use MMU Domains so that L1 tables
+ * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi
+ * Systems, Inc.
+ *
+ * There are still a few things outstanding at this time:
+ *
+ * - There are some unresolved issues for MP systems:
+ *
+ * o The L1 metadata needs a lock, or more specifically, some places
+ * need to acquire an exclusive lock when modifying L1 translation
+ * table entries.
+ *
+ * o When one cpu modifies an L1 entry, and that L1 table is also
+ * being used by another cpu, then the latter will need to be told
+ * that a tlb invalidation may be necessary. (But only if the old
+ * domain number in the L1 entry being over-written is currently
+ * the active domain on that cpu). I guess there are lots more tlb
+ * shootdown issues too...
+ *
+ * o If the vector_page is at 0x00000000 instead of 0xffff0000, then
+ * MP systems will lose big-time because of the MMU domain hack.
+ * The only way this can be solved (apart from moving the vector
+ * page to 0xffff0000) is to reserve the first 1MB of user address
+ * space for kernel use only. This would require re-linking all
+ * applications so that the text section starts above this 1MB
+ * boundary.
+ *
+ * o Tracking which VM space is resident in the cache/tlb has not yet
+ * been implemented for MP systems.
+ *
+ * o Finally, there is a pathological condition where two cpus running
+ * two separate processes (not procs) which happen to share an L1
+ * can get into a fight over one or more L1 entries. This will result
+ * in a significant slow-down if both processes are in tight loops.
+ */
+
+/*
+ * Special compilation symbols
+ * PMAP_DEBUG - Build in pmap_debug_level code
+ */
+
+/* Include header files */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/user.h>
+#include <sys/pool.h>
+#include <sys/cdefs.h>
+
+#include <uvm/uvm.h>
+
+#include <machine/bus.h>
+#include <machine/pmap.h>
+#include <machine/pcb.h>
+#include <machine/param.h>
+#include <arm/katelib.h>
+#include <arm/cpufunc.h>
+
+#ifdef PMAP_DEBUG
+
+/* XXX need to get rid of all refs to this */
+int pmap_debug_level = 0;
+
+/*
+ * for switching to potentially finer grained debugging
+ */
+#define PDB_FOLLOW 0x0001
+#define PDB_INIT 0x0002
+#define PDB_ENTER 0x0004
+#define PDB_REMOVE 0x0008
+#define PDB_CREATE 0x0010
+#define PDB_PTPAGE 0x0020
+#define PDB_GROWKERN 0x0040
+#define PDB_BITS 0x0080
+#define PDB_COLLECT 0x0100
+#define PDB_PROTECT 0x0200
+#define PDB_MAP_L1 0x0400
+#define PDB_BOOTSTRAP 0x1000
+#define PDB_PARANOIA 0x2000
+#define PDB_WIRING 0x4000
+#define PDB_PVDUMP 0x8000
+#define PDB_VAC 0x10000
+#define PDB_KENTER 0x20000
+#define PDB_KREMOVE 0x40000
+
+int debugmap = 1;
+int pmapdebug = 0;
+#define NPDEBUG(_lev_,_stat_) \
+ if (pmapdebug & (_lev_)) \
+ ((_stat_))
+
+#else /* PMAP_DEBUG */
+#define NPDEBUG(_lev_,_stat_) /* Nothing */
+#endif /* PMAP_DEBUG */
+
+/*
+ * pmap_kernel() points here
+ */
+struct pmap kernel_pmap_store;
+
+/*
+ * Which pmap is currently 'live' in the cache
+ *
+ * XXXSCW: Fix for SMP ...
+ */
+union pmap_cache_state *pmap_cache_state;
+
+/*
+ * Pool and cache that pmap structures are allocated from.
+ * We use a cache to avoid clearing the pm_l2[] array (1KB)
+ * in pmap_create().
+ */
+static struct pool pmap_pmap_pool;
+static struct pool_cache pmap_pmap_cache;
+static LIST_HEAD(, pmap) pmap_pmaps;
+
+/*
+ * Pool of PV structures
+ */
+static struct pool pmap_pv_pool;
+void *pmap_bootstrap_pv_page_alloc(struct pool *, int);
+void pmap_bootstrap_pv_page_free(struct pool *, void *);
+struct pool_allocator pmap_bootstrap_pv_allocator = {
+ pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
+};
+
+/*
+ * Pool and cache of l2_dtable structures.
+ * We use a cache to avoid clearing the structures when they're
+ * allocated. (196 bytes)
+ */
+static struct pool pmap_l2dtable_pool;
+static struct pool_cache pmap_l2dtable_cache;
+static vaddr_t pmap_kernel_l2dtable_kva;
+
+/*
+ * Pool and cache of L2 page descriptors.
+ * We use a cache to avoid clearing the descriptor table
+ * when they're allocated. (1KB)
+ */
+static struct pool pmap_l2ptp_pool;
+static struct pool_cache pmap_l2ptp_cache;
+static vaddr_t pmap_kernel_l2ptp_kva;
+static paddr_t pmap_kernel_l2ptp_phys;
+
+/*
+ * pmap copy/zero page, and mem(5) hook point
+ */
+static pt_entry_t *csrc_pte, *cdst_pte;
+static vaddr_t csrcp, cdstp;
+char *memhook;
+extern caddr_t msgbufaddr;
+
+/*
+ * Flag to indicate if pmap_init() has done its thing
+ */
+boolean_t pmap_initialized;
+
+/*
+ * Misc. locking data structures
+ */
+
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+static struct lock pmap_main_lock;
+
+#define PMAP_MAP_TO_HEAD_LOCK() \
+ (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
+#define PMAP_MAP_TO_HEAD_UNLOCK() \
+ (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
+#define PMAP_HEAD_TO_MAP_LOCK() \
+ (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
+#define PMAP_HEAD_TO_MAP_UNLOCK() \
+ spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
+#else
+#define PMAP_MAP_TO_HEAD_LOCK() /* null */
+#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
+#define PMAP_HEAD_TO_MAP_LOCK() /* null */
+#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
+#endif
+
+#define pmap_acquire_pmap_lock(pm) \
+ do { \
+ if ((pm) != pmap_kernel()) \
+ simple_lock(&(pm)->pm_lock); \
+ } while (/*CONSTCOND*/0)
+
+#define pmap_release_pmap_lock(pm) \
+ do { \
+ if ((pm) != pmap_kernel()) \
+ simple_unlock(&(pm)->pm_lock); \
+ } while (/*CONSTCOND*/0)
+
+
+/*
+ * Metadata for L1 translation tables.
+ */
+struct l1_ttable {
+ /* Entry on the L1 Table list */
+ SLIST_ENTRY(l1_ttable) l1_link;
+
+ /* Entry on the L1 Least Recently Used list */
+ TAILQ_ENTRY(l1_ttable) l1_lru;
+
+ /* Track how many domains are allocated from this L1 */
+ volatile u_int l1_domain_use_count;
+
+ /*
+ * A free-list of domain numbers for this L1.
+ * We avoid using ffs() and a bitmap to track domains since ffs()
+ * is slow on ARM.
+ */
+ u_int8_t l1_domain_first;
+ u_int8_t l1_domain_free[PMAP_DOMAINS];
+
+ /* Physical address of this L1 page table */
+ paddr_t l1_physaddr;
+
+ /* KVA of this L1 page table */
+ pd_entry_t *l1_kva;
+};
+
+/*
+ * Convert a virtual address into its L1 table index. That is, the
+ * index used to locate the L2 descriptor table pointer in an L1 table.
+ * This is basically used to index l1->l1_kva[].
+ *
+ * Each L2 descriptor table represents 1MB of VA space.
+ */
+#define L1_IDX(va) (((vaddr_t)(va)) >> L1_S_SHIFT)
+
+/*
+ * L1 Page Tables are tracked using a Least Recently Used list.
+ * - New L1s are allocated from the HEAD.
+ * - Freed L1s are added to the TAIL.
+ * - Recently accessed L1s (where an 'access' is some change to one of
+ * the userland pmaps which owns this L1) are moved to the TAIL.
+ */
+static TAILQ_HEAD(, l1_ttable) l1_lru_list;
+static struct simplelock l1_lru_lock;
+
+/*
+ * A list of all L1 tables
+ */
+static SLIST_HEAD(, l1_ttable) l1_list;
+
+/*
+ * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
+ *
+ * This is normally 16MB worth L2 page descriptors for any given pmap.
+ * Reference counts are maintained for L2 descriptors so they can be
+ * freed when empty.
+ */
+struct l2_dtable {
+ /* The number of L2 page descriptors allocated to this l2_dtable */
+ u_int l2_occupancy;
+
+ /* List of L2 page descriptors */
+ struct l2_bucket {
+ pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */
+ paddr_t l2b_phys; /* Physical address of same */
+ u_short l2b_l1idx; /* This L2 table's L1 index */
+ u_short l2b_occupancy; /* How many active descriptors */
+ } l2_bucket[L2_BUCKET_SIZE];
+};
+
+/*
+ * Given an L1 table index, calculate the corresponding l2_dtable index
+ * and bucket index within the l2_dtable.
+ */
+#define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \
+ (L2_SIZE - 1))
+#define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1))
+
+/*
+ * Given a virtual address, this macro returns the
+ * virtual address required to drop into the next L2 bucket.
+ */
+#define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE)
+
+/*
+ * L2 allocation.
+ */
+#define pmap_alloc_l2_dtable() \
+ pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT)
+#define pmap_free_l2_dtable(l2) \
+ pool_cache_put(&pmap_l2dtable_cache, (l2))
+/*
+#define POOL_CACHE_PADDR
+*/
+#ifdef POOL_CACHE_PADDR
+#define pmap_alloc_l2_ptp(pap) \
+ ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\
+ PR_NOWAIT, (pap)))
+#else
+static __inline pt_entry_t *
+pmap_alloc_l2_ptp(paddr_t *pap)
+{
+ pt_entry_t * pted;
+
+ pted = pool_cache_get(&pmap_l2ptp_cache, PR_NOWAIT);
+ *pap = vtophys((vaddr_t)pted);
+ return pted;
+}
+#endif /* POOL_CACHE_PADDR */
+
+/*
+ * We try to map the page tables write-through, if possible. However, not
+ * all CPUs have a write-through cache mode, so on those we have to sync
+ * the cache when we frob page tables.
+ *
+ * We try to evaluate this at compile time, if possible. However, it's
+ * not always possible to do that, hence this run-time var.
+ */
+int pmap_needs_pte_sync;
+
+/*
+ * Real definition of pv_entry.
+ */
+struct pv_entry {
+ struct pv_entry *pv_next; /* next pv_entry */
+ pmap_t pv_pmap; /* pmap where mapping lies */
+ vaddr_t pv_va; /* virtual address for mapping */
+ u_int pv_flags; /* flags */
+};
+
+/*
+ * Macro to determine if a mapping might be resident in the
+ * instruction cache and/or TLB
+ */
+#define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
+
+/*
+ * Macro to determine if a mapping might be resident in the
+ * data cache and/or TLB
+ */
+#define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0)
+
+/*
+ * Local prototypes
+ */
+int pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t);
+void pmap_alloc_specials(vaddr_t *, int, vaddr_t *,
+ pt_entry_t **);
+static boolean_t pmap_is_current(pmap_t);
+static boolean_t pmap_is_cached(pmap_t);
+void pmap_enter_pv(struct vm_page *, struct pv_entry *,
+ pmap_t, vaddr_t, u_int);
+static struct pv_entry *pmap_find_pv(struct vm_page *, pmap_t, vaddr_t);
+struct pv_entry *pmap_remove_pv(struct vm_page *, pmap_t, vaddr_t);
+u_int pmap_modify_pv(struct vm_page *, pmap_t, vaddr_t,
+ u_int, u_int);
+
+void pmap_pinit(pmap_t);
+int pmap_pmap_ctor(void *, void *, int);
+
+void pmap_alloc_l1(pmap_t);
+void pmap_free_l1(pmap_t);
+static void pmap_use_l1(pmap_t);
+
+static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t);
+struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t);
+void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
+int pmap_l2ptp_ctor(void *, void *, int);
+int pmap_l2dtable_ctor(void *, void *, int);
+
+static void pmap_vac_me_harder(struct vm_page *, pmap_t, vaddr_t);
+void pmap_vac_me_kpmap(struct vm_page *, pmap_t, vaddr_t);
+void pmap_vac_me_user(struct vm_page *, pmap_t, vaddr_t);
+
+void pmap_clearbit(struct vm_page *, u_int);
+int pmap_clean_page(struct pv_entry *, boolean_t);
+void pmap_page_remove(struct vm_page *);
+
+void pmap_init_l1(struct l1_ttable *, pd_entry_t *);
+vaddr_t kernel_pt_lookup(paddr_t);
+
+
+/*
+ * External function prototypes
+ */
+extern void bzero_page(vaddr_t);
+extern void bcopy_page(vaddr_t, vaddr_t);
+
+/*
+ * Misc variables
+ */
+vaddr_t virtual_avail;
+vaddr_t virtual_end;
+vaddr_t pmap_curmaxkvaddr;
+
+vaddr_t avail_start;
+vaddr_t avail_end;
+
+extern pv_addr_t systempage;
+
+/* Function to set the debug level of the pmap code */
+
+#ifdef PMAP_DEBUG
+void
+pmap_debug(int level)
+{
+	/* Record the new level and echo it so the change is visible. */
+	pmap_debug_level = level;
+	printf("pmap_debug: level=%d\n", pmap_debug_level);
+}
+#endif /* PMAP_DEBUG */
+
+/*
+ * A bunch of routines to conditionally flush the caches/TLB depending
+ * on whether the specified pmap actually needs to be flushed at any
+ * given time.
+ */
+static __inline void
+pmap_tlb_flushID_SE(pmap_t pm, vaddr_t va)
+{
+
+	/*
+	 * Skip the I+D TLB single-entry flush entirely when the
+	 * pmap's cached TLB state says nothing can be resident.
+	 */
+	if (pm->pm_cstate.cs_tlb_id == 0)
+		return;
+
+	cpu_tlb_flushID_SE(va);
+}
+
+static __inline void
+pmap_tlb_flushD_SE(pmap_t pm, vaddr_t va)
+{
+
+	/* Nothing to do unless the pmap may have a live D-TLB entry. */
+	if (pm->pm_cstate.cs_tlb_d == 0)
+		return;
+
+	cpu_tlb_flushD_SE(va);
+}
+
+static __inline void
+pmap_tlb_flushID(pmap_t pm)
+{
+
+	if (pm->pm_cstate.cs_tlb_id == 0)
+		return;
+
+	cpu_tlb_flushID();
+
+	/*
+	 * The full ID flush empties both instruction and data TLBs,
+	 * so clear the combined TLB state, not just cs_tlb_id.
+	 */
+	pm->pm_cstate.cs_tlb = 0;
+}
+
+static __inline void
+pmap_tlb_flushD(pmap_t pm)
+{
+
+ if (pm->pm_cstate.cs_tlb_d) {
+ cpu_tlb_flushD();
+ pm->pm_cstate.cs_tlb_d = 0;
+ }
+}
+
+static __inline void
+pmap_idcache_wbinv_range(pmap_t pm, vaddr_t va, vsize_t len)
+{
+
+	/* Skip the writeback/invalidate if nothing can be cached. */
+	if (pm->pm_cstate.cs_cache_id == 0)
+		return;
+
+	cpu_idcache_wbinv_range(va, len);
+}
+
+static __inline void
+pmap_dcache_wb_range(pmap_t pm, vaddr_t va, vsize_t len,
+    boolean_t do_inv, boolean_t rd_only)
+{
+
+	/* No work unless the pmap may have live D-cache contents. */
+	if (pm->pm_cstate.cs_cache_d == 0)
+		return;
+
+	/*
+	 * Pick the cheapest primitive that satisfies the request:
+	 * read-only data needs no writeback before invalidation, and
+	 * read-only data with no invalidation requested needs nothing.
+	 */
+	if (do_inv && rd_only)
+		cpu_dcache_inv_range(va, len);
+	else if (do_inv)
+		cpu_dcache_wbinv_range(va, len);
+	else if (!rd_only)
+		cpu_dcache_wb_range(va, len);
+}
+
+static __inline void
+pmap_idcache_wbinv_all(pmap_t pm)
+{
+
+	if (pm->pm_cstate.cs_cache_id == 0)
+		return;
+
+	cpu_idcache_wbinv_all();
+
+	/* Both I and D caches are now clean; clear the combined state. */
+	pm->pm_cstate.cs_cache = 0;
+}
+
+static __inline void
+pmap_dcache_wbinv_all(pmap_t pm)
+{
+
+	if (pm->pm_cstate.cs_cache_d == 0)
+		return;
+
+	cpu_dcache_wbinv_all();
+
+	/* Only the data-cache state is cleared by a D-only flush. */
+	pm->pm_cstate.cs_cache_d = 0;
+}
+
+static __inline boolean_t
+pmap_is_current(pmap_t pm)
+{
+
+	/* The kernel pmap is always current. */
+	if (pm == pmap_kernel())
+		return (TRUE);
+
+	/* A user pmap is current iff it backs curproc's address space. */
+	if (curproc && curproc->p_vmspace->vm_map.pmap == pm)
+		return (TRUE);
+
+	return (FALSE);
+}
+
+static __inline boolean_t
+pmap_is_cached(pmap_t pm)
+{
+
+	/* The kernel pmap is always considered cached. */
+	if (pm == pmap_kernel())
+		return (TRUE);
+
+	/*
+	 * Otherwise, the pmap may be cached when no cache state is
+	 * tracked at all, or when the tracked state is this pmap's.
+	 */
+	if (pmap_cache_state == NULL || pmap_cache_state == &pm->pm_cstate)
+		return (TRUE);
+
+	return (FALSE);
+}
+
+/*
+ * PTE_SYNC_CURRENT:
+ *
+ * Make sure the pte is written out to RAM.
+ * We need to do this in any of the following cases:
+ * - We're dealing with the kernel pmap
+ * - There is no pmap active in the cache/tlb.
+ * - The specified pmap is 'active' in the cache/tlb.
+ */
+#ifdef PMAP_INCLUDE_PTE_SYNC
+#define PTE_SYNC_CURRENT(pm, ptep) \
+do { \
+ if (PMAP_NEEDS_PTE_SYNC && \
+ pmap_is_cached(pm)) \
+ PTE_SYNC(ptep); \
+} while (/*CONSTCOND*/0)
+#else
+#define PTE_SYNC_CURRENT(pm, ptep) /* nothing */
+#endif
+
+/*
+ * main pv_entry manipulation functions:
+ * pmap_enter_pv: enter a mapping onto a vm_page list
+ * pmap_remove_pv: remove a mapping from a vm_page list
+ *
+ * NOTE: pmap_enter_pv expects to lock the pvh itself
+ * pmap_remove_pv expects the caller to lock the pvh before calling
+ */
+
+/*
+ * pmap_enter_pv: enter a mapping onto a vm_page list
+ *
+ * => caller should hold the proper lock on pmap_main_lock
+ * => caller should have pmap locked
+ * => we will gain the lock on the vm_page and allocate the new pv_entry
+ * => caller should adjust ptp's wire_count before calling
+ * => caller should not adjust pmap's wire_count
+ */
+void
+pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
+    vaddr_t va, u_int flags)
+{
+
+	NPDEBUG(PDB_PVDUMP,
+	    printf("pmap_enter_pv: pm %p, pg %p, flags 0x%x\n", pm, pg, flags));
+
+	/* Fill in the pv_entry before linking it onto the page's list. */
+	pve->pv_pmap = pm;
+	pve->pv_va = va;
+	pve->pv_flags = flags;
+
+	simple_lock(&pg->mdpage.pvh_slock);	/* lock vm_page */
+	pve->pv_next = pg->mdpage.pvh_list;	/* add to ... */
+	pg->mdpage.pvh_list = pve;		/* ... locked list */
+	/* Record any reference/modification implied by the new mapping. */
+	pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
+	/* Maintain the per-page kernel/user, r/w vs r/o mapping counts. */
+	if (pm == pmap_kernel()) {
+		if (flags & PVF_WRITE)
+			pg->mdpage.krw_mappings++;
+		else
+			pg->mdpage.kro_mappings++;
+	} else
+	if (flags & PVF_WRITE)
+		pg->mdpage.urw_mappings++;
+	else
+		pg->mdpage.uro_mappings++;
+	simple_unlock(&pg->mdpage.pvh_slock);	/* unlock, done! */
+
+	/* Wired mappings are counted on the pmap, not on the page. */
+	if (pve->pv_flags & PVF_WIRED)
+		++pm->pm_stats.wired_count;
+}
+
+/*
+ *
+ * pmap_find_pv: Find a pv entry
+ *
+ * => caller should hold lock on vm_page
+ */
+static __inline struct pv_entry *
+pmap_find_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
+{
+	struct pv_entry *pv = pg->mdpage.pvh_list;
+
+	/*
+	 * Walk the page's pv list until the entry matching the
+	 * (pmap, va) pair is found; NULL if no such mapping exists.
+	 */
+	while (pv != NULL && (pv->pv_pmap != pm || pv->pv_va != va))
+		pv = pv->pv_next;
+
+	return (pv);
+}
+
+/*
+ * pmap_remove_pv: try to remove a mapping from a pv_list
+ *
+ * => caller should hold proper lock on pmap_main_lock
+ * => pmap should be locked
+ * => caller should hold lock on vm_page [so that attrs can be adjusted]
+ * => caller should adjust ptp's wire_count and free PTP if needed
+ * => caller should NOT adjust pmap's wire_count
+ * => we return the removed pve
+ */
+struct pv_entry *
+pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
+{
+	struct pv_entry *pve, **prevptr;
+
+	NPDEBUG(PDB_PVDUMP,
+	    printf("pmap_remove_pv: pm %p, pg %p, va 0x%08lx\n", pm, pg, va));
+
+	prevptr = &pg->mdpage.pvh_list;		/* previous pv_entry pointer */
+	pve = *prevptr;
+
+	/* Walk the pv list looking for the (pmap, va) pair. */
+	while (pve) {
+		if (pve->pv_pmap == pm && pve->pv_va == va) {	/* match? */
+			NPDEBUG(PDB_PVDUMP,
+			    printf("pmap_remove_pv: pm %p, pg %p, flags 0x%x\n", pm, pg, pve->pv_flags));
+			*prevptr = pve->pv_next;		/* remove it! */
+			/* Undo the wired accounting made at enter time. */
+			if (pve->pv_flags & PVF_WIRED)
+			    --pm->pm_stats.wired_count;
+			/* Keep the per-page alias counts in step. */
+			if (pm == pmap_kernel()) {
+				if (pve->pv_flags & PVF_WRITE)
+					pg->mdpage.krw_mappings--;
+				else
+					pg->mdpage.kro_mappings--;
+			} else
+			if (pve->pv_flags & PVF_WRITE)
+				pg->mdpage.urw_mappings--;
+			else
+				pg->mdpage.uro_mappings--;
+			break;
+		}
+		prevptr = &pve->pv_next;		/* previous pointer */
+		pve = pve->pv_next;			/* advance */
+	}
+
+	return(pve);				/* return removed pve */
+}
+
+/*
+ *
+ * pmap_modify_pv: Update pv flags
+ *
+ * => caller should hold lock on vm_page [so that attrs can be adjusted]
+ * => caller should NOT adjust pmap's wire_count
+ * => caller must call pmap_vac_me_harder() if writable status of a page
+ * may have changed.
+ * => we return the old flags
+ *
+ * Modify a physical-virtual mapping in the pv table
+ */
+u_int
+pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va,
+    u_int clr_mask, u_int set_mask)
+{
+	struct pv_entry *npv;
+	u_int flags, oflags;
+
+	/* No such mapping: nothing to modify, report no old flags. */
+	if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
+		return (0);
+
+	NPDEBUG(PDB_PVDUMP,
+	    printf("pmap_modify_pv: pm %p, pg %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, pg, clr_mask, set_mask, npv->pv_flags));
+
+	/*
+	 * There is at least one VA mapping this page.
+	 */
+
+	/* Fold REF/MOD changes into the page's saved attributes. */
+	if (clr_mask & (PVF_REF | PVF_MOD))
+		pg->mdpage.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
+
+	oflags = npv->pv_flags;
+	npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
+
+	/* Track wired-count transitions on the owning pmap. */
+	if ((flags ^ oflags) & PVF_WIRED) {
+		if (flags & PVF_WIRED)
+			++pm->pm_stats.wired_count;
+		else
+			--pm->pm_stats.wired_count;
+	}
+
+	/* Keep the per-page writable-mapping accounting in sync. */
+	if ((flags ^ oflags) & PVF_WRITE) {
+		if (pm == pmap_kernel()) {
+			if (flags & PVF_WRITE) {
+				pg->mdpage.krw_mappings++;
+				pg->mdpage.kro_mappings--;
+			} else {
+				pg->mdpage.kro_mappings++;
+				pg->mdpage.krw_mappings--;
+			}
+		} else
+		if (flags & PVF_WRITE) {
+			pg->mdpage.urw_mappings++;
+			pg->mdpage.uro_mappings--;
+		} else {
+			pg->mdpage.uro_mappings++;
+			pg->mdpage.urw_mappings--;
+		}
+	}
+
+	return (oflags);
+}
+
+void
+pmap_pinit(pmap_t pm)
+{
+
+	/*
+	 * Nothing to do when the vector page lives inside the
+	 * kernel's portion of the address space.
+	 */
+	if (vector_page >= KERNEL_BASE)
+		return;
+
+	/* Wire the vector page read-only into this pmap. */
+	pmap_enter(pm, vector_page, systempage.pv_pa,
+	    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
+	pmap_update(pm);
+}
+
+/*
+ * Allocate an L1 translation table for the specified pmap.
+ * This is called at pmap creation time.
+ *
+ * L1 tables are shared: each one can serve up to PMAP_DOMAINS
+ * pmaps, one per hardware domain number, and the LRU list holds
+ * all L1s that still have at least one free domain.
+ */
+void
+pmap_alloc_l1(pmap_t pm)
+{
+	struct l1_ttable *l1;
+	u_int8_t domain;
+
+	/*
+	 * Remove the L1 at the head of the LRU list
+	 */
+	simple_lock(&l1_lru_lock);
+	l1 = TAILQ_FIRST(&l1_lru_list);
+	KDASSERT(l1 != NULL);
+	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
+
+	/*
+	 * Pick the first available domain number, and update
+	 * the link to the next number.
+	 */
+	domain = l1->l1_domain_first;
+	l1->l1_domain_first = l1->l1_domain_free[domain];
+
+	/*
+	 * If there are still free domain numbers in this L1,
+	 * put it back on the TAIL of the LRU list.
+	 */
+	if (++l1->l1_domain_use_count < PMAP_DOMAINS)
+		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
+
+	simple_unlock(&l1_lru_lock);
+
+	/*
+	 * Fix up the relevant bits in the pmap structure
+	 */
+	pm->pm_l1 = l1;
+	pm->pm_domain = domain;
+}
+
+/*
+ * Free an L1 translation table.
+ * This is called at pmap destruction time.
+ *
+ * Only the pmap's domain number is released; the L1 itself is
+ * recycled via the LRU list for use by future pmaps.
+ */
+void
+pmap_free_l1(pmap_t pm)
+{
+	struct l1_ttable *l1 = pm->pm_l1;
+
+	simple_lock(&l1_lru_lock);
+
+	/*
+	 * If this L1 is currently on the LRU list, remove it.
+	 * (It is on the list only while it has at least one free domain.)
+	 */
+	if (l1->l1_domain_use_count < PMAP_DOMAINS)
+		TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
+
+	/*
+	 * Free up the domain number which was allocated to the pmap
+	 * by pushing it onto the L1's free-domain chain.
+	 */
+	l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first;
+	l1->l1_domain_first = pm->pm_domain;
+	l1->l1_domain_use_count--;
+
+	/*
+	 * The L1 now must have at least 1 free domain, so add
+	 * it back to the LRU list. If the use count is zero,
+	 * put it at the head of the list, otherwise it goes
+	 * to the tail.
+	 */
+	if (l1->l1_domain_use_count == 0)
+		TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
+	else
+		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
+
+	simple_unlock(&l1_lru_lock);
+}
+
+/*
+ * Mark the pmap's L1 as recently used by moving it to the tail of
+ * the LRU list, so pmap_alloc_l1() steals the least-recently-used
+ * L1 from the head.
+ */
+static __inline void
+pmap_use_l1(pmap_t pm)
+{
+	struct l1_ttable *l1;
+
+	/*
+	 * Do nothing if we're in interrupt context.
+	 * Access to an L1 by the kernel pmap must not affect
+	 * the LRU list.
+	 */
+	if (current_intr_depth || pm == pmap_kernel())
+		return;
+
+	l1 = pm->pm_l1;
+
+	/*
+	 * If the L1 is not currently on the LRU list, just return.
+	 * (Fully-used L1s are kept off the list.)
+	 */
+	if (l1->l1_domain_use_count == PMAP_DOMAINS)
+		return;
+
+	simple_lock(&l1_lru_lock);
+
+	/*
+	 * Check the use count again, now that we've acquired the lock:
+	 * the unlocked check above may have raced another CPU.
+	 */
+	if (l1->l1_domain_use_count == PMAP_DOMAINS) {
+		simple_unlock(&l1_lru_lock);
+		return;
+	}
+
+	/*
+	 * Move the L1 to the back of the LRU list
+	 */
+	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
+	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
+
+	simple_unlock(&l1_lru_lock);
+}
+
+/*
+ * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *)
+ *
+ * Free an L2 descriptor table back to the pool cache.
+ *
+ * Note the signature differs depending on PMAP_INCLUDE_PTE_SYNC:
+ * when PTE syncing is compiled in, the caller must also say whether
+ * the table needs a cache clean before it can be reused.
+ */
+static __inline void
+#ifndef PMAP_INCLUDE_PTE_SYNC
+pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa)
+#else
+pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2, paddr_t pa)
+#endif
+{
+#ifdef PMAP_INCLUDE_PTE_SYNC
+	/*
+	 * Note: With a write-back cache, we may need to sync this
+	 * L2 table before re-using it.
+	 * This is because it may have belonged to a non-current
+	 * pmap, in which case the cache syncs would have been
+	 * skipped when the pages were being unmapped. If the
+	 * L2 table were then to be immediately re-allocated to
+	 * the *current* pmap, it may well contain stale mappings
+	 * which have not yet been cleared by a cache write-back
+	 * and so would still be visible to the mmu.
+	 */
+	if (need_sync)
+		PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
+#endif
+#ifdef POOL_CACHE_PADDR
+	pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa);
+#else
+	pool_cache_put(&pmap_l2ptp_cache, (void *)l2);
+#endif
+}
+
+/*
+ * Look up the L2 bucket covering `va' in the given pmap.
+ *
+ * Returns NULL when either the l2_dtable for the address has never
+ * been allocated, or the bucket exists but has no L2 page table.
+ */
+static __inline struct l2_bucket *
+pmap_get_l2_bucket(pmap_t pm, vaddr_t va)
+{
+	struct l2_dtable *l2;
+	struct l2_bucket *l2b;
+	u_short l1idx;
+
+	l1idx = L1_IDX(va);
+
+	l2 = pm->pm_l2[L2_IDX(l1idx)];
+	if (l2 == NULL)
+		return (NULL);
+
+	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
+	if (l2b->l2b_kva == NULL)
+		return (NULL);
+
+	return (l2b);
+}
+
+/*
+ * Returns a pointer to the L2 bucket associated with the specified pmap
+ * and VA.
+ *
+ * If no L2 bucket exists, perform the necessary allocations to put an L2
+ * bucket/page table in place.
+ *
+ * Note that if a new L2 bucket/page was allocated, the caller *must*
+ * increment the bucket occupancy counter appropriately *before*
+ * releasing the pmap's lock to ensure no other thread or cpu deallocates
+ * the bucket/page in the meantime.
+ *
+ * Returns NULL only on allocation failure; all partially-made
+ * allocations are rolled back in that case.
+ */
+struct l2_bucket *
+pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va)
+{
+	struct l2_dtable *l2;
+	struct l2_bucket *l2b;
+	u_short l1idx;
+
+	l1idx = L1_IDX(va);
+
+	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
+		/*
+		 * No mapping at this address, as there is
+		 * no entry in the L1 table.
+		 * Need to allocate a new l2_dtable.
+		 */
+		if ((l2 = pmap_alloc_l2_dtable()) == NULL)
+			return (NULL);
+
+		/*
+		 * Link it into the parent pmap
+		 */
+		pm->pm_l2[L2_IDX(l1idx)] = l2;
+	}
+
+	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
+
+	/*
+	 * Fetch pointer to the L2 page table associated with the address.
+	 */
+	if (l2b->l2b_kva == NULL) {
+		pt_entry_t *ptep;
+
+		/*
+		 * No L2 page table has been allocated. Chances are, this
+		 * is because we just allocated the l2_dtable, above.
+		 */
+		if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_phys)) == NULL) {
+			/*
+			 * Oops, no more L2 page tables available at this
+			 * time. We may need to deallocate the l2_dtable
+			 * if we allocated a new one above.
+			 * (A zero occupancy proves it holds no buckets.)
+			 */
+			if (l2->l2_occupancy == 0) {
+				pm->pm_l2[L2_IDX(l1idx)] = NULL;
+				pmap_free_l2_dtable(l2);
+			}
+			return (NULL);
+		}
+
+		/* Account for the new bucket and record its location. */
+		l2->l2_occupancy++;
+		l2b->l2b_kva = ptep;
+		l2b->l2b_l1idx = l1idx;
+	}
+
+	return (l2b);
+}
+
+/*
+ * One or more mappings in the specified L2 descriptor table have just been
+ * invalidated.
+ *
+ * Garbage collect the metadata and descriptor table itself if necessary.
+ *
+ * The pmap lock must be acquired when this is called (not necessary
+ * for the kernel pmap).
+ */
+void
+pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
+{
+	struct l2_dtable *l2;
+	pd_entry_t *pl1pd, l1pd;
+	pt_entry_t *ptep;
+	u_short l1idx;
+
+	KDASSERT(count <= l2b->l2b_occupancy);
+
+	/*
+	 * Update the bucket's reference count according to how many
+	 * PTEs the caller has just invalidated.
+	 */
+	l2b->l2b_occupancy -= count;
+
+	/*
+	 * Note:
+	 *
+	 * Level 2 page tables allocated to the kernel pmap are never freed
+	 * as that would require checking all Level 1 page tables and
+	 * removing any references to the Level 2 page table. See also the
+	 * comment elsewhere about never freeing bootstrap L2 descriptors.
+	 *
+	 * We make do with just invalidating the mapping in the L2 table.
+	 *
+	 * This isn't really a big deal in practice and, in fact, leads
+	 * to a performance win over time as we don't need to continually
+	 * alloc/free.
+	 */
+	if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
+		return;
+
+	/*
+	 * There are no more valid mappings in this level 2 page table.
+	 * Go ahead and NULL-out the pointer in the bucket, then
+	 * free the page table.
+	 */
+	l1idx = l2b->l2b_l1idx;
+	ptep = l2b->l2b_kva;
+	l2b->l2b_kva = NULL;
+
+	pl1pd = &pm->pm_l1->l1_kva[l1idx];
+
+	/*
+	 * If the L1 slot matches the pmap's domain
+	 * number, then invalidate it.
+	 */
+	l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
+	if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
+		*pl1pd = 0;
+		PTE_SYNC(pl1pd);
+	}
+
+	/*
+	 * Release the L2 descriptor table back to the pool cache.
+	 * (The PTE_SYNC variant must know whether a cache clean is
+	 * still outstanding for this table -- see pmap_free_l2_ptp().)
+	 */
+#ifndef PMAP_INCLUDE_PTE_SYNC
+	pmap_free_l2_ptp(ptep, l2b->l2b_phys);
+#else
+	pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_phys);
+#endif
+
+	/*
+	 * Update the reference count in the associated l2_dtable
+	 */
+	l2 = pm->pm_l2[L2_IDX(l1idx)];
+	if (--l2->l2_occupancy > 0)
+		return;
+
+	/*
+	 * There are no more valid mappings in any of the Level 1
+	 * slots managed by this l2_dtable. Go ahead and NULL-out
+	 * the pointer in the parent pmap and free the l2_dtable.
+	 */
+	pm->pm_l2[L2_IDX(l1idx)] = NULL;
+	pmap_free_l2_dtable(l2);
+}
+
+/*
+ * Pool cache constructors for L2 descriptor tables, metadata and pmap
+ * structures.
+ */
+/*
+ * Pool cache constructor for L2 page tables: returns the page zeroed
+ * and, when hardware PTE syncing is not compiled in, remapped with
+ * the page-table cache mode.  Always returns 0 (success).
+ */
+int
+pmap_l2ptp_ctor(void *arg, void *v, int flags)
+{
+#ifndef PMAP_INCLUDE_PTE_SYNC
+	struct l2_bucket *l2b;
+	pt_entry_t *ptep, pte;
+	vaddr_t va = (vaddr_t)v & ~PGOFSET;
+
+	/*
+	 * The mappings for these page tables were initially made using
+	 * pmap_kenter_pa() by the pool subsystem. Therefore, the cache-
+	 * mode will not be right for page table mappings. To avoid
+	 * polluting the pmap_kenter_pa() code with a special case for
+	 * page tables, we simply fix up the cache-mode here if it's not
+	 * correct.
+	 */
+	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+	KDASSERT(l2b != NULL);
+	ptep = &l2b->l2b_kva[l2pte_index(va)];
+	pte = *ptep;
+
+	if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
+		/*
+		 * Page tables must have the cache-mode set to Write-Thru.
+		 */
+		*ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
+		PTE_SYNC(ptep);
+		/* Drop the stale TLB entry before the page is touched. */
+		cpu_tlb_flushD_SE(va);
+		cpu_cpwait();
+	}
+#endif
+
+	memset(v, 0, L2_TABLE_SIZE_REAL);
+	PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
+	return (0);
+}
+
+/* Pool cache constructor: hand back a fully-zeroed l2_dtable. */
+int
+pmap_l2dtable_ctor(void *arg, void *v, int flags)
+{
+	struct l2_dtable *l2 = v;
+
+	memset(l2, 0, sizeof(*l2));
+	return (0);
+}
+
+/* Pool cache constructor: hand back a fully-zeroed pmap. */
+int
+pmap_pmap_ctor(void *arg, void *v, int flags)
+{
+	struct pmap *pm = v;
+
+	memset(pm, 0, sizeof(*pm));
+	return (0);
+}
+
+/*
+ * Since we have a virtually indexed cache, we may need to inhibit caching if
+ * there is more than one mapping and at least one of them is writable.
+ * Since we purge the cache on every context switch, we only need to check for
+ * other mappings within the same pmap, or kernel_pmap.
+ * This function is also called when a page is unmapped, to possibly reenable
+ * caching on any remaining mappings.
+ *
+ * The code implements the following logic, where:
+ *
+ * KW = # of kernel read/write pages
+ * KR = # of kernel read only pages
+ * UW = # of user read/write pages
+ * UR = # of user read only pages
+ *
+ * KC = kernel mapping is cacheable
+ * UC = user mapping is cacheable
+ *
+ * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0
+ * +---------------------------------------------
+ * UW=0,UR=0 | --- KC=1 KC=1 KC=0
+ * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0
+ * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
+ * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
+ */
+
+/* Indexed [uidx][kidx] as computed by pmap_get_vac_flags() below. */
+static const int pmap_vac_flags[4][4] = {
+	{-1,		0,		0,		PVF_KNC},
+	{0,		0,		PVF_NC,		PVF_NC},
+	{0,		PVF_NC,		PVF_NC,		PVF_NC},
+	{PVF_UNC,	PVF_NC,		PVF_NC,		PVF_NC}
+};
+
+/*
+ * Encode the page's kernel/user mapping counts as 2-bit indices
+ * (bit 0: a read-only mapping exists or more than one writable
+ * mapping exists; bit 1: a writable mapping exists) and look up
+ * the resulting cacheability flags in pmap_vac_flags[].
+ */
+static __inline int
+pmap_get_vac_flags(const struct vm_page *pg)
+{
+	int kidx, uidx;
+
+	kidx = (pg->mdpage.kro_mappings ||
+	    pg->mdpage.krw_mappings > 1) ? 1 : 0;
+	if (pg->mdpage.krw_mappings != 0)
+		kidx |= 2;
+
+	uidx = (pg->mdpage.uro_mappings ||
+	    pg->mdpage.urw_mappings > 1) ? 1 : 0;
+	if (pg->mdpage.urw_mappings != 0)
+		uidx |= 2;
+
+	return (pmap_vac_flags[uidx][kidx]);
+}
+
+/*
+ * Recompute the page's cacheability after a mapping change, and
+ * dispatch to the kernel or user flavour of the fix-up routine
+ * when PTEs actually need to be rewritten.
+ */
+static __inline void
+pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
+{
+	int nattr;
+
+	nattr = pmap_get_vac_flags(pg);
+
+	/* Negative means no mappings remain: just clear PVF_NC. */
+	if (nattr < 0) {
+		pg->mdpage.pvh_attrs &= ~PVF_NC;
+		return;
+	}
+
+	/* Caching should be on and already is: nothing to change. */
+	if (nattr == 0 && (pg->mdpage.pvh_attrs & PVF_NC) == 0)
+		return;
+
+	if (pm == pmap_kernel())
+		pmap_vac_me_kpmap(pg, pm, va);
+	else
+		pmap_vac_me_user(pg, pm, va);
+
+	pg->mdpage.pvh_attrs = (pg->mdpage.pvh_attrs & ~PVF_NC) | nattr;
+}
+
+/*
+ * VAC fix-up used when the mapping being changed belongs to the
+ * kernel pmap: re-run pmap_vac_me_user() for any user pmap whose
+ * cacheable state may have been invalidated by the kernel change.
+ */
+void
+pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vaddr_t va)
+{
+	u_int u_cacheable, u_entries;
+	struct pv_entry *pv;
+	pmap_t last_pmap = pm;
+
+	/*
+	 * Pass one, see if there are both kernel and user pmaps for
+	 * this page. Calculate whether there are user-writable or
+	 * kernel-writable pages.
+	 */
+	u_cacheable = 0;
+	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
+		if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
+			u_cacheable++;
+	}
+
+	u_entries = pg->mdpage.urw_mappings + pg->mdpage.uro_mappings;
+
+	/*
+	 * We know we have just been updating a kernel entry, so if
+	 * all user pages are already cacheable, then there is nothing
+	 * further to do.
+	 */
+	if (pg->mdpage.k_mappings == 0 && u_cacheable == u_entries)
+		return;
+
+	if (u_entries) {
+		/*
+		 * Scan over the list again, for each entry, if it
+		 * might not be set correctly, call pmap_vac_me_user
+		 * to recalculate the settings.
+		 */
+		for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
+			/*
+			 * We know kernel mappings will get set
+			 * correctly in other calls. We also know
+			 * that if the pmap is the same as last_pmap
+			 * then we've just handled this entry.
+			 */
+			if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
+				continue;
+
+			/*
+			 * If there are kernel entries and this page
+			 * is writable but non-cacheable, then we can
+			 * skip this entry also.
+			 */
+			if (pg->mdpage.k_mappings &&
+			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
+			    (PVF_NC | PVF_WRITE))
+				continue;
+
+			/*
+			 * Similarly if there are no kernel-writable
+			 * entries and the page is already
+			 * read-only/cacheable.
+			 */
+			if (pg->mdpage.krw_mappings == 0 &&
+			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
+				continue;
+
+			/*
+			 * For some of the remaining cases, we know
+			 * that we must recalculate, but for others we
+			 * can't tell if they are correct or not, so
+			 * we recalculate anyway.
+			 */
+			pmap_vac_me_user(pg, (last_pmap = pv->pv_pmap), 0);
+		}
+
+		if (pg->mdpage.k_mappings == 0)
+			return;
+	}
+
+	pmap_vac_me_user(pg, pm, va);
+}
+
+/*
+ * VAC fix-up for one user pmap (kernel mappings are treated as part
+ * of it): disable caching on every mapping of the page when writable
+ * aliases exist, or re-enable it when they no longer do.
+ */
+void
+pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vaddr_t va)
+{
+	pmap_t kpmap = pmap_kernel();
+	struct pv_entry *pv, *npv;
+	struct l2_bucket *l2b;
+	pt_entry_t *ptep, pte;
+	u_int entries = 0;
+	u_int writable = 0;
+	u_int cacheable_entries = 0;
+	u_int kern_cacheable = 0;
+	u_int other_writable = 0;
+
+	/*
+	 * Count mappings and writable mappings in this pmap.
+	 * Include kernel mappings as part of our own.
+	 * Keep a pointer to the first one.
+	 */
+	for (pv = npv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
+		/* Count mappings in the same pmap */
+		if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
+			if (entries++ == 0)
+				npv = pv;
+
+			/* Cacheable mappings */
+			if ((pv->pv_flags & PVF_NC) == 0) {
+				cacheable_entries++;
+				if (kpmap == pv->pv_pmap)
+					kern_cacheable++;
+			}
+
+			/* Writable mappings */
+			if (pv->pv_flags & PVF_WRITE)
+				++writable;
+		} else
+		if (pv->pv_flags & PVF_WRITE)
+			other_writable = 1;
+	}
+
+	/*
+	 * Enable or disable caching as necessary.
+	 * Note: the first entry might be part of the kernel pmap,
+	 * so we can't assume this is indicative of the state of the
+	 * other (maybe non-kpmap) entries.
+	 */
+	if ((entries > 1 && writable) ||
+	    (entries > 0 && pm == kpmap && other_writable)) {
+		/* Nothing to disable if nothing is currently cacheable. */
+		if (cacheable_entries == 0)
+			return;
+
+		for (pv = npv; pv; pv = pv->pv_next) {
+			if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) ||
+			    (pv->pv_flags & PVF_NC))
+				continue;
+
+			pv->pv_flags |= PVF_NC;
+
+			l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
+			ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+			pte = *ptep & ~L2_S_CACHE_MASK;
+
+			/*
+			 * Clean/flush the mapping we are about to make
+			 * non-cacheable, unless it is the one currently
+			 * being changed by our caller.
+			 */
+			if ((va != pv->pv_va || pm != pv->pv_pmap) &&
+			    l2pte_valid(pte)) {
+				if (PV_BEEN_EXECD(pv->pv_flags)) {
+					pmap_idcache_wbinv_range(pv->pv_pmap,
+					    pv->pv_va, PAGE_SIZE);
+					pmap_tlb_flushID_SE(pv->pv_pmap,
+					    pv->pv_va);
+				} else
+				if (PV_BEEN_REFD(pv->pv_flags)) {
+					pmap_dcache_wb_range(pv->pv_pmap,
+					    pv->pv_va, PAGE_SIZE, TRUE,
+					    (pv->pv_flags & PVF_WRITE) == 0);
+					pmap_tlb_flushD_SE(pv->pv_pmap,
+					    pv->pv_va);
+				}
+			}
+
+			*ptep = pte;
+			PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
+		}
+		cpu_cpwait();
+	} else
+	if (entries > cacheable_entries) {
+		/*
+		 * Turn caching back on for some pages. If it is a kernel
+		 * page, only do so if there are no other writable pages.
+		 */
+		for (pv = npv; pv; pv = pv->pv_next) {
+			if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap &&
+			    (kpmap != pv->pv_pmap || other_writable)))
+				continue;
+
+			pv->pv_flags &= ~PVF_NC;
+
+			l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
+			ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+			pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
+
+			/*
+			 * Flush any stale TLB entry so the new cache
+			 * mode takes effect on the next access.
+			 */
+			if (l2pte_valid(pte)) {
+				if (PV_BEEN_EXECD(pv->pv_flags)) {
+					pmap_tlb_flushID_SE(pv->pv_pmap,
+					    pv->pv_va);
+				} else
+				if (PV_BEEN_REFD(pv->pv_flags)) {
+					pmap_tlb_flushD_SE(pv->pv_pmap,
+					    pv->pv_va);
+				}
+			}
+
+			*ptep = pte;
+			PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
+		}
+	}
+}
+
+/*
+ * Modify pte bits for all ptes corresponding to the given physical address.
+ * We use `maskbits' rather than `clearbits' because we're always passing
+ * constants and the latter would require an extra inversion at run-time.
+ */
+void
+pmap_clearbit(struct vm_page *pg, u_int maskbits)
+{
+	struct l2_bucket *l2b;
+	struct pv_entry *pv;
+	pt_entry_t *ptep, npte, opte;
+	pmap_t pm;
+	vaddr_t va;
+	u_int oflags;
+
+	NPDEBUG(PDB_BITS,
+	    printf("pmap_clearbit: pg %p (0x%08lx) mask 0x%x\n",
+	    pg, pg->phys_addr, maskbits));
+
+	PMAP_HEAD_TO_MAP_LOCK();
+	simple_lock(&pg->mdpage.pvh_slock);
+
+	/*
+	 * Clear saved attributes (modify, reference)
+	 */
+	pg->mdpage.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
+
+	/* No mappings: the attribute update above is all that's needed. */
+	if (pg->mdpage.pvh_list == NULL) {
+		simple_unlock(&pg->mdpage.pvh_slock);
+		PMAP_HEAD_TO_MAP_UNLOCK();
+		return;
+	}
+
+	/*
+	 * Loop over all current mappings setting/clearing as appropriate
+	 */
+	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
+		va = pv->pv_va;
+		pm = pv->pv_pmap;
+		oflags = pv->pv_flags;
+		pv->pv_flags &= ~maskbits;
+
+		pmap_acquire_pmap_lock(pm);
+
+		l2b = pmap_get_l2_bucket(pm, va);
+		KDASSERT(l2b != NULL);
+
+		ptep = &l2b->l2b_kva[l2pte_index(va)];
+		npte = opte = *ptep;
+
+		NPDEBUG(PDB_BITS,
+		    printf(
+		    "pmap_clearbit: pv %p, pm %p, va 0x%08lx, flag 0x%x\n",
+		    pv, pv->pv_pmap, pv->pv_va, oflags));
+
+		if (maskbits & (PVF_WRITE|PVF_MOD)) {
+			if ((pv->pv_flags & PVF_NC)) {
+				/*
+				 * Entry is not cacheable:
+				 *
+				 * Don't turn caching on again if this is a
+				 * modified emulation. This would be
+				 * inconsistent with the settings created by
+				 * pmap_vac_me_harder(). Otherwise, it's safe
+				 * to re-enable caching.
+				 *
+				 * There's no need to call pmap_vac_me_harder()
+				 * here: all pages are losing their write
+				 * permission.
+				 */
+				if (maskbits & PVF_WRITE) {
+					npte |= pte_l2_s_cache_mode;
+					pv->pv_flags &= ~PVF_NC;
+				}
+			} else
+			if (opte & L2_S_PROT_W) {
+				/*
+				 * Entry is writable/cacheable: check if pmap
+				 * is current if it is flush it, otherwise it
+				 * won't be in the cache
+				 */
+				if (PV_BEEN_EXECD(oflags))
+					pmap_idcache_wbinv_range(pm, pv->pv_va,
+					    PAGE_SIZE);
+				else
+				if (PV_BEEN_REFD(oflags))
+					pmap_dcache_wb_range(pm, pv->pv_va,
+					    PAGE_SIZE,
+					    (maskbits & PVF_REF) ? TRUE : FALSE,
+					    FALSE);
+			}
+
+			/* make the pte read only */
+			npte &= ~L2_S_PROT_W;
+
+			if (maskbits & PVF_WRITE) {
+				/*
+				 * Keep alias accounting up to date
+				 */
+				if (pv->pv_pmap == pmap_kernel()) {
+					if (oflags & PVF_WRITE) {
+						pg->mdpage.krw_mappings--;
+						pg->mdpage.kro_mappings++;
+					}
+				} else
+				if (oflags & PVF_WRITE) {
+					pg->mdpage.urw_mappings--;
+					pg->mdpage.uro_mappings++;
+				}
+			}
+		}
+
+		if (maskbits & PVF_REF) {
+			if ((pv->pv_flags & PVF_NC) == 0 &&
+			    (maskbits & (PVF_WRITE|PVF_MOD)) == 0) {
+				/*
+				 * Check npte here; we may have already
+				 * done the wbinv above, and the validity
+				 * of the PTE is the same for opte and
+				 * npte.
+				 */
+				if (npte & L2_S_PROT_W) {
+					if (PV_BEEN_EXECD(oflags))
+						pmap_idcache_wbinv_range(pm,
+						    pv->pv_va, PAGE_SIZE);
+					else
+					if (PV_BEEN_REFD(oflags))
+						pmap_dcache_wb_range(pm,
+						    pv->pv_va, PAGE_SIZE,
+						    TRUE, FALSE);
+				} else
+				if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) {
+					/* XXXJRT need idcache_inv_range */
+					if (PV_BEEN_EXECD(oflags))
+						pmap_idcache_wbinv_range(pm,
+						    pv->pv_va, PAGE_SIZE);
+					else
+					if (PV_BEEN_REFD(oflags))
+						pmap_dcache_wb_range(pm,
+						    pv->pv_va, PAGE_SIZE,
+						    TRUE, TRUE);
+				}
+			}
+
+			/*
+			 * Make the PTE invalid so that we will take a
+			 * page fault the next time the mapping is
+			 * referenced.
+			 */
+			npte &= ~L2_TYPE_MASK;
+			npte |= L2_TYPE_INV;
+		}
+
+		if (npte != opte) {
+			*ptep = npte;
+			PTE_SYNC(ptep);
+			/* Flush the TLB entry if a current pmap. */
+			if (PV_BEEN_EXECD(oflags))
+				pmap_tlb_flushID_SE(pm, pv->pv_va);
+			else
+			if (PV_BEEN_REFD(oflags))
+				pmap_tlb_flushD_SE(pm, pv->pv_va);
+		}
+
+		pmap_release_pmap_lock(pm);
+
+		NPDEBUG(PDB_BITS,
+		    printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n",
+		    pm, va, opte, npte));
+	}
+
+	simple_unlock(&pg->mdpage.pvh_slock);
+	PMAP_HEAD_TO_MAP_UNLOCK();
+}
+
+/*
+ * pmap_clean_page()
+ *
+ * This is a local function used to work out the best strategy to clean
+ * a single page referenced by its entry in the PV table. It's used by
+ * pmap_copy_page, pmap_zero page and maybe some others later on.
+ *
+ * Its policy is effectively:
+ *  o If there are no mappings, we don't bother doing anything with the cache.
+ *  o If there is one mapping, we clean just that page.
+ *  o If there are multiple mappings, we clean the entire cache.
+ *
+ * So that some functions can be further optimised, it returns 0 if it didn't
+ * clean the entire cache, or 1 if it did.
+ *
+ * XXX One bug in this routine is that if the pv_entry has a single page
+ * mapped at 0x00000000 a whole cache clean will be performed rather than
+ * just the 1 page. Since this should not occur in everyday use and if it does
+ * it will just result in not the most efficient clean for the page.
+ */
+int
+pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
+{
+	pmap_t pm, pm_to_clean = NULL;
+	struct pv_entry *npv;
+	u_int cache_needs_cleaning = 0;
+	u_int flags = 0;
+	vaddr_t page_to_clean = 0;
+
+	if (pv == NULL) {
+		/* nothing mapped in so nothing to flush */
+		return (0);
+	}
+
+	/*
+	 * Since we flush the cache each time we change to a different
+	 * user vmspace, we only need to flush the page if it is in the
+	 * current pmap.
+	 */
+	if (curproc)
+		pm = curproc->p_vmspace->vm_map.pmap;
+	else
+		pm = pmap_kernel();
+
+	/*
+	 * Scan the mappings that are visible in the current address
+	 * space.  Remember a single candidate (va, pmap) to clean;
+	 * finding a second one downgrades to a whole-cache clean.
+	 */
+	for (npv = pv; npv; npv = npv->pv_next) {
+		if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
+			flags |= npv->pv_flags;
+			/*
+			 * The page is mapped non-cacheable in
+			 * this map. No need to flush the cache.
+			 */
+			if (npv->pv_flags & PVF_NC) {
+#ifdef DIAGNOSTIC
+				if (cache_needs_cleaning)
+					panic("pmap_clean_page: "
+					    "cache inconsistency");
+#endif
+				break;
+			} else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
+				continue;
+			if (cache_needs_cleaning) {
+				page_to_clean = 0;
+				break;
+			} else {
+				page_to_clean = npv->pv_va;
+				pm_to_clean = npv->pv_pmap;
+			}
+			cache_needs_cleaning = 1;
+		}
+	}
+
+	if (page_to_clean) {
+		/* Exactly one relevant mapping: clean just that page. */
+		if (PV_BEEN_EXECD(flags))
+			pmap_idcache_wbinv_range(pm_to_clean, page_to_clean,
+			    PAGE_SIZE);
+		else
+			pmap_dcache_wb_range(pm_to_clean, page_to_clean,
+			    PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0);
+	} else if (cache_needs_cleaning) {
+		/* Multiple mappings: clean the whole cache. */
+		if (PV_BEEN_EXECD(flags))
+			pmap_idcache_wbinv_all(pm);
+		else
+			pmap_dcache_wbinv_all(pm);
+		return (1);
+	}
+	return (0);
+}
+
+/*
+ * Routine:	pmap_page_remove
+ * Function:
+ *		Removes this physical page from
+ *		all physical maps in which it resides.
+ *		Reflects back modify bits to the pager.
+ */
+void
+pmap_page_remove(struct vm_page *pg)
+{
+	struct l2_bucket *l2b;
+	struct pv_entry *pv, *npv;
+	pmap_t pm, curpm;
+	pt_entry_t *ptep, pte;
+	boolean_t flush;
+	u_int flags;
+
+	NPDEBUG(PDB_FOLLOW,
+	    printf("pmap_page_remove: pg %p (0x%08lx)\n", pg, pg->phys_addr));
+
+	PMAP_HEAD_TO_MAP_LOCK();
+	simple_lock(&pg->mdpage.pvh_slock);
+
+	pv = pg->mdpage.pvh_list;
+	if (pv == NULL) {
+		/* No mappings: nothing to tear down. */
+		simple_unlock(&pg->mdpage.pvh_slock);
+		PMAP_HEAD_TO_MAP_UNLOCK();
+		return;
+	}
+
+	/*
+	 * Clear alias counts
+	 */
+	pg->mdpage.k_mappings = 0;
+	pg->mdpage.urw_mappings = pg->mdpage.uro_mappings = 0;
+
+	flush = FALSE;
+	flags = 0;
+	if (curproc)
+		curpm = curproc->p_vmspace->vm_map.pmap;
+	else
+		curpm = pmap_kernel();
+
+	/* Clean the page out of the cache before unmapping it. */
+	pmap_clean_page(pv, FALSE);
+
+	/* Walk the pv list, invalidating each mapping and freeing the pve. */
+	while (pv) {
+		pm = pv->pv_pmap;
+		/* Defer the TLB flush until the loop ends (see below). */
+		if (flush == FALSE && (pm == curpm || pm == pmap_kernel()))
+			flush = TRUE;
+
+		pmap_acquire_pmap_lock(pm);
+
+		l2b = pmap_get_l2_bucket(pm, pv->pv_va);
+		KDASSERT(l2b != NULL);
+
+		ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+		pte = *ptep;
+
+		/*
+		 * Update statistics
+		 */
+		--pm->pm_stats.resident_count;
+
+		/* Wired bit */
+		if (pv->pv_flags & PVF_WIRED)
+			--pm->pm_stats.wired_count;
+
+		flags |= pv->pv_flags;
+
+		/*
+		 * Invalidate the PTEs.
+		 */
+		*ptep = 0;
+		PTE_SYNC_CURRENT(pm, ptep);
+		pmap_free_l2_bucket(pm, l2b, 1);
+
+		npv = pv->pv_next;
+		pool_put(&pmap_pv_pool, pv);
+		pv = npv;
+		pmap_release_pmap_lock(pm);
+	}
+	pg->mdpage.pvh_list = NULL;
+	simple_unlock(&pg->mdpage.pvh_slock);
+	PMAP_HEAD_TO_MAP_UNLOCK();
+
+	/* Flush the TLB once, if any removed mapping was current. */
+	if (flush) {
+		if (PV_BEEN_EXECD(flags))
+			pmap_tlb_flushID(curpm);
+		else
+			pmap_tlb_flushD(curpm);
+	}
+	cpu_cpwait();
+}
+
+/*
+ * pmap_t pmap_create(void)
+ *
+ * Create a new pmap structure from scratch.
+ * The returned pmap starts with a single reference (uo_refs = 1)
+ * and is linked onto the global pmap_pmaps list.
+ */
+pmap_t
+pmap_create(void)
+{
+ pmap_t pm;
+
+ /* PR_WAITOK: allocation may sleep until memory is available. */
+ pm = pool_cache_get(&pmap_pmap_cache, PR_WAITOK);
+
+ simple_lock_init(&pm->pm_lock);
+ pm->pm_obj.pgops = NULL; /* currently not a mappable object */
+ TAILQ_INIT(&pm->pm_obj.memq);
+ pm->pm_obj.uo_npages = 0;
+ pm->pm_obj.uo_refs = 1;
+ pm->pm_stats.wired_count = 0;
+ pm->pm_stats.resident_count = 1;
+ pm->pm_cstate.cs_all = 0;
+ pmap_alloc_l1(pm);
+
+ /*
+ * Note: The pool cache ensures that the pm_l2[] array is already
+ * initialised to zero.
+ */
+
+ pmap_pinit(pm);
+
+ LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list);
+
+ return (pm);
+}
+
+/*
+ * void pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
+ * int flags)
+ *
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ *
+ * Returns 0 on success, ENOMEM if PMAP_CANFAIL was given and an
+ * L2 bucket or pv entry could not be allocated.
+ */
+int
+pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
+{
+ struct l2_bucket *l2b;
+ struct vm_page *pg, *opg;
+ struct pv_entry *pve;
+ pt_entry_t *ptep, npte, opte;
+ u_int nflags;
+ u_int oflags;
+
+ NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags));
+
+ KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0);
+ KDASSERT(((va | pa) & PGOFSET) == 0);
+
+ /*
+ * Get a pointer to the page. Later on in this function, we
+ * test for a managed page by checking pg != NULL.
+ */
+ pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
+
+ /* Translate requested protection/wiring into pv flags. */
+ nflags = 0;
+ if (prot & VM_PROT_WRITE)
+ nflags |= PVF_WRITE;
+ if (prot & VM_PROT_EXECUTE)
+ nflags |= PVF_EXEC;
+ if (flags & PMAP_WIRED)
+ nflags |= PVF_WIRED;
+
+ PMAP_MAP_TO_HEAD_LOCK();
+ pmap_acquire_pmap_lock(pm);
+
+ /*
+ * Fetch the L2 bucket which maps this page, allocating one if
+ * necessary for user pmaps.
+ */
+ if (pm == pmap_kernel())
+ l2b = pmap_get_l2_bucket(pm, va);
+ else
+ l2b = pmap_alloc_l2_bucket(pm, va);
+ if (l2b == NULL) {
+ if (flags & PMAP_CANFAIL) {
+ pmap_release_pmap_lock(pm);
+ PMAP_MAP_TO_HEAD_UNLOCK();
+ return (ENOMEM);
+ }
+ panic("pmap_enter: failed to allocate L2 bucket");
+ }
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ opte = *ptep;
+ npte = pa;
+ oflags = 0;
+
+ if (opte) {
+ /*
+ * There is already a mapping at this address.
+ * If the physical address is different, lookup the
+ * vm_page.
+ */
+ if (l2pte_pa(opte) != pa)
+ opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
+ else
+ opg = pg;
+ } else
+ opg = NULL;
+
+ if (pg) {
+ /*
+ * This is to be a managed mapping.
+ */
+ if ((flags & VM_PROT_ALL) ||
+ (pg->mdpage.pvh_attrs & PVF_REF)) {
+ /*
+ * - The access type indicates that we don't need
+ * to do referenced emulation.
+ * OR
+ * - The physical page has already been referenced
+ * so no need to re-do referenced emulation here.
+ */
+ npte |= L2_S_PROTO;
+
+ nflags |= PVF_REF;
+
+ if ((prot & VM_PROT_WRITE) != 0 &&
+ ((flags & VM_PROT_WRITE) != 0 ||
+ (pg->mdpage.pvh_attrs & PVF_MOD) != 0)) {
+ /*
+ * This is a writable mapping, and the
+ * page's mod state indicates it has
+ * already been modified. Make it
+ * writable from the outset.
+ */
+ npte |= L2_S_PROT_W;
+ nflags |= PVF_MOD;
+ }
+ } else {
+ /*
+ * Need to do page referenced emulation.
+ * (Leave the PTE invalid so the first access
+ * faults into pmap_fault_fixup().)
+ */
+ npte |= L2_TYPE_INV;
+ }
+
+ npte |= pte_l2_s_cache_mode;
+
+ if (pg == opg) {
+ /*
+ * We're changing the attrs of an existing mapping.
+ */
+ simple_lock(&pg->mdpage.pvh_slock);
+ oflags = pmap_modify_pv(pg, pm, va,
+ PVF_WRITE | PVF_EXEC | PVF_WIRED |
+ PVF_MOD | PVF_REF, nflags);
+ simple_unlock(&pg->mdpage.pvh_slock);
+
+ /*
+ * We may need to flush the cache if we're
+ * doing rw-ro...
+ */
+ if (pm->pm_cstate.cs_cache_d &&
+ (oflags & PVF_NC) == 0 &&
+ (opte & L2_S_PROT_W) != 0 &&
+ (prot & VM_PROT_WRITE) == 0)
+ cpu_dcache_wb_range(va, PAGE_SIZE);
+ } else {
+ /*
+ * New mapping, or changing the backing page
+ * of an existing mapping.
+ */
+ if (opg) {
+ /*
+ * Replacing an existing mapping with a new one.
+ * It is part of our managed memory so we
+ * must remove it from the PV list
+ *
+ * NOTE(review): pve returned by
+ * pmap_remove_pv() is dereferenced below
+ * without a NULL check — assumes the old
+ * mapping is always on opg's PV list;
+ * confirm.
+ */
+ simple_lock(&opg->mdpage.pvh_slock);
+ pve = pmap_remove_pv(opg, pm, va);
+ pmap_vac_me_harder(opg, pm, 0);
+ simple_unlock(&opg->mdpage.pvh_slock);
+ oflags = pve->pv_flags;
+
+ /*
+ * If the old mapping was valid (ref/mod
+ * emulation creates 'invalid' mappings
+ * initially) then make sure to frob
+ * the cache.
+ */
+ if ((oflags & PVF_NC) == 0 &&
+ l2pte_valid(opte)) {
+ if (PV_BEEN_EXECD(oflags)) {
+ pmap_idcache_wbinv_range(pm, va,
+ PAGE_SIZE);
+ } else
+ if (PV_BEEN_REFD(oflags)) {
+ pmap_dcache_wb_range(pm, va,
+ PAGE_SIZE, TRUE,
+ (oflags & PVF_WRITE) == 0);
+ }
+ }
+ } else
+ if ((pve = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){
+ if ((flags & PMAP_CANFAIL) == 0)
+ panic("pmap_enter: no pv entries");
+
+ if (pm != pmap_kernel())
+ pmap_free_l2_bucket(pm, l2b, 0);
+ pmap_release_pmap_lock(pm);
+ PMAP_MAP_TO_HEAD_UNLOCK();
+ NPDEBUG(PDB_ENTER,
+ printf("pmap_enter: ENOMEM\n"));
+ return (ENOMEM);
+ }
+
+ pmap_enter_pv(pg, pve, pm, va, nflags);
+ }
+ } else {
+ /*
+ * We're mapping an unmanaged page.
+ * These are always readable, and possibly writable, from
+ * the get go as we don't need to track ref/mod status.
+ */
+ npte |= L2_S_PROTO;
+ if (prot & VM_PROT_WRITE)
+ npte |= L2_S_PROT_W;
+
+ /*
+ * Make sure the vector table is mapped cacheable
+ */
+ if (pm != pmap_kernel() && va == vector_page)
+ npte |= pte_l2_s_cache_mode;
+
+ if (opg) {
+ /*
+ * Looks like there's an existing 'managed' mapping
+ * at this address.
+ */
+ simple_lock(&opg->mdpage.pvh_slock);
+ pve = pmap_remove_pv(opg, pm, va);
+ pmap_vac_me_harder(opg, pm, 0);
+ simple_unlock(&opg->mdpage.pvh_slock);
+ oflags = pve->pv_flags;
+
+ if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) {
+ if (PV_BEEN_EXECD(oflags))
+ pmap_idcache_wbinv_range(pm, va,
+ PAGE_SIZE);
+ else
+ if (PV_BEEN_REFD(oflags))
+ pmap_dcache_wb_range(pm, va, PAGE_SIZE,
+ TRUE, (oflags & PVF_WRITE) == 0);
+ }
+ pool_put(&pmap_pv_pool, pve);
+ }
+ }
+
+ /*
+ * Make sure userland mappings get the right permissions
+ */
+ if (pm != pmap_kernel() && va != vector_page)
+ npte |= L2_S_PROT_U;
+
+ /*
+ * Keep the stats up to date
+ */
+ if (opte == 0) {
+ l2b->l2b_occupancy++;
+ pm->pm_stats.resident_count++;
+ }
+
+ NPDEBUG(PDB_ENTER,
+ printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte));
+
+ /*
+ * If this is just a wiring change, the two PTEs will be
+ * identical, so there's no need to update the page table.
+ */
+ if (npte != opte) {
+ boolean_t is_cached = pmap_is_cached(pm);
+
+ *ptep = npte;
+ if (is_cached) {
+ /*
+ * We only need to frob the cache/tlb if this pmap
+ * is current
+ */
+ PTE_SYNC(ptep);
+ if (va != vector_page && l2pte_valid(npte)) {
+ /*
+ * This mapping is likely to be accessed as
+ * soon as we return to userland. Fix up the
+ * L1 entry to avoid taking another
+ * page/domain fault.
+ */
+ pd_entry_t *pl1pd, l1pd;
+
+ pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)];
+ l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) |
+ L1_C_PROTO;
+ if (*pl1pd != l1pd) {
+ *pl1pd = l1pd;
+ PTE_SYNC(pl1pd);
+ }
+ }
+ }
+
+ if (PV_BEEN_EXECD(oflags))
+ pmap_tlb_flushID_SE(pm, va);
+ else
+ if (PV_BEEN_REFD(oflags))
+ pmap_tlb_flushD_SE(pm, va);
+
+ NPDEBUG(PDB_ENTER,
+ printf("pmap_enter: is_cached %d cs 0x%08x\n",
+ is_cached, pm->pm_cstate.cs_all));
+
+ if (pg != NULL) {
+ /* Re-evaluate cacheability WRT virtual aliases. */
+ simple_lock(&pg->mdpage.pvh_slock);
+ pmap_vac_me_harder(pg, pm, va);
+ simple_unlock(&pg->mdpage.pvh_slock);
+ }
+ }
+
+ pmap_release_pmap_lock(pm);
+ PMAP_MAP_TO_HEAD_UNLOCK();
+
+ return (0);
+}
+
+/*
+ * pmap_remove()
+ *
+ * pmap_remove is responsible for nuking a number of mappings for a range
+ * of virtual address space in the current pmap. To do this efficiently
+ * is interesting, because in a number of cases a wide virtual address
+ * range may be supplied that contains few actual mappings. So, the
+ * optimisations are:
+ * 1. Skip over hunks of address space for which no L1 or L2 entry exists.
+ * 2. Build up a list of pages we've hit, up to a maximum, so we can
+ * maybe do just a partial cache clean. This path of execution is
+ * complicated by the fact that the cache must be flushed _before_
+ * the PTE is nuked, being a VAC :-)
+ * 3. If we're called after UVM calls pmap_remove_all(), we can defer
+ * all invalidations until pmap_update(), since pmap_remove_all() has
+ * already flushed the cache.
+ * 4. Maybe later fast-case a single page, but I don't think this is
+ * going to make _that_ much difference overall.
+ */
+
+#define PMAP_REMOVE_CLEAN_LIST_SIZE 3
+
+void
+pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
+{
+ struct l2_bucket *l2b;
+ vaddr_t next_bucket;
+ pt_entry_t *ptep;
+ u_int cleanlist_idx, total, cnt;
+ struct {
+ vaddr_t va;
+ pt_entry_t *pte;
+ } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
+ u_int mappings, is_exec, is_refd;
+
+ NPDEBUG(PDB_REMOVE, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
+ pm, sva, eva));
+
+ /*
+ * we lock in the pmap => pv_head direction
+ */
+ PMAP_MAP_TO_HEAD_LOCK();
+ pmap_acquire_pmap_lock(pm);
+
+ /*
+ * cleanlist_idx == SIZE+1 means "don't bother with the clean list;
+ * per-page cache cleaning is unnecessary or already done".
+ */
+ if (pm->pm_remove_all || !pmap_is_cached(pm)) {
+ cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
+ if (pm->pm_cstate.cs_tlb == 0)
+ pm->pm_remove_all = TRUE;
+ } else
+ cleanlist_idx = 0;
+
+ total = 0;
+
+ while (sva < eva) {
+ /*
+ * Do one L2 bucket's worth at a time.
+ */
+ next_bucket = L2_NEXT_BUCKET(sva);
+ if (next_bucket > eva)
+ next_bucket = eva;
+
+ l2b = pmap_get_l2_bucket(pm, sva);
+ if (l2b == NULL) {
+ sva = next_bucket;
+ continue;
+ }
+
+ ptep = &l2b->l2b_kva[l2pte_index(sva)];
+ mappings = 0;
+
+ while (sva < next_bucket) {
+ struct vm_page *pg;
+ pt_entry_t pte;
+ paddr_t pa;
+
+ pte = *ptep;
+
+ if (pte == 0) {
+ /*
+ * Nothing here, move along
+ */
+ sva += PAGE_SIZE;
+ ptep++;
+ continue;
+ }
+
+ pm->pm_stats.resident_count--;
+ pa = l2pte_pa(pte);
+ is_exec = 0;
+ is_refd = 1;
+
+ /*
+ * Update flags. In a number of circumstances,
+ * we could cluster a lot of these and do a
+ * number of sequential pages in one go.
+ */
+ if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
+ struct pv_entry *pve;
+ simple_lock(&pg->mdpage.pvh_slock);
+ pve = pmap_remove_pv(pg, pm, sva);
+ pmap_vac_me_harder(pg, pm, 0);
+ simple_unlock(&pg->mdpage.pvh_slock);
+ if (pve != NULL) {
+ if (pm->pm_remove_all == FALSE) {
+ is_exec =
+ PV_BEEN_EXECD(pve->pv_flags);
+ is_refd =
+ PV_BEEN_REFD(pve->pv_flags);
+ }
+ pool_put(&pmap_pv_pool, pve);
+ }
+ }
+
+ if (!l2pte_valid(pte)) {
+ /*
+ * Ref/mod emulation mapping: never in the
+ * cache or TLB, so just clear the PTE.
+ */
+ *ptep = 0;
+ PTE_SYNC_CURRENT(pm, ptep);
+ sva += PAGE_SIZE;
+ ptep++;
+ mappings++;
+ continue;
+ }
+
+ if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
+ /*
+ * Add to the clean list. The low bit of the
+ * recorded VA encodes "was executable"
+ * (page-aligned VAs leave it free).
+ */
+ cleanlist[cleanlist_idx].pte = ptep;
+ cleanlist[cleanlist_idx].va =
+ sva | (is_exec & 1);
+ cleanlist_idx++;
+ } else
+ if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
+ /* Nuke everything if needed. */
+ pmap_idcache_wbinv_all(pm);
+ pmap_tlb_flushID(pm);
+
+ /*
+ * Roll back the previous PTE list,
+ * and zero out the current PTE.
+ */
+ for (cnt = 0;
+ cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
+ *cleanlist[cnt].pte = 0;
+ }
+ *ptep = 0;
+ PTE_SYNC(ptep);
+ cleanlist_idx++;
+ pm->pm_remove_all = TRUE;
+ } else {
+ *ptep = 0;
+ PTE_SYNC(ptep);
+ if (pm->pm_remove_all == FALSE) {
+ if (is_exec)
+ pmap_tlb_flushID_SE(pm, sva);
+ else
+ if (is_refd)
+ pmap_tlb_flushD_SE(pm, sva);
+ }
+ }
+
+ sva += PAGE_SIZE;
+ ptep++;
+ mappings++;
+ }
+
+ /*
+ * Deal with any left overs
+ */
+ if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
+ total += cleanlist_idx;
+ for (cnt = 0; cnt < cleanlist_idx; cnt++) {
+ if (pm->pm_cstate.cs_all != 0) {
+ vaddr_t clva = cleanlist[cnt].va & ~1;
+ if (cleanlist[cnt].va & 1) {
+ pmap_idcache_wbinv_range(pm,
+ clva, PAGE_SIZE);
+ pmap_tlb_flushID_SE(pm, clva);
+ } else {
+ pmap_dcache_wb_range(pm,
+ clva, PAGE_SIZE, TRUE,
+ FALSE);
+ pmap_tlb_flushD_SE(pm, clva);
+ }
+ }
+ *cleanlist[cnt].pte = 0;
+ PTE_SYNC_CURRENT(pm, cleanlist[cnt].pte);
+ }
+
+ /*
+ * If it looks like we're removing a whole bunch
+ * of mappings, it's faster to just write-back
+ * the whole cache now and defer TLB flushes until
+ * pmap_update() is called.
+ */
+ if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE)
+ cleanlist_idx = 0;
+ else {
+ cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
+ pmap_idcache_wbinv_all(pm);
+ pm->pm_remove_all = TRUE;
+ }
+ }
+
+ pmap_free_l2_bucket(pm, l2b, mappings);
+ }
+
+ pmap_release_pmap_lock(pm);
+ PMAP_MAP_TO_HEAD_UNLOCK();
+}
+
+/*
+ * pmap_kenter_pa: enter an unmanaged, wired kernel mapping
+ *
+ * We assume there is already sufficient KVM space available
+ * to do this, as we can't allocate L2 descriptor tables/metadata
+ * from here.
+ */
+void
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep, opte;
+
+ NPDEBUG(PDB_KENTER,
+ printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n",
+ va, pa, prot));
+
+ l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+ KDASSERT(l2b != NULL);
+
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ opte = *ptep;
+
+ if (l2pte_valid(opte)) {
+ /* Replacing a live mapping: clean cache and TLB first. */
+ cpu_dcache_wbinv_range(va, PAGE_SIZE);
+ cpu_tlb_flushD_SE(va);
+ cpu_cpwait();
+ } else
+ if (opte == 0)
+ l2b->l2b_occupancy++;
+
+ /* Always mapped cacheable with kernel permissions. */
+ *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) |
+ pte_l2_s_cache_mode;
+ PTE_SYNC(ptep);
+}
+
+/*
+ * pmap_kremove: remove a range of wired kernel mappings previously
+ * entered with pmap_kenter_pa(). Cache/TLB are flushed per valid
+ * page; the PTEs are synced once per L2 bucket via PTE_SYNC_RANGE.
+ */
+void
+pmap_kremove(vaddr_t va, vsize_t len)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep, *sptep, opte;
+ vaddr_t next_bucket, eva;
+ u_int mappings;
+
+ NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n",
+ va, len));
+
+ eva = va + len;
+
+ while (va < eva) {
+ /* Process at most one L2 bucket's worth per iteration. */
+ next_bucket = L2_NEXT_BUCKET(va);
+ if (next_bucket > eva)
+ next_bucket = eva;
+
+ l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+ KDASSERT(l2b != NULL);
+
+ sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
+ mappings = 0;
+
+ while (va < next_bucket) {
+ opte = *ptep;
+ if (l2pte_valid(opte)) {
+ cpu_dcache_wbinv_range(va, PAGE_SIZE);
+ cpu_tlb_flushD_SE(va);
+ }
+ if (opte) {
+ *ptep = 0;
+ mappings++;
+ }
+ va += PAGE_SIZE;
+ ptep++;
+ }
+ KDASSERT(mappings <= l2b->l2b_occupancy);
+ l2b->l2b_occupancy -= mappings;
+ PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
+ }
+ cpu_cpwait();
+}
+
+/*
+ * pmap_extract: look up the physical address backing 'va' in 'pm'.
+ * Returns TRUE and stores the PA via 'pap' (if non-NULL) when a
+ * mapping exists; FALSE otherwise. Handles both L1 section mappings
+ * (kernel only) and L2 large/small pages.
+ */
+boolean_t
+pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
+{
+ struct l2_dtable *l2;
+ pd_entry_t *pl1pd, l1pd;
+ pt_entry_t *ptep, pte;
+ paddr_t pa;
+ u_int l1idx;
+
+ pmap_acquire_pmap_lock(pm);
+
+ l1idx = L1_IDX(va);
+ pl1pd = &pm->pm_l1->l1_kva[l1idx];
+ l1pd = *pl1pd;
+
+ if (l1pte_section_p(l1pd)) {
+ /*
+ * These should only happen for pmap_kernel()
+ */
+ KDASSERT(pm == pmap_kernel());
+ pmap_release_pmap_lock(pm);
+ pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
+ } else {
+ /*
+ * Note that we can't rely on the validity of the L1
+ * descriptor as an indication that a mapping exists.
+ * We have to look it up in the L2 dtable.
+ */
+ l2 = pm->pm_l2[L2_IDX(l1idx)];
+
+ if (l2 == NULL ||
+ (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
+ pmap_release_pmap_lock(pm);
+ return (FALSE);
+ }
+
+ ptep = &ptep[l2pte_index(va)];
+ pte = *ptep;
+ pmap_release_pmap_lock(pm);
+
+ if (pte == 0)
+ return (FALSE);
+
+ /* Large (64KB) vs. small (4KB) page: different frame masks. */
+ switch (pte & L2_TYPE_MASK) {
+ case L2_TYPE_L:
+ pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
+ break;
+
+ default:
+ pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
+ break;
+ }
+ }
+
+ if (pap != NULL)
+ *pap = pa;
+
+ return (TRUE);
+}
+
+/*
+ * pmap_protect: reduce the protection on the range [sva, eva) in 'pm'.
+ * Removing read permission removes the mappings entirely; adding
+ * write permission is deferred to fault time; only write-protection
+ * is handled inline.
+ */
+void
+pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep, pte;
+ vaddr_t next_bucket;
+ u_int flags;
+ int flush;
+
+ NPDEBUG(PDB_PROTECT,
+ printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n",
+ pm, sva, eva, prot));
+
+ if ((prot & VM_PROT_READ) == 0) {
+ pmap_remove(pm, sva, eva);
+ return;
+ }
+
+ if (prot & VM_PROT_WRITE) {
+ /*
+ * If this is a read->write transition, just ignore it and let
+ * uvm_fault() take care of it later.
+ */
+ return;
+ }
+
+ PMAP_MAP_TO_HEAD_LOCK();
+ pmap_acquire_pmap_lock(pm);
+
+ /*
+ * OK, at this point, we know we're doing write-protect operation.
+ * If the pmap is active, write-back the range.
+ */
+ pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE);
+
+ /*
+ * flush == -1: small range, flush the TLB per page as we go.
+ * flush >= 0: large range, count pages and do one full TLB
+ * flush at the end.
+ */
+ flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
+ flags = 0;
+
+ while (sva < eva) {
+ next_bucket = L2_NEXT_BUCKET(sva);
+ if (next_bucket > eva)
+ next_bucket = eva;
+
+ l2b = pmap_get_l2_bucket(pm, sva);
+ if (l2b == NULL) {
+ sva = next_bucket;
+ continue;
+ }
+
+ ptep = &l2b->l2b_kva[l2pte_index(sva)];
+
+ while (sva < next_bucket) {
+ if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) {
+ struct vm_page *pg;
+ u_int f;
+
+ pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
+ pte &= ~L2_S_PROT_W;
+ *ptep = pte;
+ PTE_SYNC(ptep);
+
+ if (pg != NULL) {
+ simple_lock(&pg->mdpage.pvh_slock);
+ f = pmap_modify_pv(pg, pm, sva,
+ PVF_WRITE, 0);
+ pmap_vac_me_harder(pg, pm, sva);
+ simple_unlock(&pg->mdpage.pvh_slock);
+ } else
+ f = PVF_REF | PVF_EXEC;
+
+ if (flush >= 0) {
+ flush++;
+ flags |= f;
+ } else
+ if (PV_BEEN_EXECD(f))
+ pmap_tlb_flushID_SE(pm, sva);
+ else
+ if (PV_BEEN_REFD(f))
+ pmap_tlb_flushD_SE(pm, sva);
+ }
+
+ sva += PAGE_SIZE;
+ ptep++;
+ }
+ }
+
+ pmap_release_pmap_lock(pm);
+ PMAP_MAP_TO_HEAD_UNLOCK();
+
+ if (flush) {
+ /* flush == -1 falls in here too, but flags is 0 then. */
+ if (PV_BEEN_EXECD(flags))
+ pmap_tlb_flushID(pm);
+ else
+ if (PV_BEEN_REFD(flags))
+ pmap_tlb_flushD(pm);
+ }
+}
+
+/*
+ * pmap_page_protect: lower the protection of every existing mapping
+ * of the page 'pg'. Writable requests are a no-op, read-only requests
+ * strip write permission, and anything else removes the page from all
+ * pmaps entirely.
+ */
+void
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
+{
+
+ NPDEBUG(PDB_PROTECT,
+ printf("pmap_page_protect: pg %p (0x%08lx), prot 0x%x\n",
+ pg, pg->phys_addr, prot));
+
+ if (prot == (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) ||
+ prot == (VM_PROT_READ|VM_PROT_WRITE)) {
+ /* Still fully accessible - nothing to revoke. */
+ return;
+ }
+
+ if (prot == VM_PROT_READ ||
+ prot == (VM_PROT_READ|VM_PROT_EXECUTE)) {
+ /* Downgrade to read-only. */
+ pmap_clearbit(pg, PVF_WRITE);
+ return;
+ }
+
+ /* No access at all: tear down every mapping of the page. */
+ pmap_page_remove(pg);
+}
+
+/*
+ * pmap_clear_modify:
+ *
+ * Clear the "modified" attribute for a page.
+ * Returns TRUE iff the attribute was set beforehand.
+ */
+boolean_t
+pmap_clear_modify(struct vm_page *pg)
+{
+
+ if ((pg->mdpage.pvh_attrs & PVF_MOD) == 0)
+ return (FALSE);
+
+ pmap_clearbit(pg, PVF_MOD);
+ return (TRUE);
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the "referenced" attribute for a page.
+ * Returns TRUE iff the attribute was set beforehand.
+ */
+boolean_t
+pmap_clear_reference(struct vm_page *pg)
+{
+
+ if ((pg->mdpage.pvh_attrs & PVF_REF) == 0)
+ return (FALSE);
+
+ pmap_clearbit(pg, PVF_REF);
+ return (TRUE);
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Test if a page has the "modified" attribute.
+ */
+/* See <arm/pmap.h> */
+
+/*
+ * pmap_is_referenced:
+ *
+ * Test if a page has the "referenced" attribute.
+ */
+/* See <arm/pmap.h> */
+
+/*
+ * pmap_fault_fixup:
+ *
+ * Handle "modified" and "referenced" emulation faults, and fix up
+ * stale L1 entries / TLB entries for mappings that are actually
+ * valid. Returns non-zero if the fault was handled here (no need
+ * to involve uvm_fault()), zero otherwise.
+ */
+int
+pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
+{
+ struct l2_dtable *l2;
+ struct l2_bucket *l2b;
+ pd_entry_t *pl1pd, l1pd;
+ pt_entry_t *ptep, pte;
+ paddr_t pa;
+ u_int l1idx;
+ int rv = 0;
+
+ PMAP_MAP_TO_HEAD_LOCK();
+ pmap_acquire_pmap_lock(pm);
+
+ l1idx = L1_IDX(va);
+
+ /*
+ * If there is no l2_dtable for this address, then the process
+ * has no business accessing it.
+ *
+ * Note: This will catch userland processes trying to access
+ * kernel addresses.
+ */
+ l2 = pm->pm_l2[L2_IDX(l1idx)];
+ if (l2 == NULL)
+ goto out;
+
+ /*
+ * Likewise if there is no L2 descriptor table
+ */
+ l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
+ if (l2b->l2b_kva == NULL)
+ goto out;
+
+ /*
+ * Check the PTE itself.
+ */
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ pte = *ptep;
+ if (pte == 0)
+ goto out;
+
+ /*
+ * Catch a userland access to the vector page mapped at 0x0
+ */
+ if (user && (pte & L2_S_PROT_U) == 0)
+ goto out;
+
+ pa = l2pte_pa(pte);
+
+ if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) {
+ /*
+ * This looks like a good candidate for "page modified"
+ * emulation...
+ */
+ struct pv_entry *pv;
+ struct vm_page *pg;
+
+ /* Extract the physical address of the page */
+ if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
+ goto out;
+
+ /* Get the current flags for this page. */
+ simple_lock(&pg->mdpage.pvh_slock);
+
+ pv = pmap_find_pv(pg, pm, va);
+ if (pv == NULL) {
+ simple_unlock(&pg->mdpage.pvh_slock);
+ goto out;
+ }
+
+ /*
+ * Do the flags say this page is writable? If not then it
+ * is a genuine write fault. If yes then the write fault is
+ * our fault as we did not reflect the write access in the
+ * PTE. Now we know a write has occurred we can correct this
+ * and also set the modified bit
+ */
+ if ((pv->pv_flags & PVF_WRITE) == 0) {
+ simple_unlock(&pg->mdpage.pvh_slock);
+ goto out;
+ }
+
+ NPDEBUG(PDB_FOLLOW,
+ printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
+ pm, va, pg->phys_addr));
+
+ /* Record both referenced and modified on page and pv. */
+ pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
+ pv->pv_flags |= PVF_REF | PVF_MOD;
+ simple_unlock(&pg->mdpage.pvh_slock);
+
+ /*
+ * Re-enable write permissions for the page. No need to call
+ * pmap_vac_me_harder(), since this is just a
+ * modified-emulation fault, and the PVF_WRITE bit isn't
+ * changing. We've already set the cacheable bits based on
+ * the assumption that we can write to this page.
+ */
+ *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
+ PTE_SYNC(ptep);
+ rv = 1;
+ } else
+ if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
+ /*
+ * This looks like a good candidate for "page referenced"
+ * emulation.
+ */
+ struct pv_entry *pv;
+ struct vm_page *pg;
+
+ /* Extract the physical address of the page */
+ if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
+ goto out;
+
+ /* Get the current flags for this page. */
+ simple_lock(&pg->mdpage.pvh_slock);
+
+ pv = pmap_find_pv(pg, pm, va);
+ if (pv == NULL) {
+ simple_unlock(&pg->mdpage.pvh_slock);
+ goto out;
+ }
+
+ pg->mdpage.pvh_attrs |= PVF_REF;
+ pv->pv_flags |= PVF_REF;
+ simple_unlock(&pg->mdpage.pvh_slock);
+
+ NPDEBUG(PDB_FOLLOW,
+ printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
+ pm, va, pg->phys_addr));
+
+ /* Make the PTE valid (small page) so the access can proceed. */
+ *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
+ PTE_SYNC(ptep);
+ rv = 1;
+ }
+
+ /*
+ * We know there is a valid mapping here, so simply
+ * fix up the L1 if necessary.
+ */
+ pl1pd = &pm->pm_l1->l1_kva[l1idx];
+ l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
+ if (*pl1pd != l1pd) {
+ *pl1pd = l1pd;
+ PTE_SYNC(pl1pd);
+ rv = 1;
+ }
+
+#ifdef CPU_SA110
+ /*
+ * There are bugs in the rev K SA110. This is a check for one
+ * of them.
+ */
+ if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
+ curcpu()->ci_arm_cpurev < 3) {
+ /* Always current pmap */
+ if (l2pte_valid(pte)) {
+ extern int kernel_debug;
+ if (kernel_debug & 1) {
+ struct proc *p = curproc;
+ printf("prefetch_abort: page is already "
+ "mapped - pte=%p *pte=%08x\n", ptep, pte);
+ printf("prefetch_abort: pc=%08lx proc=%p "
+ "process=%s\n", va, p, p->p_comm);
+ printf("prefetch_abort: far=%08x fs=%x\n",
+ cpu_faultaddress(), cpu_faultstatus());
+ }
+#ifdef DDB
+ if (kernel_debug & 2)
+ Debugger();
+#endif
+ rv = 1;
+ }
+ }
+#endif /* CPU_SA110 */
+
+#ifdef DEBUG
+ /*
+ * If 'rv == 0' at this point, it generally indicates that there is a
+ * stale TLB entry for the faulting address. This happens when two or
+ * more processes are sharing an L1. Since we don't flush the TLB on
+ * a context switch between such processes, we can take domain faults
+ * for mappings which exist at the same VA in both processes. EVEN IF
+ * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
+ * example.
+ *
+ * This is extremely likely to happen if pmap_enter() updated the L1
+ * entry for a recently entered mapping. In this case, the TLB is
+ * flushed for the new mapping, but there may still be TLB entries for
+ * other mappings belonging to other processes in the 1MB range
+ * covered by the L1 entry.
+ *
+ * Since 'rv == 0', we know that the L1 already contains the correct
+ * value, so the fault must be due to a stale TLB entry.
+ *
+ * Since we always need to flush the TLB anyway in the case where we
+ * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
+ * stale TLB entries dynamically.
+ *
+ * However, the above condition can ONLY happen if the current L1 is
+ * being shared. If it happens when the L1 is unshared, it indicates
+ * that other parts of the pmap are not doing their job WRT managing
+ * the TLB.
+ */
+ if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
+ extern int last_fault_code;
+ printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
+ pm, va, ftype);
+ printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
+ l2, l2b, ptep, pl1pd);
+ printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
+ pte, l1pd, last_fault_code);
+#ifdef DDB
+ Debugger();
+#endif
+ }
+#endif
+
+ /* Kill any stale TLB entry for the faulting VA and claim the fault. */
+ cpu_tlb_flushID_SE(va);
+ cpu_cpwait();
+
+ rv = 1;
+
+out:
+ pmap_release_pmap_lock(pm);
+ PMAP_MAP_TO_HEAD_UNLOCK();
+
+ return (rv);
+}
+
+/*
+ * pmap_collect: free resources held by a pmap
+ *
+ * => optional function.
+ * => called when a process is swapped out to free memory.
+ */
+void
+pmap_collect(pmap_t pm)
+{
+ /*
+ * Nothing to do.
+ * We don't even need to free-up the process' L1.
+ */
+}
+
+/*
+ * Routine: pmap_procwr
+ *
+ * Function:
+ * Synchronize caches corresponding to [addr, addr+len) in p.
+ */
+void
+pmap_procwr(struct proc *p, vaddr_t va, int len)
+{
+
+ /* Only the current process' address space is visible in the cache. */
+ if (p != curproc)
+ return;
+
+ cpu_icache_sync_range(va, len);
+}
+
+/*
+ * Routine: pmap_unwire
+ * Function: Clear the wired attribute for a map/virtual-address pair.
+ *
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void
+pmap_unwire(pmap_t pm, vaddr_t va)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep, pte;
+ struct vm_page *pg;
+ paddr_t pa;
+
+ NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va));
+
+ PMAP_MAP_TO_HEAD_LOCK();
+ pmap_acquire_pmap_lock(pm);
+
+ l2b = pmap_get_l2_bucket(pm, va);
+ KDASSERT(l2b != NULL);
+
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ pte = *ptep;
+
+ /* Extract the physical address of the page */
+ pa = l2pte_pa(pte);
+
+ if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
+ /* Update the wired bit in the pv entry for this page. */
+ simple_lock(&pg->mdpage.pvh_slock);
+ (void) pmap_modify_pv(pg, pm, va, PVF_WIRED, 0);
+ simple_unlock(&pg->mdpage.pvh_slock);
+ }
+
+ pmap_release_pmap_lock(pm);
+ PMAP_MAP_TO_HEAD_UNLOCK();
+}
+
+/*
+ * pmap_activate: make the pmap of process 'p' the active one.
+ *
+ * Records the page directory/DACR in the pcb, and if 'p' is the
+ * current process, switches the MMU over to the new address space.
+ * The actual TTB/DACR switch runs at splhigh with interrupts off.
+ */
+void
+pmap_activate(struct proc *p)
+{
+ pmap_t pm;
+ struct pcb *pcb;
+ int s;
+
+ pm = p->p_vmspace->vm_map.pmap;
+ pcb = &p->p_addr->u_pcb;
+
+ pmap_set_pcb_pagedir(pm, pcb);
+
+ if (p == curproc) {
+ u_int cur_dacr, cur_ttb;
+
+ /* Read the live TTB and DACR from CP15. */
+ __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb));
+ __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr));
+
+ cur_ttb &= ~(L1_TABLE_SIZE - 1);
+
+ if (cur_ttb == (u_int)pcb->pcb_pagedir &&
+ cur_dacr == pcb->pcb_dacr) {
+ /*
+ * No need to switch address spaces.
+ */
+ return;
+ }
+
+ s = splhigh();
+ pmap_acquire_pmap_lock(pm);
+ disable_interrupts(I32_bit | F32_bit);
+
+ /*
+ * We MUST, I repeat, MUST fix up the L1 entry corresponding
+ * to 'vector_page' in the incoming L1 table before switching
+ * to it otherwise subsequent interrupts/exceptions (including
+ * domain faults!) will jump into hyperspace.
+ */
+ if (pcb->pcb_pl1vec) {
+ *pcb->pcb_pl1vec = pcb->pcb_l1vec;
+ /*
+ * Don't need to PTE_SYNC() at this point since
+ * cpu_setttb() is about to flush both the cache
+ * and the TLB.
+ */
+ }
+
+ cpu_domains(pcb->pcb_dacr);
+ cpu_setttb(pcb->pcb_pagedir);
+
+ enable_interrupts(I32_bit | F32_bit);
+
+ /*
+ * Flag any previous userland pmap as being NOT
+ * resident in the cache/tlb.
+ */
+ if (pmap_cache_state && pmap_cache_state != &pm->pm_cstate)
+ pmap_cache_state->cs_all = 0;
+
+ /*
+ * The new pmap, however, IS resident.
+ */
+ pmap_cache_state = &pm->pm_cstate;
+ pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
+ pmap_release_pmap_lock(pm);
+ splx(s);
+ }
+}
+
+/*
+ * pmap_deactivate: nothing to do here; the work happens in
+ * pmap_activate() for the incoming process.
+ */
+void
+pmap_deactivate(struct proc *p)
+{
+}
+
+/*
+ * pmap_update: finish any deferred pmap operations.
+ *
+ * Completes the pmap_remove_all() optimisation (deferred TLB flush)
+ * and re-arms cache/TLB state tracking for the current pmap.
+ */
+void
+pmap_update(pmap_t pm)
+{
+
+ if (pm->pm_remove_all) {
+ /*
+ * Finish up the pmap_remove_all() optimisation by flushing
+ * the TLB.
+ */
+ pmap_tlb_flushID(pm);
+ pm->pm_remove_all = FALSE;
+ }
+
+ if (pmap_is_current(pm)) {
+ /*
+ * If we're dealing with a current userland pmap, move its L1
+ * to the end of the LRU.
+ */
+ if (pm != pmap_kernel())
+ pmap_use_l1(pm);
+
+ /*
+ * We can assume we're done with frobbing the cache/tlb for
+ * now. Make sure any future pmap ops don't skip cache/tlb
+ * flushes.
+ */
+ pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
+ }
+
+ /*
+ * make sure TLB/cache operations have completed.
+ */
+ cpu_cpwait();
+}
+
+/*
+ * pmap_remove_all: prepare for wholesale teardown of 'pm'.
+ */
+void
+pmap_remove_all(pmap_t pm)
+{
+
+ /*
+ * The vmspace described by this pmap is about to be torn down.
+ * Until pmap_update() is called, UVM will only make calls
+ * to pmap_remove(). We can make life much simpler by flushing
+ * the cache now, and deferring TLB invalidation to pmap_update().
+ */
+ pmap_idcache_wbinv_all(pm);
+ pm->pm_remove_all = TRUE;
+}
+
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains no valid mappings.
+ * Tolerates a NULL pmap. Drops one reference; resources are only
+ * freed when the count hits zero.
+ */
+void
+pmap_destroy(pmap_t pm)
+{
+ u_int count;
+
+ if (pm == NULL)
+ return;
+
+ if (pm->pm_remove_all) {
+ /* Complete any deferred TLB invalidation first. */
+ pmap_tlb_flushID(pm);
+ pm->pm_remove_all = FALSE;
+ }
+
+ /*
+ * Drop reference count
+ */
+ simple_lock(&pm->pm_lock);
+ count = --pm->pm_obj.uo_refs;
+ simple_unlock(&pm->pm_lock);
+ if (count > 0) {
+ if (pmap_is_current(pm)) {
+ if (pm != pmap_kernel())
+ pmap_use_l1(pm);
+ pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
+ }
+ return;
+ }
+
+ /*
+ * reference count is zero, free pmap resources and then free pmap.
+ */
+
+ if (vector_page < KERNEL_BASE) {
+ struct pcb *pcb = &proc0.p_addr->u_pcb;
+
+ if (pmap_is_current(pm)) {
+ /*
+ * Frob the L1 entry corresponding to the vector
+ * page so that it contains the kernel pmap's domain
+ * number. This will ensure pmap_remove() does not
+ * pull the current vector page out from under us.
+ */
+ disable_interrupts(I32_bit | F32_bit);
+ *pcb->pcb_pl1vec = pcb->pcb_l1vec;
+ cpu_domains(pcb->pcb_dacr);
+ cpu_setttb(pcb->pcb_pagedir);
+ enable_interrupts(I32_bit | F32_bit);
+ }
+
+ /* Remove the vector page mapping */
+ pmap_remove(pm, vector_page, vector_page + PAGE_SIZE);
+ pmap_update(pm);
+
+ /*
+ * Make sure cpu_switch(), et al, DTRT. This is safe to do
+ * since this process has no remaining mappings of its own.
+ */
+ curpcb->pcb_pl1vec = pcb->pcb_pl1vec;
+ curpcb->pcb_l1vec = pcb->pcb_l1vec;
+ curpcb->pcb_dacr = pcb->pcb_dacr;
+ curpcb->pcb_pagedir = pcb->pcb_pagedir;
+ }
+
+ LIST_REMOVE(pm, pm_list);
+
+ pmap_free_l1(pm);
+
+ /* return the pmap to the pool */
+ pool_cache_put(&pmap_pmap_cache, pm);
+}
+
+
+/*
+ * void pmap_reference(pmap_t pm)
+ *
+ * Add a reference to the specified pmap.
+ * Tolerates a NULL pmap (no-op).
+ */
+void
+pmap_reference(pmap_t pm)
+{
+
+ if (pm == NULL)
+ return;
+
+ /* Keep the pmap's L1 fresh in the LRU while referenced. */
+ pmap_use_l1(pm);
+
+ simple_lock(&pm->pm_lock);
+ pm->pm_obj.uo_refs++;
+ simple_unlock(&pm->pm_lock);
+}
+
+/*
+ * pmap_zero_page()
+ *
+ * Zero a given physical page by mapping it at a page hook point.
+ * In doing the zero page op, the page we zero is mapped cachable, as with
+ * StrongARM accesses to non-cached pages are non-burst making writing
+ * _any_ bulk data very slow.
+ */
+#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+void
+pmap_zero_page_generic(struct vm_page *pg)
+{
+ paddr_t phys = VM_PAGE_TO_PHYS(pg);
+#ifdef DEBUG
+ if (pg->mdpage.pvh_list != NULL)
+ panic("pmap_zero_page: page has mappings");
+#endif
+
+ KDASSERT((phys & PGOFSET) == 0);
+
+ /*
+ * Hook in the page, zero it, and purge the cache for that
+ * zeroed page. Invalidate the TLB as needed.
+ */
+ *cdst_pte = L2_S_PROTO | phys |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ bzero_page(cdstp);
+ /* Push the zeroes to memory so aliased mappings see them. */
+ cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
+}
+#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
+
+#if ARM_MMU_XSCALE == 1
+/*
+ * XScale variant of pmap_zero_page(): the hook mapping uses the
+ * mini-data cache (L2_XSCALE_T_TEX) to avoid polluting the main cache.
+ */
+void
+pmap_zero_page_xscale(struct vm_page *pg)
+{
+ paddr_t phys = VM_PAGE_TO_PHYS(pg);
+#ifdef DEBUG
+ if (pg->mdpage.pvh_list != NULL)
+ panic("pmap_zero_page: page has mappings");
+#endif
+
+ KDASSERT((phys & PGOFSET) == 0);
+
+ /*
+ * Hook in the page, zero it, and purge the cache for that
+ * zeroed page. Invalidate the TLB as needed.
+ */
+ *cdst_pte = L2_S_PROTO | phys |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
+ L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ bzero_page(cdstp);
+#if 0
+ xscale_cache_clean_minidata();
+#else
+ /* FIXME(review): the mini-data cache clean is stubbed out with a
+ * printf; until xscale_cache_clean_minidata() is enabled the zeroed
+ * data may sit dirty in the mini-data cache — correctness hazard. */
+ printf("xscale_cache_clean_minidata call\n");
+#endif
+}
+#endif /* ARM_MMU_XSCALE == 1 */
+
+/* pmap_pageidlezero()
+ *
+ * The same as above, except that we assume that the page is not
+ * mapped. This means we never have to flush the cache first. Called
+ * from the idle loop.
+ *
+ * Returns TRUE if the page was fully zeroed, FALSE if the zeroing was
+ * abandoned because a process became runnable.
+ */
+boolean_t
+pmap_pageidlezero(struct vm_page *pg)
+{
+ unsigned int i;
+ int *ptr;
+ boolean_t rv = TRUE;
+ paddr_t phys = VM_PAGE_TO_PHYS(pg);
+#ifdef DEBUG
+ if (pg->mdpage.pvh_list != NULL)
+ panic("pmap_pageidlezero: page has mappings");
+#endif
+
+ KDASSERT((phys & PGOFSET) == 0);
+
+ /*
+ * Hook in the page, zero it, and purge the cache for that
+ * zeroed page. Invalidate the TLB as needed.
+ */
+ *cdst_pte = L2_S_PROTO | phys |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+
+ /* Zero one word at a time so we can poll the run queues between
+ * words and bail out promptly. */
+ for (i = 0, ptr = (int *)cdstp;
+ i < (PAGE_SIZE / sizeof(int)); i++) {
+ if (whichqs != 0) {
+ /*
+ * A process has become ready. Abort now,
+ * so we don't keep it waiting while we
+ * do slow memory access to finish this
+ * page.
+ */
+ rv = FALSE;
+ break;
+ }
+ *ptr++ = 0;
+ }
+
+ if (rv)
+ /*
+ * if we aborted we'll rezero this page again later so don't
+ * purge it unless we finished it
+ */
+ cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
+
+ return (rv);
+}
+
+/*
+ * pmap_copy_page()
+ *
+ * Copy one physical page into another, by mapping the pages into
+ * hook points. The same comment regarding cachability as in
+ * pmap_zero_page also applies here.
+ */
+#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+void
+pmap_copy_page_generic(struct vm_page *src_pg, struct vm_page *dst_pg)
+{
+ paddr_t src = VM_PAGE_TO_PHYS(src_pg);
+ paddr_t dst = VM_PAGE_TO_PHYS(dst_pg);
+#ifdef DEBUG
+ if (dst_pg->mdpage.pvh_list != NULL)
+ panic("pmap_copy_page: dst page has mappings");
+#endif
+
+ KDASSERT((src & PGOFSET) == 0);
+ KDASSERT((dst & PGOFSET) == 0);
+
+ /*
+ * Clean the source page. Hold the source page's lock for
+ * the duration of the copy so that no other mappings can
+ * be created while we have a potentially aliased mapping.
+ */
+ simple_lock(&src_pg->mdpage.pvh_slock);
+ (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
+
+ /*
+ * Map the pages into the page hook points, copy them, and purge
+ * the cache for the appropriate page. Invalidate the TLB
+ * as required.
+ */
+ *csrc_pte = L2_S_PROTO | src |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
+ PTE_SYNC(csrc_pte);
+ *cdst_pte = L2_S_PROTO | dst |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(csrcp);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ bcopy_page(csrcp, cdstp);
+ /* Source hook was read-only: invalidate is sufficient, no writeback. */
+ cpu_dcache_inv_range(csrcp, PAGE_SIZE);
+ simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
+ cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
+}
+#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
+
+#if ARM_MMU_XSCALE == 1
+/*
+ * XScale variant of pmap_copy_page(): both hook mappings use the
+ * mini-data cache (L2_XSCALE_T_TEX) to avoid polluting the main cache.
+ */
+void
+pmap_copy_page_xscale(struct vm_page *src_pg, struct vm_page *dst_pg)
+{
+ paddr_t src = VM_PAGE_TO_PHYS(src_pg);
+ paddr_t dst = VM_PAGE_TO_PHYS(dst_pg);
+#ifdef DEBUG
+ if (dst_pg->mdpage.pvh_list != NULL)
+ panic("pmap_copy_page: dst page has mappings");
+#endif
+
+ KDASSERT((src & PGOFSET) == 0);
+ KDASSERT((dst & PGOFSET) == 0);
+
+ /*
+ * Clean the source page. Hold the source page's lock for
+ * the duration of the copy so that no other mappings can
+ * be created while we have a potentially aliased mapping.
+ */
+ simple_lock(&src_pg->mdpage.pvh_slock);
+ (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
+
+ /*
+ * Map the pages into the page hook points, copy them, and purge
+ * the cache for the appropriate page. Invalidate the TLB
+ * as required.
+ */
+ *csrc_pte = L2_S_PROTO | src |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
+ L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
+ PTE_SYNC(csrc_pte);
+ *cdst_pte = L2_S_PROTO | dst |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
+ L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
+ PTE_SYNC(cdst_pte);
+ cpu_tlb_flushD_SE(csrcp);
+ cpu_tlb_flushD_SE(cdstp);
+ cpu_cpwait();
+ bcopy_page(csrcp, cdstp);
+ simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
+#if 0
+ xscale_cache_clean_minidata();
+#else
+ /* FIXME(review): mini-data cache clean stubbed out with a printf,
+ * as in pmap_zero_page_xscale — the copied data may remain dirty
+ * in the mini-data cache until this is enabled. */
+ printf("xscale_cache_clean_minidata call\n");
+#endif
+}
+#endif /* ARM_MMU_XSCALE == 1 */
+
+/*
+ * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
+ *
+ * Return the start and end addresses of the kernel's virtual space.
+ * These values are setup in pmap_bootstrap and are updated as pages
+ * are allocated.
+ */
+void
+pmap_virtual_space(vaddr_t *start, vaddr_t *end)
+{
+ /* Simply report the bounds established by pmap_bootstrap(). */
+ *start = virtual_avail;
+ *end = virtual_end;
+}
+
+/*
+ * Helper function for pmap_grow_l2_bucket()
+ *
+ * Allocate one physical page, map it (zero-filled) at kernel VA "va"
+ * with the given cache mode, and optionally return its physical
+ * address via *pap. Returns 0 on success, 1 on allocation failure.
+ */
+static __inline int
+pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep;
+ paddr_t pa;
+
+ /* Before uvm_page_init() we must steal pages directly. */
+ if (uvm.page_init_done == FALSE) {
+ if (uvm_page_physget(&pa) == FALSE)
+ return (1);
+ } else {
+ struct vm_page *pg;
+ pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
+ if (pg == NULL)
+ return (1);
+ pa = VM_PAGE_TO_PHYS(pg);
+ }
+
+ if (pap)
+ *pap = pa;
+
+ /* The kernel L2 bucket for this VA must already exist. */
+ l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+ KDASSERT(l2b != NULL);
+
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ *ptep = L2_S_PROTO | pa | cache_mode |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
+ PTE_SYNC(ptep);
+ memset((void *)va, 0, PAGE_SIZE);
+ return (0);
+}
+
+/*
+ * This is the same as pmap_alloc_l2_bucket(), except that it is only
+ * used by pmap_growkernel().
+ *
+ * The l2_dtable and L2 page-table memory is carved out of the KVA
+ * ranges reserved in pmap_bootstrap() (pmap_kernel_l2dtable_kva and
+ * pmap_kernel_l2ptp_kva), growing them a backing page at a time.
+ * Returns the bucket, or NULL if a backing page could not be
+ * allocated.
+ */
+static __inline struct l2_bucket *
+pmap_grow_l2_bucket(pmap_t pm, vaddr_t va)
+{
+ struct l2_dtable *l2;
+ struct l2_bucket *l2b;
+ u_short l1idx;
+ vaddr_t nva;
+
+ l1idx = L1_IDX(va);
+
+ if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
+ /*
+ * No mapping at this address, as there is
+ * no entry in the L1 table.
+ * Need to allocate a new l2_dtable.
+ */
+ nva = pmap_kernel_l2dtable_kva;
+ if ((nva & PGOFSET) == 0) {
+ /*
+ * Need to allocate a backing page
+ */
+ if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
+ return (NULL);
+ }
+
+ l2 = (struct l2_dtable *)nva;
+ nva += sizeof(struct l2_dtable);
+
+ if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) {
+ /*
+ * The new l2_dtable straddles a page boundary.
+ * Map in another page to cover it.
+ */
+ if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
+ return (NULL);
+ }
+
+ pmap_kernel_l2dtable_kva = nva;
+
+ /*
+ * Link it into the parent pmap
+ */
+ pm->pm_l2[L2_IDX(l1idx)] = l2;
+ }
+
+ l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
+
+ /*
+ * Fetch pointer to the L2 page table associated with the address.
+ */
+ if (l2b->l2b_kva == NULL) {
+ pt_entry_t *ptep;
+
+ /*
+ * No L2 page table has been allocated. Chances are, this
+ * is because we just allocated the l2_dtable, above.
+ */
+ nva = pmap_kernel_l2ptp_kva;
+ ptep = (pt_entry_t *)nva;
+ if ((nva & PGOFSET) == 0) {
+ /*
+ * Need to allocate a backing page
+ */
+ if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
+ &pmap_kernel_l2ptp_phys))
+ return (NULL);
+ PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
+ }
+
+ /* Account for the new table and record its VA/PA. */
+ l2->l2_occupancy++;
+ l2b->l2b_kva = ptep;
+ l2b->l2b_l1idx = l1idx;
+ l2b->l2b_phys = pmap_kernel_l2ptp_phys;
+
+ pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
+ pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
+ }
+
+ return (l2b);
+}
+
+/*
+ * Grow the managed kernel VA space up to (at least) maxkvaddr,
+ * allocating L2 tables 1MB (one L1 slot) at a time and propagating
+ * each new L1 entry to every L1 table in the system. Returns the
+ * new high-water mark.
+ */
+vaddr_t
+pmap_growkernel(vaddr_t maxkvaddr)
+{
+ pmap_t kpm = pmap_kernel();
+ struct l1_ttable *l1;
+ struct l2_bucket *l2b;
+ pd_entry_t *pl1pd;
+ int s;
+
+ if (maxkvaddr <= pmap_curmaxkvaddr)
+ goto out; /* we are OK */
+
+ NPDEBUG(PDB_GROWKERN,
+ printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n",
+ pmap_curmaxkvaddr, maxkvaddr));
+
+ KDASSERT(maxkvaddr <= virtual_end);
+
+ /*
+ * whoops! we need to add kernel PTPs
+ */
+
+ s = splhigh(); /* to be safe */
+ simple_lock(&kpm->pm_lock);
+
+ /* Map 1MB at a time */
+ for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) {
+
+ l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
+ KDASSERT(l2b != NULL);
+
+ /* Distribute new L1 entry to all other L1s */
+ SLIST_FOREACH(l1, &l1_list, l1_link) {
+ pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)];
+ *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
+ L1_C_PROTO;
+ PTE_SYNC(pl1pd);
+ }
+ }
+
+ /*
+ * flush out the cache, expensive but growkernel will happen so
+ * rarely
+ */
+ cpu_dcache_wbinv_all();
+ cpu_tlb_flushD();
+ cpu_cpwait();
+
+ simple_unlock(&kpm->pm_lock);
+ splx(s);
+
+out:
+ return (pmap_curmaxkvaddr);
+}
+
+/************************ Utility routines ****************************/
+
+/*
+ * vector_page_setprot:
+ *
+ * Manipulate the protection of the vector page.
+ */
+void
+vector_page_setprot(int prot)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep;
+
+ l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
+ KDASSERT(l2b != NULL);
+
+ ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
+
+ /* NOTE(review): this clears L1_S_PROT_MASK from an *L2* PTE;
+ * that is only correct while the L1/L2 AP masks coincide —
+ * consider L2_S_PROT_MASK here. Verify against pte.h. */
+ *ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
+ PTE_SYNC(ptep);
+ cpu_tlb_flushD_SE(vector_page);
+ cpu_cpwait();
+}
+
+/*
+ * This is used to stuff certain critical values into the PCB where they
+ * can be accessed quickly from cpu_switch() et al.
+ *
+ * Caches the pmap's L1 physical address, domain access control value,
+ * cache-state pointer and (if the vector page lives in user space)
+ * the L1 slot/value needed to keep the vector page mapped.
+ */
+void
+pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
+{
+ struct l2_bucket *l2b;
+
+ KDASSERT(pm->pm_l1);
+
+ pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
+ /* Grant client access to the kernel domain and this pmap's domain. */
+ pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
+ (DOMAIN_CLIENT << (pm->pm_domain * 2));
+ pcb->pcb_cstate = (void *)&pm->pm_cstate;
+
+ if (vector_page < KERNEL_BASE) {
+ /* Vector page is in user VA: remember the L1 slot and the
+ * L1 entry value that maps it, for cpu_switch(). */
+ pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
+ l2b = pmap_get_l2_bucket(pm, vector_page);
+ pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
+ L1_C_DOM(pm->pm_domain);
+ } else
+ pcb->pcb_pl1vec = NULL;
+}
+
+/*
+ * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
+ * Returns TRUE if the mapping exists, else FALSE.
+ *
+ * NOTE: This function is only used by a couple of arm-specific modules.
+ * It is not safe to take any pmap locks here, since we could be right
+ * in the middle of debugging the pmap anyway...
+ *
+ * It is possible for this routine to return FALSE even though a valid
+ * mapping does exist. This is because we don't lock, so the metadata
+ * state may be inconsistent.
+ *
+ * NOTE: We can return a NULL *ptp in the case where the L1 pde is
+ * a "section" mapping.
+ */
+boolean_t
+pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp)
+{
+ struct l2_dtable *l2;
+ pd_entry_t *pl1pd, l1pd;
+ pt_entry_t *ptep;
+ u_short l1idx;
+
+ if (pm->pm_l1 == NULL)
+ return (FALSE);
+
+ l1idx = L1_IDX(va);
+ *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
+ l1pd = *pl1pd;
+
+ /* Section mappings have no L2 table: report PDE only. */
+ if (l1pte_section_p(l1pd)) {
+ *ptp = NULL;
+ return (TRUE);
+ }
+
+ if (pm->pm_l2 == NULL)
+ return (FALSE);
+
+ l2 = pm->pm_l2[L2_IDX(l1idx)];
+
+ if (l2 == NULL ||
+ (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
+ return (FALSE);
+ }
+
+ *ptp = &ptep[l2pte_index(va)];
+ return (TRUE);
+}
+
+/*
+ * Fetch a pointer to the PDE (L1 entry) covering "va" in pmap "pm".
+ * Returns TRUE unless the pmap has no L1 table. Like
+ * pmap_get_pde_pte(), this takes no locks.
+ */
+boolean_t
+pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp)
+{
+ u_short l1idx;
+
+ if (pm->pm_l1 == NULL)
+ return (FALSE);
+
+ l1idx = L1_IDX(va);
+ *pdp = &pm->pm_l1->l1_kva[l1idx];
+
+ return (TRUE);
+}
+
+/************************ Bootstrapping routines ****************************/
+
+/*
+ * Initialise the metadata for a (newly allocated) L1 translation
+ * table whose KVA is "l1pt": set up the domain free-list, seed the
+ * table with the kernel's L1 entries, record its physical address,
+ * and insert it on the global L1 lists.
+ */
+void
+pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
+{
+ int i;
+
+ l1->l1_kva = l1pt;
+ l1->l1_domain_use_count = 0;
+ l1->l1_domain_first = 0;
+
+ /* Chain the free domain slots: slot i points to i + 1. */
+ for (i = 0; i < PMAP_DOMAINS; i++)
+ l1->l1_domain_free[i] = i + 1;
+
+ /*
+ * Copy the kernel's L1 entries to each new L1.
+ */
+ if (pmap_initialized)
+ memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);
+
+ if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt,
+ &l1->l1_physaddr) == FALSE)
+ panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
+
+ SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
+ TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
+}
+
+/*
+ * pmap_bootstrap() is called from the board-specific initarm() routine
+ * once the kernel L1/L2 descriptors tables have been set up.
+ *
+ * This is a somewhat convoluted process since pmap bootstrap is, effectively,
+ * spread over a number of disparate files/functions.
+ *
+ * We are passed the following parameters
+ * - kernel_l1pt
+ * This is a pointer to the base of the kernel's L1 translation table.
+ * - vstart
+ * 1MB-aligned start of managed kernel virtual memory.
+ * - vend
+ * 1MB-aligned end of managed kernel virtual memory.
+ *
+ * We use the first parameter to build the metadata (struct l1_ttable and
+ * struct l2_dtable) necessary to track kernel mappings.
+ */
+#define PMAP_STATIC_L2_SIZE 16
+void
+pmap_bootstrap(pd_entry_t *kernel_l1pt, vaddr_t vstart, vaddr_t vend)
+{
+ static struct l1_ttable static_l1;
+ static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
+ struct l1_ttable *l1 = &static_l1;
+ struct l2_dtable *l2;
+ struct l2_bucket *l2b;
+ pmap_t pm = pmap_kernel();
+ pd_entry_t pde;
+ pt_entry_t *ptep;
+ paddr_t pa;
+ vaddr_t va;
+ vsize_t size;
+ int l1idx, l2idx, l2next = 0;
+
+ /*
+ * Initialise the kernel pmap object
+ */
+ pm->pm_l1 = l1;
+ pm->pm_domain = PMAP_DOMAIN_KERNEL;
+ pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
+ simple_lock_init(&pm->pm_lock);
+ pm->pm_obj.pgops = NULL;
+ TAILQ_INIT(&pm->pm_obj.memq);
+ pm->pm_obj.uo_npages = 0;
+ pm->pm_obj.uo_refs = 1;
+
+ /*
+ * Scan the L1 translation table created by initarm() and create
+ * the required metadata for all valid mappings found in it.
+ */
+ for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
+ pde = kernel_l1pt[l1idx];
+
+ /*
+ * We're only interested in Coarse mappings.
+ * pmap_extract() can deal with section mappings without
+ * recourse to checking L2 metadata.
+ */
+ if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
+ continue;
+
+ /*
+ * Lookup the KVA of this L2 descriptor table
+ */
+ pa = (paddr_t)(pde & L1_C_ADDR_MASK);
+ ptep = (pt_entry_t *)kernel_pt_lookup(pa);
+ if (ptep == NULL) {
+ panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
+ (u_int)l1idx << L1_S_SHIFT, pa);
+ }
+
+ /*
+ * Fetch the associated L2 metadata structure.
+ * Allocate a new one if necessary.
+ */
+ if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
+ if (l2next == PMAP_STATIC_L2_SIZE)
+ panic("pmap_bootstrap: out of static L2s");
+ pm->pm_l2[L2_IDX(l1idx)] = l2 = &static_l2[l2next++];
+ }
+
+ /*
+ * One more L1 slot tracked...
+ */
+ l2->l2_occupancy++;
+
+ /*
+ * Fill in the details of the L2 descriptor in the
+ * appropriate bucket.
+ */
+ l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
+ l2b->l2b_kva = ptep;
+ l2b->l2b_phys = pa;
+ l2b->l2b_l1idx = l1idx;
+
+ /*
+ * Establish an initial occupancy count for this descriptor
+ */
+ for (l2idx = 0;
+ l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
+ l2idx++) {
+ if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
+ l2b->l2b_occupancy++;
+ }
+ }
+
+ /*
+ * Make sure the descriptor itself has the correct cache mode.
+ * If not, fix it, but whine about the problem. Port-meisters
+ * should consider this a clue to fix up their initarm()
+ * function. :)
+ */
+ if (pmap_set_pt_cache_mode(kernel_l1pt, (vaddr_t)ptep)) {
+ printf("pmap_bootstrap: WARNING! wrong cache mode for "
+ "L2 pte @ %p\n", ptep);
+ }
+ }
+
+ /*
+ * Ensure the primary (kernel) L1 has the correct cache mode for
+ * a page table. Bitch if it is not correctly set.
+ */
+ for (va = (vaddr_t)kernel_l1pt;
+ va < ((vaddr_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
+ if (pmap_set_pt_cache_mode(kernel_l1pt, va))
+ printf("pmap_bootstrap: WARNING! wrong cache mode for "
+ "primary L1 @ 0x%lx\n", va);
+ }
+
+ /* Flush everything: descriptors may have been rewritten above. */
+ cpu_dcache_wbinv_all();
+ cpu_tlb_flushID();
+ cpu_cpwait();
+
+ /*
+ * now we allocate the "special" VAs which are used for tmp mappings
+ * by the pmap (and other modules). we allocate the VAs by advancing
+ * virtual_avail (note that there are no pages mapped at these VAs).
+ *
+ * Managed KVM space start from wherever initarm() tells us.
+ */
+ virtual_avail = vstart;
+ virtual_end = vend;
+
+ pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte);
+ pmap_set_pt_cache_mode(kernel_l1pt, (vaddr_t)csrc_pte);
+ pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte);
+ pmap_set_pt_cache_mode(kernel_l1pt, (vaddr_t)cdst_pte);
+ pmap_alloc_specials(&virtual_avail, 1, (void *)&memhook, NULL);
+ pmap_alloc_specials(&virtual_avail, round_page(MSGBUFSIZE) / PAGE_SIZE,
+ (void *)&msgbufaddr, NULL);
+
+ /*
+ * Allocate a range of kernel virtual address space to be used
+ * for L2 descriptor tables and metadata allocation in
+ * pmap_growkernel().
+ */
+ size = ((virtual_end - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
+ pmap_alloc_specials(&virtual_avail,
+ round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
+ &pmap_kernel_l2ptp_kva, NULL);
+
+ size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
+ pmap_alloc_specials(&virtual_avail,
+ round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
+ &pmap_kernel_l2dtable_kva, NULL);
+
+ /*
+ * init the static-global locks and global pmap list.
+ */
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+ spinlockinit(&pmap_main_lock, "pmaplk", 0);
+#endif
+
+ /*
+ * We can now initialise the first L1's metadata.
+ */
+ SLIST_INIT(&l1_list);
+ TAILQ_INIT(&l1_lru_list);
+ simple_lock_init(&l1_lru_lock);
+ pmap_init_l1(l1, kernel_l1pt);
+
+ /*
+ * Initialize the pmap pool and cache
+ */
+ pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
+ &pool_allocator_nointr);
+ pool_cache_init(&pmap_pmap_cache, &pmap_pmap_pool,
+ pmap_pmap_ctor, NULL, NULL);
+ LIST_INIT(&pmap_pmaps);
+ LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list);
+
+ /*
+ * Initialize the pv pool.
+ */
+ pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvepl",
+ &pmap_bootstrap_pv_allocator);
+
+ /*
+ * Initialize the L2 dtable pool and cache.
+ */
+ pool_init(&pmap_l2dtable_pool, sizeof(struct l2_dtable), 0, 0, 0,
+ "l2dtblpl", NULL);
+ pool_cache_init(&pmap_l2dtable_cache, &pmap_l2dtable_pool,
+ pmap_l2dtable_ctor, NULL, NULL);
+
+ /*
+ * Initialise the L2 descriptor table pool and cache
+ */
+ pool_init(&pmap_l2ptp_pool, L2_TABLE_SIZE_REAL, 0, L2_TABLE_SIZE_REAL,
+ 0, "l2ptppl", NULL);
+ pool_cache_init(&pmap_l2ptp_cache, &pmap_l2ptp_pool,
+ pmap_l2ptp_ctor, NULL, NULL);
+
+ cpu_dcache_wbinv_all();
+}
+
+/*
+ * pmap_set_pt_cache_mode:
+ *
+ * Ensure the L1 section entry or L2 PTE that maps "va" in the
+ * kernel L1 "kl1" uses the page-table cache mode. Returns non-zero
+ * if the descriptor had to be corrected, 0 if it was already right.
+ */
+int
+pmap_set_pt_cache_mode(pd_entry_t *kl1, vaddr_t va)
+{
+ pd_entry_t *pdep, pde;
+ pt_entry_t *ptep, pte;
+ paddr_t pa; /* was vaddr_t: holds a physical address */
+ int rv = 0;
+
+ /*
+ * Make sure the descriptor itself has the correct cache mode
+ */
+ pdep = &kl1[L1_IDX(va)];
+ pde = *pdep;
+
+ if (l1pte_section_p(pde)) {
+ if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
+ *pdep = (pde & ~L1_S_CACHE_MASK) |
+ pte_l1_s_cache_mode_pt;
+ PTE_SYNC(pdep);
+ cpu_dcache_wbinv_range((vaddr_t)pdep, sizeof(*pdep));
+ rv = 1;
+ }
+ } else {
+ pa = (paddr_t)(pde & L1_C_ADDR_MASK);
+ ptep = (pt_entry_t *)kernel_pt_lookup(pa);
+ /* Report the PA we failed to find; ptep is NULL here, so
+ * printing it (as the old message did) was useless. */
+ if (ptep == NULL)
+ panic("pmap_set_pt_cache_mode: no L2 for pa 0x%lx", pa);
+
+ ptep = &ptep[l2pte_index(va)];
+ pte = *ptep;
+ if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
+ *ptep = (pte & ~L2_S_CACHE_MASK) |
+ pte_l2_s_cache_mode_pt;
+ PTE_SYNC(ptep);
+ cpu_dcache_wbinv_range((vaddr_t)ptep, sizeof(*ptep));
+ rv = 1;
+ }
+ }
+
+ return (rv);
+}
+
+/*
+ * pmap_alloc_specials:
+ *
+ * Carve "pages" pages of KVA out of *availp, returning the start in
+ * *vap and (if ptep is non-NULL) a pointer to the kernel PTE for the
+ * first page in *ptep. Panics if no L2 bucket covers the VA.
+ */
+void
+pmap_alloc_specials(vaddr_t *availp, int pages, vaddr_t *vap, pt_entry_t **ptep)
+{
+ vaddr_t va = *availp;
+ struct l2_bucket *l2b;
+
+ if (ptep) {
+ l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+ if (l2b == NULL)
+ panic("pmap_alloc_specials: no l2b for 0x%lx", va);
+
+ /* (redundant inner "if (ptep)" removed — already tested above) */
+ *ptep = &l2b->l2b_kva[l2pte_index(va)];
+ }
+
+ *vap = va;
+ *availp = va + (PAGE_SIZE * pages);
+}
+
+/*
+ * Finish pmap initialisation once UVM is up: publish the available
+ * memory range and prime the pv_entry pool.
+ */
+void
+pmap_init(void)
+{
+ extern int physmem;
+
+ /*
+ * Set the available memory vars - These do not map to real memory
+ * addresses and cannot as the physical memory is fragmented.
+ * They are used by ps for %mem calculations.
+ * One could argue whether this should be the entire memory or just
+ * the memory that is useable in a user process.
+ */
+ avail_start = 0;
+ avail_end = physmem * PAGE_SIZE;
+
+ /*
+ * Prime the pv_entry pool: set a low-water mark of two pages'
+ * worth of pv_entry structures so the pool keeps a reserve on
+ * hand (the pool's allocator is pmap_bootstrap_pv_allocator,
+ * registered in pmap_bootstrap()).
+ */
+ pool_setlowat(&pmap_pv_pool,
+ (PAGE_SIZE / sizeof(struct pv_entry)) * 2);
+
+ /* From here on, the bootstrap pv page allocator defers to the
+ * regular pool page allocator. */
+ pmap_initialized = TRUE;
+}
+
+/* Highest page handed out while bootstrapping the pv pool. */
+static vaddr_t last_bootstrap_page = 0;
+/* Linked list (next pointer stored in the page itself) of bootstrap
+ * pages returned before pmap_initialized was set. */
+static void *free_bootstrap_pages = NULL;
+
+/*
+ * Page allocator for the pv_entry pool during early bootstrap.
+ * Once pmap_init() has run it simply defers to pool_page_alloc();
+ * before that it recycles previously freed bootstrap pages, or
+ * allocates a fresh page from kernel_map.
+ */
+void *
+pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags)
+{
+ extern void *pool_page_alloc(struct pool *, int);
+ vaddr_t new_page;
+ void *rv;
+
+ if (pmap_initialized)
+ return (pool_page_alloc(pp, flags));
+
+ if (free_bootstrap_pages) {
+ rv = free_bootstrap_pages;
+ free_bootstrap_pages = *((void **)rv);
+ return (rv);
+ }
+
+ new_page = uvm_km_kmemalloc(kernel_map, NULL, PAGE_SIZE,
+ (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT);
+
+ /* NOTE(review): also catches a failed (0) NOWAIT allocation. */
+ KASSERT(new_page > last_bootstrap_page);
+ last_bootstrap_page = new_page;
+ return ((void *)new_page);
+}
+
+/*
+ * Page-free hook matching pmap_bootstrap_pv_page_alloc(). After
+ * pmap_init() it defers to pool_page_free(); before that, bootstrap
+ * pages are threaded onto the free_bootstrap_pages list for reuse.
+ */
+void
+pmap_bootstrap_pv_page_free(struct pool *pp, void *v)
+{
+ extern void pool_page_free(struct pool *, void *);
+
+ if (pmap_initialized) {
+ pool_page_free(pp, v);
+ return;
+ }
+
+ /*
+ * Every bootstrap page satisfies v <= last_bootstrap_page; the
+ * comparison must be "<=" (not "<"), otherwise the most recently
+ * allocated page — whose address equals last_bootstrap_page —
+ * would be silently dropped here and leaked.
+ */
+ if ((vaddr_t)v <= last_bootstrap_page) {
+ *((void **)v) = free_bootstrap_pages;
+ free_bootstrap_pages = v;
+ return;
+ }
+}
+
+/*
+ * pmap_postinit()
+ *
+ * This routine is called after the vm and kmem subsystems have been
+ * initialised. This allows the pmap code to perform any initialisation
+ * that can only be done once the memory allocation is in place.
+ *
+ * It raises the low-water marks on the L2 pools and pre-allocates
+ * enough additional L1 translation tables that every possible
+ * process can share one (PMAP_DOMAINS pmaps per L1).
+ */
+void
+pmap_postinit(void)
+{
+ extern paddr_t physical_start, physical_end;
+ struct l2_bucket *l2b;
+ struct l1_ttable *l1;
+ struct pglist plist;
+ struct vm_page *m;
+ pd_entry_t *pl1pt;
+ pt_entry_t *ptep, pte;
+ vaddr_t va, eva;
+ u_int loop, needed;
+ int error;
+
+ pool_setlowat(&pmap_l2ptp_pool,
+ (PAGE_SIZE / L2_TABLE_SIZE_REAL) * 4);
+ pool_setlowat(&pmap_l2dtable_pool,
+ (PAGE_SIZE / sizeof(struct l2_dtable)) * 2);
+
+ /* One L1 per PMAP_DOMAINS processes; the kernel's L1 already
+ * exists, hence the "- 1". */
+ needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
+ needed -= 1;
+
+ l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);
+
+ for (loop = 0; loop < needed; loop++, l1++) {
+ /* Allocate a L1 page table */
+ va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
+ if (va == 0)
+ panic("Cannot allocate L1 KVM");
+
+ TAILQ_INIT(&plist);
+
+ /* L1 tables must be physically contiguous and L1-aligned. */
+ error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start,
+ physical_end, L1_TABLE_SIZE, 0, &plist, 1, M_WAITOK);
+ if (error)
+ panic("Cannot allocate L1 physical pages");
+
+ m = TAILQ_FIRST(&plist);
+ eva = va + L1_TABLE_SIZE;
+ pl1pt = (pd_entry_t *)va;
+
+ while (m && va < eva) {
+ paddr_t pa = VM_PAGE_TO_PHYS(m);
+
+
+ pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+
+ /*
+ * Make sure the L1 descriptor table is mapped
+ * with the cache-mode set to write-through.
+ */
+ l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ pte = *ptep;
+ pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
+ *ptep = pte;
+ PTE_SYNC(ptep);
+ cpu_tlb_flushD_SE(va);
+
+ va += PAGE_SIZE;
+ m = m->pageq.tqe_next;
+ }
+
+#ifdef DIAGNOSTIC
+ /* Panic message fixed to name this function (was the old
+ * "pmap_alloc_l1pt" name, which no longer exists here). */
+ if (m)
+ panic("pmap_postinit: pglist not empty");
+#endif /* DIAGNOSTIC */
+
+ pmap_init_l1(l1, pl1pt);
+ }
+
+#ifdef DEBUG
+ printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
+ needed);
+#endif
+}
+
+/*
+ * Note that the following routines are used by board-specific initialisation
+ * code to configure the initial kernel page tables.
+ *
+ * If ARM32_NEW_VM_LAYOUT is *not* defined, they operate on the assumption that
+ * L2 page-table pages are 4KB in size and use 4 L1 slots. This mimics the
+ * behaviour of the old pmap, and provides an easy migration path for
+ * initial bring-up of the new pmap on existing ports. Fortunately,
+ * pmap_bootstrap() compensates for this hackery. This is only a stop-gap and
+ * will be deprecated.
+ *
+ * If ARM32_NEW_VM_LAYOUT *is* defined, these functions deal with 1KB L2 page
+ * tables.
+ */
+
+/*
+ * This list exists for the benefit of pmap_map_chunk(). It keeps track
+ * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
+ * find them as necessary.
+ *
+ * Note that the data on this list MUST remain valid after initarm() returns,
+ * as pmap_bootstrap() uses it to construct L2 table metadata.
+ */
+SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
+
+/*
+ * Translate the physical address of a bootstrap L2 table to its KVA
+ * by searching kernel_pt_list. Returns 0 if not found.
+ */
+vaddr_t
+kernel_pt_lookup(paddr_t pa)
+{
+ pv_addr_t *pv;
+
+ SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
+#ifndef ARM32_NEW_VM_LAYOUT
+ /* Old layout: tables are tracked per page; preserve the
+ * sub-page offset of the requested PA. */
+ if (pv->pv_pa == (pa & ~PGOFSET))
+ return (pv->pv_va | (pa & PGOFSET));
+#else
+ if (pv->pv_pa == pa)
+ return (pv->pv_va);
+#endif
+ }
+ return (0);
+}
+
+/*
+ * pmap_map_section:
+ *
+ * Create a single section mapping.
+ *
+ * "cache" selects the L1 cache mode: PTE_NOCACHE (default),
+ * PTE_CACHE, or PTE_PAGETABLE. va and pa must be section-aligned.
+ */
+void
+pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
+{
+ pd_entry_t *pde = (pd_entry_t *) l1pt;
+ pd_entry_t fl;
+
+ KASSERT(((va | pa) & L1_S_OFFSET) == 0);
+
+ switch (cache) {
+ case PTE_NOCACHE:
+ default:
+ fl = 0;
+ break;
+
+ case PTE_CACHE:
+ fl = pte_l1_s_cache_mode;
+ break;
+
+ case PTE_PAGETABLE:
+ fl = pte_l1_s_cache_mode_pt;
+ break;
+ }
+
+ pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
+ L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
+ PTE_SYNC(&pde[va >> L1_S_SHIFT]);
+}
+
+/*
+ * pmap_map_entry:
+ *
+ * Create a single page mapping.
+ *
+ * The L1 slot for "va" must already point at a coarse L2 table that
+ * was registered via pmap_link_l2pt(); otherwise we panic.
+ */
+void
+pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
+{
+ pd_entry_t *pde = (pd_entry_t *) l1pt;
+ pt_entry_t fl;
+ pt_entry_t *pte;
+
+ KASSERT(((va | pa) & PGOFSET) == 0);
+
+ switch (cache) {
+ case PTE_NOCACHE:
+ default:
+ fl = 0;
+ break;
+
+ case PTE_CACHE:
+ fl = pte_l2_s_cache_mode;
+ break;
+
+ case PTE_PAGETABLE:
+ fl = pte_l2_s_cache_mode_pt;
+ break;
+ }
+
+ if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
+ panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
+
+#ifndef ARM32_NEW_VM_LAYOUT
+ pte = (pt_entry_t *)
+ kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
+#else
+ pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
+#endif
+ if (pte == NULL)
+ panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
+
+#ifndef ARM32_NEW_VM_LAYOUT
+ /* Old layout: 4KB L2 tables, 1024 entries, indexed by VA[21:12]. */
+ pte[(va >> PGSHIFT) & 0x3ff] =
+ L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
+ PTE_SYNC(&pte[(va >> PGSHIFT) & 0x3ff]);
+#else
+ pte[l2pte_index(va)] =
+ L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
+ PTE_SYNC(&pte[l2pte_index(va)]);
+#endif
+}
+
+/*
+ * pmap_link_l2pt:
+ *
+ * Link the L2 page table specified by "l2pv" into the L1
+ * page table at the slot for "va".
+ *
+ * In the old VM layout one 4KB page backs four 1KB L2 tables, so
+ * four consecutive L1 slots are filled. The l2pv entry is also
+ * added to kernel_pt_list for later lookup by pmap_map_chunk().
+ */
+void
+pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
+{
+ pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
+ u_int slot = va >> L1_S_SHIFT;
+
+#ifndef ARM32_NEW_VM_LAYOUT
+ KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0);
+ KASSERT((l2pv->pv_pa & PGOFSET) == 0);
+#endif
+
+ proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
+
+ pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
+#ifdef ARM32_NEW_VM_LAYOUT
+ PTE_SYNC(&pde[slot]);
+#else
+ pde[slot + 1] = proto | (l2pv->pv_pa + 0x400);
+ pde[slot + 2] = proto | (l2pv->pv_pa + 0x800);
+ pde[slot + 3] = proto | (l2pv->pv_pa + 0xc00);
+ PTE_SYNC_RANGE(&pde[slot + 0], 4);
+#endif
+
+ SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
+}
+
+/*
+ * pmap_map_chunk:
+ *
+ * Map a chunk of memory using the most efficient mappings
+ * possible (section, large page, small page) into the
+ * provided L1 and L2 tables at the specified virtual address.
+ *
+ * Returns the page-rounded size actually mapped. Panics if an
+ * L2 table needed for a sub-section mapping was never linked in
+ * via pmap_link_l2pt().
+ */
+vsize_t
+pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
+ int prot, int cache)
+{
+ pd_entry_t *pde = (pd_entry_t *) l1pt;
+ pt_entry_t *pte, f1, f2s, f2l;
+ vsize_t resid;
+ int i;
+
+ resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
+
+ if (l1pt == 0)
+ panic("pmap_map_chunk: no L1 table provided");
+
+#ifdef VERBOSE_INIT_ARM
+ printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
+ "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
+#endif
+
+ /* Select the cache-mode bits for each mapping granularity. */
+ switch (cache) {
+ case PTE_NOCACHE:
+ default:
+ f1 = 0;
+ f2l = 0;
+ f2s = 0;
+ break;
+
+ case PTE_CACHE:
+ f1 = pte_l1_s_cache_mode;
+ f2l = pte_l2_l_cache_mode;
+ f2s = pte_l2_s_cache_mode;
+ break;
+
+ case PTE_PAGETABLE:
+ f1 = pte_l1_s_cache_mode_pt;
+ f2l = pte_l2_l_cache_mode_pt;
+ f2s = pte_l2_s_cache_mode_pt;
+ break;
+ }
+
+ size = resid;
+
+ while (resid > 0) {
+ /* See if we can use a section mapping. */
+ if (L1_S_MAPPABLE_P(va, pa, resid)) {
+#ifdef VERBOSE_INIT_ARM
+ printf("S");
+#endif
+ pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
+ L1_S_PROT(PTE_KERNEL, prot) | f1 |
+ L1_S_DOM(PMAP_DOMAIN_KERNEL);
+ PTE_SYNC(&pde[va >> L1_S_SHIFT]);
+ va += L1_S_SIZE;
+ pa += L1_S_SIZE;
+ resid -= L1_S_SIZE;
+ continue;
+ }
+
+ /*
+ * Ok, we're going to use an L2 table. Make sure
+ * one is actually in the corresponding L1 slot
+ * for the current VA.
+ */
+ if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
+ panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
+
+#ifndef ARM32_NEW_VM_LAYOUT
+ pte = (pt_entry_t *)
+ kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
+#else
+ pte = (pt_entry_t *) kernel_pt_lookup(
+ pde[L1_IDX(va)] & L1_C_ADDR_MASK);
+#endif
+ /* Fixed: the two string literals previously concatenated
+ * without a space, printing "...for VA0x...". */
+ if (pte == NULL)
+ panic("pmap_map_chunk: can't find L2 table for VA "
+ "0x%08lx", va);
+
+ /* See if we can use a L2 large page mapping. */
+ if (L2_L_MAPPABLE_P(va, pa, resid)) {
+#ifdef VERBOSE_INIT_ARM
+ printf("L");
+#endif
+ /* Large pages are replicated in 16 consecutive PTEs. */
+ for (i = 0; i < 16; i++) {
+#ifndef ARM32_NEW_VM_LAYOUT
+ pte[((va >> PGSHIFT) & 0x3f0) + i] =
+ L2_L_PROTO | pa |
+ L2_L_PROT(PTE_KERNEL, prot) | f2l;
+ PTE_SYNC(&pte[((va >> PGSHIFT) & 0x3f0) + i]);
+#else
+ pte[l2pte_index(va) + i] =
+ L2_L_PROTO | pa |
+ L2_L_PROT(PTE_KERNEL, prot) | f2l;
+ PTE_SYNC(&pte[l2pte_index(va) + i]);
+#endif
+ }
+ va += L2_L_SIZE;
+ pa += L2_L_SIZE;
+ resid -= L2_L_SIZE;
+ continue;
+ }
+
+ /* Use a small page mapping. */
+#ifdef VERBOSE_INIT_ARM
+ printf("P");
+#endif
+#ifndef ARM32_NEW_VM_LAYOUT
+ pte[(va >> PGSHIFT) & 0x3ff] =
+ L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
+ PTE_SYNC(&pte[(va >> PGSHIFT) & 0x3ff]);
+#else
+ pte[l2pte_index(va)] =
+ L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
+ PTE_SYNC(&pte[l2pte_index(va)]);
+#endif
+ va += PAGE_SIZE;
+ pa += PAGE_SIZE;
+ resid -= PAGE_SIZE;
+ }
+#ifdef VERBOSE_INIT_ARM
+ printf("\n");
+#endif
+ return (size);
+}
+
+/********************** Static device map routines ***************************/
+
+/* Table of static device mappings; terminated by a zero pd_size entry. */
+const struct pmap_devmap *pmap_devmap_table;
+
+/*
+ * Register the devmap table. This is provided in case early console
+ * initialization needs to register mappings created by bootstrap code
+ * before pmap_devmap_bootstrap() is called.
+ */
+void
+pmap_devmap_register(const struct pmap_devmap *table)
+{
+
+ pmap_devmap_table = table;
+}
+
+/*
+ * Map all of the static regions in the devmap table, and remember
+ * the devmap table so other parts of the kernel can look up entries
+ * later.
+ */
+void
+pmap_devmap_bootstrap(vaddr_t l1pt, const struct pmap_devmap *table)
+{
+ int i;
+
+ pmap_devmap_table = table;
+
+ /* The table is terminated by an entry with pd_size == 0. */
+ for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
+#ifdef VERBOSE_INIT_ARM
+ printf("devmap: %08lx -> %08lx @ %08lx\n",
+ pmap_devmap_table[i].pd_pa,
+ pmap_devmap_table[i].pd_pa +
+ pmap_devmap_table[i].pd_size - 1,
+ pmap_devmap_table[i].pd_va);
+#endif
+ pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
+ pmap_devmap_table[i].pd_pa,
+ pmap_devmap_table[i].pd_size,
+ pmap_devmap_table[i].pd_prot,
+ pmap_devmap_table[i].pd_cache);
+ }
+}
+
+/*
+ * Return the devmap entry whose physical range wholly contains
+ * [pa, pa + size), or NULL if there is none (or no table registered).
+ */
+const struct pmap_devmap *
+pmap_devmap_find_pa(paddr_t pa, psize_t size)
+{
+	const struct pmap_devmap *pd;
+
+	if (pmap_devmap_table == NULL)
+		return (NULL);
+
+	for (pd = pmap_devmap_table; pd->pd_size != 0; pd++) {
+		if (pa >= pd->pd_pa &&
+		    pa + size <= pd->pd_pa + pd->pd_size)
+			return (pd);
+	}
+
+	return (NULL);
+}
+
+/*
+ * Return the devmap entry whose virtual range wholly contains
+ * [va, va + size), or NULL if there is none (or no table registered).
+ */
+const struct pmap_devmap *
+pmap_devmap_find_va(vaddr_t va, vsize_t size)
+{
+	const struct pmap_devmap *pd;
+
+	if (pmap_devmap_table == NULL)
+		return (NULL);
+
+	for (pd = pmap_devmap_table; pd->pd_size != 0; pd++) {
+		if (va >= pd->pd_va &&
+		    va + size <= pd->pd_va + pd->pd_size)
+			return (pd);
+	}
+
+	return (NULL);
+}
+
+/********************** PTE initialization routines **************************/
+
+/*
+ * These routines are called when the CPU type is identified to set up
+ * the PTE prototypes, cache modes, etc.
+ *
+ * The variables are always here, just in case LKMs need to reference
+ * them (though, they shouldn't).
+ */
+
+/*
+ * Cache-mode bits for L1 sections: normal mappings, page-table
+ * mappings, and the mask covering the cache-control bits.
+ */
+pt_entry_t pte_l1_s_cache_mode;
+pt_entry_t pte_l1_s_cache_mode_pt;
+pt_entry_t pte_l1_s_cache_mask;
+
+/* Likewise for L2 large pages. */
+pt_entry_t pte_l2_l_cache_mode;
+pt_entry_t pte_l2_l_cache_mode_pt;
+pt_entry_t pte_l2_l_cache_mask;
+
+/* Likewise for L2 small pages. */
+pt_entry_t pte_l2_s_cache_mode;
+pt_entry_t pte_l2_s_cache_mode_pt;
+pt_entry_t pte_l2_s_cache_mask;
+
+/* User-access and write-permission bits (and mask) for L2 small pages. */
+pt_entry_t pte_l2_s_prot_u;
+pt_entry_t pte_l2_s_prot_w;
+pt_entry_t pte_l2_s_prot_mask;
+
+/* Prototype (descriptor-type) bits for sections, coarse tables, pages. */
+pt_entry_t pte_l1_s_proto;
+pt_entry_t pte_l1_c_proto;
+pt_entry_t pte_l2_s_proto;
+
+/* MMU-class-specific page copy/zero hooks, set by pmap_pte_init_*(). */
+void (*pmap_copy_page_func)(struct vm_page *, struct vm_page *);
+void (*pmap_zero_page_func)(struct vm_page *);
+
+#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+/*
+ * PTE setup for generic ARM MMUs (and StrongARM): write-back
+ * cacheable (B|C) for normal mappings, with the page-table cache
+ * mode chosen from the D-cache's write policy.
+ */
+void
+pmap_pte_init_generic(void)
+{
+
+	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
+	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
+
+	pte_l2_l_cache_mode = L2_B|L2_C;
+	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
+
+	pte_l2_s_cache_mode = L2_B|L2_C;
+	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
+
+	/*
+	 * If we have a write-through cache, set B and C.  If
+	 * we have a write-back cache, then we assume setting
+	 * only C will make those pages write-through.
+	 */
+	if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
+		/* No write-back op registered => cache is write-through. */
+		pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
+		pte_l2_l_cache_mode_pt = L2_B|L2_C;
+		pte_l2_s_cache_mode_pt = L2_B|L2_C;
+	} else {
+		pte_l1_s_cache_mode_pt = L1_S_C;
+		pte_l2_l_cache_mode_pt = L2_C;
+		pte_l2_s_cache_mode_pt = L2_C;
+	}
+
+	pte_l2_s_prot_u = L2_S_PROT_U_generic;
+	pte_l2_s_prot_w = L2_S_PROT_W_generic;
+	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
+
+	pte_l1_s_proto = L1_S_PROTO_generic;
+	pte_l1_c_proto = L1_C_PROTO_generic;
+	pte_l2_s_proto = L2_S_PROTO_generic;
+
+	pmap_copy_page_func = pmap_copy_page_generic;
+	pmap_zero_page_func = pmap_zero_page_generic;
+}
+
+#if defined(CPU_ARM8)
+void
+pmap_pte_init_arm8(void)
+{
+
+	/*
+	 * ARM8 is compatible with generic, but we need to use
+	 * the page tables uncached.
+	 */
+	pmap_pte_init_generic();
+
+	/* Mode 0 = neither B nor C: page-table pages are mapped uncached. */
+	pte_l1_s_cache_mode_pt = 0;
+	pte_l2_l_cache_mode_pt = 0;
+	pte_l2_s_cache_mode_pt = 0;
+}
+#endif /* CPU_ARM8 */
+
+#if defined(CPU_ARM9)
+void
+pmap_pte_init_arm9(void)
+{
+
+	/*
+	 * ARM9 is compatible with generic, but we want to use
+	 * write-through caching for now.  C-only (no B) selects
+	 * write-through, per the convention described in
+	 * pmap_pte_init_generic().
+	 */
+	pmap_pte_init_generic();
+
+	pte_l1_s_cache_mode = L1_S_C;
+	pte_l2_l_cache_mode = L2_C;
+	pte_l2_s_cache_mode = L2_C;
+
+	pte_l1_s_cache_mode_pt = L1_S_C;
+	pte_l2_l_cache_mode_pt = L2_C;
+	pte_l2_s_cache_mode_pt = L2_C;
+}
+#endif /* CPU_ARM9 */
+#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
+
+#if defined(CPU_ARM10)
+void
+pmap_pte_init_arm10(void)
+{
+
+	/*
+	 * ARM10 is compatible with generic.  Normal mappings keep the
+	 * generic write-back mode (B|C); page-table mappings use C only
+	 * (write-through, per the convention in pmap_pte_init_generic()).
+	 */
+	pmap_pte_init_generic();
+
+	pte_l1_s_cache_mode = L1_S_B | L1_S_C;
+	pte_l2_l_cache_mode = L2_B | L2_C;
+	pte_l2_s_cache_mode = L2_B | L2_C;
+
+	pte_l1_s_cache_mode_pt = L1_S_C;
+	pte_l2_l_cache_mode_pt = L2_C;
+	pte_l2_s_cache_mode_pt = L2_C;
+
+}
+#endif /* CPU_ARM10 */
+
+#if ARM_MMU_SA1 == 1
+void
+pmap_pte_init_sa1(void)
+{
+
+	/*
+	 * The StrongARM SA-1 cache does not have a write-through
+	 * mode.  So, do the generic initialization, then reset
+	 * the page table cache mode to B=1,C=1, and note that
+	 * the PTEs need to be sync'd.
+	 */
+	pmap_pte_init_generic();
+
+	pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
+	pte_l2_l_cache_mode_pt = L2_B|L2_C;
+	pte_l2_s_cache_mode_pt = L2_B|L2_C;
+
+	/* Page tables are cached write-back, so PTE updates must be
+	 * explicitly cleaned to memory (see PTE_SYNC). */
+	pmap_needs_pte_sync = 1;
+}
+#endif /* ARM_MMU_SA1 == 1*/
+
+#if ARM_MMU_XSCALE == 1
+#if (ARM_NMMUS > 1)
+/* Nonzero when U-areas should be retargeted to the XScale mini-data
+ * cache; only needed as a runtime flag on multi-MMU-class kernels. */
+u_int xscale_use_minidata;
+#endif
+
+/*
+ * PTE setup for Intel XScale MMUs.  Chooses write-back vs.
+ * write-through according to build options and known silicon errata,
+ * installs the XScale-specific protection/prototype bits and page
+ * copy/zero routines, and disables page-table ECC.
+ */
+void
+pmap_pte_init_xscale(void)
+{
+	uint32_t auxctl;
+	int write_through = 0;
+
+	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
+	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
+
+	pte_l2_l_cache_mode = L2_B|L2_C;
+	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
+
+	pte_l2_s_cache_mode = L2_B|L2_C;
+	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
+
+	/* Page-table pages: C only (write-through by convention). */
+	pte_l1_s_cache_mode_pt = L1_S_C;
+	pte_l2_l_cache_mode_pt = L2_C;
+	pte_l2_s_cache_mode_pt = L2_C;
+
+#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
+	/*
+	 * The XScale core has an enhanced mode where writes that
+	 * miss the cache cause a cache line to be allocated.  This
+	 * is significantly faster than the traditional, write-through
+	 * behavior in this case.  Enabled via the X (TEX) bit.
+	 */
+	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
+	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
+	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
+#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
+
+#ifdef XSCALE_CACHE_WRITE_THROUGH
+	/*
+	 * Some versions of the XScale core have various bugs in
+	 * their cache units, the work-around for which is to run
+	 * the cache in write-through mode.  Unfortunately, this
+	 * has a major (negative) impact on performance.  So, we
+	 * go ahead and run fast-and-loose, in the hopes that we
+	 * don't line up the planets in a way that will trip the
+	 * bugs.
+	 *
+	 * However, we give you the option to be slow-but-correct.
+	 */
+	write_through = 1;
+#elif defined(XSCALE_CACHE_WRITE_BACK)
+	/* force write back cache mode */
+	write_through = 0;
+#elif defined(CPU_XSCALE_PXA2X0)
+	/*
+	 * Intel PXA2[15]0 processors are known to have a bug in
+	 * write-back cache on revision 4 and earlier (stepping
+	 * A[01] and B[012]).  Fixed for C0 and later.
+	 */
+	{
+		uint32_t id, type;
+
+		id = cpufunc_id();
+		type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);
+
+		if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
+			if ((id & CPU_ID_REVISION_MASK) < 5) {
+				/* write through for stepping A0-1 and B0-2 */
+				write_through = 1;
+			}
+		}
+	}
+#endif /* XSCALE_CACHE_WRITE_THROUGH */
+
+	if (write_through) {
+		/* Drop the B bit everywhere: write-through mode. */
+		pte_l1_s_cache_mode = L1_S_C;
+		pte_l2_l_cache_mode = L2_C;
+		pte_l2_s_cache_mode = L2_C;
+	}
+
+#if (ARM_NMMUS > 1)
+	/* Running on an XScale, so the mini-data cache is available. */
+	xscale_use_minidata = 1;
+#endif
+
+	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
+	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
+	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
+
+	pte_l1_s_proto = L1_S_PROTO_xscale;
+	pte_l1_c_proto = L1_C_PROTO_xscale;
+	pte_l2_s_proto = L2_S_PROTO_xscale;
+
+	pmap_copy_page_func = pmap_copy_page_xscale;
+	pmap_zero_page_func = pmap_zero_page_xscale;
+
+	/*
+	 * Disable ECC protection of page table access, for now.
+	 * Read-modify-write of the auxiliary control register
+	 * (cp15, register 1, opcode2 1).
+	 */
+	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
+	auxctl &= ~XSCALE_AUXCTL_P;
+	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
+}
+
+/*
+ * xscale_setup_minidata:
+ *
+ * Set up the mini-data cache clean area. We require the
+ * caller to allocate the right amount of physically and
+ * virtually contiguous space.
+ */
+/* Address/size of the mini-data cache clean area (size is expected to
+ * be initialized before xscale_setup_minidata() runs). */
+vaddr_t xscale_minidata_clean_addr;
+vsize_t xscale_minidata_clean_size; /* already initialized */
+
+void
+xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
+{
+	extern vaddr_t xscale_minidata_clean_addr;
+	extern vsize_t xscale_minidata_clean_size; /* already initialized */
+	pd_entry_t *pde = (pd_entry_t *) l1pt;
+	pt_entry_t *pte;
+	vsize_t size;
+	uint32_t auxctl;
+	/*
+	 * NOTE(review): this panic fires unconditionally, making the rest
+	 * of the function unreachable -- presumably a placeholder in this
+	 * port until the minidata variables are wired up; confirm before
+	 * relying on this routine.
+	 */
+	panic("xscale_setup_minidata: xscale_minidata_clean_size, "
+	    "xscale_minidata_clean_addr");
+
+	xscale_minidata_clean_addr = va;
+
+	/* Round it to page size. */
+	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
+
+	/* Map the clean area read-only, cacheable, X-bit set (mini-data). */
+	for (; size != 0;
+	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
+#ifndef ARM32_NEW_VM_LAYOUT
+		pte = (pt_entry_t *)
+		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
+#else
+		pte = (pt_entry_t *) kernel_pt_lookup(
+		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
+#endif
+		if (pte == NULL)
+			panic("xscale_setup_minidata: can't find L2 table for "
+			    "VA 0x%08lx", va);
+#ifndef ARM32_NEW_VM_LAYOUT
+		pte[(va >> PGSHIFT) & 0x3ff] =
+#else
+		pte[l2pte_index(va)] =
+#endif
+		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
+		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
+	}
+
+	/*
+	 * Configure the mini-data cache for write-back with
+	 * read/write-allocate.
+	 *
+	 * NOTE: In order to reconfigure the mini-data cache, we must
+	 * make sure it contains no valid data!  In order to do that,
+	 * we must issue a global data cache invalidate command!
+	 *
+	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
+	 * THIS IS VERY IMPORTANT!
+	 */
+
+	/* Invalidate data and mini-data. */
+	__asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
+	/* Update the MD field of the auxiliary control register. */
+	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
+	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
+	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
+}
+
+/*
+ * Change the PTEs for the specified kernel mappings such that they
+ * will use the mini data cache instead of the main data cache.
+ */
+void
+pmap_uarea(vaddr_t va)
+{
+	struct l2_bucket *l2b;
+	pt_entry_t *ptep, *sptep, pte;
+	vaddr_t next_bucket, eva;
+
+#if (ARM_NMMUS > 1)
+	/* On multi-MMU kernels only do this when running on an XScale. */
+	if (xscale_use_minidata == 0)
+		return;
+#endif
+
+	/* The U-area covers USPACE bytes starting at va. */
+	eva = va + USPACE;
+
+	while (va < eva) {
+		/* Clamp each pass to the current L2 bucket's coverage. */
+		next_bucket = L2_NEXT_BUCKET(va);
+		if (next_bucket > eva)
+			next_bucket = eva;
+
+		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+		KDASSERT(l2b != NULL);
+
+		/* Remember the first PTE so the whole run can be synced. */
+		sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
+
+		while (va < next_bucket) {
+			pte = *ptep;
+			if (!l2pte_minidata(pte)) {
+				/*
+				 * Flush the page from the main D-cache and
+				 * TLB before switching it over; clearing
+				 * L2_B retargets the mapping to the
+				 * mini-data cache (cf. l2pte_minidata()).
+				 */
+				cpu_dcache_wbinv_range(va, PAGE_SIZE);
+				cpu_tlb_flushD_SE(va);
+				*ptep = pte & ~L2_B;
+			}
+			ptep++;
+			va += PAGE_SIZE;
+		}
+		PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
+	}
+	/* Drain the cp15 write path before returning. */
+	cpu_cpwait();
+}
+#endif /* ARM_MMU_XSCALE == 1 */
+
+#if defined(DDB)
+/*
+ * A couple of ddb-callable functions for dumping pmaps
+ */
+void pmap_dump_all(void);
+void pmap_dump(pmap_t);
+
+/*
+ * Dump every user pmap on the global pmap list; the kernel pmap is
+ * deliberately excluded.
+ */
+void
+pmap_dump_all(void)
+{
+	pmap_t pm;
+
+	LIST_FOREACH(pm, &pmap_pmaps, pm_list) {
+		if (pm != pmap_kernel()) {
+			pmap_dump(pm);
+			printf("\n");
+		}
+	}
+}
+
+/* Non-cacheable PTEs seen in the current 64-page row of pmap_dump();
+ * consumed row-by-row by pmap_dump_ncpg(). */
+static pt_entry_t ncptes[64];
+void pmap_dump_ncpg(pmap_t);
+
+/*
+ * Print a one-character-per-page map of every populated L2 bucket in
+ * the pmap.  Legend (from PTE bits 2-3, plus bit 6 for mini-data):
+ *   '.' invalid   'D' uncached/unbuffered   'B' buffered only
+ *   'C' cached only   'm' mini-data   'F' cached+buffered;
+ * lower-case means user-accessible (L2_S_PROT_U set).
+ */
+void
+pmap_dump(pmap_t pm)
+{
+	struct l2_dtable *l2;
+	struct l2_bucket *l2b;
+	pt_entry_t *ptep, pte;
+	vaddr_t l2_va, l2b_va, va;
+	int i, j, k, occ, rows = 0;
+
+	if (pm == pmap_kernel())
+		printf("pmap_kernel (%p): ", pm);
+	else
+		printf("user pmap (%p): ", pm);
+
+	printf("domain %d, l1 at %p\n", pm->pm_domain, pm->pm_l1->l1_kva);
+
+	l2_va = 0;
+	for (i = 0; i < L2_SIZE; i++, l2_va += 0x01000000) {
+		l2 = pm->pm_l2[i];
+
+		if (l2 == NULL || l2->l2_occupancy == 0)
+			continue;
+
+		l2b_va = l2_va;
+		for (j = 0; j < L2_BUCKET_SIZE; j++, l2b_va += 0x00100000) {
+			l2b = &l2->l2_bucket[j];
+
+			if (l2b->l2b_occupancy == 0 || l2b->l2b_kva == NULL)
+				continue;
+
+			ptep = l2b->l2b_kva;
+
+			/* Skip the run of leading invalid PTEs... */
+			for (k = 0; k < 256 && ptep[k] == 0; k++)
+				;
+
+			/* ...then round down to the start of its 64-PTE row. */
+			k &= ~63;
+			occ = l2b->l2b_occupancy;
+			va = l2b_va + (k * 4096);
+			for (; k < 256; k++, va += 0x1000) {
+				char ch = ' ';
+				/* Row header every 64 pages; column ruler every 8 rows. */
+				if ((k % 64) == 0) {
+					if ((rows % 8) == 0) {
+						printf(
+" |0000 |8000 |10000 |18000 |20000 |28000 |30000 |38000\n");
+					}
+					printf("%08lx: ", va);
+				}
+
+				ncptes[k & 63] = 0;
+				pte = ptep[k];
+				if (pte == 0) {
+					ch = '.';
+				} else {
+					occ--;
+					switch (pte & 0x0c) {
+					case 0x00:
+						ch = 'D'; /* No cache No buff */
+						break;
+					case 0x04:
+						ch = 'B'; /* No cache buff */
+						break;
+					case 0x08:
+						if (pte & 0x40)
+							ch = 'm';
+						else
+						   ch = 'C'; /* Cache No buff */
+						break;
+					case 0x0c:
+						ch = 'F'; /* Cache Buff */
+						break;
+					}
+
+					/* Lower-case the legend character for user pages. */
+					if ((pte & L2_S_PROT_U) == L2_S_PROT_U)
+						ch += 0x20;
+
+					/* Record non-cacheable PTEs for pmap_dump_ncpg(). */
+					if ((pte & 0xc) == 0)
+						ncptes[k & 63] = pte;
+				}
+
+				if ((k % 64) == 63) {
+					rows++;
+					printf("%c\n", ch);
+					pmap_dump_ncpg(pm);
+					/* Stop early once all occupants were printed. */
+					if (occ == 0)
+						break;
+				} else
+					printf("%c", ch);
+			}
+		}
+	}
+}
+
+/*
+ * For each non-cacheable PTE recorded in ncptes[] by pmap_dump(),
+ * print the managed page's mapping counts and walk its pv list,
+ * flagging entries belonging to the pmap being dumped with '*'.
+ */
+void
+pmap_dump_ncpg(pmap_t pm)
+{
+	struct vm_page *pg;
+	struct pv_entry *pv;
+	int i;
+
+	/*
+	 * Scan all 64 slots; ncptes[] is filled at index (k & 63), so the
+	 * previous bound of 63 silently skipped the last page of each row.
+	 */
+	for (i = 0; i < 64; i++) {
+		if (ncptes[i] == 0)
+			continue;
+
+		pg = PHYS_TO_VM_PAGE(l2pte_pa(ncptes[i]));
+		if (pg == NULL)
+			continue;
+
+		printf(" pa 0x%08lx: krw %d kro %d urw %d uro %d\n",
+		    pg->phys_addr,
+		    pg->mdpage.krw_mappings, pg->mdpage.kro_mappings,
+		    pg->mdpage.urw_mappings, pg->mdpage.uro_mappings);
+
+		for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
+			printf(" %c va 0x%08lx, flags 0x%x\n",
+			    (pm == pv->pv_pmap) ? '*' : ' ',
+			    pv->pv_va, pv->pv_flags);
+		}
+	}
+}
+#endif
diff --git a/sys/arch/arm/arm/process_machdep.c b/sys/arch/arm/arm/process_machdep.c
new file mode 100644
index 00000000000..b1ac0c9a97d
--- /dev/null
+++ b/sys/arch/arm/arm/process_machdep.c
@@ -0,0 +1,225 @@
+/* $OpenBSD: process_machdep.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: process_machdep.c,v 1.11 2003/08/07 16:26:52 agc Exp $ */
+
+/*
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Jan-Simon Pendry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From:
+ * Id: procfs_i386.c,v 4.1 1993/12/17 10:47:45 jsp Rel
+ */
+
+/*
+ * Copyright (c) 1995 Frank Lancaster. All rights reserved.
+ * Copyright (c) 1995 Tools GmbH. All rights reserved.
+ * Copyright (c) 1995 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1993 Jan-Simon Pendry
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Jan-Simon Pendry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From:
+ * Id: procfs_i386.c,v 4.1 1993/12/17 10:47:45 jsp Rel
+ */
+
+/*
+ * This file may seem a bit stylized, but that is so that it's easier to port.
+ * Functions to be implemented here are:
+ *
+ * process_read_regs(proc, regs)
+ * Get the current user-visible register set from the process
+ * and copy it into the regs structure (<machine/reg.h>).
+ * The process is stopped at the time read_regs is called.
+ *
+ * process_write_regs(proc, regs)
+ * Update the current register set from the passed in regs
+ * structure. Take care to avoid clobbering special CPU
+ * registers or privileged bits in the PSL.
+ * The process is stopped at the time write_regs is called.
+ *
+ * process_sstep(proc, sstep)
+ * Arrange for the process to trap or not trap depending on sstep
+ * after executing a single instruction.
+ *
+ * process_set_pc(proc)
+ * Set the process's program counter.
+ */
+
+#include <sys/param.h>
+
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <sys/systm.h>
+#include <sys/user.h>
+
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#include <machine/reg.h>
+
+#include <arm/armreg.h>
+
+#ifdef ARMFPE
+#include <arm/fpe-arm/armfpe.h>
+#endif
+
+/*
+ * Return the trapframe holding the process's user-visible register
+ * state, as saved in its PCB.
+ */
+static __inline struct trapframe *
+process_frame(struct proc *p)
+{
+
+	return p->p_addr->u_pcb.pcb_tf;
+}
+
+/*
+ * Copy the stopped process's user-visible registers (r0-r12, sp, lr,
+ * pc, cpsr) out of its trapframe into *regs.  Always returns 0.
+ */
+int
+process_read_regs(struct proc *p, struct reg *regs)
+{
+	struct trapframe *tf = process_frame(p);
+
+	KASSERT(tf != NULL);
+	/* The general registers are contiguous in the frame from tf_r0. */
+	bcopy((caddr_t)&tf->tf_r0, (caddr_t)regs->r, sizeof(regs->r));
+	regs->r_sp = tf->tf_usr_sp;
+	regs->r_lr = tf->tf_usr_lr;
+	regs->r_pc = tf->tf_pc;
+	regs->r_cpsr = tf->tf_spsr;
+
+#ifdef DIAGNOSTIC
+	/* A user-mode frame must never have IRQs masked. */
+	if ((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE
+	    && tf->tf_spsr & I32_bit)
+		panic("process_read_regs: Interrupts blocked in user process");
+#endif
+
+	return(0);
+}
+
+/*
+ * Copy the process's FP register state into *regs.  Without the FP
+ * emulator the structure is zeroed so callers always see defined
+ * contents.  Always returns 0.
+ */
+int
+process_read_fpregs(struct proc *p, struct fpreg *regs)
+{
+#ifdef ARMFPE
+	arm_fpe_getcontext(p, regs);
+	return(0);
+#else	/* ARMFPE */
+	/* No hardware FP support */
+	memset(regs, 0, sizeof(struct fpreg));
+	return(0);
+#endif	/* ARMFPE */
+}
+
+/*
+ * Install *regs into the stopped process's trapframe.  Privileged
+ * PSR bits are protected: in 32-bit mode only the condition flags
+ * (PSR_FLAGS) are taken from the caller; in 26-bit mode a PC that
+ * carries mode or interrupt-disable bits is rejected with EPERM.
+ */
+int
+process_write_regs(struct proc *p, struct reg *regs)
+{
+	struct trapframe *tf = process_frame(p);
+
+	KASSERT(tf != NULL);
+	bcopy((caddr_t)regs->r, (caddr_t)&tf->tf_r0, sizeof(regs->r));
+	tf->tf_usr_sp = regs->r_sp;
+	tf->tf_usr_lr = regs->r_lr;
+#ifdef __PROG32
+	tf->tf_pc = regs->r_pc;
+	/* Merge only the condition flags; keep mode/interrupt bits. */
+	tf->tf_spsr &=  ~PSR_FLAGS;
+	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
+#ifdef DIAGNOSTIC
+	if ((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE
+	    && tf->tf_spsr & I32_bit)
+		panic("process_write_regs: Interrupts blocked in user process");
+#endif
+#else /* __PROG26 */
+	/* Refuse PC values that would change mode or mask interrupts. */
+	if ((regs->r_pc & (R15_MODE | R15_IRQ_DISABLE | R15_FIQ_DISABLE)) != 0)
+		return EPERM;
+
+	tf->tf_r15 = regs->r_pc;
+#endif
+
+	return(0);
+}
+
+/*
+ * Install *regs as the process's FP register state.  Without the FP
+ * emulator this is a no-op that still reports success.
+ */
+int
+process_write_fpregs(struct proc *p, struct fpreg *regs)
+{
+#ifdef ARMFPE
+	arm_fpe_setcontext(p, regs);
+	return(0);
+#else	/* ARMFPE */
+	/* No hardware FP support */
+	return(0);
+#endif	/* ARMFPE */
+}
+
+/*
+ * Arrange for the process to trap (or not) after a single
+ * instruction.  Not implemented in this port (XXX in the original):
+ * the request is accepted and silently ignored.
+ */
+int
+process_sstep(struct proc *p, int sstep)
+{
+	/* XXX */
+	return 0;
+}
+
+/*
+ * Set the stopped process's program counter.  In 26-bit mode the PSR
+ * bits live inside r15, so the new address is validated against the
+ * R15_PC mask and merged without touching the status bits.
+ */
+int
+process_set_pc(struct proc *p, caddr_t addr)
+{
+	struct trapframe *tf = process_frame(p);
+
+	KASSERT(tf != NULL);
+#ifdef __PROG32
+	tf->tf_pc = (int)addr;
+#else /* __PROG26 */
+	/* Only set the PC, not the PSR */
+	if (((register_t)addr & R15_PC) != (register_t)addr)
+		return EINVAL;
+	tf->tf_r15 = (tf->tf_r15 & ~R15_PC) | (register_t)addr;
+#endif
+
+	return (0);
+}
diff --git a/sys/arch/arm/arm/procfs_machdep.c b/sys/arch/arm/arm/procfs_machdep.c
new file mode 100644
index 00000000000..ecde04d2802
--- /dev/null
+++ b/sys/arch/arm/arm/procfs_machdep.c
@@ -0,0 +1,23 @@
+/* $OpenBSD: procfs_machdep.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: procfs_machdep.c,v 1.2 2003/07/15 00:24:39 lukem Exp $ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mount.h>
+#include <sys/vnode.h>
+#include <miscfs/procfs/procfs.h>
+
+
+#if 0
+/*
+ * Linux-style /proc/cpuinfo.
+ * Only used when procfs is mounted with -o linux.
+ */
+int
+procfs_getcpuinfstr(char *buf, int *len)
+{
+	/* No cpuinfo text is generated yet; report an empty result. */
+	*len = 0;
+
+	return 0;
+}
+#endif
diff --git a/sys/arch/arm/arm/setcpsr.S b/sys/arch/arm/arm/setcpsr.S
new file mode 100644
index 00000000000..86129919ef1
--- /dev/null
+++ b/sys/arch/arm/arm/setcpsr.S
@@ -0,0 +1,79 @@
+/* $OpenBSD: setcpsr.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: setcpsr.S,v 1.2 2002/08/15 01:37:02 briggs Exp $^I*/$
+
+/*
+ * Copyright (c) 1994 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * setcpsr.S
+ *
+ * Miscellaneous routines to play with the CPSR register
+ *
+ * Eventually this routine can be inline assembly.
+ *
+ * Created : 12/09/94
+ *
+ * Based on kate/display/setcpsr.s
+ */
+
+#include <machine/asm.h>
+
+/* Sets and clears bits in the CPSR register
+ *
+ * r0 - bic mask
+ * r1 - eor mask
+ */
+
+ENTRY_NP(SetCPSR)
+	mrs	r3, cpsr	/* Read the current CPSR */
+	bic	r2, r3, r0	/* Clear the bits in the bic mask (r0) */
+	eor	r2, r2, r1	/* Toggle the bits in the eor mask (r1) */
+	msr	cpsr_all, r2	/* Install the new CPSR */
+
+	mov	r0, r3		/* Return the old CPSR */
+
+	mov	pc, lr
+
+
+/* Gets the CPSR register
+ *
+ * Returns the CPSR in r0
+ */
+
+ENTRY_NP(GetCPSR)
+	mrs	r0, cpsr	/* Return the CPSR in r0 */
+
+	mov	pc, lr
+
diff --git a/sys/arch/arm/arm/setstack.S b/sys/arch/arm/arm/setstack.S
new file mode 100644
index 00000000000..0d3925db9bb
--- /dev/null
+++ b/sys/arch/arm/arm/setstack.S
@@ -0,0 +1,93 @@
+/* $OpenBSD: setstack.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: setstack.S,v 1.2 2002/08/15 01:37:02 briggs Exp $^I*/$
+
+/*
+ * Copyright (c) 1994 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * setstack.S
+ *
+ * Miscellaneous routine to play with the stack pointer in different CPU modes
+ *
+ * Eventually this routine can be inline assembly.
+ *
+ * Created : 17/09/94
+ *
+ * Based on kate/display/setstack.s
+ */
+
+#include <machine/cpu.h>
+#include <machine/asm.h>
+
+/* To set the stack pointer for a particular mode we must switch
+ * to that mode update the banked r13 and then switch back.
+ * This routine provides an easy way of doing this for any mode
+ *
+ * r0 = CPU mode
+ * r1 = stackptr
+ */
+
+ENTRY(set_stackptr)
+	mrs	r3, cpsr		/* Save the current mode in r3 */
+	bic	r2, r3, #(PSR_MODE)	/* Clear the mode field... */
+	orr	r2, r2, r0		/* ...insert the requested mode (r0) */
+	msr	cpsr_all, r2		/* Switch to that mode */
+
+	mov	sp, r1			/* Set that mode's banked sp from r1 */
+
+	msr	cpsr_all, r3		/* Restore the old mode */
+
+	mov	pc, lr			/* Exit */
+
+/* To get the stack pointer for a particular mode we must switch
+ * to that mode copy the banked r13 and then switch back.
+ * This routine provides an easy way of doing this for any mode
+ *
+ * r0 = CPU mode
+ */
+
+ENTRY(get_stackptr)
+	mrs	r3, cpsr		/* Save the current mode in r3 */
+	bic	r2, r3, #(PSR_MODE)	/* Clear the mode field... */
+	orr	r2, r2, r0		/* ...insert the requested mode (r0) */
+	msr	cpsr_all, r2		/* Switch to that mode */
+
+	mov	r0, sp			/* Return that mode's banked sp */
+
+	msr	cpsr_all, r3		/* Restore the old mode */
+
+	mov	pc, lr			/* Exit */
+
+/* End of setstack.S */
diff --git a/sys/arch/arm/arm/sig_machdep.c b/sys/arch/arm/arm/sig_machdep.c
new file mode 100644
index 00000000000..cf5756ba210
--- /dev/null
+++ b/sys/arch/arm/arm/sig_machdep.c
@@ -0,0 +1,384 @@
+/* $OpenBSD: sig_machdep.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: sig_machdep.c,v 1.22 2003/10/08 00:28:41 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Machine dependent functions for kernel setup
+ *
+ * Created : 17/09/94
+ */
+
+#include <sys/param.h>
+
+#include <sys/mount.h> /* XXX only needed by syscallargs.h */
+#include <sys/proc.h>
+#include <sys/signal.h>
+#include <sys/signalvar.h>
+#include <sys/syscallargs.h>
+#include <sys/systm.h>
+#include <sys/user.h>
+
+#include <arm/armreg.h>
+
+#include <machine/cpu.h>
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#ifndef acorn26
+#include <arm/cpufunc.h>
+#endif
+
+/*
+ * Return the trapframe holding the saved user register state for
+ * process p, as recorded in its PCB (p->p_addr->u_pcb.pcb_tf).
+ */
+static __inline struct trapframe *
+process_frame(struct proc *p)
+{
+
+ return p->p_addr->u_pcb.pcb_tf;
+}
+
+void *getframe(struct proc *p, int sig, int *onstack);
+
+
+/*
+ * Send an interrupt to process.
+ *
+ * Stack is set up to allow sigcode stored
+ * in u. to call routine, followed by kcall
+ * to sigreturn routine below. After sigreturn
+ * resets the signal mask, the stack, and the
+ * frame pointer, it returns to the user specified pc.
+ */
+void
+sendsig(sig_t catcher, int sig, int returnmask, u_long code, int type,
+ union sigval val)
+{
+ struct proc *p = curproc;
+ struct trapframe *tf;
+ struct sigframe *fp, frame;
+ struct sigacts *psp = p->p_sigacts;
+ int oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
+ int onstack = 0;
+
+ tf = process_frame(p);
+
+ /* Do we need to jump onto the signal stack? */
+
+ /* Allocate space for the signal handler context. */
+ if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
+ (psp->ps_sigonstack & sigmask(sig))) {
+ /* Use the top of the alternate signal stack. */
+ onstack = 1;
+ fp = (struct sigframe *)((caddr_t)psp->ps_sigstk.ss_sp +
+ psp->ps_sigstk.ss_size);
+ } else
+ fp = (struct sigframe *)tf->tf_usr_sp;
+ /* make room on the stack */
+ fp--;
+
+ /* make the stack aligned */
+ fp = (void *)STACKALIGN(fp);
+
+ /* Build stack frame for signal trampoline. */
+ frame.sf_signum = sig;
+ frame.sf_sip = NULL;
+ frame.sf_scp = &fp->sf_sc;
+ frame.sf_handler = catcher;
+
+ /* Save register context (full user register set from the trapframe). */
+ frame.sf_sc.sc_r0 = tf->tf_r0;
+ frame.sf_sc.sc_r1 = tf->tf_r1;
+ frame.sf_sc.sc_r2 = tf->tf_r2;
+ frame.sf_sc.sc_r3 = tf->tf_r3;
+ frame.sf_sc.sc_r4 = tf->tf_r4;
+ frame.sf_sc.sc_r5 = tf->tf_r5;
+ frame.sf_sc.sc_r6 = tf->tf_r6;
+ frame.sf_sc.sc_r7 = tf->tf_r7;
+ frame.sf_sc.sc_r8 = tf->tf_r8;
+ frame.sf_sc.sc_r9 = tf->tf_r9;
+ frame.sf_sc.sc_r10 = tf->tf_r10;
+ frame.sf_sc.sc_r11 = tf->tf_r11;
+ frame.sf_sc.sc_r12 = tf->tf_r12;
+ frame.sf_sc.sc_usr_sp = tf->tf_usr_sp;
+ frame.sf_sc.sc_usr_lr = tf->tf_usr_lr;
+ frame.sf_sc.sc_svc_lr = tf->tf_svc_lr;
+ frame.sf_sc.sc_pc = tf->tf_pc;
+ frame.sf_sc.sc_spsr = tf->tf_spsr;
+
+ /* Save signal stack. */
+ frame.sf_sc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
+
+ /* Save signal mask. */
+ frame.sf_sc.sc_mask = returnmask;
+
+ /* Attach siginfo if the handler was registered with SA_SIGINFO. */
+ if (psp->ps_siginfo & sigmask(sig)) {
+ frame.sf_sip = &fp->sf_si;
+ initsiginfo(&frame.sf_si, sig, code, type, val);
+ }
+
+ if (copyout(&frame, fp, sizeof(frame)) != 0) {
+ /*
+ * Process has trashed its stack; give it an illegal
+ * instruction to halt it in its tracks.
+ */
+ sigexit(p, SIGILL);
+ /* NOTREACHED */
+ }
+
+ /*
+ * Build context to run handler in. We invoke the handler
+ * directly, only returning via the trampoline. Note the
+ * trampoline version numbers are coordinated with machine-
+ * dependent code in libc.
+ */
+
+ /*
+ * this was all in the switch below, seemed daft to duplicate it, if
+ * we do a new trampoline version it might change then
+ */
+ tf->tf_r0 = sig;
+ tf->tf_r1 = code;
+ tf->tf_r2 = (int)frame.sf_scp;
+ tf->tf_pc = (int)frame.sf_handler;
+ tf->tf_usr_sp = (int)fp;
+
+ /* Handler "returns" into the user-mapped sigcode trampoline. */
+ tf->tf_usr_lr = (int)p->p_sigcode;
+ /* XXX This should not be needed. */
+ cpu_icache_sync_all();
+
+ /* Remember that we're now on the signal stack. */
+ if (onstack)
+ psp->ps_sigstk.ss_flags |= SS_ONSTACK;
+}
+
+#if 0
+/*
+ * NOTE(review): dead code, never compiled.  This is the NetBSD-style
+ * helper: it still calls process_frame(l) with an undefined 'l' (the
+ * NetBSD lwp) and uses struct sigctx, which this OpenBSD file does
+ * not declare.  It would not compile if the #if 0 were removed.
+ */
+void *
+getframe(struct proc *p, int sig, int *onstack)
+{
+ struct sigctx *ctx = &p->p_sigctx;
+ struct trapframe *tf = process_frame(l);
+
+ /* Do we need to jump onto the signal stack? */
+ *onstack = (ctx->ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0
+ && (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
+ if (*onstack)
+ return (char *)ctx->ps_sigstk.ss_sp + ctx->ps_sigstk.ss_size;
+ return (void *)tf->tf_usr_sp;
+}
+#endif
+
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken. Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc and psl as specified by
+ * context left by sendsig. Check carefully to
+ * make sure that the user has not modified the
+ * psr to gain improper privileges or to cause
+ * a machine fault.
+ */
+
+int
+sys_sigreturn(struct proc *p, void *v, register_t *retval)
+{
+ struct sys_sigreturn_args /* {
+ syscallarg(struct sigcontext *) sigcntxp;
+ } */ *uap = v;
+ struct sigcontext *scp, context;
+ struct trapframe *tf;
+ struct sigacts *psp = p->p_sigacts;
+
+ /*
+ * we do a rather scary test in userland
+ */
+ if (v == NULL)
+ return (EFAULT);
+
+ /*
+ * The trampoline code hands us the context.
+ * It is unsafe to keep track of it ourselves, in the event that a
+ * program jumps out of a signal handler.
+ */
+ scp = SCARG(uap, sigcntxp);
+ if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
+ return (EFAULT);
+
+ /*
+ * Make sure the processor mode has not been tampered with and
+ * interrupts have not been disabled.  Rejecting here prevents a
+ * user process from sigreturn()ing into a privileged mode.
+ */
+#ifdef __PROG32
+ if ((context.sc_spsr & PSR_MODE) != PSR_USR32_MODE ||
+ (context.sc_spsr & (I32_bit | F32_bit)) != 0)
+ return (EINVAL);
+#else /* __PROG26 */
+ if ((context.sc_pc & R15_MODE) != R15_MODE_USR ||
+ (context.sc_pc & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)) != 0)
+ return EINVAL;
+#endif
+
+ /* Restore register context. */
+ tf = process_frame(p);
+ tf->tf_r0 = context.sc_r0;
+ tf->tf_r1 = context.sc_r1;
+ tf->tf_r2 = context.sc_r2;
+ tf->tf_r3 = context.sc_r3;
+ tf->tf_r4 = context.sc_r4;
+ tf->tf_r5 = context.sc_r5;
+ tf->tf_r6 = context.sc_r6;
+ tf->tf_r7 = context.sc_r7;
+ tf->tf_r8 = context.sc_r8;
+ tf->tf_r9 = context.sc_r9;
+ tf->tf_r10 = context.sc_r10;
+ tf->tf_r11 = context.sc_r11;
+ tf->tf_r12 = context.sc_r12;
+ tf->tf_usr_sp = context.sc_usr_sp;
+ tf->tf_usr_lr = context.sc_usr_lr;
+ tf->tf_svc_lr = context.sc_svc_lr;
+ tf->tf_pc = context.sc_pc;
+ tf->tf_spsr = context.sc_spsr;
+
+ /* Restore signal stack. */
+ if (context.sc_onstack & SS_ONSTACK)
+ psp->ps_sigstk.ss_flags |= SS_ONSTACK;
+ else
+ psp->ps_sigstk.ss_flags &= ~SS_ONSTACK;
+
+ /* Restore signal mask (unblockable signals stay unblocked). */
+#if 0
+ (void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0);
+#else
+ p->p_sigmask = context.sc_mask & ~sigcantmask;
+#endif
+
+ /* EJUSTRETURN: leave the just-restored r0/r1 untouched on exit. */
+ return (EJUSTRETURN);
+}
+
+#if 0
+/*
+ * NOTE(review): dead code, never compiled.  These are the NetBSD
+ * mcontext accessors; they still use the NetBSD lwp 'l' (undefined
+ * here), ras_lookup(), p_sigctx, and _UC_* flags — none of which
+ * exist in this OpenBSD file.  Kept only for reference.
+ */
+void
+cpu_getmcontext(p, mcp, flags)
+ struct proc *p;
+ mcontext_t *mcp;
+ unsigned int *flags;
+{
+ struct trapframe *tf = process_frame(p);
+ __greg_t *gr = mcp->__gregs;
+ __greg_t ras_pc;
+
+ /* Save General Register context. */
+ gr[_REG_R0] = tf->tf_r0;
+ gr[_REG_R1] = tf->tf_r1;
+ gr[_REG_R2] = tf->tf_r2;
+ gr[_REG_R3] = tf->tf_r3;
+ gr[_REG_R4] = tf->tf_r4;
+ gr[_REG_R5] = tf->tf_r5;
+ gr[_REG_R6] = tf->tf_r6;
+ gr[_REG_R7] = tf->tf_r7;
+ gr[_REG_R8] = tf->tf_r8;
+ gr[_REG_R9] = tf->tf_r9;
+ gr[_REG_R10] = tf->tf_r10;
+ gr[_REG_R11] = tf->tf_r11;
+ gr[_REG_R12] = tf->tf_r12;
+ gr[_REG_SP] = tf->tf_usr_sp;
+ gr[_REG_LR] = tf->tf_usr_lr;
+ gr[_REG_PC] = tf->tf_pc;
+ gr[_REG_CPSR] = tf->tf_spsr;
+
+ if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
+ (caddr_t) gr[_REG_PC])) != -1)
+ gr[_REG_PC] = ras_pc;
+
+ *flags |= _UC_CPU;
+
+#ifdef ARMFPE
+ /* Save Floating Point Register context. */
+ arm_fpe_getcontext(p, (struct fpreg *)(void *)&mcp->fpregs);
+ *flags |= _UC_FPU;
+#endif
+}
+
+int
+cpu_setmcontext(p, mcp, flags)
+ struct proc *p;
+ const mcontext_t *mcp;
+ unsigned int flags;
+{
+ struct trapframe *tf = process_frame(l);
+ __greg_t *gr = mcp->__gregs;
+
+ if ((flags & _UC_CPU) != 0) {
+ /* Restore General Register context. */
+ /* Make sure the processor mode has not been tampered with. */
+#ifdef PROG32
+ if ((gr[_REG_CPSR] & PSR_MODE) != PSR_USR32_MODE ||
+ (gr[_REG_CPSR] & (I32_bit | F32_bit)) != 0)
+ return (EINVAL);
+#else /* PROG26 */
+ if ((gr[_REG_PC] & R15_MODE) != R15_MODE_USR ||
+ (gr[_REG_PC] & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)) != 0)
+ return (EINVAL);
+#endif
+
+ tf->tf_r0 = gr[_REG_R0];
+ tf->tf_r1 = gr[_REG_R1];
+ tf->tf_r2 = gr[_REG_R2];
+ tf->tf_r3 = gr[_REG_R3];
+ tf->tf_r4 = gr[_REG_R4];
+ tf->tf_r5 = gr[_REG_R5];
+ tf->tf_r6 = gr[_REG_R6];
+ tf->tf_r7 = gr[_REG_R7];
+ tf->tf_r8 = gr[_REG_R8];
+ tf->tf_r9 = gr[_REG_R9];
+ tf->tf_r10 = gr[_REG_R10];
+ tf->tf_r11 = gr[_REG_R11];
+ tf->tf_r12 = gr[_REG_R12];
+ tf->tf_usr_sp = gr[_REG_SP];
+ tf->tf_usr_lr = gr[_REG_LR];
+ tf->tf_pc = gr[_REG_PC];
+ tf->tf_spsr = gr[_REG_CPSR];
+ }
+
+#ifdef ARMFPE
+ if ((flags & _UC_FPU) != 0) {
+ /* Restore Floating Point Register context. */
+ arm_fpe_setcontext(p, (struct fpreg *)(void *)&mcp->__fpregs);
+ }
+#endif
+ if (flags & _UC_SETSTACK)
+ l->l_proc->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
+ if (flags & _UC_CLRSTACK)
+ l->l_proc->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;
+
+ return (0);
+}
+#endif
diff --git a/sys/arch/arm/arm/sigcode.S b/sys/arch/arm/arm/sigcode.S
new file mode 100644
index 00000000000..c46cc6e12d0
--- /dev/null
+++ b/sys/arch/arm/arm/sigcode.S
@@ -0,0 +1,62 @@
+/* $OpenBSD: sigcode.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: sigcode.S,v 1.6 2003/10/05 19:44:58 matt Exp $ */
+
+/*
+ * Copyright (C) 1994-1997 Mark Brinicombe
+ * Copyright (C) 1994 Brini
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of Brini may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+#include "assym.h"
+#include <sys/syscall.h>
+
+/*
+ * Signal trampoline;
+ */
+
+ENTRY_NP(sigcode)
+/*
+ * The kernel arranges for the handler to be invoked directly. This
+ * trampoline is used only to return from the signal.
+ *
+ * The stack pointer points to the saved sigcontext.
+ */
+/* mov r0, sp */
+ add r0, sp, #SIGF_SC /* r0 = &sigframe->sf_sc for sigreturn */
+ swi SYS_sigreturn /* does not return on success */
+
+/* Well if that failed we better exit quick ! */
+
+ swi SYS_exit
+ b . - 8 /* branch back 8 bytes (two insns): never fall through */
+
+ .align 0
+ .global _C_LABEL(esigcode) /* end marker; kernel copies [sigcode, esigcode) */
+_C_LABEL(esigcode):
diff --git a/sys/arch/arm/arm/softintr.c b/sys/arch/arm/arm/softintr.c
new file mode 100644
index 00000000000..bf6be9b552c
--- /dev/null
+++ b/sys/arch/arm/arm/softintr.c
@@ -0,0 +1,207 @@
+/* $OpenBSD: softintr.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: softintr.c,v 1.2 2003/07/15 00:24:39 lukem Exp $ */
+
+/*
+ * Copyright (c) 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+
+/* XXX Network interrupts should be converted to new softintrs. */
+#include <net/netisr.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/intr.h>
+
+/* One queue of pending handlers per software-interrupt level. */
+struct soft_intrq soft_intrq[SI_NQUEUES];
+
+/* Legacy softnet handler, established in softintr_init(). */
+struct soft_intrhand *softnet_intrhand;
+
+void netintr(void);
+
+/*
+ * softintr_init:
+ *
+ * Initialize the software interrupt system.
+ */
+void
+softintr_init(void)
+{
+#if 0
+ static const char *softintr_names[] = SI_QUEUENAMES;
+#endif
+ struct soft_intrq *siq;
+ int i;
+
+ /* Initialize each queue's handler list and record its index. */
+ for (i = 0; i < SI_NQUEUES; i++) {
+ siq = &soft_intrq[i];
+ TAILQ_INIT(&siq->siq_list);
+#if 0
+ evcnt_attach_dynamic(&siq->siq_evcnt, EVCNT_TYPE_INTR,
+ NULL, "soft", softintr_names[i]);
+#endif
+ siq->siq_si = i;
+ }
+
+ /* XXX Establish legacy software interrupt handlers. */
+ softnet_intrhand = softintr_establish(IPL_SOFTNET,
+ (void (*)(void *))netintr, NULL);
+
+ assert(softnet_intrhand != NULL);
+}
+
+/*
+ * softintr_dispatch:
+ *
+ * Process pending software interrupts on the specified queue.
+ *
+ * NOTE: We must already be at the correct interrupt priority level.
+ */
+void
+softintr_dispatch(int si)
+{
+ struct soft_intrq *siq = &soft_intrq[si];
+ struct soft_intrhand *sih;
+ int oldirqstate;
+
+ siq->siq_evcnt.ev_count++;
+ /* Drain the queue; handlers scheduled while we run are picked up too. */
+ for (;;) {
+ /* IRQs off while unlinking a handler from the queue. */
+ oldirqstate = disable_interrupts(I32_bit);
+ sih = TAILQ_FIRST(&siq->siq_list);
+ if (sih == NULL) {
+ restore_interrupts(oldirqstate);
+ break;
+ }
+
+ TAILQ_REMOVE(&siq->siq_list, sih, sih_list);
+ sih->sih_pending = 0;
+
+ uvmexp.softs++;
+
+ restore_interrupts(oldirqstate);
+
+ /* Run the handler with the previous interrupt state restored. */
+ (*sih->sih_func)(sih->sih_arg);
+ }
+}
+
+/*
+ * softintr_establish: [interface]
+ *
+ * Register a software interrupt handler.
+ */
+void *
+softintr_establish(int ipl, void (*func)(void *), void *arg)
+{
+ struct soft_intrhand *sih;
+ int si;
+
+ /* Map the caller's IPL onto a soft interrupt queue index. */
+ switch (ipl) {
+ case IPL_SOFT:
+ si = SI_SOFT;
+ break;
+
+ case IPL_SOFTCLOCK:
+ si = SI_SOFTCLOCK;
+ break;
+
+ case IPL_SOFTNET:
+ si = SI_SOFTNET;
+ break;
+
+ case IPL_TTY:
+ case IPL_SOFTSERIAL:
+ si = SI_SOFTSERIAL;
+ break;
+
+ default:
+ panic("softintr_establish: unknown soft IPL %d", ipl);
+ }
+
+ /* M_NOWAIT: may fail; caller must check for NULL. */
+ sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);
+ if (__predict_true(sih != NULL)) {
+ sih->sih_func = func;
+ sih->sih_arg = arg;
+ sih->sih_siq = &soft_intrq[si];
+ sih->sih_pending = 0;
+ }
+/* XXX(review): leftover debug output — consider removing. */
+printf("softintr_establish ipl 0x%x si %d\n", ipl, si);
+ return (sih);
+}
+
+/*
+ * softintr_disestablish: [interface]
+ *
+ * Unregister a software interrupt handler.
+ */
+void
+softintr_disestablish(void *arg)
+{
+ struct soft_intrhand *sih = arg;
+ struct soft_intrq *siq = sih->sih_siq;
+ int oldirqstate;
+
+ /* With IRQs off, unlink the handler if it is still queued. */
+ oldirqstate = disable_interrupts(I32_bit);
+ if (sih->sih_pending) {
+ TAILQ_REMOVE(&siq->siq_list, sih, sih_list);
+ sih->sih_pending = 0;
+ }
+ restore_interrupts(oldirqstate);
+
+ free(sih, M_DEVBUF);
+}
+
+/* Bitmask of pending legacy network protocol interrupts. */
+int netisr;
+
+/*
+ * Run every legacy netisr whose bit is set, after atomically
+ * snapshotting and clearing the pending mask at splhigh.
+ */
+void
+netintr(void)
+{
+ int n, s;
+
+ s = splhigh();
+ n = netisr;
+ netisr = 0;
+ splx(s);
+
+/* netisr_dispatch.h expands DONETISR once per configured protocol. */
+#define DONETISR(bit, fn) \
+ do { \
+ if (n & (1 << (bit))) \
+ fn(); \
+ } while (/*CONSTCOND*/0)
+
+#include <net/netisr_dispatch.h>
+
+#undef DONETISR
+}
diff --git a/sys/arch/arm/arm/stubs.c b/sys/arch/arm/arm/stubs.c
new file mode 100644
index 00000000000..29996d0273a
--- /dev/null
+++ b/sys/arch/arm/arm/stubs.c
@@ -0,0 +1,215 @@
+/* $OpenBSD: stubs.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: stubs.c,v 1.14 2003/07/15 00:24:42 lukem Exp $^I*/$
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Routines that are temporary or do not have a home yet.
+ *
+ * Created : 17/09/94
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/errno.h>
+#include <sys/proc.h>
+#include <sys/conf.h>
+#include <sys/msgbuf.h>
+#include <uvm/uvm_extern.h>
+#include <machine/cpu.h>
+#include <machine/intr.h>
+#include <machine/bootconfig.h>
+#include <machine/pcb.h>
+#include <arm/machdep.h>
+
+extern dev_t dumpdev;
+
+/*
+ * These variables are needed by /sbin/savecore
+ */
+u_int32_t dumpmag = 0x8fca0101; /* magic number */
+int dumpsize = 0; /* pages */
+long dumplo = 0; /* blocks */
+
+/* Register state saved at crash-dump time (savectx() in dumpsys()). */
+struct pcb dumppcb;
+
+/*
+ * This is called by main to set dumplo and dumpsize.
+ * Dumps always skip the first CLBYTES of disk space
+ * in case there might be a disk label stored there.
+ * If there is extra space, put dump at the end to
+ * reduce the chance that swapping trashes it.
+ */
+
+void dumpconf(void);
+
+void
+dumpconf()
+{
+ const struct bdevsw *bdev;
+ int nblks; /* size of dump area */
+
+ if (dumpdev == NODEV)
+ return;
+ bdev = bdevsw_lookup(dumpdev);
+ if (bdev == NULL)
+ panic("dumpconf: bad dumpdev=0x%x", dumpdev);
+ if (bdev->d_psize == NULL)
+ return;
+ nblks = (*bdev->d_psize)(dumpdev);
+ /* Too small to hold even one page plus the label area. */
+ if (nblks <= ctod(1))
+ return;
+
+ dumpsize = physmem;
+
+ /* Always skip the first CLBYTES, in case there is a label there. */
+ if (dumplo < ctod(1))
+ dumplo = ctod(1);
+
+ /* Put dump at end of partition, and make it fit. */
+ if (dumpsize > dtoc(nblks - dumplo))
+ dumpsize = dtoc(nblks - dumplo);
+ if (dumplo < nblks - ctod(dumpsize))
+ dumplo = nblks - ctod(dumpsize);
+}
+
+/* This should be moved to machdep.c */
+
+extern char *memhook; /* XXX */
+
+/*
+ * Doadump comes here after turning off memory management and
+ * getting on the dump stack, either when called above, or by
+ * the auto-restart code.
+ */
+
+void
+dumpsys()
+{
+ const struct bdevsw *bdev;
+ daddr_t blkno;
+ int psize;
+ int error;
+ int addr;
+ int block;
+ int len;
+ vaddr_t dumpspace;
+
+ /* Save registers. */
+ savectx(&dumppcb);
+ /* flush everything out of caches */
+ cpu_dcache_wbinv_all();
+
+ if (dumpdev == NODEV)
+ return;
+ if (dumpsize == 0) {
+ dumpconf();
+ if (dumpsize == 0)
+ return;
+ }
+ if (dumplo <= 0) {
+ printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
+ minor(dumpdev));
+ return;
+ }
+ printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
+ minor(dumpdev), dumplo);
+
+ blkno = dumplo;
+ /* memhook is a scratch VA we temporarily map each physical page at. */
+ dumpspace = (vaddr_t) memhook;
+
+ bdev = bdevsw_lookup(dumpdev);
+ if (bdev == NULL || bdev->d_psize == NULL)
+ return;
+ psize = (*bdev->d_psize)(dumpdev);
+ printf("dump ");
+ if (psize == -1) {
+ printf("area unavailable\n");
+ return;
+ }
+
+ error = 0;
+ len = 0;
+
+ /*
+ * Walk every DRAM block from bootconfig, page by page: map the
+ * physical page at dumpspace, write it to the dump device, unmap.
+ */
+ for (block = 0; block < bootconfig.dramblocks && error == 0; ++block) {
+ addr = bootconfig.dram[block].address;
+ for (;addr < (bootconfig.dram[block].address
+ + (bootconfig.dram[block].pages * PAGE_SIZE));
+ addr += PAGE_SIZE) {
+ /* Progress marker once per MB written. */
+ if ((len % (1024*1024)) == 0)
+ printf("%d ", len / (1024*1024));
+ pmap_kenter_pa(dumpspace, addr, VM_PROT_READ);
+ pmap_update(pmap_kernel());
+
+ error = (*bdev->d_dump)(dumpdev,
+ blkno, (caddr_t) dumpspace, PAGE_SIZE);
+ pmap_kremove(dumpspace, PAGE_SIZE);
+ pmap_update(pmap_kernel());
+ if (error) break;
+ blkno += btodb(PAGE_SIZE);
+ len += PAGE_SIZE;
+ }
+ }
+
+ /* Report the outcome of the dump. */
+ switch (error) {
+ case ENXIO:
+ printf("device bad\n");
+ break;
+
+ case EFAULT:
+ printf("device not ready\n");
+ break;
+
+ case EINVAL:
+ printf("area improper\n");
+ break;
+
+ case EIO:
+ printf("i/o error\n");
+ break;
+
+ case EINTR:
+ printf("aborted from console\n");
+ break;
+
+ default:
+ printf("succeeded\n");
+ break;
+ }
+ printf("\n\n");
+ delay(1000000);
+}
+
+/* End of stubs.c */
diff --git a/sys/arch/arm/arm/sys_machdep.c b/sys/arch/arm/arm/sys_machdep.c
new file mode 100644
index 00000000000..3b08e677768
--- /dev/null
+++ b/sys/arch/arm/arm/sys_machdep.c
@@ -0,0 +1,119 @@
+/* $OpenBSD: sys_machdep.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: sys_machdep.c,v 1.6 2003/07/15 00:24:42 lukem Exp $^I*/$
+
+/*
+ * Copyright (c) 1995-1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * sys_machdep.c
+ *
+ * Machine dependent syscalls
+ *
+ * Created : 10/01/96
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/mbuf.h>
+#include <sys/mount.h>
+#include <uvm/uvm_extern.h>
+#include <sys/sysctl.h>
+#include <sys/syscallargs.h>
+
+#include <machine/sysarch.h>
+
+/* Prototypes */
+static int arm32_sync_icache __P((struct proc *, char *, register_t *));
+static int arm32_drain_writebuf __P((struct proc *, char *, register_t *));
+
+/*
+ * ARM_SYNC_ICACHE sysarch op: copy the user's {addr, len} argument
+ * struct in and synchronize the instruction cache over that range.
+ */
+static int
+arm32_sync_icache(p, args, retval)
+ struct proc *p;
+ char *args;
+ register_t *retval;
+{
+ struct arm_sync_icache_args ua;
+ int error;
+
+ if ((error = copyin(args, &ua, sizeof(ua))) != 0)
+ return (error);
+
+ cpu_icache_sync_range(ua.addr, ua.len);
+
+ *retval = 0;
+ return(0);
+}
+
+/*
+ * ARM_DRAIN_WRITEBUF sysarch op: drain the CPU write buffer.
+ * Takes no arguments; always succeeds.
+ */
+static int
+arm32_drain_writebuf(p, args, retval)
+ struct proc *p;
+ char *args;
+ register_t *retval;
+{
+ /* No args. */
+
+ cpu_drain_writebuf();
+
+ *retval = 0;
+ return(0);
+}
+
+/*
+ * sysarch(2) entry point: dispatch machine-dependent operations by
+ * op code.  Unknown ops return EINVAL.
+ */
+int
+sys_sysarch(p, v, retval)
+ struct proc *p;
+ void *v;
+ register_t *retval;
+{
+ struct sys_sysarch_args /* {
+ syscallarg(int) op;
+ syscallarg(void *) parms;
+ } */ *uap = v;
+ int error = 0;
+
+ switch(SCARG(uap, op)) {
+ case ARM_SYNC_ICACHE :
+ error = arm32_sync_icache(p, SCARG(uap, parms), retval);
+ break;
+
+ case ARM_DRAIN_WRITEBUF :
+ error = arm32_drain_writebuf(p, SCARG(uap, parms), retval);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ return (error);
+}
+
+/* End of sys_machdep.c */
diff --git a/sys/arch/arm/arm/syscall.c b/sys/arch/arm/arm/syscall.c
new file mode 100644
index 00000000000..57c3f1a7756
--- /dev/null
+++ b/sys/arch/arm/arm/syscall.c
@@ -0,0 +1,492 @@
+/* $OpenBSD: syscall.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: syscall.c,v 1.24 2003/11/14 19:03:17 scw Exp $ */
+
+/*-
+ * Copyright (c) 2000, 2003 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Charles M. Hannum.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * syscall entry handling
+ *
+ * Created : 09/11/94
+ */
+
+#include <sys/param.h>
+
+#include <sys/device.h>
+#include <sys/errno.h>
+#include <sys/kernel.h>
+#include <sys/reboot.h>
+#include <sys/signalvar.h>
+#include <sys/syscall.h>
+#include <sys/systm.h>
+#include <sys/user.h>
+#ifdef KTRACE
+#include <sys/ktrace.h>
+#endif
+#ifdef SYSTRACE
+#include <sys/systrace.h>
+#endif
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/cpu.h>
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#include <arm/swi.h>
+
+#ifdef acorn26
+#include <machine/machdep.h>
+#endif
+
+#define MAXARGS 8
+
+void syscall_intern(struct proc *);
+void syscall_plain(struct trapframe *, struct proc *, u_int32_t);
+void syscall_fancy(struct trapframe *, struct proc *, u_int32_t);
+
+/*
+ * swi_handler(frame):
+ *
+ *	Entry point for SWI (software interrupt) exceptions.
+ *	Re-enables interrupts if the trapped context had them enabled,
+ *	validates the alignment of the faulting PC, fetches the SWI
+ *	instruction word and hands it to the syscall dispatcher.
+ */
+void
+swi_handler(trapframe_t *frame)
+{
+	struct proc *p = curproc;
+	u_int32_t insn;
+	union sigval sv;
+
+	/*
+	 * Enable interrupts if they were enabled before the exception.
+	 * Since all syscalls *should* come from user mode it will always
+	 * be safe to enable them, but check anyway.
+	 */
+#ifdef acorn26
+	if ((frame->tf_r15 & R15_IRQ_DISABLE) == 0)
+		int_on();
+#else
+	if (!(frame->tf_spsr & I32_bit))
+		enable_interrupts(I32_bit);
+#endif
+
+#ifdef acorn26
+	frame->tf_pc += INSN_SIZE;
+#endif
+
+	/*
+	 * Make sure the program counter is correctly aligned so we
+	 * don't take an alignment fault trying to read the opcode.
+	 */
+	if (__predict_false(((frame->tf_pc - INSN_SIZE) & 3) != 0)) {
+		/* Give the user an illegal instruction signal. */
+		sv.sival_ptr = (u_int32_t *)(u_int32_t)(frame->tf_pc-INSN_SIZE);
+		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
+		userret(p);
+		return;
+	}
+
+	/* XXX fuword? */
+#ifdef __PROG32
+	insn = *(u_int32_t *)(frame->tf_pc - INSN_SIZE);
+#else
+	insn = *(u_int32_t *)((frame->tf_r15 & R15_PC) - INSN_SIZE);
+#endif
+
+	p->p_addr->u_pcb.pcb_tf = frame;
+
+#ifdef CPU_ARM7
+	/*
+	 * This code is only needed if we are including support for the ARM7
+	 * core. Other CPUs do not need it but it does not hurt.
+	 */
+
+	/*
+	 * ARM700/ARM710 match sticks and sellotape job ...
+	 *
+	 * I know this affects GPS/VLSI ARM700/ARM710 + various ARM7500.
+	 *
+	 * On occasion data aborts are mishandled and end up calling
+	 * the swi vector.
+	 *
+	 * If the instruction that caused the exception is not a SWI
+	 * then we hit the bug.
+	 */
+	if ((insn & 0x0f000000) != 0x0f000000) {
+		frame->tf_pc -= INSN_SIZE;
+		curcpu()->ci_arm700bugcount.ev_count++;
+		userret(p);	/* was userret(l): `l' is not defined here */
+		return;
+	}
+#endif	/* CPU_ARM7 */
+
+	uvmexp.syscalls++;
+
+#if 0
+	(*(void(*)(struct trapframe *, struct proc *, u_int32_t))
+	    (p->p_md.md_syscall))(frame, p, insn);
+#else
+	/* Always use the fancy path until md_syscall dispatch is enabled. */
+	syscall_fancy(frame, p, insn);
+#endif
+}
+
+/*
+ * syscall_intern(p):
+ *
+ *	Select the syscall dispatch routine for process p: the "fancy"
+ *	path when ktrace or systrace is active on the process, the
+ *	plain fast path otherwise.
+ */
+void
+syscall_intern(struct proc *p)
+{
+#ifdef KTRACE
+	if (p->p_traceflag & (KTRFAC_SYSCALL | KTRFAC_SYSRET)) {
+		p->p_md.md_syscall = syscall_fancy;
+		return;
+	}
+#endif
+#ifdef SYSTRACE
+	if (p->p_flag & P_SYSTRACE) {
+		p->p_md.md_syscall = syscall_fancy;
+		return;
+	}
+#endif
+	p->p_md.md_syscall = syscall_plain;
+}
+
+/*
+ * syscall_plain(frame, p, insn):
+ *
+ *	Fast-path system call dispatch (no ktrace/systrace hooks).
+ *	Decodes the SWI instruction, gathers arguments from the
+ *	trapframe registers (r0-r3) and, if more are needed, from the
+ *	user stack, invokes the system call, and stores the result
+ *	back into the trapframe: r0/r1 for return values, the carry
+ *	bit of the saved PSR as the error flag.
+ */
+void
+syscall_plain(struct trapframe *frame, struct proc *p, u_int32_t insn)
+{
+	const struct sysent *callp;
+	int code, error;
+	u_int nap, nargs;
+	register_t *ap, *args, copyargs[MAXARGS], rval[2];
+	union sigval sv;
+
+	switch (insn & SWI_OS_MASK) { /* Which OS is the SWI from? */
+	case SWI_OS_ARM: /* ARM-defined SWIs */
+		code = insn & 0x00ffffff;
+		switch (code) {
+		case SWI_IMB:
+		case SWI_IMBrange:
+			/*
+			 * Do nothing as there is no prefetch unit that needs
+			 * flushing
+			 */
+			break;
+		default:
+			/* Undefined so illegal instruction */
+			sv.sival_ptr = (u_int32_t *)(frame->tf_pc - INSN_SIZE);
+			trapsignal(p, SIGILL, 0, ILL_ILLOPN, sv);
+			break;
+		}
+
+		userret(p);
+		return;
+	case 0x000000: /* Old unofficial NetBSD range. */
+	case SWI_OS_NETBSD: /* New official NetBSD range. */
+		nap = 4;	/* up to four args arrive in r0-r3 */
+		break;
+	default:
+		/* Undefined so illegal instruction */
+		sv.sival_ptr = (u_int32_t *)(frame->tf_pc - INSN_SIZE);
+		trapsignal(p, SIGILL, 0, ILL_ILLOPN, sv);
+		userret(p);
+		return;
+	}
+
+	code = insn & 0x000fffff;
+
+	/* Register arguments start at r0 in the trapframe. */
+	ap = &frame->tf_r0;
+	callp = p->p_emul->e_sysent;
+
+	switch (code) {
+	case SYS_syscall:
+		/* Indirect syscall: real code is the first argument. */
+		code = *ap++;
+		nap--;
+		break;
+	case SYS___syscall:
+		/* Indirect syscall with a 64-bit (quad) code argument. */
+		code = ap[_QUAD_LOWWORD];
+		ap += 2;
+		nap -= 2;
+		break;
+	}
+
+	/* Out-of-range codes fall through to the emulation's nosys. */
+	if (code < 0 || code >= p->p_emul->e_nsysent) {
+		callp += p->p_emul->e_nosys;
+	} else {
+		callp += code;
+	}
+	nargs = callp->sy_argsize / sizeof(register_t);
+	if (nargs <= nap)
+		args = ap;
+	else {
+		/* Copy in the arguments that did not fit in registers. */
+		KASSERT(nargs <= MAXARGS);
+		memcpy(copyargs, ap, nap * sizeof(register_t));
+		error = copyin((void *)frame->tf_usr_sp, copyargs + nap,
+		    (nargs - nap) * sizeof(register_t));
+		if (error)
+			goto bad;
+		args = copyargs;
+	}
+
+#ifdef SYSCALL_DEBUG
+	scdebug_call(p, code, args);
+#endif
+	rval[0] = 0;
+	rval[1] = 0;
+	error = (*callp->sy_call)(p, args, rval);
+
+	switch (error) {
+	case 0:
+		/* Success: return values in r0/r1, carry clear. */
+		frame->tf_r0 = rval[0];
+		frame->tf_r1 = rval[1];
+
+#ifdef __PROG32
+		frame->tf_spsr &= ~PSR_C_bit;	/* carry bit */
+#else
+		frame->tf_r15 &= ~R15_FLAG_C;	/* carry bit */
+#endif
+		break;
+
+	case ERESTART:
+		/*
+		 * Reconstruct the pc to point at the swi.
+		 */
+		frame->tf_pc -= INSN_SIZE;
+		break;
+
+	case EJUSTRETURN:
+		/* nothing to do */
+		break;
+
+	default:
+	bad:
+		/* Error: errno in r0, carry set. */
+		frame->tf_r0 = error;
+#ifdef __PROG32
+		frame->tf_spsr |= PSR_C_bit;	/* carry bit */
+#else
+		frame->tf_r15 |= R15_FLAG_C;	/* carry bit */
+#endif
+		break;
+	}
+#ifdef SYSCALL_DEBUG
+	scdebug_ret(p, code, error, rval);
+#endif
+
+	userret(p);
+}
+
+/*
+ * syscall_fancy(frame, p, insn):
+ *
+ *	Full system call dispatch with ktrace/systrace hooks.  Same
+ *	decode/argument/result handling as syscall_plain(), plus
+ *	ktrsyscall/ktrsysret records and optional systrace redirection.
+ *	orig_error preserves the pre-translation error for the trace
+ *	records even when the return path rewrites `error'.
+ */
+void
+syscall_fancy(struct trapframe *frame, struct proc *p, u_int32_t insn)
+{
+	const struct sysent *callp;
+	int code, error, orig_error;
+	u_int nap, nargs;
+	register_t *ap, *args, copyargs[MAXARGS], rval[2];
+	union sigval sv;
+
+	switch (insn & SWI_OS_MASK) { /* Which OS is the SWI from? */
+	case SWI_OS_ARM: /* ARM-defined SWIs */
+		code = insn & 0x00ffffff;
+		switch (code) {
+		case SWI_IMB:
+		case SWI_IMBrange:
+			/*
+			 * Do nothing as there is no prefetch unit that needs
+			 * flushing
+			 */
+			break;
+		default:
+			/* Undefined so illegal instruction */
+			sv.sival_ptr = (u_int32_t *)(frame->tf_pc - INSN_SIZE);
+			trapsignal(p, SIGILL, 0, ILL_ILLOPN, sv);
+			break;
+		}
+
+		userret(p);
+		return;
+	case 0x000000: /* Old unofficial NetBSD range. */
+	case SWI_OS_NETBSD: /* New official NetBSD range. */
+		nap = 4;	/* up to four args arrive in r0-r3 */
+		break;
+	default:
+		/* Undefined so illegal instruction */
+		sv.sival_ptr = (u_int32_t *)(frame->tf_pc - INSN_SIZE);
+		trapsignal(p, SIGILL, 0, ILL_ILLOPN, sv);
+		userret(p);
+		return;
+	}
+
+	code = insn & 0x000fffff;
+
+	/* Register arguments start at r0 in the trapframe. */
+	ap = &frame->tf_r0;
+	callp = p->p_emul->e_sysent;
+
+	switch (code) {
+	case SYS_syscall:
+		/* Indirect syscall: real code is the first argument. */
+		code = *ap++;
+		nap--;
+		break;
+	case SYS___syscall:
+		/* Indirect syscall with a 64-bit (quad) code argument. */
+		code = ap[_QUAD_LOWWORD];
+		ap += 2;
+		nap -= 2;
+		break;
+	}
+
+	/* Out-of-range codes fall through to the emulation's nosys. */
+	if (code < 0 || code >= p->p_emul->e_nsysent) {
+		callp += p->p_emul->e_nosys;
+	} else {
+		callp += code;
+	}
+	nargs = callp->sy_argsize / sizeof(register_t);
+	if (nargs <= nap) {
+		args = ap;
+		error = 0;
+	} else {
+		/* Copy in the arguments that did not fit in registers. */
+		KASSERT(nargs <= MAXARGS);
+		memcpy(copyargs, ap, nap * sizeof(register_t));
+		error = copyin((void *)frame->tf_usr_sp, copyargs + nap,
+		    (nargs - nap) * sizeof(register_t));
+		args = copyargs;
+	}
+	orig_error = error;
+#ifdef SYSCALL_DEBUG
+	scdebug_call(p, code, args);
+#endif
+#ifdef KTRACE
+	/* Record the call (even if the argument copyin failed). */
+	if (KTRPOINT(p, KTR_SYSCALL))
+		ktrsyscall(p, code, callp->sy_argsize, args);
+#endif
+	if (error)
+		goto bad;
+
+	rval[0] = 0;
+	rval[1] = 0;
+#if NSYSTRACE > 0
+	if (ISSET(p->p_flag, P_SYSTRACE))
+		orig_error = error = systrace_redirect(code, p, args, rval);
+	else
+#endif
+		orig_error = error = (*callp->sy_call)(p, args, rval);
+
+	switch (error) {
+	case 0:
+		/* Success: return values in r0/r1, carry clear. */
+		frame->tf_r0 = rval[0];
+		frame->tf_r1 = rval[1];
+
+#ifdef __PROG32
+		frame->tf_spsr &= ~PSR_C_bit;	/* carry bit */
+#else
+		frame->tf_r15 &= ~R15_FLAG_C;	/* carry bit */
+#endif
+		break;
+
+	case ERESTART:
+		/*
+		 * Reconstruct the pc to point at the swi.
+		 */
+		frame->tf_pc -= INSN_SIZE;
+		break;
+
+	case EJUSTRETURN:
+		/* nothing to do */
+		break;
+
+	default:
+	bad:
+		/* Error: errno in r0, carry set. */
+		frame->tf_r0 = error;
+#ifdef __PROG32
+		frame->tf_spsr |= PSR_C_bit;	/* carry bit */
+#else
+		frame->tf_r15 |= R15_FLAG_C;	/* carry bit */
+#endif
+		break;
+	}
+#ifdef SYSCALL_DEBUG
+	scdebug_ret(p, code, orig_error, rval);
+#endif
+	userret(p);
+#ifdef KTRACE
+	if (KTRPOINT(p, KTR_SYSRET))
+		ktrsysret(p, code, orig_error, rval[0]);
+#endif
+}
+
+/*
+ * child_return(arg):
+ *
+ *	Return path for a newly forked child (reached via
+ *	proc_trampoline): make fork(2) appear to return 0 with the
+ *	carry (error) bit clear, then complete the return to user mode.
+ */
+void
+child_return(arg)
+	void *arg;
+{
+	struct proc *p = arg;
+	struct trapframe *frame = p->p_addr->u_pcb.pcb_tf;
+
+	frame->tf_r0 = 0;	/* the child sees fork() == 0 */
+#ifdef __PROG32
+	frame->tf_spsr &= ~PSR_C_bit;	/* carry bit */
+#else
+	frame->tf_r15 &= ~R15_FLAG_C;	/* carry bit */
+#endif
+
+	userret(p);
+#ifdef KTRACE
+	if (KTRPOINT(p, KTR_SYSRET)) {
+		ktrsysret(p, SYS_fork, 0, 0);
+	}
+#endif
+}
diff --git a/sys/arch/arm/arm/undefined.c b/sys/arch/arm/arm/undefined.c
new file mode 100644
index 00000000000..7a38f631b2e
--- /dev/null
+++ b/sys/arch/arm/arm/undefined.c
@@ -0,0 +1,329 @@
+/* $OpenBSD: undefined.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: undefined.c,v 1.22 2003/11/29 22:21:29 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 2001 Ben Harris.
+ * Copyright (c) 1995 Mark Brinicombe.
+ * Copyright (c) 1995 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * undefined.c
+ *
+ * Fault handler
+ *
+ * Created : 06/01/95
+ */
+
+#define FAST_FPE
+
+#include <sys/param.h>
+
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/signal.h>
+#include <sys/signalvar.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+#include <sys/syslog.h>
+#include <sys/vmmeter.h>
+#ifdef FAST_FPE
+#include <sys/acct.h>
+#endif
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/cpu.h>
+#include <machine/frame.h>
+#include <arm/undefined.h>
+#include <machine/trap.h>
+
+
+#ifdef acorn26
+#include <machine/machdep.h>
+#endif
+
+static int gdb_trapper(u_int, u_int, struct trapframe *, int);
+
+#ifdef FAST_FPE
+extern int want_resched;
+#endif
+
+LIST_HEAD(, undefined_handler) undefined_handlers[MAX_COPROCS];
+
+
+/*
+ * install_coproc_handler(coproc, handler):
+ *
+ *	Allocate and register an undefined-instruction handler for
+ *	coprocessor `coproc'.  Returns an opaque cookie to pass to
+ *	remove_coproc_handler().  May sleep (M_WAITOK).
+ */
+void *
+install_coproc_handler(int coproc, undef_handler_t handler)
+{
+	struct undefined_handler *uh;
+
+	KASSERT(coproc >= 0 && coproc < MAX_COPROCS);
+	KASSERT(handler != NULL); /* Used to be legal. */
+
+	/* XXX: M_TEMP??? */
+	MALLOC(uh, struct undefined_handler *, sizeof(*uh), M_TEMP, M_WAITOK);
+	uh->uh_handler = handler;
+	install_coproc_handler_static(coproc, uh);
+	return uh;
+}
+
+/*
+ * install_coproc_handler_static(coproc, uh):
+ *
+ *	Link a caller-owned handler record onto the head of the list
+ *	for `coproc' (newest handler is consulted first).
+ */
+void
+install_coproc_handler_static(int coproc, struct undefined_handler *uh)
+{
+
+	LIST_INSERT_HEAD(&undefined_handlers[coproc], uh, uh_link);
+}
+
+/*
+ * remove_coproc_handler(cookie):
+ *
+ *	Unlink and free a handler previously registered with
+ *	install_coproc_handler().  Must not be used for records
+ *	registered via install_coproc_handler_static().
+ */
+void
+remove_coproc_handler(void *cookie)
+{
+	struct undefined_handler *uh = cookie;
+
+	LIST_REMOVE(uh, uh_link);
+	FREE(uh, M_TEMP);
+}
+
+
+/*
+ * gdb_trapper(addr, insn, frame, code):
+ *
+ *	Default coprocessor-0 undefined-instruction handler: turns GDB
+ *	breakpoint instructions from user mode into SIGTRAP (and hands
+ *	kernel-mode ones to kgdb when configured).  Returns 0 when the
+ *	instruction was handled, nonzero to let other handlers try.
+ */
+static int
+gdb_trapper(u_int addr, u_int insn, struct trapframe *frame, int code)
+{
+	union sigval sv;
+	struct proc *p;
+	p = (curproc == NULL) ? &proc0 : curproc;
+
+	if (insn == GDB_BREAKPOINT || insn == GDB5_BREAKPOINT) {
+		if (code == FAULT_USER) {
+			sv.sival_int = addr;
+			trapsignal(p, SIGTRAP, 0, TRAP_BRKPT, sv);
+			return 0;
+		}
+#ifdef KGDB
+		return !kgdb_trap(T_BREAKPOINT, frame);
+#endif
+	}
+	return 1;
+}
+
+static struct undefined_handler gdb_uh;
+
+/*
+ * undefined_init():
+ *
+ *	Initialise the per-coprocessor handler lists and register the
+ *	built-in GDB breakpoint handler on coprocessor 0.
+ */
+void
+undefined_init()
+{
+	int loop;
+
+	/* Not actually necessary -- the initialiser is just NULL */
+	for (loop = 0; loop < MAX_COPROCS; ++loop)
+		LIST_INIT(&undefined_handlers[loop]);
+
+	/* Install handler for GDB breakpoints */
+	gdb_uh.uh_handler = gdb_trapper;
+	install_coproc_handler_static(0, &gdb_uh);
+}
+
+
+/*
+ * undefinedinstruction(frame):
+ *
+ *	Entry point for undefined-instruction exceptions.  Identifies
+ *	the coprocessor (if any) the faulting instruction targets and
+ *	walks that coprocessor's handler list; if nobody claims the
+ *	instruction, the process gets SIGILL (and the kernel drops
+ *	into DDB for kernel-mode faults).
+ */
+void
+undefinedinstruction(trapframe_t *frame)
+{
+	struct proc *p;
+	u_int fault_pc;
+	int fault_instruction;
+	int fault_code;
+	int coprocessor;
+	struct undefined_handler *uh;
+#ifdef VERBOSE_ARM32
+	int s;
+#endif
+	union sigval sv;
+
+	/* Enable interrupts if they were enabled before the exception. */
+#ifdef acorn26
+	if ((frame->tf_r15 & R15_IRQ_DISABLE) == 0)
+		int_on();
+#else
+	if (!(frame->tf_spsr & I32_bit))
+		enable_interrupts(I32_bit);
+#endif
+
+#ifndef acorn26
+	/* Back up over the faulting instruction. */
+	frame->tf_pc -= INSN_SIZE;
+#endif
+
+#ifdef __PROG26
+	fault_pc = frame->tf_r15 & R15_PC;
+#else
+	fault_pc = frame->tf_pc;
+#endif
+
+	/* Get the current proc structure or proc0 if there is none. */
+	p = (curproc == NULL) ? &proc0 : curproc;
+
+	/*
+	 * Make sure the program counter is correctly aligned so we
+	 * don't take an alignment fault trying to read the opcode.
+	 */
+	if (__predict_false((fault_pc & 3) != 0)) {
+		/* Give the user an illegal instruction signal. */
+		sv.sival_int = (u_int32_t) fault_pc;
+		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
+		userret(p);
+		return;
+	}
+
+	/*
+	 * Should use fuword() here .. but in the interests of squeezing every
+	 * bit of speed we will just use ReadWord(). We know the instruction
+	 * can be read as was just executed so this will never fail unless the
+	 * kernel is screwed up in which case it does not really matter does
+	 * it ?
+	 */
+
+	fault_instruction = *(u_int32_t *)fault_pc;
+
+	/* Update vmmeter statistics */
+	uvmexp.traps++;
+
+	/* Check for coprocessor instruction */
+
+	/*
+	 * According to the datasheets you only need to look at bit 27 of the
+	 * instruction to tell the difference between an undefined
+	 * instruction and a coprocessor instruction following an undefined
+	 * instruction trap.
+	 */
+
+	if ((fault_instruction & (1 << 27)) != 0)
+		coprocessor = (fault_instruction >> 8) & 0x0f;
+	else
+		coprocessor = 0;
+
+	/*
+	 * Get the current proc structure or proc0 if there is none.
+	 * NOTE(review): p was already fetched above; this refetch looks
+	 * redundant -- confirm before removing.
+	 */
+
+	if ((p = curproc) == 0)
+		p = &proc0;
+
+#ifdef __PROG26
+	if ((frame->tf_r15 & R15_MODE) == R15_MODE_USR) {
+#else
+	if ((frame->tf_spsr & PSR_MODE) == PSR_USR32_MODE) {
+#endif
+		/*
+		 * Modify the fault_code to reflect the USR/SVC state at
+		 * time of fault.
+		 */
+		fault_code = FAULT_USER;
+		p->p_addr->u_pcb.pcb_tf = frame;
+	} else
+		fault_code = 0;
+
+	/* OK this is where we do something about the instruction. */
+	LIST_FOREACH(uh, &undefined_handlers[coprocessor], uh_link)
+		if (uh->uh_handler(fault_pc, fault_instruction, frame,
+		    fault_code) == 0)
+			break;
+
+	if (uh == NULL) {
+		/* Fault has not been handled */
+
+#ifdef VERBOSE_ARM32
+		s = spltty();
+
+		if ((fault_instruction & 0x0f000010) == 0x0e000000) {
+			printf("CDP\n");
+			disassemble(fault_pc);
+		} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
+			printf("LDC/STC\n");
+			disassemble(fault_pc);
+		} else if ((fault_instruction & 0x0f000010) == 0x0e000010) {
+			printf("MRC/MCR\n");
+			disassemble(fault_pc);
+		} else if ((fault_instruction & ~INSN_COND_MASK)
+			 != (KERNEL_BREAKPOINT & ~INSN_COND_MASK)) {
+			printf("Undefined instruction\n");
+			disassemble(fault_pc);
+		}
+
+		splx(s);
+#endif
+
+		if ((fault_code & FAULT_USER) == 0) {
+			printf("Undefined instruction in kernel\n");
+#ifdef DDB
+			Debugger();
+#endif
+		}
+
+		sv.sival_int = frame->tf_pc;
+		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
+	}
+
+	if ((fault_code & FAULT_USER) == 0)
+		return;
+
+#ifdef FAST_FPE
+	/* Optimised exit code */
+	{
+		int sig;
+
+		/* take pending signals */
+
+		while ((sig = (CURSIG(p))) != 0) {
+			postsig(sig);
+		}
+
+		p->p_priority = p->p_usrpri;
+
+		/*
+		 * Check for reschedule request, at the moment there is only
+		 * 1 ast so this code should always be run
+		 */
+
+		if (want_resched) {
+			/*
+			 * We are being preempted.
+			 */
+			preempt(NULL);
+			while ((sig = (CURSIG(p))) != 0) {
+				postsig(sig);
+			}
+		}
+
+		/* XXX
+		curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
+		*/
+	}
+
+#else
+	userret(p);
+#endif
+}
diff --git a/sys/arch/arm/arm/vectors.S b/sys/arch/arm/arm/vectors.S
new file mode 100644
index 00000000000..41d5deb1879
--- /dev/null
+++ b/sys/arch/arm/arm/vectors.S
@@ -0,0 +1,104 @@
+/* $OpenBSD: vectors.S,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: vectors.S,v 1.4 2002/08/17 16:36:32 thorpej Exp $ */
+
+/*
+ * Copyright (C) 1994-1997 Mark Brinicombe
+ * Copyright (C) 1994 Brini
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of Brini may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "assym.h"
+#include <machine/asm.h>
+
+/*
+ * These are the exception vectors copied down to page 0.
+ *
+ * Note that FIQs are special; rather than using a level of
+ * indirection, we actually copy the FIQ code down into the
+ * vector page.
+ */
+
+	.text
+	.align	0
+	.global	_C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end)
+	.global	_C_LABEL(fiqvector)
+
+/*
+ * The exception vector page: one "ldr pc, <literal>" slot per
+ * exception, each loading the real handler address from the
+ * page0_data literal table below.  FIQ is special (last slot): when
+ * not using indirection, the FIQ code itself lives in the vector page.
+ */
+_C_LABEL(page0):
+	ldr	pc, .Lreset_target
+	ldr	pc, .Lundefined_target
+	ldr	pc, .Lswi_target
+	ldr	pc, .Lprefetch_abort_target
+	ldr	pc, .Ldata_abort_target
+	ldr	pc, .Laddress_exception_target
+	ldr	pc, .Lirq_target
+#ifdef __ARM_FIQ_INDIRECT
+	ldr	pc, .Lfiq_target
+#else
+.Lfiqvector:
+	.set	_C_LABEL(fiqvector), . - _C_LABEL(page0)
+	subs	pc, lr, #4	/* default FIQ: return immediately */
+	.org	.Lfiqvector + 0x100	/* reserve room for real FIQ code */
+#endif
+
+/* Literal table of handler addresses, copied with the vectors. */
+_C_LABEL(page0_data):
+.Lreset_target:
+	.word	reset_entry
+
+.Lundefined_target:
+	.word	undefined_entry
+
+.Lswi_target:
+	.word	swi_entry
+
+.Lprefetch_abort_target:
+	.word	prefetch_abort_entry
+
+.Ldata_abort_target:
+	.word	data_abort_entry
+
+.Laddress_exception_target:
+	.word	address_exception_entry
+
+.Lirq_target:
+	.word	irq_entry
+
+#ifdef __ARM_FIQ_INDIRECT
+.Lfiq_target:
+	.word	_C_LABEL(fiqvector)
+#else
+	.word	0	/* pad it out */
+#endif
+_C_LABEL(page0_end):
+
+#ifdef __ARM_FIQ_INDIRECT
+	/* Out-of-line FIQ vector; default handler just returns. */
+	.data
+	.align	0
+_C_LABEL(fiqvector):
+	subs	pc, lr, #4
+	.org	_C_LABEL(fiqvector) + 0x100
+#endif
diff --git a/sys/arch/arm/arm/vm_machdep.c b/sys/arch/arm/arm/vm_machdep.c
new file mode 100644
index 00000000000..5375f465e2a
--- /dev/null
+++ b/sys/arch/arm/arm/vm_machdep.c
@@ -0,0 +1,390 @@
+/* $OpenBSD: vm_machdep.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/*^I$NetBSD: vm_machdep.c,v 1.31 2004/01/04 11:33:29 jdolecek Exp $^I*/$
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * vm_machdep.c
+ *
+ * vm machine specific bits
+ *
+ * Created : 08/10/94
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/vnode.h>
+#include <sys/buf.h>
+#if 0
+#include <sys/pmc.h>
+#endif
+#include <sys/user.h>
+#include <sys/exec.h>
+#include <sys/syslog.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/cpu.h>
+#include <machine/pmap.h>
+#include <machine/reg.h>
+#include <machine/vmparam.h>
+
+#ifdef ARMFPE
+#include <arm/fpe-arm/armfpe.h>
+#endif
+
+extern pv_addr_t systempage;
+
+int process_read_regs __P((struct proc *p, struct reg *regs));
+int process_read_fpregs __P((struct proc *p, struct fpreg *regs));
+
+void switch_exit __P((struct proc *p, struct proc *p0,
+ void (*)(struct proc *)));
+extern void proc_trampoline __P((void));
+
+/*
+ * Special compilation symbols:
+ *
+ * STACKCHECKS - Fill undefined and supervisor stacks with a known pattern
+ * on forking and check the pattern on exit, reporting
+ * the amount of stack used.
+ */
+
+#if 0
+/*
+ * cpu_proc_fork: MD fork hook (currently disabled).  Would copy or
+ * reset performance-counter state from parent p1 to child p2.
+ */
+void
+cpu_proc_fork(p1, p2)
+	struct proc *p1, *p2;
+{
+
+#if defined(PERFCTRS)
+	if (PMC_ENABLED(p1))
+		pmc_md_fork(p1, p2);
+	else {
+		p2->p_md.pmc_enabled = 0;
+		p2->p_md.pmc_state = NULL;
+	}
+#endif
+}
+#endif
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the pcb and trap frame, making the child ready to run.
+ *
+ * Rig the child's kernel stack so that it will start out in
+ * proc_trampoline() and call child_return() with p2 as an
+ * argument. This causes the newly-created child process to go
+ * directly to user level with an apparent return value of 0 from
+ * fork(), while the parent process returns normally.
+ *
+ * p1 is the process being forked; if p1 == &proc0, we are creating
+ * a kernel thread, and the return path and argument are specified with
+ * `func' and `arg'.
+ *
+ * If an alternate user-level stack is requested (with non-zero values
+ * in both the stack and stacksize args), set up the user stack pointer
+ * accordingly.
+ */
+void
+cpu_fork(p1, p2, stack, stacksize, func, arg)
+	struct proc *p1;
+	struct proc *p2;
+	void *stack;
+	size_t stacksize;
+	void (*func) __P((void *));
+	void *arg;
+{
+	struct pcb *pcb = (struct pcb *)&p2->p_addr->u_pcb;
+	struct trapframe *tf;
+	struct switchframe *sf;
+
+#ifdef PMAP_DEBUG
+	/* XXX was curlwp (NetBSD lwp-ism); this tree has no lwps. */
+	if (pmap_debug_level >= 0)
+		printf("cpu_fork: %p %p %p %p\n", p1, p2, curproc, &proc0);
+#endif	/* PMAP_DEBUG */
+
+#if 0 /* XXX */
+	if (l1 == curlwp) {
+		/* Sync the PCB before we copy it. */
+		savectx(curpcb);
+	}
+#endif
+
+	/* Copy the pcb */
+	*pcb = p1->p_addr->u_pcb;
+
+	/*
+	 * Set up the undefined stack for the process.
+	 * Note: this stack is not in use if we are forking from p1
+	 */
+	pcb->pcb_un.un_32.pcb32_und_sp = (u_int)p2->p_addr +
+	    USPACE_UNDEF_STACK_TOP;
+	pcb->pcb_un.un_32.pcb32_sp = (u_int)p2->p_addr + USPACE_SVC_STACK_TOP;
+
+#ifdef STACKCHECKS
+	/* Fill the undefined stack with a known pattern */
+	memset(((u_char *)p2->p_addr) + USPACE_UNDEF_STACK_BOTTOM, 0xdd,
+	    (USPACE_UNDEF_STACK_TOP - USPACE_UNDEF_STACK_BOTTOM));
+	/* Fill the kernel stack with a known pattern */
+	memset(((u_char *)p2->p_addr) + USPACE_SVC_STACK_BOTTOM, 0xdd,
+	    (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM));
+#endif	/* STACKCHECKS */
+
+#ifdef PMAP_DEBUG
+	/*
+	 * XXX was p_lid / p_proc->p_vmspace (NetBSD lwp-isms); use
+	 * p_pid / p_vmspace so this compiles with PMAP_DEBUG defined.
+	 */
+	if (pmap_debug_level >= 0) {
+		printf("p1->procaddr=%p p1->procaddr->u_pcb=%p pid=%d pmap=%p\n",
+		    p1->p_addr, &p1->p_addr->u_pcb, p1->p_pid,
+		    p1->p_vmspace->vm_map.pmap);
+		printf("p2->procaddr=%p p2->procaddr->u_pcb=%p pid=%d pmap=%p\n",
+		    p2->p_addr, &p2->p_addr->u_pcb, p2->p_pid,
+		    p2->p_vmspace->vm_map.pmap);
+	}
+#endif	/* PMAP_DEBUG */
+
+	pmap_activate(p2);
+
+#ifdef ARMFPE
+	/* Initialise a new FP context for p2 and copy the context from p1 */
+	arm_fpe_core_initcontext(FP_CONTEXT(p2));
+	arm_fpe_copycontext(FP_CONTEXT(p1), FP_CONTEXT(p2));
+#endif	/* ARMFPE */
+
+	/* Build the child's trapframe at the top of its kernel stack. */
+	p2->p_addr->u_pcb.pcb_tf = tf =
+	    (struct trapframe *)pcb->pcb_un.un_32.pcb32_sp - 1;
+	*tf = *p1->p_addr->u_pcb.pcb_tf;
+
+	/*
+	 * If specified, give the child a different stack.
+	 */
+	if (stack != NULL)
+		tf->tf_usr_sp = (u_int)stack + stacksize;
+
+	/* Arrange for the child to start in proc_trampoline -> func(arg). */
+	sf = (struct switchframe *)tf - 1;
+	sf->sf_r4 = (u_int)func;
+	sf->sf_r5 = (u_int)arg;
+	sf->sf_pc = (u_int)proc_trampoline;
+	pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
+}
+
+#if 0
+/*
+ * cpu_setfunc: repoint an existing process's kernel entry at
+ * func(arg) via proc_trampoline (currently disabled/unused).
+ */
+void
+cpu_setfunc(struct proc *p, void (*func)(void *), void *arg)
+{
+	struct pcb *pcb = &p->p_addr->u_pcb;
+	struct trapframe *tf = pcb->pcb_tf;
+	struct switchframe *sf = (struct switchframe *)tf - 1;
+
+	sf->sf_r4 = (u_int)func;
+	sf->sf_r5 = (u_int)arg;
+	sf->sf_pc = (u_int)proc_trampoline;
+	pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
+}
+#endif
+
+
+/*
+ * cpu_exit(p):
+ *
+ *	Finish exiting process p: switch away to proc0's context and
+ *	let exit2() reclaim the dead process's resources.
+ */
+void
+cpu_exit(struct proc *p)
+{
+	pmap_update(p->p_vmspace->vm_map.pmap); /* XXX DSR help stability */
+	switch_exit(p, &proc0, exit2);
+}
+
+/*
+ * cpu_swapin(p):
+ *
+ *	MD hook run when a process is swapped back in.  The vector
+ *	page remapping is deliberately disabled -- see the comment in
+ *	cpu_swapout().
+ */
+void
+cpu_swapin(p)
+	struct proc *p;
+{
+#if 0
+
+	/* Don't do this. See the comment in cpu_swapout(). */
+#ifdef PMAP_DEBUG
+	if (pmap_debug_level >= 0)
+		printf("cpu_swapin(%p, %d, %s, %p)\n", l, l->l_lid,
+		    p->p_comm, p->p_vmspace->vm_map.pmap);
+#endif	/* PMAP_DEBUG */
+
+	if (vector_page < KERNEL_BASE) {
+		/* Map the vector page */
+		pmap_enter(p->p_vmspace->vm_map.pmap, vector_page,
+		    systempage.pv_pa, VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
+		pmap_update(p->p_vmspace->vm_map.pmap);
+	}
+#endif
+}
+
+
+/*
+ * cpu_swapout(l):
+ *
+ *	MD hook run when a process is swapped out.  The vector page
+ *	unmapping is deliberately disabled -- see the comment inside.
+ */
+void
+cpu_swapout(l)
+	struct proc *l;
+{
+#if 0
+	struct proc *p = l->l_proc;
+
+	/*
+	 * Don't do this! If the pmap is shared with another process,
+	 * it will lose its page0 entry. That's bad news indeed.
+	 */
+#ifdef PMAP_DEBUG
+	if (pmap_debug_level >= 0)
+		printf("cpu_swapout(%p, %d, %s, %p)\n", l, l->l_lid,
+		    p->p_comm, &p->p_vmspace->vm_map.pmap);
+#endif	/* PMAP_DEBUG */
+
+	if (vector_page < KERNEL_BASE) {
+		/* Free the system page mapping */
+		pmap_remove(p->p_vmspace->vm_map.pmap, vector_page,
+		    vector_page + PAGE_SIZE);
+		pmap_update(p->p_vmspace->vm_map.pmap);
+	}
+#endif
+}
+
+
+/*
+ * Move pages from one kernel virtual address to another.
+ * Both addresses are assumed to reside in the Sysmap,
+ * and size must be a multiple of PAGE_SIZE.
+ */
+
+/*
+ * pagemove(from, to, size):
+ *
+ *	Remap `size' bytes of kernel VA (a multiple of PAGE_SIZE) from
+ *	`from' to `to', page by page, without copying the data.
+ *	NOTE(review): `rv' is only consulted under DEBUG; harmless
+ *	unused-variable warning otherwise.
+ */
+void
+pagemove(from, to, size)
+	caddr_t from, to;
+	size_t size;
+{
+	paddr_t pa;
+	boolean_t rv;
+
+	if (size % PAGE_SIZE)
+		panic("pagemove: size=%08lx", (u_long) size);
+
+	while (size > 0) {
+		rv = pmap_extract(pmap_kernel(), (vaddr_t) from, &pa);
+#ifdef DEBUG
+		if (rv == FALSE)
+			panic("pagemove 2");
+		if (pmap_extract(pmap_kernel(), (vaddr_t) to, NULL) == TRUE)
+			panic("pagemove 3");
+#endif
+		/* Move the mapping: unmap old VA, map new VA at same PA. */
+		pmap_kremove((vaddr_t) from, PAGE_SIZE);
+		pmap_kenter_pa((vaddr_t) to, pa, VM_PROT_READ|VM_PROT_WRITE);
+		from += PAGE_SIZE;
+		to += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+	pmap_update(pmap_kernel());
+}
+
+/*
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
+ */
+/*
+ * vmapbuf(bp, len):
+ *
+ *	Map the user pages backing bp->b_data into kernel VA for
+ *	physio.  The original user address is stashed in b_saveaddr
+ *	for vunmapbuf() to restore.  Panics unless B_PHYS is set.
+ */
+void
+vmapbuf(bp, len)
+	struct buf *bp;
+	vsize_t len;
+{
+	vaddr_t faddr, taddr, off;
+	paddr_t fpa;
+
+
+#ifdef PMAP_DEBUG
+	if (pmap_debug_level >= 0)
+		printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
+		    (u_int)bp->b_data, (u_int)len);
+#endif	/* PMAP_DEBUG */
+
+	if ((bp->b_flags & B_PHYS) == 0)
+		panic("vmapbuf");
+
+	/* Save the user address and compute the page-aligned span. */
+	faddr = trunc_page((vaddr_t)bp->b_saveaddr = bp->b_data);
+	off = (vaddr_t)bp->b_data - faddr;
+	len = round_page(off + len);
+	taddr = uvm_km_valloc_wait(phys_map, len);
+	bp->b_data = (caddr_t)(taddr + off);
+
+	/*
+	 * The region is locked, so we expect that pmap_pte() will return
+	 * non-NULL.
+	 */
+	while (len) {
+		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
+		    faddr, &fpa);
+		pmap_enter(pmap_kernel(), taddr, fpa,
+		    VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+		faddr += PAGE_SIZE;
+		taddr += PAGE_SIZE;
+		len -= PAGE_SIZE;
+	}
+	pmap_update(pmap_kernel());
+}
+
+/*
+ * Unmap a previously-mapped user I/O request.
+ */
+/*
+ * vunmapbuf(bp, len):
+ *
+ *	Undo a previous vmapbuf(): remove the kernel mappings, free
+ *	the kernel VA, and restore bp->b_data from b_saveaddr.
+ */
+void
+vunmapbuf(bp, len)
+	struct buf *bp;
+	vsize_t len;
+{
+	vaddr_t addr, off;
+
+#ifdef PMAP_DEBUG
+	if (pmap_debug_level >= 0)
+		printf("vunmapbuf: bp=%08x buf=%08x len=%08x\n",
+		    (u_int)bp, (u_int)bp->b_data, (u_int)len);
+#endif	/* PMAP_DEBUG */
+
+	if ((bp->b_flags & B_PHYS) == 0)
+		panic("vunmapbuf");
+
+	/*
+	 * Make sure the cache does not have dirty data for the
+	 * pages we had mapped.
+	 */
+	addr = trunc_page((vaddr_t)bp->b_data);
+	off = (vaddr_t)bp->b_data - addr;
+	len = round_page(off + len);
+
+	pmap_remove(pmap_kernel(), addr, addr + len);
+	pmap_update(pmap_kernel());
+	uvm_km_free_wakeup(phys_map, addr, len);
+	bp->b_data = bp->b_saveaddr;
+	bp->b_saveaddr = 0;
+}
+
+/* End of vm_machdep.c */
diff --git a/sys/arch/arm/arm/vm_machdep_arm.c b/sys/arch/arm/arm/vm_machdep_arm.c
new file mode 100644
index 00000000000..03de8238bbb
--- /dev/null
+++ b/sys/arch/arm/arm/vm_machdep_arm.c
@@ -0,0 +1,100 @@
+/* $OpenBSD: vm_machdep_arm.c,v 1.1 2004/02/01 05:09:48 drahn Exp $ */
+/* $NetBSD: vm_machdep_arm.c,v 1.7 2003/06/29 22:28:08 fvdl Exp $ */
+
+/*
+ * Copyright (c) 1994-1998 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+
+#include <sys/core.h>
+#include <sys/exec.h>
+#include <sys/ptrace.h>
+#include <sys/signalvar.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/vnode.h>
+
+#include <machine/reg.h>
+
+
+/*
+ * Dump the machine specific segment at the start of a core dump.
+ */
+
+int
+cpu_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
+ struct core *chdr)
+{
+ int error;
+ struct {
+ struct reg regs;
+ struct fpreg fpregs;
+ } cpustate;
+ struct coreseg cseg;
+
+ CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
+ chdr->c_hdrsize = ALIGN(sizeof(*chdr));
+ chdr->c_seghdrsize = ALIGN(sizeof(cseg));
+ chdr->c_cpusize = sizeof(cpustate);
+
+ /* Save integer registers. */
+ error = process_read_regs(p, &cpustate.regs);
+ if (error)
+ return error;
+ /* Save floating point registers. */
+ error = process_read_fpregs(p, &cpustate.fpregs);
+ if (error)
+ return error;
+
+ CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
+ cseg.c_addr = 0;
+ cseg.c_size = chdr->c_cpusize;
+
+ error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
+ (off_t)chdr->c_hdrsize, UIO_SYSSPACE,
+ IO_NODELOCKED|IO_UNIT, cred, NULL, p);
+ if (error)
+ return error;
+
+ error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cpustate, sizeof(cpustate),
+ (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
+ IO_NODELOCKED|IO_UNIT, cred, NULL, p);
+ if (error)
+ return error;
+
+ chdr->c_nseg++;
+
+ return error;
+}
diff --git a/sys/arch/arm/conf/files.arm b/sys/arch/arm/conf/files.arm
new file mode 100644
index 00000000000..f3196ad4742
--- /dev/null
+++ b/sys/arch/arm/conf/files.arm
@@ -0,0 +1,141 @@
+# $OpenBSD: files.arm,v 1.1 2004/02/01 05:09:48 drahn Exp $
+# $NetBSD: files.arm,v 1.76 2003/11/05 12:53:15 scw Exp $
+
+# CPU types. Make sure to update <arm/cpuconf.h> if you change this list.
+#defflag opt_cputypes.h CPU_ARM2 CPU_ARM250 CPU_ARM3
+#defflag opt_cputypes.h CPU_ARM6 CPU_ARM7 CPU_ARM7TDMI CPU_ARM8
+# CPU_ARM9 CPU_ARM10 CPU_SA110 CPU_SA1100
+# CPU_SA1110 CPU_IXP12X0 CPU_XSCALE_80200
+# CPU_XSCALE_80321 CPU_XSCALE_PXA2X0
+# CPU_XSCALE_IXP425
+
+#defparam opt_cpuoptions.h XSCALE_CCLKCFG
+#defflag opt_cpuoptions.h XSCALE_CACHE_WRITE_THROUGH
+#defflag opt_cpuoptions.h XSCALE_CACHE_WRITE_BACK
+#defflag opt_cpuoptions.h XSCALE_NO_COALESCE_WRITES
+#defflag opt_cpuoptions.h XSCALE_CACHE_READ_WRITE_ALLOCATE
+#defflag opt_cpuoptions.h ARM32_DISABLE_ALIGNMENT_FAULTS
+
+# Interrupt implementation header definition.
+#defparam opt_arm_intr_impl.h ARM_INTR_IMPL
+
+# Board-specific bus_space(9) definitions
+#defflag opt_arm_bus_space.h __BUS_SPACE_HAS_STREAM_METHODS
+
+# Floating point emulator
+#defflag ARMFPE
+#file arch/arm/fpe-arm/armfpe_glue.S armfpe
+#file arch/arm/fpe-arm/armfpe_init.c armfpe
+#file arch/arm/fpe-arm/armfpe.S armfpe
+
+# PMAP_DEBUG (heavily abused option)
+#defflag PMAP_DEBUG
+
+# MI console support
+file dev/cons.c
+
+# generic networking files
+file arch/arm/arm/in_cksum_arm.S inet
+file netns/ns_cksum.c ns
+
+# DDB
+file arch/arm/arm/db_disasm.c ddb
+file arch/arm/arm/db_interface.c (ddb|kgdb)
+file arch/arm/arm/db_trace.c ddb
+file arch/arm/arm/db_machdep.c ddb
+file arch/arm/arm/kgdb_machdep.c kgdb
+
+# FIQ support
+file arch/arm/arm/fiq.c
+file arch/arm/arm/fiq_subr.S
+
+# mainbus files
+device mainbus { [base = -1], [dack = -1], [irq = -1] }
+attach mainbus at root
+file arch/arm/mainbus/mainbus.c mainbus
+file arch/arm/mainbus/mainbus_io.c mainbus
+file arch/arm/mainbus/mainbus_io_asm.S mainbus
+
+device cpu { }
+attach cpu at mainbus with cpu_mainbus
+file arch/arm/mainbus/cpu_mainbus.c cpu_mainbus
+
+# files related to debugging
+file arch/arm/arm/disassem.c
+
+# bus_space(9)
+define bus_space_generic
+file arch/arm/arm/bus_space_asm_generic.S bus_space_generic
+file arch/arm/arm/bus_space_notimpl.S
+
+file arch/arm/arm/arm_machdep.c
+file arch/arm/arm/ast.c
+file arch/arm/arm/bcopyinout.S
+file arch/arm/arm/blockio.S
+file arch/arm/arm/bootconfig.c
+file arch/arm/arm/compat_13_machdep.c compat_13
+file arch/arm/arm/copystr.S
+file arch/arm/arm/cpufunc.c
+file arch/arm/arm/cpufunc_asm.S
+file arch/arm/arm/cpufunc_asm_arm3.S cpu_arm3
+file arch/arm/arm/cpufunc_asm_arm67.S cpu_arm6 | cpu_arm7
+file arch/arm/arm/cpufunc_asm_arm7tdmi.S cpu_arm7tdmi
+file arch/arm/arm/cpufunc_asm_arm8.S cpu_arm8
+file arch/arm/arm/cpufunc_asm_arm9.S cpu_arm9
+file arch/arm/arm/cpufunc_asm_arm10.S cpu_arm10
+file arch/arm/arm/cpufunc_asm_armv4.S cpu_arm9 | cpu_arm10 |
+ cpu_sa110 |
+ cpu_sa1100 |
+ cpu_sa1110 |
+ cpu_ixp12x0 |
+ cpu_xscale_80200 |
+ cpu_xscale_80321 |
+ cpu_xscale_ixp425 |
+ cpu_xscale_pxa2x0
+file arch/arm/arm/cpufunc_asm_sa1.S cpu_sa110 | cpu_sa1100 |
+ cpu_sa1110 |
+ cpu_ixp12x0
+file arch/arm/arm/cpufunc_asm_sa11x0.S cpu_sa1100 | cpu_sa1110
+file arch/arm/arm/cpufunc_asm_xscale.S cpu_xscale_80200 |
+ cpu_xscale_80321 |
+ cpu_xscale_ixp425 |
+ cpu_xscale_pxa2x0
+file arch/arm/arm/cpufunc_asm_ixp12x0.S cpu_ixp12x0
+file arch/arm/arm/process_machdep.c
+file arch/arm/arm/procfs_machdep.c procfs
+file arch/arm/arm/sig_machdep.c
+file arch/arm/arm/sigcode.S
+file arch/arm/arm/syscall.c
+file arch/arm/arm/undefined.c
+# vectors.S gets included manually by Makefile.acorn26, since it needs
+# to be at the start of the text segment on those machines.
+file arch/arm/arm/vectors.S
+file arch/arm/arm/vm_machdep_arm.c
+
+# files common to arm implementations
+file arch/arm/arm/arm32_machdep.c
+file arch/arm/arm/bus_dma.c
+file arch/arm/arm/cpu.c
+file arch/arm/arm/cpuswitch.S
+file arch/arm/arm/exception.S
+file arch/arm/arm/fault.c
+file arch/arm/arm/fusu.S
+file arch/arm/arm/mem.c
+file arch/arm/arm/pmap.c
+file arch/arm/arm/setcpsr.S
+file arch/arm/arm/setstack.S
+file arch/arm/arm/stubs.c
+file arch/arm/arm/sys_machdep.c
+file arch/arm/arm/vm_machdep.c
+file arch/arm/arm/atomic.S
+
+# arm library functions
+file arch/arm/arm/bcopy_page.S
+
+# Linux binary compatibility (COMPAT_LINUX)
+#include "compat/ossaudio/files.ossaudio"
+#include "compat/linux/files.linux"
+#include "compat/linux/arch/arm/files.linux_arm"
+#file arch/arm/arm/linux_sigcode.S compat_linux
+#file arch/arm/arm/linux_syscall.c compat_linux
+#file arch/arm/arm/linux_trap.c compat_linux
diff --git a/sys/arch/arm/conf/files.footbridge b/sys/arch/arm/conf/files.footbridge
new file mode 100644
index 00000000000..01671454bf5
--- /dev/null
+++ b/sys/arch/arm/conf/files.footbridge
@@ -0,0 +1,22 @@
+# $OpenBSD: files.footbridge,v 1.1 2004/02/01 05:09:48 drahn Exp $
+# $NetBSD: files.footbridge,v 1.11 2003/01/03 01:06:40 thorpej Exp $
+#
+# Shared footbridge files information
+
+# DC21285 "Footbridge" specific files
+device footbridge {}: pcibus, bus_space_generic, todservice
+attach footbridge at mainbus
+file arch/arm/footbridge/footbridge.c footbridge
+file arch/arm/footbridge/footbridge_machdep.c footbridge
+file arch/arm/footbridge/footbridge_io.c footbridge
+file arch/arm/footbridge/footbridge_pci.c footbridge
+file arch/arm/arm/irq_dispatch.S
+file arch/arm/footbridge/footbridge_irqhandler.c footbridge
+file arch/arm/footbridge/footbridge_clock.c footbridge
+file arch/arm/arm/softintr.c footbridge
+
+# DC21285 "Footbridge" serial port
+device fcom: tty, bus_space_generic
+attach fcom at footbridge
+file arch/arm/footbridge/footbridge_com.c fcom needs-flag
+file arch/arm/footbridge/footbridge_com_io.c fcom
diff --git a/sys/arch/arm/footbridge/dc21285mem.h b/sys/arch/arm/footbridge/dc21285mem.h
new file mode 100644
index 00000000000..29ab4045e87
--- /dev/null
+++ b/sys/arch/arm/footbridge/dc21285mem.h
@@ -0,0 +1,90 @@
+/* $OpenBSD: dc21285mem.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: dc21285mem.h,v 1.2 2001/06/09 10:44:11 chris Exp $ */
+
+/*
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997,1998 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Physical memory map provided by the DC21285 'Footbridge'
+ */
+
+#define DC21285_SDRAM_BASE 0x00000000
+#define DC21285_SDRAM_SIZE 0x10000000 /* 256 MB */
+
+#define DC21285_SDRAM_A0MR 0x40000000
+#define DC21285_SDRAM_A1MR 0x40004000
+#define DC21285_SDRAM_A2MR 0x40008000
+#define DC21285_SDRAM_A3MR 0x4000C000
+
+#define DC21285_XBUS_XCS0 0x40010000
+#define DC21285_XBUS_XCS1 0x40011000
+#define DC21285_XBUS_XCS2 0x40012000
+#define DC21285_XBUS_NOCS 0x40013000
+
+#define DC21285_ROM_BASE 0x41000000
+#define DC21285_ROM_SIZE 0x01000000 /* 16MB */
+
+#define DC21285_ARMCSR_BASE 0x42000000
+#define DC21285_ARMCSR_SIZE 0x00100000 /* 1MB */
+
+#define DC21285_SA_CACHE_FLUSH_BASE 0x50000000
+#define DC21285_SA_CACHE_FLUSH_SIZE 0x01000000 /* 16MB */
+
+#define DC21285_OUTBOUND_WRITE_FLUSH 0x78000000
+
+#define DC21285_PCI_IACK_SPECIAL 0x79000000
+#define DC21285_PCI_TYPE_1_CONFIG 0x7A000000
+#define DC21285_PCI_TYPE_0_CONFIG 0x7B000000
+#define DC21285_PCI_IO_BASE 0x7C000000
+#define DC21285_PCI_IO_SIZE 0x00010000 /* 64K */
+#define DC21285_PCI_MEM_BASE 0x80000000
+#define DC21285_PCI_MEM_SIZE 0x80000000 /* 2GB */
+
+/*
+ * Standard Virtual memory map used for the DC21285 'Footbridge'
+ */
+#define DC21285_ARMCSR_VBASE 0xFD000000
+#define DC21285_ARMCSR_VSIZE 0x00100000 /* 1MB */
+#define DC21285_CACHE_FLUSH_VBASE 0xFD100000
+#define DC21285_CACHE_FLUSH_VSIZE 0x00100000 /* 1MB */
+#define DC21285_PCI_IO_VBASE 0xFD200000
+#define DC21285_PCI_IO_VSIZE 0x00100000 /* 1MB */
+#define DC21285_PCI_IACK_VBASE 0xFD300000
+#define DC21285_PCI_IACK_VSIZE 0x00100000 /* 1MB */
+#define DC21285_PCI_ISA_MEM_VBASE 0xFD400000
+#define DC21285_PCI_ISA_MEM_VSIZE 0x00100000 /* 1MB */
+#define DC21285_PCI_TYPE_1_CONFIG_VBASE 0xFE000000
+#define DC21285_PCI_TYPE_1_CONFIG_VSIZE 0x01000000 /* 16MB */
+#define DC21285_PCI_TYPE_0_CONFIG_VBASE 0xFF000000
+#define DC21285_PCI_TYPE_0_CONFIG_VSIZE 0x01000000 /* 16MB */
diff --git a/sys/arch/arm/footbridge/dc21285reg.h b/sys/arch/arm/footbridge/dc21285reg.h
new file mode 100644
index 00000000000..e1a690ccc1e
--- /dev/null
+++ b/sys/arch/arm/footbridge/dc21285reg.h
@@ -0,0 +1,393 @@
+/* $OpenBSD: dc21285reg.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: dc21285reg.h,v 1.3 2002/11/03 21:43:30 chris Exp $ */
+
+/*
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997,1998 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * DC21285 register definitions
+ */
+
+/* PCI registers in CSR space */
+
+#define VENDOR_ID 0x00
+#define DC21285_VENDOR_ID 0x1011
+#define DEVICE_ID 0x02
+#define DC21285_DEVICE_ID 0x1065
+#define REVISION 0x08
+#define CLASS 0x0A
+
+/* Other PCI control / status registers */
+
+#define OUTBOUND_INT_STATUS 0x030
+#define OUTBOUND_INT_MASK 0x034
+#define I2O_INBOUND_FIFO 0x040
+#define I2O_OUTBOUND_FIFO 0x044
+
+/* Mailbox registers */
+
+#define MAILBOX_0 0x050
+#define MAILBOX_1 0x054
+#define MAILBOX_2 0x058
+#define MAILBOX_3 0x05C
+
+#define DOORBELL 0x060
+#define DOORBELL_SETUP 0x064
+#define ROM_WRITE_BYTE_ADDRESS 0x068
+
+/* DMA Channel registers */
+
+#define DMA_CHAN_1_BYTE_COUNT 0x80
+#define DMA_CHAN_1_PCI_ADDR 0x84
+#define DMA_CHAN_1_SDRAM_ADDR 0x88
+#define DMA_CHAN_1_DESCRIPT 0x8C
+#define DMA_CHAN_1_CONTROL 0x90
+#define DMA_CHAN_2_BYTE_COUNT 0xA0
+#define DMA_CHAN_2_PCI_ADDR 0xA4
+#define DMA_CHAN_2_SDRAM_ADDR 0xA8
+#define DMA_CHAN_2_DESCRIPTOR 0xAC
+#define DMA_CHAN_2_CONTROL 0xB0
+
+/* Offsets into DMA descriptor */
+
+#define DMA_BYTE_COUNT 0
+#define DMA_PCI_ADDRESS 4
+#define DMA_SDRAM_ADDRESS 8
+#define DMA_NEXT_DESCRIPTOR 12
+
+/* DMA byte count register bits */
+
+#define DMA_INTERBURST_SHIFT 24
+#define DMA_PCI_TO_SDRAM 0
+#define DMA_SDRAM_TO_PCI (1 << 30)
+#define DMA_END_CHAIN (1 << 31)
+
+/* DMA control bits */
+
+#define DMA_ENABLE (1 << 0)
+#define DMA_TRANSFER_DONE (1 << 2)
+#define DMA_ERROR (1 << 3)
+#define DMA_REGISTOR_DESCRIPTOR (1 << 4)
+#define DMA_PCI_MEM_READ (0 << 5)
+#define DMA_PCI_MEM_READ_LINE (1 << 5)
+#define DMA_PCI_MEM_READ_MULTI1 (2 << 5)
+#define DMA_PCI_MEM_READ_MULTI2 (3 << 5)
+#define DMA_CHAIN_DONE (1 << 7)
+#define DMA_INTERBURST_4 (0 << 8)
+#define DMA_INTERBURST_8 (1 << 8)
+#define DMA_INTERBURST_16 (2 << 8)
+#define DMA_INTERBURST_32 (3 << 8)
+#define DMA_PCI_LENGTH_8 0
+#define DMA_PCI_LENGTH_16 (1 << 15)
+#define DMA_SDRAM_LENGTH_1 (0 << 16)
+#define DMA_SDRAM_LENGTH_2 (1 << 16)
+#define DMA_SDRAM_LENGTH_4 (2 << 16)
+#define DMA_SDRAM_LENGTH_8 (3 << 16)
+#define DMA_SDRAM_LENGTH_16 (4 << 16)
+
+/* CSR Base Address Mask */
+
+#define CSR_BA_MASK 0x0F8
+#define CSR_MASK_128B 0x00000000
+#define CSR_MASK_512KB 0x00040000
+#define CSR_MASK_1MB 0x000C0000
+#define CSR_MASK_2MB 0x001C0000
+#define CSR_MASK_4MB 0x003C0000
+#define CSR_MASK_8MB 0x007C0000
+#define CSR_MASK_16MB 0x00FC0000
+#define CSR_MASK_32MB 0x01FC0000
+#define CSR_MASK_64MB 0x03FC0000
+#define CSR_MASK_128MB 0x07FC0000
+#define CSR_MASK_256MB 0x0FFC0000
+#define CSR_BA_OFFSET 0x0FC
+
+/* SDRAM Base Address Mask */
+
+#define SDRAM_BA_MASK 0x100
+#define SDRAM_MASK_256KB 0x00000000
+#define SDRAM_MASK_512KB 0x00040000
+#define SDRAM_MASK_1MB 0x000C0000
+#define SDRAM_MASK_2MB 0x001C0000
+#define SDRAM_MASK_4MB 0x003C0000
+#define SDRAM_MASK_8MB 0x007C0000
+#define SDRAM_MASK_16MB 0x00FC0000
+#define SDRAM_MASK_32MB 0x01FC0000
+#define SDRAM_MASK_64MB 0x03FC0000
+#define SDRAM_MASK_128MB 0x07FC0000
+#define SDRAM_MASK_256MB 0x0FFC0000
+#define SDRAM_WINDOW_DISABLE (1 << 31)
+#define SDRAM_BA_OFFSET 0x104
+
+/* Expansion ROM Base Address Mask */
+
+#define EXPANSION_ROM_BA_MASK 0x108
+#define ROM_MASK_1MB 0x00000000
+#define ROM_MASK_2MB 0x00100000
+#define ROM_MASK_4MB 0x00300000
+#define ROM_MASK_8MB 0x00700000
+#define ROM_MASK_16MB 0x00F00000
+#define ROM_WINDOW_DISABLE (1 << 31)
+
+/* SDRAM configuration */
+
+#define SDRAM_TIMING 0x10C
+#define SDRAM_ARRAY_SIZE_0 0x0
+#define SDRAM_ARRAY_SIZE_1MB 0x1
+#define SDRAM_ARRAY_SIZE_2MB 0x2
+#define SDRAM_ARRAY_SIZE_4MB 0x3
+#define SDRAM_ARRAY_SIZE_8MB 0x4
+#define SDRAM_ARRAY_SIZE_16MB 0x5
+#define SDRAM_ARRAY_SIZE_32MB 0x6
+#define SDRAM_ARRAY_SIZE_64MB 0x7
+#define SDRAM_2_BANKS 0
+#define SDRAM_4_BANKS (1 << 3)
+#define SDRAM_ADDRESS_MUX_SHIFT 4
+#define SDRAM_ARRAY_BASE_SHIFT 20
+#define SDRAM_ADDRESS_SIZE_0 0x110
+#define SDRAM_ADDRESS_SIZE_1 0x114
+#define SDRAM_ADDRESS_SIZE_2 0x118
+#define SDRAM_ADDRESS_SIZE_3 0x11C
+
+/* I2O registers */
+
+#define I2O_INBOUND_FREE_HEAD 0x120
+#define I2O_INBOUND_POST_TAIL 0x124
+#define I2O_OUTBOUND_POST_HEAD 0x128
+#define I2O_OUTBOUND_FREE_TAIL 0x12c
+#define I2O_INBOUND_FREE_COUNT 0x130
+#define I2O_OUTBOUND_POST_COUNT 0x134
+#define I2O_INBOUND_POST_COUNT 0x138
+
+/* Control register */
+
+#define SA_CONTROL 0x13C
+#define INITIALIZE_COMPLETE (1 << 0)
+#define ASSERT_SERR (1 << 1)
+#define RECEIVED_SERR (1 << 3)
+#define SA_SDRAM_PARITY_ERROR (1 << 4)
+#define PCI_SDRAM_PARITY_ERROR (1 << 5)
+#define DMA_SDRAM_PARITY_ERROR (1 << 6)
+#define DISCARD_TIMER_EXPIRED (1 << 8)
+#define PCI_NOT_RESET (1 << 9)
+#define WATCHDOG_ENABLE (1 << 13)
+#define I2O_SIZE_256 (0 << 10)
+#define I2O_SIZE_512 (1 << 10)
+#define I2O_SIZE_1024 (2 << 10)
+#define I2O_SIZE_2048 (3 << 10)
+#define I2O_SIZE_4096 (4 << 10)
+#define I2O_SIZE_8192 (5 << 10)
+#define I2O_SIZE_16384 (6 << 10)
+#define I2O_SIZE_32768 (7 << 10)
+#define ROM_WIDTH_8 (3 << 14)
+#define ROM_WIDTH_16 (1 << 14)
+#define ROM_WIDTH_32 (2 << 14)
+#define ROM_ACCESS_TIME_SHIFT 16
+#define ROM_BURST_TIME_SHIFT 20
+#define ROM_TRISTATE_TIME_SHIFT 24
+#define XCS_DIRECTION_SHIFT 28
+#define PCI_CENTRAL_FUNCTION (1 << 31)
+
+#define PCI_ADDRESS_EXTENSION 0x140
+#define PREFETCHABLE_MEM_RANGE 0x144
+
+/* XBUS / PCI Arbiter registers */
+
+#define XBUS_CYCLE_ARBITER 0x148
+#define XBUS_CYCLE_0_SHIFT 0
+#define XBUS_CYCLE_1_SHIFT 3
+#define XBUS_CYCLE_2_SHIFT 6
+#define XBUS_CYCLE_3_SHIFT 9
+#define XBUS_CYCLE_STROBE_SHIFT 12
+#define XBUS_PCI_ARBITER (1 << 23)
+#define XBUS_INT_IN_L0_LOW 0
+#define XBUS_INT_IN_L0_HIGH (1 << 24)
+#define XBUS_INT_IN_L1_LOW 0
+#define XBUS_INT_IN_L1_HIGH (1 << 25)
+#define XBUS_INT_IN_L2_LOW 0
+#define XBUS_INT_IN_L2_HIGH (1 << 26)
+#define XBUS_INT_IN_L3_LOW 0
+#define XBUS_INT_IN_L3_HIGH (1 << 27)
+#define XBUS_INT_XCS0_LOW 0
+#define XBUS_INT_XCS0_HIGH (1 << 28)
+#define XBUS_INT_XCS1_LOW 0
+#define XBUS_INT_XCS1_HIGH (1 << 29)
+#define XBUS_INT_XCS2_LOW 0
+#define XBUS_INT_XCS2_HIGH (1 << 30)
+#define XBUS_PCI_INT_REQUEST (1 << 31)
+
+#define XBUS_IO_STROBE_MASK 0x14C
+#define XBUS_IO_STROBE_0_SHIFT 0
+#define XBUS_IO_STROBE_2_SHIFT 8
+#define XBUS_IO_STROBE_3_SHIFT 16
+#define XBUS_IO_STROBE_4_SHIFT 24
+
+#define DOORBELL_PCI_MASK 0x150
+#define DOORBELL_SA_MASK 0x154
+
+/* UART registers */
+
+#define UART_DATA 0x160
+#define UART_RX_STAT 0x164
+#define UART_PARITY_ERROR 0x01
+#define UART_FRAME_ERROR 0x02
+#define UART_OVERRUN_ERROR 0x04
+#define UART_RX_ERROR (UART_PARITY_ERROR | UART_FRAME_ERROR \
+ | UART_OVERRUN_ERROR)
+#define UART_H_UBRLCR 0x168
+#define UART_BREAK 0x01
+#define UART_PARITY_ENABLE 0x02
+#define UART_ODD_PARITY 0x00
+#define UART_EVEN_PARITY 0x04
+#define UART_STOP_BITS_1 0x00
+#define UART_STOP_BITS_2 0x08
+#define UART_ENABLE_FIFO 0x10
+#define UART_DATA_BITS_5 0x00
+#define UART_DATA_BITS_6 0x20
+#define UART_DATA_BITS_7 0x40
+#define UART_DATA_BITS_8 0x60
+#define UART_M_UBRLCR 0x16C
+#define UART_L_UBRLCR 0x170
+#define UART_BRD(fclk, x) (((fclk) / 4 / 16 / x) - 1)
+
+#define UART_CONTROL 0x174
+#define UART_ENABLE 0x01
+#define UART_SIR_ENABLE 0x02
+#define UART_IRDA_ENABLE 0x04
+#define UART_FLAGS 0x178
+#define UART_TX_BUSY 0x08
+#define UART_RX_FULL 0x10
+#define UART_TX_EMPTY 0x20
+
+/* Interrupt numbers for IRQ and FIQ registers */
+
+#define IRQ_RESERVED0 0x00
+#define IRQ_SOFTINT 0x01
+#define IRQ_SERIAL_RX 0x02
+#define IRQ_SERIAL_TX 0x03
+#define IRQ_TIMER_1 0x04
+#define IRQ_TIMER_2 0x05
+#define IRQ_TIMER_3 0x06
+#define IRQ_TIMER_4 0x07
+#define IRQ_IN_L0 0x08
+#define IRQ_IN_L1 0x09
+#define IRQ_IN_L2 0x0A
+#define IRQ_IN_L3 0x0B
+#define IRQ_XCS_L0 0x0C
+#define IRQ_XCS_L1 0x0D
+#define IRQ_XCS_L2 0x0E
+#define IRQ_DOORBELL 0x0F
+#define IRQ_DMA_1 0x10
+#define IRQ_DMA_2 0x11
+#define IRQ_PCI 0x12
+#define IRQ_PMCSR 0x13
+#define IRQ_RESERVED1 0x14
+#define IRQ_RESERVED2 0x15
+#define IRQ_BIST 0x16
+#define IRQ_SERR 0x17
+#define IRQ_SDRAM_PARITY 0x18
+#define IRQ_I2O 0x19
+#define IRQ_RESERVED3 0x1A
+#define IRQ_DISCARD_TIMER 0x1B
+#define IRQ_DATA_PARITY 0x1C
+#define IRQ_MASTER_ABORT 0x1D
+#define IRQ_TARGET_ABORT 0x1E
+#define IRQ_PARITY 0x1F
+
+/* IRQ and FIQ status / enable registers */
+
+#define IRQ_STATUS 0x180
+#define IRQ_RAW_STATUS 0x184
+#define IRQ_ENABLE 0x188
+#define IRQ_ENABLE_SET 0x188
+#define IRQ_ENABLE_CLEAR 0x18c
+#define IRQ_SOFT 0x190
+
+#define FIQ_STATUS 0x280
+#define FIQ_RAW_STATUS 0x284
+#define FIQ_ENABLE 0x288
+#define FIQ_ENABLE_SET 0x288
+#define FIQ_ENABLE_CLEAR 0x28c
+#define FIQ_SOFT 0x290
+
+/* Timer registers */
+
+/* Relative offsets and bases */
+
+#define TIMER_LOAD 0x00
+#define TIMER_VALUE 0x04
+#define TIMER_CONTROL 0x08
+#define TIMER_CLEAR 0x0C
+#define TIMER_1_BASE 0x300
+#define TIMER_2_BASE 0x320
+#define TIMER_3_BASE 0x340
+#define TIMER_4_BASE 0x360
+
+/* Control register bits */
+
+#define TIMER_FCLK 0x00
+#define TIMER_FCLK_16 0x04
+#define TIMER_FCLK_256 0x08
+#define TIMER_EXTERNAL 0x0C
+#define TIMER_MODE_FREERUN 0x00
+#define TIMER_MODE_PERIODIC 0x40
+#define TIMER_ENABLE 0x80
+
+/* Maximum timer value */
+
+#define TIMER_MAX_VAL 0x00FFFFFF
+
+/* Specific registers */
+
+#define TIMER_1_LOAD 0x300
+#define TIMER_1_VALUE 0x304
+#define TIMER_1_CONTROL 0x308
+#define TIMER_1_CLEAR 0x30C
+#define TIMER_2_LOAD 0x320
+#define TIMER_2_VALUE 0x324
+#define TIMER_2_CONTROL 0x328
+#define TIMER_2_CLEAR 0x32C
+#define TIMER_3_LOAD 0x340
+#define TIMER_3_VALUE 0x344
+#define TIMER_3_CONTROL 0x348
+#define TIMER_3_CLEAR 0x34C
+#define TIMER_4_LOAD 0x360
+#define TIMER_4_VALUE 0x364
+#define TIMER_4_CONTROL 0x368
+#define TIMER_4_CLEAR 0x36C
+
+/* Miscellaneous definitions */
+
+#ifndef FCLK
+#define FCLK 50000000
+#endif
diff --git a/sys/arch/arm/footbridge/footbridge.c b/sys/arch/arm/footbridge/footbridge.c
new file mode 100644
index 00000000000..35d2f3889fb
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridge.c
@@ -0,0 +1,293 @@
+/* $OpenBSD: footbridge.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridge.c,v 1.7 2002/05/16 01:01:33 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997,1998 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+
+#include <dev/pci/pcivar.h>
+#define _ARM32_BUS_DMA_PRIVATE
+#include <machine/bus.h>
+#include <machine/intr.h>
+
+#include <arm/cpuconf.h>
+#include <arm/cpufunc.h>
+
+#include <arm/mainbus/mainbus.h>
+#include <arm/footbridge/footbridgevar.h>
+#include <arm/footbridge/dc21285reg.h>
+#include <arm/footbridge/dc21285mem.h>
+#include <arm/footbridge/footbridge.h>
+
+/*
+ * DC21285 'Footbridge' device
+ *
+ * This probes and attaches the footbridge device
+ * It then configures any children
+ */
+
+/* Declare prototypes */
+
+static int footbridge_match __P((struct device *parent, void *cf,
+ void *aux));
+static void footbridge_attach __P((struct device *parent, struct device *self,
+ void *aux));
+static int footbridge_print __P((void *aux, const char *pnp));
+static int footbridge_intr __P((void *arg));
+
+/* Driver and attach structures */
+struct cfattach footbridge_ca = {
+ sizeof(struct footbridge_softc), footbridge_match, footbridge_attach
+};
+
+struct cfdriver footbridge_cd = {
+ NULL, "footbridge", DV_DULL
+};
+
+/* Various bus space tags */
+extern struct bus_space footbridge_bs_tag;
+extern void footbridge_create_io_bs_tag(bus_space_tag_t t, void *cookie);
+extern void footbridge_create_mem_bs_tag(bus_space_tag_t t, void *cookie);
+struct bus_space footbridge_csr_tag;
+struct bus_space footbridge_pci_io_bs_tag;
+struct bus_space footbridge_pci_mem_bs_tag;
+extern struct arm32_pci_chipset footbridge_pci_chipset;
+extern struct arm32_bus_dma_tag footbridge_pci_bus_dma_tag;
+
+/* Used in footbridge_clock.c */
+struct footbridge_softc *clock_sc;
+
+/* Set to non-zero to enable verbose reporting of footbridge system ints */
+int footbridge_intr_report = 0;
+
+int footbridge_found;
+
+void
+footbridge_pci_bs_tag_init(void)
+{
+ /* Set up the PCI bus tags */
+ footbridge_create_io_bs_tag(&footbridge_pci_io_bs_tag,
+ (void *)DC21285_PCI_IO_VBASE);
+ footbridge_create_mem_bs_tag(&footbridge_pci_mem_bs_tag,
+ (void *)DC21285_PCI_MEM_BASE);
+}
+
+/*
+ * int footbridgeprint(void *aux, const char *name)
+ *
+ * print configuration info for children
+ */
+
+static int
+footbridge_print(aux, pnp)
+ void *aux;
+ const char *pnp;
+{
+ union footbridge_attach_args *fba = aux;
+
+ if (pnp)
+ printf("%s at %s", fba->fba_name, pnp);
+ if (strcmp(fba->fba_name, "pci") == 0)
+ printf(" bus %d", fba->fba_pba.pba_bus);
+ return(UNCONF);
+}
+
+/*
+ * int footbridge_match(struct device *parent, struct cfdata *cf, void *aux)
+ *
+ * Just return ok for this if it is device 0
+ */
+
+static int
+footbridge_match(parent, vcf, aux)
+ struct device *parent;
+ void *vcf;
+ void *aux;
+{
+ struct mainbus_attach_args *ma = aux;
+ struct cfdata *cf = (struct cfdata *)vcf;
+
+ return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
+}
+
+
+/*
+ * void footbridge_attach(struct device *parent, struct device *dev, void *aux)
+ *
+ */
+
+static void
+footbridge_attach(parent, self, aux)
+ struct device *parent;
+ struct device *self;
+ void *aux;
+{
+ struct footbridge_softc *sc = (struct footbridge_softc *)self;
+ union footbridge_attach_args fba;
+ int vendor, device, rev;
+
+ /* There can only be 1 footbridge. */
+ footbridge_found = 1;
+
+ clock_sc = sc;
+
+ sc->sc_iot = &footbridge_bs_tag;
+
+ /* Map the Footbridge */
+ if (bus_space_map(sc->sc_iot, DC21285_ARMCSR_VBASE,
+ DC21285_ARMCSR_VSIZE, 0, &sc->sc_ioh))
+ panic("%s: Cannot map registers", self->dv_xname);
+
+ /* Read the ID to make sure it is what we think it is */
+ vendor = bus_space_read_2(sc->sc_iot, sc->sc_ioh, VENDOR_ID);
+ device = bus_space_read_2(sc->sc_iot, sc->sc_ioh, DEVICE_ID);
+ rev = bus_space_read_1(sc->sc_iot, sc->sc_ioh, REVISION);
+ if (vendor != DC21285_VENDOR_ID && device != DC21285_DEVICE_ID)
+ panic("%s: Unrecognised ID", self->dv_xname);
+
+ printf(": DC21285 rev %d\n", rev);
+
+ /* Disable all interrupts from the footbridge */
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, IRQ_ENABLE_CLEAR, 0xffffffff);
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, FIQ_ENABLE_CLEAR, 0xffffffff);
+
+/* bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x18, 0x40000000);*/
+
+ /* Install a generic handler to catch a load of system interrupts */
+ sc->sc_serr_ih = footbridge_intr_claim(IRQ_SERR, IPL_HIGH,
+ "serr", footbridge_intr, sc);
+ sc->sc_sdram_par_ih = footbridge_intr_claim(IRQ_SDRAM_PARITY, IPL_HIGH,
+ "sdram parity", footbridge_intr, sc);
+ sc->sc_data_par_ih = footbridge_intr_claim(IRQ_DATA_PARITY, IPL_HIGH,
+ "data parity", footbridge_intr, sc);
+ sc->sc_master_abt_ih = footbridge_intr_claim(IRQ_MASTER_ABORT, IPL_HIGH,
+ "mast abt", footbridge_intr, sc);
+ sc->sc_target_abt_ih = footbridge_intr_claim(IRQ_TARGET_ABORT, IPL_HIGH,
+ "targ abt", footbridge_intr, sc);
+ sc->sc_parity_ih = footbridge_intr_claim(IRQ_PARITY, IPL_HIGH,
+ "parity", footbridge_intr, sc);
+
+ /* Set up the PCI bus tags */
+ footbridge_create_io_bs_tag(&footbridge_pci_io_bs_tag,
+ (void *)DC21285_PCI_IO_VBASE);
+ footbridge_create_mem_bs_tag(&footbridge_pci_mem_bs_tag,
+ (void *)DC21285_PCI_MEM_BASE);
+
+ /* calibrate the delay loop */
+ calibrate_delay();
+ /* Attach the PCI bus */
+ fba.fba_pba.pba_busname = "pci";
+ fba.fba_pba.pba_pc = &footbridge_pci_chipset;
+ fba.fba_pba.pba_iot = &footbridge_pci_io_bs_tag;
+ fba.fba_pba.pba_memt = &footbridge_pci_mem_bs_tag;
+ fba.fba_pba.pba_dmat = &footbridge_pci_bus_dma_tag;
+ /*
+ fba.fba_pba.pba_flags = PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;
+ */
+ fba.fba_pba.pba_bus = 0;
+ config_found(self, &fba.fba_pba, footbridge_print);
+
+ /* Attach a time-of-day clock device */
+ fba.fba_tca.ta_name = "todclock";
+ fba.fba_tca.ta_rtc_arg = NULL;
+ fba.fba_tca.ta_rtc_write = NULL;
+ fba.fba_tca.ta_rtc_read = NULL;
+ fba.fba_tca.ta_flags = TODCLOCK_FLAG_FAKE;
+ config_found(self, &fba.fba_tca, footbridge_print);
+
+ /* Attach uart device */
+ fba.fba_fca.fca_name = "fcom";
+ fba.fba_fca.fca_iot = sc->sc_iot;
+ fba.fba_fca.fca_ioh = sc->sc_ioh;
+ fba.fba_fca.fca_rx_irq = IRQ_SERIAL_RX;
+ fba.fba_fca.fca_tx_irq = IRQ_SERIAL_TX;
+ config_found(self, &fba.fba_fca, footbridge_print);
+
+ /* Setup fast SA110 cache clean area */
+#ifdef CPU_SA110
+ if (cputype == CPU_ID_SA110)
+ footbridge_sa110_cc_setup();
+#endif /* CPU_SA110 */
+
+}
+
+/* Generic footbridge interrupt handler */
+
+int
+footbridge_intr(arg)
+ void *arg;
+{
+ struct footbridge_softc *sc = arg;
+ u_int ctrl, intr;
+
+ /*
+ * Read the footbridge control register and check for
+ * SERR and parity errors
+ */
+ ctrl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, SA_CONTROL);
+ intr = ctrl & (RECEIVED_SERR | SA_SDRAM_PARITY_ERROR |
+ PCI_SDRAM_PARITY_ERROR | DMA_SDRAM_PARITY_ERROR);
+ if (intr) {
+ /* Report the interrupt if reporting is enabled */
+ if (footbridge_intr_report)
+ printf("footbridge_intr: ctrl=%08x\n", intr);
+ /* Clear the interrupt state */
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh, SA_CONTROL,
+ ctrl | intr);
+ }
+ /*
+ * Read the PCI status register and check for errors
+ */
+ ctrl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, PCI_COMMAND_STATUS_REG);
+ intr = ctrl & (PCI_STATUS_PARITY_ERROR | PCI_STATUS_MASTER_TARGET_ABORT
+ | PCI_STATUS_MASTER_ABORT | PCI_STATUS_SPECIAL_ERROR
+ | PCI_STATUS_PARITY_DETECT);
+ if (intr) {
+ /* Report the interrupt if reporting is enabled */
+ if (footbridge_intr_report)
+ printf("footbridge_intr: pcistat=%08x\n", intr);
+ /* Clear the interrupt state */
+ bus_space_write_4(sc->sc_iot, sc->sc_ioh,
+ PCI_COMMAND_STATUS_REG, ctrl | intr);
+ }
+ return(0);
+}
+
+/* End of footbridge.c */
diff --git a/sys/arch/arm/footbridge/footbridge.h b/sys/arch/arm/footbridge/footbridge.h
new file mode 100644
index 00000000000..3da3c6af1a4
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridge.h
@@ -0,0 +1,17 @@
+/* $OpenBSD: footbridge.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridge.h,v 1.2 2002/05/04 10:04:42 chris Exp $ */
+
+#ifndef _FOOTBRIDGE_H_
+#define _FOOTBRIDGE_H_
+
+#include <sys/termios.h>
+#include <arm/bus.h>
+/* Prototypes for footbridge bus-space setup, the fcom console, and delay calibration. */
+void footbridge_pci_bs_tag_init __P((void));
+void footbridge_sa110_cc_setup __P((void));
+void footbridge_create_io_bs_tag __P((struct bus_space *, void *));
+void footbridge_create_mem_bs_tag __P((struct bus_space *, void *));
+int fcomcnattach __P((u_int, int, tcflag_t));
+int fcomcndetach __P((void));
+void calibrate_delay __P((void));
+
+#endif
diff --git a/sys/arch/arm/footbridge/footbridge_clock.c b/sys/arch/arm/footbridge/footbridge_clock.c
new file mode 100644
index 00000000000..fbe733a4367
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridge_clock.c
@@ -0,0 +1,482 @@
+/* $OpenBSD: footbridge_clock.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridge_clock.c,v 1.17 2003/03/23 14:12:25 chris Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+
+/* Include header files */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/time.h>
+#include <sys/device.h>
+
+#include <machine/intr.h>
+
+#include <arm/cpufunc.h>
+
+#include <arm/footbridge/dc21285reg.h>
+#include <arm/footbridge/footbridgevar.h>
+#include <arm/footbridge/footbridge.h>
+
+extern struct footbridge_softc *clock_sc;
+extern u_int dc21285_fclk;
+
+int clockhandler __P((void *));
+int statclockhandler __P((void *));
+static int load_timer __P((int, int));
+
+/*
+ * Statistics clock variance, in usec. Variance must be a
+ * power of two. Since this gives us an even number, not an odd number,
+ * we discard one case and compensate. That is, a variance of 1024 would
+ * give us offsets in [0..1023]. Instead, we take offsets in [1..1023].
+ * This is symmetric about the point 512, or statvar/2, and thus averages
+ * to that value (assuming uniform random numbers).
+ */
+const int statvar = 1024;
+int statmin; /* minimum stat clock count in ticks */
+int statcountperusec; /* number of ticks per usec at current stathz */
+int statprev; /* last value of we set statclock to */
+
+#if 0
+static int clockmatch __P((struct device *parent, struct cfdata *cf, void *aux));
+static void clockattach __P((struct device *parent, struct device *self, void *aux));
+
+CFATTACH_DECL(footbridge_clock, sizeof(struct clock_softc),
+ clockmatch, clockattach, NULL, NULL);
+
+/*
+ * int clockmatch(struct device *parent, void *match, void *aux)
+ *
+ * Just return ok for this if it is device 0
+ */
+
+static int
+clockmatch(parent, cf, aux)
+ struct device *parent;
+ struct cfdata *cf;
+ void *aux;
+{
+ union footbridge_attach_args *fba = aux;
+
+ if (strcmp(fba->fba_ca.ca_name, "clk") == 0)
+ return(1);
+ return(0);
+}
+
+
+/*
+ * void clockattach(struct device *parent, struct device *dev, void *aux)
+ *
+ */
+
+static void
+clockattach(parent, self, aux)
+ struct device *parent;
+ struct device *self;
+ void *aux;
+{
+ struct clock_softc *sc = (struct clock_softc *)self;
+ union footbridge_attach_args *fba = aux;
+
+ sc->sc_iot = fba->fba_ca.ca_iot;
+ sc->sc_ioh = fba->fba_ca.ca_ioh;
+
+ clock_sc = sc;
+
+ /* Cannot do anything until cpu_initclocks() has been called */
+
+ printf("\n");
+}
+#endif
+
+/*
+ * int clockhandler(struct clockframe *frame)
+ *
+ * Function called by timer 1 interrupts.
+ * This just clears the interrupt condition and calls hardclock().
+ */
+
+int
+clockhandler(aframe)
+	void *aframe;
+{
+	struct clockframe *frame = aframe;
+	/* Ack the timer 1 interrupt before running the clock tick. */
+	bus_space_write_4(clock_sc->sc_iot, clock_sc->sc_ioh,
+	    TIMER_1_CLEAR, 0);
+	hardclock(frame);
+	{
+		/*
+		 * NOTE(review): debug hack — mirrors the tick count onto a
+		 * LED/debug output every interrupt; presumably left in from
+		 * bring-up and a candidate for removal.
+		 */
+		void debugled(u_int32_t);
+		extern int ticks;
+		debugled(ticks);
+	}
+	return(0);	/* Pass the interrupt on down the chain */
+}
+
+/*
+ * int statclockhandler(struct clockframe *frame)
+ *
+ * Function called by timer 2 interrupts.
+ * This just clears the interrupt condition and calls statclock().
+ */
+
+int
+statclockhandler(aframe)
+	void *aframe;
+{
+	struct clockframe *frame = aframe;
+	int newint, r;
+	int currentclock ;
+
+	/* start the clock off again */
+	bus_space_write_4(clock_sc->sc_iot, clock_sc->sc_ioh,
+	    TIMER_2_CLEAR, 0);
+
+	/*
+	 * Pick a random offset in [1, statvar-1] usec-equivalents so the
+	 * statistics clock cannot be synchronized against (see the statvar
+	 * comment at the top of this file).
+	 */
+	do {
+		r = random() & (statvar-1);
+	} while (r == 0);
+	newint = statmin + (r * statcountperusec);
+
+	/* fetch the current count */
+	currentclock = bus_space_read_4(clock_sc->sc_iot, clock_sc->sc_ioh,
+	    TIMER_2_VALUE);
+
+	/*
+	 * work out how much time has run, add another usec for time spent
+	 * here
+	 */
+	r = ((statprev - currentclock) + statcountperusec);
+
+	if (r < newint) {
+		newint -= r;
+		r = 0;
+	}
+	else
+		printf("statclockhandler: Statclock overrun\n");
+
+
+	/*
+	 * update the clock to the new counter, this reloads the existing
+	 * timer
+	 */
+	bus_space_write_4(clock_sc->sc_iot, clock_sc->sc_ioh,
+	    TIMER_2_LOAD, newint);
+	statprev = newint;
+	statclock(frame);
+	if (r)
+		/*
+		 * We've completely overrun the previous interval,
+		 * make sure we report the correct number of ticks.
+		 */
+		statclock(frame);
+
+	return(0);	/* Pass the interrupt on down the chain */
+}
+
+/*
+ * load_timer(base, hz)
+ *
+ * Program the periodic DC21285 timer at register offset 'base' to fire
+ * 'hz' times a second, picking the smallest fclk prescaler (1, 16 or
+ * 256) that lets the count fit in the counter.  Returns the count that
+ * was loaded, in (prescaled) timer ticks.
+ */
+static int
+load_timer(base, hz)
+	int base;
+	int hz;
+{
+	unsigned int timer_count;
+	int control;
+
+	timer_count = dc21285_fclk / hz;
+	if (timer_count > TIMER_MAX_VAL * 16) {
+		/* Needs /256 prescale; drop 8 bits of count to match. */
+		control = TIMER_FCLK_256;
+		timer_count >>= 8;
+	} else if (timer_count > TIMER_MAX_VAL) {
+		/* Needs /16 prescale; drop 4 bits of count to match. */
+		control = TIMER_FCLK_16;
+		timer_count >>= 4;
+	} else
+		control = TIMER_FCLK;
+
+	control |= (TIMER_ENABLE | TIMER_MODE_PERIODIC);
+	bus_space_write_4(clock_sc->sc_iot, clock_sc->sc_ioh,
+	    base + TIMER_LOAD, timer_count);
+	bus_space_write_4(clock_sc->sc_iot, clock_sc->sc_ioh,
+	    base + TIMER_CONTROL, control);
+	bus_space_write_4(clock_sc->sc_iot, clock_sc->sc_ioh,
+	    base + TIMER_CLEAR, 0);
+	return(timer_count);
+}
+
+/*
+ * void setstatclockrate(int hz)
+ *
+ * Set the stat clock rate. The stat clock uses timer2
+ */
+
+void
+setstatclockrate(hz)
+	int hz;
+{
+	int statint;
+	int countpersecond;
+	int statvarticks;
+
+	/* statint == num in counter to drop by desired hz */
+	statint = statprev = clock_sc->sc_statclock_count =
+	    load_timer(TIMER_2_BASE, hz);
+
+	/* Get the total ticks a second */
+	countpersecond = statint * hz;
+
+	/* now work out how many ticks per usec (integer divide; remainder dropped) */
+	statcountperusec = countpersecond / 1000000;
+
+	/* calculate a variance range of statvar */
+	statvarticks = statcountperusec * statvar;
+
+	/* minimum is statint - 50% of variant */
+	statmin = statint - (statvarticks / 2);
+}
+
+/*
+ * void cpu_initclocks(void)
+ *
+ * Initialise the clocks.
+ *
+ * Timer 1 is used for the main system clock (hardclock)
+ * Timer 2 is used for the statistics clock (statclock)
+ */
+
+void
+cpu_initclocks()
+{
+	/*
+	 * stathz and profhz should be set to something, we have the timer.
+	 * NOTE(review): the fallback below is disabled; presumably stathz
+	 * and profhz are set elsewhere before this runs — confirm.
+	 */
+#if 0
+	if (stathz == 0)
+		stathz = hz;
+
+	if (profhz == 0)
+		profhz = stathz * 5;
+#endif
+
+	/* Report the clock frequencies */
+	printf("clock: hz=%d stathz = %d profhz = %d\n", hz, stathz, profhz);
+
+	/* Setup timer 1 and claim interrupt */
+	clock_sc->sc_clock_count = load_timer(TIMER_1_BASE, hz);
+
+	/*
+	 * Use ticks per 256us for accuracy since ticks per us is often
+	 * fractional e.g. @ 66MHz
+	 */
+	clock_sc->sc_clock_ticks_per_256us =
+	    ((((clock_sc->sc_clock_count * hz) / 1000) * 256) / 1000);
+	clock_sc->sc_clockintr = footbridge_intr_claim(IRQ_TIMER_1, IPL_CLOCK,
+	    "tmr1 hard clk", clockhandler, 0);
+
+	if (clock_sc->sc_clockintr == NULL)
+		panic("%s: Cannot install timer 1 interrupt handler",
+		    clock_sc->sc_dev.dv_xname);
+
+	/* If stathz is non-zero then setup the stat clock */
+	if (stathz) {
+		/* Setup timer 2 and claim interrupt */
+		setstatclockrate(stathz);
+		clock_sc->sc_statclockintr = footbridge_intr_claim(IRQ_TIMER_2, IPL_STATCLOCK,
+		    "tmr2 stat clk", statclockhandler, 0);
+		if (clock_sc->sc_statclockintr == NULL)
+			panic("%s: Cannot install timer 2 interrupt handler",
+			    clock_sc->sc_dev.dv_xname);
+	}
+}
+
+
+/*
+ * void microtime(struct timeval *tvp)
+ *
+ * Fill in the specified timeval struct with the current time
+ * accurate to the microsecond.
+ */
+
+void
+microtime(tvp)
+	struct timeval *tvp;
+{
+	int s;
+	int tm;
+	int deltatm;
+	static struct timeval oldtv;
+
+	/* Clock not attached/initialised yet: leave *tvp untouched. */
+	if (clock_sc == NULL || clock_sc->sc_clock_count == 0)
+		return;
+
+	s = splhigh();
+
+	/* Timer counts down from sc_clock_count; the difference is
+	 * the number of ticks elapsed in the current hardclock period. */
+	tm = bus_space_read_4(clock_sc->sc_iot, clock_sc->sc_ioh,
+	    TIMER_1_VALUE);
+
+	deltatm = clock_sc->sc_clock_count - tm;
+
+#ifdef DIAGNOSTIC
+	if (deltatm < 0)
+		panic("opps deltatm < 0 tm=%d deltatm=%d", tm, deltatm);
+#endif
+
+	/* Fill in the timeval struct (interpolate using ticks-per-256us) */
+	*tvp = time;
+	tvp->tv_usec += ((deltatm << 8) / clock_sc->sc_clock_ticks_per_256us);
+
+	/* Make sure the micro seconds don't overflow. */
+	while (tvp->tv_usec >= 1000000) {
+		tvp->tv_usec -= 1000000;
+		++tvp->tv_sec;
+	}
+
+	/* Make sure the time has advanced (keep microtime monotonic). */
+	if (tvp->tv_sec == oldtv.tv_sec &&
+	    tvp->tv_usec <= oldtv.tv_usec) {
+		tvp->tv_usec = oldtv.tv_usec + 1;
+		if (tvp->tv_usec >= 1000000) {
+			tvp->tv_usec -= 1000000;
+			++tvp->tv_sec;
+		}
+	}
+
+	oldtv = *tvp;
+	(void)splx(s);
+}
+
+/*
+ * Use a timer to track microseconds, if the footbridge hasn't been setup we
+ * rely on an estimated loop, however footbridge is attached very early on.
+ */
+
+static int delay_clock_count = 0;
+static int delay_count_per_usec = 0;
+
+void
+calibrate_delay(void)
+{
+	/*
+	 * Run timer 3 at 100Hz and derive ticks-per-usec from the loaded
+	 * count; once delay_clock_count is non-zero, delay() switches from
+	 * its estimated spin loop to timer-based waiting.
+	 */
+	delay_clock_count = load_timer(TIMER_3_BASE, 100);
+	delay_count_per_usec = delay_clock_count/10000;
+#ifdef VERBOSE_DELAY_CALIBRATION
+	printf("delay calibration: delay_cc = %d, delay_c/us=%d\n",
+	    delay_clock_count, delay_count_per_usec);
+
+	/* Visual sanity check: print one digit per second for 10s. */
+	printf("0..");
+	delay(1000000);
+	printf("1..");
+	delay(1000000);
+	printf("2..");
+	delay(1000000);
+	printf("3..");
+	delay(1000000);
+	printf("4..");
+	delay(1000000);
+	printf("5..");
+	delay(1000000);
+	printf("6..");
+	delay(1000000);
+	printf("7..");
+	delay(1000000);
+	printf("8..");
+	delay(1000000);
+	printf("9..");
+	delay(1000000);
+	printf("10\n");
+#endif
+}
+
+/* Iterations per usec for the uncalibrated fallback loop (rough estimate). */
+int delaycount = 25000;
+
+/*
+ * delay(n)
+ *
+ * Busy-wait for at least 'n' microseconds.  Uses timer 3 (set up by
+ * calibrate_delay()) when available, otherwise falls back to a counted
+ * spin loop.  Reads the timer without resetting it so that nested
+ * (reentrant) calls do not disturb each other.
+ */
+void
+delay(n)
+	u_int n;
+{
+	volatile u_int i;
+	uint32_t cur, last, delta, usecs;
+
+	if (n == 0) return;
+
+
+	/*
+	 * not calibrated the timer yet, so try to live with this horrible
+	 * loop!
+	 */
+	if (delay_clock_count == 0)
+	{
+		while (n-- > 0) {
+			for (i = delaycount; --i;);
+		}
+		return;
+	}
+
+	/*
+	 * read the current value (do not reset it as delay is reentrant)
+	 */
+	last = bus_space_read_4(clock_sc->sc_iot, clock_sc->sc_ioh,
+	    TIMER_3_VALUE);
+
+	delta = usecs = 0;
+
+	while (n > usecs)
+	{
+		cur = bus_space_read_4(clock_sc->sc_iot, clock_sc->sc_ioh,
+		    TIMER_3_VALUE);
+		/* Timer counts down; 'last < cur' means it reloaded. */
+		if (last < cur)
+			/* timer has wrapped */
+			delta += ((delay_clock_count - cur) + last);
+		else
+			delta += (last - cur);
+
+		if (cur == 0)
+		{
+			/*
+			 * reset the timer, note that if something blocks us for more
+			 * than 1/100s we may delay for too long, but I believe that
+			 * is fairly unlikely.
+			 */
+			bus_space_write_4(clock_sc->sc_iot, clock_sc->sc_ioh,
+			    TIMER_3_CLEAR, 0);
+		}
+		last = cur;
+
+		/* Convert accumulated ticks to whole usecs, keep the remainder. */
+		if (delta >= delay_count_per_usec)
+		{
+			usecs += delta / delay_count_per_usec;
+			delta %= delay_count_per_usec;
+		}
+	}
+}
+
+/* End of footbridge_clock.c */
diff --git a/sys/arch/arm/footbridge/footbridge_com.c b/sys/arch/arm/footbridge/footbridge_com.c
new file mode 100644
index 00000000000..7c930907148
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridge_com.c
@@ -0,0 +1,870 @@
+/* $OpenBSD: footbridge_com.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridge_com.c,v 1.13 2003/03/23 14:12:25 chris Exp $ */
+
+/*-
+ * Copyright (c) 1997 Mark Brinicombe
+ * Copyright (c) 1997 Causality Limited
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * COM driver, using the footbridge UART
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/ioctl.h>
+#include <sys/select.h>
+#include <sys/tty.h>
+#include <sys/proc.h>
+#include <sys/conf.h>
+#include <sys/syslog.h>
+#include <sys/device.h>
+#include <sys/malloc.h>
+#include <sys/termios.h>
+#include <machine/bus.h>
+#include <machine/intr.h>
+#include <arm/conf.h>
+#include <arm/footbridge/dc21285mem.h>
+#include <arm/footbridge/dc21285reg.h>
+#include <arm/footbridge/footbridgevar.h>
+#include <arm/footbridge/footbridge.h>
+
+#include <dev/cons.h>
+
+#include "fcom.h"
+
+extern u_int dc21285_fclk;
+
+
+#ifdef DDB
+/*
+ * Define the keycode recognised as a request to call the debugger
+ * A value of 0 disables the feature when DDB is built in
+ */
+#define DDB_KEYCODE '@'
+#ifndef DDB_KEYCODE
+#define DDB_KEYCODE 0
+#endif /* DDB_KEYCODE */
+#endif /* DDB */
+
+/* Per-device state for the footbridge (fcom) UART driver. */
+struct fcom_softc {
+	struct device sc_dev;		/* generic device glue */
+	bus_space_tag_t sc_iot;		/* register access tag */
+	bus_space_handle_t sc_ioh;	/* register access handle */
+	void *sc_ih;			/* rx interrupt handler cookie */
+	struct timeout sc_softintr_ch;	/* defers rx buffer drain to fcom_softintr */
+	int sc_rx_irq;			/* receive interrupt number */
+	int sc_tx_irq;			/* transmit interrupt number (unused; tx is polled) */
+	int sc_hwflags;			/* hardware flags, see below */
+#define HW_FLAG_CONSOLE 0x01
+	int sc_swflags;			/* TIOCSFLAGS software flags */
+	int sc_l_ubrlcr;		/* cached baud-rate low byte */
+	int sc_m_ubrlcr;		/* cached baud-rate middle bits */
+	int sc_h_ubrlcr;		/* cached line-control bits */
+	char *sc_rxbuffer[2];		/* double-buffered receive storage */
+	char *sc_rxbuf;			/* buffer currently being filled at interrupt time */
+	int sc_rxpos;			/* fill position in sc_rxbuf */
+	int sc_rxcur;			/* index (0/1) of the buffer being filled */
+	struct tty *sc_tty;		/* associated tty */
+};
+
+#define RX_BUFFER_SIZE 0x100
+
+static int fcom_probe __P((struct device *, void*, void *));
+static void fcom_attach __P((struct device *, struct device *, void *));
+static void fcom_softintr __P((void *));
+
+static int fcom_rxintr __P((void *));
+/*static int fcom_txintr __P((void *));*/
+
+/*struct consdev;*/
+/*void fcomcnprobe __P((struct consdev *));
+void fcomcninit __P((struct consdev *));*/
+int fcomcngetc __P((dev_t));
+void fcomcnputc __P((dev_t, int));
+void fcomcnpollc __P((dev_t, int));
+
+struct cfattach fcom_ca = {
+ sizeof (struct fcom_softc), fcom_probe, fcom_attach
+};
+
+struct cfdriver fcom_cd = {
+ NULL, "fcom", DV_DULL
+};
+#if 0
+CFATTACH_DECL(fcom, sizeof(struct fcom_softc),
+ fcom_probe, fcom_attach, NULL, NULL);
+#endif
+
+extern struct cfdriver fcom_cd;
+
+dev_type_open(fcomopen);
+dev_type_close(fcomclose);
+dev_type_read(fcomread);
+dev_type_write(fcomwrite);
+dev_type_ioctl(fcomioctl);
+dev_type_tty(fcomtty);
+dev_type_poll(fcompoll);
+
+#if 0
+const struct cdevsw fcom_cdevsw = {
+ fcomopen, fcomclose, fcomread, fcomwrite, fcomioctl,
+ nostop, fcomtty, fcompoll, nommap, ttykqfilter, D_TTY
+};
+#endif
+
+void fcominit __P((bus_space_tag_t, bus_space_handle_t, int, int));
+void fcominitcons __P((bus_space_tag_t, bus_space_handle_t));
+
+bus_space_tag_t fcomconstag;
+bus_space_handle_t fcomconsioh;
+extern int comcnmode;
+extern int comcnspeed;
+
+#define COMUNIT(x) (minor(x))
+#ifndef CONUNIT
+#define CONUNIT 0
+#endif
+
+/*
+ * The console is set up at init time, well in advance of the rest of the
+ * system, and thus we have a private bus space tag for the console.
+ *
+ * The tag is provided by fcom_io.c and fcom_io_asm.S
+ */
+extern struct bus_space fcomcons_bs_tag;
+
+/*
+ * int fcom_probe(struct device *parent, struct cfdata *cf, void *aux)
+ *
+ * Make sure we are trying to attach a com device and then
+ * probe for one.
+ */
+
+static int
+fcom_probe(parent, cf, aux)
+	struct device *parent;
+	void *cf;
+	void *aux;
+{
+	union footbridge_attach_args *fba = aux;
+
+	/* Serial port is on-chip; match purely on the attach-args name. */
+	if (strcmp(fba->fba_name, "fcom") == 0)
+		return(1);
+	return(0);
+}
+
+/*
+ * void fcom_attach(struct device *parent, struct device *self, void *aux)
+ *
+ * attach the com device
+ */
+
+static void
+fcom_attach(parent, self, aux)
+	struct device *parent, *self;
+	void *aux;
+{
+	union footbridge_attach_args *fba = aux;
+	struct fcom_softc *sc = (struct fcom_softc *)self;
+
+	/* Set up the softc */
+	sc->sc_iot = fba->fba_fca.fca_iot;
+	sc->sc_ioh = fba->fba_fca.fca_ioh;
+	timeout_set(&sc->sc_softintr_ch, fcom_softintr, sc);
+	sc->sc_rx_irq = fba->fba_fca.fca_rx_irq;
+	sc->sc_tx_irq = fba->fba_fca.fca_tx_irq;
+	sc->sc_hwflags = 0;
+	sc->sc_swflags = 0;
+
+	/* If we have a console tag then make a note of it */
+	if (fcomconstag)
+		sc->sc_hwflags |= HW_FLAG_CONSOLE;
+
+	if (sc->sc_hwflags & HW_FLAG_CONSOLE) {
+		int major;
+
+		/* locate the major number by scanning cdevsw for our open routine */
+		for (major = 0; major < nchrdev; ++major)
+			if (cdevsw[major].d_open == fcomopen)
+				break;
+
+		cn_tab->cn_dev = makedev(major, sc->sc_dev.dv_unit);
+		printf(": console");
+	}
+	printf("\n");
+
+	/* Only the receive interrupt is claimed; transmit is done by polling. */
+	sc->sc_ih = footbridge_intr_claim(sc->sc_rx_irq, IPL_SERIAL,
+	    "serial rx", fcom_rxintr, sc);
+	if (sc->sc_ih == NULL)
+		panic("%s: Cannot install rx interrupt handler",
+		    sc->sc_dev.dv_xname);
+}
+
+static void fcomstart __P((struct tty *));
+static int fcomparam __P((struct tty *, struct termios *));
+
+/*
+ * fcomopen(dev, flag, mode, p)
+ *
+ * Character-device open: allocates the tty and the double receive
+ * buffers on first open, initialises termios to defaults, then hands
+ * off to the line discipline open routine.
+ */
+int
+fcomopen(dev, flag, mode, p)
+	dev_t dev;
+	int flag, mode;
+	struct proc *p;
+{
+	struct fcom_softc *sc;
+	int unit = minor(dev);
+	struct tty *tp;
+
+	if (unit >= fcom_cd.cd_ndevs)
+		return ENXIO;
+	sc = fcom_cd.cd_devs[unit];
+	if (!sc)
+		return ENXIO;
+	if (!(tp = sc->sc_tty))
+		sc->sc_tty = tp = ttymalloc();
+	if (!sc->sc_rxbuffer[0]) {
+		/* M_WAITOK: these allocations sleep rather than fail */
+		sc->sc_rxbuffer[0] = malloc(RX_BUFFER_SIZE, M_DEVBUF, M_WAITOK);
+		sc->sc_rxbuffer[1] = malloc(RX_BUFFER_SIZE, M_DEVBUF, M_WAITOK);
+		sc->sc_rxpos = 0;
+		sc->sc_rxcur = 0;
+		sc->sc_rxbuf = sc->sc_rxbuffer[sc->sc_rxcur];
+		if (!sc->sc_rxbuf)
+			panic("%s: Cannot allocate rx buffer memory",
+			    sc->sc_dev.dv_xname);
+	}
+	tp->t_oproc = fcomstart;
+	tp->t_param = fcomparam;
+	tp->t_dev = dev;
+	if (!ISSET(tp->t_state, TS_ISOPEN)) {
+		SET(tp->t_state, TS_WOPEN);
+		ttychars(tp);
+		tp->t_cflag = TTYDEF_CFLAG;
+		tp->t_iflag = TTYDEF_IFLAG;
+		tp->t_oflag = TTYDEF_OFLAG;
+		tp->t_lflag = TTYDEF_LFLAG;
+
+		/*
+		 * Initialize the termios status to the defaults. Add in the
+		 * sticky bits from TIOCSFLAGS.
+		 */
+		tp->t_ispeed = 0;
+		if (ISSET(sc->sc_hwflags, HW_FLAG_CONSOLE))
+			tp->t_ospeed = comcnspeed;
+		else
+			tp->t_ospeed = TTYDEF_SPEED;
+
+		fcomparam(tp, &tp->t_termios);
+		ttsetwater(tp);
+	} else if (ISSET(tp->t_state, TS_XCLUDE) && p->p_ucred->cr_uid != 0)
+		return EBUSY;
+	/* No modem control on this UART: carrier is always asserted. */
+	tp->t_state |= TS_CARR_ON;
+
+	return (*linesw[tp->t_line].l_open)(dev, tp);
+}
+
+/*
+ * fcomclose(dev, flag, mode, p)
+ *
+ * Character-device close: runs the line discipline and tty close
+ * routines, then releases the receive buffers allocated by fcomopen().
+ */
+int
+fcomclose(dev, flag, mode, p)
+	dev_t dev;
+	int flag, mode;
+	struct proc *p;
+{
+	struct fcom_softc *sc = fcom_cd.cd_devs[minor(dev)];
+	struct tty *tp = sc->sc_tty;
+	/* XXX This is for cons.c. */
+	if (!ISSET(tp->t_state, TS_ISOPEN))
+		return (0);
+
+	(*linesw[tp->t_line].l_close)(tp, flag);
+	ttyclose(tp);
+#ifdef DIAGNOSTIC
+	if (sc->sc_rxbuffer[0] == NULL)
+		panic("fcomclose: rx buffers not allocated");
+#endif /* DIAGNOSTIC */
+	free(sc->sc_rxbuffer[0], M_DEVBUF);
+	free(sc->sc_rxbuffer[1], M_DEVBUF);
+	sc->sc_rxbuffer[0] = NULL;
+	sc->sc_rxbuffer[1] = NULL;
+
+	return 0;
+}
+
+/* Character-device read: delegate to the line discipline. */
+int
+fcomread(dev, uio, flag)
+	dev_t dev;
+	struct uio *uio;
+	int flag;
+{
+	struct fcom_softc *sc = fcom_cd.cd_devs[minor(dev)];
+	struct tty *tp = sc->sc_tty;
+
+	return (*linesw[tp->t_line].l_read)(tp, uio, flag);
+}
+
+/* Character-device write: delegate to the line discipline. */
+int
+fcomwrite(dev, uio, flag)
+	dev_t dev;
+	struct uio *uio;
+	int flag;
+{
+	struct fcom_softc *sc = fcom_cd.cd_devs[minor(dev)];
+	struct tty *tp = sc->sc_tty;
+
+	return (*linesw[tp->t_line].l_write)(tp, uio, flag);
+}
+
+#if 0
+/*
+ * Character-device poll: delegate to the line discipline.
+ * NOTE(review): dead code (#if 0) — the return statement below has an
+ * extra closing parenthesis and would not compile if re-enabled.
+ */
+int
+fcompoll(dev, events, p)
+	dev_t dev;
+	int events;
+	struct proc *p;
+{
+	struct fcom_softc *sc = fcom_cd.cd_devs[minor(dev)];
+	struct tty *tp = sc->sc_tty;
+
+	return (*linesw[tp->t_line].l_poll)(tp, events, p));
+}
+#endif
+
+/*
+ * fcomioctl(dev, cmd, data, flag, p)
+ *
+ * Character-device ioctl: try the line discipline, then the generic
+ * tty layer, then handle the driver-specific flag ioctls.  A negative
+ * return from the lower layers means "not handled here".
+ */
+int
+fcomioctl(dev, cmd, data, flag, p)
+	dev_t dev;
+	u_long cmd;
+	caddr_t data;
+	int flag;
+	struct proc *p;
+{
+	struct fcom_softc *sc = fcom_cd.cd_devs[minor(dev)];
+	struct tty *tp = sc->sc_tty;
+	int error;
+
+	error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p);
+	if (error >= 0)
+		return error;
+	error = ttioctl(tp, cmd, data, flag, p);
+	if (error >= 0)
+		return error;
+
+	switch (cmd) {
+	case TIOCGFLAGS:
+		*(int *)data = sc->sc_swflags;
+		break;
+
+	case TIOCSFLAGS:
+		/* Setting the sticky flags is a privileged operation. */
+		error = suser(p, 0);
+		if (error)
+			return (error);
+		sc->sc_swflags = *(int *)data;
+		break;
+	}
+
+	return 0;
+}
+
+/* Return the tty associated with this device. */
+struct tty *
+fcomtty(dev)
+	dev_t dev;
+{
+	struct fcom_softc *sc = fcom_cd.cd_devs[minor(dev)];
+
+	return sc->sc_tty;
+}
+
+/* Output cannot be stopped mid-transfer (polled tx): nothing to do. */
+int
+fcomstop(struct tty *tp, int flag)
+{
+	return 0;
+}
+
+/*
+ * fcomstart(tp)
+ *
+ * tty output start routine.  Transmission is fully polled: up to 64
+ * bytes are pulled from the output queue and written to the UART one
+ * at a time, spinning on UART_TX_BUSY between characters.  If the
+ * UART stays busy, a restart is scheduled via t_rstrt_to.
+ */
+static void
+fcomstart(tp)
+	struct tty *tp;
+{
+	struct clist *cl;
+	int s, len;
+	u_char buf[64];
+	int loop;
+	struct fcom_softc *sc = fcom_cd.cd_devs[minor(tp->t_dev)];
+	bus_space_tag_t iot = sc->sc_iot;
+	bus_space_handle_t ioh = sc->sc_ioh;
+	int timo;
+
+	s = spltty();
+	if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) {
+		(void)splx(s);
+		return;
+	}
+	tp->t_state |= TS_BUSY;
+	(void)splx(s);
+
+/* s = splserial();*/
+	/* wait for any pending transmission to finish (bounded spin) */
+	timo = 100000;
+	while ((bus_space_read_4(iot, ioh, UART_FLAGS) & UART_TX_BUSY) && --timo)
+		;
+
+	s = splserial();
+	if (bus_space_read_4(iot, ioh, UART_FLAGS) & UART_TX_BUSY) {
+		/* Still busy after the spin: retry from the restart timeout. */
+		tp->t_state |= TS_TIMEOUT;
+		timeout_add(&tp->t_rstrt_to, 1);
+		(void)splx(s);
+		return;
+	}
+
+	(void)splx(s);
+
+	cl = &tp->t_outq;
+	len = q_to_b(cl, buf, 64);
+	for (loop = 0; loop < len; ++loop) {
+/* s = splserial();*/
+
+		bus_space_write_4(iot, ioh, UART_DATA, buf[loop]);
+
+		/* wait for this transmission to complete */
+		timo = 100000;
+		while ((bus_space_read_4(iot, ioh, UART_FLAGS) & UART_TX_BUSY) && --timo)
+			;
+/*		(void)splx(s);*/
+	}
+	s = spltty();
+	tp->t_state &= ~TS_BUSY;
+	if (cl->c_cc) {
+		/* More queued output: schedule another pass. */
+		tp->t_state |= TS_TIMEOUT;
+		timeout_add(&tp->t_rstrt_to, 1);
+	}
+	if (cl->c_cc <= tp->t_lowat) {
+		/* Below low water: wake writers blocked on the output queue. */
+		if (tp->t_state & TS_ASLEEP) {
+			tp->t_state &= ~TS_ASLEEP;
+			wakeup(cl);
+		}
+		selwakeup(&tp->t_wsel);
+	}
+	(void)splx(s);
+}
+
+/*
+ * fcomparam(tp, t)
+ *
+ * tty parameter routine: validate the requested termios settings,
+ * compute the UART baud-rate divisor and line-control bits, program
+ * the UBRLCR registers and cache the values in the softc.
+ */
+static int
+fcomparam(tp, t)
+	struct tty *tp;
+	struct termios *t;
+{
+	struct fcom_softc *sc = fcom_cd.cd_devs[minor(tp->t_dev)];
+	bus_space_tag_t iot = sc->sc_iot;
+	bus_space_handle_t ioh = sc->sc_ioh;
+	int baudrate;
+	int h_ubrlcr;
+	int m_ubrlcr;
+	int l_ubrlcr;
+	int s;
+
+	/* check requested parameters */
+	if (t->c_ospeed < 0)
+		return (EINVAL);
+	if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
+		return (EINVAL);
+
+	/* Unsupported speeds silently fall back to 9600 baud. */
+	switch (t->c_ospeed) {
+	case B1200:
+	case B2400:
+	case B4800:
+	case B9600:
+	case B19200:
+	case B38400:
+		baudrate = UART_BRD(dc21285_fclk, t->c_ospeed);
+		break;
+	default:
+		baudrate = UART_BRD(dc21285_fclk, 9600);
+		break;
+	}
+
+	/* Divisor is split across three registers: low 8, middle 4, control. */
+	l_ubrlcr = baudrate & 0xff;
+	m_ubrlcr = (baudrate >> 8) & 0xf;
+	h_ubrlcr = 0;
+
+	switch (ISSET(t->c_cflag, CSIZE)) {
+	case CS5:
+		h_ubrlcr |= UART_DATA_BITS_5;
+		break;
+	case CS6:
+		h_ubrlcr |= UART_DATA_BITS_6;
+		break;
+	case CS7:
+		h_ubrlcr |= UART_DATA_BITS_7;
+		break;
+	case CS8:
+		h_ubrlcr |= UART_DATA_BITS_8;
+		break;
+	}
+
+	if (ISSET(t->c_cflag, PARENB)) {
+		h_ubrlcr |= UART_PARITY_ENABLE;
+		if (ISSET(t->c_cflag, PARODD))
+			h_ubrlcr |= UART_ODD_PARITY;
+		else
+			h_ubrlcr |= UART_EVEN_PARITY;
+	}
+
+	if (ISSET(t->c_cflag, CSTOPB))
+		h_ubrlcr |= UART_STOP_BITS_2;
+
+	/*
+	 * NOTE(review): the three UBRLCR registers are written here and
+	 * again below after splserial(); the first set of writes appears
+	 * redundant — confirm before removing.
+	 */
+	bus_space_write_4(iot, ioh, UART_L_UBRLCR, l_ubrlcr);
+	bus_space_write_4(iot, ioh, UART_M_UBRLCR, m_ubrlcr);
+	bus_space_write_4(iot, ioh, UART_H_UBRLCR, h_ubrlcr);
+
+	s = splserial();
+
+	sc->sc_l_ubrlcr = l_ubrlcr;
+	sc->sc_m_ubrlcr = m_ubrlcr;
+	sc->sc_h_ubrlcr = h_ubrlcr;
+
+	/*
+	 * For the console, always force CLOCAL and !HUPCL, so that the port
+	 * is always active.
+	 */
+	if (ISSET(sc->sc_swflags, TIOCFLAG_SOFTCAR) ||
+	    ISSET(sc->sc_hwflags, HW_FLAG_CONSOLE)) {
+		SET(t->c_cflag, CLOCAL);
+		CLR(t->c_cflag, HUPCL);
+	}
+
+	/* and copy to tty */
+	tp->t_ispeed = 0;
+	tp->t_ospeed = t->c_ospeed;
+	tp->t_cflag = t->c_cflag;
+
+	bus_space_write_4(iot, ioh, UART_L_UBRLCR, l_ubrlcr);
+	bus_space_write_4(iot, ioh, UART_M_UBRLCR, m_ubrlcr);
+	bus_space_write_4(iot, ioh, UART_H_UBRLCR, h_ubrlcr);
+
+	(void)splx(s);
+
+	return (0);
+}
+
+/* Set when a soft interrupt is pending, cleared when it has drained. */
+static int softint_scheduled = 0;
+
+/*
+ * fcom_softintr(arg)
+ *
+ * Timeout-driven soft interrupt: swaps the receive double buffers at
+ * spltty so fcom_rxintr can keep filling the other one, then feeds the
+ * drained bytes to the line discipline.
+ */
+static void
+fcom_softintr(arg)
+	void *arg;
+{
+	struct fcom_softc *sc = arg;
+	struct tty *tp = sc->sc_tty;
+	int s;
+	int loop;
+	int len;
+	char *ptr;
+
+	s = spltty();
+	/* Swap buffers: take the filled one, give rxintr the other. */
+	ptr = sc->sc_rxbuf;
+	len = sc->sc_rxpos;
+	sc->sc_rxcur ^= 1;
+	sc->sc_rxbuf = sc->sc_rxbuffer[sc->sc_rxcur];
+	sc->sc_rxpos = 0;
+	(void)splx(s);
+
+	for (loop = 0; loop < len; ++loop)
+		(*linesw[tp->t_line].l_rint)(ptr[loop], tp);
+	softint_scheduled = 0;
+}
+
+#if 0
+static int
+fcom_txintr(arg)
+ void *arg;
+{
+/* struct fcom_softc *sc = arg;*/
+
+ printf("fcom_txintr()\n");
+ return(0);
+}
+#endif
+
+/*
+ * fcom_rxintr(arg)
+ *
+ * Receive interrupt handler: drains the UART into the current rx
+ * buffer and schedules fcom_softintr to pass the bytes to the tty.
+ */
+static int
+fcom_rxintr(arg)
+	void *arg;
+{
+	struct fcom_softc *sc = arg;
+	bus_space_tag_t iot = sc->sc_iot;
+	bus_space_handle_t ioh = sc->sc_ioh;
+	struct tty *tp = sc->sc_tty;
+	int status;
+	int byte;
+
+	do {
+		/*
+		 * UART_RX_FULL set appears to mean "no data available"
+		 * here (fcomcngetc spins on the same bit before reading)
+		 * — so exit once the FIFO is drained.
+		 */
+		status = bus_space_read_4(iot, ioh, UART_FLAGS);
+		if ((status & UART_RX_FULL))
+			break;
+		byte = bus_space_read_4(iot, ioh, UART_DATA);
+		status = bus_space_read_4(iot, ioh, UART_RX_STAT);
+#if defined(DDB) && DDB_KEYCODE > 0
+		/*
+		 * Temporary hack so that I can force the kernel into
+		 * the debugger via the serial port
+		 */
+		if (byte == DDB_KEYCODE) Debugger();
+#endif
+		/* Drop bytes when the tty is closed or the buffer is full. */
+		if (tp && (tp->t_state & TS_ISOPEN))
+			if (sc->sc_rxpos < RX_BUFFER_SIZE) {
+				sc->sc_rxbuf[sc->sc_rxpos++] = byte;
+				if (!softint_scheduled) {
+					softint_scheduled = 1;
+					timeout_add(&sc->sc_softintr_ch, 1);
+				}
+			}
+	} while (1);
+	return(0);
+}
+
+#if 0
+void
+fcom_iflush(sc)
+ struct fcom_softc *sc;
+{
+ bus_space_tag_t iot = sc->sc_iot;
+ bus_space_handle_t ioh = sc->sc_ioh;
+
+ /* flush any pending I/O */
+ while (!ISSET(bus_space_read_4(iot, ioh, UART_FLAGS), UART_RX_FULL))
+ (void) bus_space_read_4(iot, ioh, UART_DATA);
+}
+#endif
+
+/*
+ * Following are all routines needed for COM to act as console
+ */
+
+#if 0
+void
+fcomcnprobe(cp)
+ struct consdev *cp;
+{
+ int major;
+
+ /* Serial console is always present so no probe */
+
+ /* locate the major number */
+ major = cdevsw_lookup_major(&fcom_cdevsw);
+
+ /* initialize required fields */
+ cp->cn_dev = makedev(major, CONUNIT);
+ cp->cn_pri = CN_REMOTE; /* Force a serial port console */
+}
+
+void
+fcomcninit(cp)
+ struct consdev *cp;
+{
+ fcomconstag = &fcomcons_bs_tag;
+
+ if (bus_space_map(fcomconstag, DC21285_ARMCSR_BASE, DC21285_ARMCSR_SIZE, 0, &fcomconsioh))
+ panic("fcomcninit: mapping failed");
+
+ fcominitcons(fcomconstag, fcomconsioh);
+}
+#endif
+
+struct consdev fcomcons = {
+ NULL, NULL, fcomcngetc, fcomcnputc, fcomcnpollc, NULL,
+ NODEV, CN_NORMAL
+};
+
+/*
+ * fcomcnattach(iobase, rate, cflag)
+ *
+ * Attach the fcom UART as the system console: map the ARMCSR register
+ * window with the private console bus-space tag, initialise the UART
+ * and install the console device vector.
+ */
+int
+fcomcnattach(iobase, rate, cflag)
+	u_int iobase;
+	int rate;
+	tcflag_t cflag;
+{
+#if 0
+	static struct consdev fcomcons = {
+		NULL, NULL, fcomcngetc, fcomcnputc, fcomcnpollc, NULL,
+		NULL, NULL, NODEV, CN_NORMAL
+	};
+#endif
+
+	fcomconstag = &fcomcons_bs_tag;
+
+	if (bus_space_map(fcomconstag, iobase, DC21285_ARMCSR_SIZE,
+	    0, &fcomconsioh))
+		panic("fcomcninit: mapping failed");
+
+	fcominit(fcomconstag, fcomconsioh, rate, cflag);
+
+	cn_tab = &fcomcons;
+
+/* comcnspeed = rate;
+   comcnmode = cflag;*/
+	return (0);
+}
+
+/* Detach the fcom console: unmap the registers and clear cn_tab. */
+int
+fcomcndetach(void)
+{
+	bus_space_unmap(fcomconstag, fcomconsioh, DC21285_ARMCSR_SIZE);
+
+	cn_tab = NULL;
+	return (0);
+}
+
+/*
+ * Initialize UART to known state.
+ */
+void
+fcominit(iot, ioh, rate, mode)
+	bus_space_tag_t iot;
+	bus_space_handle_t ioh;
+	int rate;
+	int mode;
+{
+	int baudrate;
+	int h_ubrlcr;
+	int m_ubrlcr;
+	int l_ubrlcr;
+
+	/* Unsupported speeds fall back to 9600 baud (as in fcomparam). */
+	switch (rate) {
+	case B1200:
+	case B2400:
+	case B4800:
+	case B9600:
+	case B19200:
+	case B38400:
+		baudrate = UART_BRD(dc21285_fclk, rate);
+		break;
+	default:
+		baudrate = UART_BRD(dc21285_fclk, 9600);
+		break;
+	}
+
+	h_ubrlcr = 0;
+	switch (mode & CSIZE) {
+	case CS5:
+		h_ubrlcr |= UART_DATA_BITS_5;
+		break;
+	case CS6:
+		h_ubrlcr |= UART_DATA_BITS_6;
+		break;
+	case CS7:
+		h_ubrlcr |= UART_DATA_BITS_7;
+		break;
+	case CS8:
+		h_ubrlcr |= UART_DATA_BITS_8;
+		break;
+	}
+
+	/*
+	 * NOTE(review): unlike fcomparam(), the parity polarity bits below
+	 * are set even when PARENB is clear; harmless if the hardware
+	 * ignores them with parity disabled — confirm against dc21285reg.h.
+	 */
+	if (mode & PARENB)
+		h_ubrlcr |= UART_PARITY_ENABLE;
+	if (mode & PARODD)
+		h_ubrlcr |= UART_ODD_PARITY;
+	else
+		h_ubrlcr |= UART_EVEN_PARITY;
+
+	if (mode & CSTOPB)
+		h_ubrlcr |= UART_STOP_BITS_2;
+
+	m_ubrlcr = (baudrate >> 8) & 0xf;
+	l_ubrlcr = baudrate & 0xff;
+
+	bus_space_write_4(iot, ioh, UART_L_UBRLCR, l_ubrlcr);
+	bus_space_write_4(iot, ioh, UART_M_UBRLCR, m_ubrlcr);
+	bus_space_write_4(iot, ioh, UART_H_UBRLCR, h_ubrlcr);
+}
+#if 0
+/*
+ * Set UART for console use. Do normal init, then enable interrupts.
+ */
+void
+fcominitcons(iot, ioh)
+ bus_space_tag_t iot;
+ bus_space_handle_t ioh;
+{
+ int s = splserial();
+
+ fcominit(iot, ioh, comcnspeed, comcnmode);
+
+ delay(10000);
+
+ (void)splx(s);
+}
+#endif
+
+/*
+ * fcomcngetc(dev)
+ *
+ * Console input: spin until a character is available, read it (and the
+ * receive status, which is discarded) and return it.
+ */
+int
+fcomcngetc(dev)
+	dev_t dev;
+{
+	int s = splserial();
+	bus_space_tag_t iot = fcomconstag;
+	bus_space_handle_t ioh = fcomconsioh;
+	u_char stat, c;
+
+	/* Spin while UART_RX_FULL is set (no data yet — see fcom_rxintr). */
+	while ((bus_space_read_4(iot, ioh, UART_FLAGS) & UART_RX_FULL) != 0)
+		;
+	c = bus_space_read_4(iot, ioh, UART_DATA);
+	stat = bus_space_read_4(iot, ioh, UART_RX_STAT);
+	(void)splx(s);
+#if defined(DDB) && DDB_KEYCODE > 0
+	/*
+	 * Temporary hack so that I can force the kernel into
+	 * the debugger via the serial port
+	 */
+	if (c == DDB_KEYCODE) Debugger();
+#endif
+
+	return (c);
+}
+
+/*
+ * Console kernel output character routine.
+ */
+void
+fcomcnputc(dev, c)
+	dev_t dev;
+	int c;
+{
+	int s = splserial();
+	bus_space_tag_t iot = fcomconstag;
+	bus_space_handle_t ioh = fcomconsioh;
+	int timo;
+
+	/* wait for any pending transmission to finish (bounded spin) */
+	timo = 50000;
+	while ((bus_space_read_4(iot, ioh, UART_FLAGS) & UART_TX_BUSY) && --timo)
+		;
+	bus_space_write_4(iot, ioh, UART_DATA, c);
+
+	/* wait for this transmission to complete */
+	timo = 1500000;
+	while ((bus_space_read_4(iot, ioh, UART_FLAGS) & UART_TX_BUSY) && --timo)
+		;
+	/* Clear interrupt status here */
+	(void)splx(s);
+}
+
+/* Console poll-mode switch: nothing to do, the console is always polled. */
+void
+fcomcnpollc(dev, on)
+	dev_t dev;
+	int on;
+{
+}
diff --git a/sys/arch/arm/footbridge/footbridge_com_io.c b/sys/arch/arm/footbridge/footbridge_com_io.c
new file mode 100644
index 00000000000..32f60020210
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridge_com_io.c
@@ -0,0 +1,211 @@
+/* $OpenBSD: footbridge_com_io.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridge_com_io.c,v 1.4 2002/09/27 15:35:44 provos Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This file provides the bus space tag for the footbridge serial console
+ */
+
+/*
+ * bus_space I/O functions for mainbus
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <machine/bus.h>
+
+/* Proto types for all the bus_space structure functions */
+
+bs_protos(fcomcons);
+bs_protos(generic);
+bs_protos(bs_notimpl);
+
+/* Declare the fcomcons bus space tag */
+
+struct bus_space fcomcons_bs_tag = {
+ /* cookie */
+ NULL,
+
+ /* mapping/unmapping */
+ fcomcons_bs_map,
+ fcomcons_bs_unmap,
+ fcomcons_bs_subregion,
+
+ /* allocation/deallocation */
+ fcomcons_bs_alloc,
+ fcomcons_bs_free,
+
+ /* get kernel virtual address */
+ 0, /* never used */
+
+ /* Mmap bus space for user */
+ bs_notimpl_bs_mmap,
+
+ /* barrier */
+ fcomcons_bs_barrier,
+
+ /* read (single) */
+ bs_notimpl_bs_r_1,
+ bs_notimpl_bs_r_2,
+ generic_bs_r_4,
+ bs_notimpl_bs_r_8,
+
+ /* read multiple */
+ bs_notimpl_bs_rm_1,
+ bs_notimpl_bs_rm_2,
+ bs_notimpl_bs_rm_4,
+ bs_notimpl_bs_rm_8,
+
+ /* read region */
+ bs_notimpl_bs_rr_1,
+ bs_notimpl_bs_rr_2,
+ bs_notimpl_bs_rr_4,
+ bs_notimpl_bs_rr_8,
+
+ /* write (single) */
+ bs_notimpl_bs_w_1,
+ bs_notimpl_bs_w_2,
+ generic_bs_w_4,
+ bs_notimpl_bs_w_8,
+
+ /* write multiple */
+ bs_notimpl_bs_wm_1,
+ bs_notimpl_bs_wm_2,
+ bs_notimpl_bs_wm_4,
+ bs_notimpl_bs_wm_8,
+
+ /* write region */
+ bs_notimpl_bs_wr_1,
+ bs_notimpl_bs_wr_2,
+ bs_notimpl_bs_wr_4,
+ bs_notimpl_bs_wr_8,
+
+ bs_notimpl_bs_sm_1,
+ bs_notimpl_bs_sm_2,
+ bs_notimpl_bs_sm_4,
+ bs_notimpl_bs_sm_8,
+
+ /* set region */
+ bs_notimpl_bs_sr_1,
+ bs_notimpl_bs_sr_2,
+ bs_notimpl_bs_sr_4,
+ bs_notimpl_bs_sr_8,
+
+ /* copy */
+ bs_notimpl_bs_c_1,
+ bs_notimpl_bs_c_2,
+ bs_notimpl_bs_c_4,
+ bs_notimpl_bs_c_8,
+};
+
+/* bus space functions */
+
+int
+fcomcons_bs_map(t, bpa, size, cacheable, bshp)
+ void *t;
+ bus_addr_t bpa;
+ bus_size_t size;
+ int cacheable;
+ bus_space_handle_t *bshp;
+{
+ /*
+ * Temporary implementation as all I/O is already mapped etc.
+ *
+ * Eventually this function will do the mapping check for multiple maps
+ */
+ *bshp = bpa;
+ return(0);
+ }
+
+int
+fcomcons_bs_alloc(t, rstart, rend, size, alignment, boundary, cacheable,
+ bpap, bshp)
+ void *t;
+ bus_addr_t rstart, rend;
+ bus_size_t size, alignment, boundary;
+ int cacheable;
+ bus_addr_t *bpap;
+ bus_space_handle_t *bshp;
+{
+ panic("fcomcons_alloc(): Help!");
+}
+
+
+void
+fcomcons_bs_unmap(t, bsh, size)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+ /*
+ * Temporary implementation
+ */
+}
+
+void
+fcomcons_bs_free(t, bsh, size)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+
+ panic("fcomcons_free(): Help!");
+ /* fcomcons_unmap() does all that we need to do. */
+/* fcomcons_unmap(t, bsh, size);*/
+}
+
+int
+fcomcons_bs_subregion(t, bsh, offset, size, nbshp)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, size;
+ bus_space_handle_t *nbshp;
+{
+
+ *nbshp = bsh + offset;
+ return (0);
+}
+
+void
+fcomcons_bs_barrier(t, bsh, offset, len, flags)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, len;
+ int flags;
+{
+}
+
+/* End of footbridge_com_io.c */
diff --git a/sys/arch/arm/footbridge/footbridge_intr.h b/sys/arch/arm/footbridge/footbridge_intr.h
new file mode 100644
index 00000000000..2a9888e7e1b
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridge_intr.h
@@ -0,0 +1,215 @@
+/* $OpenBSD: footbridge_intr.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridge_intr.h,v 1.4 2003/01/03 00:56:00 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FOOTBRIDGE_INTR_H_
+#define _FOOTBRIDGE_INTR_H_
+
+#include <arm/armreg.h>
+
+/* Define the various Interrupt Priority Levels */
+
+/* Hardware Interrupt Priority Levels are not mutually exclusive. */
+
+#define IPL_NONE 0 /* nothing */
+#define IPL_SOFT 1 /* generic soft interrupts */
+#define IPL_SOFTCLOCK 2 /* clock software interrupts */
+#define IPL_SOFTNET 3 /* network software interrupts */
+#define IPL_BIO 4 /* block I/O */
+#define IPL_NET 5 /* network */
+#define IPL_SOFTSERIAL 6 /* serial software interrupts */
+#define IPL_TTY 7 /* terminal */
+#define IPL_VM 8 /* memory allocation */
+#define IPL_AUDIO 9 /* audio */
+#define IPL_CLOCK 10 /* clock */
+#define IPL_STATCLOCK 11 /* statclock */
+#define IPL_HIGH 12 /* everything */
+#define IPL_SERIAL 13 /* serial */
+
+#define NIPL 14
+
+#define IST_UNUSABLE -1 /* interrupt cannot be used */
+#define IST_NONE 0 /* none (dummy) */
+#define IST_PULSE 1 /* pulsed */
+#define IST_EDGE 2 /* edge-triggered */
+#define IST_LEVEL 3 /* level-triggered */
+
+#define __NEWINTR /* enables new hooks in cpu_fork()/cpu_switch() */
+
+#define ARM_IRQ_HANDLER _C_LABEL(footbridge_intr_dispatch)
+
+#ifndef _LOCORE
+#include <arm/cpufunc.h>
+
+#include <arm/footbridge/dc21285mem.h>
+#include <arm/footbridge/dc21285reg.h>
+
+#define INT_SWMASK \
+ ((1U << IRQ_SOFTINT) | (1U << IRQ_RESERVED0) | \
+ (1U << IRQ_RESERVED1) | (1U << IRQ_RESERVED2))
+#define ICU_INT_HWMASK (0xffffffff & ~(INT_SWMASK | (1U << IRQ_RESERVED3)))
+
+/* only call this with interrupts off */
+static __inline void __attribute__((__unused__))
+ footbridge_set_intrmask(void)
+{
+ extern __volatile uint32_t intr_enabled;
+ /* fetch once so we write the same number to both registers */
+ uint32_t tmp = intr_enabled & ICU_INT_HWMASK;
+
+ ((__volatile uint32_t*)(DC21285_ARMCSR_VBASE))[IRQ_ENABLE_SET>>2] = tmp;
+ ((__volatile uint32_t*)(DC21285_ARMCSR_VBASE))[IRQ_ENABLE_CLEAR>>2] = ~tmp;
+}
+
+static __inline void __attribute__((__unused__))
+footbridge_splx(int newspl)
+{
+ extern __volatile uint32_t intr_enabled;
+ extern __volatile int current_spl_level;
+ extern __volatile int footbridge_ipending;
+ extern void footbridge_do_pending(void);
+ int oldirqstate, hwpend;
+
+ current_spl_level = newspl;
+
+ hwpend = (footbridge_ipending & ICU_INT_HWMASK) & ~newspl;
+ if (hwpend != 0) {
+ oldirqstate = disable_interrupts(I32_bit);
+ intr_enabled |= hwpend;
+ footbridge_set_intrmask();
+ restore_interrupts(oldirqstate);
+ }
+
+ if ((footbridge_ipending & INT_SWMASK) & ~newspl)
+ footbridge_do_pending();
+}
+
+static __inline int __attribute__((__unused__))
+footbridge_splraise(int ipl)
+{
+ extern __volatile int current_spl_level;
+ extern int footbridge_imask[];
+ int old;
+
+ old = current_spl_level;
+ current_spl_level |= footbridge_imask[ipl];
+
+ return (old);
+}
+
+static __inline int __attribute__((__unused__))
+footbridge_spllower(int ipl)
+{
+ extern __volatile int current_spl_level;
+ extern int footbridge_imask[];
+ int old = current_spl_level;
+
+ footbridge_splx(footbridge_imask[ipl]);
+ return(old);
+}
+
+/* should only be defined in footbridge_intr.c */
+#if !defined(ARM_SPL_NOINLINE)
+
+#define splx(newspl) footbridge_splx(newspl)
+#define _spllower(ipl) footbridge_spllower(ipl)
+#define _splraise(ipl) footbridge_splraise(ipl)
+void _setsoftintr(int);
+
+#else
+
+int _splraise(int);
+int _spllower(int);
+void splx(int);
+void _setsoftintr(int);
+
+#endif /* ! ARM_SPL_NOINLINE */
+
+#include <sys/device.h>
+#include <sys/queue.h>
+#include <machine/irqhandler.h>
+
+#define splsoft() _splraise(IPL_SOFT)
+#define splsoftclock() _splraise(IPL_SOFTCLOCK)
+#define splsoftnet() _splraise(IPL_SOFTNET)
+#define splbio() _splraise(IPL_BIO)
+#define splnet() _splraise(IPL_NET)
+#define splsoftserial() _splraise(IPL_SOFTSERIAL)
+#define spltty() _splraise(IPL_TTY)
+#define spllpt() spltty()
+#define splvm() _splraise(IPL_VM)
+#define splimp() _splraise(IPL_VM)
+#define splaudio() _splraise(IPL_AUDIO)
+#define splclock() _splraise(IPL_CLOCK)
+#define splstatclock() _splraise(IPL_STATCLOCK)
+#define splhigh() _splraise(IPL_HIGH)
+#define splserial() _splraise(IPL_SERIAL)
+
+#define spl0() (void)_spllower(IPL_NONE)
+#define spllowersoftclock() (void)_spllower(IPL_SOFTCLOCK)
+
+#define splsched() splhigh()
+#define spllock() splhigh()
+
+/* Use generic software interrupt support. */
+#include <arm/softintr.h>
+
+/* footbridge has 32 interrupt lines */
+#define NIRQ 32
+
+struct intrhand {
+ TAILQ_ENTRY(intrhand) ih_list; /* link on intrq list */
+ int (*ih_func)(void *); /* handler */
+ void *ih_arg; /* arg for handler */
+ int ih_ipl; /* IPL_* */
+ int ih_irq; /* IRQ number */
+};
+
+#define IRQNAMESIZE sizeof("footbridge irq 31")
+
+struct intrq {
+ TAILQ_HEAD(, intrhand) iq_list; /* handler list */
+ struct evcnt iq_ev; /* event counter */
+ int iq_mask; /* IRQs to mask while handling */
+ int iq_levels; /* IPL_*'s this IRQ has */
+ int iq_ist; /* share type */
+ char iq_name[IRQNAMESIZE]; /* interrupt name */
+};
+
+#endif /* _LOCORE */
+
+#endif /* _FOOTBRIDGE_INTR_H */
diff --git a/sys/arch/arm/footbridge/footbridge_io.c b/sys/arch/arm/footbridge/footbridge_io.c
new file mode 100644
index 00000000000..5af52cb7591
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridge_io.c
@@ -0,0 +1,321 @@
+/* $OpenBSD: footbridge_io.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridge_io.c,v 1.6 2002/04/12 19:12:31 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1997 Causality Limited
+ * Copyright (c) 1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * bus_space I/O functions for footbridge
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <machine/bus.h>
+#include <arm/footbridge/footbridge.h>
+#include <arm/footbridge/dc21285mem.h>
+#include <uvm/uvm_extern.h>
+
+/* Proto types for all the bus_space structure functions */
+
+bs_protos(footbridge);
+bs_protos(generic);
+bs_protos(generic_armv4);
+bs_protos(bs_notimpl);
+bs_map_proto(footbridge_mem);
+bs_unmap_proto(footbridge_mem);
+
+/* Declare the footbridge bus space tag */
+
+struct bus_space footbridge_bs_tag = {
+ /* cookie */
+ (void *) 0, /* Base address */
+
+ /* mapping/unmapping */
+ footbridge_bs_map,
+ footbridge_bs_unmap,
+ footbridge_bs_subregion,
+
+ /* allocation/deallocation */
+ footbridge_bs_alloc,
+ footbridge_bs_free,
+
+ /* get kernel virtual address */
+ footbridge_bs_vaddr,
+
+ /* Mmap bus space for user */
+ bs_notimpl_bs_mmap,
+
+ /* barrier */
+ footbridge_bs_barrier,
+
+ /* read (single) */
+ generic_bs_r_1,
+ generic_armv4_bs_r_2,
+ generic_bs_r_4,
+ bs_notimpl_bs_r_8,
+
+ /* read multiple */
+ generic_bs_rm_1,
+ generic_armv4_bs_rm_2,
+ generic_bs_rm_4,
+ bs_notimpl_bs_rm_8,
+
+ /* read region */
+ bs_notimpl_bs_rr_1,
+ generic_armv4_bs_rr_2,
+ generic_bs_rr_4,
+ bs_notimpl_bs_rr_8,
+
+ /* write (single) */
+ generic_bs_w_1,
+ generic_armv4_bs_w_2,
+ generic_bs_w_4,
+ bs_notimpl_bs_w_8,
+
+ /* write multiple */
+ generic_bs_wm_1,
+ generic_armv4_bs_wm_2,
+ generic_bs_wm_4,
+ bs_notimpl_bs_wm_8,
+
+ /* write region */
+ bs_notimpl_bs_wr_1,
+ generic_armv4_bs_wr_2,
+ generic_bs_wr_4,
+ bs_notimpl_bs_wr_8,
+
+ /* set multiple */
+ bs_notimpl_bs_sm_1,
+ bs_notimpl_bs_sm_2,
+ bs_notimpl_bs_sm_4,
+ bs_notimpl_bs_sm_8,
+
+ /* set region */
+ bs_notimpl_bs_sr_1,
+ generic_armv4_bs_sr_2,
+ bs_notimpl_bs_sr_4,
+ bs_notimpl_bs_sr_8,
+
+ /* copy */
+ bs_notimpl_bs_c_1,
+ generic_armv4_bs_c_2,
+ bs_notimpl_bs_c_4,
+ bs_notimpl_bs_c_8,
+};
+
+void footbridge_create_io_bs_tag(t, cookie)
+ struct bus_space *t;
+ void *cookie;
+{
+ *t = footbridge_bs_tag;
+ t->bs_cookie = cookie;
+}
+
+void footbridge_create_mem_bs_tag(t, cookie)
+ struct bus_space *t;
+ void *cookie;
+{
+ *t = footbridge_bs_tag;
+ t->bs_map = footbridge_mem_bs_map;
+ t->bs_unmap = footbridge_mem_bs_unmap;
+ t->bs_cookie = cookie;
+}
+
+/* bus space functions */
+
+int
+footbridge_bs_map(t, bpa, size, cacheable, bshp)
+ void *t;
+ bus_addr_t bpa;
+ bus_size_t size;
+ int cacheable;
+ bus_space_handle_t *bshp;
+{
+ /*
+ * The whole 64K of PCI space is always completely mapped during
+ * boot.
+ *
+	 * Eventually this function will do the mapping check for overlapping /
+ * multiple mappings.
+ */
+
+ /* The cookie is the base address for the I/O area */
+ *bshp = bpa + (bus_addr_t)t;
+ return(0);
+}
+
+int
+footbridge_mem_bs_map(t, bpa, size, cacheable, bshp)
+ void *t;
+ bus_addr_t bpa;
+ bus_size_t size;
+ int cacheable;
+ bus_space_handle_t *bshp;
+{
+ bus_addr_t startpa, endpa;
+ vaddr_t va;
+
+	/* Round the allocation to page boundaries */
+ startpa = trunc_page(bpa);
+ endpa = round_page(bpa + size);
+
+ /*
+ * Check for mappings below 1MB as we have this space already
+ * mapped. In practice it is only the VGA hole that takes
+ * advantage of this.
+ */
+ if (endpa < DC21285_PCI_ISA_MEM_VSIZE) {
+ /* Store the bus space handle */
+ *bshp = DC21285_PCI_ISA_MEM_VBASE + bpa;
+ return 0;
+ }
+
+ /*
+ * Eventually this function will do the mapping check for overlapping /
+ * multiple mappings
+ */
+
+ va = uvm_km_valloc(kernel_map, endpa - startpa);
+ if (va == 0)
+ return ENOMEM;
+
+ /* Store the bus space handle */
+ *bshp = va + (bpa & PGOFSET);
+
+ /* Now map the pages */
+ /* The cookie is the physical base address for the I/O area */
+ while (startpa < endpa) {
+ pmap_enter(pmap_kernel(), va, (bus_addr_t)t + startpa,
+ VM_PROT_READ | VM_PROT_WRITE, 0);
+ va += PAGE_SIZE;
+ startpa += PAGE_SIZE;
+ }
+ pmap_update(pmap_kernel());
+
+/* if (bpa >= DC21285_PCI_MEM_VSIZE && bpa != DC21285_ARMCSR_VBASE)
+ panic("footbridge_bs_map: Address out of range (%08lx)", bpa);
+*/
+ return(0);
+}
+
+int
+footbridge_bs_alloc(t, rstart, rend, size, alignment, boundary, cacheable,
+ bpap, bshp)
+ void *t;
+ bus_addr_t rstart, rend;
+ bus_size_t size, alignment, boundary;
+ int cacheable;
+ bus_addr_t *bpap;
+ bus_space_handle_t *bshp;
+{
+ panic("footbridge_alloc(): Help!");
+}
+
+
+void
+footbridge_bs_unmap(t, bsh, size)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+ /*
+ * Temporary implementation
+ */
+}
+
+void
+footbridge_mem_bs_unmap(t, bsh, size)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+ vaddr_t startva, endva;
+
+ /*
+	 * Check for mappings below 1MB as we have this space permanently
+ * mapped. In practice it is only the VGA hole that takes
+ * advantage of this.
+ */
+ if (bsh >= DC21285_PCI_ISA_MEM_VBASE
+ && bsh < (DC21285_PCI_ISA_MEM_VBASE + DC21285_PCI_ISA_MEM_VSIZE)) {
+ return;
+ }
+
+ startva = trunc_page(bsh);
+ endva = round_page(bsh + size);
+
+ uvm_km_free(kernel_map, startva, endva - startva);
+}
+
+void
+footbridge_bs_free(t, bsh, size)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+
+ panic("footbridge_free(): Help!");
+ /* footbridge_bs_unmap() does all that we need to do. */
+/* footbridge_bs_unmap(t, bsh, size);*/
+}
+
+int
+footbridge_bs_subregion(t, bsh, offset, size, nbshp)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, size;
+ bus_space_handle_t *nbshp;
+{
+
+ *nbshp = bsh + (offset << ((int)t));
+ return (0);
+}
+
+void *
+footbridge_bs_vaddr(t, bsh)
+ void *t;
+ bus_space_handle_t bsh;
+{
+
+ return ((void *)bsh);
+}
+
+void
+footbridge_bs_barrier(t, bsh, offset, len, flags)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, len;
+ int flags;
+{
+}
diff --git a/sys/arch/arm/footbridge/footbridge_irqhandler.c b/sys/arch/arm/footbridge/footbridge_irqhandler.c
new file mode 100644
index 00000000000..b3c1cf57341
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridge_irqhandler.c
@@ -0,0 +1,482 @@
+/* $OpenBSD: footbridge_irqhandler.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridge_irqhandler.c,v 1.9 2003/06/16 20:00:57 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARM_SPL_NOINLINE
+#define ARM_SPL_NOINLINE
+#endif
+
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/intr.h>
+#include <machine/cpu.h>
+#include <arm/footbridge/dc21285mem.h>
+#include <arm/footbridge/dc21285reg.h>
+
+#include <dev/pci/pcivar.h>
+
+#include "isa.h"
+#if NISA > 0
+#include <dev/isa/isavar.h>
+#endif
+
+/* Interrupt handler queues. */
+static struct intrq footbridge_intrq[NIRQ];
+
+/* Interrupts to mask at each level. */
+int footbridge_imask[NIPL];
+
+/* Software copy of the IRQs we have enabled. */
+__volatile uint32_t intr_enabled;
+
+/* Current interrupt priority level */
+__volatile int current_spl_level;
+
+/* Interrupts pending */
+__volatile int footbridge_ipending;
+
+void footbridge_intr_dispatch(struct clockframe *frame);
+
+const struct evcnt *footbridge_pci_intr_evcnt __P((void *, pci_intr_handle_t));
+
+void footbridge_do_pending(void);
+
+static const uint32_t si_to_irqbit[SI_NQUEUES] =
+ { IRQ_SOFTINT,
+ IRQ_RESERVED0,
+ IRQ_RESERVED1,
+ IRQ_RESERVED2 };
+
+#define SI_TO_IRQBIT(si) (1U << si_to_irqbit[(si)])
+
+/*
+ * Map a software interrupt queue to an interrupt priority level.
+ */
+static const int si_to_ipl[SI_NQUEUES] = {
+ IPL_SOFT, /* SI_SOFT */
+ IPL_SOFTCLOCK, /* SI_SOFTCLOCK */
+ IPL_SOFTNET, /* SI_SOFTNET */
+ IPL_SOFTSERIAL, /* SI_SOFTSERIAL */
+};
+
+const struct evcnt *
+footbridge_pci_intr_evcnt(pcv, ih)
+ void *pcv;
+ pci_intr_handle_t ih;
+{
+ /* XXX check range is valid */
+#if NISA > 0
+ if (ih >= 0x80 && ih <= 0x8f) {
+ return isa_intr_evcnt(NULL, (ih & 0x0f));
+ }
+#endif
+ return &footbridge_intrq[ih].iq_ev;
+}
+
+static __inline void
+footbridge_enable_irq(int irq)
+{
+ intr_enabled |= (1U << irq);
+
+ footbridge_set_intrmask();
+}
+
+static __inline void
+footbridge_disable_irq(int irq)
+{
+ intr_enabled &= ~(1U << irq);
+ footbridge_set_intrmask();
+}
+
+/*
+ * NOTE: This routine must be called with interrupts disabled in the CPSR.
+ */
+static void
+footbridge_intr_calculate_masks(void)
+{
+ struct intrq *iq;
+ struct intrhand *ih;
+ int irq, ipl;
+
+ /* First, figure out which IPLs each IRQ has. */
+ for (irq = 0; irq < NIRQ; irq++) {
+ int levels = 0;
+ iq = &footbridge_intrq[irq];
+ footbridge_disable_irq(irq);
+ for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
+ ih = TAILQ_NEXT(ih, ih_list))
+ levels |= (1U << ih->ih_ipl);
+ iq->iq_levels = levels;
+ }
+
+ /* Next, figure out which IRQs are used by each IPL. */
+ for (ipl = 0; ipl < NIPL; ipl++) {
+ int irqs = 0;
+ for (irq = 0; irq < NIRQ; irq++) {
+ if (footbridge_intrq[irq].iq_levels & (1U << ipl))
+ irqs |= (1U << irq);
+ }
+ footbridge_imask[ipl] = irqs;
+ }
+
+ /* IPL_NONE must open up all interrupts */
+ footbridge_imask[IPL_NONE] = 0;
+
+ /*
+ * Initialize the soft interrupt masks to block themselves.
+ */
+ footbridge_imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
+ footbridge_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
+ footbridge_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
+ footbridge_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);
+
+ footbridge_imask[IPL_SOFTCLOCK] |= footbridge_imask[IPL_SOFT];
+ footbridge_imask[IPL_SOFTNET] |= footbridge_imask[IPL_SOFTCLOCK];
+
+ /*
+	 * Enforce a hierarchy that gives "slow" devices (or devices with
+ * limited input buffer space/"real-time" requirements) a better
+ * chance at not dropping data.
+ */
+ footbridge_imask[IPL_BIO] |= footbridge_imask[IPL_SOFTNET];
+ footbridge_imask[IPL_NET] |= footbridge_imask[IPL_BIO];
+ footbridge_imask[IPL_SOFTSERIAL] |= footbridge_imask[IPL_NET];
+
+ footbridge_imask[IPL_TTY] |= footbridge_imask[IPL_SOFTSERIAL];
+
+ /*
+ * splvm() blocks all interrupts that use the kernel memory
+ * allocation facilities.
+ */
+ footbridge_imask[IPL_VM] |= footbridge_imask[IPL_TTY];
+
+ /*
+ * Audio devices are not allowed to perform memory allocation
+ * in their interrupt routines, and they have fairly "real-time"
+ * requirements, so give them a high interrupt priority.
+ */
+ footbridge_imask[IPL_AUDIO] |= footbridge_imask[IPL_VM];
+
+ /*
+ * splclock() must block anything that uses the scheduler.
+ */
+ footbridge_imask[IPL_CLOCK] |= footbridge_imask[IPL_AUDIO];
+
+ /*
+	 * footbridge has separate statclock.
+ */
+ footbridge_imask[IPL_STATCLOCK] |= footbridge_imask[IPL_CLOCK];
+
+ /*
+ * splhigh() must block "everything".
+ */
+ footbridge_imask[IPL_HIGH] |= footbridge_imask[IPL_STATCLOCK];
+
+ /*
+ * XXX We need serial drivers to run at the absolute highest priority
+ * in order to avoid overruns, so serial > high.
+ */
+ footbridge_imask[IPL_SERIAL] |= footbridge_imask[IPL_HIGH];
+
+ /*
+ * Calculate the ipl level to go to when handling this interrupt
+ */
+ for (irq = 0; irq < NIRQ; irq++) {
+ int irqs = (1U << irq);
+ iq = &footbridge_intrq[irq];
+ if (TAILQ_FIRST(&iq->iq_list) != NULL)
+ footbridge_enable_irq(irq);
+ for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
+ ih = TAILQ_NEXT(ih, ih_list))
+ irqs |= footbridge_imask[ih->ih_ipl];
+ iq->iq_mask = irqs;
+ }
+}
+
+int
+_splraise(int ipl)
+{
+ return (footbridge_splraise(ipl));
+}
+
+/* this will always take us to the ipl passed in */
+void
+splx(int new)
+{
+ footbridge_splx(new);
+}
+
+int
+_spllower(int ipl)
+{
+ return (footbridge_spllower(ipl));
+}
+
+__inline void
+footbridge_do_pending(void)
+{
+#if 0
+ static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
+#else
+ static int processing;
+#endif
+ uint32_t new, oldirqstate;
+
+#if 0
+ if (__cpu_simple_lock_try(&processing) == 0)
+ return;
+#else
+ if (processing)
+ return;
+ processing = 1;
+#endif
+
+ new = current_spl_level;
+
+ oldirqstate = disable_interrupts(I32_bit);
+
+#define DO_SOFTINT(si) \
+ if ((footbridge_ipending & ~new) & SI_TO_IRQBIT(si)) { \
+ footbridge_ipending &= ~SI_TO_IRQBIT(si); \
+ current_spl_level |= footbridge_imask[si_to_ipl[(si)]]; \
+ restore_interrupts(oldirqstate); \
+ softintr_dispatch(si); \
+ oldirqstate = disable_interrupts(I32_bit); \
+ current_spl_level = new; \
+ }
+ DO_SOFTINT(SI_SOFTSERIAL);
+ DO_SOFTINT(SI_SOFTNET);
+ DO_SOFTINT(SI_SOFTCLOCK);
+ DO_SOFTINT(SI_SOFT);
+
+#if 0
+ __cpu_simple_unlock(&processing);
+#else
+ processing = 0;
+#endif
+
+ restore_interrupts(oldirqstate);
+}
+
+
+/* called from splhigh, so the matching splx will set the interrupt up.*/
+void
+_setsoftintr(int si)
+{
+ int oldirqstate;
+
+ oldirqstate = disable_interrupts(I32_bit);
+ footbridge_ipending |= SI_TO_IRQBIT(si);
+ restore_interrupts(oldirqstate);
+
+ /* Process unmasked pending soft interrupts. */
+ if ((footbridge_ipending & INT_SWMASK) & ~current_spl_level)
+ footbridge_do_pending();
+}
+
+void
+footbridge_intr_init(void)
+{
+ struct intrq *iq;
+ int i;
+
+ intr_enabled = 0;
+ current_spl_level = 0xffffffff;
+ footbridge_ipending = 0;
+ footbridge_set_intrmask();
+
+ for (i = 0; i < NIRQ; i++) {
+ iq = &footbridge_intrq[i];
+ TAILQ_INIT(&iq->iq_list);
+
+ snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
+#if 0
+ evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
+ NULL, "footbridge", iq->iq_name);
+#endif
+ }
+
+ footbridge_intr_calculate_masks();
+
+ /* Enable IRQ's, we don't have any FIQ's*/
+ enable_interrupts(I32_bit);
+}
+
+void *
+footbridge_intr_claim(int irq, int ipl, char *name, int (*func)(void *), void *arg)
+{
+ struct intrq *iq;
+ struct intrhand *ih;
+ u_int oldirqstate;
+
+ if (irq < 0 || irq > NIRQ)
+ panic("footbridge_intr_establish: IRQ %d out of range", irq);
+
+ ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
+ if (ih == NULL)
+ {
+ printf("No memory");
+ return (NULL);
+ }
+
+ ih->ih_func = func;
+ ih->ih_arg = arg;
+ ih->ih_ipl = ipl;
+ ih->ih_irq = irq;
+
+ iq = &footbridge_intrq[irq];
+
+ iq->iq_ist = IST_LEVEL;
+
+ oldirqstate = disable_interrupts(I32_bit);
+
+ TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
+
+ footbridge_intr_calculate_masks();
+
+ /* detach the existing event counter and add the new name */
+#if 0
+ evcnt_detach(&iq->iq_ev);
+ evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
+ NULL, "footbridge", name);
+#endif
+
+ restore_interrupts(oldirqstate);
+
+ return(ih);
+}
+
+void
+footbridge_intr_disestablish(void *cookie)
+{
+ struct intrhand *ih = cookie;
+ struct intrq *iq = &footbridge_intrq[ih->ih_irq];
+ int oldirqstate;
+
+ /* XXX need to free ih ? */
+ oldirqstate = disable_interrupts(I32_bit);
+
+ TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
+
+ footbridge_intr_calculate_masks();
+
+ restore_interrupts(oldirqstate);
+}
+
+static uint32_t footbridge_intstatus(void);
+
+static inline uint32_t footbridge_intstatus()
+{
+ return ((__volatile uint32_t*)(DC21285_ARMCSR_VBASE))[IRQ_STATUS>>2];
+}
+
+/* called with external interrupts disabled */
+void
+footbridge_intr_dispatch(struct clockframe *frame)
+{
+ struct intrq *iq;
+ struct intrhand *ih;
+ int oldirqstate, pcpl, irq, ibit, hwpend;
+
+ pcpl = current_spl_level;
+
+ hwpend = footbridge_intstatus();
+
+ /*
+ * Disable all the interrupts that are pending. We will
+ * reenable them once they are processed and not masked.
+ */
+ intr_enabled &= ~hwpend;
+ footbridge_set_intrmask();
+
+ while (hwpend != 0) {
+ int intr_rc = 0;
+ irq = ffs(hwpend) - 1;
+ ibit = (1U << irq);
+
+ hwpend &= ~ibit;
+
+ if (pcpl & ibit) {
+ /*
+ * IRQ is masked; mark it as pending and check
+ * the next one. Note: the IRQ is already disabled.
+ */
+ footbridge_ipending |= ibit;
+ continue;
+ }
+
+ footbridge_ipending &= ~ibit;
+
+ iq = &footbridge_intrq[irq];
+ iq->iq_ev.ev_count++;
+ uvmexp.intrs++;
+ current_spl_level |= iq->iq_mask;
+ oldirqstate = enable_interrupts(I32_bit);
+ for (ih = TAILQ_FIRST(&iq->iq_list);
+ ((ih != NULL) && (intr_rc != 1));
+ ih = TAILQ_NEXT(ih, ih_list)) {
+ intr_rc = (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
+ }
+ restore_interrupts(oldirqstate);
+
+ current_spl_level = pcpl;
+
+ /* Re-enable this interrupt now that's it's cleared. */
+ intr_enabled |= ibit;
+ footbridge_set_intrmask();
+
+		/* also check for any new interrupts that may have occurred,
+ * that we can handle at this spl level */
+ hwpend |= (footbridge_ipending & ICU_INT_HWMASK) & ~pcpl;
+ }
+
+	/* Check for pending soft intrs. */
+ if ((footbridge_ipending & INT_SWMASK) & ~current_spl_level) {
+ /*
+ * XXX this feels the wrong place to enable irqs, as some
+ * soft ints are higher priority than hardware irqs
+ */
+ oldirqstate = enable_interrupts(I32_bit);
+ footbridge_do_pending();
+ restore_interrupts(oldirqstate);
+ }
+}
diff --git a/sys/arch/arm/footbridge/footbridge_irqhandler.h b/sys/arch/arm/footbridge/footbridge_irqhandler.h
new file mode 100644
index 00000000000..cf0a7d109ca
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridge_irqhandler.h
@@ -0,0 +1,60 @@
+/* $OpenBSD: footbridge_irqhandler.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridge_irqhandler.h,v 1.2 2002/11/03 21:43:31 chris Exp $ */
+
+/*
+ * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FOOTBRIDGE_IRQHANDLER_H_
+#define _FOOTBRIDGE_IRQHANDLER_H_
+
+#ifndef _LOCORE
+#include <sys/types.h>
+#endif /* _LOCORE */
+
+#include <machine/intr.h>
+
+/*
+ * NOTE(review): footbridge_intr_init() and footbridge_intr_disestablish()
+ * are declared again inside the _KERNEL block below.  The duplicate
+ * prototypes are identical, so this is harmless, but they could be
+ * consolidated.
+ */
+void footbridge_intr_init(void);
+void *footbridge_intr_establish(int, int, int (*)(void *), void *);
+void footbridge_intr_disestablish(void *);
+
+#ifdef _KERNEL
+void *footbridge_intr_claim(int irq, int ipl, char *name, int (*func)(void *), void *arg);
+void footbridge_intr_init(void);
+void footbridge_intr_disestablish(void *cookie);
+#endif /* _KERNEL */
+
+#endif /* _FOOTBRIDGE_IRQHANDLER_H_ */
+
+/* End of irqhandler.h */
diff --git a/sys/arch/arm/footbridge/footbridge_machdep.c b/sys/arch/arm/footbridge/footbridge_machdep.c
new file mode 100644
index 00000000000..40a5a1b4798
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridge_machdep.c
@@ -0,0 +1,66 @@
+/* $OpenBSD: footbridge_machdep.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridge_machdep.c,v 1.8 2002/05/03 16:45:22 rjs Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <uvm/uvm_extern.h>
+#include <machine/pmap.h>
+#include <arm/footbridge/footbridge.h>
+#include <arm/footbridge/dc21285mem.h>
+
+/*
+ * For optimal cache cleaning we need two 16K banks of
+ * virtual address space that NOTHING else will access
+ * and then we alternate the cache cleaning between the
+ * two banks.
+ * The cache cleaning code requires the 2 banks to be aligned
+ * on a total-size boundary so the banks can be alternated by
+ * EORing the size bit (assumes the bank size is a power of 2).
+ *
+ * On the DC21285 we have a special cache clean area so we will
+ * use it.
+ */
+
+extern unsigned int sa1_cache_clean_addr;
+extern unsigned int sa1_cache_clean_size;
+
+void
+footbridge_sa110_cc_setup(void)
+{
+ /*
+ * Point the SA-110 cache-clean code at the DC21285 dedicated
+ * cache flush window; the clean area size is PAGE_SIZE * 4
+ * (presumably one bank of the alternating pair — see the
+ * comment above).
+ */
+ sa1_cache_clean_size = (PAGE_SIZE * 4);
+ sa1_cache_clean_addr = DC21285_CACHE_FLUSH_VBASE;
+}
diff --git a/sys/arch/arm/footbridge/footbridge_pci.c b/sys/arch/arm/footbridge/footbridge_pci.c
new file mode 100644
index 00000000000..ed06a2a6686
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridge_pci.c
@@ -0,0 +1,419 @@
+/* $OpenBSD: footbridge_pci.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridge_pci.c,v 1.4 2001/09/05 16:17:35 matt Exp $ */
+
+/*
+ * Copyright (c) 1997,1998 Mark Brinicombe.
+ * Copyright (c) 1997,1998 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+
+#define _ARM32_BUS_DMA_PRIVATE
+#include <machine/bus.h>
+#include <machine/intr.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <arm/footbridge/dc21285reg.h>
+#include <arm/footbridge/dc21285mem.h>
+
+#include "isa.h"
+#if NISA > 0
+#include <dev/isa/isavar.h>
+#endif
+
+void footbridge_pci_attach_hook __P((struct device *,
+ struct device *, struct pcibus_attach_args *));
+int footbridge_pci_bus_maxdevs __P((void *, int));
+pcitag_t footbridge_pci_make_tag __P((void *, int, int, int));
+void footbridge_pci_decompose_tag __P((void *, pcitag_t, int *,
+ int *, int *));
+pcireg_t footbridge_pci_conf_read __P((void *, pcitag_t, int));
+void footbridge_pci_conf_write __P((void *, pcitag_t, int,
+ pcireg_t));
+int footbridge_pci_intr_map __P((struct pci_attach_args *,
+ pci_intr_handle_t *));
+const char *footbridge_pci_intr_string __P((void *, pci_intr_handle_t));
+const struct evcnt *footbridge_pci_intr_evcnt __P((void *, pci_intr_handle_t));
+void *footbridge_pci_intr_establish (void *, pci_intr_handle_t,
+ int, int (*)(void *), void *, char *);
+void footbridge_pci_intr_disestablish (void *, void *);
+
+
+struct arm32_pci_chipset footbridge_pci_chipset = {
+ NULL, /* conf_v */
+#ifdef netwinder
+ netwinder_pci_attach_hook, /* netwinder supplies its own attach hook */
+#else
+ footbridge_pci_attach_hook,
+#endif
+ footbridge_pci_bus_maxdevs,
+ footbridge_pci_make_tag,
+ footbridge_pci_decompose_tag,
+ footbridge_pci_conf_read,
+ footbridge_pci_conf_write,
+ NULL, /* intr_v */
+ footbridge_pci_intr_map,
+ footbridge_pci_intr_string,
+ footbridge_pci_intr_evcnt,
+ footbridge_pci_intr_establish,
+ footbridge_pci_intr_disestablish
+};
+
+/*
+ * PCI doesn't have any special needs; just use the generic versions
+ * of these functions.
+ */
+struct arm32_bus_dma_tag footbridge_pci_bus_dma_tag = {
+ 0,
+ 0,
+ NULL,
+ _bus_dmamap_create,
+ _bus_dmamap_destroy,
+ _bus_dmamap_load,
+ _bus_dmamap_load_mbuf,
+ _bus_dmamap_load_uio,
+ _bus_dmamap_load_raw,
+ _bus_dmamap_unload,
+ _bus_dmamap_sync,
+ _bus_dmamem_alloc,
+ _bus_dmamem_free,
+ _bus_dmamem_map,
+ _bus_dmamem_unmap,
+ _bus_dmamem_mmap,
+};
+
+/*
+ * The device count is limited because we select the device directly
+ * in the type 0 config cycle (see conf_{read,write} for more detail).
+ * NOTE(review): this comment previously said 12 devices while
+ * MAX_PCI_DEVICES is defined as 21 — verify the intended limit.
+ */
+#define MAX_PCI_DEVICES 21
+
+/*static int
+pci_intr(void *arg)
+{
+ printf("pci int %x\n", (int)arg);
+ return(0);
+}*/
+
+
+void
+footbridge_pci_attach_hook(parent, self, pba)
+ struct device *parent, *self;
+ struct pcibus_attach_args *pba;
+{
+ /*
+ * Nothing to do at bus attach time; the commented-out intr_claim
+ * calls below were only used for PCI interrupt debugging.
+ */
+#ifdef PCI_DEBUG
+ printf("footbridge_pci_attach_hook()\n");
+#endif
+
+/* intr_claim(18, IPL_NONE, "pci int 0", pci_intr, (void *)0x10000);
+ intr_claim(8, IPL_NONE, "pci int 1", pci_intr, (void *)0x10001);
+ intr_claim(9, IPL_NONE, "pci int 2", pci_intr, (void *)0x10002);
+ intr_claim(11, IPL_NONE, "pci int 3", pci_intr, (void *)0x10003);*/
+}
+
+int
+footbridge_pci_bus_maxdevs(pcv, busno)
+ void *pcv;
+ int busno;
+{
+#ifdef PCI_DEBUG
+ printf("footbridge_pci_bus_maxdevs(pcv=%p, busno=%d)\n", pcv, busno);
+#endif
+ /* Same limit on every bus; see MAX_PCI_DEVICES above. */
+ return(MAX_PCI_DEVICES);
+}
+
+pcitag_t
+footbridge_pci_make_tag(pcv, bus, device, function)
+ void *pcv;
+ int bus, device, function;
+{
+ pcitag_t tag;
+
+#ifdef PCI_DEBUG
+ printf("footbridge_pci_make_tag(pcv=%p, bus=%d, device=%d, function=%d)\n",
+ pcv, bus, device, function);
+#endif
+ /* Pack bus/device/function into the config-cycle address layout. */
+ tag = (bus << 16) | (device << 11) | (function << 8);
+ return (tag);
+}
+
+void
+footbridge_pci_decompose_tag(pcv, tag, busp, devicep, functionp)
+ void *pcv;
+ pcitag_t tag;
+ int *busp, *devicep, *functionp;
+{
+#ifdef PCI_DEBUG
+ printf("footbridge_pci_decompose_tag(pcv=%p, tag=0x%08x, bp=%x, dp=%x, fp=%x)\n",
+ pcv, tag, busp, devicep, functionp);
+#endif
+
+ /* Inverse of footbridge_pci_make_tag(); NULL out-params are skipped. */
+ if (busp != NULL)
+ *busp = (tag >> 16) & 0xff;
+ if (devicep != NULL)
+ *devicep = (tag >> 11) & 0x1f;
+ if (functionp != NULL)
+ *functionp = (tag >> 8) & 0x7;
+}
+
+pcireg_t
+footbridge_pci_conf_read(pcv, tag, reg)
+ void *pcv;
+ pcitag_t tag;
+ int reg;
+{
+ int bus, device, function;
+ u_int address;
+ pcireg_t data;
+
+ footbridge_pci_decompose_tag(pcv, tag, &bus, &device, &function);
+ if (bus == 0)
+ /* Type 0 cycle: device select encoded directly in the
+ * address, which caps the device count (see MAX_PCI_DEVICES) */
+ address = DC21285_PCI_TYPE_0_CONFIG_VBASE | (3 << 22) | (device << 11);
+ else
+ address = DC21285_PCI_TYPE_1_CONFIG_VBASE | (device << 11) |
+ (bus << 16);
+
+ /* Function and register select occupy the low bits of the cycle. */
+ address |= (function << 8) | reg;
+
+ /* The config read is a plain load through the mapped window. */
+ data = *((unsigned int *)address);
+#ifdef PCI_DEBUG
+ printf("footbridge_pci_conf_read(pcv=%p tag=0x%08x reg=0x%02x)=0x%08x\n",
+ pcv, tag, reg, data);
+#endif
+ return(data);
+}
+
+void
+footbridge_pci_conf_write(pcv, tag, reg, data)
+ void *pcv;
+ pcitag_t tag;
+ int reg;
+ pcireg_t data;
+{
+ int bus, device, function;
+ u_int address;
+
+ /* Address construction mirrors footbridge_pci_conf_read(). */
+ footbridge_pci_decompose_tag(pcv, tag, &bus, &device, &function);
+ if (bus == 0)
+ address = DC21285_PCI_TYPE_0_CONFIG_VBASE | (3 << 22) | (device << 11);
+ else
+ address = DC21285_PCI_TYPE_1_CONFIG_VBASE | (device << 11) |
+ (bus << 16);
+
+ address |= (function << 8) | reg;
+
+#ifdef PCI_DEBUG
+ printf("footbridge_pci_conf_write(pcv=%p tag=0x%08x reg=0x%02x, 0x%08x)\n",
+ pcv, tag, reg, data);
+#endif
+
+ /* The config write is a plain store through the mapped window. */
+ *((unsigned int *)address) = data;
+}
+
+int
+footbridge_pci_intr_map(pa, ihp)
+ struct pci_attach_args *pa;
+ pci_intr_handle_t *ihp;
+{
+ int pin = pa->pa_intrpin, line = pa->pa_intrline;
+ int intr = -1;
+
+#ifdef PCI_DEBUG
+ void *pcv = pa->pa_pc;
+ pcitag_t intrtag = pa->pa_intrtag;
+ int bus, device, function;
+
+ footbridge_pci_decompose_tag(pcv, intrtag, &bus, &device, &function);
+ printf("footbride_pci_intr_map: pcv=%p, tag=%08lx pin=%d line=%d dev=%d\n",
+ pcv, intrtag, pin, line, device);
+#endif
+
+ /*
+ * Only the line is used to map the interrupt.
+ * The firmware is expected to set up the interrupt
+ * line as seen from the CPU
+ * This means the firmware deals with the interrupt rotation
+ * between slots etc.
+ *
+ * Perhaps the firmware should also do the final mapping
+ * to a 21285 interrupt bit so the code below would be
+ * completely MI.
+ */
+
+ switch (line) {
+ case PCI_INTERRUPT_PIN_NONE:
+ case 0xff:
+ /* No IRQ */
+ printf("pci_intr_map: no mapping for pin %c\n", '@' + pin);
+ *ihp = -1;
+ return(1);
+ break;
+#ifdef __cats__
+ /* This is machine dependent and needs to be moved */
+ case PCI_INTERRUPT_PIN_A:
+ intr = IRQ_PCI;
+ break;
+ case PCI_INTERRUPT_PIN_B:
+ intr = IRQ_IN_L0;
+ break;
+ case PCI_INTERRUPT_PIN_C:
+ intr = IRQ_IN_L1;
+ break;
+ case PCI_INTERRUPT_PIN_D:
+ intr = IRQ_IN_L3;
+ break;
+#endif
+ default:
+ /*
+ * Experimental firmware feature ...
+ *
+ * If the interrupt line is in the range 0x80 to 0x8F
+ * then the lower 4 bits indicate the ISA interrupt
+ * bit that should be used.
+ * If the interrupt line is in the range 0x40 to 0x5F
+ * then the lower 5 bits indicate the actual DC21285
+ * interrupt bit that should be used.
+ */
+
+ /* Note: the 0x80-0x8f encoding is kept as-is (including the
+ * flag bit) so intr_establish can route it to the ISA code. */
+ if (line >= 0x40 && line <= 0x5f)
+ intr = line & 0x1f;
+ else if (line >= 0x80 && line <= 0x8f)
+ intr = line;
+ else {
+ printf("footbridge_pci_intr_map: out of range interrupt"
+ "pin %d line %d (%#x)\n", pin, line, line);
+ *ihp = -1;
+ return(1);
+ }
+ break;
+ }
+
+#ifdef PCI_DEBUG
+ printf("pin %d, line %d mapped to int %d\n", pin, line, intr);
+#endif
+
+ *ihp = intr;
+ return(0);
+}
+
+const char *
+footbridge_pci_intr_string(pcv, ih)
+ void *pcv;
+ pci_intr_handle_t ih;
+{
+ /*
+ * Must hold the longest result, "isairq NN" (9 chars + NUL);
+ * the previous 8-byte buffer made snprintf truncate ISA
+ * interrupt names.
+ */
+ static char irqstr[16];
+
+#ifdef PCI_DEBUG
+ printf("footbridge_pci_intr_string(pcv=0x%p, ih=0x%lx)\n", pcv, ih);
+#endif
+ if (ih == 0)
+ panic("footbridge_pci_intr_string: bogus handle 0x%lx", ih);
+
+#if NISA > 0
+ /* ISA-routed interrupts carry the 0x80 flag from intr_map. */
+ if (ih >= 0x80 && ih <= 0x8f) {
+ snprintf(irqstr, sizeof(irqstr), "isairq %ld", (ih & 0x0f));
+ return(irqstr);
+ }
+#endif
+ snprintf(irqstr, sizeof(irqstr), "irq %ld", ih);
+ return(irqstr);
+}
+
+#if 0
+/* Disabled: event-counter support is not wired up (no evcnt parent). */
+const struct evcnt *
+footbridge_pci_intr_evcnt(pcv, ih)
+ void *pcv;
+ pci_intr_handle_t ih;
+{
+
+ /* XXX for now, no evcnt parent reported */
+ return NULL;
+}
+#endif
+
+void *
+footbridge_pci_intr_establish(pcv, ih, level, func, arg, name)
+ void *pcv;
+ pci_intr_handle_t ih;
+ int level, (*func) __P((void *));
+ void *arg;
+ char *name;
+{
+ void *intr;
+ int length;
+ char *string;
+
+#ifdef PCI_DEBUG
+ printf("footbridge_pci_intr_establish(pcv=%p, ih=0x%lx, level=%d, func=%p, arg=%p)\n",
+ pcv, ih, level, func, arg);
+#endif
+
+ /*
+ * Copy the interrupt string to a private buffer.
+ * strlcpy()'s size argument counts the terminating NUL, so it
+ * must be length + 1 here; passing just length silently dropped
+ * the last character of the name.
+ */
+ length = strlen(footbridge_pci_intr_string(pcv, ih));
+ string = malloc(length + 1, M_DEVBUF, M_WAITOK);
+ strlcpy(string, footbridge_pci_intr_string(pcv, ih), length + 1);
+#if NISA > 0
+ /*
+ * XXX the IDE driver will attach the interrupts in compat mode and
+ * thus we need to fail this here.
+ * This assumes that the interrupts are 14 and 15 which they are for
+ * IDE compat mode.
+ * Really the firmware should make this clear in the interrupt reg.
+ */
+ if (ih >= 0x80 && ih <= 0x8d) {
+ intr = isa_intr_establish(NULL, (ih & 0x0f), IST_EDGE,
+ level, func, arg, string);
+ } else
+#endif
+ intr = footbridge_intr_claim(ih, level, string, func, arg);
+
+ return(intr);
+}
+
+void
+footbridge_pci_intr_disestablish(pcv, cookie)
+ void *pcv;
+ void *cookie;
+{
+#ifdef PCI_DEBUG
+ printf("footbridge_pci_intr_disestablish(pcv=%p, cookie=0x%x)\n",
+ pcv, cookie);
+#endif
+ /* XXXX Need to free the string */
+ /* (the name buffer malloc'd in intr_establish is leaked here) */
+
+ footbridge_intr_disestablish(cookie);
+}
diff --git a/sys/arch/arm/footbridge/footbridgevar.h b/sys/arch/arm/footbridge/footbridgevar.h
new file mode 100644
index 00000000000..04cbe1ce74f
--- /dev/null
+++ b/sys/arch/arm/footbridge/footbridgevar.h
@@ -0,0 +1,96 @@
+/* $OpenBSD: footbridgevar.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: footbridgevar.h,v 1.2 2002/02/10 12:26:00 chris Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/bus.h>
+#include <machine/rtc.h>
+#include <dev/pci/pcivar.h>
+#include <arm/footbridge/todclockvar.h>
+
+/*
+ * DC21285 softc structure.
+ *
+ * Contains the device node, bus space tag, handle and address
+ */
+
+struct footbridge_softc {
+ struct device sc_dev; /* device node */
+ bus_space_tag_t sc_iot; /* bus tag */
+ bus_space_handle_t sc_ioh; /* bus handle */
+
+ /* Clock related variables - used in footbridge_clock.c */
+ unsigned int sc_clock_ticks_per_256us;
+ unsigned int sc_clock_count;
+ void *sc_clockintr;
+ unsigned int sc_statclock_count;
+ void *sc_statclockintr;
+
+ /* Miscellaneous interrupts */
+ void * sc_serr_ih;
+ void * sc_sdram_par_ih;
+ void * sc_data_par_ih;
+ void * sc_master_abt_ih;
+ void * sc_target_abt_ih;
+ void * sc_parity_ih;
+};
+
+/*
+ * Attach args for child devices
+ */
+
+union footbridge_attach_args {
+ const char *fba_name; /* first element*/
+ struct {
+ bus_space_tag_t fba_iot; /* Bus tag */
+ bus_space_handle_t fba_ioh; /* Bus handle */
+ } fba_fba;
+ struct pcibus_attach_args fba_pba; /* pci attach args */
+ struct todclock_attach_args fba_tca;
+ struct fcom_attach_args {
+ char *fca_name;
+ bus_space_tag_t fca_iot;
+ bus_space_handle_t fca_ioh;
+ int fca_rx_irq;
+ int fca_tx_irq;
+ } fba_fca;
+/* struct clock_attach_args {
+ char *ca_name;
+ bus_space_tag_t ca_iot;
+ bus_space_handle_t ca_ioh;
+ } fba_ca;*/
+};
+
+/* End of footbridgevar.h */
diff --git a/sys/arch/arm/footbridge/genassym.cf b/sys/arch/arm/footbridge/genassym.cf
new file mode 100644
index 00000000000..864c1448d12
--- /dev/null
+++ b/sys/arch/arm/footbridge/genassym.cf
@@ -0,0 +1,47 @@
+# $OpenBSD: genassym.cf,v 1.1 2004/02/01 05:09:49 drahn Exp $
+# $NetBSD: genassym.cf,v 1.2 2001/12/20 01:20:23 thorpej Exp $
+
+# Copyright (c) 1982, 1990 The Regents of the University of California.
+# All rights reserved.
+#
+# This code is derived from software contributed to Berkeley by
+# William Jolitz.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. All advertising materials mentioning features or use of this software
+# must display the following acknowledgement:
+# This product includes software developed by the University of
+# California, Berkeley and its contributors.
+# 4. Neither the name of the University nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+include <machine/intr.h>
+
+define IH_FUNC offsetof(struct irqhandler, ih_func)
+define IH_ARG offsetof(struct irqhandler, ih_arg)
+define IH_FLAGS offsetof(struct irqhandler, ih_flags)
+define IH_LEVEL offsetof(struct irqhandler, ih_level)
+define IH_NUM offsetof(struct irqhandler, ih_num)
+define IH_MASKADDR offsetof(struct irqhandler, ih_maskaddr)
+define IH_MASKBITS offsetof(struct irqhandler, ih_maskbits)
+define IH_NEXT offsetof(struct irqhandler, ih_next)
diff --git a/sys/arch/arm/footbridge/isa/ds1687reg.h b/sys/arch/arm/footbridge/isa/ds1687reg.h
new file mode 100644
index 00000000000..5f5e1ff9f1c
--- /dev/null
+++ b/sys/arch/arm/footbridge/isa/ds1687reg.h
@@ -0,0 +1,129 @@
+/* $OpenBSD: ds1687reg.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: ds1687reg.h,v 1.1 2002/02/10 12:26:01 chris Exp $ */
+
+/*
+ * Copyright (c) 1998 Mark Brinicombe.
+ * Copyright (c) 1998 Causality Limited.
+ * All rights reserved.
+ *
+ * Written by Mark Brinicombe, Causality Limited
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUASLITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define RTC_ADDR 0x72
+#define RTC_ADDR_REG 0x00
+#define RTC_DATA_REG 0x01
+
+#define RTC_SECONDS 0x00
+#define RTC_SECONDS_ALARM 0x01
+#define RTC_MINUTES 0x02
+#define RTC_MINUTES_ALARM 0x03
+#define RTC_HOURS 0x04
+#define RTC_HOURS_ALARM 0x05
+#define RTC_DAYOFWEEK 0x06
+#define RTC_DAYOFMONTH 0x07
+#define RTC_MONTH 0x08
+#define RTC_YEAR 0x09
+
+#define RTC_REG_A 0x0a
+#define RTC_REG_A_UIP 0x80 /* Update In Progress */
+#define RTC_REG_A_DV2 0x40 /* Countdown CHain */
+#define RTC_REG_A_DV1 0x20 /* Oscillator Enable */
+#define RTC_REG_A_DV0 0x10 /* Bank Select */
+#define RTC_REG_A_BANK_MASK RTC_REG_A_DV0
+#define RTC_REG_A_BANK1 RTC_REG_A_DV0
+#define RTC_REG_A_BANK0 0x00
+#define RTC_REG_A_RS_MASK 0x0f /* Rate select mask */
+#define RTC_REG_A_RS_NONE 0x00
+#define RTC_REG_A_RS_256HZ_1 0x01
+#define RTC_REG_A_RS_128HZ_1 0x02
+#define RTC_REG_A_RS_8192HZ 0x03
+#define RTC_REG_A_RS_4096HZ 0x04
+#define RTC_REG_A_RS_2048HZ 0x05
+#define RTC_REG_A_RS_1024HZ 0x06
+#define RTC_REG_A_RS_512HZ 0x07
+#define RTC_REG_A_RS_256HZ 0x08
+#define RTC_REG_A_RS_128HZ 0x09
+#define RTC_REG_A_RS_64HZ 0x0A
+#define RTC_REG_A_RS_32HZ 0x0B
+#define RTC_REG_A_RS_16HZ 0x0C
+#define RTC_REG_A_RS_8HZ 0x0D
+#define RTC_REG_A_RS_4HZ 0x0E
+#define RTC_REG_A_RS_2HZ 0x0F
+
+#define RTC_REG_B 0x0b
+#define RTC_REG_B_SET 0x80 /* Inhibit update */
+#define RTC_REG_B_PIE 0x40 /* Periodic Interrupt Enable */
+#define RTC_REG_B_AIE 0x20 /* Alarm Interrupt Enable */
+#define RTC_REG_B_UIE 0x10 /* Updated Ended Interrupt Enable */
+#define RTC_REG_B_SQWE 0x08 /* Square Wave Enable */
+#define RTC_REG_B_DM 0x04 /* Data Mode */
+#define RTC_REG_B_BINARY RTC_REG_B_DM
+#define RTC_REG_B_BCD 0
+#define RTC_REG_B_24_12 0x02 /* Hour format */
+#define RTC_REG_B_24_HOUR RTC_REG_B_24_12
+#define RTC_REG_B_12_HOUR 0
+#define RTC_REG_B_DSE 0x01 /* Daylight Savings Enable */
+
+#define RTC_REG_C 0x0c
+#define RTC_REG_C_IRQF 0x80 /* Interrupt Request Flag */
+#define RTC_REG_C_PF 0x40 /* Periodic Interrupt Flag */
+#define RTC_REG_C_AF 0x20 /* Alarm Interrupt Flag */
+#define RTC_REG_C_UF 0x10 /* Update Ended Flags */
+
+#define RTC_REG_D 0x0d
+#define RTC_REG_D_VRT 0x80 /* Valid RAM and Time */
+
+#define RTC_PC_RAM_START 0x0e
+#define RTC_PC_RAM_SIZE 50
+
+#define RTC_BANK0_RAM_START 0x40
+#define RTC_BANK0_RAM_SIZE 0x40
+
+#define RTC_MODEL 0x40
+#define RTC_SERIAL_1 0x41
+#define RTC_SERIAL_2 0x42
+#define RTC_SERIAL_3 0x43
+#define RTC_SERIAL_4 0x44
+#define RTC_SERIAL_5 0x45
+#define RTC_SERIAL_6 0x46
+#define RTC_CRC 0x47
+#define RTC_CENTURY 0x48
+#define RTC_DATE_ALARM 0x49
+#define RTC_REG_4A 0x4a
+#define RTC_REG_4A_VRT2 0x80
+#define RTC_REG_4A_INCR 0x40
+#define RTC_REG_4A_PAB 0x08
+#define RTC_REG_4A_RF 0x04
+#define RTC_REG_4B 0x4b
+#define RTC_EXT_RAM_ADDRESS 0x50
+#define RTC_EXT_RAM_DATA 0x53
+#define RTC_EXT_RAM_START 0x00
+#define RTC_EXT_RAM_SIZE 0x80
diff --git a/sys/arch/arm/footbridge/isa/dsrtc.c b/sys/arch/arm/footbridge/isa/dsrtc.c
new file mode 100644
index 00000000000..b909ed406e1
--- /dev/null
+++ b/sys/arch/arm/footbridge/isa/dsrtc.c
@@ -0,0 +1,279 @@
+/* $OpenBSD: dsrtc.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: dsrtc.c,v 1.5 2003/03/23 14:12:26 chris Exp $ */
+
+/*
+ * Copyright (c) 1998 Mark Brinicombe.
+ * Copyright (c) 1998 Causality Limited.
+ * All rights reserved.
+ *
+ * Written by Mark Brinicombe, Causality Limited
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUASLITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/conf.h>
+#include <sys/device.h>
+
+#include <machine/rtc.h>
+
+#include <arm/footbridge/todclockvar.h>
+#include <arm/footbridge/isa/ds1687reg.h>
+
+#include <dev/isa/isavar.h>
+
+#define NRTC_PORTS 2
+
+struct dsrtc_softc {
+ struct device sc_dev;
+ bus_space_tag_t sc_iot;
+ bus_space_handle_t sc_ioh;
+};
+
+void dsrtcattach __P((struct device *parent, struct device *self, void *aux));
+int dsrtcmatch __P((struct device *parent, void *cf, void *aux));
+int ds1687_read __P((struct dsrtc_softc *sc, int addr));
+void ds1687_write __P((struct dsrtc_softc *sc, int addr, int data));
+int ds1687_ram_read __P((struct dsrtc_softc *sc, int addr));
+void ds1687_ram_write __P((struct dsrtc_softc *sc, int addr, int data));
+static void ds1687_bank_select __P((struct dsrtc_softc *, int));
+static int dsrtc_write __P((void *, rtc_t *));
+static int dsrtc_read __P((void *, rtc_t *));
+
+int
+ds1687_read(sc, addr)
+ struct dsrtc_softc *sc;
+ int addr;
+{
+
+ bus_space_write_1(sc->sc_iot, sc->sc_ioh, RTC_ADDR_REG, addr);
+ return(bus_space_read_1(sc->sc_iot, sc->sc_ioh, RTC_DATA_REG));
+}
+
+void
+ds1687_write(sc, addr, data)
+ struct dsrtc_softc *sc;
+ int addr;
+ int data;
+{
+
+ bus_space_write_1(sc->sc_iot, sc->sc_ioh, RTC_ADDR_REG, addr);
+ bus_space_write_1(sc->sc_iot, sc->sc_ioh, RTC_DATA_REG, data);
+}
+
+static void
+ds1687_bank_select(sc, bank)
+ struct dsrtc_softc *sc;
+ int bank;
+{
+ int data;
+
+ data = ds1687_read(sc, RTC_REG_A);
+ data &= ~RTC_REG_A_BANK_MASK;
+ if (bank)
+ data |= RTC_REG_A_BANK1;
+ ds1687_write(sc, RTC_REG_A, data);
+}
+
+#if 0
+/* Nothing uses these yet */
+int
+ds1687_ram_read(sc, addr)
+ struct dsrtc_softc *sc;
+ int addr;
+{
+ if (addr < RTC_PC_RAM_SIZE)
+ return(ds1687_read(sc, RTC_PC_RAM_START + addr));
+
+ addr -= RTC_PC_RAM_SIZE;
+ if (addr < RTC_BANK0_RAM_SIZE)
+ return(ds1687_read(sc, RTC_BANK0_RAM_START + addr));
+
+ addr -= RTC_BANK0_RAM_SIZE;
+ if (addr < RTC_EXT_RAM_SIZE) {
+ int data;
+
+ ds1687_bank_select(sc, 1);
+ ds1687_write(sc, RTC_EXT_RAM_ADDRESS, addr);
+ data = ds1687_read(sc, RTC_EXT_RAM_DATA);
+ ds1687_bank_select(sc, 0);
+ return(data);
+ }
+ return(-1);
+}
+
+void
+ds1687_ram_write(sc, addr, val)
+ struct dsrtc_softc *sc;
+ int addr;
+ int val;
+{
+ if (addr < RTC_PC_RAM_SIZE)
+ return(ds1687_write(sc, RTC_PC_RAM_START + addr, val));
+
+ addr -= RTC_PC_RAM_SIZE;
+ if (addr < RTC_BANK0_RAM_SIZE)
+ return(ds1687_write(sc, RTC_BANK0_RAM_START + addr, val));
+
+ addr -= RTC_BANK0_RAM_SIZE;
+ if (addr < RTC_EXT_RAM_SIZE) {
+ ds1687_bank_select(sc, 1);
+ ds1687_write(sc, RTC_EXT_RAM_ADDRESS, addr);
+ ds1687_write(sc, RTC_EXT_RAM_DATA, val);
+ ds1687_bank_select(sc, 0);
+ }
+}
+#endif
+
+static int
+dsrtc_write(arg, rtc)
+ void *arg;
+ rtc_t *rtc;
+{
+ struct dsrtc_softc *sc = arg;
+
+ ds1687_write(sc, RTC_SECONDS, rtc->rtc_sec);
+ ds1687_write(sc, RTC_MINUTES, rtc->rtc_min);
+ ds1687_write(sc, RTC_HOURS, rtc->rtc_hour);
+ ds1687_write(sc, RTC_DAYOFMONTH, rtc->rtc_day);
+ ds1687_write(sc, RTC_MONTH, rtc->rtc_mon);
+ ds1687_write(sc, RTC_YEAR, rtc->rtc_year);
+ ds1687_bank_select(sc, 1);
+ ds1687_write(sc, RTC_CENTURY, rtc->rtc_cen);
+ ds1687_bank_select(sc, 0);
+ return(1);
+}
+
+static int
+dsrtc_read(arg, rtc)
+ void *arg;
+ rtc_t *rtc;
+{
+ struct dsrtc_softc *sc = arg;
+
+ rtc->rtc_micro = 0;
+ rtc->rtc_centi = 0;
+ rtc->rtc_sec = ds1687_read(sc, RTC_SECONDS);
+ rtc->rtc_min = ds1687_read(sc, RTC_MINUTES);
+ rtc->rtc_hour = ds1687_read(sc, RTC_HOURS);
+ rtc->rtc_day = ds1687_read(sc, RTC_DAYOFMONTH);
+ rtc->rtc_mon = ds1687_read(sc, RTC_MONTH);
+ rtc->rtc_year = ds1687_read(sc, RTC_YEAR);
+ ds1687_bank_select(sc, 1);
+ rtc->rtc_cen = ds1687_read(sc, RTC_CENTURY);
+ ds1687_bank_select(sc, 0);
+
+ return(1);
+}
+
+/* device and attach structures */
+struct cfattach ds1687rtc_ca = {
+ sizeof(struct dsrtc_softc), dsrtcmatch, dsrtcattach
+};
+
+struct cfdriver ds1687rtc_cd = {
+ NULL, "dsrtc", DV_DULL
+};
+
+/*
+ * dsrtcmatch()
+ *
+ * Validate the IIC address to make sure its an RTC we understand
+ */
+
+int
+dsrtcmatch(parent, cf, aux)
+ struct device *parent;
+ void *cf;
+ void *aux;
+{
+ struct isa_attach_args *ia = aux;
+
+#ifdef __NetBSD__
+ if (ia->ia_nio < 1 ||
+ ia->ia_iobase == -1)
+ return (0);
+
+ ia->ia_nio = 1;
+ ia->ia_io[0].ir_size = NRTC_PORTS;
+#else
+ ia->ia_iosize = NRTC_PORTS;
+#endif
+
+
+#if 0
+ ia->ia_niomem = 0;
+ ia->ia_nirq = 0;
+ ia->ia_ndrq = 0;
+#endif
+
+ return(1);
+}
+
+/*
+ * dsrtcattach()
+ *
+ * Attach the rtc device
+ */
+
+void
+dsrtcattach(parent, self, aux)
+ struct device *parent;
+ struct device *self;
+ void *aux;
+{
+ struct dsrtc_softc *sc = (struct dsrtc_softc *)self;
+ struct isa_attach_args *ia = aux;
+ struct todclock_attach_args ta;
+
+ sc->sc_iot = ia->ia_iot;
+ if (bus_space_map(sc->sc_iot, ia->ia_iobase,
+ ia->ia_iosize, 0, &sc->sc_ioh)) {
+ printf(": cannot map I/O space\n");
+ return;
+ }
+
+ ds1687_write(sc, RTC_REG_A, RTC_REG_A_DV1);
+ ds1687_write(sc, RTC_REG_B, RTC_REG_B_BINARY | RTC_REG_B_24_HOUR);
+
+ if (!(ds1687_read(sc, RTC_REG_D) & RTC_REG_D_VRT))
+ printf(": lithium cell is dead, RTC unreliable");
+ printf("\n");
+
+ ta.ta_name = "todclock";
+ ta.ta_rtc_arg = sc;
+ ta.ta_rtc_write = dsrtc_write;
+ ta.ta_rtc_read = dsrtc_read;
+ ta.ta_flags = 0;
+ config_found(self, &ta, NULL);
+}
+
+/* End of dsrtc.c */
diff --git a/sys/arch/arm/footbridge/isa/icu.h b/sys/arch/arm/footbridge/isa/icu.h
new file mode 100644
index 00000000000..357a0d8e5a0
--- /dev/null
+++ b/sys/arch/arm/footbridge/isa/icu.h
@@ -0,0 +1,71 @@
+/* $OpenBSD: icu.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: icu.h,v 1.1 2002/02/10 12:26:01 chris Exp $ */
+
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)icu.h 5.6 (Berkeley) 5/9/91
+ */
+
+/*
+ * AT/386 Interrupt Control constants
+ * W. Jolitz 8/89
+ */
+
+#ifndef _ARM32_ISA_ICU_H_
+#define _ARM32_ISA_ICU_H_
+
+#ifndef _LOCORE
+
+/*
+ * Interrupt "level" mechanism variables, masks, and macros
+ */
+extern unsigned imen; /* interrupt mask enable */
+
+#define SET_ICUS() { \
+ outb(IO_ICU1 + 1, imen); \
+ outb(IO_ICU2 + 1, imen >> 8); \
+}
+
+#endif /* !_LOCORE */
+
+/*
+ * Interrupt enable bits -- in order of priority
+ */
+#define IRQ_SLAVE 2
+
+/*
+ * Interrupt Control offset into Interrupt descriptor table (IDT)
+ */
+#define ICU_OFFSET 32 /* 0-31 are processor exceptions */
+#define ICU_LEN 16 /* 32-47 are ISA interrupts */
+
+#endif /* !_ARM32_ISA_ICU_H_ */
diff --git a/sys/arch/arm/footbridge/isa/isa_io.c b/sys/arch/arm/footbridge/isa/isa_io.c
new file mode 100644
index 00000000000..fbb1253c91f
--- /dev/null
+++ b/sys/arch/arm/footbridge/isa/isa_io.c
@@ -0,0 +1,321 @@
+/* $OpenBSD: isa_io.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: isa_io.c,v 1.2 2002/09/27 15:35:44 provos Exp $ */
+
+/*
+ * Copyright 1997
+ * Digital Equipment Corporation. All rights reserved.
+ *
+ * This software is furnished under license and may be used and
+ * copied only in accordance with the following terms and conditions.
+ * Subject to these conditions, you may download, copy, install,
+ * use, modify and distribute this software in source and/or binary
+ * form. No title or ownership is transferred hereby.
+ *
+ * 1) Any source code used, modified or distributed must reproduce
+ * and retain this copyright notice and list of conditions as
+ * they appear in the source file.
+ *
+ * 2) No right is granted to use any trade name, trademark, or logo of
+ * Digital Equipment Corporation. Neither the "Digital Equipment
+ * Corporation" name nor any trademark or logo of Digital Equipment
+ * Corporation may be used to endorse or promote products derived
+ * from this software without the prior written permission of
+ * Digital Equipment Corporation.
+ *
+ * 3) This software is provided "AS-IS" and any express or implied
+ * warranties, including but not limited to, any implied warranties
+ * of merchantability, fitness for a particular purpose, or
+ * non-infringement are disclaimed. In no event shall DIGITAL be
+ * liable for any damages whatsoever, and in particular, DIGITAL
+ * shall not be liable for special, indirect, consequential, or
+ * incidental damages or damages for lost profits, loss of
+ * revenue or loss of use, whether such damages arise in contract,
+ * negligence, tort, under statute, in equity, at law or otherwise,
+ * even if advised of the possibility of such damage.
+ */
+
+/*
+ * bus_space I/O functions for isa
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <machine/bus.h>
+#include <machine/pio.h>
+#include <machine/isa_machdep.h>
+
+/* Proto types for all the bus_space structure functions */
+
+bs_protos(isa);
+bs_protos(bs_notimpl);
+
+/*
+ * Declare the isa bus space tags
+ * The IO and MEM structs are identical, except for the cookies,
+ * which contain the address space bases.
+ */
+
+/*
+ * NOTE: ASSEMBLY LANGUAGE RELIES ON THE COOKIE -- THE FIRST MEMBER OF
+ * THIS STRUCTURE -- TO BE THE VIRTUAL ADDRESS OF ISA/IO!
+ */
+struct bus_space isa_io_bs_tag = {
+ /* cookie */
+ NULL, /* initialized below */
+
+ /* mapping/unmapping */
+ isa_bs_map,
+ isa_bs_unmap,
+ isa_bs_subregion,
+
+ /* allocation/deallocation */
+ isa_bs_alloc,
+ isa_bs_free,
+
+ /* get kernel virtual address */
+ isa_bs_vaddr,
+
+ /* mmap bus space for userland */
+ bs_notimpl_bs_mmap, /* XXX possible even? XXX */
+
+ /* barrier */
+ isa_bs_barrier,
+
+ /* read (single) */
+ isa_bs_r_1,
+ isa_bs_r_2,
+ isa_bs_r_4,
+ bs_notimpl_bs_r_8,
+
+ /* read multiple */
+ isa_bs_rm_1,
+ isa_bs_rm_2,
+ isa_bs_rm_4,
+ bs_notimpl_bs_rm_8,
+
+ /* read region */
+ isa_bs_rr_1,
+ isa_bs_rr_2,
+ isa_bs_rr_4,
+ bs_notimpl_bs_rr_8,
+
+ /* write (single) */
+ isa_bs_w_1,
+ isa_bs_w_2,
+ isa_bs_w_4,
+ bs_notimpl_bs_w_8,
+
+ /* write multiple */
+ isa_bs_wm_1,
+ isa_bs_wm_2,
+ isa_bs_wm_4,
+ bs_notimpl_bs_wm_8,
+
+ /* write region */
+ isa_bs_wr_1,
+ isa_bs_wr_2,
+ isa_bs_wr_4,
+ bs_notimpl_bs_wr_8,
+
+ /* set multiple */
+ bs_notimpl_bs_sm_1,
+ bs_notimpl_bs_sm_2,
+ bs_notimpl_bs_sm_4,
+ bs_notimpl_bs_sm_8,
+
+ /* set region */
+ bs_notimpl_bs_sr_1,
+ isa_bs_sr_2,
+ bs_notimpl_bs_sr_4,
+ bs_notimpl_bs_sr_8,
+
+ /* copy */
+ bs_notimpl_bs_c_1,
+ bs_notimpl_bs_c_2,
+ bs_notimpl_bs_c_4,
+ bs_notimpl_bs_c_8,
+};
+
+/*
+ * NOTE: ASSEMBLY LANGUAGE RELIES ON THE COOKIE -- THE FIRST MEMBER OF
+ * THIS STRUCTURE -- TO BE THE VIRTUAL ADDRESS OF ISA/MEMORY!
+ */
+struct bus_space isa_mem_bs_tag = {
+ /* cookie */
+ NULL, /* initialized below */
+
+ /* mapping/unmapping */
+ isa_bs_map,
+ isa_bs_unmap,
+ isa_bs_subregion,
+
+ /* allocation/deallocation */
+ isa_bs_alloc,
+ isa_bs_free,
+
+ /* get kernel virtual address */
+ isa_bs_vaddr,
+
+ /* mmap bus space for userland */
+ bs_notimpl_bs_mmap, /* XXX open for now ... XXX */
+
+ /* barrier */
+ isa_bs_barrier,
+
+ /* read (single) */
+ isa_bs_r_1,
+ isa_bs_r_2,
+ isa_bs_r_4,
+ bs_notimpl_bs_r_8,
+
+ /* read multiple */
+ isa_bs_rm_1,
+ isa_bs_rm_2,
+ isa_bs_rm_4,
+ bs_notimpl_bs_rm_8,
+
+ /* read region */
+ isa_bs_rr_1,
+ isa_bs_rr_2,
+ isa_bs_rr_4,
+ bs_notimpl_bs_rr_8,
+
+ /* write (single) */
+ isa_bs_w_1,
+ isa_bs_w_2,
+ isa_bs_w_4,
+ bs_notimpl_bs_w_8,
+
+ /* write multiple */
+ isa_bs_wm_1,
+ isa_bs_wm_2,
+ isa_bs_wm_4,
+ bs_notimpl_bs_wm_8,
+
+ /* write region */
+ isa_bs_wr_1,
+ isa_bs_wr_2,
+ isa_bs_wr_4,
+ bs_notimpl_bs_wr_8,
+
+ /* set multiple */
+ bs_notimpl_bs_sm_1,
+ bs_notimpl_bs_sm_2,
+ bs_notimpl_bs_sm_4,
+ bs_notimpl_bs_sm_8,
+
+ /* set region */
+ bs_notimpl_bs_sr_1,
+ isa_bs_sr_2,
+ bs_notimpl_bs_sr_4,
+ bs_notimpl_bs_sr_8,
+
+ /* copy */
+ bs_notimpl_bs_c_1,
+ bs_notimpl_bs_c_2,
+ bs_notimpl_bs_c_4,
+ bs_notimpl_bs_c_8,
+};
+
+/* bus space functions */
+
+void
+isa_io_init(isa_io_addr, isa_mem_addr)
+ vm_offset_t isa_io_addr;
+ vm_offset_t isa_mem_addr;
+{
+ isa_io_bs_tag.bs_cookie = (void *)isa_io_addr;
+ isa_mem_bs_tag.bs_cookie = (void *)isa_mem_addr;
+}
+
+/*
+ * break the abstraction: sometimes, other parts of the system
+ * (e.g. X servers) need to map ISA space directly. use these
+ * functions sparingly!
+ */
+vm_offset_t
+isa_io_data_vaddr(void)
+{
+ return (vm_offset_t)isa_io_bs_tag.bs_cookie;
+}
+
+vm_offset_t
+isa_mem_data_vaddr(void)
+{
+ return (vm_offset_t)isa_mem_bs_tag.bs_cookie;
+}
+
+int
+isa_bs_map(t, bpa, size, cacheable, bshp)
+ void *t;
+ bus_addr_t bpa;
+ bus_size_t size;
+ int cacheable;
+ bus_space_handle_t *bshp;
+{
+ *bshp = bpa + (bus_addr_t)t;
+ return(0);
+}
+
+void
+isa_bs_unmap(t, bsh, size)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+ /* Nothing to do. */
+}
+
+int
+isa_bs_subregion(t, bsh, offset, size, nbshp)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, size;
+ bus_space_handle_t *nbshp;
+{
+/* printf("isa_subregion(tag=%p, bsh=%lx, off=%lx, sz=%lx)\n",
+ t, bsh, offset, size);*/
+ *nbshp = bsh + offset;
+ return(0);
+}
+
+int
+isa_bs_alloc(t, rstart, rend, size, alignment, boundary, cacheable,
+ bpap, bshp)
+ void *t;
+ bus_addr_t rstart, rend;
+ bus_size_t size, alignment, boundary;
+ int cacheable;
+ bus_addr_t *bpap;
+ bus_space_handle_t *bshp;
+{
+ panic("isa_alloc(): Help!");
+}
+
+void
+isa_bs_free(t, bsh, size)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+ panic("isa_free(): Help!");
+}
+
+void *
+isa_bs_vaddr(t, bsh)
+ void *t;
+ bus_space_handle_t bsh;
+{
+
+ return ((void *)bsh);
+}
+
+void
+isa_bs_barrier(t, bsh, offset, len, flags)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, len;
+ int flags;
+{
+ /* just return */
+}
diff --git a/sys/arch/arm/footbridge/isa/isa_io_asm.S b/sys/arch/arm/footbridge/isa/isa_io_asm.S
new file mode 100644
index 00000000000..6e618e161b6
--- /dev/null
+++ b/sys/arch/arm/footbridge/isa/isa_io_asm.S
@@ -0,0 +1,342 @@
+/* $OpenBSD: isa_io_asm.S,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: isa_io_asm.S,v 1.1 2002/02/10 12:26:01 chris Exp $ */
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Mark Brinicombe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright 1997
+ * Digital Equipment Corporation. All rights reserved.
+ *
+ * This software is furnished under license and may be used and
+ * copied only in accordance with the following terms and conditions.
+ * Subject to these conditions, you may download, copy, install,
+ * use, modify and distribute this software in source and/or binary
+ * form. No title or ownership is transferred hereby.
+ *
+ * 1) Any source code used, modified or distributed must reproduce
+ * and retain this copyright notice and list of conditions as
+ * they appear in the source file.
+ *
+ * 2) No right is granted to use any trade name, trademark, or logo of
+ * Digital Equipment Corporation. Neither the "Digital Equipment
+ * Corporation" name nor any trademark or logo of Digital Equipment
+ * Corporation may be used to endorse or promote products derived
+ * from this software without the prior written permission of
+ * Digital Equipment Corporation.
+ *
+ * 3) This software is provided "AS-IS" and any express or implied
+ * warranties, including but not limited to, any implied warranties
+ * of merchantability, fitness for a particular purpose, or
+ * non-infringement are disclaimed. In no event shall DIGITAL be
+ * liable for any damages whatsoever, and in particular, DIGITAL
+ * shall not be liable for special, indirect, consequential, or
+ * incidental damages or damages for lost profits, loss of
+ * revenue or loss of use, whether such damages arise in contract,
+ * negligence, tort, under statute, in equity, at law or otherwise,
+ * even if advised of the possibility of such damage.
+ */
+
+/*
+ * bus_space I/O functions for isa
+ */
+
+#include <machine/asm.h>
+
+#ifdef GPROF
+#define PAUSE nop ; nop ; nop ; nop ; nop
+#else
+#define PAUSE
+#endif
+
+/*
+ * Note these functions use ARM Architecture V4 instructions as
+ * all current systems with ISA will be using processors that support
+ * V4 or later architectures (SHARK & CATS)
+ */
+
+/*
+ * read single
+ */
+
+ENTRY(isa_bs_r_1)
+ ldrb r0, [r1, r2]
+ PAUSE
+ mov pc, lr
+
+ENTRY(isa_bs_r_2)
+ ldrh r0, [r1, r2] /*.word 0xe19100b2*/
+ PAUSE
+ mov pc, lr
+
+ENTRY(isa_bs_r_4)
+ ldr r0, [r1, r2]
+ PAUSE
+ mov pc, lr
+
+/*
+ * read multiple.
+ */
+
+ENTRY(isa_bs_rm_1)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_rm_1_loop:
+ ldrb r3, [r0]
+ strb r3, [r1], #1
+ subs r2, r2, #1
+ bne Lisa_rm_1_loop
+
+ mov pc, lr
+
+ENTRY(isa_bs_rm_2)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_rm_2_loop:
+ ldrh r3, [r0] /*.word 0xe1d030b0*/
+ strh r3, [r1], #2 /*.word 0xe0c130b2*/
+ subs r2, r2, #1
+ bne Lisa_rm_2_loop
+
+ mov pc, lr
+
+ENTRY(isa_bs_rm_4)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_rm_4_loop:
+ ldr r3, [r0]
+ str r3, [r1], #4
+ subs r2, r2, #1
+ bne Lisa_rm_4_loop
+
+ mov pc, lr
+
+/*
+ * read region.
+ */
+
+ENTRY(isa_bs_rr_1)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_rr_1_loop:
+ ldrb r3, [r0], #1
+ strb r3, [r1], #1
+ subs r2, r2, #1
+ bne Lisa_rr_1_loop
+
+ mov pc, lr
+
+ENTRY(isa_bs_rr_2)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_rr_2_loop:
+ ldrh r3, [r0], #2
+ strh r3, [r1], #2 /*.word 0xe0c130b2*/
+ subs r2, r2, #1
+ bne Lisa_rr_2_loop
+
+ mov pc, lr
+
+ENTRY(isa_bs_rr_4)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_rr_4_loop:
+ ldr r3, [r0], #4
+ str r3, [r1], #4
+ subs r2, r2, #1
+ bne Lisa_rr_4_loop
+
+ mov pc, lr
+
+/*
+ * write single
+ */
+
+ENTRY(isa_bs_w_1)
+ strb r3, [r1, r2]
+ PAUSE
+ mov pc, lr
+
+ENTRY(isa_bs_w_2)
+ strh r3, [r1, r2] /*.word 0xe18130b2*/
+ PAUSE
+ mov pc, lr
+
+ENTRY(isa_bs_w_4)
+ str r3, [r1, r2]
+ PAUSE
+ mov pc, lr
+
+/*
+ * write multiple
+ */
+
+ENTRY(isa_bs_wm_1)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_wm_1_loop:
+ ldrb r3, [r1], #1
+ strb r3, [r0]
+ subs r2, r2, #1
+ bne Lisa_wm_1_loop
+
+ mov pc, lr
+
+ENTRY(isa_bs_wm_2)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_wm_2_loop:
+ ldrh r3, [r1], #2 /*.word 0xe0d130b2*/
+ strh r3, [r0] /*.word 0xe1c030b0*/
+ subs r2, r2, #1
+ bne Lisa_wm_2_loop
+
+ mov pc, lr
+
+ENTRY(isa_bs_wm_4)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_wm_4_loop:
+ ldr r3, [r1], #4
+ str r3, [r0]
+ subs r2, r2, #1
+ bne Lisa_wm_4_loop
+
+ mov pc, lr
+
+
+/*
+ * write region.
+ */
+
+ENTRY(isa_bs_wr_1)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_wr_1_loop:
+ ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ subs r2, r2, #1
+ bne Lisa_wr_1_loop
+
+ mov pc, lr
+
+ENTRY(isa_bs_wr_2)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_wr_2_loop:
+ ldrh r3, [r1], #2 /*.word 0xe0d130b2*/
+ strh r3, [r0], #2
+ subs r2, r2, #1
+ bne Lisa_wr_2_loop
+
+ mov pc, lr
+
+ENTRY(isa_bs_wr_4)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_wr_4_loop:
+ ldr r3, [r1], #4
+ str r3, [r0], #4
+ subs r2, r2, #1
+ bne Lisa_wr_4_loop
+
+ mov pc, lr
+
+/*
+ * Set region
+ */
+
+ENTRY(isa_bs_sr_2)
+ add r0, r1, r2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ teq r2, #0
+ moveq pc, lr
+
+Lisa_bs_sr_2_loop:
+ strh r1, [r0], #2 /*.word e0c010b2*/
+ subs r2, r2, #1
+ bne Lisa_bs_sr_2_loop
+
+ mov pc, lr
diff --git a/sys/arch/arm/footbridge/isa/isa_machdep.c b/sys/arch/arm/footbridge/isa/isa_machdep.c
new file mode 100644
index 00000000000..8927d26ddd6
--- /dev/null
+++ b/sys/arch/arm/footbridge/isa/isa_machdep.c
@@ -0,0 +1,609 @@
+/* $OpenBSD: isa_machdep.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: isa_machdep.c,v 1.4 2003/06/16 20:00:57 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1996-1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Mark Brinicombe, Charles M. Hannum and by Jason R. Thorpe of the
+ * Numerical Aerospace Simulation Facility, NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)isa.c 7.2 (Berkeley) 5/13/91
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/syslog.h>
+#include <sys/device.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+
+#define _ARM32_BUS_DMA_PRIVATE
+#include <machine/bus.h>
+
+#include <machine/intr.h>
+#include <machine/pio.h>
+#include <machine/bootconfig.h>
+#include <machine/isa_machdep.h>
+
+#include <dev/isa/isareg.h>
+#include <dev/isa/isavar.h>
+#include <dev/isa/isadmareg.h>
+#include <dev/isa/isadmavar.h>
+#include <arm/footbridge/isa/icu.h>
+#include <arm/footbridge/dc21285reg.h>
+#include <arm/footbridge/dc21285mem.h>
+
+#include <uvm/uvm_extern.h>
+
+#include "isadma.h"
+
+/* prototypes */
+static void isa_icu_init __P((void));
+
+struct arm32_isa_chipset isa_chipset_tag;
+
+void isa_strayintr __P((int));
+void intr_calculatemasks __P((void));
+int fakeintr __P((void *));
+
+int isa_irqdispatch __P((void *arg));
+
+u_int imask[NIPL];
+unsigned imen;
+
+#define AUTO_EOI_1
+#define AUTO_EOI_2
+
+/*
+ * Fill in default interrupt table (in case of spurious interrupt
+ * during configuration of kernel), set up interrupt control unit
+ */
+static void
+isa_icu_init(void)
+{
+ /* initialize 8259's */
+ outb(IO_ICU1, 0x11); /* reset; program device, four bytes */
+ outb(IO_ICU1+1, ICU_OFFSET); /* starting at this vector index */
+ outb(IO_ICU1+1, 1 << IRQ_SLAVE); /* slave on line 2 */
+#ifdef AUTO_EOI_1
+ outb(IO_ICU1+1, 2 | 1); /* auto EOI, 8086 mode */
+#else
+ outb(IO_ICU1+1, 1); /* 8086 mode */
+#endif
+ outb(IO_ICU1+1, 0xff); /* leave interrupts masked */
+ outb(IO_ICU1, 0x68); /* special mask mode (if available) */
+ outb(IO_ICU1, 0x0a); /* Read IRR by default. */
+#ifdef REORDER_IRQ
+ outb(IO_ICU1, 0xc0 | (3 - 1)); /* pri order 3-7, 0-2 (com2 first) */
+#endif
+
+ outb(IO_ICU2, 0x11); /* reset; program device, four bytes */
+ outb(IO_ICU2+1, ICU_OFFSET+8); /* starting at this vector index */
+ outb(IO_ICU2+1, IRQ_SLAVE);
+#ifdef AUTO_EOI_2
+ outb(IO_ICU2+1, 2 | 1); /* auto EOI, 8086 mode */
+#else
+ outb(IO_ICU2+1, 1); /* 8086 mode */
+#endif
+ outb(IO_ICU2+1, 0xff); /* leave interrupts masked */
+ outb(IO_ICU2, 0x68); /* special mask mode (if available) */
+ outb(IO_ICU2, 0x0a); /* Read IRR by default. */
+}
+
+/*
+ * Caught a stray interrupt, notify
+ */
+void
+isa_strayintr(irq)
+ int irq;
+{
+ static u_long strays;
+
+ /*
+ * Stray interrupts on irq 7 occur when an interrupt line is raised
+ * and then lowered before the CPU acknowledges it. This generally
+ * means either the device is screwed or something is cli'ing too
+ * long and it's timing out.
+ */
+ if (++strays <= 5)
+ log(LOG_ERR, "stray interrupt %d%s\n", irq,
+ strays >= 5 ? "; stopped logging" : "");
+}
+
+static struct intrq isa_intrq[ICU_LEN];
+
+/*
+ * Recalculate the interrupt masks from scratch.
+ * We could code special registry and deregistry versions of this function that
+ * would be faster, but the code would be nastier, and we don't expect this to
+ * happen very much anyway.
+ */
+void
+intr_calculatemasks()
+{
+ int irq, level;
+ struct intrq *iq;
+ struct intrhand *ih;
+
+ /* First, figure out which levels each IRQ uses. */
+ for (irq = 0; irq < ICU_LEN; irq++) {
+ int levels = 0;
+ iq = &isa_intrq[irq];
+ for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
+ ih = TAILQ_NEXT(ih, ih_list))
+ levels |= (1U << ih->ih_ipl);
+ iq->iq_levels = levels;
+ }
+
+ /* Then figure out which IRQs use each level. */
+ for (level = 0; level < NIPL; level++) {
+ int irqs = 0;
+ for (irq = 0; irq < ICU_LEN; irq++)
+ if (isa_intrq[irq].iq_levels & (1U << level))
+ irqs |= (1U << irq);
+ imask[level] = irqs;
+ }
+
+ /*
+ * IPL_NONE is used for hardware interrupts that are never blocked,
+ * and do not block anything else.
+ */
+ imask[IPL_NONE] = 0;
+
+ imask[IPL_SOFT] |= imask[IPL_NONE];
+ imask[IPL_SOFTCLOCK] |= imask[IPL_SOFT];
+ imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];
+
+ /*
+ * Enforce a hierarchy that gives slow devices a better chance at not
+ * dropping data.
+ */
+ imask[IPL_BIO] |= imask[IPL_SOFTCLOCK];
+ imask[IPL_NET] |= imask[IPL_BIO];
+ imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
+ imask[IPL_TTY] |= imask[IPL_NET];
+ /*
+ * There are tty, network and disk drivers that use free() at interrupt
+ * time, so imp > (tty | net | bio).
+ */
+ imask[IPL_VM] |= imask[IPL_TTY];
+ imask[IPL_AUDIO] |= imask[IPL_VM];
+
+ /*
+ * Since run queues may be manipulated by both the statclock and tty,
+ * network, and disk drivers, clock > imp.
+ */
+ imask[IPL_CLOCK] |= imask[IPL_VM];
+ imask[IPL_STATCLOCK] |= imask[IPL_CLOCK];
+
+ /*
+ * IPL_HIGH must block everything that can manipulate a run queue.
+ */
+ imask[IPL_HIGH] |= imask[IPL_STATCLOCK];
+
+ /*
+ * We need serial drivers to run at the absolute highest priority to
+ * avoid overruns, so serial > high.
+ */
+ imask[IPL_SERIAL] |= imask[IPL_HIGH];
+
+ /* And eventually calculate the complete masks. */
+ for (irq = 0; irq < ICU_LEN; irq++) {
+ int irqs = 1 << irq;
+ iq = &isa_intrq[irq];
+ for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
+ ih = TAILQ_NEXT(ih, ih_list))
+ irqs |= imask[ih->ih_ipl];
+ iq->iq_mask = irqs;
+ }
+
+ /* Lastly, determine which IRQs are actually in use. */
+ {
+ int irqs = 0;
+ for (irq = 0; irq < ICU_LEN; irq++)
+ if (!TAILQ_EMPTY(&isa_intrq[irq].iq_list))
+ irqs |= (1U << irq);
+ if (irqs >= 0x100) /* any IRQs >= 8 in use */
+ irqs |= 1 << IRQ_SLAVE;
+ imen = ~irqs;
+ SET_ICUS();
+ }
+#if 0
+ printf("type\tmask\tlevel\thand\n");
+ for (irq = 0; irq < ICU_LEN; irq++) {
+ printf("%x\t%04x\t%x\t%p\n", intrtype[irq], intrmask[irq],
+ intrlevel[irq], intrhand[irq]);
+ }
+ for (level = 0; level < IPL_LEVELS; ++level)
+ printf("%d: %08x\n", level, imask[level]);
+#endif
+}
+
+int
+fakeintr(arg)
+ void *arg;
+{
+
+ return 0;
+}
+
+#define LEGAL_IRQ(x) ((x) >= 0 && (x) < ICU_LEN && (x) != 2)
+
+int
+isa_intr_alloc(ic, mask, type, irq)
+ isa_chipset_tag_t ic;
+ int mask;
+ int type;
+ int *irq;
+{
+ int i, tmp, bestirq, count;
+ struct intrq *iq;
+ struct intrhand *ih;
+
+ if (type == IST_NONE)
+ panic("intr_alloc: bogus type");
+
+ bestirq = -1;
+ count = -1;
+
+ /* some interrupts should never be dynamically allocated */
+ mask &= 0xdef8;
+
+ /*
+ * XXX some interrupts will be used later (6 for fdc, 12 for pms).
+ * the right answer is to do "breadth-first" searching of devices.
+ */
+ mask &= 0xefbf;
+
+ for (i = 0; i < ICU_LEN; i++) {
+ if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
+ continue;
+
+ iq = &isa_intrq[i];
+ switch(iq->iq_ist) {
+ case IST_NONE:
+ /*
+ * if nothing's using the irq, just return it
+ */
+ *irq = i;
+ return (0);
+
+ case IST_EDGE:
+ case IST_LEVEL:
+ if (type != iq->iq_ist)
+ continue;
+ /*
+ * if the irq is shareable, count the number of other
+ * handlers, and if it's smaller than the last irq like
+ * this, remember it
+ *
+ * XXX We should probably also consider the
+ * interrupt level and stick IPL_TTY with other
+ * IPL_TTY, etc.
+ */
+ tmp = 0;
+ TAILQ_FOREACH(ih, &(iq->iq_list), ih_list)
+ tmp++;
+ if ((bestirq == -1) || (count > tmp)) {
+ bestirq = i;
+ count = tmp;
+ }
+ break;
+
+ case IST_PULSE:
+ /* this just isn't shareable */
+ continue;
+ }
+ }
+
+ if (bestirq == -1)
+ return (1);
+
+ *irq = bestirq;
+
+ return (0);
+}
+
+const struct evcnt *
+isa_intr_evcnt(isa_chipset_tag_t ic, int irq)
+{
+ return &isa_intrq[irq].iq_ev;
+}
+
+/*
+ * Set up an interrupt handler to start being called.
+ * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM.
+ */
+void *
+isa_intr_establish(ic, irq, type, level, ih_fun, ih_arg, name)
+ isa_chipset_tag_t ic;
+ int irq;
+ int type;
+ int level;
+ int (*ih_fun) __P((void *));
+ void *ih_arg;
+ char *name;
+{
+ struct intrq *iq;
+ struct intrhand *ih;
+ u_int oldirqstate;
+
+#if 0
+ printf("isa_intr_establish(%d, %d, %d)\n", irq, type, level);
+#endif
+ /* no point in sleeping unless someone can free memory. */
+ ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
+ if (ih == NULL)
+ return (NULL);
+
+ if (!LEGAL_IRQ(irq) || type == IST_NONE)
+ panic("intr_establish: bogus irq or type");
+
+ iq = &isa_intrq[irq];
+
+ switch (iq->iq_ist) {
+ case IST_NONE:
+ iq->iq_ist = type;
+#if 0
+ printf("Setting irq %d to type %d - ", irq, type);
+#endif
+ if (irq < 8) {
+ outb(0x4d0, (inb(0x4d0) & ~(1 << irq))
+ | ((type == IST_LEVEL) ? (1 << irq) : 0));
+/* printf("%02x\n", inb(0x4d0));*/
+ } else {
+ outb(0x4d1, (inb(0x4d1) & ~(1 << irq))
+ | ((type == IST_LEVEL) ? (1 << irq) : 0));
+/* printf("%02x\n", inb(0x4d1));*/
+ }
+ break;
+ case IST_EDGE:
+ case IST_LEVEL:
+ if (iq->iq_ist == type)
+ break;
+ case IST_PULSE:
+ if (type != IST_NONE)
+ panic("intr_establish: can't share %s with %s",
+ isa_intr_typename(iq->iq_ist),
+ isa_intr_typename(type));
+ break;
+ }
+
+ ih->ih_func = ih_fun;
+ ih->ih_arg = ih_arg;
+ ih->ih_ipl = level;
+ ih->ih_irq = irq;
+
+ /* do not stop us */
+ oldirqstate = disable_interrupts(I32_bit);
+
+ TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
+
+ intr_calculatemasks();
+ restore_interrupts(oldirqstate);
+
+ return (ih);
+}
+
+/*
+ * Deregister an interrupt handler.
+ */
+void
+isa_intr_disestablish(ic, arg)
+ isa_chipset_tag_t ic;
+ void *arg;
+{
+ struct intrhand *ih = arg;
+ struct intrq *iq = &isa_intrq[ih->ih_irq];
+ int irq = ih->ih_irq;
+ u_int oldirqstate;
+
+ if (!LEGAL_IRQ(irq))
+ panic("intr_disestablish: bogus irq");
+
+ oldirqstate = disable_interrupts(I32_bit);
+
+ TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
+
+ intr_calculatemasks();
+
+ restore_interrupts(oldirqstate);
+
+ free(ih, M_DEVBUF);
+
+ if (TAILQ_EMPTY(&(iq->iq_list)))
+ iq->iq_ist = IST_NONE;
+}
+
+/*
+ * isa_intr_init()
+ *
+ * Initialise the ISA ICU and attach an ISA interrupt handler to the
+ * ISA interrupt line on the footbridge.
+ */
+void
+isa_intr_init(void)
+{
+ static void *isa_ih;
+ struct intrq *iq;
+ int i;
+
+ /*
+ * should get the parent here, but initialisation order being so
+ * strange I need to check if it's available
+ */
+ for (i = 0; i < ICU_LEN; i++) {
+ iq = &isa_intrq[i];
+ TAILQ_INIT(&iq->iq_list);
+
+ snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
+#if 0
+ evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
+ NULL, "isa", iq->iq_name);
+#endif
+ }
+
+ isa_icu_init();
+ intr_calculatemasks();
+ /* something to break the build in an informative way */
+#ifndef ISA_FOOTBRIDGE_IRQ
+#warning Before using isa with footbridge you must define ISA_FOOTBRIDGE_IRQ
+#endif
+ isa_ih = footbridge_intr_claim(ISA_FOOTBRIDGE_IRQ, IPL_BIO, "isabus",
+ isa_irqdispatch, NULL);
+
+}
+
+/* Static array of ISA DMA segments. We only have one on CATS */
+#if NISADMA > 0
+struct arm32_dma_range machdep_isa_dma_ranges[1];
+#endif
+
+void
+isa_footbridge_init(iobase, membase)
+ u_int iobase, membase;
+{
+#if NISADMA > 0
+ extern struct arm32_dma_range *footbridge_isa_dma_ranges;
+ extern int footbridge_isa_dma_nranges;
+
+ machdep_isa_dma_ranges[0].dr_sysbase = bootconfig.dram[0].address;
+ machdep_isa_dma_ranges[0].dr_busbase = bootconfig.dram[0].address;
+ machdep_isa_dma_ranges[0].dr_len = (16 * 1024 * 1024);
+
+ footbridge_isa_dma_ranges = machdep_isa_dma_ranges;
+ footbridge_isa_dma_nranges = 1;
+#endif
+
+ isa_io_init(iobase, membase);
+}
+
+void
+isa_attach_hook(parent, self, iba)
+ struct device *parent, *self;
+ struct isabus_attach_args *iba;
+{
+ /*
+ * Since we can only have one ISA bus, we just use a single
+ * statically allocated ISA chipset structure. Pass it up
+ * now.
+ */
+ iba->iba_ic = &isa_chipset_tag;
+#if NISADMA > 0
+ isa_dma_init();
+#endif
+}
+
+int
+isa_irqdispatch(arg)
+ void *arg;
+{
+ struct clockframe *frame = arg;
+ int irq;
+ struct intrq *iq;
+ struct intrhand *ih;
+ u_int iack;
+ int res = 0;
+
+ iack = *((u_int *)(DC21285_PCI_IACK_VBASE));
+ iack &= 0xff;
+ if (iack < 0x20 || iack > 0x2f) {
+ printf("isa_irqdispatch: %x\n", iack);
+ return(0);
+ }
+
+ irq = iack & 0x0f;
+ iq = &isa_intrq[irq];
+ iq->iq_ev.ev_count++;
+ for (ih = TAILQ_FIRST(&iq->iq_list); res != 1 && ih != NULL;
+ ih = TAILQ_NEXT(ih, ih_list)) {
+ res = (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
+ }
+ return res;
+}
+
+
+void
+isa_fillw(val, addr, len)
+ u_int val;
+ void *addr;
+ size_t len;
+{
+ if ((u_int)addr >= isa_mem_data_vaddr()
+ && (u_int)addr < isa_mem_data_vaddr() + 0x100000) {
+ bus_size_t offset = ((u_int)addr) & 0xfffff;
+ bus_space_set_region_2(&isa_mem_bs_tag,
+ (bus_space_handle_t)isa_mem_bs_tag.bs_cookie, offset,
+ val, len);
+ } else {
+ u_short *ptr = addr;
+
+ while (len > 0) {
+ *ptr++ = val;
+ --len;
+ }
+ }
+}
diff --git a/sys/arch/arm/footbridge/isa/sysbeep_isa.c b/sys/arch/arm/footbridge/isa/sysbeep_isa.c
new file mode 100644
index 00000000000..71032afebf5
--- /dev/null
+++ b/sys/arch/arm/footbridge/isa/sysbeep_isa.c
@@ -0,0 +1,90 @@
+/* $OpenBSD: sysbeep_isa.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: sysbeep_isa.c,v 1.4 2002/10/02 15:45:10 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Mark Brinicombe of Causality Limited.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+#include <dev/isa/isavar.h>
+
+#include <dev/isa/pcppivar.h>
+
+/* Prototypes */
+int sysbeep_isa_match __P((struct device *parent, void *cf, void *aux));
+void sysbeep_isa_attach __P((struct device *parent, struct device *self, void *aux));
+void sysbeep_isa __P((int pitch, int period));
+
+/* device attach structure */
+struct cfattach sysbeep_isa_ca = {
+ sizeof (struct device), sysbeep_isa_match, sysbeep_isa_attach
+};
+
+struct cfdriver sysbeep_cd = {
+ NULL, "sysbeep_isa", DV_DULL
+};
+
+static int ppi_attached;
+static pcppi_tag_t ppicookie;
+
+int
+sysbeep_isa_match(parent, match, aux)
+ struct device *parent;
+ void *match;
+ void *aux;
+{
+ return (!ppi_attached);
+}
+
+void
+sysbeep_isa_attach(parent, self, aux)
+ struct device *parent, *self;
+ void *aux;
+{
+ printf("\n");
+
+ ppicookie = ((struct pcppi_attach_args *)aux)->pa_cookie;
+ ppi_attached = 1;
+}
+
+void
+sysbeep(pitch, period)
+ int pitch, period;
+{
+ if (ppi_attached)
+ pcppi_bell(ppicookie, pitch, period, 0);
+}
diff --git a/sys/arch/arm/footbridge/todclock.c b/sys/arch/arm/footbridge/todclock.c
new file mode 100644
index 00000000000..642724d749c
--- /dev/null
+++ b/sys/arch/arm/footbridge/todclock.c
@@ -0,0 +1,348 @@
+/* $OpenBSD: todclock.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: todclock.c,v 1.4 2002/10/02 05:02:30 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1994-1997 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * clock.c
+ *
+ * Timer related machine specific code
+ *
+ * Created : 29/09/94
+ */
+
+/* Include header files */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/time.h>
+#include <sys/device.h>
+
+#include <machine/rtc.h>
+#include <arm/footbridge/todclockvar.h>
+
+#include "todclock.h"
+
+#if NTODCLOCK > 1
+#error "Can only had 1 todclock device"
+#endif
+
+static int yeartoday __P((int));
+
+/*
+ * softc structure for the todclock device
+ */
+
+struct todclock_softc {
+ struct device sc_dev; /* device node */
+ void *sc_rtc_arg; /* arg to read/write */
+ int (*sc_rtc_write) __P((void *, rtc_t *)); /* rtc write function */
+ int (*sc_rtc_read) __P((void *, rtc_t *)); /* rtc read function */
+};
+
+/* prototypes for functions */
+
+static void todclockattach (struct device *parent, struct device *self,
+ void *aux);
+static int todclockmatch (struct device *parent, void *cf, void *aux);
+
+/*
+ * We need to remember our softc for functions like inittodr()
+ * and resettodr()
+ * since we only ever have one time-of-day device we can just store
+ * the direct pointer to softc.
+ */
+
+static struct todclock_softc *todclock_sc = NULL;
+
+/* driver and attach structures */
+
+struct cfattach todclock_ca = {
+ sizeof(struct todclock_softc), todclockmatch, todclockattach
+};
+
+struct cfdriver todclock_cd = {
+ NULL, "todclock", DV_DULL
+};
+
+
+/*
+ * int todclockmatch(struct device *parent, struct cfdata *cf, void *aux)
+ *
+ * todclock device probe function.
+ * just validate the attach args
+ */
+
+int
+todclockmatch(parent, cf, aux)
+ struct device *parent;
+ void *cf;
+ void *aux;
+{
+ struct todclock_attach_args *ta = aux;
+
+ if (todclock_sc != NULL)
+ return(0);
+ if (strcmp(ta->ta_name, "todclock") != 0)
+ return(0);
+
+ if (ta->ta_flags & TODCLOCK_FLAG_FAKE)
+ return(1);
+ return(2);
+}
+
+/*
+ * void todclockattach(struct device *parent, struct device *self, void *aux)
+ *
+ * todclock device attach function.
+ * Initialise the softc structure and do a search for children
+ */
+
+void
+todclockattach(parent, self, aux)
+ struct device *parent;
+ struct device *self;
+ void *aux;
+{
+ struct todclock_softc *sc = (void *)self;
+ struct todclock_attach_args *ta = aux;
+
+ /* set up our softc */
+ todclock_sc = sc;
+ todclock_sc->sc_rtc_arg = ta->ta_rtc_arg;
+ todclock_sc->sc_rtc_write = ta->ta_rtc_write;
+ todclock_sc->sc_rtc_read = ta->ta_rtc_read;
+
+ printf("\n");
+
+ /*
+ * Initialise the time of day register.
+ * This is normally left to the filing system to do but not all
+ * filing systems call it e.g. cd9660
+ */
+
+ inittodr(0);
+}
+
+static __inline int
+yeartoday(year)
+ int year;
+{
+ return((year % 4) ? 365 : 366);
+}
+
+
+static int month[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+static int timeset = 0;
+
+#define SECPERDAY (24*60*60)
+#define SECPERNYEAR (365*SECPERDAY)
+#define SECPER4YEARS (4*SECPERNYEAR+SECPERDAY)
+#define EPOCHYEAR 1970
+
+/*
+ * Globally visible functions
+ *
+ * These functions are used from other parts of the kernel.
+ * These functions use the functions defined in the tod_sc
+ * to actually read and write the rtc.
+ *
+ * The first todclock to be attached will be used for handling
+ * the time of day.
+ */
+
+/*
+ * Write back the time of day to the rtc
+ */
+
+void
+resettodr()
+{
+ int s;
+ time_t year, mon, day, hour, min, sec;
+ rtc_t rtc;
+
+ /* Have we set the system time in inittodr() */
+ if (!timeset)
+ return;
+
+ /* We need a todclock device and should always have one */
+ if (!todclock_sc)
+ return;
+
+ /* Abort early if there is not actually an RTC write routine */
+ if (todclock_sc->sc_rtc_write == NULL)
+ return;
+
+ sec = time.tv_sec;
+ sec -= tz.tz_minuteswest * 60;
+ if (tz.tz_dsttime)
+ time.tv_sec += 3600;
+ year = (sec / SECPER4YEARS) * 4;
+ sec %= SECPER4YEARS;
+
+ /* year now hold the number of years rounded down 4 */
+
+ while (sec > (yeartoday(EPOCHYEAR+year) * SECPERDAY)) {
+ sec -= yeartoday(EPOCHYEAR+year)*SECPERDAY;
+ year++;
+ }
+
+ /* year is now a correct offset from the EPOCHYEAR */
+
+ year+=EPOCHYEAR;
+ mon=0;
+ if (yeartoday(year) == 366)
+ month[1]=29;
+ else
+ month[1]=28;
+ while (sec >= month[mon]*SECPERDAY) {
+ sec -= month[mon]*SECPERDAY;
+ mon++;
+ }
+
+ day = sec / SECPERDAY;
+ sec %= SECPERDAY;
+ hour = sec / 3600;
+ sec %= 3600;
+ min = sec / 60;
+ sec %= 60;
+ rtc.rtc_cen = year / 100;
+ rtc.rtc_year = year % 100;
+ rtc.rtc_mon = mon+1;
+ rtc.rtc_day = day+1;
+ rtc.rtc_hour = hour;
+ rtc.rtc_min = min;
+ rtc.rtc_sec = sec;
+ rtc.rtc_centi =
+ rtc.rtc_micro = 0;
+
+ printf("resettod: %02d/%02d/%02d%02d %02d:%02d:%02d\n", rtc.rtc_day,
+ rtc.rtc_mon, rtc.rtc_cen, rtc.rtc_year, rtc.rtc_hour,
+ rtc.rtc_min, rtc.rtc_sec);
+
+ s = splclock();
+ todclock_sc->sc_rtc_write(todclock_sc->sc_rtc_arg, &rtc);
+ (void)splx(s);
+}
+
+/*
+ * Initialise the time of day register, based on the time base which is, e.g.
+ * from a filesystem.
+ */
+
+void
+inittodr(base)
+ time_t base;
+{
+ time_t n;
+ int i, days = 0;
+ int s;
+ int year;
+ rtc_t rtc;
+
+ /*
+ * Default to the suggested time but replace that with one from an
+ * RTC if we can.
+ */
+
+ /* We expect a todclock device */
+
+ /* Use the suggested time as a fall back */
+ time.tv_sec = base;
+ time.tv_usec = 0;
+
+ /* Can we read an RTC ? */
+ if (todclock_sc->sc_rtc_read) {
+ s = splclock();
+ if (todclock_sc->sc_rtc_read(todclock_sc->sc_rtc_arg, &rtc) == 0) {
+ (void)splx(s);
+ return;
+ }
+ (void)splx(s);
+ } else
+ return;
+
+ /* Convert the rtc time into seconds */
+
+ n = rtc.rtc_sec + 60 * rtc.rtc_min + 3600 * rtc.rtc_hour;
+ n += (rtc.rtc_day - 1) * 3600 * 24;
+ year = (rtc.rtc_year + rtc.rtc_cen * 100) - 1900;
+
+ if (yeartoday(year) == 366)
+ month[1] = 29;
+ for (i = rtc.rtc_mon - 2; i >= 0; i--)
+ days += month[i];
+ month[1] = 28;
+
+ for (i = 70; i < year; i++)
+ days += yeartoday(i);
+
+ n += days * 3600 * 24;
+
+ n += tz.tz_minuteswest * 60;
+ if (tz.tz_dsttime)
+ time.tv_sec -= 3600;
+
+ time.tv_sec = n;
+ time.tv_usec = 0;
+
+ /* timeset is used to ensure the time is valid before a resettodr() */
+
+ timeset = 1;
+
+ /* If the base was 0 then keep quiet */
+
+ if (base) {
+ printf("inittodr: %02d:%02d:%02d.%02d%02d %02d/%02d/%02d%02d\n",
+ rtc.rtc_hour, rtc.rtc_min, rtc.rtc_sec, rtc.rtc_centi,
+ rtc.rtc_micro, rtc.rtc_day, rtc.rtc_mon, rtc.rtc_cen,
+ rtc.rtc_year);
+
+ if (n > base + 60) {
+ days = (n - base) / SECPERDAY;
+ printf("Clock has gained %d day%c %ld hours %ld minutes %ld secs\n",
+ days, ((days == 1) ? 0 : 's'),
+ (long)((n - base) / 3600) % 24,
+ (long)((n - base) / 60) % 60,
+ (long) (n - base) % 60);
+ }
+ }
+}
+
+/* End of todclock.c */
diff --git a/sys/arch/arm/footbridge/todclockvar.h b/sys/arch/arm/footbridge/todclockvar.h
new file mode 100644
index 00000000000..58d296d0fa6
--- /dev/null
+++ b/sys/arch/arm/footbridge/todclockvar.h
@@ -0,0 +1,56 @@
+/* $OpenBSD: todclockvar.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: todclockvar.h,v 1.1 2002/02/10 12:26:00 chris Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * todclockvar.h
+ *
+ * structures and variables for the todclock device
+ *
+ * Created : 12/02/97
+ */
+
+/*
+ * Attach args for todclock device
+ */
+
+struct todclock_attach_args {
+ const char *ta_name; /* device name */
+ void *ta_rtc_arg; /* arg to read/write */
+ int (*ta_rtc_write) __P((void *, rtc_t *)); /* function to write rtc */
+ int (*ta_rtc_read) __P((void *, rtc_t *)); /* function to read rtc */
+ int ta_flags; /* flags */
+#define TODCLOCK_FLAG_FAKE 0x01 /* tod service is faked */
+};
+
+/* End of todclockvar.h */
diff --git a/sys/arch/arm/include/ansi.h b/sys/arch/arm/include/ansi.h
new file mode 100644
index 00000000000..466a195e1b3
--- /dev/null
+++ b/sys/arch/arm/include/ansi.h
@@ -0,0 +1,109 @@
+/* $OpenBSD: ansi.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: ansi.h,v 1.4 2003/03/02 22:18:17 tshiozak Exp $ */
+
+/*
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)ansi.h 8.2 (Berkeley) 1/4/94
+ */
+
+#ifndef _ANSI_H_
+#define _ANSI_H_
+
+#if 0
+#include <machine/types.h>
+#endif
+
+/*
+ * Types which are fundamental to the implementation and may appear in
+ * more than one standard header are defined here. Standard headers
+ * then use:
+ * #ifdef _BSD_SIZE_T_
+ * typedef _BSD_SIZE_T_ size_t;
+ * #undef _BSD_SIZE_T_
+ * #endif
+ */
+#ifdef __ELF__
+#define _BSD_CLOCK_T_ unsigned int /* clock() */
+#define _BSD_PTRDIFF_T_ long int /* ptr1 - ptr2 */
+#define _BSD_SIZE_T_ unsigned long int /* sizeof() */
+#define _BSD_SSIZE_T_ long int /* byte count or error */
+#define _BSD_TIME_T_ int /* time() */
+#else
+#define _BSD_CLOCK_T_ unsigned long /* clock() */
+#define _BSD_PTRDIFF_T_ int /* ptr1 - ptr2 */
+#define _BSD_SIZE_T_ unsigned int /* sizeof() */
+#define _BSD_SSIZE_T_ int /* byte count or error */
+#define _BSD_TIME_T_ long /* time() */
+#endif
+#define _BSD_VA_LIST_ __builtin_va_list /* va_list */
+#define _BSD_CLOCKID_T_ int /* clockid_t */
+#define _BSD_TIMER_T_ int /* timer_t */
+#define _BSD_SUSECONDS_T_ int /* suseconds_t */
+#define _BSD_USECONDS_T_ unsigned int /* useconds_t */
+
+/*
+ * NOTE: rune_t is not covered by ANSI nor other standards, and should not
+ * be instantiated outside of lib/libc/locale. use wchar_t.
+ *
+ * Runes (wchar_t) is declared to be an ``int'' instead of the more natural
+ * ``unsigned long'' or ``long''. Two things are happening here. It is not
+ * unsigned so that EOF (-1) can be naturally assigned to it and used. Also,
+ * it looks like 10646 will be a 31 bit standard. This means that if your
+ * ints cannot hold 32 bits, you will be in trouble. The reason an int was
+ * chosen over a long is that the is*() and to*() routines take ints (says
+ * ANSI C), but they use _RUNE_T_ instead of int. By changing it here, you
+ * lose a bit of ANSI conformance, but your programs will still work.
+ *
+ * Note that _WCHAR_T_ and _RUNE_T_ must be of the same type. When wchar_t
+ * and rune_t are typedef'd, _WCHAR_T_ will be undef'd, but _RUNE_T_ remains
+ * defined for ctype.h.
+ */
+#define _BSD_WCHAR_T_ int /* wchar_t */
+#define _BSD_WINT_T_ int /* wint_t */
+#define _BSD_RUNE_T_ int /* rune_t */
+#define _BSD_WCTRANS_T_ void * /* wctrans_t */
+#define _BSD_WCTYPE_T_ void * /* wctype_t */
+
+/*
+ * mbstate_t is an opaque object to keep conversion state, during multibyte
+ * stream conversions. The content must not be referenced by user programs.
+ */
+typedef union {
+ char __mbstate8[128];
+ long long __mbstateL; /* for alignment */
+} __mbstate_t;
+#define _BSD_MBSTATE_T_ __mbstate_t /* mbstate_t */
+
+/*
+ * We describe off_t here so its declaration can be visible to
+ * stdio without pulling in all of <sys/types.h>, thus appeasing ANSI.
+ */
+#define _BSD_OFF_T_ long long /* file offset */
+
+#endif /* _ANSI_H_ */
diff --git a/sys/arch/arm/include/armreg.h b/sys/arch/arm/include/armreg.h
new file mode 100644
index 00000000000..f5c1ef567e9
--- /dev/null
+++ b/sys/arch/arm/include/armreg.h
@@ -0,0 +1,353 @@
+/* $OpenBSD: armreg.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: armreg.h,v 1.27 2003/09/06 08:43:02 rearnsha Exp $ */
+
+/*
+ * Copyright (c) 1998, 2001 Ben Harris
+ * Copyright (c) 1994-1996 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARM_ARMREG_H
+#define _ARM_ARMREG_H
+
+/*
+ * ARM Process Status Register
+ *
+ * The picture in the ARM manuals looks like this:
+ * 3 3 2 2 2 2
+ * 1 0 9 8 7 6 8 7 6 5 4 0
+ * +-+-+-+-+-+-------------------------------------+-+-+-+---------+
+ * |N|Z|C|V|Q| reserved |I|F|T|M M M M M|
+ * | | | | | | | | | |4 3 2 1 0|
+ * +-+-+-+-+-+-------------------------------------+-+-+-+---------+
+ */
+
+#define PSR_FLAGS 0xf0000000 /* flags */
+#define PSR_N_bit (1 << 31) /* negative */
+#define PSR_Z_bit (1 << 30) /* zero */
+#define PSR_C_bit (1 << 29) /* carry */
+#define PSR_V_bit (1 << 28) /* overflow */
+
+#define PSR_Q_bit (1 << 27) /* saturation */
+
+#define I32_bit (1 << 7) /* IRQ disable */
+#define F32_bit (1 << 6) /* FIQ disable */
+
+#define PSR_T_bit (1 << 5) /* Thumb state */
+#define PSR_J_bit (1 << 24) /* Java mode */
+
+#define PSR_MODE 0x0000001f /* mode mask */
+#define PSR_USR26_MODE 0x00000000
+#define PSR_FIQ26_MODE 0x00000001
+#define PSR_IRQ26_MODE 0x00000002
+#define PSR_SVC26_MODE 0x00000003
+#define PSR_USR32_MODE 0x00000010
+#define PSR_FIQ32_MODE 0x00000011
+#define PSR_IRQ32_MODE 0x00000012
+#define PSR_SVC32_MODE 0x00000013
+#define PSR_ABT32_MODE 0x00000017
+#define PSR_UND32_MODE 0x0000001b
+#define PSR_SYS32_MODE 0x0000001f
+#define PSR_32_MODE 0x00000010
+
+#define PSR_IN_USR_MODE(psr) (!((psr) & 3)) /* XXX */
+#define PSR_IN_32_MODE(psr) ((psr) & PSR_32_MODE)
+
+/* In 26-bit modes, the PSR is stuffed into R15 along with the PC. */
+
+#define R15_MODE 0x00000003
+#define R15_MODE_USR 0x00000000
+#define R15_MODE_FIQ 0x00000001
+#define R15_MODE_IRQ 0x00000002
+#define R15_MODE_SVC 0x00000003
+
+#define R15_PC 0x03fffffc
+
+#define R15_FIQ_DISABLE 0x04000000
+#define R15_IRQ_DISABLE 0x08000000
+
+#define R15_FLAGS 0xf0000000
+#define R15_FLAG_N 0x80000000
+#define R15_FLAG_Z 0x40000000
+#define R15_FLAG_C 0x20000000
+#define R15_FLAG_V 0x10000000
+
+/*
+ * Co-processor 15: The system control co-processor.
+ */
+
+#define ARM_CP15_CPU_ID 0
+
+/*
+ * The CPU ID register is theoretically structured, but the definitions of
+ * the fields keep changing.
+ */
+
+/* The high-order byte is always the implementor */
+#define CPU_ID_IMPLEMENTOR_MASK 0xff000000
+#define CPU_ID_ARM_LTD 0x41000000 /* 'A' */
+#define CPU_ID_DEC 0x44000000 /* 'D' */
+#define CPU_ID_INTEL 0x69000000 /* 'i' */
+#define CPU_ID_TI 0x54000000 /* 'T' */
+
+/* How to decide what format the CPUID is in. */
+#define CPU_ID_ISOLD(x) (((x) & 0x0000f000) == 0x00000000)
+#define CPU_ID_IS7(x) (((x) & 0x0000f000) == 0x00007000)
+#define CPU_ID_ISNEW(x) (!CPU_ID_ISOLD(x) && !CPU_ID_IS7(x))
+
+/* On ARM3 and ARM6, this byte holds the foundry ID. */
+#define CPU_ID_FOUNDRY_MASK 0x00ff0000
+#define CPU_ID_FOUNDRY_VLSI 0x00560000
+
+/* On ARM7 it holds the architecture and variant (sub-model) */
+#define CPU_ID_7ARCH_MASK 0x00800000
+#define CPU_ID_7ARCH_V3 0x00000000
+#define CPU_ID_7ARCH_V4T 0x00800000
+#define CPU_ID_7VARIANT_MASK 0x007f0000
+
+/* On more recent ARMs, it does the same, but in a different format */
+#define CPU_ID_ARCH_MASK 0x000f0000
+#define CPU_ID_ARCH_V3 0x00000000
+#define CPU_ID_ARCH_V4 0x00010000
+#define CPU_ID_ARCH_V4T 0x00020000
+#define CPU_ID_ARCH_V5 0x00030000
+#define CPU_ID_ARCH_V5T 0x00040000
+#define CPU_ID_ARCH_V5TE 0x00050000
+#define CPU_ID_VARIANT_MASK 0x00f00000
+
+/* Next three nybbles are part number */
+#define CPU_ID_PARTNO_MASK 0x0000fff0
+
+/* Intel XScale has sub fields in part number */
+#define CPU_ID_XSCALE_COREGEN_MASK 0x0000e000 /* core generation */
+#define CPU_ID_XSCALE_COREREV_MASK 0x00001c00 /* core revision */
+#define CPU_ID_XSCALE_PRODUCT_MASK 0x000003f0 /* product number */
+
+/* And finally, the revision number. */
+#define CPU_ID_REVISION_MASK 0x0000000f
+
+/* Individual CPUs are probably best IDed by everything but the revision. */
+#define CPU_ID_CPU_MASK 0xfffffff0
+
+/* Fake CPU IDs for ARMs without CP15 */
+#define CPU_ID_ARM2 0x41560200
+#define CPU_ID_ARM250 0x41560250
+
+/* Pre-ARM7 CPUs -- [15:12] == 0 */
+#define CPU_ID_ARM3 0x41560300
+#define CPU_ID_ARM600 0x41560600
+#define CPU_ID_ARM610 0x41560610
+#define CPU_ID_ARM620 0x41560620
+
+/* ARM7 CPUs -- [15:12] == 7 */
+#define CPU_ID_ARM700 0x41007000 /* XXX This is a guess. */
+#define CPU_ID_ARM710 0x41007100
+#define CPU_ID_ARM7500 0x41027100 /* XXX This is a guess. */
+#define CPU_ID_ARM710A 0x41047100 /* inc ARM7100 */
+#define CPU_ID_ARM7500FE 0x41077100
+#define CPU_ID_ARM710T 0x41807100
+#define CPU_ID_ARM720T 0x41807200
+#define CPU_ID_ARM740T8K 0x41807400 /* XXX no MMU, 8KB cache */
+#define CPU_ID_ARM740T4K 0x41817400 /* XXX no MMU, 4KB cache */
+
+/* Post-ARM7 CPUs */
+#define CPU_ID_ARM810 0x41018100
+#define CPU_ID_ARM920T 0x41129200
+#define CPU_ID_ARM922T 0x41029220
+#define CPU_ID_ARM940T 0x41029400 /* XXX no MMU */
+#define CPU_ID_ARM946ES 0x41049460 /* XXX no MMU */
+#define CPU_ID_ARM966ES 0x41049660 /* XXX no MMU */
+#define CPU_ID_ARM966ESR1 0x41059660 /* XXX no MMU */
+#define CPU_ID_ARM1020E 0x4115a200 /* (AKA arm10 rev 1) */
+#define CPU_ID_ARM1022ES 0x4105a220
+#define CPU_ID_SA110 0x4401a100
+#define CPU_ID_SA1100 0x4401a110
+#define CPU_ID_TI925T 0x54029250
+#define CPU_ID_SA1110 0x6901b110
+#define CPU_ID_IXP1200 0x6901c120
+#define CPU_ID_80200 0x69052000
+#define CPU_ID_PXA250 0x69052100 /* sans core revision */
+#define CPU_ID_PXA210 0x69052120
+#define CPU_ID_PXA250A 0x69052100 /* 1st version Core */
+#define CPU_ID_PXA210A 0x69052120 /* 1st version Core */
+#define CPU_ID_PXA250B 0x69052900 /* 3rd version Core */
+#define CPU_ID_PXA210B 0x69052920 /* 3rd version Core */
+#define CPU_ID_PXA250C 0x69052d00 /* 4th version Core */
+#define CPU_ID_PXA210C 0x69052d20 /* 4th version Core */
+#define CPU_ID_80321_400 0x69052420
+#define CPU_ID_80321_600 0x69052430
+#define CPU_ID_80321_400_B0 0x69052c20
+#define CPU_ID_80321_600_B0 0x69052c30
+#define CPU_ID_IXP425_533 0x690541c0
+#define CPU_ID_IXP425_400 0x690541d0
+#define CPU_ID_IXP425_266 0x690541f0
+
+/* ARM3-specific coprocessor 15 registers */
+#define ARM3_CP15_FLUSH 1
+#define ARM3_CP15_CONTROL 2
+#define ARM3_CP15_CACHEABLE 3
+#define ARM3_CP15_UPDATEABLE 4
+#define ARM3_CP15_DISRUPTIVE 5
+
+/* ARM3 Control register bits */
+#define ARM3_CTL_CACHE_ON 0x00000001
+#define ARM3_CTL_SHARED 0x00000002
+#define ARM3_CTL_MONITOR 0x00000004
+
+/*
+ * Post-ARM3 CP15 registers:
+ *
+ * 1 Control register
+ *
+ * 2 Translation Table Base
+ *
+ * 3 Domain Access Control
+ *
+ * 4 Reserved
+ *
+ * 5 Fault Status
+ *
+ * 6 Fault Address
+ *
+ * 7 Cache/write-buffer Control
+ *
+ * 8 TLB Control
+ *
+ * 9 Cache Lockdown
+ *
+ * 10 TLB Lockdown
+ *
+ * 11 Reserved
+ *
+ * 12 Reserved
+ *
+ * 13 Process ID (for FCSE)
+ *
+ * 14 Reserved
+ *
+ * 15 Implementation Dependent
+ */
+
+/* Some of the definitions below need cleaning up for V3/V4 architectures */
+
+/* CPU control register (CP15 register 1) */
+#define CPU_CONTROL_MMU_ENABLE 0x00000001 /* M: MMU/Protection unit enable */
+#define CPU_CONTROL_AFLT_ENABLE 0x00000002 /* A: Alignment fault enable */
+#define CPU_CONTROL_DC_ENABLE 0x00000004 /* C: IDC/DC enable */
+#define CPU_CONTROL_WBUF_ENABLE 0x00000008 /* W: Write buffer enable */
+#define CPU_CONTROL_32BP_ENABLE 0x00000010 /* P: 32-bit exception handlers */
+#define CPU_CONTROL_32BD_ENABLE 0x00000020 /* D: 32-bit addressing */
+#define CPU_CONTROL_LABT_ENABLE 0x00000040 /* L: Late abort enable */
+#define CPU_CONTROL_BEND_ENABLE 0x00000080 /* B: Big-endian mode */
+#define CPU_CONTROL_SYST_ENABLE 0x00000100 /* S: System protection bit */
+#define CPU_CONTROL_ROM_ENABLE 0x00000200 /* R: ROM protection bit */
+#define CPU_CONTROL_CPCLK 0x00000400 /* F: Implementation defined */
+#define CPU_CONTROL_BPRD_ENABLE 0x00000800 /* Z: Branch prediction enable */
+#define CPU_CONTROL_IC_ENABLE 0x00001000 /* I: IC enable */
+#define CPU_CONTROL_VECRELOC 0x00002000 /* V: Vector relocation */
+#define CPU_CONTROL_ROUNDROBIN 0x00004000 /* RR: Predictable replacement */
+#define CPU_CONTROL_V4COMPAT 0x00008000 /* L4: ARMv4 compat LDR R15 etc */
+
+#define CPU_CONTROL_IDC_ENABLE CPU_CONTROL_DC_ENABLE
+
+/* XScale Auxiliary Control Register (CP15 register 1, opcode2 1) */
+#define XSCALE_AUXCTL_K 0x00000001 /* dis. write buffer coalescing */
+#define XSCALE_AUXCTL_P 0x00000002 /* ECC protect page table access */
+#define XSCALE_AUXCTL_MD_WB_RA 0x00000000 /* mini-D$ wb, read-allocate */
+#define XSCALE_AUXCTL_MD_WB_RWA 0x00000010 /* mini-D$ wb, read/write-allocate */
+#define XSCALE_AUXCTL_MD_WT 0x00000020 /* mini-D$ wt, read-allocate */
+#define XSCALE_AUXCTL_MD_MASK 0x00000030
+
+/* Cache type register definitions */
+#define CPU_CT_ISIZE(x) ((x) & 0xfff) /* I$ info */
+#define CPU_CT_DSIZE(x) (((x) >> 12) & 0xfff) /* D$ info */
+#define CPU_CT_S (1U << 24) /* split cache */
+#define CPU_CT_CTYPE(x) (((x) >> 25) & 0xf) /* cache type */
+
+#define CPU_CT_CTYPE_WT 0 /* write-through */
+#define CPU_CT_CTYPE_WB1 1 /* write-back, clean w/ read */
+#define CPU_CT_CTYPE_WB2 2 /* w/b, clean w/ cp15,7 */
+#define CPU_CT_CTYPE_WB6 6 /* w/b, cp15,7, lockdown fmt A */
+#define CPU_CT_CTYPE_WB7 7 /* w/b, cp15,7, lockdown fmt B */
+
+#define CPU_CT_xSIZE_LEN(x) ((x) & 0x3) /* line size */
+#define CPU_CT_xSIZE_M (1U << 2) /* multiplier */
+#define CPU_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x7) /* associativity */
+#define CPU_CT_xSIZE_SIZE(x) (((x) >> 6) & 0x7) /* size */
+
+/* Fault status register definitions */
+
+#define FAULT_TYPE_MASK 0x0f
+#define FAULT_USER 0x10
+
+#define FAULT_WRTBUF_0 0x00 /* Vector Exception */
+#define FAULT_WRTBUF_1 0x02 /* Terminal Exception */
+#define FAULT_BUSERR_0 0x04 /* External Abort on Linefetch -- Section */
+#define FAULT_BUSERR_1 0x06 /* External Abort on Linefetch -- Page */
+#define FAULT_BUSERR_2 0x08 /* External Abort on Non-linefetch -- Section */
+#define FAULT_BUSERR_3 0x0a /* External Abort on Non-linefetch -- Page */
+#define FAULT_BUSTRNL1 0x0c /* External abort on Translation -- Level 1 */
+#define FAULT_BUSTRNL2 0x0e /* External abort on Translation -- Level 2 */
+#define FAULT_ALIGN_0 0x01 /* Alignment */
+#define FAULT_ALIGN_1 0x03 /* Alignment */
+#define FAULT_TRANS_S 0x05 /* Translation -- Section */
+#define FAULT_TRANS_P 0x07 /* Translation -- Page */
+#define FAULT_DOMAIN_S 0x09 /* Domain -- Section */
+#define FAULT_DOMAIN_P 0x0b /* Domain -- Page */
+#define FAULT_PERM_S 0x0d /* Permission -- Section */
+#define FAULT_PERM_P 0x0f /* Permission -- Page */
+
+#define FAULT_IMPRECISE 0x400 /* Imprecise exception (XSCALE) */
+
+/*
+ * Address of the vector page, low and high versions.
+ */
+#define ARM_VECTORS_LOW 0x00000000U
+#define ARM_VECTORS_HIGH 0xffff0000U
+
+/*
+ * ARM Instructions
+ *
+ * 3 3 2 2 2
+ * 1 0 9 8 7 0
+ * +-------+-------------------------------------------------------+
+ * | cond | instruction dependent |
+ * |c c c c| |
+ * +-------+-------------------------------------------------------+
+ */
+
+#define INSN_SIZE 4 /* Always 4 bytes */
+#define INSN_COND_MASK 0xf0000000 /* Condition mask */
+#define INSN_COND_AL 0xe0000000 /* Always condition */
+
+#endif
diff --git a/sys/arch/arm/include/asm.h b/sys/arch/arm/include/asm.h
new file mode 100644
index 00000000000..c7bd017aa70
--- /dev/null
+++ b/sys/arch/arm/include/asm.h
@@ -0,0 +1,130 @@
+/* $OpenBSD: asm.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: asm.h,v 1.4 2001/07/16 05:43:32 matt Exp $ */
+
+/*
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)asm.h 5.5 (Berkeley) 5/7/91
+ */
+
+#ifndef _ARM32_ASM_H_
+#define _ARM32_ASM_H_
+
+#ifdef __ELF__
+# define _C_LABEL(x) x
+#else
+# ifdef __STDC__
+# define _C_LABEL(x) _ ## x
+# else
+# define _C_LABEL(x) _/**/x
+# endif
+#endif
+#define _ASM_LABEL(x) x
+
+#ifdef __STDC__
+# define __CONCAT(x,y) x ## y
+# define __STRING(x) #x
+#else
+# define __CONCAT(x,y) x/**/y
+# define __STRING(x) "x"
+#endif
+
+#ifndef _ALIGN_TEXT
+# define _ALIGN_TEXT .align 0
+#endif
+
+/*
+ * gas/arm uses @ as its single comment character and thus it cannot be used here.
+ * Instead, gas recognises a # in place of an @ symbol in .type directives.
+ * We define a couple of macros so that assembly code will not be dependent
+ * on one or the other.
+ */
+#define _ASM_TYPE_FUNCTION #function
+#define _ASM_TYPE_OBJECT #object
+#define _ENTRY(x) \
+ .text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x:
+
+#ifdef GPROF
+# ifdef __ELF__
+# define _PROF_PROLOGUE \
+ mov ip, lr; bl __mcount
+# else
+# define _PROF_PROLOGUE \
+ mov ip,lr; bl mcount
+# endif
+#else
+# define _PROF_PROLOGUE
+#endif
+
+#define ENTRY(y) _ENTRY(_C_LABEL(y)); _PROF_PROLOGUE
+#define ENTRY_NP(y) _ENTRY(_C_LABEL(y))
+#define ASENTRY(y) _ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
+#define ASENTRY_NP(y) _ENTRY(_ASM_LABEL(y))
+
+#define ASMSTR .asciz
+
+#if defined(__ELF__) && defined(PIC)
+#ifdef __STDC__
+#define PIC_SYM(x,y) x ## ( ## y ## )
+#else
+#define PIC_SYM(x,y) x/**/(/**/y/**/)
+#endif
+#else
+#define PIC_SYM(x,y) x
+#endif
+
+#ifdef __ELF__
+#define RCSID(x) .section ".ident"; .asciz x
+#else
+#define RCSID(x) .text; .asciz x
+#endif
+
+#ifdef __ELF__
+#define WEAK_ALIAS(alias,sym) \
+ .weak alias; \
+ alias = sym
+#endif
+
+#ifdef __STDC__
+#define WARN_REFERENCES(sym,msg) \
+ .stabs msg ## ,30,0,0,0 ; \
+ .stabs __STRING(_C_LABEL(sym)) ## ,1,0,0,0
+#elif defined(__ELF__)
+#define WARN_REFERENCES(sym,msg) \
+ .stabs msg,30,0,0,0 ; \
+ .stabs __STRING(sym),1,0,0,0
+#else
+#define WARN_REFERENCES(sym,msg) \
+ .stabs msg,30,0,0,0 ; \
+ .stabs __STRING(_/**/sym),1,0,0,0
+#endif /* __STDC__ */
+
+#endif /* !_ARM32_ASM_H_ */
diff --git a/sys/arch/arm/include/atomic.h b/sys/arch/arm/include/atomic.h
new file mode 100644
index 00000000000..9ef4c687a9e
--- /dev/null
+++ b/sys/arch/arm/include/atomic.h
@@ -0,0 +1,103 @@
+/* $OpenBSD: atomic.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */
+
+/*
+ * Copyright (C) 1994-1997 Mark Brinicombe
+ * Copyright (C) 1994 Brini
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of Brini may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_ATOMIC_H_
+#define _ARM_ATOMIC_H_
+
+#ifndef ATOMIC_SET_BIT_NONINLINE_REQUIRED
+
+#if defined(__PROG26) || defined(ATOMIC_SET_BIT_NOINLINE)
+#define ATOMIC_SET_BIT_NONINLINE_REQUIRED
+#endif
+
+#endif /* ATOMIC_SET_BIT_NONINLINE_REQUIRED */
+
+
+#ifndef _LOCORE
+
+#include <sys/types.h>
+#include <arm/armreg.h> /* I32_bit */
+
+#ifdef ATOMIC_SET_BIT_NONINLINE_REQUIRED
+void atomic_set_bit( u_int *, u_int );
+void atomic_clear_bit( u_int *, u_int );
+#endif
+
+#ifdef __PROG32
+#define __with_interrupts_disabled(expr) \
+ do { \
+ u_int cpsr_save, tmp; \
+ \
+ __asm __volatile( \
+ "mrs %0, cpsr;" \
+ "orr %1, %0, %2;" \
+ "msr cpsr_all, %1;" \
+ : "=r" (cpsr_save), "=r" (tmp) \
+ : "I" (I32_bit) \
+ : "cc" ); \
+ (expr); \
+ __asm __volatile( \
+ "msr cpsr_all, %0" \
+ : /* no output */ \
+ : "r" (cpsr_save) \
+ : "cc" ); \
+ } while(0)
+
+static __inline void
+inline_atomic_set_bit( u_int *address, u_int setmask )
+{
+ __with_interrupts_disabled( *address |= setmask );
+}
+
+static __inline void
+inline_atomic_clear_bit( u_int *address, u_int clearmask )
+{
+ __with_interrupts_disabled( *address &= ~clearmask );
+}
+
+#if !defined(ATOMIC_SET_BIT_NOINLINE)
+
+#define atomic_set_bit(a,m) inline_atomic_set_bit(a,m)
+#define atomic_clear_bit(a,m) inline_atomic_clear_bit(a,m)
+
+#endif
+
+#endif /* __PROG32 */
+
+#undef __with_interrupts_disabled
+
+#endif /* _LOCORE */
+#endif /* _ARM_ATOMIC_H_ */
diff --git a/sys/arch/arm/include/blockio.h b/sys/arch/arm/include/blockio.h
new file mode 100644
index 00000000000..ce071c4717b
--- /dev/null
+++ b/sys/arch/arm/include/blockio.h
@@ -0,0 +1,54 @@
+/* $OpenBSD: blockio.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: blockio.h,v 1.2 2001/06/02 10:44:56 bjh21 Exp $ */
+
+/*-
+ * Copyright (c) 2001 Ben Harris
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * blockio.h - low level functions for bulk PIO data transfer
+ */
+
+#ifndef _ARM_BLOCKIO_H_
+#define _ARM_BLOCKIO_H_
+
+/*
+ * All these take three arguments:
+ * I/O address
+ * Memory address
+ * Number of bytes to copy
+ */
+
+void read_multi_1(u_int, void *, u_int);
+void write_multi_1(u_int, const void *, u_int);
+#define read_multi_2 insw16
+#define write_multi_2 outsw16
+
+void insw(u_int, void *, u_int);
+void outsw(u_int, void *, u_int);
+void insw16(u_int, void *, u_int);
+void outsw16(u_int, void *, u_int);
+
+#endif
diff --git a/sys/arch/arm/include/bus.h b/sys/arch/arm/include/bus.h
new file mode 100644
index 00000000000..b1a508e9903
--- /dev/null
+++ b/sys/arch/arm/include/bus.h
@@ -0,0 +1,1071 @@
+/* $NetBSD: bus.h,v 1.12 2003/10/23 15:03:24 scw Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM32_BUS_H_
+#define _ARM32_BUS_H_
+
+#if defined(_KERNEL_OPT)
+#include "opt_arm_bus_space.h"
+#endif
+
+/*
+ * Addresses (in bus space).
+ */
+typedef u_long bus_addr_t;
+typedef u_long bus_size_t;
+
+/*
+ * Access methods for bus space.
+ */
+typedef struct bus_space *bus_space_tag_t;
+typedef u_long bus_space_handle_t;
+
+/*
+ * int bus_space_map __P((bus_space_tag_t t, bus_addr_t addr,
+ * bus_size_t size, int flags, bus_space_handle_t *bshp));
+ *
+ * Map a region of bus space.
+ */
+
+#define BUS_SPACE_MAP_CACHEABLE 0x01
+#define BUS_SPACE_MAP_LINEAR 0x02
+#define BUS_SPACE_MAP_PREFETCHABLE 0x04
+
+struct bus_space {
+ /* cookie */
+ void *bs_cookie;
+
+ /* mapping/unmapping */
+ int (*bs_map) __P((void *, bus_addr_t, bus_size_t,
+ int, bus_space_handle_t *));
+ void (*bs_unmap) __P((void *, bus_space_handle_t,
+ bus_size_t));
+ int (*bs_subregion) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, bus_space_handle_t *));
+
+ /* allocation/deallocation */
+ int (*bs_alloc) __P((void *, bus_addr_t, bus_addr_t,
+ bus_size_t, bus_size_t, bus_size_t, int,
+ bus_addr_t *, bus_space_handle_t *));
+ void (*bs_free) __P((void *, bus_space_handle_t,
+ bus_size_t));
+
+ /* get kernel virtual address */
+ void * (*bs_vaddr) __P((void *, bus_space_handle_t));
+
+ /* mmap bus space for user */
+ paddr_t (*bs_mmap) __P((void *, bus_addr_t, off_t, int, int));
+
+ /* barrier */
+ void (*bs_barrier) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, int));
+
+ /* read (single) */
+ u_int8_t (*bs_r_1) __P((void *, bus_space_handle_t,
+ bus_size_t));
+ u_int16_t (*bs_r_2) __P((void *, bus_space_handle_t,
+ bus_size_t));
+ u_int32_t (*bs_r_4) __P((void *, bus_space_handle_t,
+ bus_size_t));
+ u_int64_t (*bs_r_8) __P((void *, bus_space_handle_t,
+ bus_size_t));
+
+ /* read multiple */
+ void (*bs_rm_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t));
+ void (*bs_rm_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t));
+ void (*bs_rm_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t));
+ void (*bs_rm_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t));
+
+ /* read region */
+ void (*bs_rr_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t));
+ void (*bs_rr_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t));
+ void (*bs_rr_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t));
+ void (*bs_rr_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t));
+
+ /* write (single) */
+ void (*bs_w_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t));
+ void (*bs_w_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t));
+ void (*bs_w_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t));
+ void (*bs_w_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t));
+
+ /* write multiple */
+ void (*bs_wm_1) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t));
+ void (*bs_wm_2) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t));
+ void (*bs_wm_4) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t));
+ void (*bs_wm_8) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t));
+
+ /* write region */
+ void (*bs_wr_1) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t));
+ void (*bs_wr_2) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t));
+ void (*bs_wr_4) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t));
+ void (*bs_wr_8) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t));
+
+ /* set multiple */
+ void (*bs_sm_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t));
+ void (*bs_sm_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t));
+ void (*bs_sm_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t));
+ void (*bs_sm_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t));
+
+ /* set region */
+ void (*bs_sr_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t));
+ void (*bs_sr_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t));
+ void (*bs_sr_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t));
+ void (*bs_sr_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t));
+
+ /* copy */
+ void (*bs_c_1) __P((void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t));
+ void (*bs_c_2) __P((void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t));
+ void (*bs_c_4) __P((void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t));
+ void (*bs_c_8) __P((void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t));
+
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+ /* read stream (single) */
+ u_int8_t (*bs_r_1_s) __P((void *, bus_space_handle_t,
+ bus_size_t));
+ u_int16_t (*bs_r_2_s) __P((void *, bus_space_handle_t,
+ bus_size_t));
+ u_int32_t (*bs_r_4_s) __P((void *, bus_space_handle_t,
+ bus_size_t));
+ u_int64_t (*bs_r_8_s) __P((void *, bus_space_handle_t,
+ bus_size_t));
+
+ /* read multiple stream */
+ void (*bs_rm_1_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t));
+ void (*bs_rm_2_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t));
+ void (*bs_rm_4_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t));
+ void (*bs_rm_8_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t));
+
+ /* read region stream */
+ void (*bs_rr_1_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t));
+ void (*bs_rr_2_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t));
+ void (*bs_rr_4_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t));
+ void (*bs_rr_8_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t));
+
+ /* write stream (single) */
+ void (*bs_w_1_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t));
+ void (*bs_w_2_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t));
+ void (*bs_w_4_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t));
+ void (*bs_w_8_s) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t));
+
+ /* write multiple stream */
+ void (*bs_wm_1_s) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t));
+ void (*bs_wm_2_s) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t));
+ void (*bs_wm_4_s) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t));
+ void (*bs_wm_8_s) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t));
+
+ /* write region stream */
+ void (*bs_wr_1_s) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t));
+ void (*bs_wr_2_s) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t));
+ void (*bs_wr_4_s) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t));
+ void (*bs_wr_8_s) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t));
+#endif /* __BUS_SPACE_HAS_STREAM_METHODS */
+};
+
+
+/*
+ * Utility macros; INTERNAL USE ONLY.
+ */
+#define __bs_c(a,b) __CONCAT(a,b)
+#define __bs_opname(op,size) __bs_c(__bs_c(__bs_c(bs_,op),_),size)
+
+#define __bs_rs(sz, t, h, o) \
+ (*(t)->__bs_opname(r,sz))((t)->bs_cookie, h, o)
+#define __bs_ws(sz, t, h, o, v) \
+ (*(t)->__bs_opname(w,sz))((t)->bs_cookie, h, o, v)
+#define __bs_nonsingle(type, sz, t, h, o, a, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, a, c)
+#define __bs_set(type, sz, t, h, o, v, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, v, c)
+#define __bs_copy(sz, t, h1, o1, h2, o2, cnt) \
+ (*(t)->__bs_opname(c,sz))((t)->bs_cookie, h1, o1, h2, o2, cnt)
+
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define __bs_opname_s(op,size) __bs_c(__bs_c(__bs_c(__bs_c(bs_,op),_),size),_s)
+#define __bs_rs_s(sz, t, h, o) \
+ (*(t)->__bs_opname_s(r,sz))((t)->bs_cookie, h, o)
+#define __bs_ws_s(sz, t, h, o, v) \
+ (*(t)->__bs_opname_s(w,sz))((t)->bs_cookie, h, o, v)
+#define __bs_nonsingle_s(type, sz, t, h, o, a, c) \
+ (*(t)->__bs_opname_s(type,sz))((t)->bs_cookie, h, o, a, c)
+#define __bs_set_s(type, sz, t, h, o, v, c) \
+ (*(t)->__bs_opname_s(type,sz))((t)->bs_cookie, h, o, v, c)
+#define __bs_copy_s(sz, t, h1, o1, h2, o2, cnt) \
+ (*(t)->__bs_opname_s(c,sz))((t)->bs_cookie, h1, o1, h2, o2, cnt)
+#endif
+
+/*
+ * Mapping and unmapping operations.
+ */
+#define bus_space_map(t, a, s, c, hp) \
+ (*(t)->bs_map)((t)->bs_cookie, (a), (s), (c), (hp))
+#define bus_space_unmap(t, h, s) \
+ (*(t)->bs_unmap)((t)->bs_cookie, (h), (s))
+#define bus_space_subregion(t, h, o, s, hp) \
+ (*(t)->bs_subregion)((t)->bs_cookie, (h), (o), (s), (hp))
+
+
+/*
+ * Allocation and deallocation operations.
+ */
+#define bus_space_alloc(t, rs, re, s, a, b, c, ap, hp) \
+ (*(t)->bs_alloc)((t)->bs_cookie, (rs), (re), (s), (a), (b), \
+ (c), (ap), (hp))
+#define bus_space_free(t, h, s) \
+ (*(t)->bs_free)((t)->bs_cookie, (h), (s))
+
+/*
+ * Get kernel virtual address for ranges mapped BUS_SPACE_MAP_LINEAR.
+ */
+#define bus_space_vaddr(t, h) \
+ (*(t)->bs_vaddr)((t)->bs_cookie, (h))
+
+/*
+ * MMap bus space for a user application.
+ */
+#define bus_space_mmap(t, a, o, p, f) \
+ (*(t)->bs_mmap)((t)->bs_cookie, (a), (o), (p), (f))
+
+/*
+ * Bus barrier operations.
+ */
+#define bus_space_barrier(t, h, o, l, f) \
+ (*(t)->bs_barrier)((t)->bs_cookie, (h), (o), (l), (f))
+
+#define BUS_SPACE_BARRIER_READ 0x01
+#define BUS_SPACE_BARRIER_WRITE 0x02
+
+/*
+ * Bus read (single) operations.
+ */
+#define bus_space_read_1(t, h, o) __bs_rs(1,(t),(h),(o))
+#define bus_space_read_2(t, h, o) __bs_rs(2,(t),(h),(o))
+#define bus_space_read_4(t, h, o) __bs_rs(4,(t),(h),(o))
+#define bus_space_read_8(t, h, o) __bs_rs(8,(t),(h),(o))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_read_stream_1(t, h, o) __bs_rs_s(1,(t),(h),(o))
+#define bus_space_read_stream_2(t, h, o) __bs_rs_s(2,(t),(h),(o))
+#define bus_space_read_stream_4(t, h, o) __bs_rs_s(4,(t),(h),(o))
+#define bus_space_read_stream_8(t, h, o) __bs_rs_s(8,(t),(h),(o))
+#endif
+
+
+/*
+ * Bus read multiple operations.
+ */
+#define bus_space_read_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(rm,1,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(rm,2,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(rm,4,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(rm,8,(t),(h),(o),(a),(c))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_read_multi_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,1,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,2,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,4,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,8,(t),(h),(o),(a),(c))
+#endif
+
+
+/*
+ * Bus read region operations.
+ */
+#define bus_space_read_region_1(t, h, o, a, c) \
+ __bs_nonsingle(rr,1,(t),(h),(o),(a),(c))
+#define bus_space_read_region_2(t, h, o, a, c) \
+ __bs_nonsingle(rr,2,(t),(h),(o),(a),(c))
+#define bus_space_read_region_4(t, h, o, a, c) \
+ __bs_nonsingle(rr,4,(t),(h),(o),(a),(c))
+#define bus_space_read_region_8(t, h, o, a, c) \
+ __bs_nonsingle(rr,8,(t),(h),(o),(a),(c))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_read_region_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,1,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,2,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,4,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,8,(t),(h),(o),(a),(c))
+#endif
+
+
+/*
+ * Bus write (single) operations.
+ */
+#define bus_space_write_1(t, h, o, v) __bs_ws(1,(t),(h),(o),(v))
+#define bus_space_write_2(t, h, o, v) __bs_ws(2,(t),(h),(o),(v))
+#define bus_space_write_4(t, h, o, v) __bs_ws(4,(t),(h),(o),(v))
+#define bus_space_write_8(t, h, o, v) __bs_ws(8,(t),(h),(o),(v))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_write_stream_1(t, h, o, v) __bs_ws_s(1,(t),(h),(o),(v))
+#define bus_space_write_stream_2(t, h, o, v) __bs_ws_s(2,(t),(h),(o),(v))
+#define bus_space_write_stream_4(t, h, o, v) __bs_ws_s(4,(t),(h),(o),(v))
+#define bus_space_write_stream_8(t, h, o, v) __bs_ws_s(8,(t),(h),(o),(v))
+#endif
+
+
+/*
+ * Bus write multiple operations.
+ */
+#define bus_space_write_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(wm,1,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(wm,2,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(wm,4,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(wm,8,(t),(h),(o),(a),(c))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_write_multi_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,1,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,2,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,4,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,8,(t),(h),(o),(a),(c))
+#endif
+
+
+/*
+ * Bus write region operations.
+ */
+#define bus_space_write_region_1(t, h, o, a, c) \
+ __bs_nonsingle(wr,1,(t),(h),(o),(a),(c))
+#define bus_space_write_region_2(t, h, o, a, c) \
+ __bs_nonsingle(wr,2,(t),(h),(o),(a),(c))
+#define bus_space_write_region_4(t, h, o, a, c) \
+ __bs_nonsingle(wr,4,(t),(h),(o),(a),(c))
+#define bus_space_write_region_8(t, h, o, a, c) \
+ __bs_nonsingle(wr,8,(t),(h),(o),(a),(c))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_write_region_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,1,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,2,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,4,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,8,(t),(h),(o),(a),(c))
+#endif
+
+
+/*
+ * Set multiple operations.
+ */
+#define bus_space_set_multi_1(t, h, o, v, c) \
+ __bs_set(sm,1,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_2(t, h, o, v, c) \
+ __bs_set(sm,2,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_4(t, h, o, v, c) \
+ __bs_set(sm,4,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_8(t, h, o, v, c) \
+ __bs_set(sm,8,(t),(h),(o),(v),(c))
+
+/*
+ * Set region operations.
+ */
+#define bus_space_set_region_1(t, h, o, v, c) \
+ __bs_set(sr,1,(t),(h),(o),(v),(c))
+#define bus_space_set_region_2(t, h, o, v, c) \
+ __bs_set(sr,2,(t),(h),(o),(v),(c))
+#define bus_space_set_region_4(t, h, o, v, c) \
+ __bs_set(sr,4,(t),(h),(o),(v),(c))
+#define bus_space_set_region_8(t, h, o, v, c) \
+ __bs_set(sr,8,(t),(h),(o),(v),(c))
+
+/*
+ * Copy operations.
+ */
+#define bus_space_copy_region_1(t, h1, o1, h2, o2, c) \
+ __bs_copy(1, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_2(t, h1, o1, h2, o2, c) \
+ __bs_copy(2, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_4(t, h1, o1, h2, o2, c) \
+ __bs_copy(4, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_8(t, h1, o1, h2, o2, c) \
+ __bs_copy(8, t, h1, o1, h2, o2, c)
+
+void
+bus_space_copy_1(bus_space_tag_t bst, bus_space_handle_t h1,
+ bus_space_handle_t h2, bus_size_t o1, bus_size_t o2, bus_size_t c);
+void
+bus_space_copy_2(bus_space_tag_t bst, bus_space_handle_t h1,
+ bus_space_handle_t h2, bus_size_t o1, bus_size_t o2, bus_size_t c);
+void
+bus_space_copy_4(bus_space_tag_t bst, bus_space_handle_t h1,
+ bus_space_handle_t h2, bus_size_t o1, bus_size_t o2, bus_size_t c);
+#define bus_space_copy_8 \
+ !!! bus_space_write_raw_multi_8 not implemented !!!
+
+/*
+ * Macros to provide prototypes for all the functions used in the
+ * bus_space structure
+ */
+
+#define bs_map_proto(f) \
+int __bs_c(f,_bs_map) __P((void *t, bus_addr_t addr, \
+ bus_size_t size, int cacheable, bus_space_handle_t *bshp));
+
+#define bs_unmap_proto(f) \
+void __bs_c(f,_bs_unmap) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t size));
+
+#define bs_subregion_proto(f) \
+int __bs_c(f,_bs_subregion) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, bus_size_t size, \
+ bus_space_handle_t *nbshp));
+
+#define bs_alloc_proto(f) \
+int __bs_c(f,_bs_alloc) __P((void *t, bus_addr_t rstart, \
+ bus_addr_t rend, bus_size_t size, bus_size_t align, \
+ bus_size_t boundary, int cacheable, bus_addr_t *addrp, \
+ bus_space_handle_t *bshp));
+
+#define bs_free_proto(f) \
+void __bs_c(f,_bs_free) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t size));
+
+#define bs_vaddr_proto(f) \
+void * __bs_c(f,_bs_vaddr) __P((void *t, bus_space_handle_t bsh));
+
+#define bs_mmap_proto(f) \
+paddr_t __bs_c(f,_bs_mmap) __P((void *, bus_addr_t, off_t, int, int));
+
+#define bs_barrier_proto(f) \
+void __bs_c(f,_bs_barrier) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, bus_size_t len, int flags));
+
+#define bs_r_1_proto(f) \
+u_int8_t __bs_c(f,_bs_r_1) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset));
+
+#define bs_r_2_proto(f) \
+u_int16_t __bs_c(f,_bs_r_2) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset));
+
+#define bs_r_4_proto(f) \
+u_int32_t __bs_c(f,_bs_r_4) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset));
+
+#define bs_r_8_proto(f) \
+u_int64_t __bs_c(f,_bs_r_8) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset));
+
+#define bs_w_1_proto(f) \
+void __bs_c(f,_bs_w_1) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int8_t value));
+
+#define bs_w_2_proto(f) \
+void __bs_c(f,_bs_w_2) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int16_t value));
+
+#define bs_w_4_proto(f) \
+void __bs_c(f,_bs_w_4) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int32_t value));
+
+#define bs_w_8_proto(f) \
+void __bs_c(f,_bs_w_8) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int64_t value));
+
+#define bs_rm_1_proto(f) \
+void __bs_c(f,_bs_rm_1) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int8_t *addr, bus_size_t count));
+
+#define bs_rm_2_proto(f) \
+void __bs_c(f,_bs_rm_2) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int16_t *addr, bus_size_t count));
+
+#define bs_rm_4_proto(f) \
+void __bs_c(f,_bs_rm_4) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int32_t *addr, bus_size_t count));
+
+#define bs_rm_8_proto(f) \
+void __bs_c(f,_bs_rm_8) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int64_t *addr, bus_size_t count));
+
+#define bs_wm_1_proto(f) \
+void __bs_c(f,_bs_wm_1) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int8_t *addr, bus_size_t count));
+
+#define bs_wm_2_proto(f) \
+void __bs_c(f,_bs_wm_2) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int16_t *addr, bus_size_t count));
+
+#define bs_wm_4_proto(f) \
+void __bs_c(f,_bs_wm_4) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int32_t *addr, bus_size_t count));
+
+#define bs_wm_8_proto(f) \
+void __bs_c(f,_bs_wm_8) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int64_t *addr, bus_size_t count));
+
+#define bs_rr_1_proto(f) \
+void __bs_c(f, _bs_rr_1) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int8_t *addr, bus_size_t count));
+
+#define bs_rr_2_proto(f) \
+void __bs_c(f, _bs_rr_2) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int16_t *addr, bus_size_t count));
+
+#define bs_rr_4_proto(f) \
+void __bs_c(f, _bs_rr_4) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int32_t *addr, bus_size_t count));
+
+#define bs_rr_8_proto(f) \
+void __bs_c(f, _bs_rr_8) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int64_t *addr, bus_size_t count));
+
+#define bs_wr_1_proto(f) \
+void __bs_c(f, _bs_wr_1) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int8_t *addr, bus_size_t count));
+
+#define bs_wr_2_proto(f) \
+void __bs_c(f, _bs_wr_2) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int16_t *addr, bus_size_t count));
+
+#define bs_wr_4_proto(f) \
+void __bs_c(f, _bs_wr_4) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int32_t *addr, bus_size_t count));
+
+#define bs_wr_8_proto(f) \
+void __bs_c(f, _bs_wr_8) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const u_int64_t *addr, bus_size_t count));
+
+#define bs_sm_1_proto(f) \
+void __bs_c(f,_bs_sm_1) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int8_t value, bus_size_t count));
+
+#define bs_sm_2_proto(f) \
+void __bs_c(f,_bs_sm_2) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int16_t value, bus_size_t count));
+
+#define bs_sm_4_proto(f) \
+void __bs_c(f,_bs_sm_4) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int32_t value, bus_size_t count));
+
+#define bs_sm_8_proto(f) \
+void __bs_c(f,_bs_sm_8) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int64_t value, bus_size_t count));
+
+#define bs_sr_1_proto(f) \
+void __bs_c(f,_bs_sr_1) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int8_t value, bus_size_t count));
+
+#define bs_sr_2_proto(f) \
+void __bs_c(f,_bs_sr_2) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int16_t value, bus_size_t count));
+
+#define bs_sr_4_proto(f) \
+void __bs_c(f,_bs_sr_4) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int32_t value, bus_size_t count));
+
+#define bs_sr_8_proto(f) \
+void __bs_c(f,_bs_sr_8) __P((void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, u_int64_t value, bus_size_t count));
+
+#define bs_c_1_proto(f) \
+void __bs_c(f,_bs_c_1) __P((void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count));
+
+#define bs_c_2_proto(f) \
+void __bs_c(f,_bs_c_2) __P((void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count));
+
+#define bs_c_4_proto(f) \
+void __bs_c(f,_bs_c_4) __P((void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count));
+
+#define bs_c_8_proto(f) \
+void __bs_c(f,_bs_c_8) __P((void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count));
+
+#define bs_protos(f) \
+bs_map_proto(f); \
+bs_unmap_proto(f); \
+bs_subregion_proto(f); \
+bs_alloc_proto(f); \
+bs_free_proto(f); \
+bs_vaddr_proto(f); \
+bs_mmap_proto(f); \
+bs_barrier_proto(f); \
+bs_r_1_proto(f); \
+bs_r_2_proto(f); \
+bs_r_4_proto(f); \
+bs_r_8_proto(f); \
+bs_w_1_proto(f); \
+bs_w_2_proto(f); \
+bs_w_4_proto(f); \
+bs_w_8_proto(f); \
+bs_rm_1_proto(f); \
+bs_rm_2_proto(f); \
+bs_rm_4_proto(f); \
+bs_rm_8_proto(f); \
+bs_wm_1_proto(f); \
+bs_wm_2_proto(f); \
+bs_wm_4_proto(f); \
+bs_wm_8_proto(f); \
+bs_rr_1_proto(f); \
+bs_rr_2_proto(f); \
+bs_rr_4_proto(f); \
+bs_rr_8_proto(f); \
+bs_wr_1_proto(f); \
+bs_wr_2_proto(f); \
+bs_wr_4_proto(f); \
+bs_wr_8_proto(f); \
+bs_sm_1_proto(f); \
+bs_sm_2_proto(f); \
+bs_sm_4_proto(f); \
+bs_sm_8_proto(f); \
+bs_sr_1_proto(f); \
+bs_sr_2_proto(f); \
+bs_sr_4_proto(f); \
+bs_sr_8_proto(f); \
+bs_c_1_proto(f); \
+bs_c_2_proto(f); \
+bs_c_4_proto(f); \
+bs_c_8_proto(f);
+
+#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t)
+
+/* Bus Space DMA macros */
+
+/*
+ * Flags used in various bus DMA methods.
+ */
+#define BUS_DMA_WAITOK 0x000 /* safe to sleep (pseudo-flag) */
+#define BUS_DMA_NOWAIT 0x001 /* not safe to sleep */
+#define BUS_DMA_ALLOCNOW 0x002 /* perform resource allocation now */
+#define BUS_DMA_COHERENT 0x004 /* hint: map memory DMA coherent */
+#define BUS_DMA_STREAMING 0x008 /* hint: sequential, unidirectional */
+#define BUS_DMA_BUS1 0x010 /* placeholders for bus functions... */
+#define BUS_DMA_BUS2 0x020
+#define BUS_DMA_BUS3 0x040
+#define BUS_DMA_BUS4 0x080
+#define BUS_DMA_READ 0x100 /* mapping is device -> memory only */
+#define BUS_DMA_WRITE 0x200 /* mapping is memory -> device only */
+#define BUS_DMA_NOCACHE 0x400 /* hint: map non-cached memory */
+
+/*
+ * Private flags stored in the DMA map.
+ */
+#define ARM32_DMAMAP_COHERENT 0x10000 /* no cache flush necessary on sync */
+
+/* Forwards needed by prototypes below. */
+struct mbuf;
+struct uio;
+
+/*
+ * Operations performed by bus_dmamap_sync().
+ */
+#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */
+#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */
+#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */
+#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */
+
+typedef struct arm32_bus_dma_tag *bus_dma_tag_t;
+typedef struct arm32_bus_dmamap *bus_dmamap_t;
+
+#define BUS_DMA_TAG_VALID(t) ((t) != (bus_dma_tag_t)0)
+
+/*
+ * bus_dma_segment_t
+ *
+ * Describes a single contiguous DMA transaction. Values
+ * are suitable for programming into DMA registers.
+ */
+struct arm32_bus_dma_segment {
+ /*
+ * PUBLIC MEMBERS: these are used by machine-independent code.
+ */
+ bus_addr_t ds_addr; /* DMA address */
+ bus_size_t ds_len; /* length of transfer */
+ /*
+ * PRIVATE MEMBERS: not for use by machine-independent code.
+ */
+ bus_addr_t _ds_vaddr; /* Virtual mapped address
+ * Used by bus_dmamem_sync() */
+};
+typedef struct arm32_bus_dma_segment bus_dma_segment_t;
+
+/*
+ * arm32_dma_range
+ *
+ * This structure describes a valid DMA range.
+ */
+struct arm32_dma_range {
+ bus_addr_t dr_sysbase; /* system base address */
+ bus_addr_t dr_busbase; /* appears here on bus */
+ bus_size_t dr_len; /* length of range */
+};
+
+/*
+ * bus_dma_tag_t
+ *
+ * A machine-dependent opaque type describing the implementation of
+ * DMA for a given bus.
+ */
+
+struct arm32_bus_dma_tag {
+ /*
+ * DMA range for this tag. If the page doesn't fall within
+ * one of these ranges, an error is returned. The caller
+ * may then decide what to do with the transfer. If the
+ * range pointer is NULL, it is ignored.
+ */
+ struct arm32_dma_range *_ranges;
+ int _nranges;
+
+ /*
+ * Opaque cookie for use by back-end.
+ */
+ void *_cookie;
+
+ /*
+ * DMA mapping methods.
+ */
+ int (*_dmamap_create) __P((bus_dma_tag_t, bus_size_t, int,
+ bus_size_t, bus_size_t, int, bus_dmamap_t *));
+ void (*_dmamap_destroy) __P((bus_dma_tag_t, bus_dmamap_t));
+ int (*_dmamap_load) __P((bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int));
+ int (*_dmamap_load_mbuf) __P((bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int));
+ int (*_dmamap_load_uio) __P((bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int));
+ int (*_dmamap_load_raw) __P((bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int));
+ void (*_dmamap_unload) __P((bus_dma_tag_t, bus_dmamap_t));
+ void (*_dmamap_sync) __P((bus_dma_tag_t, bus_dmamap_t,
+ bus_addr_t, bus_size_t, int));
+
+ /*
+ * DMA memory utility functions.
+ */
+ int (*_dmamem_alloc) __P((bus_dma_tag_t, bus_size_t, bus_size_t,
+ bus_size_t, bus_dma_segment_t *, int, int *, int));
+ void (*_dmamem_free) __P((bus_dma_tag_t,
+ bus_dma_segment_t *, int));
+ int (*_dmamem_map) __P((bus_dma_tag_t, bus_dma_segment_t *,
+ int, size_t, caddr_t *, int));
+ void (*_dmamem_unmap) __P((bus_dma_tag_t, caddr_t, size_t));
+ paddr_t (*_dmamem_mmap) __P((bus_dma_tag_t, bus_dma_segment_t *,
+ int, off_t, int, int));
+};
+
+#define bus_dmamap_create(t, s, n, m, b, f, p) \
+ (*(t)->_dmamap_create)((t), (s), (n), (m), (b), (f), (p))
+#define bus_dmamap_destroy(t, p) \
+ (*(t)->_dmamap_destroy)((t), (p))
+#define bus_dmamap_load(t, m, b, s, p, f) \
+ (*(t)->_dmamap_load)((t), (m), (b), (s), (p), (f))
+#define bus_dmamap_load_mbuf(t, m, b, f) \
+ (*(t)->_dmamap_load_mbuf)((t), (m), (b), (f))
+#define bus_dmamap_load_uio(t, m, u, f) \
+ (*(t)->_dmamap_load_uio)((t), (m), (u), (f))
+#define bus_dmamap_load_raw(t, m, sg, n, s, f) \
+ (*(t)->_dmamap_load_raw)((t), (m), (sg), (n), (s), (f))
+#define bus_dmamap_unload(t, p) \
+ (*(t)->_dmamap_unload)((t), (p))
+#define bus_dmamap_sync(t, p, o, l, ops) \
+ (void)((t)->_dmamap_sync ? \
+ (*(t)->_dmamap_sync)((t), (p), (o), (l), (ops)) : (void)0)
+
+#define bus_dmamem_alloc(t, s, a, b, sg, n, r, f) \
+ (*(t)->_dmamem_alloc)((t), (s), (a), (b), (sg), (n), (r), (f))
+#define bus_dmamem_free(t, sg, n) \
+ (*(t)->_dmamem_free)((t), (sg), (n))
+#define bus_dmamem_map(t, sg, n, s, k, f) \
+ (*(t)->_dmamem_map)((t), (sg), (n), (s), (k), (f))
+#define bus_dmamem_unmap(t, k, s) \
+ (*(t)->_dmamem_unmap)((t), (k), (s))
+#define bus_dmamem_mmap(t, sg, n, o, p, f) \
+ (*(t)->_dmamem_mmap)((t), (sg), (n), (o), (p), (f))
+
+/*
+ * bus_dmamap_t
+ *
+ * Describes a DMA mapping.
+ */
+struct arm32_bus_dmamap {
+ /*
+ * PRIVATE MEMBERS: not for use by machine-independent code.
+ */
+ bus_size_t _dm_size; /* largest DMA transfer mappable */
+ int _dm_segcnt; /* number of segs this map can map */
+ bus_size_t _dm_maxsegsz; /* largest possible segment */
+ bus_size_t _dm_boundary; /* don't cross this */
+ int _dm_flags; /* misc. flags */
+
+ void *_dm_origbuf; /* pointer to original buffer */
+ int _dm_buftype; /* type of buffer */
+ struct proc *_dm_proc; /* proc that owns the mapping */
+
+ void *_dm_cookie; /* cookie for bus-specific functions */
+
+ /*
+ * PUBLIC MEMBERS: these are used by machine-independent code.
+ */
+ bus_size_t dm_mapsize; /* size of the mapping */
+ int dm_nsegs; /* # valid segments in mapping */
+ bus_dma_segment_t dm_segs[1]; /* segments; variable length */
+};
+
+#ifdef _ARM32_BUS_DMA_PRIVATE
+
+/* _dm_buftype */
+#define ARM32_BUFTYPE_INVALID 0
+#define ARM32_BUFTYPE_LINEAR 1
+#define ARM32_BUFTYPE_MBUF 2
+#define ARM32_BUFTYPE_UIO 3
+#define ARM32_BUFTYPE_RAW 4
+
+int arm32_dma_range_intersect(struct arm32_dma_range *, int,
+ paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep);
+
+int _bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int, bus_size_t,
+ bus_size_t, int, bus_dmamap_t *));
+void _bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
+int _bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int));
+int _bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int));
+int _bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int));
+int _bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int));
+void _bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
+void _bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
+ bus_size_t, int));
+
+int _bus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
+void _bus_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs));
+int _bus_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs, size_t size, caddr_t *kvap, int flags));
+void _bus_dmamem_unmap __P((bus_dma_tag_t tag, caddr_t kva,
+ size_t size));
+paddr_t _bus_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs, off_t off, int prot, int flags));
+
+int _bus_dmamem_alloc_range __P((bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
+ vaddr_t low, vaddr_t high));
+#endif /* _ARM32_BUS_DMA_PRIVATE */
+/* These are OpenBSD extensions to the general NetBSD bus interface. */
+#if 0
+void
+bus_space_read_raw_multi_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+ bus_addr_t ba, u_int8_t *dst, bus_size_t size);
+void
+bus_space_read_raw_multi_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+ bus_addr_t ba, u_int8_t *dst, bus_size_t size);
+#define bus_space_read_raw_multi_8 \
+ !!! bus_space_read_raw_multi_8 not implemented !!!
+
+void
+bus_space_write_raw_multi_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+ bus_addr_t ba, const u_int8_t *src, bus_size_t size);
+void
+bus_space_write_raw_multi_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+ bus_addr_t ba, const u_int8_t *src, bus_size_t size);
+#define bus_space_write_raw_multi_8 \
+ !!! bus_space_write_raw_multi_8 not implemented !!!
+
+#else
+/* BLECH XXXDSR */
+static inline void
+bus_space_read_raw_multi_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_addr_t ba, u_int8_t *dst, bus_size_t size);
+static inline void
+bus_space_read_raw_multi_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_addr_t ba, u_int8_t *dst, bus_size_t size)
+{
+	u_int16_t *datap = (u_int16_t *)dst;	/* size is in bytes; assumed a multiple of 2 */
+	while (size > 0) {
+		*datap = bus_space_read_2(bst, bsh, ba);	/* same bus address each iteration */
+		datap++;
+		size -= 2;
+	}
+}
+static inline void
+bus_space_read_raw_multi_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_addr_t ba, u_int8_t *dst, bus_size_t size);
+static inline void
+bus_space_read_raw_multi_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_addr_t ba, u_int8_t *dst, bus_size_t size)
+{
+	u_int32_t *datap = (u_int32_t *)dst;	/* size is in bytes; assumed a multiple of 4 */
+	while (size > 0) {
+		*datap = bus_space_read_4(bst, bsh, ba);	/* same bus address each iteration */
+		datap++;
+		size -= 4;
+	}
+}
+
+static inline void
+bus_space_write_raw_multi_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_addr_t ba, const u_int8_t *src, bus_size_t size);
+static inline void
+bus_space_write_raw_multi_2(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_addr_t ba, const u_int8_t *src, bus_size_t size)
+{
+	const u_int16_t *datap = (const u_int16_t *)src;	/* keep const; size in bytes, multiple of 2 */
+	while (size > 0) {
+		bus_space_write_2(bst, bsh, ba, *datap);	/* same bus address each iteration */
+		datap++;
+		size -= 2;
+	}
+}
+static inline void
+bus_space_write_raw_multi_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_addr_t ba, const u_int8_t *src, bus_size_t size);
+static inline void
+bus_space_write_raw_multi_4(bus_space_tag_t bst, bus_space_handle_t bsh,
+    bus_addr_t ba, const u_int8_t *src, bus_size_t size)
+{
+	const u_int32_t *datap = (const u_int32_t *)src;	/* keep const; size in bytes, multiple of 4 */
+	while (size > 0) {
+		bus_space_write_4(bst, bsh, ba, *datap);	/* same bus address each iteration */
+		datap++;
+		size -= 4;
+	}
+}
+#define bus_space_write_raw_multi_8 \
+ !!! bus_space_write_raw_multi_8 not implemented !!!
+
+#endif
+
+#endif /* _ARM32_BUS_H_ */
diff --git a/sys/arch/arm/include/cdefs.h b/sys/arch/arm/include/cdefs.h
new file mode 100644
index 00000000000..ddb29ed5939
--- /dev/null
+++ b/sys/arch/arm/include/cdefs.h
@@ -0,0 +1,13 @@
+/* $OpenBSD: cdefs.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: cdefs.h,v 1.1 2001/01/10 19:02:05 bjh21 Exp $ */
+
+#ifndef _MACHINE_CDEFS_H_
+#define _MACHINE_CDEFS_H_
+
+#define __weak_alias(alias,sym) \
+ __asm__(".weak " __STRING(alias) " ; " __STRING(alias) " = " __STRING(sym))
+#define __warn_references(sym,msg) \
+ __asm__(".section .gnu.warning." __STRING(sym) " ; .ascii \"" msg "\" ; .text")
+
+
+#endif /* !_MACHINE_CDEFS_H_ */
diff --git a/sys/arch/arm/include/conf.h b/sys/arch/arm/include/conf.h
new file mode 100644
index 00000000000..0f7dac4128a
--- /dev/null
+++ b/sys/arch/arm/include/conf.h
@@ -0,0 +1,134 @@
+/* $OpenBSD: conf.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: conf.h,v 1.7 2002/04/19 01:04:39 wiz Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * conf.h
+ *
+ * Prototypes for device driver functions
+ */
+
+#ifndef _ARM_CONF_H
+#define _ARM_CONF_H
+
+
+#include <sys/conf.h>
+
+#define mmread mmrw
+#define mmwrite mmrw
+cdev_decl(mm);
+
+bdev_decl(wd);
+cdev_decl(wd);
+bdev_decl(sw);
+cdev_decl(sw);
+bdev_decl(fd);
+cdev_decl(fd);
+bdev_decl(rd);
+cdev_decl(rd);
+bdev_decl(raid);
+cdev_decl(raid);
+
+/* Character device declarations */
+
+/* open, close, read, write, ioctl, tty, mmap -- XXX should be a tty */
+#define cdev_physcon_init(c,n) cdev__ttym_init(c,n,0)
+
+/* open, close, ioctl */
+#define cdev_beep_init(c,n) cdev__oci_init(c,n)
+
+/* open, close, read, ioctl */
+#define cdev_kbd_init(c,n) cdev__ocri_init(c,n)
+
+/* open, close, ioctl, mmap */
+#define cdev_vidcvid_init(c,n) { \
+ dev_init(c,n,open), dev_init(c,n,close), dev_noimpl(read,enodev), \
+ dev_noimpl(write,enodev), dev_init(c,n,ioctl), \
+ dev_noimpl(stop,enodev), 0, seltrue, dev_init(c,n,mmap), 0 }
+
+/* open, close, read, write, ioctl */
+#define cdev_iic_init(c,n) cdev__ocrwi_init(c,n)
+#define cdev_rtc_init(c,n) cdev__ocrwi_init(c,n)
+
+/* open, close, read, ioctl */
+#define cdev_prof_init(c,n) cdev__ocri_init(c,n)
+
+cdev_decl(physcon);
+cdev_decl(vidcconsole);
+cdev_decl(biconsdev);
+cdev_decl(com);
+cdev_decl(lpt);
+cdev_decl(qms);
+cdev_decl(opms);
+cdev_decl(beep);
+cdev_decl(kbd);
+cdev_decl(iic);
+cdev_decl(rtc);
+cdev_decl(fcom);
+cdev_decl(pc);
+cdev_decl(ofcons_);
+cdev_decl(ofd);
+cdev_decl(ofrtc);
+cdev_decl(sacom);
+cdev_decl(scr);
+cdev_decl(prof);
+#define ofromread ofromrw
+#define ofromwrite ofromrw
+cdev_decl(ofrom);
+cdev_decl(joy);
+cdev_decl(usb);
+cdev_decl(uhid);
+cdev_decl(ugen);
+cdev_decl(ulpt);
+cdev_decl(ucom);
+cdev_decl(urio);
+cdev_decl(uscanner);
+cdev_decl(vc_nb_);
+cdev_decl(wsdisplay);
+cdev_decl(wsfont);
+cdev_decl(wskbd);
+cdev_decl(wsmouse);
+cdev_decl(wsmux);
+cdev_decl(scsibus);
+cdev_decl(openfirm);
+cdev_decl(pci);
+cdev_decl(agp);
+cdev_decl(iop);
+cdev_decl(ld);
+cdev_decl(mlx);
+cdev_decl(mly);
+cdev_decl(plcom);
+
+#endif /* _ARM_CONF_H */
diff --git a/sys/arch/arm/include/cpu.h b/sys/arch/arm/include/cpu.h
new file mode 100644
index 00000000000..22b999ed8fb
--- /dev/null
+++ b/sys/arch/arm/include/cpu.h
@@ -0,0 +1,302 @@
+/* $OpenBSD: cpu.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: cpu.h,v 1.34 2003/06/23 11:01:08 martin Exp $ */
+
+/*
+ * Copyright (c) 1994-1996 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpu.h
+ *
+ * CPU specific symbols
+ *
+ * Created : 18/09/94
+ *
+ * Based on kate/katelib/arm6.h
+ */
+
+#ifndef _ARM_CPU_H_
+#define _ARM_CPU_H_
+
+/*
+ * User-visible definitions
+ */
+
+/* CTL_MACHDEP definitions. */
+#define CPU_DEBUG 1 /* int: misc kernel debug control */
+#define CPU_BOOTED_DEVICE 2 /* string: device we booted from */
+#define CPU_BOOTED_KERNEL 3 /* string: kernel we booted */
+#define CPU_CONSDEV 4 /* struct: dev_t of our console */
+#define CPU_POWERSAVE 5 /* int: use CPU powersave mode */
+#define CPU_MAXID 6 /* number of valid machdep ids */
+
+#define CTL_MACHDEP_NAMES { \
+ { 0, 0 }, \
+ { "debug", CTLTYPE_INT }, \
+ { "booted_device", CTLTYPE_STRING }, \
+ { "booted_kernel", CTLTYPE_STRING }, \
+ { "console_device", CTLTYPE_STRUCT }, \
+ { "powersave", CTLTYPE_INT }, \
+}
+
+#ifdef _KERNEL
+
+/*
+ * Kernel-only definitions
+ */
+
+#include <arm/cpuconf.h>
+
+#include <machine/intr.h>
+#ifndef _LOCORE
+#if 0
+#include <sys/user.h>
+#endif
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#endif /* !_LOCORE */
+
+#include <arm/armreg.h>
+
+#ifndef _LOCORE
+/* 1 == use cpu_sleep(), 0 == don't */
+extern int cpu_do_powersave;
+#endif
+
+#ifdef __PROG32
+#ifdef _LOCORE
+#define IRQdisable \
+ stmfd sp!, {r0} ; \
+ mrs r0, cpsr ; \
+ orr r0, r0, #(I32_bit) ; \
+ msr cpsr_c, r0 ; \
+ ldmfd sp!, {r0}
+
+#define IRQenable \
+ stmfd sp!, {r0} ; \
+ mrs r0, cpsr ; \
+ bic r0, r0, #(I32_bit) ; \
+ msr cpsr_c, r0 ; \
+ ldmfd sp!, {r0}
+
+#else
+#define IRQdisable __set_cpsr_c(I32_bit, I32_bit);
+#define IRQenable __set_cpsr_c(I32_bit, 0);
+#endif /* _LOCORE */
+#endif
+
+#ifndef _LOCORE
+
+/* All the CLKF_* macros take a struct clockframe * as an argument. */
+
+/*
+ * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
+ * frame came from USR mode or not.
+ */
+#ifdef __PROG32
+#define CLKF_USERMODE(frame) ((frame->if_spsr & PSR_MODE) == PSR_USR32_MODE)
+#else
+#define CLKF_USERMODE(frame) ((frame->if_r15 & R15_MODE) == R15_MODE_USR)
+#endif
+
+/*
+ * CLKF_BASEPRI: True if we were at spl0 before the interrupt.
+ *
+ * This is hard-wired to 0 on the ARM, since spllowersoftclock() might
+ * not actually be able to unblock the interrupt, which would cause us
+ * to run the softclock interrupts with hardclock blocked.
+ */
+#define CLKF_BASEPRI(frame) 0
+
+/*
+ * CLKF_INTR: True if we took the interrupt from inside another
+ * interrupt handler.
+ */
+extern int current_intr_depth;
+#ifdef __PROG32
+/* Hack to treat FPE time as interrupt time so we can measure it */
+#define CLKF_INTR(frame) \
+ ((current_intr_depth > 1) || \
+ (frame->if_spsr & PSR_MODE) == PSR_UND32_MODE)
+#else
+#define CLKF_INTR(frame) (current_intr_depth > 1)
+#endif
+
+/*
+ * CLKF_PC: Extract the program counter from a clockframe
+ */
+#ifdef __PROG32
+#define CLKF_PC(frame) (frame->if_pc)
+#else
+#define CLKF_PC(frame) (frame->if_r15 & R15_PC)
+#endif
+
+/*
+ * PROC_PC: Find out the program counter for the given process.
+ */
+#ifdef __PROG32
+#define PROC_PC(p) ((p)->p_addr->u_pcb.pcb_tf->tf_pc)
+#else
+#define PROC_PC(p) ((p)->p_addr->u_pcb.pcb_tf->tf_r15 & R15_PC)
+#endif
+
+/* The address of the vector page. */
+extern vaddr_t vector_page;
+#ifdef __PROG32
+void arm32_vector_init(vaddr_t, int);
+
+#define ARM_VEC_RESET (1 << 0)
+#define ARM_VEC_UNDEFINED (1 << 1)
+#define ARM_VEC_SWI (1 << 2)
+#define ARM_VEC_PREFETCH_ABORT (1 << 3)
+#define ARM_VEC_DATA_ABORT (1 << 4)
+#define ARM_VEC_ADDRESS_EXCEPTION (1 << 5)
+#define ARM_VEC_IRQ (1 << 6)
+#define ARM_VEC_FIQ (1 << 7)
+
+#define ARM_NVEC 8
+#define ARM_VEC_ALL 0xffffffff
+#endif
+
+/*
+ * Per-CPU information. For now we assume one CPU.
+ */
+
+#include <sys/device.h>
+/*
+#include <sys/sched.h>
+*/
+struct cpu_info {
+#if 0
+	struct schedstate_percpu ci_schedstate; /* scheduler state */
+#endif
+#if defined(DIAGNOSTIC) || defined(LOCKDEBUG)
+	u_long ci_spin_locks;		/* # of spin locks held */
+	u_long ci_simple_locks;		/* # of simple locks held */
+#endif
+	struct device *ci_dev;		/* Device corresponding to this CPU */
+	u_int32_t ci_arm_cpuid;		/* aggregate CPU id */
+	u_int32_t ci_arm_cputype;	/* CPU type */
+	u_int32_t ci_arm_cpurev;	/* CPU revision */
+	u_int32_t ci_ctrl;		/* The CPU control register */
+	struct evcnt ci_arm700bugcount;	/* presumably counts ARM700 bug hits -- TODO confirm */
+#ifdef MULTIPROCESSOR
+	MP_CPU_INFO_MEMBERS
+#endif
+};
+
+#ifndef MULTIPROCESSOR
+extern struct cpu_info cpu_info_store;
+#define curcpu() (&cpu_info_store)
+#define cpu_number() 0
+#endif
+
+#ifdef __PROG32
+void cpu_proc_fork(struct proc *, struct proc *);
+#else
+#define cpu_proc_fork(p1, p2)
+#endif
+
+/*
+ * Scheduling glue
+ */
+
+extern int astpending;
+#define setsoftast() (astpending = 1)
+
+/*
+ * Notify the current process (p) that it has a signal pending,
+ * process as soon as possible.
+ */
+
+#define signotify(p) setsoftast()
+
+#define cpu_wait(p) /* nothing */
+
+/*
+ * Preempt the current process if in interrupt from user mode,
+ * or after the current trap/syscall if in system mode.
+ */
+extern int want_resched; /* resched() was called */
+#define need_resched(ci) (want_resched = 1, setsoftast())
+
+/*
+ * Give a profiling tick to the current process when the user profiling
+ * buffer pages are invalid. On the i386, request an ast to send us
+ * through trap(), marking the proc as needing a profiling tick.
+ */
+#define need_proftick(p) ((p)->p_flag |= P_OWEUPC, setsoftast())
+
+#ifndef acorn26
+/*
+ * cpu device glue (belongs in cpuvar.h)
+ */
+
+struct device;
+void cpu_attach __P((struct device *));
+int cpu_alloc_idlepcb __P((struct cpu_info *));
+#endif
+
+
+/*
+ * Random cruft
+ */
+
+/* locore.S */
+void atomic_set_bit __P((u_int *address, u_int setmask));
+void atomic_clear_bit __P((u_int *address, u_int clearmask));
+
+/* cpuswitch.S */
+struct pcb;
+void savectx __P((struct pcb *pcb));
+
+/* ast.c */
+void userret __P((register struct proc *p));
+
+/* machdep.h */
+void bootsync __P((void));
+
+/* fault.c */
+int badaddr_read __P((void *, size_t, void *));
+
+/* syscall.c */
+void swi_handler __P((trapframe_t *));
+
+#endif /* !_LOCORE */
+
+#endif /* _KERNEL */
+
+#endif /* !_ARM_CPU_H_ */
+
+/* End of cpu.h */
diff --git a/sys/arch/arm/include/cpuconf.h b/sys/arch/arm/include/cpuconf.h
new file mode 100644
index 00000000000..8ccd375e18e
--- /dev/null
+++ b/sys/arch/arm/include/cpuconf.h
@@ -0,0 +1,177 @@
+/* $OpenBSD: cpuconf.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: cpuconf.h,v 1.7 2003/05/23 00:57:24 ichiro Exp $ */
+
+/*
+ * Copyright (c) 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_CPUCONF_H_
+#define _ARM_CPUCONF_H_
+
+#if defined(_KERNEL_OPT)
+#include "opt_cputypes.h"
+#endif /* _KERNEL_OPT */
+
+/*
+ * IF YOU CHANGE THIS FILE, MAKE SURE TO UPDATE THE DEFINITION OF
+ * "PMAP_NEEDS_PTE_SYNC" IN <arm/arm/pmap.h> FOR THE CPU TYPE
+ * YOU ARE ADDING SUPPORT FOR.
+ */
+
+/*
+ * Step 1: Count the number of CPU types configured into the kernel.
+ */
+#if defined(_KERNEL_OPT)
+#define CPU_NTYPES (defined(CPU_ARM2) + defined(CPU_ARM250) + \
+ defined(CPU_ARM3) + \
+ defined(CPU_ARM6) + defined(CPU_ARM7) + \
+ defined(CPU_ARM7TDMI) + \
+ defined(CPU_ARM8) + defined(CPU_ARM9) + \
+ defined(CPU_ARM10) + \
+ defined(CPU_SA110) + defined(CPU_SA1100) + \
+ defined(CPU_SA1110) + \
+ defined(CPU_IXP12X0) + \
+ defined(CPU_XSCALE_80200) + \
+ defined(CPU_XSCALE_80321) + \
+ defined(CPU_XSCALE_PXA2X0) + \
+ defined(CPU_XSCALE_IXP425))
+#else
+#define CPU_NTYPES 2
+#endif /* _KERNEL_OPT */
+
+/*
+ * Step 2: Determine which ARM architecture versions are configured.
+ */
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3))
+#define ARM_ARCH_2 1
+#else
+#define ARM_ARCH_2 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM6) || defined(CPU_ARM7))
+#define ARM_ARCH_3 1
+#else
+#define ARM_ARCH_3 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
+ defined(CPU_ARM10) || defined(CPU_SA110) || defined(CPU_SA1100) || \
+ defined(CPU_SA1110) || defined(CPU_IXP12X0) || defined(CPU_XSCALE_IXP425))
+#define ARM_ARCH_4 1
+#else
+#define ARM_ARCH_4 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0))
+#define ARM_ARCH_5 1
+#else
+#define ARM_ARCH_5 0
+#endif
+
+#define ARM_NARCH (ARM_ARCH_2 + ARM_ARCH_3 + ARM_ARCH_4 + ARM_ARCH_5)
+#if ARM_NARCH == 0
+#error ARM_NARCH is 0
+#endif
+
+/*
+ * Step 3: Define which MMU classes are configured:
+ *
+ * ARM_MMU_MEMC Prehistoric, external memory controller
+ * and MMU for ARMv2 CPUs.
+ *
+ * ARM_MMU_GENERIC Generic ARM MMU, compatible with ARM6.
+ *
+ * ARM_MMU_SA1 StrongARM SA-1 MMU. Compatible with generic
+ * ARM MMU, but has no write-through cache mode.
+ *
+ * ARM_MMU_XSCALE XScale MMU. Compatible with generic ARM
+ * MMU, but also has several extensions which
+ * require different PTE layout to use.
+ */
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3))
+#define ARM_MMU_MEMC 1
+#else
+#define ARM_MMU_MEMC 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
+ defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM10))
+#define ARM_MMU_GENERIC 1
+#else
+#define ARM_MMU_GENERIC 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||\
+ defined(CPU_IXP12X0))
+#define ARM_MMU_SA1 1
+#else
+#define ARM_MMU_SA1 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425))
+#define ARM_MMU_XSCALE 1
+#else
+#define ARM_MMU_XSCALE 0
+#endif
+
+#define ARM_NMMUS (ARM_MMU_MEMC + ARM_MMU_GENERIC + \
+ ARM_MMU_SA1 + ARM_MMU_XSCALE)
+#if ARM_NMMUS == 0
+#error ARM_NMMUS is 0
+#endif
+
+/*
+ * Step 4: Define features that may be present on a subset of CPUs
+ *
+ * ARM_XSCALE_PMU Performance Monitoring Unit on 80200 and 80321
+ */
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321))
+#define ARM_XSCALE_PMU 1
+#else
+#define ARM_XSCALE_PMU 0
+#endif
+
+#endif /* _ARM_CPUCONF_H_ */
diff --git a/sys/arch/arm/include/cpufunc.h b/sys/arch/arm/include/cpufunc.h
new file mode 100644
index 00000000000..f732dfe03c2
--- /dev/null
+++ b/sys/arch/arm/include/cpufunc.h
@@ -0,0 +1,524 @@
+/* $OpenBSD: cpufunc.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpufunc.h
+ *
+ * Prototypes for cpu, mmu and tlb related functions.
+ */
+
+#ifndef _ARM32_CPUFUNC_H_
+#define _ARM32_CPUFUNC_H_
+
+#ifdef _KERNEL
+
+#include <sys/types.h>
+#include <arm/cpuconf.h>
+
+struct cpu_functions {
+
+ /* CPU functions */
+
+ u_int (*cf_id) __P((void));
+ void (*cf_cpwait) __P((void));
+
+ /* MMU functions */
+
+ u_int (*cf_control) __P((u_int bic, u_int eor));
+ void (*cf_domains) __P((u_int domains));
+ void (*cf_setttb) __P((u_int ttb));
+ u_int (*cf_faultstatus) __P((void));
+ u_int (*cf_faultaddress) __P((void));
+
+ /* TLB functions */
+
+ void (*cf_tlb_flushID) __P((void));
+ void (*cf_tlb_flushID_SE) __P((u_int va));
+ void (*cf_tlb_flushI) __P((void));
+ void (*cf_tlb_flushI_SE) __P((u_int va));
+ void (*cf_tlb_flushD) __P((void));
+ void (*cf_tlb_flushD_SE) __P((u_int va));
+
+ /*
+ * Cache operations:
+ *
+ * We define the following primitives:
+ *
+ * icache_sync_all Synchronize I-cache
+ * icache_sync_range Synchronize I-cache range
+ *
+ * dcache_wbinv_all Write-back and Invalidate D-cache
+ * dcache_wbinv_range Write-back and Invalidate D-cache range
+ * dcache_inv_range Invalidate D-cache range
+ * dcache_wb_range Write-back D-cache range
+ *
+ * idcache_wbinv_all Write-back and Invalidate D-cache,
+ * Invalidate I-cache
+ * idcache_wbinv_range Write-back and Invalidate D-cache,
+ * Invalidate I-cache range
+ *
+ * Note that the ARM term for "write-back" is "clean". We use
+ * the term "write-back" since it's a more common way to describe
+ * the operation.
+ *
+ * There are some rules that must be followed:
+ *
+ * I-cache Synch (all or range):
+ * The goal is to synchronize the instruction stream,
+ *			so you may need to write-back dirty D-cache blocks
+ * first. If a range is requested, and you can't
+ * synchronize just a range, you have to hit the whole
+ * thing.
+ *
+ * D-cache Write-Back and Invalidate range:
+ * If you can't WB-Inv a range, you must WB-Inv the
+ * entire D-cache.
+ *
+ * D-cache Invalidate:
+ * If you can't Inv the D-cache, you must Write-Back
+ * and Invalidate. Code that uses this operation
+ * MUST NOT assume that the D-cache will not be written
+ * back to memory.
+ *
+ * D-cache Write-Back:
+ * If you can't Write-back without doing an Inv,
+ * that's fine. Then treat this as a WB-Inv.
+ * Skipping the invalidate is merely an optimization.
+ *
+ * All operations:
+ * Valid virtual addresses must be passed to each
+ * cache operation.
+ */
+ void (*cf_icache_sync_all) __P((void));
+ void (*cf_icache_sync_range) __P((vaddr_t, vsize_t));
+
+ void (*cf_dcache_wbinv_all) __P((void));
+ void (*cf_dcache_wbinv_range) __P((vaddr_t, vsize_t));
+ void (*cf_dcache_inv_range) __P((vaddr_t, vsize_t));
+ void (*cf_dcache_wb_range) __P((vaddr_t, vsize_t));
+
+ void (*cf_idcache_wbinv_all) __P((void));
+ void (*cf_idcache_wbinv_range) __P((vaddr_t, vsize_t));
+
+ /* Other functions */
+
+ void (*cf_flush_prefetchbuf) __P((void));
+ void (*cf_drain_writebuf) __P((void));
+ void (*cf_flush_brnchtgt_C) __P((void));
+ void (*cf_flush_brnchtgt_E) __P((u_int va));
+
+ void (*cf_sleep) __P((int mode));
+
+ /* Soft functions */
+
+ int (*cf_dataabt_fixup) __P((void *arg));
+ int (*cf_prefetchabt_fixup) __P((void *arg));
+
+ void (*cf_context_switch) __P((void));
+
+ void (*cf_setup) __P((char *string));
+};
+
+extern struct cpu_functions cpufuncs;
+extern u_int cputype;
+
+#define cpu_id() cpufuncs.cf_id()
+#define cpu_cpwait() cpufuncs.cf_cpwait()
+
+#define cpu_control(c, e) cpufuncs.cf_control(c, e)
+#define cpu_domains(d) cpufuncs.cf_domains(d)
+#define cpu_setttb(t) cpufuncs.cf_setttb(t)
+#define cpu_faultstatus() cpufuncs.cf_faultstatus()
+#define cpu_faultaddress() cpufuncs.cf_faultaddress()
+
+#define cpu_tlb_flushID() cpufuncs.cf_tlb_flushID()
+#define cpu_tlb_flushID_SE(e) cpufuncs.cf_tlb_flushID_SE(e)
+#define cpu_tlb_flushI() cpufuncs.cf_tlb_flushI()
+#define cpu_tlb_flushI_SE(e) cpufuncs.cf_tlb_flushI_SE(e)
+#define cpu_tlb_flushD() cpufuncs.cf_tlb_flushD()
+#define cpu_tlb_flushD_SE(e) cpufuncs.cf_tlb_flushD_SE(e)
+
+#define cpu_icache_sync_all() cpufuncs.cf_icache_sync_all()
+#define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))
+
+#define cpu_dcache_wbinv_all() cpufuncs.cf_dcache_wbinv_all()
+#define cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
+#define cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
+#define cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))
+
+#define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all()
+#define cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
+
+#define cpu_flush_prefetchbuf() cpufuncs.cf_flush_prefetchbuf()
+#define cpu_drain_writebuf() cpufuncs.cf_drain_writebuf()
+#define cpu_flush_brnchtgt_C() cpufuncs.cf_flush_brnchtgt_C()
+#define cpu_flush_brnchtgt_E(e) cpufuncs.cf_flush_brnchtgt_E(e)
+
+#define cpu_sleep(m) cpufuncs.cf_sleep(m)
+
+#define cpu_dataabt_fixup(a) cpufuncs.cf_dataabt_fixup(a)
+#define cpu_prefetchabt_fixup(a) cpufuncs.cf_prefetchabt_fixup(a)
+#define ABORT_FIXUP_OK 0 /* fixup succeeded */
+#define ABORT_FIXUP_FAILED 1 /* fixup failed */
+#define ABORT_FIXUP_RETURN 2 /* abort handler should return */
+
+#define cpu_setup(a) cpufuncs.cf_setup(a)
+
+int set_cpufuncs __P((void));
+#define ARCHITECTURE_NOT_PRESENT 1 /* known but not configured */
+#define ARCHITECTURE_NOT_SUPPORTED 2 /* not known */
+
+void cpufunc_nullop __P((void));
+int cpufunc_null_fixup __P((void *));
+int early_abort_fixup __P((void *));
+int late_abort_fixup __P((void *));
+u_int cpufunc_id __P((void));
+u_int cpufunc_control __P((u_int clear, u_int bic));
+void cpufunc_domains __P((u_int domains));
+u_int cpufunc_faultstatus __P((void));
+u_int cpufunc_faultaddress __P((void));
+
+#ifdef CPU_ARM3
+u_int arm3_control __P((u_int clear, u_int bic));
+void arm3_cache_flush __P((void));
+#endif /* CPU_ARM3 */
+
+#if defined(CPU_ARM6) || defined(CPU_ARM7)
+void arm67_setttb __P((u_int ttb));
+void arm67_tlb_flush __P((void));
+void arm67_tlb_purge __P((u_int va));
+void arm67_cache_flush __P((void));
+void arm67_context_switch __P((void));
+#endif /* CPU_ARM6 || CPU_ARM7 */
+
+#ifdef CPU_ARM6
+void arm6_setup __P((char *string));
+#endif /* CPU_ARM6 */
+
+#ifdef CPU_ARM7
+void arm7_setup __P((char *string));
+#endif /* CPU_ARM7 */
+
+#ifdef CPU_ARM7TDMI
+int arm7_dataabt_fixup __P((void *arg));
+void arm7tdmi_setup __P((char *string));
+void arm7tdmi_setttb __P((u_int ttb));
+void arm7tdmi_tlb_flushID __P((void));
+void arm7tdmi_tlb_flushID_SE __P((u_int va));
+void arm7tdmi_cache_flushID __P((void));
+void arm7tdmi_context_switch __P((void));
+#endif /* CPU_ARM7TDMI */
+
+#ifdef CPU_ARM8
+void arm8_setttb __P((u_int ttb));
+void arm8_tlb_flushID __P((void));
+void arm8_tlb_flushID_SE __P((u_int va));
+void arm8_cache_flushID __P((void));
+void arm8_cache_flushID_E __P((u_int entry));
+void arm8_cache_cleanID __P((void));
+void arm8_cache_cleanID_E __P((u_int entry));
+void arm8_cache_purgeID __P((void));
+void arm8_cache_purgeID_E __P((u_int entry));
+
+void arm8_cache_syncI __P((void));
+void arm8_cache_cleanID_rng __P((vaddr_t start, vsize_t end));
+void arm8_cache_cleanD_rng __P((vaddr_t start, vsize_t end));
+void arm8_cache_purgeID_rng __P((vaddr_t start, vsize_t end));
+void arm8_cache_purgeD_rng __P((vaddr_t start, vsize_t end));
+void arm8_cache_syncI_rng __P((vaddr_t start, vsize_t end));
+
+void arm8_context_switch __P((void));
+
+void arm8_setup __P((char *string));
+
+u_int arm8_clock_config __P((u_int, u_int));
+#endif
+
+#ifdef CPU_SA110
+void sa110_setup __P((char *string));
+void sa110_context_switch __P((void));
+#endif /* CPU_SA110 */
+
+#if defined(CPU_SA1100) || defined(CPU_SA1110)
+void sa11x0_drain_readbuf __P((void));
+
+void sa11x0_context_switch __P((void));
+void sa11x0_cpu_sleep __P((int mode));
+
+void sa11x0_setup __P((char *string));
+#endif
+
+#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
+void sa1_setttb __P((u_int ttb));
+
+void sa1_tlb_flushID_SE __P((u_int va));
+
+void sa1_cache_flushID __P((void));
+void sa1_cache_flushI __P((void));
+void sa1_cache_flushD __P((void));
+void sa1_cache_flushD_SE __P((u_int entry));
+
+void sa1_cache_cleanID __P((void));
+void sa1_cache_cleanD __P((void));
+void sa1_cache_cleanD_E __P((u_int entry));
+
+void sa1_cache_purgeID __P((void));
+void sa1_cache_purgeID_E __P((u_int entry));
+void sa1_cache_purgeD __P((void));
+void sa1_cache_purgeD_E __P((u_int entry));
+
+void sa1_cache_syncI __P((void));
+void sa1_cache_cleanID_rng __P((vaddr_t start, vsize_t end));
+void sa1_cache_cleanD_rng __P((vaddr_t start, vsize_t end));
+void sa1_cache_purgeID_rng __P((vaddr_t start, vsize_t end));
+void sa1_cache_purgeD_rng __P((vaddr_t start, vsize_t end));
+void sa1_cache_syncI_rng __P((vaddr_t start, vsize_t end));
+
+#endif
+
+#ifdef CPU_ARM9
+void arm9_setttb __P((u_int));
+
+void arm9_tlb_flushID_SE __P((u_int va));
+
+void arm9_cache_flushID __P((void));
+void arm9_cache_flushID_SE __P((u_int));
+void arm9_cache_flushI __P((void));
+void arm9_cache_flushI_SE __P((u_int));
+void arm9_cache_flushD __P((void));
+void arm9_cache_flushD_SE __P((u_int));
+
+void arm9_cache_cleanID __P((void));
+
+void arm9_cache_syncI __P((void));
+void arm9_cache_flushID_rng __P((vaddr_t, vsize_t));
+void arm9_cache_flushD_rng __P((vaddr_t, vsize_t));
+void arm9_cache_syncI_rng __P((vaddr_t, vsize_t));
+
+void arm9_context_switch __P((void));
+
+void arm9_setup __P((char *string));
+#endif
+
+#ifdef CPU_ARM10
+void arm10_setttb __P((u_int));
+
+void arm10_tlb_flushID_SE __P((u_int));
+void arm10_tlb_flushI_SE __P((u_int));
+
+void arm10_icache_sync_all __P((void));
+void arm10_icache_sync_range __P((vaddr_t, vsize_t));
+
+void arm10_dcache_wbinv_all __P((void));
+void arm10_dcache_wbinv_range __P((vaddr_t, vsize_t));
+void arm10_dcache_inv_range __P((vaddr_t, vsize_t));
+void arm10_dcache_wb_range __P((vaddr_t, vsize_t));
+
+void arm10_idcache_wbinv_all __P((void));
+void arm10_idcache_wbinv_range __P((vaddr_t, vsize_t));
+
+void arm10_context_switch __P((void));
+
+void arm10_setup __P((char *string));
+
+extern unsigned arm10_dcache_sets_max;
+extern unsigned arm10_dcache_sets_inc;
+extern unsigned arm10_dcache_index_max;
+extern unsigned arm10_dcache_index_inc;
+#endif
+
+#if defined(CPU_ARM9) || defined(CPU_ARM10) || defined(CPU_SA110) || \
+ defined(CPU_SA1100) || defined(CPU_SA1110) || \
+ defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
+
+void armv4_tlb_flushID __P((void));
+void armv4_tlb_flushI __P((void));
+void armv4_tlb_flushD __P((void));
+void armv4_tlb_flushD_SE __P((u_int va));
+
+void armv4_drain_writebuf __P((void));
+#endif
+
+#if defined(CPU_IXP12X0)
+void ixp12x0_drain_readbuf __P((void));
+void ixp12x0_context_switch __P((void));
+void ixp12x0_setup __P((char *string));
+#endif
+
+#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
+ (ARM_MMU_XSCALE == 1)
+void xscale_cpwait __P((void));
+
+void xscale_cpu_sleep __P((int mode));
+
+u_int xscale_control __P((u_int clear, u_int bic));
+
+void xscale_setttb __P((u_int ttb));
+
+void xscale_tlb_flushID_SE __P((u_int va));
+
+void xscale_cache_flushID __P((void));
+void xscale_cache_flushI __P((void));
+void xscale_cache_flushD __P((void));
+void xscale_cache_flushD_SE __P((u_int entry));
+
+void xscale_cache_cleanID __P((void));
+void xscale_cache_cleanD __P((void));
+void xscale_cache_cleanD_E __P((u_int entry));
+
+void xscale_cache_clean_minidata __P((void));
+
+void xscale_cache_purgeID __P((void));
+void xscale_cache_purgeID_E __P((u_int entry));
+void xscale_cache_purgeD __P((void));
+void xscale_cache_purgeD_E __P((u_int entry));
+
+void xscale_cache_syncI __P((void));
+void xscale_cache_cleanID_rng __P((vaddr_t start, vsize_t end));
+void xscale_cache_cleanD_rng __P((vaddr_t start, vsize_t end));
+void xscale_cache_purgeID_rng __P((vaddr_t start, vsize_t end));
+void xscale_cache_purgeD_rng __P((vaddr_t start, vsize_t end));
+void xscale_cache_syncI_rng __P((vaddr_t start, vsize_t end));
+void xscale_cache_flushD_rng __P((vaddr_t start, vsize_t end));
+
+void xscale_context_switch __P((void));
+
+void xscale_setup __P((char *string));
+#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
+
+#define tlb_flush cpu_tlb_flushID
+#define setttb cpu_setttb
+#define drain_writebuf cpu_drain_writebuf
+
+/*
+ * Macros for manipulating CPU interrupts
+ */
+#ifdef __PROG32
+static __inline u_int32_t __set_cpsr_c(u_int bic, u_int eor) __attribute__((__unused__));
+
+static __inline u_int32_t
+__set_cpsr_c(u_int bic, u_int eor)
+{
+ u_int32_t tmp, ret;
+
+ __asm __volatile(
+ "mrs %0, cpsr\n" /* Get the CPSR */
+ "bic %1, %0, %2\n" /* Clear bits */
+ "eor %1, %1, %3\n" /* XOR bits */
+ "msr cpsr_c, %1\n" /* Set the control field of CPSR */
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (bic), "r" (eor));
+
+ return ret;
+}
+
+#define disable_interrupts(mask) \
+ (__set_cpsr_c((mask) & (I32_bit | F32_bit), \
+ (mask) & (I32_bit | F32_bit)))
+
+#define enable_interrupts(mask) \
+ (__set_cpsr_c((mask) & (I32_bit | F32_bit), 0))
+
+#define restore_interrupts(old_cpsr) \
+ (__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
+#else /* ! __PROG32 */
+#define disable_interrupts(mask) \
+ (set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), \
+ (mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
+
+#define enable_interrupts(mask) \
+ (set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), 0))
+
+#define restore_interrupts(old_r15) \
+ (set_r15((R15_IRQ_DISABLE | R15_FIQ_DISABLE), \
+ (old_r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
+#endif /* __PROG32 */
+
+#ifdef __PROG32
+/* Functions to manipulate the CPSR. */
+u_int SetCPSR(u_int bic, u_int eor);
+u_int GetCPSR(void);
+#else
+/* Functions to manipulate the processor control bits in r15. */
+u_int set_r15(u_int bic, u_int eor);
+u_int get_r15(void);
+#endif /* __PROG32 */
+
+/*
+ * Functions to manipulate cpu r13
+ * (in arm/arm/setstack.S)
+ */
+
+void set_stackptr __P((u_int mode, u_int address));
+u_int get_stackptr __P((u_int mode));
+
+/*
+ * Miscellany
+ */
+
+int get_pc_str_offset __P((void));
+
+/*
+ * CPU functions from locore.S
+ */
+
+void cpu_reset __P((void)) __attribute__((__noreturn__));
+
+/*
+ * Cache info variables.
+ */
+
+/* PRIMARY CACHE VARIABLES */
+extern int arm_picache_size;
+extern int arm_picache_line_size;
+extern int arm_picache_ways;
+
+extern int arm_pdcache_size; /* and unified */
+extern int arm_pdcache_line_size;
+extern int arm_pdcache_ways;
+
+extern int arm_pcache_type;
+extern int arm_pcache_unified;
+
+extern int arm_dcache_align;
+extern int arm_dcache_align_mask;
+
+#endif /* _KERNEL */
+#endif /* _ARM32_CPUFUNC_H_ */
+
+/* End of cpufunc.h */
diff --git a/sys/arch/arm/include/db_machdep.h b/sys/arch/arm/include/db_machdep.h
new file mode 100644
index 00000000000..073bd60d296
--- /dev/null
+++ b/sys/arch/arm/include/db_machdep.h
@@ -0,0 +1,117 @@
+/* $OpenBSD: db_machdep.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: db_machdep.h,v 1.5 2001/11/22 18:00:00 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1996 Scott K Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _ARM_DB_MACHDEP_H_
+#define _ARM_DB_MACHDEP_H_
+
+/*
+ * Machine-dependent defines for new kernel debugger.
+ */
+
+#include <uvm/uvm_extern.h>
+#include <arm/armreg.h>
+#include <machine/frame.h>
+#include <machine/trap.h>
+
+/* end of mangling */
+
+typedef vaddr_t db_addr_t; /* address - unsigned */
+typedef long db_expr_t; /* expression - signed */
+
+typedef trapframe_t db_regs_t;
+
+extern db_regs_t ddb_regs; /* register state */
+#define DDB_REGS (&ddb_regs)
+
+#ifdef __PROG26
+#define PC_REGS(regs) ((db_addr_t)(regs)->tf_r15 & R15_PC)
+#define PC_ADVANCE(regs) ((regs)->tf_r15 += 4)
+#else
+#define PC_REGS(regs) ((db_addr_t)(regs)->tf_pc)
+#endif
+
+#define BKPT_INST (KERNEL_BREAKPOINT) /* breakpoint instruction */
+#define BKPT_SIZE (INSN_SIZE) /* size of breakpoint inst */
+#define BKPT_SET(inst) (BKPT_INST)
+
+/*#define FIXUP_PC_AFTER_BREAK(regs) ((regs)->tf_pc -= BKPT_SIZE)*/
+
+#define T_BREAKPOINT (1)
+
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_BREAKPOINT)
+#define IS_WATCHPOINT_TRAP(type, code) (0)
+
+#define inst_trap_return(ins) (0)
+/* ldmxx reg, {..., pc}
+ 01800000 stack mode
+ 000f0000 register
+ 0000ffff register list */
+/* mov pc, reg
+ 0000000f register */
+#define inst_return(ins) (((ins) & 0x0e108000) == 0x08108000 || \
+ ((ins) & 0x0ff0fff0) == 0x01a0f000)
+/* bl ...
+ 00ffffff offset>>2 */
+#define inst_call(ins) (((ins) & 0x0f000000) == 0x0b000000)
+/* b ...
+ 00ffffff offset>>2 */
+/* ldr pc, [pc, reg, lsl #2]
+ 0000000f register */
+#define inst_branch(ins) (((ins) & 0x0f000000) == 0x0a000000 || \
+ ((ins) & 0x0fdffff0) == 0x079ff100)
+#define inst_load(ins) (0)
+#define inst_store(ins) (0)
+#define inst_unconditional_flow_transfer(ins) \
+ ((((ins) & INSN_COND_MASK) == INSN_COND_AL) && \
+ (inst_branch(ins) || inst_call(ins) || inst_return(ins)))
+
+#define getreg_val (0)
+#define next_instr_address(pc, bd) ((bd) ? (pc) : ((pc) + INSN_SIZE))
+
+#define DB_MACHINE_COMMANDS
+
+#define SOFTWARE_SSTEP
+
+db_addr_t db_branch_taken(u_int inst, db_addr_t pc, db_regs_t *regs);
+int kdb_trap __P((int, db_regs_t *));
+void db_machine_init __P((void));
+
+#define branch_taken(ins, pc, fun, regs) \
+ db_branch_taken((ins), (pc), (regs))
+
+#define DB_ELF_SYMBOLS
+#define DB_ELFSIZE 32
+#define DB_NO_AOUT
+
+void db_show_panic_cmd __P((db_expr_t, int, db_expr_t, char *));
+void db_show_frame_cmd __P((db_expr_t, int, db_expr_t, char *));
+
+#endif /* _ARM_DB_MACHDEP_H_ */
diff --git a/sys/arch/arm/include/disklabel.h b/sys/arch/arm/include/disklabel.h
new file mode 100644
index 00000000000..28db2c8195b
--- /dev/null
+++ b/sys/arch/arm/include/disklabel.h
@@ -0,0 +1,151 @@
+/* $OpenBSD: disklabel.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $OpenBSD: disklabel.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: disklabel.h,v 1.2 2001/11/25 19:02:03 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1994 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * disklabel.h
+ *
+ * machine specific disk label info
+ *
+ * Created : 04/10/94
+ */
+
+#ifndef _ARM_DISKLABEL_H_
+#define _ARM_DISKLABEL_H_
+
+#define LABELSECTOR 1 /* sector containing label */
+#define LABELOFFSET 0 /* offset of label in sector */
+#define MAXPARTITIONS 16 /* number of partitions */
+#define RAW_PART 2 /* raw partition: XX?c */
+
+#include <sys/dkbad.h>
+#if 0
+#include <arm/disklabel_acorn.h>
+#include <sys/disklabel_mbr.h>
+#endif
+
+/* MBR partition table */
+#define DOSBBSECTOR 0 /* MBR sector number */
+#define DOSPARTOFF 446 /* Offset of MBR partition table */
+#define NDOSPART 4 /* # of partitions in MBR */
+#define DOSMAGICOFF 510 /* Offset of magic number */
+#define DOSMAGIC 0xaa55 /* Actual magic number */
+#define MBRMAGIC DOSMAGIC
+#define DOSMBR_SIGNATURE MBRMAGIC
+#define DOSMBR_SIGNATURE_OFF DOSMAGICOFF
+#define DOSACTIVE 0x80
+
+
+struct dos_partition {
+ u_int8_t dp_flag; /* bootstrap flags */
+ u_int8_t dp_shd; /* starting head */
+ u_int8_t dp_ssect; /* starting sector */
+ u_int8_t dp_scyl; /* starting cylinder */
+ u_int8_t dp_typ; /* partition type (see below) */
+ u_int8_t dp_ehd; /* end head */
+ u_int8_t dp_esect; /* end sector */
+ u_int8_t dp_ecyl; /* end cylinder */
+ u_int32_t dp_start; /* absolute starting sector number */
+ u_int32_t dp_size; /* partition size in sectors */
+};
+
+/* Known DOS partition types. */
+#define DOSPTYP_UNUSED 0x00 /* Unused partition */
+#define DOSPTYP_FAT12 0x01 /* 12-bit FAT */
+#define DOSPTYP_FAT16S 0x04 /* 16-bit FAT, less than 32M */
+#define DOSPTYP_EXTEND 0x05 /* Extended; contains sub-partitions */
+#define DOSPTYP_FAT16B 0x06 /* 16-bit FAT, more than 32M */
+#define DOSPTYP_FAT32 0x0b /* 32-bit FAT */
+#define DOSPTYP_FAT32L 0x0c /* 32-bit FAT, LBA-mapped */
+#define DOSPTYP_FAT16C 0x0e /* 16-bit FAT, CHS-mapped */
+#define DOSPTYP_EXTENDL 0x0f /* Extended, LBA-mapped; contains sub-partitions */
+#define DOSPTYP_ONTRACK 0x54
+#define DOSPTYP_LINUX 0x83 /* That other thing */
+#define DOSPTYP_FREEBSD 0xa5 /* FreeBSD partition type */
+#define DOSPTYP_OPENBSD 0xa6 /* OpenBSD partition type */
+#define DOSPTYP_NETBSD 0xa9 /* NetBSD partition type */
+
+#include <sys/dkbad.h>
+
+/* Isolate the relevant bits to get sector and cylinder. */
+#define DPSECT(s) ((s) & 0x3f)
+#define DPCYL(c, s) ((c) + (((s) & 0xc0) << 2))
+
+
+#ifdef __ARMEL__
+#define get_le(x) (*((u_int32_t *)x))
+#else
+static __inline u_int32_t get_le(void *p);
+static __inline u_int32_t
+#ifdef __cplusplus
+get_le(void *p)
+#else
+get_le(p)
+ void *p;
+#endif
+{
+ u_int8_t *_p = (u_int8_t *)p;
+ int x;
+ x = _p[0];
+ x |= _p[1] << 8;
+ x |= _p[2] << 16;
+ x |= _p[3] << 24;
+ return x;
+}
+#endif
+
+
+#define NMBRPART 4
+struct cpu_disklabel {
+ struct dos_partition dosparts[NMBRPART];
+ struct dkbad bad;
+};
+
+#ifdef _KERNEL
+struct buf;
+struct disklabel;
+/* for readdisklabel. rv != 0 -> matches, msg == NULL -> success */
+int mbr_label_read __P((dev_t, void (*)(struct buf *), struct disklabel *,
+ struct cpu_disklabel *, char **, int *, int *));
+
+/* for writedisklabel. rv == 0 -> doesn't match, rv > 0 -> success */
+int mbr_label_locate __P((dev_t, void (*)(struct buf *),
+ struct disklabel *, struct cpu_disklabel *, int *, int *));
+#endif /* _KERNEL */
+
+#endif /* _ARM_DISKLABEL_H_ */
diff --git a/sys/arch/arm/include/elf_abi.h b/sys/arch/arm/include/elf_abi.h
new file mode 100644
index 00000000000..3115568b7c3
--- /dev/null
+++ b/sys/arch/arm/include/elf_abi.h
@@ -0,0 +1,7 @@
+/* $OpenBSD: elf_abi.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+#ifndef _ARM_ELF_ABI_H
+#define _ARM_ELF_ABI_H
+
+#define DT_PROCNUM 0
+
+#endif /* _ARM_ELF_ABI_H */
diff --git a/sys/arch/arm/include/endian.h b/sys/arch/arm/include/endian.h
new file mode 100644
index 00000000000..b9e046a3d37
--- /dev/null
+++ b/sys/arch/arm/include/endian.h
@@ -0,0 +1,8 @@
+/* $OpenBSD: endian.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+
+#ifdef __ARMEB__
+#define BYTE_ORDER BIG_ENDIAN
+#else
+#define BYTE_ORDER LITTLE_ENDIAN
+#endif
+#include <sys/endian.h>
diff --git a/sys/arch/arm/include/fiq.h b/sys/arch/arm/include/fiq.h
new file mode 100644
index 00000000000..e246323b57f
--- /dev/null
+++ b/sys/arch/arm/include/fiq.h
@@ -0,0 +1,69 @@
+/* $OpenBSD: fiq.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: fiq.h,v 1.1 2001/12/20 01:20:23 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_FIQ_H_
+#define _ARM_FIQ_H_
+
+#include <sys/queue.h>
+
+struct fiqregs {
+ u_int fr_r8; /* FIQ mode r8 */
+ u_int fr_r9; /* FIQ mode r9 */
+ u_int fr_r10; /* FIQ mode r10 */
+ u_int fr_r11; /* FIQ mode r11 */
+ u_int fr_r12; /* FIQ mode r12 */
+ u_int fr_r13; /* FIQ mode r13 */
+};
+
+struct fiqhandler {
+ TAILQ_ENTRY(fiqhandler) fh_list;/* link in the FIQ handler stack */
+ void *fh_func; /* FIQ handler routine */
+ size_t fh_size; /* size of FIQ handler */
+ int fh_flags; /* flags; see below */
+ struct fiqregs *fh_regs; /* pointer to regs structure */
+};
+
+#define FH_CANPUSH 0x01 /* can push this handler out of the way */
+
+int fiq_claim(struct fiqhandler *);
+void fiq_release(struct fiqhandler *);
+
+void fiq_getregs(struct fiqregs *);
+void fiq_setregs(struct fiqregs *);
+
+#endif /* _ARM_FIQ_H_ */
diff --git a/sys/arch/arm/include/float.h b/sys/arch/arm/include/float.h
new file mode 100644
index 00000000000..1466dd4e7d7
--- /dev/null
+++ b/sys/arch/arm/include/float.h
@@ -0,0 +1,90 @@
+/* $OpenBSD: float.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: float.h,v 1.1 2001/01/10 19:02:06 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)float.h 8.1 (Berkeley) 6/11/93
+ */
+
+#ifndef _ARM32_FLOAT_H_
+#define _ARM32_FLOAT_H_
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+extern int __flt_rounds __P((void)); /* current rounding mode, for FLT_ROUNDS below */
+__END_DECLS
+
+#define FLT_RADIX 2 /* b */
+#define FLT_ROUNDS __flt_rounds()
+
+#define FLT_MANT_DIG 24 /* p */
+#define FLT_EPSILON 1.19209290E-7F /* b**(1-p) */
+#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP (-125) /* emin */
+#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */
+#define FLT_MIN_10_EXP (-37) /* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP 128 /* emax */
+#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */
+
+#define DBL_MANT_DIG 53
+#define DBL_EPSILON 2.2204460492503131E-16
+#define DBL_DIG 15
+#define DBL_MIN_EXP (-1021)
+#define DBL_MIN 2.2250738585072014E-308
+#define DBL_MIN_10_EXP (-307)
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.7976931348623157E+308
+#define DBL_MAX_10_EXP 308
+
+#define LDBL_MANT_DIG DBL_MANT_DIG /* long double is identical to double on this port */
+#define LDBL_EPSILON DBL_EPSILON
+#define LDBL_DIG DBL_DIG
+#define LDBL_MIN_EXP DBL_MIN_EXP
+#define LDBL_MIN DBL_MIN
+#define LDBL_MIN_10_EXP DBL_MIN_10_EXP
+#define LDBL_MAX_EXP DBL_MAX_EXP
+#define LDBL_MAX DBL_MAX
+#define LDBL_MAX_10_EXP DBL_MAX_10_EXP
+
+#endif /* _ARM32_FLOAT_H_ */
diff --git a/sys/arch/arm/include/fp.h b/sys/arch/arm/include/fp.h
new file mode 100644
index 00000000000..5d9096e0fa5
--- /dev/null
+++ b/sys/arch/arm/include/fp.h
@@ -0,0 +1,87 @@
+/* $OpenBSD: fp.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: fp.h,v 1.1 2001/01/10 19:02:06 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * Copyright (c) 1995 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * fp.h
+ *
+ * FP info
+ *
+ * Created : 10/10/95
+ */
+
+#ifndef __ARM32_FP_H
+#define __ARM32_FP_H
+
+/*
+ * An extended precision floating point number
+ */
+
+typedef struct fp_extended_precision {
+ u_int32_t fp_exponent; /* exponent word */
+ u_int32_t fp_mantissa_hi; /* mantissa, high 32 bits */
+ u_int32_t fp_mantissa_lo; /* mantissa, low 32 bits */
+} fp_extended_precision_t;
+
+typedef struct fp_extended_precision fp_reg_t; /* FP registers are stored in extended precision */
+
+/*
+ * Information about the FPE-SP state that is stored in the pcb
+ *
+ * This needs to move and be hidden from userland.
+ */
+
+struct fpe_sp_state {
+ unsigned int fp_flags; /* FP state flags */
+ unsigned int fp_sr; /* status register (per name -- confirm against FPE) */
+ unsigned int fp_cr; /* control register (per name -- confirm against FPE) */
+ fp_reg_t fp_registers[16]; /* register file: 16 extended-precision slots */
+};
+
+/*
+ * Type for a saved FP context, if we want to translate the context to a
+ * user-readable form
+ */
+
+typedef struct {
+ u_int32_t fpsr; /* FP status register */
+ fp_extended_precision_t regs[8]; /* 8 FP data registers */
+} fp_state_t;
+
+#endif
+
+/* End of fp.h */
diff --git a/sys/arch/arm/include/frame.h b/sys/arch/arm/include/frame.h
new file mode 100644
index 00000000000..2db8310770d
--- /dev/null
+++ b/sys/arch/arm/include/frame.h
@@ -0,0 +1,412 @@
+/* $OpenBSD: frame.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: frame.h,v 1.9 2003/12/01 08:48:33 scw Exp $ */
+
+/*
+ * Copyright (c) 1994-1997 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * frame.h
+ *
+ * Stack frames structures
+ *
+ * Created : 30/09/94
+ */
+
+#ifndef _ARM_FRAME_H_
+#define _ARM_FRAME_H_
+
+#ifndef _LOCORE
+
+#include <sys/signal.h>
+
+/*
+ * Trap frame. Pushed onto the kernel stack on a trap (synchronous exception).
+ */
+
+typedef struct trapframe {
+ register_t tf_spsr; /* Zero on arm26 */
+ register_t tf_r0;
+ register_t tf_r1;
+ register_t tf_r2;
+ register_t tf_r3;
+ register_t tf_r4;
+ register_t tf_r5;
+ register_t tf_r6;
+ register_t tf_r7;
+ register_t tf_r8;
+ register_t tf_r9;
+ register_t tf_r10;
+ register_t tf_r11;
+ register_t tf_r12;
+ register_t tf_usr_sp; /* user-mode r13 (see tf_r13 alias below) */
+ register_t tf_usr_lr; /* user-mode r14 (see tf_r14 alias below) */
+ register_t tf_svc_sp; /* Not used on arm26 */
+ register_t tf_svc_lr; /* Not used on arm26 */
+ register_t tf_pc; /* r15 at the time of the trap */
+} trapframe_t;
+
+/* Register numbers */
+#define tf_r13 tf_usr_sp
+#define tf_r14 tf_usr_lr
+#define tf_r15 tf_pc
+
+/*
+ * Signal frame. Pushed onto user stack before calling sigcode.
+ */
+
+struct sigframe {
+ int sf_signum; /* signal number */
+ siginfo_t *sf_sip; /* siginfo pointer (presumably &sf_si below -- confirm in sendsig) */
+ struct sigcontext *sf_scp; /* sigcontext pointer (presumably &sf_sc below -- confirm) */
+ sig_t sf_handler; /* handler to invoke */
+ struct sigcontext sf_sc; /* saved context */
+ siginfo_t sf_si; /* saved siginfo */
+};
+
+/* the pointers are used in the trampoline code to locate the ucontext */
+#if 0
+struct sigframe_siginfo {
+ siginfo_t sf_si; /* actual saved siginfo */
+ ucontext_t sf_uc; /* actual saved ucontext */
+};
+#endif
+
+#if 0
+#ifdef _KERNEL
+void sendsig_sigcontext(const ksiginfo_t *, const sigset_t *);
+#endif
+#endif
+
+#endif /* _LOCORE */
+
+#ifndef _LOCORE
+
+/*
+ * System stack frames.
+ */
+
+typedef struct irqframe { /* field-for-field the same layout as struct trapframe above */
+ unsigned int if_spsr;
+ unsigned int if_r0;
+ unsigned int if_r1;
+ unsigned int if_r2;
+ unsigned int if_r3;
+ unsigned int if_r4;
+ unsigned int if_r5;
+ unsigned int if_r6;
+ unsigned int if_r7;
+ unsigned int if_r8;
+ unsigned int if_r9;
+ unsigned int if_r10;
+ unsigned int if_r11;
+ unsigned int if_r12;
+ unsigned int if_usr_sp;
+ unsigned int if_usr_lr;
+ unsigned int if_svc_sp;
+ unsigned int if_svc_lr;
+ unsigned int if_pc;
+} irqframe_t;
+
+#define clockframe irqframe /* clock interrupts use the plain IRQ frame */
+
+/*
+ * Switch frame
+ */
+
+struct switchframe { /* registers preserved across a context switch (NOTE(review): presumably built by cpuswitch.S -- confirm) */
+ u_int sf_r4;
+ u_int sf_r5;
+ u_int sf_r6;
+ u_int sf_r7;
+ u_int sf_pc;
+};
+
+/*
+ * Stack frame. Used during stack traces (db_trace.c)
+ */
+struct frame {
+ u_int fr_fp; /* saved frame pointer */
+ u_int fr_sp; /* saved stack pointer */
+ u_int fr_lr; /* saved link register */
+ u_int fr_pc; /* saved program counter */
+};
+
+#ifdef _KERNEL
+void validate_trapframe __P((trapframe_t *, int));
+#endif /* _KERNEL */
+
+#else /* _LOCORE */
+
+/*
+ * AST_ALIGNMENT_FAULT_LOCALS and ENABLE_ALIGNMENT_FAULTS
+ * These are used in order to support dynamic enabling/disabling of
+ * alignment faults when executing old a.out ARM binaries.
+ */
+#if defined(COMPAT_15) && defined(EXEC_AOUT)
+#ifndef MULTIPROCESSOR
+
+/*
+ * Local variables needed by the AST/Alignment Fault macros
+ */
+#define AST_ALIGNMENT_FAULT_LOCALS \
+.Laflt_astpending: ;\
+ .word _C_LABEL(astpending) ;\
+.Laflt_cpufuncs: ;\
+ .word _C_LABEL(cpufuncs) ;\
+.Laflt_curpcb: ;\
+ .word _C_LABEL(curpcb) ;\
+.Laflt_cpu_info_store: ;\
+ .word _C_LABEL(cpu_info_store) /* literal pool for the macros below */
+
+#define GET_CURPCB_ENTER \
+ ldr r1, .Laflt_curpcb ;\
+ ldr r1, [r1] /* r1 = curpcb */
+
+#define GET_CPUINFO_ENTER \
+ ldr r0, .Laflt_cpu_info_store /* r0 = &cpu_info_store */
+
+#define GET_CURPCB_EXIT \
+ ldr r1, .Laflt_curpcb ;\
+ ldr r2, .Laflt_cpu_info_store ;\
+ ldr r1, [r1] /* r1 = curpcb, r2 = &cpu_info_store */
+
+#else /* MULTIPROCESSOR */
+
+#define AST_ALIGNMENT_FAULT_LOCALS \
+.Laflt_astpending: ;\
+ .word _C_LABEL(astpending) ;\
+.Laflt_cpufuncs: ;\
+ .word _C_LABEL(cpufuncs) ;\
+.Laflt_cpu_info: ;\
+ .word _C_LABEL(cpu_info)
+
+#define GET_CURPCB_ENTER \
+ ldr r4, .Laflt_cpu_info ;\
+ bl _C_LABEL(cpu_number) ;\
+ ldr r0, [r4, r0, lsl #2] ;\
+ ldr r1, [r0, #CI_CURPCB] /* r1 = curpcb; NB: clobbers r4 and lr */
+
+#define GET_CPUINFO_ENTER /* nothing to do */
+
+#define GET_CURPCB_EXIT \
+ ldr r7, .Laflt_cpu_info ;\
+ bl _C_LABEL(cpu_number) ;\
+ ldr r2, [r7, r0, lsl #2] ;\
+ ldr r1, [r2, #CI_CURPCB] /* r1 = curpcb, r2 = cpu_info; NB: clobbers r7 and lr */
+#endif /* MULTIPROCESSOR */
+
+/*
+ * This macro must be invoked following PUSHFRAMEINSVC or PUSHFRAME at
+ * the top of interrupt/exception handlers.
+ *
+ * When invoked, r0 *must* contain the value of SPSR on the current
+ * trap/interrupt frame. This is always the case if ENABLE_ALIGNMENT_FAULTS
+ * is invoked immediately after PUSHFRAMEINSVC or PUSHFRAME.
+ */
+#define ENABLE_ALIGNMENT_FAULTS \
+ and r0, r0, #(PSR_MODE) /* Test for USR32 mode */ ;\
+ teq r0, #(PSR_USR32_MODE) ;\
+ bne 1f /* Not USR mode skip AFLT */ ;\
+ GET_CURPCB_ENTER /* r1 = curpcb */ ;\
+ cmp r1, #0x00 /* curpcb NULL? */ ;\
+ ldrne r1, [r1, #PCB_FLAGS] /* Fetch curpcb->pcb_flags */ ;\
+ tstne r1, #PCB_NOALIGNFLT ;\
+ beq 1f /* NULL pcb or AFLTs already on */ ;\
+ GET_CPUINFO_ENTER /* r0 = cpuinfo */ ;\
+ ldr r2, .Laflt_cpufuncs ;\
+ ldr r1, [r0, #CI_CTRL] /* Fetch control register */ ;\
+ mov r0, #-1 ;\
+ mov lr, pc ;\
+ ldr pc, [r2, #CF_CONTROL] /* Enable alignment faults */ ;\
+1:
+
+/*
+ * This macro must be invoked just before PULLFRAMEFROMSVCANDEXIT or
+ * PULLFRAME at the end of interrupt/exception handlers.
+ */
+#define DO_AST_AND_RESTORE_ALIGNMENT_FAULTS \
+ ldr r0, [sp] /* Get the SPSR from stack */ ;\
+ mrs r4, cpsr /* save CPSR */ ;\
+ and r0, r0, #(PSR_MODE) /* Returning to USR mode? */ ;\
+ teq r0, #(PSR_USR32_MODE) ;\
+ ldreq r5, .Laflt_astpending ;\
+ bne 3f /* Nope, get out now */ ;\
+ bic r4, r4, #(I32_bit) ;\
+1: orr r0, r4, #(I32_bit) /* Disable IRQs */ ;\
+ msr cpsr_c, r0 ;\
+ ldr r1, [r5] /* Pending AST? */ ;\
+ teq r1, #0x00000000 ;\
+ bne 2f /* Yup. Go deal with it */ ;\
+ GET_CURPCB_EXIT /* r1 = curpcb, r2 = cpuinfo */ ;\
+ cmp r1, #0x00 /* curpcb NULL? */ ;\
+ ldrne r1, [r1, #PCB_FLAGS] /* Fetch curpcb->pcb_flags */ ;\
+ tstne r1, #PCB_NOALIGNFLT ;\
+ beq 3f /* NULL pcb or AFLTs must stay on */ ;\
+ ldr r1, [r2, #CI_CTRL] /* Fetch control register */ ;\
+ ldr r2, .Laflt_cpufuncs ;\
+ mov r0, #-1 ;\
+ bic r1, r1, #CPU_CONTROL_AFLT_ENABLE /* Disable AFLTs */ ;\
+ adr lr, 3f ;\
+ ldr pc, [r2, #CF_CONTROL] /* Set new CTRL reg value */ ;\
+2: mov r1, #0x00000000 ;\
+ str r1, [r5] /* Clear astpending */ ;\
+ msr cpsr_c, r4 /* Restore interrupts */ ;\
+ mov r0, sp ;\
+ adr lr, 1b ;\
+ b _C_LABEL(ast) /* ast(frame) */ ;\
+3:
+
+#else /* !(COMPAT_15 && EXEC_AOUT) */
+
+#define AST_ALIGNMENT_FAULT_LOCALS ;\
+.Laflt_astpending: ;\
+ .word _C_LABEL(astpending) /* -> AST-pending flag polled on return to user mode */
+
+#define ENABLE_ALIGNMENT_FAULTS /* nothing */
+
+#define DO_AST_AND_RESTORE_ALIGNMENT_FAULTS \
+ ldr r0, [sp] /* Get the SPSR from stack */ ;\
+ mrs r4, cpsr /* save CPSR */ ;\
+ and r0, r0, #(PSR_MODE) /* Returning to USR mode? */ ;\
+ teq r0, #(PSR_USR32_MODE) ;\
+ ldreq r5, .Laflt_astpending ;\
+ bne 2f /* Nope, get out now */ ;\
+ bic r4, r4, #(I32_bit) ;\
+1: orr r0, r4, #(I32_bit) /* Disable IRQs */ ;\
+ msr cpsr_c, r0 ;\
+ ldr r1, [r5] /* Pending AST? */ ;\
+ teq r1, #0x00000000 ;\
+ beq 2f /* Nope. Just bail */ ;\
+ mov r1, #0x00000000 ;\
+ str r1, [r5] /* Clear astpending */ ;\
+ msr cpsr_c, r4 /* Restore interrupts */ ;\
+ mov r0, sp ;\
+ adr lr, 1b ;\
+ b _C_LABEL(ast) /* ast(frame) */ ;\
+2:
+#endif /* COMPAT_15 && EXEC_AOUT */
+
+/*
+ * ASM macros for pushing and pulling trapframes from the stack
+ *
+ * These macros are used to handle the irqframe and trapframe structures
+ * defined above.
+ */
+
+/*
+ * PUSHFRAME - macro to push a trap frame on the stack in the current mode
+ * Since the current mode is used, the SVC lr field is not defined.
+ *
+ * NOTE: r13 and r14 are stored separately as a work around for the
+ * SA110 rev 2 STM^ bug
+ */
+
+#define PUSHFRAME \
+ str lr, [sp, #-4]!; /* Push the return address */ \
+ sub sp, sp, #(4*17); /* Adjust the stack pointer */ \
+ stmia sp, {r0-r12}; /* Save r0-r12 (shared with user mode) */ \
+ add r0, sp, #(4*13); /* Adjust the stack pointer */ \
+ stmia r0, {r13-r14}^; /* Save banked user sp/lr (STM^) */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ mrs r0, spsr_all; /* Put the SPSR on the stack */ \
+ str r0, [sp, #-4]!
+
+/*
+ * PULLFRAME - macro to pull a trap frame from the stack in the current mode
+ * Since the current mode is used, the SVC lr field is ignored.
+ */
+
+#define PULLFRAME \
+ ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
+ msr spsr_all, r0; \
+ ldmia sp, {r0-r14}^; /* Restore user-bank r0-r14 (LDM^) */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ add sp, sp, #(4*17); /* Adjust the stack pointer */ \
+ ldr lr, [sp], #0x0004 /* Pull the return address */
+
+/*
+ * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode
+ * This should only be used if the processor is not currently in SVC32
+ * mode. The processor mode is switched to SVC mode and the trap frame is
+ * stored. The SVC lr field is used to store the previous value of
+ * lr in SVC mode.
+ *
+ * NOTE: r13 and r14 are stored separately as a work around for the
+ * SA110 rev 2 STM^ bug
+ */
+
+#define PUSHFRAMEINSVC \
+ stmdb sp, {r0-r3}; /* Save 4 registers */ \
+ mov r0, lr; /* Save xxx32 r14 */ \
+ mov r1, sp; /* Save xxx32 sp */ \
+ mrs r3, spsr; /* Save xxx32 spsr */ \
+ mrs r2, cpsr; /* Get the CPSR */ \
+ bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \
+ orr r2, r2, #(PSR_SVC32_MODE); \
+ msr cpsr_c, r2; /* Punch into SVC mode */ \
+ mov r2, sp; /* Save SVC sp */ \
+ str r0, [sp, #-4]!; /* Push return address */ \
+ str lr, [sp, #-4]!; /* Push SVC lr */ \
+ str r2, [sp, #-4]!; /* Push SVC sp */ \
+ msr spsr_all, r3; /* Restore correct spsr */ \
+ ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \
+ sub sp, sp, #(4*15); /* Adjust the stack pointer */ \
+ stmia sp, {r0-r12}; /* Save r0-r12 (shared with user mode) */ \
+ add r0, sp, #(4*13); /* Adjust the stack pointer */ \
+ stmia r0, {r13-r14}^; /* Save banked user sp/lr (STM^) */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ mrs r0, spsr_all; /* Put the SPSR on the stack */ \
+ str r0, [sp, #-4]!
+
+/*
+ * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack
+ * in SVC32 mode and restore the saved processor mode and PC.
+ * This should be used when the SVC lr register needs to be restored on
+ * exit.
+ */
+
+#define PULLFRAMEFROMSVCANDEXIT \
+ ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
+ msr spsr_all, r0; /* restore SPSR */ \
+ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ add sp, sp, #(4*15); /* Adjust the stack pointer */ \
+ ldmia sp, {sp, lr, pc}^ /* Restore svc sp/lr, return; ^ with pc restores CPSR from SPSR */
+
+#endif /* _LOCORE */
+
+#endif /* _ARM_FRAME_H_ */
+
+/* End of frame.h */
diff --git a/sys/arch/arm/include/ieee.h b/sys/arch/arm/include/ieee.h
new file mode 100644
index 00000000000..5f9b89ecc0e
--- /dev/null
+++ b/sys/arch/arm/include/ieee.h
@@ -0,0 +1,191 @@
+/* $OpenBSD: ieee.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: ieee.h,v 1.2 2001/02/21 17:43:50 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This software was developed by the Computer Systems Engineering group
+ * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
+ * contributed to Berkeley.
+ *
+ * All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Lawrence Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ieee.h 8.1 (Berkeley) 6/11/93
+ */
+
+/*
+ * ieee.h defines the machine-dependent layout of the machine's IEEE
+ * floating point.
+ */
+
+/*
+ * Define the number of bits in each fraction and exponent.
+ *
+ * k k+1
+ * Note that 1.0 x 2 == 0.1 x 2 and that denorms are represented
+ *
+ * (-exp_bias+1)
+ * as fractions that look like 0.fffff x 2 . This means that
+ *
+ * -126
+ * the number 0.10000 x 2 , for instance, is the same as the normalized
+ *
+ * -127 -128
+ * float 1.0 x 2 . Thus, to represent 2 , we need one leading zero
+ *
+ * -129
+ * in the fraction; to represent 2 , we need two, and so on. This
+ *
+ * (-exp_bias-fracbits+1)
+ * implies that the smallest denormalized number is 2
+ *
+ * for whichever format we are talking about: for single precision, for
+ *
+ * -126 -149
+ * instance, we get .00000000000000000000001 x 2 , or 1.0 x 2 , and
+ *
+ * -149 == -127 - 23 + 1.
+ */
+
+/*
+ * The ARM has two sets of FP data formats. The FPA supports 32-bit, 64-bit
+ * and 96-bit IEEE formats, with the words in big-endian order. VFP supports
+ * 32-bit and 64-bit IEEE formats with the words in the CPU's native byte
+ * order.
+ *
+ * The FPA also has two packed decimal formats, but we ignore them here.
+ */
+
+#define SNG_EXPBITS 8
+#define SNG_FRACBITS 23
+
+#define DBL_EXPBITS 11
+#define DBL_FRACBITS 52
+
+#ifndef __VFP_FP__
+#define E80_EXPBITS 15
+#define E80_FRACBITS 64
+
+#define EXT_EXPBITS 15
+#define EXT_FRACBITS 112
+#endif
+
+struct ieee_single { /* NOTE(review): no __ARMEB__ variant here, unlike ieee_double below -- verify */
+ u_int sng_frac:23;
+ u_int sng_exponent:8;
+ u_int sng_sign:1;
+};
+
+#ifdef __VFP_FP__
+struct ieee_double {
+#ifdef __ARMEB__
+ u_int dbl_sign:1;
+ u_int dbl_exp:11;
+ u_int dbl_frach:20; /* fraction, high 20 bits (52 = 20 + 32) */
+ u_int dbl_fracl; /* fraction, low 32 bits */
+#else /* !__ARMEB__ */
+ u_int dbl_fracl; /* fraction, low 32 bits */
+ u_int dbl_frach:20; /* fraction, high 20 bits */
+ u_int dbl_exp:11;
+ u_int dbl_sign:1;
+#endif /* !__ARMEB__ */
+};
+#else /* !__VFP_FP__ */
+struct ieee_double { /* FPA layout: words in big-endian order (see comment above) */
+ u_int dbl_frach:20; /* fraction, high 20 bits */
+ u_int dbl_exp:11;
+ u_int dbl_sign:1;
+ u_int dbl_fracl; /* fraction, low 32 bits */
+};
+
+union ieee_double_u {
+ double dblu_d;
+ struct ieee_double dblu_dbl;
+};
+
+
+struct ieee_e80 { /* FPA extended precision: 80-bit value in 96 bits of storage */
+ u_int e80_exp:15;
+ u_int e80_zero:16; /* padding; presumably always zero -- confirm */
+ u_int e80_sign:1;
+ u_int e80_frach:31;
+ u_int e80_j:1; /* NOTE(review): presumably the explicit integer bit -- confirm */
+ u_int e80_fracl;
+};
+
+struct ieee_ext { /* 128-bit (quad) precision */
+ u_int ext_frach:16; /* fraction, high 16 bits (112 = 16 + 3*32) */
+ u_int ext_exp:15;
+ u_int ext_sign:1;
+ u_int ext_frachm;
+ u_int ext_fraclm;
+ u_int ext_fracl;
+};
+#endif /* !__VFP_FP__ */
+
+/*
+ * Floats whose exponent is in [1..INFNAN) (of whatever type) are
+ * `normal'. Floats whose exponent is INFNAN are either Inf or NaN.
+ * Floats whose exponent is zero are either zero (iff all fraction
+ * bits are zero) or subnormal values.
+ *
+ * A NaN is a `signalling NaN' if its QUIETNAN bit is clear in its
+ * high fraction; if the bit is set, it is a `quiet NaN'.
+ */
+#define SNG_EXP_INFNAN 255 /* all-ones 8-bit exponent */
+#define DBL_EXP_INFNAN 2047 /* all-ones 11-bit exponent */
+#ifndef __VFP_FP__
+#define E80_EXP_INFNAN 32767 /* all-ones 15-bit exponent */
+#define EXT_EXP_INFNAN 32767 /* all-ones 15-bit exponent */
+#endif /* !__VFP_FP__ */
+
+#if 0
+#define SNG_QUIETNAN (1 << 22)
+#define DBL_QUIETNAN (1 << 19)
+#ifndef __VFP_FP__
+#define E80_QUIETNAN (1 << 15)
+#define EXT_QUIETNAN (1 << 15)
+#endif /* !__VFP_FP__ */
+#endif
+
+/*
+ * Exponent biases.
+ */
+#define SNG_EXP_BIAS 127
+#define DBL_EXP_BIAS 1023
+#ifndef __VFP_FP__
+#define E80_EXP_BIAS 16383
+#define EXT_EXP_BIAS 16383
+#endif /* !__VFP_FP__ */
diff --git a/sys/arch/arm/include/ieeefp.h b/sys/arch/arm/include/ieeefp.h
new file mode 100644
index 00000000000..6aaf2b950e1
--- /dev/null
+++ b/sys/arch/arm/include/ieeefp.h
@@ -0,0 +1,41 @@
+/* $OpenBSD: ieeefp.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: ieeefp.h,v 1.1 2001/01/10 19:02:06 bjh21 Exp $ */
+
+/*
+ * Based on ieeefp.h written by J.T. Conklin, Apr 28, 1995
+ * Public domain.
+ */
+
+#ifndef _ARM32_IEEEFP_H_
+#define _ARM32_IEEEFP_H_
+
+/* FP exception codes */
+
+#define FP_EXCEPT_INV 0 /* bit numbers within fp_except; see FP_X_* below */
+#define FP_EXCEPT_DZ 1
+#define FP_EXCEPT_OFL 2
+#define FP_EXCEPT_UFL 3
+#define FP_EXCEPT_IMP 4
+
+/* Exception type (used by fpsetmask() et al.) */
+
+typedef int fp_except;
+
+/* Bit defines for fp_except */
+
+#define FP_X_INV (1 << FP_EXCEPT_INV) /* invalid operation exception */
+#define FP_X_DZ (1 << FP_EXCEPT_DZ) /* divide-by-zero exception */
+#define FP_X_OFL (1 << FP_EXCEPT_OFL) /* overflow exception */
+#define FP_X_UFL (1 << FP_EXCEPT_UFL) /* underflow exception */
+#define FP_X_IMP (1 << FP_EXCEPT_IMP) /* imprecise (loss of precision; "inexact") */
+
+/* Rounding modes */
+
+typedef enum {
+ FP_RN=0, /* round to nearest representable number */
+ FP_RP=1, /* round toward positive infinity */
+ FP_RM=2, /* round toward negative infinity */
+ FP_RZ=3 /* round to zero (truncate) */
+} fp_rnd;
+
+#endif /* _ARM32_IEEEFP_H_ */
diff --git a/sys/arch/arm/include/internal_types.h b/sys/arch/arm/include/internal_types.h
new file mode 100644
index 00000000000..96546cfacb9
--- /dev/null
+++ b/sys/arch/arm/include/internal_types.h
@@ -0,0 +1,6 @@
+/* $OpenBSD: internal_types.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* Public domain */
+#ifndef _ARM_INTERNAL_TYPES_H_
+#define _ARM_INTERNAL_TYPES_H_
+
+#endif
diff --git a/sys/arch/arm/include/isa_machdep.h b/sys/arch/arm/include/isa_machdep.h
new file mode 100644
index 00000000000..97e2c1171c8
--- /dev/null
+++ b/sys/arch/arm/include/isa_machdep.h
@@ -0,0 +1,193 @@
+/* $OpenBSD: isa_machdep.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: isa_machdep.h,v 1.3 2002/01/07 22:58:07 chris Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM32_ISA_MACHDEP_H_
+#define _ARM32_ISA_MACHDEP_H_
+
+#include <machine/bus.h>
+#include <dev/isa/isadmavar.h>
+
+#define __NO_ISA_INTR_CHECK
+/*
+ * Types provided to machine-independent ISA code.
+ */
+struct arm32_isa_chipset { /* opaque ISA chipset tag; currently carries no state */
+ /*
+ struct isa_dma_state ic_dmastate;
+ */
+};
+
+typedef struct arm32_isa_chipset *isa_chipset_tag_t;
+
+struct device; /* XXX */
+struct isabus_attach_args; /* XXX */
+
+/*
+ * Functions provided to machine-independent ISA code.
+ */
+void isa_attach_hook(struct device *, struct device *,
+ struct isabus_attach_args *);
+const struct evcnt *isa_intr_evcnt(isa_chipset_tag_t ic, int irq);
+void *isa_intr_establish(isa_chipset_tag_t ic, int irq, int type,
+ int level, int (*ih_fun)(void *), void *ih_arg, char *name);
+void isa_intr_disestablish(isa_chipset_tag_t ic, void *handler);
+
+#if 0
+#define isa_dmainit(ic, bst, dmat, d) \
+ _isa_dmainit(&(ic)->ic_dmastate, (bst), (dmat), (d))
+#define isa_dmacascade(ic, c) \
+ _isa_dmacascade(&(ic)->ic_dmastate, (c))
+#define isa_dmamaxsize(ic, c) \
+ _isa_dmamaxsize(&(ic)->ic_dmastate, (c))
+#define isa_dmamap_create(ic, c, s, f) \
+ _isa_dmamap_create(&(ic)->ic_dmastate, (c), (s), (f))
+#define isa_dmamap_destroy(ic, c) \
+ _isa_dmamap_destroy(&(ic)->ic_dmastate, (c))
+#define isa_dmastart(ic, c, a, n, p, f, bf) \
+ _isa_dmastart(&(ic)->ic_dmastate, (c), (a), (n), (p), (f), (bf))
+#define isa_dmaabort(ic, c) \
+ _isa_dmaabort(&(ic)->ic_dmastate, (c))
+#define isa_dmacount(ic, c) \
+ _isa_dmacount(&(ic)->ic_dmastate, (c))
+#define isa_dmafinished(ic, c) \
+ _isa_dmafinished(&(ic)->ic_dmastate, (c))
+#define isa_dmadone(ic, c) \
+ _isa_dmadone(&(ic)->ic_dmastate, (c))
+#define isa_dmafreeze(ic) \
+ _isa_dmafreeze(&(ic)->ic_dmastate)
+#define isa_dmathaw(ic) \
+ _isa_dmathaw(&(ic)->ic_dmastate)
+#define isa_dmamem_alloc(ic, c, s, ap, f) \
+ _isa_dmamem_alloc(&(ic)->ic_dmastate, (c), (s), (ap), (f))
+#define isa_dmamem_free(ic, c, a, s) \
+ _isa_dmamem_free(&(ic)->ic_dmastate, (c), (a), (s))
+#define isa_dmamem_map(ic, c, a, s, kp, f) \
+ _isa_dmamem_map(&(ic)->ic_dmastate, (c), (a), (s), (kp), (f))
+#define isa_dmamem_unmap(ic, c, k, s) \
+ _isa_dmamem_unmap(&(ic)->ic_dmastate, (c), (k), (s))
+#define isa_dmamem_mmap(ic, c, a, s, o, p, f) \
+ _isa_dmamem_mmap(&(ic)->ic_dmastate, (c), (a), (s), (o), (p), (f))
+#define isa_drq_alloc(ic, c) \
+ _isa_drq_alloc(&(ic)->ic_dmastate, c)
+#define isa_drq_free(ic, c) \
+ _isa_drq_free(&(ic)->ic_dmastate, c)
+#define isa_drq_isfree(ic, c) \
+ _isa_drq_isfree(&(ic)->ic_dmastate, (c))
+#define isa_malloc(ic, c, s, p, f) \
+ _isa_malloc(&(ic)->ic_dmastate, (c), (s), (p), (f))
+#define isa_free(a, p) \
+ _isa_free((a), (p))
+#define isa_mappage(m, o, p) \
+ _isa_mappage((m), (o), (p))
+#endif
+
+/*
+ * ALL OF THE FOLLOWING ARE MACHINE-DEPENDENT, AND SHOULD NOT BE USED
+ * BY PORTABLE CODE.
+ */
+
+extern struct arm32_bus_dma_tag isa_bus_dma_tag;
+
+/*
+ * Cookie used by ISA DMA. A pointer to one of these is stashed in
+ * the DMA map.
+ */
+struct arm32_isa_dma_cookie {
+ int id_flags; /* flags; see below */
+
+ /*
+ * Information about the original buffer used during
+ * DMA map syncs. Note that origbuflen is only used
+ * for ID_BUFTYPE_LINEAR.
+ */
+ void *id_origbuf; /* pointer to orig buffer if
+ bouncing */
+ bus_size_t id_origbuflen; /* ...and size */
+ int id_buftype; /* type of buffer */
+
+ void *id_bouncebuf; /* pointer to the bounce buffer */
+ bus_size_t id_bouncebuflen; /* ...and size */
+ int id_nbouncesegs; /* number of valid bounce segs */
+ bus_dma_segment_t id_bouncesegs[0]; /* variable-length tail (GNU C
+ zero-length array) of bounce segments */
+};
+
+/* id_flags */
+#define ID_MIGHT_NEED_BOUNCE 0x01 /* map could need bounce buffers */
+#define ID_HAS_BOUNCE 0x02 /* map currently has bounce buffers */
+#define ID_IS_BOUNCING 0x04 /* map is bouncing current xfer */
+
+/* id_buftype */
+#define ID_BUFTYPE_INVALID 0
+#define ID_BUFTYPE_LINEAR 1
+#define ID_BUFTYPE_MBUF 2
+#define ID_BUFTYPE_UIO 3
+#define ID_BUFTYPE_RAW 4
+
+/* bus space tags */
+extern struct bus_space isa_io_bs_tag;
+extern struct bus_space isa_mem_bs_tag;
+
+/* ISA chipset */
+extern struct arm32_isa_chipset isa_chipset_tag;
+
+/* for pccons.c */
+#define MONO_BASE 0x3B4
+#define MONO_BUF 0x000B0000
+#define CGA_BASE 0x3D4
+#define CGA_BUF 0x000B8000
+#define VGA_BUF 0xA0000
+#define VGA_BUF_LEN (0xBFFFF - 0xA0000)
+
+void isa_init(vaddr_t, vaddr_t);
+void isa_io_init(vaddr_t, vaddr_t);
+void isa_dma_init(void);
+vaddr_t isa_io_data_vaddr(void);
+vaddr_t isa_mem_data_vaddr(void);
+int isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq);
+void isa_intr_init(void);
+
+/*
+ * Miscellaneous functions.
+ */
+void sysbeep(int, int); /* beep with the system speaker */
+void isa_fillw(u_int val, void *addr, size_t len);
+
+#endif /* _ARM32_ISA_MACHDEP_H_ XXX */
diff --git a/sys/arch/arm/include/katelib.h b/sys/arch/arm/include/katelib.h
new file mode 100644
index 00000000000..b17905d0ce7
--- /dev/null
+++ b/sys/arch/arm/include/katelib.h
@@ -0,0 +1,99 @@
+/* $OpenBSD: katelib.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: katelib.h,v 1.3 2001/11/23 19:21:48 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1994-1996 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * katelib.h
+ *
+ * Prototypes for machine specific functions. Most of these
+ * could be inlined.
+ *
+ * This should not really be a separate header file. Eventually I will merge
+ * this into other header files once I have decided where the declarations
+ * should go.
+ *
+ * Created : 18/09/94
+ *
+ * Based on kate/katelib/prototypes.h
+ */
+
+/*
+ * USE OF THIS FILE IS DEPRECATED
+ */
+
+#include <sys/types.h>
+#include <arm/cpufunc.h>
+
+#ifdef _KERNEL
+
+/* Assembly modules */
+
+/* In blockio.S */
+#include <arm/blockio.h>
+
+/* Macros for reading and writing words, shorts, bytes */
+
+#define WriteWord(a, b) \
+*((volatile unsigned int *)(a)) = (b)
+
+#define ReadWord(a) \
+(*((volatile unsigned int *)(a)))
+
+#define WriteShort(a, b) \
+*((volatile unsigned int *)(a)) = ((b) | ((b) << 16))
+
+#define ReadShort(a) \
+((*((volatile unsigned int *)(a))) & 0xffff)
+
+#define WriteByte(a, b) \
+*((volatile unsigned char *)(a)) = (b)
+
+#define ReadByte(a) \
+(*((volatile unsigned char *)(a)))
+
+/* Define in/out macros */
+
+#define inb(port) ReadByte((port))
+#define outb(port, byte) WriteByte((port), (byte))
+#define inw(port) ReadShort((port))
+#define outw(port, word) WriteShort((port), (word))
+#define inl(port) ReadWord((port))
+#define outl(port, lword) WriteWord((port), (lword))
+
+#endif
+
+/* End of katelib.h */
diff --git a/sys/arch/arm/include/limits.h b/sys/arch/arm/include/limits.h
new file mode 100644
index 00000000000..44c99876b4f
--- /dev/null
+++ b/sys/arch/arm/include/limits.h
@@ -0,0 +1,54 @@
+/* $OpenBSD: limits.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: limits.h,v 1.4 2003/04/28 23:16:18 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1988 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)limits.h 7.2 (Berkeley) 6/28/90
+ */
+
+#ifndef _ARM32_LIMITS_H_
+#define _ARM32_LIMITS_H_
+
+#define MB_LEN_MAX 1 /* no multibyte characters */
+
+#if !defined(_ANSI_SOURCE)
+#define SIZE_MAX UINT_MAX /* max value for a size_t */
+#define SSIZE_MAX INT_MAX /* max value for a ssize_t */
+
+#if !defined(_POSIX_SOURCE) && !defined(_XOPEN_SOURCE)
+#define SIZE_T_MAX UINT_MAX /* max value for a size_t */
+
+#define UQUAD_MAX 0xffffffffffffffffULL /* max unsigned quad */
+#define QUAD_MAX 0x7fffffffffffffffLL /* max signed quad */
+#define QUAD_MIN (-0x7fffffffffffffffLL-1) /* min signed quad */
+
+#endif /* !_POSIX_SOURCE && !_XOPEN_SOURCE */
+#endif /* !_ANSI_SOURCE */
+
+#endif /* _ARM32_LIMITS_H_ */
diff --git a/sys/arch/arm/include/lock.h b/sys/arch/arm/include/lock.h
new file mode 100644
index 00000000000..f949eb8d3b9
--- /dev/null
+++ b/sys/arch/arm/include/lock.h
@@ -0,0 +1,90 @@
+/* $OpenBSD: lock.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: lock.h,v 1.3 2002/10/07 23:19:49 bjh21 Exp $ */
+
+/*-
+ * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Machine-dependent spin lock operations.
+ *
+ * NOTE: The SWP insn used here is available only on ARM architecture
+ * version 3 and later (as well as 2a). What we are going to do is
+ * expect that the kernel will trap and emulate the insn. That will
+ * be slow, but give us the atomicity that we need.
+ */
+
+#ifndef _ARM_LOCK_H_
+#define _ARM_LOCK_H_
+
+static __inline int
+__swp(int __val, __volatile int *__ptr)
+{
+
+ __asm __volatile("swp %0, %1, [%2]"
+ : "=r" (__val) : "r" (__val), "r" (__ptr) : "memory");
+ return __val;
+}
+
+static __inline void __attribute__((__unused__))
+__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
+{
+
+ *alp = __SIMPLELOCK_UNLOCKED;
+}
+
+static __inline void __attribute__((__unused__))
+__cpu_simple_lock(__cpu_simple_lock_t *alp)
+{
+
+ while (__swp(__SIMPLELOCK_LOCKED, alp) != __SIMPLELOCK_UNLOCKED)
+ continue;
+}
+
+static __inline int __attribute__((__unused__))
+__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
+{
+
+ return (__swp(__SIMPLELOCK_LOCKED, alp) == __SIMPLELOCK_UNLOCKED);
+}
+
+static __inline void __attribute__((__unused__))
+__cpu_simple_unlock(__cpu_simple_lock_t *alp)
+{
+
+ *alp = __SIMPLELOCK_UNLOCKED;
+}
+
+#endif /* _ARM_LOCK_H_ */
diff --git a/sys/arch/arm/include/machdep.h b/sys/arch/arm/include/machdep.h
new file mode 100644
index 00000000000..4abb8321219
--- /dev/null
+++ b/sys/arch/arm/include/machdep.h
@@ -0,0 +1,27 @@
+/* $OpenBSD: machdep.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: machdep.h,v 1.7 2002/02/21 02:52:21 thorpej Exp $ */
+
+#ifndef _ARM32_BOOT_MACHDEP_H_
+#define _ARM32_BOOT_MACHDEP_H_
+
+/* misc prototypes used by the many arm machdeps */
+void halt __P((void));
+void parse_mi_bootargs __P((char *));
+void data_abort_handler __P((trapframe_t *));
+void prefetch_abort_handler __P((trapframe_t *));
+void undefinedinstruction_bounce __P((trapframe_t *));
+void dumpsys __P((void));
+
+/*
+ * note that we use void * as all the platforms have different ideas on what
+ * the structure is
+ */
+u_int initarm __P((void *));
+
+/* from arm/arm/intr.c */
+void dosoftints __P((void));
+void set_spl_masks __P((void));
+#ifdef DIAGNOSTIC
+void dump_spl_masks __P((void));
+#endif
+#endif
diff --git a/sys/arch/arm/include/math.h b/sys/arch/arm/include/math.h
new file mode 100644
index 00000000000..a5818196591
--- /dev/null
+++ b/sys/arch/arm/include/math.h
@@ -0,0 +1,4 @@
+/* $OpenBSD: math.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: math.h,v 1.2 2002/02/19 13:08:14 simonb Exp $ */
+
+#define __HAVE_NANF
diff --git a/sys/arch/arm/include/param.h b/sys/arch/arm/include/param.h
new file mode 100644
index 00000000000..72926990653
--- /dev/null
+++ b/sys/arch/arm/include/param.h
@@ -0,0 +1,241 @@
+/* $OpenBSD: param.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: param.h,v 1.9 2002/03/24 03:37:23 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1994,1995 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the RiscBSD team.
+ * 4. The name "RiscBSD" nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARM_ARM_PARAM_H_
+#define _ARM_ARM_PARAM_H_
+
+
+/*
+ * Machine dependent constants for ARM6+ processors
+ */
+/* These are defined in the Port File before it includes
+ * this file. */
+
+#define PAGE_SHIFT 12 /* LOG2(NBPG) */
+#define PGSHIFT 12 /* LOG2(NBPG) */
+#define PAGE_SIZE (1 << PAGE_SHIFT) /* bytes/page */
+#define NBPG (1 << PAGE_SHIFT) /* bytes/page */
+#define PAGE_MASK (PAGE_SIZE - 1)
+#define PGOFSET (PAGE_SIZE - 1)
+#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
+
+#define SSIZE 1 /* initial stack size/NBPG */
+#define SINCR 1 /* increment of stack/NBPG */
+#define UPAGES 2 /* pages of u-area */
+#define USPACE (UPAGES * PAGE_SIZE) /* total size of u-area */
+
+#ifndef MSGBUFSIZE
+#define MSGBUFSIZE PAGE_SIZE /* default message buffer size */
+#endif
+
+#ifndef NMBCLUSTERS
+#ifdef GATEWAY
+#define NMBCLUSTERS 2048 /* map size, max cluster allocation */
+#else
+#define NMBCLUSTERS 1024 /* map size, max cluster allocation */
+#endif
+#endif
+
+/*
+ * Minimum and maximum sizes of the kernel malloc arena in PAGE_SIZE-sized
+ * logical pages.
+ */
+#define NKMEMPAGES_MIN_DEFAULT ((6 * 1024 * 1024) >> PAGE_SHIFT)
+#define NKMEMPAGES_MAX_DEFAULT ((7 * 1024 * 1024) >> PAGE_SHIFT)
+
+/* Constants used to divide the USPACE area */
+
+/*
+ * The USPACE area contains :
+ * 1. the user structure for the process
+ * 2. the fp context for FP emulation
+ * 3. the kernel (svc) stack
+ * 4. the undefined instruction stack
+ *
+ * The layout of the area looks like this
+ *
+ * | user area | FP context | undefined stack | kernel stack |
+ *
+ * The size of the user area is known.
+ * The size of the FP context is variable depending of the FP emulator
+ * in use and whether there is hardware FP support. However we can put
+ * an upper limit on it.
+ * The undefined stack needs to be at least 512 bytes. This is a requirement
+ * if the FP emulators
+ * The kernel stack should be at least 4K is size.
+ *
+ * The stack top addresses are used to set the stack pointers. The stack bottom
+ * addresses at the addresses monitored by the diagnostic code for stack overflows
+ *
+ */
+
+#define FPCONTEXTSIZE (0x100)
+#define USPACE_SVC_STACK_TOP (USPACE)
+#define USPACE_SVC_STACK_BOTTOM (USPACE_SVC_STACK_TOP - 0x1000)
+#define USPACE_UNDEF_STACK_TOP (USPACE_SVC_STACK_BOTTOM - 0x10)
+#define USPACE_UNDEF_STACK_BOTTOM (sizeof(struct user) + FPCONTEXTSIZE + 10)
+
+#define arm_btop(x) ((x) >> PAGE_SHIFT)
+#define arm_ptob(x) ((x) << PAGE_SHIFT)
+#define arm_trunc_page(x) ((unsigned)(x) & ~PAGE_MASK)
+
+#ifdef _KERNEL
+#ifndef _LOCORE
+void delay __P((unsigned));
+#define DELAY(x) delay(x)
+#endif
+#endif
+
+/*
+ * Machine dependent constants for all ARM processors
+ */
+
+/*
+ * For KERNEL code:
+ * MACHINE must be defined by the individual port. This is so that
+ * uname returns the correct thing, etc.
+ *
+ * MACHINE_ARCH may be defined by individual ports as a temporary
+ * measure while we're finishing the conversion to ELF.
+ *
+ * For non-KERNEL code:
+ * If ELF, MACHINE and MACHINE_ARCH are forced to "arm/armeb".
+ */
+
+#if defined(_KERNEL)
+#ifndef MACHINE_ARCH /* XXX For now */
+#ifndef __ARMEB__
+#define _MACHINE_ARCH arm
+#define MACHINE_ARCH "arm"
+#else
+#define _MACHINE_ARCH armeb
+#define MACHINE_ARCH "armeb"
+#endif /* __ARMEB__ */
+#endif /* MACHINE_ARCH */
+#elif defined(__ELF__)
+#undef _MACHINE
+#undef MACHINE
+#undef _MACHINE_ARCH
+#undef MACHINE_ARCH
+#define _MACHINE arm
+#define MACHINE "arm"
+#ifndef __ARMEB__
+#define _MACHINE_ARCH arm
+#define MACHINE_ARCH "arm"
+#else
+#define _MACHINE_ARCH armeb
+#define MACHINE_ARCH "armeb"
+#endif /* __ARMEB__ */
+#endif /* __ELF__ */
+
+#define MID_MACHINE MID_ARM6
+
+/*
+ * Round p (pointer or byte index) up to a correctly-aligned value
+ * for all data types (int, long, ...). The result is u_int and
+ * must be cast to any desired pointer type.
+ *
+ * ALIGNED_POINTER is a boolean macro that checks whether an address
+ * is valid to fetch data elements of type t from on this architecture.
+ * This does not reflect the optimal alignment, just the possibility
+ * (within reasonable limits).
+ *
+ */
+#define ALIGNBYTES (sizeof(int) - 1)
+#define ALIGN(p) (((u_int)(p) + ALIGNBYTES) &~ ALIGNBYTES)
+#define ALIGNED_POINTER(p,t) ((((u_long)(p)) & (sizeof(t)-1)) == 0)
+/* ARM-specific macro to align a stack pointer (downwards). */
+#define STACKALIGNBYTES (8 - 1)
+#define STACKALIGN(p) ((u_int)(p) &~ STACKALIGNBYTES)
+
+#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
+#define DEV_BSIZE (1 << DEV_BSHIFT)
+#define BLKDEV_IOSIZE 2048
+
+#ifndef MAXPHYS
+#define MAXPHYS 65536 /* max I/O transfer size */
+#endif
+
+/* pages ("clicks") to disk blocks */
+#define ctod(x) ((x) << (PAGE_SHIFT - DEV_BSHIFT))
+#define dtoc(x) ((x) >> (PAGE_SHIFT - DEV_BSHIFT))
+/*#define dtob(x) ((x) << DEV_BSHIFT)*/
+
+#define ctob(x) ((x) << PAGE_SHIFT)
+
+/* bytes to pages */
+#define btoc(x) (((x) + PAGE_MASK) >> PAGE_SHIFT)
+
+#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \
+ ((bytes) >> DEV_BSHIFT)
+#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \
+ ((db) << DEV_BSHIFT)
+
+/*
+ * Map a ``block device block'' to a file system block.
+ * This should be device dependent, and should use the bsize
+ * field from the disk label.
+ * For now though just use DEV_BSIZE.
+ */
+#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE / DEV_BSIZE))
+
+/*
+ * Constants related to network buffer management.
+ * MCLBYTES must be no larger than NBPG (the software page size), and,
+ * on machines that exchange pages of input or output buffers with mbuf
+ * clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple
+ * of the hardware page size.
+ */
+#define MSIZE 256 /* size of an mbuf */
+
+#ifndef MCLSHIFT
+#define MCLSHIFT 11 /* convert bytes to m_buf clusters */
+ /* 2K cluster can hold Ether frame */
+#endif /* MCLSHIFT */
+
+#define MCLBYTES (1 << MCLSHIFT) /* size of a m_buf cluster */
+
+#define ovbcopy bcopy
+
+#ifdef _KERNEL
+#ifdef _LOCORE
+#include <machine/psl.h>
+#else
+#include <sys/param.h>
+#include <machine/cpu.h>
+#endif
+#endif
+
+#endif /* _ARM_ARM_PARAM_H_ */
diff --git a/sys/arch/arm/include/pcb.h b/sys/arch/arm/include/pcb.h
new file mode 100644
index 00000000000..d52c0893513
--- /dev/null
+++ b/sys/arch/arm/include/pcb.h
@@ -0,0 +1,115 @@
+/* $OpenBSD: pcb.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: pcb.h,v 1.10 2003/10/13 21:46:39 scw Exp $ */
+
+/*
+ * Copyright (c) 2001 Matt Thomas <matt@3am-software.com>.
+ * Copyright (c) 1994 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the RiscBSD team.
+ * 4. The name "RiscBSD" nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARM_PCB_H_
+#define _ARM_PCB_H_
+
+#include <machine/frame.h>
+#include <machine/fp.h>
+
+#include <arm/pte.h>
+
+struct trapframe;
+
+struct pcb_arm32 {
+ paddr_t pcb32_pagedir; /* PT hooks */
+ pd_entry_t *pcb32_pl1vec; /* PTR to vector_base L1 entry*/
+ pd_entry_t pcb32_l1vec; /* Value to stuff on ctx sw */
+ u_int pcb32_dacr; /* Domain Access Control Reg */
+ void *pcb32_cstate; /* &pmap->pm_cstate */
+ /*
+ * WARNING!
+ * cpuswitch.S relies on pcb32_r8 being quad-aligned in struct pcb
+ * (due to the use of "strd" when compiled for XSCALE)
+ */
+ u_int pcb32_r8; /* used */
+ u_int pcb32_r9; /* used */
+ u_int pcb32_r10; /* used */
+ u_int pcb32_r11; /* used */
+ u_int pcb32_r12; /* used */
+ u_int pcb32_sp; /* used */
+ u_int pcb32_lr;
+ u_int pcb32_pc;
+ u_int pcb32_und_sp;
+};
+#define pcb_pagedir pcb_un.un_32.pcb32_pagedir
+#define pcb_pl1vec pcb_un.un_32.pcb32_pl1vec
+#define pcb_l1vec pcb_un.un_32.pcb32_l1vec
+#define pcb_dacr pcb_un.un_32.pcb32_dacr
+#define pcb_cstate pcb_un.un_32.pcb32_cstate
+
+struct pcb_arm26 {
+ struct switchframe *pcb26_sf;
+};
+#define pcb_sf pcb_un.un_26.pcb26_sf
+
+/*
+ * WARNING!
+ * See warning for struct pcb_arm32, above, before changing struct pcb!
+ */
+struct pcb {
+ u_int pcb_flags;
+#define PCB_OWNFPU 0x00000001
+#define PCB_NOALIGNFLT 0x00000002 /* For COMPAT_15/EXEC_AOUT */
+ struct trapframe *pcb_tf;
+ caddr_t pcb_onfault; /* On fault handler */
+ union {
+ struct pcb_arm32 un_32;
+ struct pcb_arm26 un_26;
+ } pcb_un;
+ struct fpe_sp_state pcb_fpstate; /* Floating Point state */
+};
+#define pcb_ff pcb_fpstate /* for arm26 */
+
+/*
+ * No additional data for core dumps.
+ */
+struct md_coredump {
+ int md_empty;
+};
+
+#ifdef _KERNEL
+#ifdef _KERNEL_OPT
+#include "opt_multiprocessor.h"
+#endif
+#ifdef MULTIPROCESSOR
+#define curpcb (curcpu()->ci_curpcb)
+#else
+extern struct pcb *curpcb;
+#endif
+#endif /* _KERNEL */
+
+#endif /* _ARM_PCB_H_ */
diff --git a/sys/arch/arm/include/pci_machdep.h b/sys/arch/arm/include/pci_machdep.h
new file mode 100644
index 00000000000..2d67a3d9120
--- /dev/null
+++ b/sys/arch/arm/include/pci_machdep.h
@@ -0,0 +1,103 @@
+/* $OpenBSD: pci_machdep.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: pci_machdep.h,v 1.2 2002/05/15 19:23:52 thorpej Exp $ */
+
+/*
+ * Modified for arm32 by Mark Brinicombe
+ *
+ * from: sys/arch/alpha/pci/pci_machdep.h
+ *
+ * Copyright (c) 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * Machine-specific definitions for PCI autoconfiguration.
+ */
+
+/*
+ * Types provided to machine-independent PCI code
+ */
+typedef struct arm32_pci_chipset *pci_chipset_tag_t;
+typedef u_long pcitag_t;
+typedef u_long pci_intr_handle_t;
+
+/*
+ * Forward declarations.
+ */
+struct pci_attach_args;
+
+/*
+ * arm32-specific PCI structure and type definitions.
+ * NOT TO BE USED DIRECTLY BY MACHINE INDEPENDENT CODE.
+ */
+struct arm32_pci_chipset {
+ void *pc_conf_v;
+ void (*pc_attach_hook)(struct device *,
+ struct device *, struct pcibus_attach_args *);
+ int (*pc_bus_maxdevs)(void *, int);
+ pcitag_t (*pc_make_tag)(void *, int, int, int);
+ void (*pc_decompose_tag)(void *, pcitag_t, int *,
+ int *, int *);
+ pcireg_t (*pc_conf_read)(void *, pcitag_t, int);
+ void (*pc_conf_write)(void *, pcitag_t, int, pcireg_t);
+
+ void *pc_intr_v;
+ int (*pc_intr_map)(struct pci_attach_args *,
+ pci_intr_handle_t *);
+ const char *(*pc_intr_string)(void *, pci_intr_handle_t);
+ const struct evcnt *(*pc_intr_evcnt)(void *, pci_intr_handle_t);
+ void *(*pc_intr_establish)(void *, pci_intr_handle_t,
+ int, int (*)(void *), void *, char *);
+ void (*pc_intr_disestablish)(void *, void *);
+};
+
+/*
+ * Functions provided to machine-independent PCI code.
+ */
+#define pci_attach_hook(p, s, pba) \
+ (*(pba)->pba_pc->pc_attach_hook)((p), (s), (pba))
+#define pci_bus_maxdevs(c, b) \
+ (*(c)->pc_bus_maxdevs)((c)->pc_conf_v, (b))
+#define pci_make_tag(c, b, d, f) \
+ (*(c)->pc_make_tag)((c)->pc_conf_v, (b), (d), (f))
+#define pci_decompose_tag(c, t, bp, dp, fp) \
+ (*(c)->pc_decompose_tag)((c)->pc_conf_v, (t), (bp), (dp), (fp))
+#define pci_conf_read(c, t, r) \
+ (*(c)->pc_conf_read)((c)->pc_conf_v, (t), (r))
+#define pci_conf_write(c, t, r, v) \
+ (*(c)->pc_conf_write)((c)->pc_conf_v, (t), (r), (v))
+#define pci_intr_map(pa, ihp) \
+ (*(pa)->pa_pc->pc_intr_map)((pa), (ihp))
+#define pci_intr_string(c, ih) \
+ (*(c)->pc_intr_string)((c)->pc_intr_v, (ih))
+#define pci_intr_evcnt(c, ih) \
+ (*(c)->pc_intr_evcnt)((c)->pc_intr_v, (ih))
+#define pci_intr_establish(c, ih, l, h, a, n) \
+ (*(c)->pc_intr_establish)((c)->pc_intr_v, (ih), (l), (h), (a), (n))
+#define pci_intr_disestablish(c, iv) \
+ (*(c)->pc_intr_disestablish)((c)->pc_intr_v, (iv))
+
+#define pci_enumerate_bus(sc, m, p) \
+ pci_enumerate_bus_generic((sc), (m), (p))
diff --git a/sys/arch/arm/include/pio.h b/sys/arch/arm/include/pio.h
new file mode 100644
index 00000000000..280c3c2d4c5
--- /dev/null
+++ b/sys/arch/arm/include/pio.h
@@ -0,0 +1,47 @@
+/* $OpenBSD: pio.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: pio.h,v 1.1 2001/02/23 21:23:48 reinoud Exp $ */
+
+/*
+ * Copyright 1997
+ * Digital Equipment Corporation. All rights reserved.
+ *
+ * This software is furnished under license and may be used and
+ * copied only in accordance with the following terms and conditions.
+ * Subject to these conditions, you may download, copy, install,
+ * use, modify and distribute this software in source and/or binary
+ * form. No title or ownership is transferred hereby.
+ *
+ * 1) Any source code used, modified or distributed must reproduce
+ * and retain this copyright notice and list of conditions as
+ * they appear in the source file.
+ *
+ * 2) No right is granted to use any trade name, trademark, or logo of
+ * Digital Equipment Corporation. Neither the "Digital Equipment
+ * Corporation" name nor any trademark or logo of Digital Equipment
+ * Corporation may be used to endorse or promote products derived
+ * from this software without the prior written permission of
+ * Digital Equipment Corporation.
+ *
+ * 3) This software is provided "AS-IS" and any express or implied
+ * warranties, including but not limited to, any implied warranties
+ * of merchantability, fitness for a particular purpose, or
+ * non-infringement are disclaimed. In no event shall DIGITAL be
+ * liable for any damages whatsoever, and in particular, DIGITAL
+ * shall not be liable for special, indirect, consequential, or
+ * incidental damages or damages for lost profits, loss of
+ * revenue or loss of use, whether such damages arise in contract,
+ * negligence, tort, under statute, in equity, at law or otherwise,
+ * even if advised of the possibility of such damage.
+ */
+
+#ifndef _ARM32_PIO_H_
+#define _ARM32_PIO_H_
+
+#include <machine/bus.h>
+
+extern struct bus_space isa_io_bs_tag;
+
+#define inb(port) bus_space_read_1( &isa_io_bs_tag, (bus_space_handle_t)isa_io_bs_tag.bs_cookie, (port))
+#define outb(port, byte) bus_space_write_1(&isa_io_bs_tag, (bus_space_handle_t)isa_io_bs_tag.bs_cookie, (port), (byte))
+
+#endif /* _ARM32_PIO_H_ */
diff --git a/sys/arch/arm/include/pmap.h b/sys/arch/arm/include/pmap.h
new file mode 100644
index 00000000000..0261d72c332
--- /dev/null
+++ b/sys/arch/arm/include/pmap.h
@@ -0,0 +1,595 @@
+/* $OpenBSD: pmap.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $ */
+
+/*
+ * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1994,1995 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM32_PMAP_H_
+#define _ARM32_PMAP_H_
+
+#ifdef _KERNEL
+
+#include <arm/cpuconf.h>
+#include <arm/pte.h>
+#ifndef _LOCORE
+#include <arm/cpufunc.h>
+#include <uvm/uvm_object.h>
+#endif
+
+/*
+ * a pmap describes a processes' 4GB virtual address space. this
+ * virtual address space can be broken up into 4096 1MB regions which
+ * are described by L1 PTEs in the L1 table.
+ *
+ * There is a line drawn at KERNEL_BASE. Everything below that line
+ * changes when the VM context is switched. Everything above that line
+ * is the same no matter which VM context is running. This is achieved
+ * by making the L1 PTEs for those slots above KERNEL_BASE reference
+ * kernel L2 tables.
+ *
+ * The basic layout of the virtual address space thus looks like this:
+ *
+ * 0xffffffff
+ * .
+ * .
+ * .
+ * KERNEL_BASE
+ * --------------------
+ * .
+ * .
+ * .
+ * 0x00000000
+ */
+
+/*
+ * The number of L2 descriptor tables which can be tracked by an l2_dtable.
+ * A bucket size of 16 provides for 16MB of contiguous virtual address
+ * space per l2_dtable. Most processes will, therefore, require only two or
+ * three of these to map their whole working set.
+ */
+#define L2_BUCKET_LOG2 4
+#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2)
+
+/*
+ * Given the above "L2-descriptors-per-l2_dtable" constant, the number
+ * of l2_dtable structures required to track all possible page descriptors
+ * mappable by an L1 translation table is given by the following constants:
+ */
+#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
+#define L2_SIZE (1 << L2_LOG2)
+
+#ifndef _LOCORE
+
+struct l1_ttable;
+struct l2_dtable;
+
+/*
+ * Track cache/tlb occupancy using the following structure
+ */
+union pmap_cache_state {
+ struct {
+ union {
+ u_int8_t csu_cache_b[2];
+ u_int16_t csu_cache;
+ } cs_cache_u;
+
+ union {
+ u_int8_t csu_tlb_b[2];
+ u_int16_t csu_tlb;
+ } cs_tlb_u;
+ } cs_s;
+ u_int32_t cs_all;
+};
+#define cs_cache_id cs_s.cs_cache_u.csu_cache_b[0]
+#define cs_cache_d cs_s.cs_cache_u.csu_cache_b[1]
+#define cs_cache cs_s.cs_cache_u.csu_cache
+#define cs_tlb_id cs_s.cs_tlb_u.csu_tlb_b[0]
+#define cs_tlb_d cs_s.cs_tlb_u.csu_tlb_b[1]
+#define cs_tlb cs_s.cs_tlb_u.csu_tlb
+
+/*
+ * Assigned to cs_all to force cacheops to work for a particular pmap
+ */
+#define PMAP_CACHE_STATE_ALL 0xffffffffu
+
+/*
+ * This structure is used by machine-dependent code to describe
+ * static mappings of devices, created at bootstrap time.
+ */
+struct pmap_devmap {
+ vaddr_t pd_va; /* virtual address */
+ paddr_t pd_pa; /* physical address */
+ psize_t pd_size; /* size of region */
+ vm_prot_t pd_prot; /* protection code */
+ int pd_cache; /* cache attributes */
+};
+
+/*
+ * The pmap structure itself
+ */
+struct pmap {
+ u_int8_t pm_domain;
+ boolean_t pm_remove_all;
+ struct l1_ttable *pm_l1;
+ union pmap_cache_state pm_cstate;
+ struct uvm_object pm_obj;
+#define pm_lock pm_obj.vmobjlock
+ struct l2_dtable *pm_l2[L2_SIZE];
+ struct pmap_statistics pm_stats;
+ LIST_ENTRY(pmap) pm_list;
+};
+
+typedef struct pmap *pmap_t;
+
+/*
+ * Physical / virtual address structure. In a number of places (particularly
+ * during bootstrapping) we need to keep track of the physical and virtual
+ * addresses of various pages
+ */
+typedef struct pv_addr {
+ SLIST_ENTRY(pv_addr) pv_list;
+ paddr_t pv_pa;
+ vaddr_t pv_va;
+} pv_addr_t;
+
+/*
+ * Determine various modes for PTEs (user vs. kernel, cacheable
+ * vs. non-cacheable).
+ */
+#define PTE_KERNEL 0
+#define PTE_USER 1
+#define PTE_NOCACHE 0
+#define PTE_CACHE 1
+#define PTE_PAGETABLE 2
+
+/*
+ * Flags that indicate attributes of pages or mappings of pages.
+ *
+ * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
+ * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
+ * pv_entry's for each page. They live in the same "namespace" so
+ * that we can clear multiple attributes at a time.
+ *
+ * Note the "non-cacheable" flag generally means the page has
+ * multiple mappings in a given address space.
+ */
+#define PVF_MOD 0x01 /* page is modified */
+#define PVF_REF 0x02 /* page is referenced */
+#define PVF_WIRED 0x04 /* mapping is wired */
+#define PVF_WRITE 0x08 /* mapping is writable */
+#define PVF_EXEC 0x10 /* mapping is executable */
+#define PVF_UNC 0x20 /* mapping is 'user' non-cacheable */
+#define PVF_KNC 0x40 /* mapping is 'kernel' non-cacheable */
+#define PVF_NC (PVF_UNC|PVF_KNC)
+
+/*
+ * Commonly referenced structures
+ */
+extern struct pmap kernel_pmap_store;
+extern int pmap_debug_level; /* Only exists if PMAP_DEBUG */
+
+/*
+ * Macros that we need to export
+ */
+#define pmap_kernel() (&kernel_pmap_store)
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
+
+#define pmap_is_modified(pg) \
+ (((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
+#define pmap_is_referenced(pg) \
+ (((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
+
+#define pmap_copy(dp, sp, da, l, sa) /* nothing */
+
+#define pmap_phys_address(ppn) (arm_ptob((ppn)))
+
+/*
+ * Functions that we need to export
+ */
+void pmap_procwr(struct proc *, vaddr_t, int);
+void pmap_remove_all(pmap_t);
+boolean_t pmap_extract(pmap_t, vaddr_t, paddr_t *);
+
+#define PMAP_NEED_PROCWR
+#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
+
+/* Functions we use internally. */
+void pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);
+
+int pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
+boolean_t pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
+boolean_t pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
+void pmap_set_pcb_pagedir(pmap_t, struct pcb *);
+
+void pmap_debug(int);
+void pmap_postinit(void);
+
+void vector_page_setprot(int);
+
+const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
+const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
+
+/* Bootstrapping routines. */
+void pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
+void pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
+vsize_t pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
+void pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
+void pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
+void pmap_devmap_register(const struct pmap_devmap *);
+
+/*
+ * Special page zero routine for use by the idle loop (no cache cleans).
+ */
+boolean_t pmap_pageidlezero(struct vm_page *);
+#define PMAP_PAGEIDLEZERO(pg) pmap_pageidlezero((pg))
+
+/*
+ * The current top of kernel VM
+ */
+extern vaddr_t pmap_curmaxkvaddr;
+
+/*
+ * Useful macros and constants
+ */
+
+/* Virtual address to page table entry */
+static __inline pt_entry_t *
+vtopte(vaddr_t va)
+{
+ pd_entry_t *pdep;
+ pt_entry_t *ptep;
+
+ if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
+ return (NULL);
+ return (ptep);
+}
+
+/*
+ * Virtual address to physical address
+ */
+static __inline paddr_t
+vtophys(vaddr_t va)
+{
+ paddr_t pa;
+
+ if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
+ return (0); /* XXXSCW: Panic? */
+
+ return (pa);
+}
+
+/*
+ * The new pmap ensures that page-tables are always mapping Write-Thru.
+ * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
+ * on every change.
+ *
+ * Unfortunately, not all CPUs have a write-through cache mode. So we
+ * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
+ * and if there is the chance for PTE syncs to be needed, we define
+ * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
+ * the code.
+ */
+extern int pmap_needs_pte_sync;
+#if defined(_KERNEL_OPT)
+/*
+ * StrongARM SA-1 caches do not have a write-through mode. So, on these,
+ * we need to do PTE syncs. If only SA-1 is configured, then evaluate
+ * this at compile time.
+ */
+#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
+#define PMAP_NEEDS_PTE_SYNC 1
+#define PMAP_INCLUDE_PTE_SYNC
+#elif (ARM_MMU_SA1 == 0)
+#define PMAP_NEEDS_PTE_SYNC 0
+#endif
+#endif /* _KERNEL_OPT */
+
+/*
+ * Provide a fallback in case we were not able to determine it at
+ * compile-time.
+ */
+#ifndef PMAP_NEEDS_PTE_SYNC
+#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync
+#define PMAP_INCLUDE_PTE_SYNC
+#endif
+
+#define PTE_SYNC(pte) \
+do { \
+ if (PMAP_NEEDS_PTE_SYNC) \
+ cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
+} while (/*CONSTCOND*/0)
+
+#define PTE_SYNC_RANGE(pte, cnt) \
+do { \
+ if (PMAP_NEEDS_PTE_SYNC) { \
+ cpu_dcache_wb_range((vaddr_t)(pte), \
+ (cnt) << 2); /* * sizeof(pt_entry_t) */ \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define l1pte_valid(pde) ((pde) != 0)
+#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S)
+#define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C)
+#define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F)
+
+#define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
+#define l2pte_valid(pte) ((pte) != 0)
+#define l2pte_pa(pte) ((pte) & L2_S_FRAME)
+#define l2pte_minidata(pte) (((pte) & \
+ (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
+ == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))
+
+/* L1 and L2 page table macros */
+#define pmap_pde_v(pde) l1pte_valid(*(pde))
+#define pmap_pde_section(pde) l1pte_section_p(*(pde))
+#define pmap_pde_page(pde) l1pte_page_p(*(pde))
+#define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde))
+
+#define pmap_pte_v(pte) l2pte_valid(*(pte))
+#define pmap_pte_pa(pte) l2pte_pa(*(pte))
+
+/* Size of the kernel part of the L1 page table */
+#define KERNEL_PD_SIZE \
+ (L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
+
+/************************* ARM MMU configuration *****************************/
+
+#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+void pmap_copy_page_generic(struct vm_page *, struct vm_page *);
+void pmap_zero_page_generic(struct vm_page *);
+
+void pmap_pte_init_generic(void);
+#if defined(CPU_ARM8)
+void pmap_pte_init_arm8(void);
+#endif
+#if defined(CPU_ARM9)
+void pmap_pte_init_arm9(void);
+#endif /* CPU_ARM9 */
+#if defined(CPU_ARM10)
+void pmap_pte_init_arm10(void);
+#endif /* CPU_ARM10 */
+#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
+
+#if ARM_MMU_SA1 == 1
+void pmap_pte_init_sa1(void);
+#endif /* ARM_MMU_SA1 == 1 */
+
+#if ARM_MMU_XSCALE == 1
+void pmap_copy_page_xscale(struct vm_page *, struct vm_page *);
+void pmap_zero_page_xscale(struct vm_page *);
+
+void pmap_pte_init_xscale(void);
+
+void xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
+
+#define PMAP_UAREA(va) pmap_uarea(va)
+void pmap_uarea(vaddr_t);
+#endif /* ARM_MMU_XSCALE == 1 */
+
+extern pt_entry_t pte_l1_s_cache_mode;
+extern pt_entry_t pte_l1_s_cache_mask;
+
+extern pt_entry_t pte_l2_l_cache_mode;
+extern pt_entry_t pte_l2_l_cache_mask;
+
+extern pt_entry_t pte_l2_s_cache_mode;
+extern pt_entry_t pte_l2_s_cache_mask;
+
+extern pt_entry_t pte_l1_s_cache_mode_pt;
+extern pt_entry_t pte_l2_l_cache_mode_pt;
+extern pt_entry_t pte_l2_s_cache_mode_pt;
+
+extern pt_entry_t pte_l2_s_prot_u;
+extern pt_entry_t pte_l2_s_prot_w;
+extern pt_entry_t pte_l2_s_prot_mask;
+
+extern pt_entry_t pte_l1_s_proto;
+extern pt_entry_t pte_l1_c_proto;
+extern pt_entry_t pte_l2_s_proto;
+
+extern void (*pmap_copy_page_func)(struct vm_page *, struct vm_page *);
+extern void (*pmap_zero_page_func)(struct vm_page *);
+
+#endif /* !_LOCORE */
+
+/*****************************************************************************/
+
+/*
+ * tell MI code that the cache is virtually-indexed *and* virtually-tagged.
+ */
+#define PMAP_CACHE_VIVT
+
+/*
+ * Definitions for MMU domains
+ */
+#define PMAP_DOMAINS 15 /* 15 'user' domains (0-14) */
+#define PMAP_DOMAIN_KERNEL 15 /* The kernel uses domain #15 */
+
+/*
+ * These macros define the various bit masks in the PTE.
+ *
+ * We use these macros since we use different bits on different processor
+ * models.
+ */
+#define L1_S_PROT_U (L1_S_AP(AP_U))
+#define L1_S_PROT_W (L1_S_AP(AP_W))
+#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W)
+
+#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C)
+#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))
+
+#define L2_L_PROT_U (L2_AP(AP_U))
+#define L2_L_PROT_W (L2_AP(AP_W))
+#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W)
+
+#define L2_L_CACHE_MASK_generic (L2_B|L2_C)
+#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))
+
+#define L2_S_PROT_U_generic (L2_AP(AP_U))
+#define L2_S_PROT_W_generic (L2_AP(AP_W))
+#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W)
+
+#define L2_S_PROT_U_xscale (L2_AP0(AP_U))
+#define L2_S_PROT_W_xscale (L2_AP0(AP_W))
+#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W)
+
+#define L2_S_CACHE_MASK_generic (L2_B|L2_C)
+#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))
+
+#define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP)
+#define L1_S_PROTO_xscale (L1_TYPE_S)
+
+#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2)
+#define L1_C_PROTO_xscale (L1_TYPE_C)
+
+#define L2_L_PROTO (L2_TYPE_L)
+
+#define L2_S_PROTO_generic (L2_TYPE_S)
+#define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS)
+
+/*
+ * User-visible names for the ones that vary with MMU class.
+ */
+
+#if ARM_NMMUS > 1
+/* More than one MMU class configured; use variables. */
+#define L2_S_PROT_U pte_l2_s_prot_u
+#define L2_S_PROT_W pte_l2_s_prot_w
+#define L2_S_PROT_MASK pte_l2_s_prot_mask
+
+#define L1_S_CACHE_MASK pte_l1_s_cache_mask
+#define L2_L_CACHE_MASK pte_l2_l_cache_mask
+#define L2_S_CACHE_MASK pte_l2_s_cache_mask
+
+#define L1_S_PROTO pte_l1_s_proto
+#define L1_C_PROTO pte_l1_c_proto
+#define L2_S_PROTO pte_l2_s_proto
+
+#define pmap_copy_page(s, d) (*pmap_copy_page_func)((s), (d))
+#define pmap_zero_page(d) (*pmap_zero_page_func)((d))
+#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+#define L2_S_PROT_U L2_S_PROT_U_generic
+#define L2_S_PROT_W L2_S_PROT_W_generic
+#define L2_S_PROT_MASK L2_S_PROT_MASK_generic
+
+#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic
+#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic
+#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic
+
+#define L1_S_PROTO L1_S_PROTO_generic
+#define L1_C_PROTO L1_C_PROTO_generic
+#define L2_S_PROTO L2_S_PROTO_generic
+
+#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d))
+#define pmap_zero_page(d) pmap_zero_page_generic((d))
+#elif ARM_MMU_XSCALE == 1
+#define L2_S_PROT_U L2_S_PROT_U_xscale
+#define L2_S_PROT_W L2_S_PROT_W_xscale
+#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale
+
+#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale
+#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale
+#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale
+
+#define L1_S_PROTO L1_S_PROTO_xscale
+#define L1_C_PROTO L1_C_PROTO_xscale
+#define L2_S_PROTO L2_S_PROTO_xscale
+
+#define pmap_copy_page(s, d) pmap_copy_page_xscale((s), (d))
+#define pmap_zero_page(d) pmap_zero_page_xscale((d))
+#endif /* ARM_NMMUS > 1 */
+
+/*
+ * These macros return various bits based on kernel/user and protection.
+ * Note that the compiler will usually fold these at compile time.
+ */
+#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
+ (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))
+
+#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
+ (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))
+
+#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
+ (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
+
+/*
+ * Macros to test if a mapping is mappable with an L1 Section mapping
+ * or an L2 Large Page mapping.
+ */
+#define L1_S_MAPPABLE_P(va, pa, size) \
+ ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
+
+#define L2_L_MAPPABLE_P(va, pa, size) \
+ ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
+
+/*
+ * Hooks for the pool allocator.
+ */
+#define POOL_VTOPHYS(va) vtophys((vaddr_t) (va))
+
+#endif /* _KERNEL */
+
+#endif /* _ARM32_PMAP_H_ */
diff --git a/sys/arch/arm/include/proc.h b/sys/arch/arm/include/proc.h
new file mode 100644
index 00000000000..077559e4d8d
--- /dev/null
+++ b/sys/arch/arm/include/proc.h
@@ -0,0 +1,51 @@
+/* $OpenBSD: proc.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: proc.h,v 1.5 2003/03/01 04:36:39 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1994 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the RiscBSD team.
+ * 4. The name "RiscBSD" nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARM32_PROC_H_
+#define _ARM32_PROC_H_
+
+/*
+ * Machine-dependent part of the proc structure for arm.
+ */
+
+struct trapframe;
+
+struct mdproc {
+ void (*md_syscall)(struct trapframe *, struct proc *, u_int32_t);
+ int pmc_enabled; /* bitfield of enabled counters */
+ void *pmc_state; /* port-specific pmc state */
+};
+
+#endif /* _ARM32_PROC_H_ */
diff --git a/sys/arch/arm/include/profile.h b/sys/arch/arm/include/profile.h
new file mode 100644
index 00000000000..a15f022bff5
--- /dev/null
+++ b/sys/arch/arm/include/profile.h
@@ -0,0 +1,107 @@
+/* $OpenBSD: profile.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: profile.h,v 1.5 2002/03/24 15:49:40 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 2001 Ben Harris
+ * Copyright (c) 1995-1996 Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define _MCOUNT_DECL void _mcount
+
+/*
+ * Cannot implement mcount in C as GCC will trash the ip register when it
+ * pushes a trapframe. Pity we cannot insert assembly before the function
+ * prologue.
+ */
+
+#ifdef __ELF__
+#define MCOUNT_ASM_NAME "__mcount"
+#ifdef PIC
+#define PLTSYM "(PLT)"
+#endif
+#else
+#define MCOUNT_ASM_NAME "mcount"
+#endif
+
+#ifndef PLTSYM
+#define PLTSYM
+#endif
+
+#define MCOUNT \
+ __asm__(".text"); \
+ __asm__(".align 0"); \
+ __asm__(".type " MCOUNT_ASM_NAME ",%function"); \
+ __asm__(".global " MCOUNT_ASM_NAME); \
+ __asm__(MCOUNT_ASM_NAME ":"); \
+ /* \
+ * Preserve registers that are trashed during mcount \
+ */ \
+ __asm__("stmfd sp!, {r0-r3, ip, lr}"); \
+ /* Check what mode we're in. EQ => 32, NE => 26 */ \
+ __asm__("teq r0, r0"); \
+ __asm__("teq pc, r15"); \
+ /* \
+ * find the return address for mcount, \
+ * and the return address for mcount's caller. \
+ * \
+ * frompcindex = pc pushed by call into self. \
+ */ \
+ __asm__("moveq r0, ip"); \
+ __asm__("bicne r0, ip, #0xfc000003"); \
+ /* \
+ * selfpc = pc pushed by mcount call \
+ */ \
+ __asm__("moveq r1, lr"); \
+ __asm__("bicne r1, lr, #0xfc000003"); \
+ /* \
+ * Call the real mcount code \
+ */ \
+ __asm__("bl " __STRING(_mcount) PLTSYM); \
+ /* \
+ * Restore registers that were trashed during mcount \
+ */ \
+ __asm__("ldmfd sp!, {r0-r3, lr, pc}");
+
+#ifdef _KERNEL
+#ifdef __PROG26
+extern int int_off_save(void);
+extern void int_restore(int);
+#define MCOUNT_ENTER (s = int_off_save())
+#define MCOUNT_EXIT int_restore(s)
+#else
+#include <arm/cpufunc.h>
+/*
+ * splhigh() and splx() are heavyweight, and call mcount(). Therefore
+ * we disabled interrupts (IRQ, but not FIQ) directly on the CPU.
+ *
+ * We're lucky that the CPSR and 's' both happen to be 'int's.
+ */
+#define MCOUNT_ENTER s = __set_cpsr_c(0x0080, 0x0080); /* kill IRQ */
+#define MCOUNT_EXIT __set_cpsr_c(0xffffffff, s); /* restore old value */
+#endif /* !acorn26 */
+#endif /* _KERNEL */
diff --git a/sys/arch/arm/include/pte.h b/sys/arch/arm/include/pte.h
new file mode 100644
index 00000000000..f263fffc6cd
--- /dev/null
+++ b/sys/arch/arm/include/pte.h
@@ -0,0 +1,246 @@
+/* $OpenBSD: pte.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: pte.h,v 1.6 2003/04/18 11:08:28 scw Exp $ */
+
+/*
+ * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_PTE_H_
+#define _ARM_PTE_H_
+
+/*
+ * The ARM MMU architecture was introduced with ARM v3 (previous ARM
+ * architecture versions used an optional off-CPU memory controller
+ * to perform address translation).
+ *
+ * The ARM MMU consists of a TLB and translation table walking logic.
+ * There is typically one TLB per memory interface (or, put another
+ * way, one TLB per software-visible cache).
+ *
+ * The ARM MMU is capable of mapping memory in the following chunks:
+ *
+ * 1M Sections (L1 table)
+ *
+ * 64K Large Pages (L2 table)
+ *
+ * 4K Small Pages (L2 table)
+ *
+ * 1K Tiny Pages (L2 table)
+ *
+ * There are two types of L2 tables: Coarse Tables and Fine Tables.
+ * Coarse Tables can map Large and Small Pages. Fine Tables can
+ * map Tiny Pages.
+ *
+ * Coarse Tables can define 4 Subpages within Large and Small pages.
+ * Subpages define different permissions for each Subpage within
+ * a Page.
+ *
+ * Coarse Tables are 1K in length. Fine tables are 4K in length.
+ *
+ * The Translation Table Base register holds the pointer to the
+ * L1 Table. The L1 Table is a 16K contiguous chunk of memory
+ * aligned to a 16K boundary. Each entry in the L1 Table maps
+ * 1M of virtual address space, either via a Section mapping or
+ * via an L2 Table.
+ *
+ * In addition, the Fast Context Switching Extension (FCSE) is available
+ * on some ARM v4 and ARM v5 processors. FCSE is a way of eliminating
+ * TLB/cache flushes on context switch by use of a smaller address space
+ * and a "process ID" that modifies the virtual address before being
+ * presented to the translation logic.
+ */
+
+#ifndef _LOCORE
+typedef uint32_t pd_entry_t; /* L1 table entry */
+typedef uint32_t pt_entry_t; /* L2 table entry */
+#endif /* _LOCORE */
+
+#define L1_S_SIZE 0x00100000 /* 1M */
+#define L1_S_OFFSET (L1_S_SIZE - 1)
+#define L1_S_FRAME (~L1_S_OFFSET)
+#define L1_S_SHIFT 20
+
+#define L2_L_SIZE 0x00010000 /* 64K */
+#define L2_L_OFFSET (L2_L_SIZE - 1)
+#define L2_L_FRAME (~L2_L_OFFSET)
+#define L2_L_SHIFT 16
+
+#define L2_S_SIZE 0x00001000 /* 4K */
+#define L2_S_OFFSET (L2_S_SIZE - 1)
+#define L2_S_FRAME (~L2_S_OFFSET)
+#define L2_S_SHIFT 12
+
+#define L2_T_SIZE 0x00000400 /* 1K */
+#define L2_T_OFFSET (L2_T_SIZE - 1)
+#define L2_T_FRAME (~L2_T_OFFSET)
+#define L2_T_SHIFT 10
+
+/*
+ * The NetBSD VM implementation only works on whole pages (4K),
+ * whereas the ARM MMU's Coarse tables are sized in terms of 1K
+ * (16K L1 table, 1K L2 table).
+ *
+ * So, we allocate L2 tables 4 at a time, thus yielding a 4K L2
+ * table.
+ */
+#define L1_ADDR_BITS 0xfff00000 /* L1 PTE address bits */
+#define L2_ADDR_BITS 0x000ff000 /* L2 PTE address bits */
+
+#define L1_TABLE_SIZE 0x4000 /* 16K */
+#define L2_TABLE_SIZE 0x1000 /* 4K */
+/*
+ * The new pmap deals with the 1KB coarse L2 tables by
+ * allocating them from a pool. Until every port has been converted,
+ * keep the old L2_TABLE_SIZE define lying around. Converted ports
+ * should use L2_TABLE_SIZE_REAL until then.
+ */
+#define L2_TABLE_SIZE_REAL 0x400 /* 1K */
+
+/*
+ * ARM L1 Descriptors
+ */
+
+#define L1_TYPE_INV 0x00 /* Invalid (fault) */
+#define L1_TYPE_C 0x01 /* Coarse L2 */
+#define L1_TYPE_S 0x02 /* Section */
+#define L1_TYPE_F 0x03 /* Fine L2 */
+#define L1_TYPE_MASK 0x03 /* mask of type bits */
+
+/* L1 Section Descriptor */
+#define L1_S_B 0x00000004 /* bufferable Section */
+#define L1_S_C 0x00000008 /* cacheable Section */
+#define L1_S_IMP 0x00000010 /* implementation defined */
+#define L1_S_DOM(x) ((x) << 5) /* domain */
+#define L1_S_DOM_MASK L1_S_DOM(0xf)
+#define L1_S_AP(x) ((x) << 10) /* access permissions */
+#define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */
+
+#define L1_S_XSCALE_P 0x00000200 /* ECC enable for this section */
+#define L1_S_XSCALE_TEX(x) ((x) << 12) /* Type Extension */
+
+/* L1 Coarse Descriptor */
+#define L1_C_IMP0 0x00000004 /* implementation defined */
+#define L1_C_IMP1 0x00000008 /* implementation defined */
+#define L1_C_IMP2 0x00000010 /* implementation defined */
+#define L1_C_DOM(x) ((x) << 5) /* domain */
+#define L1_C_DOM_MASK L1_C_DOM(0xf)
+#define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */
+
+#define L1_C_XSCALE_P 0x00000200 /* ECC enable for this section */
+
+/* L1 Fine Descriptor */
+#define L1_F_IMP0 0x00000004 /* implementation defined */
+#define L1_F_IMP1 0x00000008 /* implementation defined */
+#define L1_F_IMP2 0x00000010 /* implementation defined */
+#define L1_F_DOM(x) ((x) << 5) /* domain */
+#define L1_F_DOM_MASK L1_F_DOM(0xf)
+#define L1_F_ADDR_MASK 0xfffff000 /* phys address of L2 Table */
+
+#define L1_F_XSCALE_P 0x00000200 /* ECC enable for this section */
+
+/*
+ * ARM L2 Descriptors
+ */
+
+#define L2_TYPE_INV 0x00 /* Invalid (fault) */
+#define L2_TYPE_L 0x01 /* Large Page */
+#define L2_TYPE_S 0x02 /* Small Page */
+#define L2_TYPE_T 0x03 /* Tiny Page */
+#define L2_TYPE_MASK 0x03 /* mask of type bits */
+
+ /*
+ * This L2 Descriptor type is available on XScale processors
+ * when using a Coarse L1 Descriptor. The Extended Small
+ * Descriptor has the same format as the XScale Tiny Descriptor,
+ * but describes a 4K page, rather than a 1K page.
+ */
+#define L2_TYPE_XSCALE_XS 0x03 /* XScale Extended Small Page */
+
+#define L2_B 0x00000004 /* Bufferable page */
+#define L2_C 0x00000008 /* Cacheable page */
+#define L2_AP0(x) ((x) << 4) /* access permissions (sp 0) */
+#define L2_AP1(x) ((x) << 6) /* access permissions (sp 1) */
+#define L2_AP2(x) ((x) << 8) /* access permissions (sp 2) */
+#define L2_AP3(x) ((x) << 10) /* access permissions (sp 3) */
+#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
+
+#define L2_XSCALE_L_TEX(x) ((x) << 12) /* Type Extension */
+#define L2_XSCALE_T_TEX(x) ((x) << 6) /* Type Extension */
+
+/*
+ * Access Permissions for L1 and L2 Descriptors.
+ */
+#define AP_W 0x01 /* writable */
+#define AP_U 0x02 /* user */
+
+/*
+ * Short-hand for common AP_* constants.
+ *
+ * Note: These values assume the S (System) bit is set and
+ * the R (ROM) bit is clear in CP15 register 1.
+ */
+#define AP_KR 0x00 /* kernel read */
+#define AP_KRW 0x01 /* kernel read/write */
+#define AP_KRWUR 0x02 /* kernel read/write usr read */
+#define AP_KRWURW 0x03 /* kernel read/write usr read/write */
+
+/*
+ * Domain Types for the Domain Access Control Register.
+ */
+#define DOMAIN_FAULT 0x00 /* no access */
+#define DOMAIN_CLIENT 0x01 /* client */
+#define DOMAIN_RESERVED 0x02 /* reserved */
+#define DOMAIN_MANAGER 0x03 /* manager */
+
+/*
+ * Type Extension bits for XScale processors.
+ *
+ * Behavior of C and B when X == 0:
+ *
+ * C B Cacheable Bufferable Write Policy Line Allocate Policy
+ * 0 0 N N - -
+ * 0 1 N Y - -
+ * 1 0 Y Y Write-through Read Allocate
+ * 1 1 Y Y Write-back Read Allocate
+ *
+ * Behavior of C and B when X == 1:
+ * C B Cacheable Bufferable Write Policy Line Allocate Policy
+ * 0 0 - - - - DO NOT USE
+ * 0 1 N Y - -
+ * 1 0 Mini-Data - - -
+ * 1 1 Y Y Write-back R/W Allocate
+ */
+#define TEX_XSCALE_X 0x01 /* X modifies C and B */
+
+#endif /* _ARM_PTE_H_ */
diff --git a/sys/arch/arm/include/ptrace.h b/sys/arch/arm/include/ptrace.h
new file mode 100644
index 00000000000..d0a556e41bc
--- /dev/null
+++ b/sys/arch/arm/include/ptrace.h
@@ -0,0 +1,44 @@
+/* $OpenBSD: ptrace.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: ptrace.h,v 1.2 2001/10/19 00:18:20 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1995 Frank Lancaster
+ * Copyright (c) 1995 Tools GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * arm-dependent ptrace definitions
+ */
+#ifndef _KERNEL
+#define PT_STEP (PT_FIRSTMACH + 0) /* Not implemented */
+#endif
+#define PT_GETREGS (PT_FIRSTMACH + 1)
+#define PT_SETREGS (PT_FIRSTMACH + 2)
+#define PT_GETFPREGS (PT_FIRSTMACH + 3)
+#define PT_SETFPREGS (PT_FIRSTMACH + 4)
diff --git a/sys/arch/arm/include/reg.h b/sys/arch/arm/include/reg.h
new file mode 100644
index 00000000000..28c1b43b26e
--- /dev/null
+++ b/sys/arch/arm/include/reg.h
@@ -0,0 +1,55 @@
+/* $OpenBSD: reg.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: reg.h,v 1.1 2001/02/11 14:51:55 bjh21 Exp $ */
+
+/*
+ * Copyright (C) 1994, 1995 Frank Lancaster
+ * Copyright (C) 1994, 1995 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * @(#)reg.h 5.5 (Berkeley) 1/18/91
+ */
+
+#ifndef _ARM32_REG_H_
+#define _ARM32_REG_H_
+
+#include <machine/fp.h>
+
+struct reg {
+ unsigned int r[13];
+ unsigned int r_sp;
+ unsigned int r_lr;
+ unsigned int r_pc;
+ unsigned int r_cpsr;
+};
+
+struct fpreg {
+ unsigned int fpr_fpsr;
+ fp_reg_t fpr[8];
+};
+
+#endif /* !_ARM32_REG_H_ */
diff --git a/sys/arch/arm/include/reloc.h b/sys/arch/arm/include/reloc.h
new file mode 100644
index 00000000000..f53ab524f26
--- /dev/null
+++ b/sys/arch/arm/include/reloc.h
@@ -0,0 +1,53 @@
+/* Processor specific relocation types */
+
+#define R_ARM_NONE 0
+#define R_ARM_PC24 1
+#define R_ARM_ABS32 2
+#define R_ARM_REL32 3
+#define R_ARM_PC13 4
+#define R_ARM_ABS16 5
+#define R_ARM_ABS12 6
+#define R_ARM_THM_ABS5 7
+#define R_ARM_ABS8 8
+#define R_ARM_SBREL32 9
+#define R_ARM_THM_PC22 10
+#define R_ARM_THM_PC8 11
+#define R_ARM_AMP_VCALL9 12
+#define R_ARM_SWI24 13
+#define R_ARM_THM_SWI8 14
+#define R_ARM_XPC25 15
+#define R_ARM_THM_XPC22 16
+
+/* 17-31 are reserved for ARM Linux. */
+#define R_ARM_COPY 20
+#define R_ARM_GLOB_DAT 21
+#define R_ARM_JUMP_SLOT 22
+#define R_ARM_RELATIVE 23
+#define R_ARM_GOTOFF 24
+#define R_ARM_GOTPC 25
+#define R_ARM_GOT32 26
+#define R_ARM_PLT32 27
+
+#define R_ARM_ALU_PCREL_7_0 32
+#define R_ARM_ALU_PCREL_15_8 33
+#define R_ARM_ALU_PCREL_23_15 34
+#define R_ARM_ALU_SBREL_11_0 35
+#define R_ARM_ALU_SBREL_19_12 36
+#define R_ARM_ALU_SBREL_27_20 37
+
+/* 96-111 are reserved to G++. */
+#define R_ARM_GNU_VTENTRY 100
+#define R_ARM_GNU_VTINHERIT 101
+#define R_ARM_THM_PC11 102
+#define R_ARM_THM_PC9 103
+
+/* 112-127 are reserved for private experiments. */
+
+#define R_ARM_RXPC25 249
+#define R_ARM_RSBREL32 250
+#define R_ARM_THM_RPC22 251
+#define R_ARM_RREL32 252
+#define R_ARM_RABS32 253
+#define R_ARM_RPC24 254
+#define R_ARM_RBASE 255
+
diff --git a/sys/arch/arm/include/rtc.h b/sys/arch/arm/include/rtc.h
new file mode 100644
index 00000000000..6c6a1666d52
--- /dev/null
+++ b/sys/arch/arm/include/rtc.h
@@ -0,0 +1,84 @@
+/* $OpenBSD: rtc.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: rtc.h,v 1.1 2001/02/23 21:23:50 reinoud Exp $ */
+
+/*
+ * Copyright (c) 1994 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * rtc.h
+ *
+ * Header file for RTC / CMOS stuff
+ *
+ * Created : 13/10/94
+ * Updated : 15/07/2000
+ *
+ * Based on kate/display/iiccontrol.c
+ */
+
+/*
+ * IIC addresses for RTC chip
+ * Two PCF8583 chips are supported on the IIC bus
+ */
+
+#define IIC_PCF8583_MASK 0xfc
+#define IIC_PCF8583_ADDR 0xa0
+
+#define RTC_Write (IIC_PCF8583_ADDR | IIC_WRITE)
+#define RTC_Read (IIC_PCF8583_ADDR | IIC_READ)
+
+typedef struct {
+ u_char rtc_micro;
+ u_char rtc_centi;
+ u_char rtc_sec;
+ u_char rtc_min;
+ u_char rtc_hour;
+ u_char rtc_day;
+ u_char rtc_mon;
+ u_char rtc_year;
+ u_char rtc_cen;
+} rtc_t;
+
+#define RTC_ADDR_CHECKSUM 0x3f
+#define RTC_ADDR_BOOTOPTS 0x90
+#define RTC_ADDR_REBOOTCNT 0x91
+#define RTC_ADDR_YEAR 0xc0
+#define RTC_ADDR_CENT 0xc1
+
+#ifdef _KERNEL
+int cmos_read __P((int));
+int cmos_write __P((int, int));
+#endif /* _KERNEL */
+
+/* End of rtc.h */
diff --git a/sys/arch/arm/include/setjmp.h b/sys/arch/arm/include/setjmp.h
new file mode 100644
index 00000000000..f20cab2e929
--- /dev/null
+++ b/sys/arch/arm/include/setjmp.h
@@ -0,0 +1,87 @@
+/* $OpenBSD: setjmp.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: setjmp.h,v 1.2 2001/08/25 14:45:59 bjh21 Exp $ */
+
+/*
+ * machine/setjmp.h: machine dependent setjmp-related information.
+ */
+
+#ifdef __ELF__
+#define _JBLEN 64 /* size, in longs, of a jmp_buf */
+#else
+#define _JBLEN 29 /* size, in longs, of a jmp_buf */
+#endif
+
+/*
+ * NOTE: The internal structure of a jmp_buf is *PRIVATE*
+ * This information is provided as there is software
+ * that fiddles with this to obtain the stack pointer
+ * (yes really! and it's commercial!).
+ *
+ * Description of the setjmp buffer
+ *
+ * word 0 magic number (dependent on creator)
+ * 1 - 3 f4 fp register 4
+ * 4 - 6 f5 fp register 5
+ * 7 - 9 f6 fp register 6
+ * 10 - 12 f7 fp register 7
+ * 13 fpsr fp status register
+ * 14 r4 register 4
+ * 15 r5 register 5
+ * 16 r6 register 6
+ * 17 r7 register 7
+ * 18 r8 register 8
+ * 19 r9 register 9
+ * 20 r10 register 10 (sl)
+ * 21 r11 register 11 (fp)
+ * 22 r12 register 12 (ip)
+ * 23 r13 register 13 (sp)
+ * 24 r14 register 14 (lr)
+ * 25 signal mask (dependent on magic)
+ * 26 (con't)
+ * 27 (con't)
+ * 28 (con't)
+ *
+ * The magic number identifies the jmp_buf and
+ * how the buffer was created, as well as providing
+ * a sanity check.
+ *
+ * A side note I should mention - please do not tamper
+ * with the floating point fields. While they are
+ * always saved and restored at the moment, this cannot
+ * be guaranteed, especially if the compiler happens
+ * to be generating soft-float code, in which case no fp
+ * registers will be used.
+ *
+ * Whilst this can be seen as encouraging people to
+ * use the setjmp buffer in this way, I think that it
+ * is for the best: then, if changes occur, compiles will
+ * break rather than just having new builds falling over
+ * mysteriously.
+ */
+
+#define _JB_MAGIC__SETJMP 0x4278f500
+#define _JB_MAGIC_SETJMP 0x4278f501
+
+/* Valid for all jmp_buf's */
+
+#define _JB_MAGIC 0
+#define _JB_REG_F4 1
+#define _JB_REG_F5 4
+#define _JB_REG_F6 7
+#define _JB_REG_F7 10
+#define _JB_REG_FPSR 13
+#define _JB_REG_R4 14
+#define _JB_REG_R5 15
+#define _JB_REG_R6 16
+#define _JB_REG_R7 17
+#define _JB_REG_R8 18
+#define _JB_REG_R9 19
+#define _JB_REG_R10 20
+#define _JB_REG_R11 21
+#define _JB_REG_R12 22
+#define _JB_REG_R13 23
+#define _JB_REG_R14 24
+
+/* Only valid with the _JB_MAGIC_SETJMP magic */
+
+#define _JB_SIGMASK 25
diff --git a/sys/arch/arm/include/signal.h b/sys/arch/arm/include/signal.h
new file mode 100644
index 00000000000..d0d5122166d
--- /dev/null
+++ b/sys/arch/arm/include/signal.h
@@ -0,0 +1,134 @@
+/* $OpenBSD: signal.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: signal.h,v 1.5 2003/10/18 17:57:21 briggs Exp $ */
+
+/*
+ * Copyright (c) 1994-1996 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * signal.h
+ *
+ * Architecture dependent signal types and structures
+ *
+ * Created : 30/09/94
+ */
+
+#ifndef _ARM32_SIGNAL_H_
+#define _ARM32_SIGNAL_H_
+
+#ifndef _LOCORE
+typedef int sig_atomic_t;
+#endif
+
+#define __HAVE_SIGINFO
+
+
+#ifndef _LOCORE
+/*
+ * Information pushed on stack when a signal is delivered.
+ * This is used by the kernel to restore state following
+ * execution of the signal handler. It is also made available
+ * to the handler to allow it to restore state properly if
+ * a non-standard exit is performed.
+ */
+
+struct sigcontext {
+ int sc_onstack; /* sigstack state to restore */
+ int sc_mask; /* signal mask to restore (old style) */
+
+ unsigned int sc_spsr;
+ unsigned int sc_r0;
+ unsigned int sc_r1;
+ unsigned int sc_r2;
+ unsigned int sc_r3;
+ unsigned int sc_r4;
+ unsigned int sc_r5;
+ unsigned int sc_r6;
+ unsigned int sc_r7;
+ unsigned int sc_r8;
+ unsigned int sc_r9;
+ unsigned int sc_r10;
+ unsigned int sc_r11;
+ unsigned int sc_r12;
+ unsigned int sc_usr_sp;
+ unsigned int sc_usr_lr;
+ unsigned int sc_svc_lr;
+ unsigned int sc_pc;
+
+#if 0
+ sigset_t sc_mask; /* signal mask to restore (new style) */
+#endif
+};
+
+#endif /* !_LOCORE */
+
+/* Signals codes */
+
+/*
+ * SIGFPE codes
+ *
+ * see ieeefp.h for definition of FP exception codes
+ */
+
+#define SIG_CODE_FPE_CODE_MASK 0x00000f00 /* Mask for exception code */
+#define SIG_CODE_FPE_CODE_SHIFT 8 /* Shift for exception code */
+#define SIG_CODE_FPE_TYPE_MASK 0x000000ff /* Mask for specific code */
+
+/*
+ * SIGILL codes
+ *
+ * the signal code is the instruction that raised the signal
+ */
+
+/*
+ * SIGBUS and SIGSEGV codes
+ *
+ * The signal code is combination of the fault address and the fault code.
+ *
+ * The fault code is the coproc #15 fault status code
+ *
+ * The exception to this is a SIGBUS or SIGSEGV from a prefetch abort.
+ * In this case the fault status code is not valid so the TYPE_MASK
+ * should be treated as undefined (in practice it is the bottom 4 bits
+ * of the fault address).
+ */
+
+#define SIG_CODE_BUS_ADDR_MASK 0xfffffff0
+#define SIG_CODE_BUS_TYPE_MASK 0x0000000f
+#define SIG_CODE_SEGV_ADDR_MASK SIG_CODE_BUS_ADDR_MASK
+#define SIG_CODE_SEGV_TYPE_MASK SIG_CODE_BUS_TYPE_MASK
+
+#endif /* !_ARM32_SIGNAL_H_ */
+
+/* End of signal.h */
diff --git a/sys/arch/arm/include/softintr.h b/sys/arch/arm/include/softintr.h
new file mode 100644
index 00000000000..fdd618dc841
--- /dev/null
+++ b/sys/arch/arm/include/softintr.h
@@ -0,0 +1,106 @@
+/* $OpenBSD: softintr.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: softintr.h,v 1.1 2002/01/29 22:54:14 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_SOFTINTR_H_
+#define _ARM_SOFTINTR_H_
+
+#ifdef _KERNEL
+
+/*
+ * Generic software interrupt support for all ARM platforms.
+ *
+ * To use this code, include <arm/softintr.h> from your platform's
+ * <machine/intr.h>.
+ */
+
+#define SI_SOFT 0 /* for IPL_SOFT */
+#define SI_SOFTCLOCK 1 /* for IPL_SOFTCLOCK */
+#define SI_SOFTNET 2 /* for IPL_SOFTNET */
+#define SI_SOFTSERIAL 3 /* for IPL_SOFTSERIAL */
+
+#define SI_NQUEUES 4
+
+#define SI_QUEUENAMES { \
+ "generic", \
+ "clock", \
+ "net", \
+ "serial", \
+}
+
+struct soft_intrhand {
+ TAILQ_ENTRY(soft_intrhand) sih_list;
+ void (*sih_func)(void *);
+ void *sih_arg;
+ struct soft_intrq *sih_siq;
+ int sih_pending;
+};
+
+struct soft_intrq {
+ TAILQ_HEAD(, soft_intrhand) siq_list;
+ struct evcnt siq_evcnt;
+ int siq_si;
+};
+
+void *softintr_establish(int, void (*)(void *), void *);
+void softintr_disestablish(void *);
+void softintr_init(void);
+void softintr_dispatch(int);
+
+#define softintr_schedule(arg) \
+do { \
+ struct soft_intrhand *__sih = (arg); \
+ struct soft_intrq *__siq = __sih->sih_siq; \
+ int __s; \
+ \
+ __s = splhigh(); \
+ if (__sih->sih_pending == 0) { \
+ TAILQ_INSERT_TAIL(&__siq->siq_list, __sih, sih_list); \
+ __sih->sih_pending = 1; \
+ _setsoftintr(__siq->siq_si); \
+ } \
+ splx(__s); \
+} while (/*CONSTCOND*/0)
+
+/* XXX For legacy software interrupts. */
+extern struct soft_intrhand *softnet_intrhand;
+
+#define setsoftnet() softintr_schedule(softnet_intrhand)
+
+#endif /* _KERNEL */
+
+#endif /* _ARM_SOFTINTR_H_ */
diff --git a/sys/arch/arm/include/spinlock.h b/sys/arch/arm/include/spinlock.h
new file mode 100644
index 00000000000..5b68222836d
--- /dev/null
+++ b/sys/arch/arm/include/spinlock.h
@@ -0,0 +1,10 @@
+/* $OpenBSD: spinlock.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+
+#ifndef _ARM_SPINLOCK_H_
+#define _ARM_SPINLOCK_H_
+
+#define _SPINLOCK_UNLOCKED (0)
+#define _SPINLOCK_LOCKED (1)
+typedef int _spinlock_lock_t;
+
+#endif
diff --git a/sys/arch/arm/include/swi.h b/sys/arch/arm/include/swi.h
new file mode 100644
index 00000000000..19a0145ab92
--- /dev/null
+++ b/sys/arch/arm/include/swi.h
@@ -0,0 +1,23 @@
+/* $OpenBSD: swi.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: swi.h,v 1.1 2002/01/13 15:03:06 bjh21 Exp $ */
+
+/*
+ * This file is in the Public Domain.
+ * Ben Harris, 2002.
+ */
+
+#ifndef _ARM_SWI_H_
+#define _ARM_SWI_H_
+
+#define SWI_OS_MASK 0xf00000
+#define SWI_OS_RISCOS 0x000000
+#define SWI_OS_RISCIX 0x800000
+#define SWI_OS_LINUX 0x900000
+#define SWI_OS_NETBSD 0xa00000
+#define SWI_OS_ARM 0xf00000
+
+#define SWI_IMB 0xf00000
+#define SWI_IMBrange 0xf00001
+
+#endif
+
diff --git a/sys/arch/arm/include/sysarch.h b/sys/arch/arm/include/sysarch.h
new file mode 100644
index 00000000000..b379abbecd3
--- /dev/null
+++ b/sys/arch/arm/include/sysarch.h
@@ -0,0 +1,61 @@
+/* $OpenBSD: sysarch.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: sysarch.h,v 1.4 2002/03/30 06:23:39 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1996-1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARM_SYSARCH_H_
+#define _ARM_SYSARCH_H_
+
+#include <sys/cdefs.h>
+
+/*
+ * Architecture specific syscalls (arm)
+ */
+
+#define ARM_SYNC_ICACHE 0
+#define ARM_DRAIN_WRITEBUF 1
+
+struct arm_sync_icache_args {
+ u_int32_t addr; /* Virtual start address */
+ size_t len; /* Region size */
+};
+
+#ifndef _KERNEL
+__BEGIN_DECLS
+int arm_sync_icache __P((u_int addr, int len));
+int arm_drain_writebuf __P((void));
+int sysarch __P((int, void *));
+__END_DECLS
+#endif
+
+#endif /* !_ARM_SYSARCH_H_ */
diff --git a/sys/arch/arm/include/trap.h b/sys/arch/arm/include/trap.h
new file mode 100644
index 00000000000..d6346a42ef4
--- /dev/null
+++ b/sys/arch/arm/include/trap.h
@@ -0,0 +1,71 @@
+/* $OpenBSD: trap.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: trap.h,v 1.4 2003/04/28 01:54:50 briggs Exp $ */
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * trap.h
+ *
+ * Various trap definitions
+ */
+
+/*
+ * Instructions used for breakpoints.
+ *
+ * These are undefined instructions.
+ * Technically the userspace breakpoint could be a SWI but we want to
+ * keep this the same as IPKDB which needs an undefined instruction as
+ * a break point.
+ *
+ * Ideally ARM would define several standard instruction sequences for
+ * use as breakpoints.
+ *
+ * The BKPT instruction isn't much use to us, since its behaviour is
+ * unpredictable on ARMv3 and lower.
+ *
+ * The ARM ARM says that for maximum compatibility, we should use undefined
+ * instructions that look like 0x.7f...f. .
+ */
+
+#define GDB_BREAKPOINT 0xe6000011 /* Used by GDB 4.x */
+#define IPKDB_BREAKPOINT 0xe6000010 /* Used by IPKDB */
+#define GDB5_BREAKPOINT 0xe7ffdefe /* Used by GDB 5.0 */
+#define KERNEL_BREAKPOINT 0xe7ffffff /* Used by DDB */
+
+#define KBPT_ASM ".word 0xe7ffdefe"
+
+#define USER_BREAKPOINT GDB_BREAKPOINT
+
+#define T_FAULT 1
+
+/* End of trap.h */
diff --git a/sys/arch/arm/include/types.h b/sys/arch/arm/include/types.h
new file mode 100644
index 00000000000..d14f21478f0
--- /dev/null
+++ b/sys/arch/arm/include/types.h
@@ -0,0 +1,119 @@
+/* $OpenBSD: types.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: types.h,v 1.4 2002/02/28 03:17:25 simonb Exp $ */
+
+/*
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)types.h 7.5 (Berkeley) 3/9/91
+ */
+
+#ifndef _ARM_TYPES_H_
+#define _ARM_TYPES_H_
+
+/* OpenBSD only supports arm32 */
+#ifdef _KERNEL
+#define __PROG32 /* indicate 32-bit mode */
+#endif
+
+#include <sys/cdefs.h>
+
+#if defined(_KERNEL)
+typedef struct label_t { /* Used by setjmp & longjmp */
+ int val[11];
+} label_t;
+#endif
+
+/* NB: This should probably be if defined(_KERNEL) */
+#if !defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)
+typedef unsigned long vm_offset_t;
+typedef unsigned long vm_size_t;
+
+typedef vm_offset_t paddr_t;
+typedef vm_size_t psize_t;
+typedef vm_offset_t vaddr_t;
+typedef vm_size_t vsize_t;
+#endif
+
+#define __HAVE_MINIMAL_EMUL
+
+/*
+ * Basic integral types. Omit the typedef if
+ * not possible for a machine/compiler combination.
+ */
+#define __BIT_TYPES_DEFINED__
+typedef __signed char int8_t;
+typedef unsigned char u_int8_t;
+typedef unsigned char uint8_t;
+typedef short int16_t;
+typedef unsigned short u_int16_t;
+typedef unsigned short uint16_t;
+typedef int int32_t;
+typedef unsigned int u_int32_t;
+typedef unsigned int uint32_t;
+/* LONGLONG */
+typedef long long int64_t;
+/* LONGLONG */
+typedef unsigned long long u_int64_t;
+/* LONGLONG */
+typedef unsigned long long uint64_t;
+
+typedef int32_t register_t;
+
+/*
+ * 7.18.1 Integer types
+ */
+
+/* 7.18.1.1 Exact-width integer types */
+
+typedef __signed char __int8_t;
+typedef unsigned char __uint8_t;
+typedef short int __int16_t;
+typedef unsigned short int __uint16_t;
+typedef int __int32_t;
+typedef unsigned int __uint32_t;
+#ifdef __COMPILER_INT64__
+typedef __COMPILER_INT64__ __int64_t;
+typedef __COMPILER_UINT64__ __uint64_t;
+#else
+/* LONGLONG */
+typedef long long int __int64_t;
+/* LONGLONG */
+typedef unsigned long long int __uint64_t;
+#endif
+
+
+/* 7.18.1.4 Integer types capable of holding object pointers */
+
+typedef long int __intptr_t;
+typedef unsigned long int __uintptr_t;
+
+#endif /* _ARM_TYPES_H_ */
diff --git a/sys/arch/arm/include/undefined.h b/sys/arch/arm/include/undefined.h
new file mode 100644
index 00000000000..dfb22764ed7
--- /dev/null
+++ b/sys/arch/arm/include/undefined.h
@@ -0,0 +1,89 @@
+/* $OpenBSD: undefined.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: undefined.h,v 1.4 2001/12/20 01:20:23 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1995-1996 Mark Brinicombe.
+ * Copyright (c) 1995 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * undefined.h
+ *
+ * Undefined instruction types, symbols and prototypes
+ *
+ * Created : 08/02/95
+ */
+
+
+#ifndef _ARM_UNDEFINED_H_
+#define _ARM_UNDEFINED_H_
+#ifdef _KERNEL
+
+#include <sys/queue.h>
+
+typedef int (*undef_handler_t) __P((unsigned int, unsigned int, trapframe_t *, int));
+
+#define FP_COPROC 1
+#define FP_COPROC2 2
+#define MAX_COPROCS 16
+
+/* Prototypes for undefined.c */
+
+void *install_coproc_handler __P((int, undef_handler_t));
+void remove_coproc_handler __P((void *));
+void undefined_init __P((void));
+
+/*
+ * XXX Stuff below here is for use before malloc() is available. Most code
+ * shouldn't use it.
+ */
+
+struct undefined_handler {
+ LIST_ENTRY(undefined_handler) uh_link;
+ undef_handler_t uh_handler;
+};
+
+/*
+ * Handlers installed using install_coproc_handler_static shouldn't be
+ * removed.
+ */
+void install_coproc_handler_static __P((int, struct undefined_handler *));
+
+/* Calls up to undefined.c from trap handlers */
+void undefinedinstruction(struct trapframe *);
+
+#endif
+
+/* End of undefined.h */
+
+#endif /* _ARM_UNDEFINED_H_ */
diff --git a/sys/arch/arm/include/vmparam.h b/sys/arch/arm/include/vmparam.h
new file mode 100644
index 00000000000..3a6aebc4b26
--- /dev/null
+++ b/sys/arch/arm/include/vmparam.h
@@ -0,0 +1,152 @@
+/* $OpenBSD: vmparam.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: vmparam.h,v 1.18 2003/05/21 18:04:44 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_ARM_VMPARAM_H_
+#define _ARM_ARM_VMPARAM_H_
+
+#ifdef _KERNEL
+
+/*
+ * Virtual Memory parameters common to all arm32 platforms.
+ */
+
+#include <sys/lock.h> /* struct simplelock */
+#include <arm/pte.h> /* pt_entry_t */
+#endif /* _KERNEL */
+
+#define USRTEXT VM_MIN_ADDRESS
+#define USRSTACK VM_MAXUSER_ADDRESS
+#define KERNBASE VM_MAXUSER_ADDRESS
+
+/*
+ * The time for a process to be blocked before being very swappable.
+ * This is a number of seconds which the system takes as being a non-trivial
+ * amount of real time. You probably shouldn't change this;
+ * it is used in subtle ways (fractions and multiples of it are, that is, like
+ * half of a ``long time'', almost a long time, etc.)
+ * It is related to human patience and other factors which don't really
+ * change over time.
+ */
+#define MAXSLP 20
+
+/*
+ * Note that MAXTSIZ can't be larger than 32M, otherwise the compiler
+ * would have to be changed to not generate "bl" instructions.
+ */
+#define MAXTSIZ (16*1024*1024) /* max text size */
+#ifndef DFLDSIZ
+#define DFLDSIZ (128*1024*1024) /* initial data size limit */
+#endif
+#ifndef MAXDSIZ
+#define MAXDSIZ (512*1024*1024) /* max data size */
+#endif
+#ifndef DFLSSIZ
+#define DFLSSIZ (2*1024*1024) /* initial stack size limit */
+#endif
+#ifndef MAXSSIZ
+#define MAXSSIZ (8*1024*1024) /* max stack size */
+#endif
+
+/*
+ * Size of SysV shared memory map
+ */
+#ifndef SHMMAXPGS
+#define SHMMAXPGS 1024
+#endif
+
+/*
+ * While the ARM architecture defines Section mappings, large pages,
+ * and small pages, the standard page size is (and will always be) 4K.
+ */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1 << PAGE_SHIFT) /* bytes/page */
+#define PAGE_MASK (PAGE_SIZE - 1)
+
+/*
+ * Mach derived constants
+ */
+#define VM_MIN_ADDRESS ((vaddr_t) 0x00001000)
+#define VM_MAXUSER_ADDRESS ((vaddr_t) ARM_KERNEL_BASE)
+#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
+
+#define VM_MIN_KERNEL_ADDRESS ((vaddr_t) ARM_KERNEL_BASE)
+#define VM_MAX_KERNEL_ADDRESS ((vaddr_t) 0xffffffff)
+
+#ifdef _KERNEL
+
+/* XXX max. amount of KVM to be used by buffers. */
+#ifndef VM_MAX_KERNEL_BUF
+extern vaddr_t virtual_avail;
+extern vaddr_t virtual_end;
+
+#define VM_MAX_KERNEL_BUF \
+ ((virtual_end - virtual_avail) * 4 / 10)
+#endif
+
+/*
+ * pmap-specific data store in the vm_page structure.
+ */
+#define __HAVE_VM_PAGE_MD
+struct vm_page_md {
+ struct pv_entry *pvh_list; /* pv_entry list */
+ struct simplelock pvh_slock; /* lock on this head */
+ int pvh_attrs; /* page attributes */
+ u_int uro_mappings;
+ u_int urw_mappings;
+ union {
+ u_short s_mappings[2]; /* Assume kernel count <= 65535 */
+ u_int i_mappings;
+ } k_u;
+#define kro_mappings k_u.s_mappings[0]
+#define krw_mappings k_u.s_mappings[1]
+#define k_mappings k_u.i_mappings
+};
+
+#define VM_MDPAGE_INIT(pg) \
+do { \
+ (pg)->mdpage.pvh_list = NULL; \
+ simple_lock_init(&(pg)->mdpage.pvh_slock); \
+ (pg)->mdpage.pvh_attrs = 0; \
+ (pg)->mdpage.uro_mappings = 0; \
+ (pg)->mdpage.urw_mappings = 0; \
+ (pg)->mdpage.k_mappings = 0; \
+} while (/*CONSTCOND*/0)
+
+#endif /* _KERNEL */
+
+#endif /* _ARM_ARM_VMPARAM_H_ */
diff --git a/sys/arch/arm/mainbus/cpu_mainbus.c b/sys/arch/arm/mainbus/cpu_mainbus.c
new file mode 100644
index 00000000000..63de2094b39
--- /dev/null
+++ b/sys/arch/arm/mainbus/cpu_mainbus.c
@@ -0,0 +1,102 @@
+/* $OpenBSD: cpu_mainbus.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: cpu_mainbus.c,v 1.3 2002/01/05 22:41:48 chris Exp $ */
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * Copyright (c) 1995 Brini.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpu.c
+ *
+ * Probing and configuration for the master cpu
+ *
+ * Created : 10/10/95
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#include <sys/proc.h>
+#if 0
+#include <uvm/uvm_extern.h>
+#include <machine/io.h>
+#include <machine/conf.h>
+#endif
+#include <machine/cpu.h>
+#if 0
+#include <arm/cpus.h>
+#include <arm/undefined.h>
+#endif
+#include <arm/mainbus/mainbus.h>
+
+/*
+ * Prototypes
+ */
+static int cpu_mainbus_match (struct device *, void *, void *);
+static void cpu_mainbus_attach (struct device *, struct device *, void *);
+
+/*
+ * int cpu_mainbus_match(struct device *parent, void *vcf, void *aux)
+ */
+
+static int
+cpu_mainbus_match(struct device *parent, void *vcf, void *aux)
+{
+ struct mainbus_attach_args *ma = aux;
+ struct cfdata *cf = (struct cfdata *)vcf;
+
+ return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
+}
+
+/*
+ * void cpu_mainbus_attach(struct device *parent, struct device *self, void *aux)
+ *
+ * Attach the main cpu
+ */
+
+static void
+cpu_mainbus_attach(parent, self, aux)
+ struct device *parent;
+ struct device *self;
+ void *aux;
+{
+ cpu_attach(self);
+}
+
+struct cfattach cpu_mainbus_ca = {
+ sizeof(struct device), cpu_mainbus_match, cpu_mainbus_attach
+};
+
+struct cfdriver cpu_cd = {
+ NULL, "cpu", DV_DULL
+};
diff --git a/sys/arch/arm/mainbus/mainbus.c b/sys/arch/arm/mainbus/mainbus.c
new file mode 100644
index 00000000000..a472197631d
--- /dev/null
+++ b/sys/arch/arm/mainbus/mainbus.c
@@ -0,0 +1,129 @@
+/* $OpenBSD: mainbus.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: mainbus.c,v 1.3 2001/06/13 17:52:43 nathanw Exp $ */
+
+/*
+ * Copyright (c) 1994,1995 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * mainbus.c
+ *
+ * mainbus configuration
+ *
+ * Created : 15/12/94
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+
+#if defined(arm32) /* XXX */
+#include <machine/io.h>
+#endif
+#include <machine/bus.h>
+#include <arm/mainbus/mainbus.h>
+/*
+#include "locators.h"
+*/
+
+/*
+ * mainbus is a root device so we need a bus space tag to pass to children
+ *
+ * The tag is provided by mainbus_io.c and mainbus_io_asm.S
+ */
+
+extern struct bus_space mainbus_bs_tag;
+
+/* Prototypes for functions provided */
+
+int mainbusmatch __P((struct device *, void *, void *));
+void mainbusattach __P((struct device *, struct device *, void *));
+int mainbusprint __P((void *aux, const char *mainbus));
+int mainbussearch __P((struct device *, struct cfdata *, void *));
+
+/* attach and device structures for the device */
+
+struct cfattach mainbus_ca = {
+ sizeof(struct device), mainbusmatch, mainbusattach
+};
+
+struct cfdriver mainbus_cd = {
+ NULL, "mainbus", DV_DULL
+};
+
+/*
+ * int mainbusmatch(struct device *parent, struct cfdata *cf, void *aux)
+ *
+ * Always match for unit 0
+ */
+
+int
+mainbusmatch(struct device *parent, void *cf, void *aux)
+{
+ return (1);
+}
+
+/*
+ * void mainbusattach(struct device *parent, struct device *self, void *aux)
+ *
+ * probe and attach all children
+ */
+
+void
+mainbusattach(struct device *parent, struct device *self, void *aux)
+{
+ struct mainbus_attach_args ma;
+ printf("\n");
+
+ ma.ma_iot = &mainbus_bs_tag;
+ ma.ma_name = "cpu";
+ config_found(self, &ma, mainbusprint); /* XXX */
+ ma.ma_iot = &mainbus_bs_tag;
+ ma.ma_name = "footbridge";
+ config_found(self, &ma, mainbusprint); /* XXX */
+}
+
+/*
+ * int mainbusprint(void *aux, const char *mainbus)
+ *
+ * print routine used during config of children
+ */
+
+int
+mainbusprint(void *aux, const char *mainbus)
+{
+/* XXXX print flags */
+ return (QUIET);
+}
diff --git a/sys/arch/arm/mainbus/mainbus.h b/sys/arch/arm/mainbus/mainbus.h
new file mode 100644
index 00000000000..11fa23ecba8
--- /dev/null
+++ b/sys/arch/arm/mainbus/mainbus.h
@@ -0,0 +1,61 @@
+/* $OpenBSD: mainbus.h,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: mainbus.h,v 1.1 2001/02/24 19:38:02 reinoud Exp $ */
+
+/*
+ * Copyright (c) 1994,1995 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * mainbus.h
+ *
+ * mainbus configuration
+ *
+ * Created : 15/12/94
+ */
+
+#include <machine/bus.h>
+
+/*
+ * mainbus driver attach arguments
+ */
+
+struct mainbus_attach_args {
+ u_int ma_iobase; /* base i/o address */
+ int ma_iosize; /* span of ports used */
+ int ma_irq; /* interrupt request */
+ int ma_drq; /* DMA request */
+ void *ma_aux; /* driver specific */
+ bus_space_tag_t ma_iot; /* bus space tag */
+ char *ma_name;
+};
+
+/* End of mainbus.h */
diff --git a/sys/arch/arm/mainbus/mainbus_io.c b/sys/arch/arm/mainbus/mainbus_io.c
new file mode 100644
index 00000000000..4a6235b7804
--- /dev/null
+++ b/sys/arch/arm/mainbus/mainbus_io.c
@@ -0,0 +1,248 @@
+/* $OpenBSD: mainbus_io.c,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: mainbus_io.c,v 1.14 2003/12/06 22:05:33 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * bus_space I/O functions for mainbus
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/queue.h>
+
+#include <uvm/uvm.h>
+
+#include <machine/bus.h>
+#include <machine/pmap.h>
+
+/* Prototypes for all the bus_space structure functions */
+
+bs_protos(mainbus);
+bs_protos(bs_notimpl);
+
+/* Declare the mainbus bus space tag */
+
+struct bus_space mainbus_bs_tag = {
+ /* cookie */
+ NULL,
+
+ /* mapping/unmapping */
+ mainbus_bs_map,
+ mainbus_bs_unmap,
+ mainbus_bs_subregion,
+
+ /* allocation/deallocation */
+ mainbus_bs_alloc,
+ mainbus_bs_free,
+
+ /* get kernel virtual address */
+ 0, /* there is no linear mapping */
+
+ /* Mmap bus space for user */
+ mainbus_bs_mmap,
+
+ /* barrier */
+ mainbus_bs_barrier,
+
+ /* read (single) */
+ mainbus_bs_r_1,
+ mainbus_bs_r_2,
+ mainbus_bs_r_4,
+ bs_notimpl_bs_r_8,
+
+ /* read multiple */
+ bs_notimpl_bs_rm_1,
+ mainbus_bs_rm_2,
+ bs_notimpl_bs_rm_4,
+ bs_notimpl_bs_rm_8,
+
+ /* read region */
+ bs_notimpl_bs_rr_1,
+ bs_notimpl_bs_rr_2,
+ bs_notimpl_bs_rr_4,
+ bs_notimpl_bs_rr_8,
+
+ /* write (single) */
+ mainbus_bs_w_1,
+ mainbus_bs_w_2,
+ mainbus_bs_w_4,
+ bs_notimpl_bs_w_8,
+
+ /* write multiple */
+ mainbus_bs_wm_1,
+ mainbus_bs_wm_2,
+ bs_notimpl_bs_wm_4,
+ bs_notimpl_bs_wm_8,
+
+ /* write region */
+ bs_notimpl_bs_wr_1,
+ bs_notimpl_bs_wr_2,
+ bs_notimpl_bs_wr_4,
+ bs_notimpl_bs_wr_8,
+
+ bs_notimpl_bs_sm_1,
+ bs_notimpl_bs_sm_2,
+ bs_notimpl_bs_sm_4,
+ bs_notimpl_bs_sm_8,
+
+ /* set region */
+ bs_notimpl_bs_sr_1,
+ bs_notimpl_bs_sr_2,
+ bs_notimpl_bs_sr_4,
+ bs_notimpl_bs_sr_8,
+
+ /* copy */
+ bs_notimpl_bs_c_1,
+ bs_notimpl_bs_c_2,
+ bs_notimpl_bs_c_4,
+ bs_notimpl_bs_c_8,
+};
+
+/* bus space functions */
+
+int
+mainbus_bs_map(t, bpa, size, flags, bshp)
+ void *t;
+ bus_addr_t bpa;
+ bus_size_t size;
+ int flags;
+ bus_space_handle_t *bshp;
+{
+ u_long startpa, endpa, pa;
+ vaddr_t va;
+ pt_entry_t *pte;
+
+ if ((u_long)bpa > (u_long)KERNEL_BASE) {
+ /* XXX This is a temporary hack to aid transition. */
+ *bshp = bpa;
+ return(0);
+ }
+
+ startpa = trunc_page(bpa);
+ endpa = round_page(bpa + size);
+
+ /* XXX use extent manager to check duplicate mapping */
+
+ va = uvm_km_valloc(kernel_map, endpa - startpa);
+ if (! va)
+ return(ENOMEM);
+
+ *bshp = (bus_space_handle_t)(va + (bpa - startpa));
+
+ for(pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
+ pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+ if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0) {
+ pte = vtopte(va);
+ *pte &= ~L2_S_CACHE_MASK;
+ PTE_SYNC(pte);
+ }
+ }
+ pmap_update(pmap_kernel());
+
+ return(0);
+}
+
+int
+mainbus_bs_alloc(t, rstart, rend, size, alignment, boundary, cacheable,
+ bpap, bshp)
+ void *t;
+ bus_addr_t rstart, rend;
+ bus_size_t size, alignment, boundary;
+ int cacheable;
+ bus_addr_t *bpap;
+ bus_space_handle_t *bshp;
+{
+ panic("mainbus_bs_alloc(): Help!");
+}
+
+
+void
+mainbus_bs_unmap(t, bsh, size)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+ /*
+ * Temporary implementation
+ */
+}
+
+void
+mainbus_bs_free(t, bsh, size)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+
+ panic("mainbus_bs_free(): Help!");
+ /* mainbus_bs_unmap() does all that we need to do. */
+/* mainbus_bs_unmap(t, bsh, size);*/
+}
+
+int
+mainbus_bs_subregion(t, bsh, offset, size, nbshp)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, size;
+ bus_space_handle_t *nbshp;
+{
+
+ *nbshp = bsh + (offset << 2);
+ return (0);
+}
+
+paddr_t
+mainbus_bs_mmap(t, paddr, offset, prot, flags)
+ void *t;
+ bus_addr_t paddr;
+ off_t offset;
+ int prot;
+ int flags;
+{
+ /*
+ * mmap from address `paddr+offset' for one page
+ */
+ return (arm_btop((paddr + offset)));
+}
+
+void
+mainbus_bs_barrier(t, bsh, offset, len, flags)
+ void *t;
+ bus_space_handle_t bsh;
+ bus_size_t offset, len;
+ int flags;
+{
+}
+
+/* End of mainbus_io.c */
diff --git a/sys/arch/arm/mainbus/mainbus_io_asm.S b/sys/arch/arm/mainbus/mainbus_io_asm.S
new file mode 100644
index 00000000000..af052ad3513
--- /dev/null
+++ b/sys/arch/arm/mainbus/mainbus_io_asm.S
@@ -0,0 +1,113 @@
+/* $OpenBSD: mainbus_io_asm.S,v 1.1 2004/02/01 05:09:49 drahn Exp $ */
+/* $NetBSD: mainbus_io_asm.S,v 1.1 2001/02/24 19:38:02 reinoud Exp $ */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+/*
+ * bus_space I/O functions for mainbus
+ */
+
+
+/*
+ * read single
+ */
+
+ENTRY(mainbus_bs_r_1)
+ ldrb r0, [r1, r2, lsl #2]
+ mov pc, lr
+
+ENTRY(mainbus_bs_r_2)
+ ldr r0, [r1, r2, lsl #2]
+ bic r0, r0, #0xff000000
+ bic r0, r0, #0x00ff0000
+ mov pc, lr
+
+ENTRY(mainbus_bs_r_4)
+ ldr r0, [r1, r2, lsl #2]
+ mov pc, lr
+
+/*
+ * write single
+ */
+
+ENTRY(mainbus_bs_w_1)
+ strb r3, [r1, r2, lsl #2]
+ mov pc, lr
+
+ENTRY(mainbus_bs_w_2)
+ mov r3, r3, lsl #16
+ orr r3, r3, r3, lsr #16
+ str r3, [r1, r2, lsl #2]
+ mov pc, lr
+
+ENTRY(mainbus_bs_w_4)
+ str r3, [r1, r2, lsl #2]
+ mov pc, lr
+
+/*
+ * read multiple
+ */
+
+ENTRY(mainbus_bs_rm_2)
+ add r0, r1, r2, lsl #2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ b _C_LABEL(insw16)
+
+/*
+ * write multiple
+ */
+
+ENTRY(mainbus_bs_wm_1)
+ add r0, r1, r2, lsl #2
+ ldr r2, [sp, #0]
+
+ /* Make sure that we have a positive length */
+ cmp r2, #0x00000000
+ movle pc, lr
+
+mainbus_wm_1_loop:
+ ldrb r1, [r3], #0x0001
+ str r1, [r0]
+ subs r2, r2, #0x00000001
+ bgt mainbus_wm_1_loop
+
+ mov pc, lr
+
+ENTRY(mainbus_bs_wm_2)
+ add r0, r1, r2, lsl #2
+ mov r1, r3
+ ldr r2, [sp, #0]
+ b _C_LABEL(outsw16)