/*	$OpenBSD: machdep.c,v 1.172 2015/06/02 04:31:53 miod Exp $	*/
/*	$NetBSD: machdep.c,v 1.85 1997/09/12 08:55:02 pk Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef SUN4M
#include "power.h"
#if NPOWER > 0
#include
#endif
#include "scf.h"
#include "tctrl.h"
#if NTCTRL > 0
#include
#endif
#endif

#include "auxreg.h"

#ifdef SUN4
#include
#include "led.h"
#endif

vaddr_t vm_pie_max_addr = 0;

struct vm_map *exec_map = NULL;

struct uvm_constraint_range  dma_constraint = { 0x0, (paddr_t)-1 };
struct uvm_constraint_range *uvm_md_constraints[] = { NULL };

int     physmem;

/* sysctl settable */
int     sparc_led_blink = 1;

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int     safepri = 0;

void    dumpsys(void);
void    stackdump(void);

/*
 * Machine-dependent startup code
 */
void
cpu_startup()
{
#ifdef DEBUG
        extern int pmapdebug;
        int opmapdebug = pmapdebug;
#endif
        vaddr_t minaddr, maxaddr;
        paddr_t msgbufpa;
        extern struct user *proc0paddr;

#ifdef DEBUG
        pmapdebug = 0;
#endif

        if (CPU_ISSUN4M)
                stackgap_random = STACKGAP_RANDOM_SUN4M;

        /*
         * Re-map the message buffer from its temporary address
         * at KERNBASE to MSGBUF_VA.
         */

        /* Get physical address of the message buffer */
        pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &msgbufpa);

        /* Invalidate the current mapping at KERNBASE. */
        pmap_kremove((vaddr_t)KERNBASE, PAGE_SIZE);
        pmap_update(pmap_kernel());

        /* Enter the new mapping */
        pmap_map(MSGBUF_VA, msgbufpa, msgbufpa + PAGE_SIZE,
            PROT_READ | PROT_WRITE);

        /* Re-initialize the message buffer. */
        initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)), MSGBUFSIZE);

        proc0.p_addr = proc0paddr;

        /*
         * Good {morning,afternoon,evening,night}.
         */
        printf(version);
        /*identifycpu();*/
        printf("real mem = %lu (%luMB)\n", ptoa(physmem),
            ptoa(physmem)/1024/1024);

#if !defined(SMALL_KERNEL)
        /*
         * uvm_km_init() has allocated all the virtual memory below the
         * end of the kernel image. If VM_MIN_KERNEL_ADDRESS is below
         * KERNBASE, we need to reclaim that range.
         */
        if (vm_min_kernel_address < (vaddr_t)KERNBASE) {
                uvm_unmap(kernel_map, vm_min_kernel_address,
                    (vaddr_t)KERNBASE);
        }
#endif

        /*
         * Allocate a submap for exec arguments. This map effectively
         * limits the number of processes exec'ing at any time.
         */
        minaddr = vm_map_min(kernel_map);
        exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

        /*
         * Set up userland PIE limits. PIE is disabled on sun4/4c/4e due
         * to the limited address space.
         */
        if (CPU_ISSUN4M) {
                vm_pie_max_addr = VM_MAXUSER_ADDRESS / 4;
        }

        dvma_init();

#ifdef DEBUG
        pmapdebug = opmapdebug;
#endif
        printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
            ptoa(uvmexp.free)/1024/1024);

        /*
         * Set up buffers, so they can be used to read disk labels.
         */
        bufinit();
}

/*
 * Set up registers on exec.
 *
 * XXX this entire mess must be fixed
 */
/* ARGSUSED */
void
setregs(p, pack, stack, retval)
        struct proc *p;
        struct exec_package *pack;
        u_long stack;
        register_t *retval;
{
        struct trapframe *tf = p->p_md.md_tf;
        struct fpstate *fs;
        int psr;

        /*
         * Setup the process StackGhost cookie which will be XORed into
         * the return pointer as register windows are over/underflowed
         */
        p->p_addr->u_pcb.pcb_wcookie = arc4random();

        /* The cookie needs to guarantee invalid alignment after the XOR */
        switch (p->p_addr->u_pcb.pcb_wcookie % 3) {
        case 0: /* Two lsb's already both set except if the cookie is 0 */
                p->p_addr->u_pcb.pcb_wcookie |= 0x3;
                break;
        case 1: /* Set the lsb */
                p->p_addr->u_pcb.pcb_wcookie = 1 |
                    (p->p_addr->u_pcb.pcb_wcookie & ~0x3);
                break;
        case 2: /* Set the second most lsb */
                p->p_addr->u_pcb.pcb_wcookie = 2 |
                    (p->p_addr->u_pcb.pcb_wcookie & ~0x3);
                break;
        }
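
        /*
         * Illustrative example of the guarantee above: valid return
         * addresses are 4-byte aligned (two low bits clear), so forcing
         * the cookie's two low bits to a non-zero pattern ensures that the
         * XORed value stored in a saved window is never a plausible return
         * address. E.g. a cookie of 0x30 (0x30 % 3 == 0) becomes 0x33, and
         * 0x33 ^ 0x1000 == 0x1033, which is misaligned.
         */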

        /*
         * The syscall will ``return'' to npc or %g7 or %g2; set them all.
         * Set the rest of the registers to 0 except for %o6 (stack pointer,
         * built in exec()) and psr (retain CWP and PSR_S bits).
         */
        psr = tf->tf_psr & (PSR_S | PSR_CWP);
        if ((fs = p->p_md.md_fpstate) != NULL) {
                /*
                 * We hold an FPU state. If we own *the* FPU chip state
                 * we must get rid of it, and the only way to do that is
                 * to save it. In any case, get rid of our FPU state.
                 */
                if (p == cpuinfo.fpproc) {
                        savefpstate(fs);
                        cpuinfo.fpproc = NULL;
                }
                free((void *)fs, M_SUBPROC, 0);
                p->p_md.md_fpstate = NULL;
        }
        bzero((caddr_t)tf, sizeof *tf);
        tf->tf_psr = psr;
        tf->tf_npc = pack->ep_entry & ~3;
        tf->tf_global[2] = tf->tf_npc;

        /* XXX exec of init(8) returns via proc_trampoline() */
        if (p->p_pid == 1) {
                tf->tf_pc = tf->tf_npc;
                tf->tf_npc += 4;
        }

        stack -= sizeof(struct rwindow);
        tf->tf_out[6] = stack;
        retval[1] = 0;
}

#ifdef DEBUG
int sigdebug = 0;
pid_t sigpid = 0;
#define SDB_FOLLOW      0x01
#define SDB_KSTACK      0x02
#define SDB_FPSTATE     0x04
#endif

struct sigframe {
        int     sf_signo;               /* signal number */
        siginfo_t *sf_sip;              /* points to siginfo_t */
        int     sf_xxx;                 /* placeholder */
        caddr_t sf_addr;                /* SunOS compat */
        struct  sigcontext sf_sc;       /* actual sigcontext */
        siginfo_t sf_si;
};

/*
 * machine dependent system variables.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
        int *name;
        u_int namelen;
        void *oldp;
        size_t *oldlenp;
        void *newp;
        size_t newlen;
        struct proc *p;
{
#if (NLED > 0) || (NAUXREG > 0) || (NSCF > 0)
        int oldval;
#endif
        int ret;
        extern int v8mul;

        /* all sysctl names at this level are terminal */
        if (namelen != 1)
                return (ENOTDIR);       /* overloaded */

        switch (name[0]) {
        case CPU_LED_BLINK:
#if (NLED > 0) || (NAUXREG > 0) || (NSCF > 0)
                oldval = sparc_led_blink;
                ret = sysctl_int(oldp, oldlenp, newp, newlen,
                    &sparc_led_blink);

                /*
                 * If we were false and are now true, call led_blink().
                 * led_blink() itself will catch the other case.
                 */
                if (!oldval && sparc_led_blink > oldval) {
#if NAUXREG > 0
                        led_blink((caddr_t *)0);
#endif
#if NLED > 0
                        led_cycle((caddr_t *)led_sc);
#endif
#if NSCF > 0
                        scfblink((caddr_t *)0);
#endif
                }
                return (ret);
#else
                return (EOPNOTSUPP);
#endif
        case CPU_CPUTYPE:
                return (sysctl_rdint(oldp, oldlenp, newp, cputyp));
        case CPU_V8MUL:
                return (sysctl_rdint(oldp, oldlenp, newp, v8mul));
        default:
                return (EOPNOTSUPP);
        }
        /* NOTREACHED */
}

/*
 * Send an interrupt to process.
 */
void
sendsig(catcher, sig, mask, code, type, val)
        sig_t catcher;
        int sig, mask;
        u_long code;
        int type;
        union sigval val;
{
        struct proc *p = curproc;
        struct sigacts *psp = p->p_p->ps_sigacts;
        struct sigframe *fp;
        struct trapframe *tf;
        int caddr, oldsp, newsp;
        struct sigframe sf;

        tf = p->p_md.md_tf;
        oldsp = tf->tf_out[6];

        /*
         * Compute new user stack addresses, subtract off
         * one signal frame, and align.
         */
        if ((p->p_sigstk.ss_flags & SS_DISABLE) == 0 &&
            !sigonstack(oldsp) && (psp->ps_sigonstack & sigmask(sig)))
                fp = (struct sigframe *)(p->p_sigstk.ss_sp +
                    p->p_sigstk.ss_size);
        else
                fp = (struct sigframe *)oldsp;
        fp = (struct sigframe *)((int)(fp - 1) & ~7);

#ifdef DEBUG
        if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
                printf("sendsig: %s[%d] sig %d newusp %p scp %p\n",
                    p->p_comm, p->p_pid, sig, fp, &fp->sf_sc);
#endif

        /*
         * Now set up the signal frame. We build it in kernel space
         * and then copy it out. We probably ought to just build it
         * directly in user space....
         */
        bzero(&sf, sizeof(sf));
        sf.sf_signo = sig;
        sf.sf_sip = NULL;

        /*
         * Build the signal context to be used by sigreturn.
         */
        sf.sf_sc.sc_mask = mask;
        sf.sf_sc.sc_sp = oldsp;
        sf.sf_sc.sc_pc = tf->tf_pc;
        sf.sf_sc.sc_npc = tf->tf_npc;
        sf.sf_sc.sc_psr = tf->tf_psr;
        sf.sf_sc.sc_g1 = tf->tf_global[1];
        sf.sf_sc.sc_o0 = tf->tf_out[0];

        if (psp->ps_siginfo & sigmask(sig)) {
                sf.sf_sip = &fp->sf_si;
                initsiginfo(&sf.sf_si, sig, code, type, val);
        }
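
        /*
         * Rough picture of the user stack set up below (illustrative;
         * addresses grow downward, sigaltstack case aside):
         *
         *      oldsp -> interrupted frame
         *      fp    -> struct sigframe copied out below
         *      newsp -> struct rwindow whose %i6 slot is seeded with oldsp,
         *               so the handler's frame links back to the old one
         */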

        /*
         * Put the stack in a consistent state before we whack away
         * at it. Note that write_user_windows may just dump the
         * registers into the pcb; we need them in the process's memory.
         * We also need to make sure that when we start the signal handler,
         * its %i6 (%fp), which is loaded from the newly allocated stack area,
         * joins seamlessly with the frame it was in when the signal occurred,
         * so that the debugger and _longjmp code can back up through it.
         */
        newsp = (int)fp - sizeof(struct rwindow);
        write_user_windows();
        /* XXX do not copyout siginfo if not needed */
        if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
            copyout(&oldsp, &((struct rwindow *)newsp)->rw_in[6],
            sizeof(register_t)) != 0) {
                /*
                 * Process has trashed its stack; give it an illegal
                 * instruction to halt it in its tracks.
                 */
#ifdef DEBUG
                if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
                        printf("sendsig: window save or copyout error\n");
#endif
                sigexit(p, SIGILL);
                /* NOTREACHED */
        }

#ifdef DEBUG
        if (sigdebug & SDB_FOLLOW)
                printf("sendsig: %s[%d] sig %d scp %p\n",
                    p->p_comm, p->p_pid, sig, &fp->sf_sc);
#endif

        /*
         * Arrange to continue execution at the code copied out in exec().
         * It needs the function to call in %g1, and a new stack pointer.
         */
        caddr = p->p_p->ps_sigcode;
        tf->tf_global[1] = (int)catcher;
        tf->tf_pc = caddr;
        tf->tf_npc = caddr + 4;
        tf->tf_out[6] = newsp;

#ifdef DEBUG
        if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
                printf("sendsig: about to return to catcher\n");
#endif
}

/*
 * System call to cleanup state after a signal
 * has been taken. Reset signal mask and
 * stack state from context left by sendsig (above),
 * and return to the given trap frame (if there is one).
 * Check carefully to make sure that the user has not
 * modified the state to gain improper privileges or to cause
 * a machine fault.
 */
/* ARGSUSED */
int
sys_sigreturn(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct sys_sigreturn_args /* {
                syscallarg(struct sigcontext *) sigcntxp;
        } */ *uap = v;
        struct sigcontext ksc;
        struct trapframe *tf;
        int error;

        /* First ensure consistent stack state (see sendsig). */
        write_user_windows();
        if (rwindow_save(p))
                sigexit(p, SIGILL);

#ifdef DEBUG
        if (sigdebug & SDB_FOLLOW)
                printf("sigreturn: %s[%d], sigcntxp %p\n",
                    p->p_comm, p->p_pid, SCARG(uap, sigcntxp));
#endif

        if ((error = copyin(SCARG(uap, sigcntxp), &ksc, sizeof(ksc))) != 0)
                return (error);

        tf = p->p_md.md_tf;
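
        /*
         * Restoring the full psr would let user space set the supervisor
         * bit (PSR_S) or change the window pointer, which is why only the
         * condition codes are copied back below.
         */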

        /*
         * Only the icc bits in the psr are used, so it need not be
         * verified. pc and npc must be multiples of 4. This is all
         * that is required; if it holds, just do it.
         */
        if (((ksc.sc_pc | ksc.sc_npc) & 3) != 0)
                return (EINVAL);

        /* take only psr ICC field */
        tf->tf_psr = (tf->tf_psr & ~PSR_ICC) | (ksc.sc_psr & PSR_ICC);
        tf->tf_pc = ksc.sc_pc;
        tf->tf_npc = ksc.sc_npc;
        tf->tf_global[1] = ksc.sc_g1;
        tf->tf_out[0] = ksc.sc_o0;
        tf->tf_out[6] = ksc.sc_sp;
        p->p_sigmask = ksc.sc_mask & ~sigcantmask;

        return (EJUSTRETURN);
}

int waittime = -1;

__dead void
boot(int howto)
{
        int i;
        static char str[4];     /* room for "-sd\0" */

        if (cold) {
                if ((howto & RB_USERREQ) == 0)
                        howto |= RB_HALT;
                goto haltsys;
        }

        fb_unblank();
        boothowto = howto;
        if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
                waittime = 0;
                vfs_shutdown();

                if ((howto & RB_TIMEBAD) == 0) {
                        resettodr();
                } else {
                        printf("WARNING: not updating battery clock\n");
                }
        }
        if_downall();

        uvm_shutdown();
        splhigh();
        cold = 1;

        if ((howto & RB_DUMP) != 0)
                dumpsys();

haltsys:
        config_suspend_all(DVACT_POWERDOWN);

        if ((howto & RB_HALT) != 0 || (howto & RB_POWERDOWN) != 0) {
#if defined(SUN4M)
                if ((howto & RB_POWERDOWN) != 0) {
                        printf("attempting to power down...\n");
#if NTCTRL > 0
                        tadpole_powerdown();
#endif
#if NPOWER > 0
                        auxio_powerdown();
#endif
                        rominterpret("power-off");
                        printf("WARNING: powerdown failed!\n");
                }
#endif /* SUN4M */
                printf("halted\n\n");
                romhalt();
        }

        printf("rebooting\n\n");

        i = 1;
        if ((howto & RB_SINGLE) != 0)
                str[i++] = 's';
        if ((howto & RB_KDB) != 0)
                str[i++] = 'd';
        if (i > 1) {
                str[0] = '-';
                str[i] = 0;
        } else
                str[0] = 0;
        romboot(str);
        for (;;)
                ;
        /* NOTREACHED */
}

/* XXX - dumpmag not explicitly used, savecore may search for it to get here */
u_long  dumpmag = 0x8fca0101;   /* magic number for savecore */
int     dumpsize = 0;           /* also for savecore */
long    dumplo = 0;

void
dumpconf(void)
{
        int nblks, dumpblks;

        if (dumpdev == NODEV ||
            (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
                return;
        if (nblks <= ctod(1))
                return;

        dumpblks = ctod(physmem) + ctod(pmap_dumpsize());
        if (dumpblks > (nblks - ctod(1)))
                /*
                 * dump size is too big for the partition.
                 * Note, we safeguard a click at the front for a
                 * possible disk label.
                 */
                return;

        /* Put the dump at the end of the partition */
        dumplo = nblks - dumpblks;

        /*
         * savecore(8) expects dumpsize to be the number of pages
         * of actual core dumped (i.e. excluding the MMU stuff).
         */
        dumpsize = physmem;
}

#define BYTES_PER_DUMP  (32 * 1024)     /* must be a multiple of pagesize */
static vaddr_t dumpspace;

/*
 * Allocate the dump i/o buffer area during kernel memory allocation
 */
caddr_t
reserve_dumppages(p)
        caddr_t p;
{

        dumpspace = (vaddr_t)p;
        return (p + BYTES_PER_DUMP);
}

/*
 * Write a crash dump.
 */
void
dumpsys()
{
        int psize;
        daddr_t blkno;
        int (*dump)(dev_t, daddr_t, caddr_t, size_t);
        int error = 0;
        struct memarr *mp;
        int nmem;
        extern struct memarr *pmemarr;
        extern int npmemarr;

        /* copy registers to memory */
        snapshot(cpcb);
        stackdump();

        if (dumpdev == NODEV)
                return;
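
        /*
         * Layout of the dump written below: the MMU/pmap state from
         * pmap_dumpmmu() goes out first, starting at block dumplo, and is
         * followed by each physical memory range in BYTES_PER_DUMP chunks;
         * the single page at physical address 0 is skipped.
         */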

        /*
         * For dumps during autoconfiguration,
         * if dump device has already configured...
         */
        if (dumpsize == 0)
                dumpconf();
        if (dumplo <= 0)
                return;
        printf("\ndumping to dev(%d,%d), at offset %ld blocks\n",
            major(dumpdev), minor(dumpdev), dumplo);

#ifdef UVM_SWAP_ENCRYPT
        uvm_swap_finicrypt_all();
#endif

        psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
        printf("dump ");
        if (psize == -1) {
                printf("area unavailable\n");
                return;
        }
        blkno = dumplo;
        dump = bdevsw[major(dumpdev)].d_dump;

        printf("mmu ");
        error = pmap_dumpmmu(dump, blkno);
        blkno += ctod(pmap_dumpsize());

        printf("memory ");
        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0 && error == 0; mp++) {
                unsigned i = 0, n;
                unsigned maddr = mp->addr_lo;

                /* XXX - what's so special about PA 0 that we can't dump it? */
                if (maddr == 0) {
                        /* Skip first page at physical address 0 */
                        maddr += NBPG;
                        i += NBPG;
                        blkno += btodb(NBPG);
                }

                printf("@0x%x:", maddr);
                for (; i < mp->len; i += n) {
                        n = mp->len - i;
                        if (n > BYTES_PER_DUMP)
                                n = BYTES_PER_DUMP;

                        /* print out which MBs we are dumping */
                        if (i % (1024*1024) <= NBPG)
                                printf("%d ", i / (1024*1024));

                        (void) pmap_map(dumpspace, maddr, maddr + n,
                            PROT_READ);
                        error = (*dump)(dumpdev, blkno,
                            (caddr_t)dumpspace, (int)n);
                        pmap_remove(pmap_kernel(), dumpspace, dumpspace + n);
                        if (error)
                                break;
                        maddr += n;
                        blkno += btodb(n);
                }
        }

        switch (error) {
        case ENXIO:
                printf("device bad\n");
                break;
        case EFAULT:
                printf("device not ready\n");
                break;
        case EINVAL:
                printf("area improper\n");
                break;
        case EIO:
                printf("i/o error\n");
                break;
        case 0:
                printf("succeeded\n");
                break;
        default:
                printf("error %d\n", error);
                break;
        }
}

/*
 * get the fp and dump the stack as best we can. don't leave the
 * current stack page
 */
void
stackdump()
{
        struct frame *fp = getfp(), *sfp;

        sfp = fp;
        printf("Frame pointer is at %p\n", fp);
        printf("Call traceback:\n");
        while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
                printf(" pc = 0x%x args = (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp = %p\n",
                    fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
                    fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
                fp = fp->fr_fp;
        }
}
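
/*
 * Worked example for mapdev() below, with made-up numbers: mapping a
 * device whose physical address ends in ...010 with a size of 0x100
 * rounds the mapping up to whole pages, wires it non-cacheable at the
 * next free IODEV virtual page, and returns that page's address with
 * the low 0x010 offset preserved, so the caller's pointer lands on the
 * device registers rather than on the page boundary.
 */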

/*
 * Map an I/O device given physical address and size in bytes, e.g.,
 *
 *	mydev = (struct mydev *)mapdev(myioaddr, 0,
 *	    0, sizeof(struct mydev));
 *
 * See also machine/autoconf.h.
 *
 * XXXART - verify types (too tired now).
 */
void *
mapdev(phys, virt, offset, size)
        struct rom_reg *phys;
        int offset, virt, size;
{
        vaddr_t va;
        paddr_t pa, base;
        void *ret;
        static vaddr_t iobase;
        unsigned int pmtype;

        if (iobase == 0)
                iobase = IODEV_BASE;

        base = (paddr_t)phys->rr_paddr + offset;
        if (virt != 0) {
                va = trunc_page(virt);
                size = round_page(virt + size) - va;
        } else {
                size = round_page(base + size) - trunc_page(base);
                va = iobase;
                iobase += size;
                if (iobase > IODEV_END) /* unlikely */
                        panic("mapiodev");
        }

        if (size == 0)
                panic("mapdev: zero size");

        ret = (void *)(va | (base & PGOFSET));  /* note: preserve page offset */

        pa = trunc_page(base);
        pmtype = PMAP_IOENC(phys->rr_iospace);

        do {
                pmap_kenter_pa(va, pa | pmtype | PMAP_NC,
                    PROT_READ | PROT_WRITE);
                va += PAGE_SIZE;
                pa += PAGE_SIZE;
        } while ((size -= PAGE_SIZE) > 0);
        pmap_update(pmap_kernel());

        return (ret);
}

#ifdef SUN4
void
oldmon_w_trace(va)
        u_long va;
{
        u_long stop;
        struct frame *fp;

        if (curproc)
                printf("curproc = %p, pid %d\n", curproc, curproc->p_pid);
        else
                printf("no curproc\n");

        printf("uvm: swtch %d, trap %d, sys %d, intr %d, soft %d, faults %d\n",
            uvmexp.swtch, uvmexp.traps, uvmexp.syscalls, uvmexp.intrs,
            uvmexp.softs, uvmexp.faults);
        write_user_windows();

        printf("\nstack trace with sp = 0x%lx\n", va);
        stop = round_page(va);
        printf("stop at 0x%lx\n", stop);
        fp = (struct frame *) va;
        while (round_page((u_long) fp) == stop) {
                printf(" 0x%x(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp %p\n",
                    fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
                    fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
                fp = fp->fr_fp;
                if (fp == NULL)
                        break;
        }
        printf("end of stack trace\n");
}

void
oldmon_w_cmd(va, ar)
        u_long va;
        char *ar;
{
        switch (*ar) {
        case '\0':
                switch (va) {
                case 0:
                        panic("g0 panic");
                case 4:
                        printf("w: case 4\n");
                        break;
                default:
                        printf("w: unknown case %ld\n", va);
                        break;
                }
                break;
        case 't':
                oldmon_w_trace(va);
                break;
        default:
                printf("w: arg not allowed\n");
        }
}

int
ldcontrolb(addr)
        caddr_t addr;
{
        struct pcb *xpcb;
        extern struct user *proc0paddr;
        u_long saveonfault;
        int res;
        int s;

        s = splhigh();
        if (curproc == NULL)
                xpcb = (struct pcb *)proc0paddr;
        else
                xpcb = &curproc->p_addr->u_pcb;

        saveonfault = (u_long)xpcb->pcb_onfault;
        res = xldcontrolb(addr, xpcb);
        xpcb->pcb_onfault = (caddr_t)saveonfault;

        splx(s);
        return (res);
}
#endif /* SUN4 */

/*
 * Common function for DMA map creation. May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
        struct sparc_bus_dmamap *map;
        void *mapstore;
        size_t mapsize;

        /*
         * Allocate and initialize the DMA map. The end of the map
         * is a variable-sized array of segments, so we allocate enough
         * room for them in one shot.
         *
         * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
         * of ALLOCNOW notifies others that we've reserved these resources,
         * and they are not to be freed.
         *
         * The bus_dmamap_t includes one bus_dma_segment_t, hence
         * the (nsegments - 1).
         */
        mapsize = sizeof(struct sparc_bus_dmamap) +
            (sizeof(bus_dma_segment_t) * (nsegments - 1));
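
        /*
         * For example (illustrative): with nsegments == 4, mapsize covers
         * the map structure plus room for 3 additional bus_dma_segment_t
         * entries, since the first segment is embedded in the structure
         * itself.
         */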

        if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
            (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
                return (ENOMEM);

        map = (struct sparc_bus_dmamap *)mapstore;
        map->_dm_size = size;
        map->_dm_segcnt = nsegments;
        map->_dm_maxmaxsegsz = maxsegsz;
        map->_dm_boundary = boundary;
        map->_dm_align = PAGE_SIZE;
        map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
        map->dm_maxsegsz = maxsegsz;
        map->dm_mapsize = 0;            /* no valid mappings */
        map->dm_nsegs = 0;

        *dmamp = map;
        return (0);
}

/*
 * Common function for DMA map destruction. May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
        free(map, M_DEVBUF, 0);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m, int flags)
{
        panic("_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
        panic("_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
        panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for DMA map synchronization. May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
}

/*
 * Common function for DMA-safe memory allocation. May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
        struct pglist *mlist;
        int error, plaflag;

        /* Always round the size. */
        size = round_page(size);

        if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
            (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
                return (ENOMEM);

        /*
         * Allocate pages from the VM system.
         */
        plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
        if (flags & BUS_DMA_ZERO)
                plaflag |= UVM_PLA_ZERO;

        TAILQ_INIT(mlist);
        error = uvm_pglistalloc(size, (paddr_t)0, (paddr_t)-1,
            0, 0, mlist, nsegs, plaflag);
        if (error)
                return (error);

        /*
         * Simply keep a pointer around to the linked list, so
         * bus_dmamap_free() can return it.
         *
         * NOBODY SHOULD TOUCH THE pageq FIELDS WHILE THESE PAGES
         * ARE IN OUR CUSTODY.
         */
        segs[0]._ds_mlist = mlist;

        /*
         * We now have physical pages, but no DVMA addresses yet. These
         * will be allocated in bus_dmamap_load*() routines. Hence we
         * save any alignment and boundary requirements in this DMA
         * segment.
         */
        segs[0].ds_addr = 0;
        segs[0].ds_len = 0;
        segs[0]._ds_va = 0;
        *rsegs = 1;
        return (0);
}

/*
 * Common function for freeing DMA-safe memory. May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
        if (nsegs != 1)
                panic("bus_dmamem_free: nsegs = %d", nsegs);

        /*
         * Return the list of pages back to the VM system.
         */
        uvm_pglistfree(segs[0]._ds_mlist);
        free(segs[0]._ds_mlist, M_DEVBUF, 0);
}

/*
 * Common function for unmapping DMA-safe memory. May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{
#ifdef DIAGNOSTIC
        if ((u_long)kva & PAGE_MASK)
                panic("_bus_dmamem_unmap");
#endif

        km_free(kva, round_page(size), &kv_any, &kp_none);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory. May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
        panic("_bus_dmamem_mmap: not implemented");
}