/*	$OpenBSD: vm_machdep.c,v 1.25 2001/06/26 21:35:43 miod Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	8.6 (Berkeley) 1/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <machine/cpu.h>
#include <machine/pte.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
void
cpu_fork(p1, p2, stack, stacksize)
	register struct proc *p1, *p2;
	void *stack;
	size_t stacksize;
{
	register struct pcb *pcb = &p2->p_addr->u_pcb;
	register struct trapframe *tf;
	register struct switchframe *sf;
	extern struct pcb *curpcb;
	extern void proc_trampoline(), child_return();

	p2->p_md.md_flags = p1->p_md.md_flags;

	/* Copy pcb from proc p1 to p2. */
	if (p1 == curproc) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#ifdef DIAGNOSTIC
	else if (p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Copy the trap frame, and arrange for the child to return directly
	 * through return_to_user().  Note the inline version of cpu_set_kpc().
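	 * The child's trap frame is placed at the very top of its kernel
	 * stack (the end of the u-area) and a switch frame is built just
	 * below it; when the child is first switched to, proc_trampoline()
	 * calls child_return(p2) using the A2/A3 values stashed in the pcb
	 * and then returns to user mode through the copied trap frame.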
	 */
	tf = (struct trapframe *)((u_int)p2->p_addr + USPACE) - 1;
	p2->p_md.md_regs = (int *)tf;
	*tf = *(struct trapframe *)p1->p_md.md_regs;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_regs[15] = (u_int)stack + stacksize;

	sf = (struct switchframe *)tf - 1;
	sf->sf_pc = (u_int)proc_trampoline;
	pcb->pcb_regs[6] = (int)child_return;	/* A2 */
	pcb->pcb_regs[7] = (int)p2;		/* A3 */
	pcb->pcb_regs[11] = (int)sf;		/* SSP */
}

/*
 * Arrange for the process to call pc(arg) from proc_trampoline() the
 * next time it is switched to; cpu_fork() above does the same thing
 * inline for the child it creates.
 */
void
cpu_set_kpc(p, pc, arg)
	struct proc *p;
	void (*pc) __P((void *));
	void *arg;
{

	p->p_addr->u_pcb.pcb_regs[6] = (int)pc;		/* A2 */
	p->p_addr->u_pcb.pcb_regs[7] = (int)arg;	/* A3 */
}

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call switch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until switch_exit has made things safe again.
 */
void
cpu_exit(p)
	struct proc *p;
{

	(void) splimp();
#if defined(UVM)
	uvmexp.swtch++;
#else
	cnt.v_swtch++;
#endif
	switch_exit(p);
	/* NOTREACHED */
}

/*
 * Machine-dependent cleanup after a process has exited; nothing to do here.
 */
void
cpu_cleanup(p)
	struct proc *p;
{
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred, chdr)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	struct core *chdr;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, USPACE,
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, NULL, p));
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap.
 */
void
pagemove(from, to, size)
	caddr_t from, to;
	size_t size;
{
	vm_offset_t pa;

#ifdef DEBUG
	if ((size & PAGE_MASK) != 0)
		panic("pagemove");
#endif
	while (size > 0) {
		pmap_extract(pmap_kernel(), (vm_offset_t)from, &pa);
#ifdef DEBUG
#if 0
		if (pa == 0)
			panic("pagemove 2");
		if (pmap_extract(pmap_kernel(), (vm_offset_t)to, XXX) != 0)
			panic("pagemove 3");
#endif
#endif
		pmap_remove(pmap_kernel(),
		    (vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
		pmap_enter(pmap_kernel(),
		    (vm_offset_t)to, pa, VM_PROT_READ|VM_PROT_WRITE, 1,
		    VM_PROT_READ|VM_PROT_WRITE);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
void
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	register int size, prot;
{
	register pt_entry_t *pte;
	register u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*pte++ = PG_V | prot | page;
		page += NBPG;
	}
	TBIAS();
}

/*
 * Undo a physaccess() mapping: invalidate the PTEs and flush the
 * translation buffer.
 */
void
physunaccess(vaddr, size)
	caddr_t vaddr;
	register int size;
{
	register pt_entry_t *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*pte++ = PG_NV;
	TBIAS();
}

/*
 * Set a red zone in the kernel stack after the u. area.
 * We don't support a redzone right now.  It really isn't clear
 * that it is a good idea since, if the kernel stack were to roll
 * into a write protected page, the processor would lock up (since
 * it cannot create an exception frame) and we would get no useful
 * post-mortem info.  Currently, under the DEBUG option, we just
 * check at every clock interrupt to see if the current k-stack has
 * gone too far (i.e. into the "redzone" page) and if so, panic.
 * Look at _lev6intr in locore.s for more details.
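 * Since no redzone is actually set up, setredzone() below is just an
 * empty stub.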
 */
/*ARGSUSED*/
setredzone(pte, vaddr)
	pt_entry_t *pte;
	caddr_t vaddr;
{
}

/*
 * Convert kernel VA to physical address
 */
int
kvtop(addr)
	caddr_t addr;
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), (vm_offset_t)addr, &pa) == FALSE)
		panic("kvtop: zero page frame");
	return((int)pa);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.
 *
 * XXX we allocate KVA space by using kmem_alloc_wait which we know
 * allocates space without backing physical memory.  This implementation
 * is a total crock, the multiple mappings of these physical pages should
 * be reflected in the higher-level VM structures to avoid problems.
 */
void
vmapbuf(bp, siz)
	register struct buf *bp;
	vm_size_t siz;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
#if defined(UVM)
	kva = uvm_km_valloc_wait(phys_map, ctob(npf));
#else
	kva = kmem_alloc_wait(phys_map, ctob(npf));
#endif
	bp->b_data = (caddr_t)(kva + off);
	while (npf--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr, &pa) == FALSE)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, TRUE,
		    VM_PROT_READ|VM_PROT_WRITE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 */
void
vunmapbuf(bp, siz)
	register struct buf *bp;
	vm_size_t siz;
{
	register caddr_t addr;
	register int npf;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = bp->b_data;
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
#if defined(UVM)
	uvm_km_free_wakeup(phys_map, kva, ctob(npf));
#else
	kmem_free_wakeup(phys_map, kva, ctob(npf));
#endif
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}