author	Martin Pieuchot <mpi@cvs.openbsd.org>	2015-01-20 17:08:36 +0000
committer	Martin Pieuchot <mpi@cvs.openbsd.org>	2015-01-20 17:08:36 +0000
commit	171179289f76d38633ca3d3c623cbed8a4871038 (patch)
tree	787ddefc4cd7ec3a498fe56b8a6badb38effdc1a /sys/arch/powerpc
parent	434b99412fc3d596dcf5ca68d60fdb9cc5cd9843 (diff)
Merge two copies of the same dma code into one file and sync the headers.
ok kettenis@
Diffstat (limited to 'sys/arch/powerpc')
-rw-r--r--	sys/arch/powerpc/conf/files.powerpc	3
-rw-r--r--	sys/arch/powerpc/powerpc/bus_dma.c	619
2 files changed, 621 insertions(+), 1 deletion(-)
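Nothing in this commit changes how drivers use DMA: they keep going through the machine-independent bus_dma(9) wrappers, which dispatch through a bus-specific tag into the common functions added below. As a point of reference, here is a minimal, hypothetical consumer-side sketch of that lifecycle; the tag, buffer, function name, and error handling are illustrative only and are not part of this change.

/*
 * Hypothetical driver-side use of bus_dma(9).  On powerpc each of
 * these calls now lands in the shared _dmamap_*()/_dmamem_*() code.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>

int
example_dma_read(bus_dma_tag_t dmat, void *buf, bus_size_t len)
{
	bus_dmamap_t map;
	int error;

	/* One segment, no boundary restriction: backed by _dmamap_create(). */
	error = bus_dmamap_create(dmat, len, 1, len, 0,
	    BUS_DMA_NOWAIT, &map);
	if (error)
		return (error);

	/*
	 * Resolve the kernel buffer to physical segments:
	 * _dmamap_load() -> _dmamap_load_buffer().
	 */
	error = bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error) {
		bus_dmamap_destroy(dmat, map);
		return (error);
	}

	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREREAD);
	/* ... program the device with map->dm_segs[0].ds_addr ... */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTREAD);

	bus_dmamap_unload(dmat, map);
	bus_dmamap_destroy(dmat, map);
	return (0);
}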
diff --git a/sys/arch/powerpc/conf/files.powerpc b/sys/arch/powerpc/conf/files.powerpc
index 279e4831b06..5c72c873f68 100644
--- a/sys/arch/powerpc/conf/files.powerpc
+++ b/sys/arch/powerpc/conf/files.powerpc
@@ -1,4 +1,4 @@
-# $OpenBSD: files.powerpc,v 1.48 2013/06/13 04:34:32 deraadt Exp $
+# $OpenBSD: files.powerpc,v 1.49 2015/01/20 17:08:35 mpi Exp $
 #
 
 file arch/powerpc/powerpc/setjmp.S ddb
@@ -6,6 +6,7 @@ file arch/powerpc/powerpc/copystr.c
 file arch/powerpc/powerpc/cpu_subr.c
 file arch/powerpc/powerpc/fpu.c
 file arch/powerpc/powerpc/in_cksum.c inet
+file arch/powerpc/powerpc/bus_dma.c
 file arch/powerpc/powerpc/pmap.c
 file arch/powerpc/powerpc/process_machdep.c
 file arch/powerpc/powerpc/sys_machdep.c
diff --git a/sys/arch/powerpc/powerpc/bus_dma.c b/sys/arch/powerpc/powerpc/bus_dma.c
new file mode 100644
index 00000000000..963bbb1d0c4
--- /dev/null
+++ b/sys/arch/powerpc/powerpc/bus_dma.c
@@ -0,0 +1,619 @@
+/* $OpenBSD: bus_dma.c,v 1.1 2015/01/20 17:08:35 mpi Exp $ */
+/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/extent.h>
+#include <sys/buf.h>
+#include <sys/device.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/file.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mount.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/bus.h>
+
+int _dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
+    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
+    int nsegs, int *rsegs, int flags, vaddr_t low, vaddr_t high);
+int _dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
+ struct proc *, int, bus_addr_t *, int *, int);
+/*
+ * Common function for DMA map creation. May be called by bus-specific
+ * DMA map creation functions.
+ */
+int
+_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
+ bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
+{
+ struct powerpc_bus_dmamap *map;
+ void *mapstore;
+ size_t mapsize;
+
+ /*
+ * Allocate and initialize the DMA map. The end of the map
+ * is a variable-sized array of segments, so we allocate enough
+ * room for them in one shot.
+ *
+ * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
+ * of ALLOCNOW notifies others that we've reserved these resources,
+ * and they are not to be freed.
+ *
+ * The bus_dmamap_t includes one bus_dma_segment_t, hence
+ * the (nsegments - 1).
+ */
+ mapsize = sizeof(struct powerpc_bus_dmamap) +
+ (sizeof(bus_dma_segment_t) * (nsegments - 1));
+ if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
+ (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
+ return (ENOMEM);
+
+ map = (struct powerpc_bus_dmamap *)mapstore;
+ map->_dm_size = size;
+ map->_dm_segcnt = nsegments;
+ map->_dm_maxsegsz = maxsegsz;
+ map->_dm_boundary = boundary;
+ map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
+ map->dm_nsegs = 0; /* no valid mappings */
+ map->dm_mapsize = 0;
+
+ *dmamp = map;
+ return (0);
+}
+
+/*
+ * Common function for DMA map destruction. May be called by bus-specific
+ * DMA map destruction functions.
+ */
+void
+_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
+{
+
+ free(map, M_DEVBUF, 0);
+}
+
+int
+_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, struct proc *p, int flags, bus_addr_t *lastaddrp,
+ int *segp, int first)
+{
+ bus_size_t sgsize;
+ bus_addr_t curaddr, lastaddr, baddr, bmask;
+ vaddr_t vaddr = (vaddr_t)buf;
+ pmap_t pmap;
+ int seg;
+
+ lastaddr = *lastaddrp;
+ bmask = ~(map->_dm_boundary - 1);
+
+ if (p != NULL)
+ pmap = p->p_vmspace->vm_map.pmap;
+ else
+ pmap = pmap_kernel();
+
+ for (seg = *segp; buflen > 0; ) {
+ /*
+ * Get the physical address for this segment.
+ */
+ if (pmap_extract(pmap, vaddr, (paddr_t *)&curaddr) != TRUE) {
+ panic("dmamap_load_buffer pmap %p vaddr %lx "
+ "pmap_extract failed", pmap, vaddr);
+ }
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
+ if (buflen < sgsize)
+ sgsize = buflen;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ if (map->_dm_boundary > 0) {
+ baddr = (curaddr + map->_dm_boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with the
+ * previous segment if possible.
+ */
+ if (first) {
+ map->dm_segs[seg].ds_addr = curaddr;
+ map->dm_segs[seg].ds_len = sgsize;
+ first = 0;
+ } else {
+ if (curaddr == lastaddr &&
+ (map->dm_segs[seg].ds_len + sgsize) <=
+ map->_dm_maxsegsz &&
+ (map->_dm_boundary == 0 ||
+ (map->dm_segs[seg].ds_addr & bmask) ==
+ (curaddr & bmask)))
+ map->dm_segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= map->_dm_segcnt)
+ break;
+ map->dm_segs[seg].ds_addr = curaddr;
+ map->dm_segs[seg].ds_len = sgsize;
+ }
+ }
+
+ lastaddr = curaddr + sgsize;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ *segp = seg;
+ *lastaddrp = lastaddr;
+
+ /*
+ * Did we fit?
+ */
+ if (buflen != 0)
+	return (EFBIG);		/* XXX better return value here? */
+
+ return (0);
+}
+
+/*
+ * Common function for loading a DMA map with a linear buffer. May
+ * be called by bus-specific DMA map load functions.
+ */
+int
+_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
+ struct proc *p, int flags)
+{
+ bus_addr_t lastaddr;
+ int seg, error;
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+ if (buflen > map->_dm_size)
+ return (EINVAL);
+
+ seg = 0;
+ error = _dmamap_load_buffer(t, map, buf, buflen, p, flags,
+ &lastaddr, &seg, 1);
+ if (error == 0) {
+ map->dm_mapsize = buflen;
+ map->dm_nsegs = seg + 1;
+ }
+ return (error);
+}
+
+/*
+ * Like _bus_dmamap_load(), but for mbufs.
+ */
+int
+_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
+ int flags)
+{
+ bus_addr_t lastaddr;
+ int seg, error, first;
+ struct mbuf *m;
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+#ifdef DIAGNOSTIC
+ if ((m0->m_flags & M_PKTHDR) == 0)
+ panic("_bus_dmamap_load_mbuf: no packet header");
+#endif
+
+ if (m0->m_pkthdr.len > map->_dm_size)
+ return (EINVAL);
+
+ first = 1;
+ seg = 0;
+ error = 0;
+ for (m = m0; m != NULL && error == 0; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ error = _dmamap_load_buffer(t, map, m->m_data, m->m_len,
+ NULL, flags, &lastaddr, &seg, first);
+ first = 0;
+ }
+ if (error == 0) {
+ map->dm_mapsize = m0->m_pkthdr.len;
+ map->dm_nsegs = seg + 1;
+ }
+ return (error);
+}
+
+/*
+ * Like _bus_dmamap_load(), but for uios.
+ */
+int
+_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags)
+{
+ bus_addr_t lastaddr;
+ int seg, i, error, first;
+ bus_size_t minlen, resid;
+ struct proc *p = NULL;
+ struct iovec *iov;
+ caddr_t addr;
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+ resid = uio->uio_resid;
+ iov = uio->uio_iov;
+
+ if (resid > map->_dm_size)
+ return (EINVAL);
+
+ if (uio->uio_segflg == UIO_USERSPACE) {
+ p = uio->uio_procp;
+#ifdef DIAGNOSTIC
+ if (p == NULL)
+ panic("_bus_dmamap_load_uio: USERSPACE but no proc");
+#endif
+ }
+
+ first = 1;
+ seg = 0;
+ error = 0;
+ for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
+ /*
+ * Now at the first iovec to load. Load each iovec
+ * until we have exhausted the residual count.
+ */
+ minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
+ addr = (caddr_t)iov[i].iov_base;
+
+ error = _dmamap_load_buffer(t, map, addr, minlen,
+ p, flags, &lastaddr, &seg, first);
+ first = 0;
+
+ resid -= minlen;
+ }
+ if (error == 0) {
+ map->dm_mapsize = uio->uio_resid;
+ map->dm_nsegs = seg + 1;
+ }
+ return (error);
+}
+
+/*
+ * Like _bus_dmamap_load(), but for raw memory allocated with
+ * bus_dmamem_alloc().
+ */
+int
+_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
+ int nsegs, bus_size_t size, int flags)
+{
+ if (nsegs > map->_dm_segcnt || size > map->_dm_size)
+ return (EINVAL);
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ if (map->_dm_boundary) {
+ bus_addr_t bmask = ~(map->_dm_boundary - 1);
+ int i;
+
+ for (i = 0; i < nsegs; i++) {
+ if (segs[i].ds_len > map->_dm_maxsegsz)
+ return (EINVAL);
+ if ((segs[i].ds_addr & bmask) !=
+ ((segs[i].ds_addr + segs[i].ds_len - 1) & bmask))
+ return (EINVAL);
+ }
+ }
+
+ bcopy(segs, map->dm_segs, nsegs * sizeof(*segs));
+ map->dm_nsegs = nsegs;
+ map->dm_mapsize = size;
+ return (0);
+}
+
+/*
+ * Common function for unloading a DMA map. May be called by
+ * bus-specific DMA map unload functions.
+ */
+void
+_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
+{
+
+ /*
+ * No resources to free; just mark the mappings as
+ * invalid.
+ */
+ map->dm_nsegs = 0;
+ map->dm_mapsize = 0;
+}
+
+/*
+ * Common function for DMA map synchronization. May be called
+ * by bus-specific DMA map synchronization functions.
+ */
+void
+_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
+    bus_size_t len, int op)
+{
+ int i;
+ bus_size_t minlen, wlen;
+ bus_addr_t pa, addr;
+ struct vm_page *pg;
+
+ for (i = 0; i < map->dm_nsegs && len != 0; i++) {
+ /* Find the beginning segment. */
+ if (offset >= map->dm_segs[i].ds_len) {
+ offset -= map->dm_segs[i].ds_len;
+ continue;
+ }
+
+ minlen = len < map->dm_segs[i].ds_len - offset ?
+ len : map->dm_segs[i].ds_len - offset;
+
+ addr = map->dm_segs[i].ds_addr + offset;
+
+ switch (op) {
+ case BUS_DMASYNC_POSTWRITE:
+ for (pa = trunc_page(addr), wlen = 0;
+ pa < round_page(addr + minlen);
+ pa += PAGE_SIZE) {
+ pg = PHYS_TO_VM_PAGE(pa);
+ if (pg != NULL)
+ atomic_clearbits_int(&pg->pg_flags,
+ PG_PMAP_EXE);
+ }
+ }
+
+ }
+}
+
+/*
+ * Common function for DMA-safe memory allocation. May be called
+ * by bus-specific DMA memory allocation functions.
+ */
+int
+_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
+ bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
+ int flags)
+{
+ return (_dmamem_alloc_range(t, size, alignment, boundary,
+ segs, nsegs, rsegs, flags, 0, -1));
+}
+
+/*
+ * Common function for freeing DMA-safe memory. May be called by
+ * bus-specific DMA memory free functions.
+ */
+void
+_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
+{
+ struct vm_page *m;
+ bus_addr_t addr;
+ struct pglist mlist;
+ int curseg;
+
+ /*
+ * Build a list of pages to free back to the VM system.
+ */
+ TAILQ_INIT(&mlist);
+ for (curseg = 0; curseg < nsegs; curseg++) {
+ for (addr = segs[curseg].ds_addr;
+ addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+ addr += PAGE_SIZE) {
+ m = PHYS_TO_VM_PAGE(addr);
+ TAILQ_INSERT_TAIL(&mlist, m, pageq);
+ }
+ }
+
+ uvm_pglistfree(&mlist);
+}
+
+/*
+ * Common function for mapping DMA-safe memory. May be called by
+ * bus-specific DMA memory map functions.
+ */
+int
+_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
+ caddr_t *kvap, int flags)
+{
+ vaddr_t va, sva;
+ size_t ssize;
+ bus_addr_t addr;
+ int curseg, pmapflags = 0, error;
+ const struct kmem_dyn_mode *kd;
+
+ if (flags & BUS_DMA_NOCACHE)
+ pmapflags |= PMAP_NOCACHE;
+
+ size = round_page(size);
+ kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
+ va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
+ if (va == 0)
+ return (ENOMEM);
+
+ *kvap = (caddr_t)va;
+
+ sva = va;
+ ssize = size;
+ for (curseg = 0; curseg < nsegs; curseg++) {
+ for (addr = segs[curseg].ds_addr;
+ addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+ addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
+ if (size == 0)
+ panic("_bus_dmamem_map: size botch");
+ error = pmap_enter(pmap_kernel(), va, addr | pmapflags,
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
+ if (error) {
+ pmap_update(pmap_kernel());
+ km_free((void *)sva, ssize, &kv_any, &kp_none);
+ return (error);
+ }
+ }
+ }
+ pmap_update(pmap_kernel());
+
+ return (0);
+}
+
+/*
+ * Common function for unmapping DMA-safe memory. May be called by
+ * bus-specific DMA memory unmapping functions.
+ */
+void
+_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
+{
+
+#ifdef DIAGNOSTIC
+ if ((u_long)kva & PGOFSET)
+ panic("_bus_dmamem_unmap");
+#endif
+
+ km_free(kva, round_page(size), &kv_any, &kp_none);
+}
+
+/*
+ * Common function for mmap(2)'ing DMA-safe memory. May be called by
+ * bus-specific DMA mmap(2)'ing functions.
+ */
+paddr_t
+_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
+ int prot, int flags)
+{
+ int i, pmapflags = 0;
+
+ if (flags & BUS_DMA_NOCACHE)
+ pmapflags |= PMAP_NOCACHE;
+
+ for (i = 0; i < nsegs; i++) {
+#ifdef DIAGNOSTIC
+ if (off & PGOFSET)
+ panic("_bus_dmamem_mmap: offset unaligned");
+ if (segs[i].ds_addr & PGOFSET)
+ panic("_bus_dmamem_mmap: segment unaligned");
+ if (segs[i].ds_len & PGOFSET)
+ panic("_bus_dmamem_mmap: segment size not multiple"
+ " of page size");
+#endif
+ if (off >= segs[i].ds_len) {
+ off -= segs[i].ds_len;
+ continue;
+ }
+
+ return ((segs[i].ds_addr + off) | pmapflags);
+ }
+
+ /* Page not found. */
+ return (-1);
+}
+
+/**********************************************************************
+ * DMA utility functions
+ **********************************************************************/
+
+/*
+ * Allocate physical memory from the given physical address range.
+ * Called by DMA-safe memory allocation methods.
+ */
+int
+_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
+ bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
+ int flags, vaddr_t low, vaddr_t high)
+{
+ vaddr_t curaddr, lastaddr;
+ struct vm_page *m;
+ struct pglist mlist;
+ int curseg, error, plaflag;
+
+ /* Always round the size. */
+ size = round_page(size);
+
+ /*
+ * Allocate pages from the VM system.
+ */
+ plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
+ if (flags & BUS_DMA_ZERO)
+ plaflag |= UVM_PLA_ZERO;
+
+ TAILQ_INIT(&mlist);
+ error = uvm_pglistalloc(size, low, high,
+ alignment, boundary, &mlist, nsegs, plaflag);
+ if (error)
+ return (error);
+
+ /*
+ * Compute the location, size, and number of segments actually
+ * returned by the VM code.
+ */
+ m = TAILQ_FIRST(&mlist);
+ curseg = 0;
+ lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
+ segs[curseg].ds_len = PAGE_SIZE;
+ m = TAILQ_NEXT(m, pageq);
+
+ for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
+ curaddr = VM_PAGE_TO_PHYS(m);
+#ifdef DIAGNOSTIC
+ if (curaddr < low || curaddr >= high) {
+ printf("vm_page_alloc_memory returned non-sensical"
+ " address 0x%lx\n", curaddr);
+ panic("dmamem_alloc_range");
+ }
+#endif
+ if (curaddr == (lastaddr + PAGE_SIZE))
+ segs[curseg].ds_len += PAGE_SIZE;
+ else {
+ curseg++;
+ segs[curseg].ds_addr = curaddr;
+ segs[curseg].ds_len = PAGE_SIZE;
+ }
+ lastaddr = curaddr;
+ }
+
+ *rsegs = curseg + 1;
+
+ return (0);
+}
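The payoff of the merge is that the bus front ends which previously carried private copies of this code can now point their DMA tags straight at the single shared implementation. Below is a sketch of such a tag initializer; the member order of struct powerpc_bus_dma_tag is assumed here and the authoritative layout lives in <machine/bus.h>, so treat this as illustrative rather than as part of the diff.

/*
 * Hypothetical bus-specific tag wired to the common functions above;
 * check <machine/bus.h> for the actual field order.
 */
#include <machine/bus.h>

struct powerpc_bus_dma_tag example_bus_dma_tag = {
	NULL,			/* no per-bus cookie */
	_dmamap_create,
	_dmamap_destroy,
	_dmamap_load,
	_dmamap_load_mbuf,
	_dmamap_load_uio,
	_dmamap_load_raw,
	_dmamap_unload,
	_dmamap_sync,
	_dmamem_alloc,
	_dmamem_free,
	_dmamem_map,
	_dmamem_unmap,
	_dmamem_mmap
};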