From 8d29d4182b507adfa3eef84e51b3fe8848024326 Mon Sep 17 00:00:00 2001
From: Owain Ainsworth
Date: Thu, 8 Apr 2010 00:55:26 +0000
Subject: On amd64, move the bus_dma buftype stuff that is only used by sg_dma
 into the sg_dma code instead of main bus_dma. Add identical code to i386
 since this will be used in the next commit.

ok kettenis@ back in december.
---
 sys/arch/i386/i386/sg_dma.c | 69 +++++++++++++++++++++++++--------------------
 sys/arch/i386/include/bus.h | 34 +++++++++++++++++++++-
 2 files changed, 72 insertions(+), 31 deletions(-)

(limited to 'sys/arch/i386')

diff --git a/sys/arch/i386/i386/sg_dma.c b/sys/arch/i386/i386/sg_dma.c
index 0ec2b4c35ad..313564f0ab6 100644
--- a/sys/arch/i386/i386/sg_dma.c
+++ b/sys/arch/i386/i386/sg_dma.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: sg_dma.c,v 1.4 2009/08/09 13:35:43 oga Exp $	*/
+/*	$OpenBSD: sg_dma.c,v 1.5 2010/04/08 00:55:25 oga Exp $	*/
 /*
  * Copyright (c) 2009 Owain G. Ainsworth
  *
@@ -64,25 +64,6 @@
 #define MAX_DMA_SEGS	20
 #endif
 
-/*
- * per-map DVMA page table
- */
-struct sg_page_entry {
-	SPLAY_ENTRY(sg_page_entry)	spe_node;
-	paddr_t				spe_pa;
-	bus_addr_t			spe_va;
-};
-
-/* this should be in the map's dm_cookie. */
-struct sg_page_map {
-	SPLAY_HEAD(sg_page_tree, sg_page_entry) spm_tree;
-	int			 spm_maxpage;	/* Size of allocated page map */
-	int			 spm_pagecnt;	/* Number of entries in use */
-	bus_addr_t		 spm_start;	/* dva when bound */
-	bus_size_t		 spm_size;	/* size of bound map */
-	struct sg_page_entry	 spm_map[1];
-};
-
 int	sg_dmamap_load_seg(bus_dma_tag_t, struct sg_cookie *, bus_dmamap_t,
 	    bus_dma_segment_t *, int, int, bus_size_t, bus_size_t);
 
@@ -330,8 +311,13 @@ sg_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
 			}
 		}
 	}
-	if (err)
+	if (err) {
 		sg_dmamap_unload(t, map);
+	} else {
+		spm->spm_origbuf = buf;
+		spm->spm_buftype = BUS_BUFTYPE_LINEAR;
+		spm->spm_proc = p;
+	}
 
 	return (err);
 }
@@ -348,9 +334,10 @@ sg_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *mb,
 	 * This code is adapted from sparc64, for very fragmented data
 	 * we may need to adapt the algorithm
 	 */
-	bus_dma_segment_t	 segs[MAX_DMA_SEGS];
-	size_t			 len;
-	int			 i;
+	bus_dma_segment_t	 segs[MAX_DMA_SEGS];
+	struct sg_page_map	*spm = map->_dm_cookie;
+	size_t			 len;
+	int			 i, err;
 
 	/*
 	 * Make sure that on error condition we return "no valid mappings".
@@ -400,7 +387,13 @@ sg_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *mb,
 		}
 	}
 
-	return (sg_dmamap_load_raw(t, map, segs, i, (bus_size_t)len, flags));
+	err = sg_dmamap_load_raw(t, map, segs, i, (bus_size_t)len, flags);
+
+	if (err == 0) {
+		spm->spm_origbuf = mb;
+		spm->spm_buftype = BUS_BUFTYPE_MBUF;
+	}
+	return (err);
 }
 
 /*
@@ -415,9 +408,10 @@ sg_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
 	 * and unlock them at unload. Perhaps page loaning is the answer.
 	 * 'till then we only accept kernel data
 	 */
-	bus_dma_segment_t	 segs[MAX_DMA_SEGS];
-	size_t			 len;
-	int			 i, j;
+	bus_dma_segment_t	 segs[MAX_DMA_SEGS];
+	struct sg_page_map	*spm = map->_dm_cookie;
+	size_t			 len;
+	int			 i, j, err;
 
 	/*
 	 * Make sure that on errror we return "no valid mappings".
@@ -468,7 +462,13 @@ sg_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
 	}
 
-	return (sg_dmamap_load_raw(t, map, segs, i, (bus_size_t)len, flags));
+	err = sg_dmamap_load_raw(t, map, segs, i, (bus_size_t)len, flags);
+
+	if (err == 0) {
+		spm->spm_origbuf = uio;
+		spm->spm_buftype = BUS_BUFTYPE_UIO;
+	}
+	return (err);
 }
 
 /*
@@ -572,8 +572,13 @@ sg_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
 	err = sg_dmamap_load_seg(t, is, map, segs, nsegs,
 	    flags, size, boundary);
 
-	if (err)
+	if (err) {
 		sg_dmamap_unload(t, map);
+	} else {
+		/* This will be overwritten if mbuf or uio called us */
+		spm->spm_origbuf = segs;
+		spm->spm_buftype = BUS_BUFTYPE_RAW;
+	}
 
 	return (err);
 }
@@ -773,6 +778,10 @@ sg_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
 	mtx_leave(&is->sg_mtx);
 	if (error != 0)
 		printf("warning: %qd of DVMA space lost\n", sgsize);
+
+	spm->spm_buftype = BUS_BUFTYPE_INVALID;
+	spm->spm_origbuf = NULL;
+	spm->spm_proc = NULL;
 	_bus_dmamap_unload(t, map);
 }
 
diff --git a/sys/arch/i386/include/bus.h b/sys/arch/i386/include/bus.h
index 8ce28373ec1..120b2b3de05 100644
--- a/sys/arch/i386/include/bus.h
+++ b/sys/arch/i386/include/bus.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: bus.h,v 1.51 2009/07/30 21:39:15 miod Exp $	*/
+/*	$OpenBSD: bus.h,v 1.52 2010/04/08 00:55:25 oga Exp $	*/
 /*	$NetBSD: bus.h,v 1.6 1996/11/10 03:19:25 thorpej Exp $	*/
 
 /*-
@@ -67,6 +67,7 @@
 #define _I386_BUS_H_
 
 #include <sys/mutex.h>
+#include <sys/tree.h>
 
 #include
 
@@ -456,6 +457,13 @@ void	bus_space_copy_4(bus_space_tag_t, bus_space_handle_t, bus_size_t,
 #define	BUS_DMA_ZERO	0x1000	/* dmamem_alloc return zeroed mem */
 #define	BUS_DMA_SG	0x2000	/* Internal. memory is for SG map */
 
+/* types for _dm_buftype */
+#define	BUS_BUFTYPE_INVALID	0
+#define	BUS_BUFTYPE_LINEAR	1
+#define	BUS_BUFTYPE_MBUF	2
+#define	BUS_BUFTYPE_UIO		3
+#define	BUS_BUFTYPE_RAW		4
+
 /* Forwards needed by prototypes below. */
 struct mbuf;
 struct proc;
@@ -631,6 +639,30 @@ struct sg_cookie {
 	void	(*flush_tlb)(void *);
 };
 
+/*
+ * per-map DVMA page table
+ */
+struct sg_page_entry {
+	SPLAY_ENTRY(sg_page_entry)	spe_node;
+	paddr_t				spe_pa;
+	bus_addr_t			spe_va;
+};
+
+/* for sg_dma this will be in the map's dm_cookie. */
+struct sg_page_map {
+	SPLAY_HEAD(sg_page_tree, sg_page_entry) spm_tree;
+
+	void			*spm_origbuf;	/* pointer to original data */
+	int			 spm_buftype;	/* type of data */
+	struct proc		*spm_proc;	/* proc that owns the mapping */
+
+	int			 spm_maxpage;	/* Size of allocated page map */
+	int			 spm_pagecnt;	/* Number of entries in use */
+	bus_addr_t		 spm_start;	/* dva when bound */
+	bus_size_t		 spm_size;	/* size of bound map */
+	struct sg_page_entry	 spm_map[1];
+};
+
 struct sg_cookie	*sg_dmatag_init(char *, void *, bus_addr_t, bus_size_t,
 			    void (*)(void *, vaddr_t, paddr_t, int),
 			    void (*)(void *, vaddr_t), void (*)(void *));
--
cgit v1.2.3
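
The new spm_origbuf, spm_buftype and spm_proc fields record at load time what kind of buffer (linear KVA, mbuf chain, uio, or raw segment list) a map was built from, so code that later holds only the bus_dmamap_t can find its way back to the original data. The sketch below is purely illustrative and is not part of this commit or of the follow-up it mentions: the function name sg_dmamap_reload is invented, and reusing dm_mapsize/dm_nsegs as the replay arguments is an assumption about how such a helper might be wired up.

/*
 * Hypothetical sketch only -- not from the commit above.  Dispatches on the
 * buffer type recorded by the sg_dmamap_load*() functions to redo a load
 * using the bookkeeping kept in the map's sg_page_map cookie.
 */
int
sg_dmamap_reload(bus_dma_tag_t t, bus_dmamap_t map, int flags)
{
	struct sg_page_map *spm = map->_dm_cookie;

	switch (spm->spm_buftype) {
	case BUS_BUFTYPE_LINEAR:
		/* linear KVA buffer; owning proc was recorded at load time */
		return (sg_dmamap_load(t, map, spm->spm_origbuf,
		    map->dm_mapsize, spm->spm_proc, flags));
	case BUS_BUFTYPE_MBUF:
		return (sg_dmamap_load_mbuf(t, map, spm->spm_origbuf, flags));
	case BUS_BUFTYPE_UIO:
		return (sg_dmamap_load_uio(t, map, spm->spm_origbuf, flags));
	case BUS_BUFTYPE_RAW:
		/* spm_origbuf points at the caller's segment array */
		return (sg_dmamap_load_raw(t, map, spm->spm_origbuf,
		    map->dm_nsegs, map->dm_mapsize, flags));
	case BUS_BUFTYPE_INVALID:
	default:
		return (EINVAL);	/* map is not currently loaded */
	}
}

A real consumer would also have to guarantee that the recorded buffer, mbuf, uio or proc is still valid when the load is replayed; the commit itself only records these fields on successful load and clears them again in sg_dmamap_unload().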