diff options
author | Owain Ainsworth <oga@cvs.openbsd.org> | 2009-04-05 21:57:42 +0000 |
---|---|---|
committer | Owain Ainsworth <oga@cvs.openbsd.org> | 2009-04-05 21:57:42 +0000 |
commit | 54b53150f9fa46d24fb7ad9f8d1d55a7420951e7 (patch) | |
tree | 79cf1befd894bb8915fa9f4bb24f1b5135da4e52 /sys/arch/sparc64/dev | |
parent | 050e2e9fac4e7632f4615f5b63d5220db75d9218 (diff) |
In the rare case where after we've loaded the iomap into the hardware,
if we fail while assembling the dmamap due to the memory not fitting
into our constraints we'll return from the function with the iomap still
loaded, and more importantly with memory still allocated from the
extent(9). So in such a case, make sure we clean up after ourselves.
In order to make this cleaner, remove an impossible condition check
(kettenis and myself are satisfied that it will never happen), and make
iomap_load_map void (it can't fail), so that we can only fail after both
the extent is allocated and the iomap is loaded, and not in between the
two.
I tested iommu, kettenis tested viommu.
ok kettenis@.
Diffstat (limited to 'sys/arch/sparc64/dev')
-rw-r--r-- | sys/arch/sparc64/dev/iommu.c | 63 | ||||
-rw-r--r-- | sys/arch/sparc64/dev/viommu.c | 59 |
2 files changed, 71 insertions, 51 deletions
diff --git a/sys/arch/sparc64/dev/iommu.c b/sys/arch/sparc64/dev/iommu.c index b66a9f8f284..6f1fd90b236 100644 --- a/sys/arch/sparc64/dev/iommu.c +++ b/sys/arch/sparc64/dev/iommu.c @@ -1,4 +1,4 @@ -/* $OpenBSD: iommu.c,v 1.55 2009/03/16 21:00:48 oga Exp $ */ +/* $OpenBSD: iommu.c,v 1.56 2009/04/05 21:57:41 oga Exp $ */ /* $NetBSD: iommu.c,v 1.47 2002/02/08 20:03:45 eeh Exp $ */ /* @@ -90,9 +90,9 @@ int64_t iommu_tsb_entry(struct iommu_state *, vaddr_t); void strbuf_reset(struct strbuf_ctl *); int iommu_iomap_insert_page(struct iommu_map_state *, paddr_t); vaddr_t iommu_iomap_translate(struct iommu_map_state *, paddr_t); -int iommu_iomap_load_map(struct iommu_state *, struct iommu_map_state *, +void iommu_iomap_load_map(struct iommu_state *, struct iommu_map_state *, vaddr_t, int); -int iommu_iomap_unload_map(struct iommu_state *, struct iommu_map_state *); +void iommu_iomap_unload_map(struct iommu_state *, struct iommu_map_state *); struct iommu_map_state *iommu_iomap_create(int); void iommu_iomap_destroy(struct iommu_map_state *); void iommu_iomap_clear_pages(struct iommu_map_state *); @@ -763,9 +763,6 @@ iommu_dvmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, if (err != 0) return (err); - if (dvmaddr == (bus_addr_t)-1) - return (ENOMEM); - /* Set the active DVMA map */ map->_dm_dvmastart = dvmaddr; map->_dm_dvmasize = sgsize; @@ -776,8 +773,7 @@ iommu_dvmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, iommu_dvmamap_validate_map(t, is, map); #endif - if (iommu_iomap_load_map(is, ims, dvmaddr, flags)) - return (EFBIG); + iommu_iomap_load_map(is, ims, dvmaddr, flags); { /* Scope */ bus_addr_t a, aend; @@ -794,8 +790,8 @@ iommu_dvmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, /* Yuck... Redoing the same pmap_extract... 
*/ if (pmap_extract(pmap, a, &pa) == FALSE) { printf("iomap pmap error addr 0x%llx\n", a); - iommu_iomap_clear_pages(ims); - return (EFBIG); + err = EFBIG; + break; } pgstart = pa | (MAX(a, addr) & PAGE_MASK); @@ -809,17 +805,16 @@ iommu_dvmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, err = iommu_dvmamap_append_range(t, map, pgstart, pglen, flags, boundary); if (err == EFBIG) - return (err); - if (err) { + break; + else if (err) { printf("iomap load seg page: %d for " "va 0x%llx pa %lx (%llx - %llx) " "for %d/0x%x\n", err, a, pa, pgstart, pgend, pglen, pglen); - return (err); + break; } } } - #ifdef DEBUG iommu_dvmamap_validate_map(t, is, map); @@ -835,6 +830,18 @@ iommu_dvmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, #endif } #endif + if (err) { + /* XXX keep enough state and just call unload here? */ + iommu_iomap_unload_map(is, ims); + iommu_iomap_clear_pages(ims); + map->dm_mapsize = 0; + map->dm_nsegs = 0; + mtx_enter(&is->is_mtx); + err = extent_free(is->is_dvmamap, dvmaddr, sgsize, EX_NOWAIT); + map->_dm_dvmastart = 0; + map->_dm_dvmasize = 0; + mtx_leave(&is->is_mtx); + } return (err); } @@ -976,8 +983,6 @@ iommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, #endif } #endif - if (dvmaddr == (bus_addr_t)-1) - return (ENOMEM); /* Set the active DVMA map */ map->_dm_dvmastart = dvmaddr; @@ -989,8 +994,7 @@ iommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, iommu_dvmamap_validate_map(t, is, map); #endif - if (iommu_iomap_load_map(is, ims, dvmaddr, flags)) - return (EFBIG); + iommu_iomap_load_map(is, ims, dvmaddr, flags); if (segs[0]._ds_mlist) err = iommu_dvmamap_load_mlist(t, is, map, segs[0]._ds_mlist, @@ -999,9 +1003,6 @@ iommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, err = iommu_dvmamap_load_seg(t, is, map, segs, nsegs, flags, size, boundary); - if (err) - iommu_iomap_unload_map(is, ims); - #ifdef DEBUG /* The map should be valid even if the 
load failed */ if (iommu_dvmamap_validate_map(t, is, map)) { @@ -1049,6 +1050,18 @@ iommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, #endif } #endif + if (err) { + /* XXX keep enough state and just call unload here? */ + iommu_iomap_unload_map(is, ims); + iommu_iomap_clear_pages(ims); + map->dm_mapsize = 0; + map->dm_nsegs = 0; + mtx_enter(&is->is_mtx); + err = extent_free(is->is_dvmamap, dvmaddr, sgsize, EX_NOWAIT); + map->_dm_dvmastart = 0; + map->_dm_dvmasize = 0; + mtx_leave(&is->is_mtx); + } return (err); } @@ -1719,7 +1732,7 @@ iommu_iomap_insert_page(struct iommu_map_state *ims, paddr_t pa) * Locate the iomap by filling in the pa->va mapping and inserting it * into the IOMMU tables. */ -int +void iommu_iomap_load_map(struct iommu_state *is, struct iommu_map_state *ims, vaddr_t vmaddr, int flags) { @@ -1749,14 +1762,12 @@ iommu_iomap_load_map(struct iommu_state *is, struct iommu_map_state *ims, vmaddr += PAGE_SIZE; } - - return (0); } /* * Remove the iomap from the IOMMU. 
*/ -int +void iommu_iomap_unload_map(struct iommu_state *is, struct iommu_map_state *ims) { struct iommu_page_map *ipm = &ims->ims_map; @@ -1774,8 +1785,6 @@ iommu_iomap_unload_map(struct iommu_state *is, struct iommu_map_state *ims) IOMMUREG_WRITE(is, iommu_cache_flush, is->is_ptsb + slot * 8); } - - return (0); } /* diff --git a/sys/arch/sparc64/dev/viommu.c b/sys/arch/sparc64/dev/viommu.c index 3f6b6160917..10d1b0ba3c7 100644 --- a/sys/arch/sparc64/dev/viommu.c +++ b/sys/arch/sparc64/dev/viommu.c @@ -1,4 +1,4 @@ -/* $OpenBSD: viommu.c,v 1.4 2009/03/16 21:00:48 oga Exp $ */ +/* $OpenBSD: viommu.c,v 1.5 2009/04/05 21:57:41 oga Exp $ */ /* $NetBSD: iommu.c,v 1.47 2002/02/08 20:03:45 eeh Exp $ */ /* @@ -86,9 +86,9 @@ int viommu_dvmamap_append_range(bus_dma_tag_t, bus_dmamap_t, paddr_t, bus_size_t, int, bus_size_t); int iommu_iomap_insert_page(struct iommu_map_state *, paddr_t); vaddr_t iommu_iomap_translate(struct iommu_map_state *, paddr_t); -int viommu_iomap_load_map(struct iommu_state *, struct iommu_map_state *, +void viommu_iomap_load_map(struct iommu_state *, struct iommu_map_state *, vaddr_t, int); -int viommu_iomap_unload_map(struct iommu_state *, struct iommu_map_state *); +void viommu_iomap_unload_map(struct iommu_state *, struct iommu_map_state *); struct iommu_map_state *viommu_iomap_create(int); void iommu_iomap_destroy(struct iommu_map_state *); void iommu_iomap_clear_pages(struct iommu_map_state *); @@ -376,17 +376,13 @@ viommu_dvmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, if (err != 0) return (err); - if (dvmaddr == (bus_addr_t)-1) - return (ENOMEM); - /* Set the active DVMA map */ map->_dm_dvmastart = dvmaddr; map->_dm_dvmasize = sgsize; map->dm_mapsize = buflen; - if (viommu_iomap_load_map(is, ims, dvmaddr, flags)) - return (EFBIG); + viommu_iomap_load_map(is, ims, dvmaddr, flags); { /* Scope */ bus_addr_t a, aend; @@ -403,8 +399,8 @@ viommu_dvmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, /* Yuck... 
Redoing the same pmap_extract... */ if (pmap_extract(pmap, a, &pa) == FALSE) { printf("iomap pmap error addr 0x%llx\n", a); - iommu_iomap_clear_pages(ims); - return (EFBIG); + err = EFBIG; + break; } pgstart = pa | (MAX(a, addr) & PAGE_MASK); @@ -418,16 +414,28 @@ viommu_dvmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, err = viommu_dvmamap_append_range(t, map, pgstart, pglen, flags, boundary); if (err == EFBIG) - return (err); - if (err) { + break; + else if (err) { printf("iomap load seg page: %d for " "va 0x%llx pa %lx (%llx - %llx) " "for %d/0x%x\n", err, a, pa, pgstart, pgend, pglen, pglen); - return (err); + break; } } } + if (err) { + /* XXX keep enough state and just call unload here? */ + viommu_iomap_unload_map(is, ims); + iommu_iomap_clear_pages(ims); + map->dm_mapsize = 0; + map->dm_nsegs = 0; + mtx_enter(&is->is_mtx); + err = extent_free(is->is_dvmamap, dvmaddr, sgsize, EX_NOWAIT); + map->_dm_dvmastart = 0; + map->_dm_dvmasize = 0; + mtx_leave(&is->is_mtx); + } return (err); } @@ -563,8 +571,6 @@ viommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, #endif } #endif - if (dvmaddr == (bus_addr_t)-1) - return (ENOMEM); /* Set the active DVMA map */ map->_dm_dvmastart = dvmaddr; @@ -572,8 +578,7 @@ viommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, map->dm_mapsize = size; - if (viommu_iomap_load_map(is, ims, dvmaddr, flags)) - return (EFBIG); + viommu_iomap_load_map(is, ims, dvmaddr, flags); if (segs[0]._ds_mlist) err = viommu_dvmamap_load_mlist(t, is, map, segs[0]._ds_mlist, @@ -582,8 +587,18 @@ viommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, err = viommu_dvmamap_load_seg(t, is, map, segs, nsegs, flags, size, boundary); - if (err) + if (err) { + /* XXX keep enough state and just call unload here? 
*/ viommu_iomap_unload_map(is, ims); + iommu_iomap_clear_pages(ims); + map->dm_mapsize = 0; + map->dm_nsegs = 0; + mtx_enter(&is->is_mtx); + err = extent_free(is->is_dvmamap, dvmaddr, sgsize, EX_NOWAIT); + map->_dm_dvmastart = 0; + map->_dm_dvmasize = 0; + mtx_leave(&is->is_mtx); + } return (err); } @@ -929,7 +944,7 @@ viommu_iomap_create(int n) * Locate the iomap by filling in the pa->va mapping and inserting it * into the IOMMU tables. */ -int +void viommu_iomap_load_map(struct iommu_state *is, struct iommu_map_state *ims, vaddr_t vmaddr, int flags) { @@ -942,14 +957,12 @@ viommu_iomap_load_map(struct iommu_state *is, struct iommu_map_state *ims, viommu_enter(is, NULL, e->ipe_va, e->ipe_pa, flags); vmaddr += PAGE_SIZE; } - - return (0); } /* * Remove the iomap from the IOMMU. */ -int +void viommu_iomap_unload_map(struct iommu_state *is, struct iommu_map_state *ims) { struct iommu_page_map *ipm = &ims->ims_map; @@ -958,6 +971,4 @@ viommu_iomap_unload_map(struct iommu_state *is, struct iommu_map_state *ims) for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e) viommu_remove(is, NULL, e->ipe_va); - - return (0); } |