author		Patrick Wildt <patrick@cvs.openbsd.org>	2020-11-22 15:18:36 +0000
committer	Patrick Wildt <patrick@cvs.openbsd.org>	2020-11-22 15:18:36 +0000
commit		a7b4aed10ebdc84d48cc5cd2a73ccf7ee05f4ae5 (patch)
tree		a0277d5cbefab31494dfb794a996f631cf7e240a /sys/arch
parent		e13f17a870e471164619e364c13e79e657e06616 (diff)
ARM64's bus dma coalesces segments when they are physically contiguous, to
optimize the number of segments given to hardware for DMA. The cache
maintenance code uses the virtual addresses stored in the segments for
flushing, since we have to flush by VA. Unfortunately the coalescing only
checks whether the physical blocks are contiguous, so it is possible that
phys-contig but virt-non-contig blocks are coalesced. The cache flush
operations are then not aware of this and flush the vaddr with the length
of the whole physical block, thus running long and flushing vaddrs that do
not belong to the segments.

Fix this by making sure the coalescing only occurs if the bus is coherent,
which means we do not do any flushing, or if the vaddrs are contiguous as
well. An alternative fix would have been to keep a copy of the mbuf/uio
pointers and flush vaddrs extracted from those objects.

This fixes a panic seen with mbuf chains, where the physical data was
surprisingly contiguous, but the mbufs' data pointers were not.

ok drahn@ kettenis@
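To make the condition concrete, here is a minimal, self-contained C sketch
of the coalescing test the commit message describes. The field names
(ds_addr, ds_len, _ds_vaddr) and the BUS_DMA_COHERENT flag mirror the diff
below, but the seg struct, the flag value, and the can_coalesce() helper
are simplified stand-ins for illustration, not the kernel's bus_dma API.

#include <stdbool.h>
#include <stdint.h>

#define BUS_DMA_COHERENT	0x0008	/* flag value is illustrative only */

struct seg {
	uint64_t ds_addr;	/* physical start of the segment */
	uint64_t ds_len;	/* bytes accumulated so far */
	uint64_t _ds_vaddr;	/* virtual start, used for VA cache flushes */
};

static bool
can_coalesce(const struct seg *s, uint64_t paddr, uint64_t vaddr, int flags)
{
	/* Physical contiguity: the new block must start exactly where
	 * the segment currently ends. */
	if (s->ds_addr + s->ds_len != paddr)
		return false;
	/* On a coherent bus nothing is ever flushed by VA, so the
	 * virtual layout of the backing pages does not matter. */
	if (flags & BUS_DMA_COHERENT)
		return true;
	/* Otherwise the VAs must be contiguous too; a later flush of
	 * ds_len bytes starting at _ds_vaddr would otherwise run past
	 * the mapping that actually backs the segment. */
	return s->_ds_vaddr + s->ds_len == vaddr;
}

Note how the coherent case short-circuits: coherent maps keep the full
coalescing optimization, and only non-coherent maps pay the extra
virtual-contiguity check that makes a flush of (_ds_vaddr, ds_len) safe.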
Diffstat (limited to 'sys/arch')
-rw-r--r--	sys/arch/arm64/arm64/bus_dma.c	16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/sys/arch/arm64/arm64/bus_dma.c b/sys/arch/arm64/arm64/bus_dma.c
index 6336497e750..d260174c0fa 100644
--- a/sys/arch/arm64/arm64/bus_dma.c
+++ b/sys/arch/arm64/arm64/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.13 2020/04/21 07:57:17 kettenis Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.14 2020/11/22 15:18:35 patrick Exp $ */
 /*
  * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -312,9 +312,12 @@ _dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
 		if (paddr == lastaddr &&
 		    (map->dm_segs[seg].ds_len + sgsize) <=
 		     map->_dm_maxsegsz &&
-		     (map->_dm_boundary == 0 ||
+		    (map->_dm_boundary == 0 ||
 		     (map->dm_segs[seg].ds_addr & bmask) ==
-		     (paddr & bmask)))
+		     (paddr & bmask)) &&
+		    (t->_flags & BUS_DMA_COHERENT ||
+		     (map->dm_segs[seg]._ds_vaddr +
+		      map->dm_segs[seg].ds_len == vaddr)))
 			map->dm_segs[seg].ds_len += sgsize;
 		else {
 			if (++seg >= map->_dm_segcnt)
@@ -643,9 +646,12 @@ _dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
 		if ((bus_addr_t)curaddr == lastaddr &&
 		    (map->dm_segs[seg].ds_len + sgsize) <=
 		     map->_dm_maxsegsz &&
-		     (map->_dm_boundary == 0 ||
+		    (map->_dm_boundary == 0 ||
 		     (map->dm_segs[seg].ds_addr & bmask) ==
-		     ((bus_addr_t)curaddr & bmask)))
+		     ((bus_addr_t)curaddr & bmask)) &&
+		    (t->_flags & BUS_DMA_COHERENT ||
+		     (map->dm_segs[seg]._ds_vaddr +
+		      map->dm_segs[seg].ds_len == vaddr)))
 			map->dm_segs[seg].ds_len += sgsize;
 		else {
 			if (++seg >= map->_dm_segcnt)