| author    | Bob Beck <beck@cvs.openbsd.org>                  | 2019-05-08 12:40:58 +0000 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Bob Beck <beck@cvs.openbsd.org>                  | 2019-05-08 12:40:58 +0000 |
| commit    | d94ed2f836cebac6cc321e51c6b339eed4120ac6 (patch)  |                           |
| tree      | 360827a43c5a4dd708966da0374877d952e4090f /sys     |                           |
| parent    | 32d816726178d61d54f24fa50aa06b6cbeef8dd2 (diff)   |                           |
Modify the buffer cache to always flip recovered DMA buffers high.
This also modifies the backoff logic to only back off what is requested
and not a "mimimum" amount. Tested by me, benno@, tedu@ anda ports build
by naddy@.
ok tedu@
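In outline, the new recovery path walks the DMA cache's clean queues and, for each victim, either flips the buffer into high memory or discards it; both outcomes release DMA-reachable pages. Below is a minimal userspace sketch of that flip-or-discard accounting, for illustration only: the struct fields, the single queue, and the stand-in buf_flip_high() are simplified assumptions, not the kernel's real interfaces (the real logic is bufcache_recover_dmapages() in the diff further down).

```c
#include <stdio.h>

struct buf {
	long pages;       /* pages backing this buffer */
	int can_flip;     /* can it move to high memory without sleeping? */
	struct buf *next; /* clean-queue linkage */
};

static struct buf *dma_clean_queue;  /* head of the DMA clean queue */
static long high_pages, freed_pages; /* where recovered pages end up */

/*
 * Stand-in for the kernel's buf_flip_high(): 0 on success, -1 if the
 * move to high memory would have to sleep.
 */
static int
buf_flip_high(struct buf *bp)
{
	return bp->can_flip ? 0 : -1;
}

/*
 * Recover up to "howmany" DMA pages: flip each clean buffer high when
 * possible, discard it otherwise.  Either way its pages leave DMA
 * memory, so both outcomes count toward the recovered total.
 */
static long
recover_dmapages(long howmany)
{
	long recovered = 0;
	struct buf *bp;

	while (recovered < howmany && (bp = dma_clean_queue) != NULL) {
		dma_clean_queue = bp->next;
		recovered += bp->pages;
		if (buf_flip_high(bp) == 0)
			high_pages += bp->pages;  /* kept, now in high mem */
		else
			freed_pages += bp->pages; /* given up entirely */
	}
	return recovered;
}

int
main(void)
{
	struct buf bufs[3] = {
		{ 4, 1, &bufs[1] }, { 8, 0, &bufs[2] }, { 4, 1, NULL }
	};
	long got;

	dma_clean_queue = &bufs[0];
	got = recover_dmapages(10);
	printf("recovered %ld pages (%ld flipped high, %ld freed)\n",
	    got, high_pages, freed_pages);
	return 0;
}
```

The point of counting both cases is visible in the kernel code's own comment: "one way or another, the pages for this buffer are leaving DMA memory," so a flipped buffer is just as good as a freed one from the DMA allocator's point of view, and the data stays cached.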
Diffstat (limited to 'sys')
| -rw-r--r-- | sys/kern/vfs_bio.c    | 151 |
| -rw-r--r-- | sys/kern/vfs_biomem.c |   4 |

2 files changed, 123 insertions(+), 32 deletions(-)
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index a02bf70e354..f54dcb6b9de 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vfs_bio.c,v 1.188 2019/02/17 22:17:28 tedu Exp $	*/
+/*	$OpenBSD: vfs_bio.c,v 1.189 2019/05/08 12:40:57 beck Exp $	*/
 /*	$NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $	*/
 
 /*
@@ -85,6 +85,7 @@ void buf_put(struct buf *);
 struct buf *bio_doread(struct vnode *, daddr_t, int, int);
 struct buf *buf_get(struct vnode *, daddr_t, size_t);
 void bread_cluster_callback(struct buf *);
+int64_t bufcache_recover_dmapages(int discard, int64_t howmany);
 
 struct bcachestats bcstats;	/* counters */
 long lodirtypages;		/* dirty page count low water mark */
@@ -250,8 +251,8 @@ bufinit(void)
 void
 bufadjust(int newbufpages)
 {
-	struct buf *bp;
 	int s;
+	int64_t npages;
 
 	if (newbufpages < buflowpages)
 		newbufpages = buflowpages;
@@ -264,20 +265,15 @@ bufadjust(int newbufpages)
 	 */
 	targetpages = bufpages - RESERVE_PAGES;
 
+	npages = bcstats.dmapages - targetpages;
+
 	/*
 	 * Shrinking the cache happens here only if someone has manually
 	 * adjusted bufcachepercent - or the pagedaemon has told us
 	 * to give back memory *now* - so we give it all back.
 	 */
-	while ((bp = bufcache_getdmacleanbuf()) &&
-	    (bcstats.dmapages > targetpages)) {
-		bufcache_take(bp);
-		if (bp->b_vp) {
-			RBT_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree, bp);
-			brelvp(bp);
-		}
-		buf_put(bp);
-	}
+	if (bcstats.dmapages > targetpages)
+		(void) bufcache_recover_dmapages(0, bcstats.dmapages - targetpages);
 	bufcache_adjust();
 
 	/*
@@ -1071,16 +1067,8 @@ buf_get(struct vnode *vp, daddr_t blkno, size_t size)
 		 * new allocation, free enough buffers first
 		 * to stay at the target with our new allocation.
 		 */
-		while ((bcstats.dmapages + npages > targetpages) &&
-		    (bp = bufcache_getdmacleanbuf())) {
-			bufcache_take(bp);
-			if (bp->b_vp) {
-				RBT_REMOVE(buf_rb_bufs,
-				    &bp->b_vp->v_bufs_tree, bp);
-				brelvp(bp);
-			}
-			buf_put(bp);
-		}
+		(void) bufcache_recover_dmapages(0, npages);
+		bufcache_adjust();
 
 		/*
 		 * If we get here, we tried to free the world down
@@ -1455,13 +1443,15 @@ bufcache_getcleanbuf(int cachenum, int discard)
 {
 	struct buf *bp = NULL;
 	struct bufcache *cache = &cleancache[cachenum];
+	struct bufqueue * queue;
 
 	splassert(IPL_BIO);
 
 	/* try cold queue */
-	while ((bp = TAILQ_FIRST(&cache->coldqueue))) {
-		if ((!discard) &&
-		    cachenum < NUM_CACHES - 1 && ISSET(bp->b_flags, B_WARM)) {
+	while ((bp = TAILQ_FIRST(&cache->coldqueue)) ||
+	    (bp = TAILQ_FIRST(&cache->warmqueue)) ||
+	    (bp = TAILQ_FIRST(&cache->hotqueue))) {
+		if ((!discard) && cachenum < NUM_CACHES - 1) {
 			int64_t pages = atop(bp->b_bufsize);
 			struct bufcache *newcache;
 
@@ -1490,7 +1480,17 @@ bufcache_getcleanbuf(int cachenum, int discard)
 			}
 
 			/* Move the buffer to the hot queue in the next cache */
-			TAILQ_REMOVE(&cache->coldqueue, bp, b_freelist);
+			if (ISSET(bp->b_flags, B_COLD)) {
+				queue = &cache->coldqueue;
+			} else if (ISSET(bp->b_flags, B_WARM)) {
+				queue = &cache->warmqueue;
+				cache->warmbufpages -= pages;
+			} else {
+				queue = &cache->hotqueue;
+				cache->hotbufpages -= pages;
+			}
+			TAILQ_REMOVE(queue, bp, b_freelist);
+			cache->cachepages -= pages;
 			CLR(bp->b_flags, B_WARM);
 			CLR(bp->b_flags, B_COLD);
 			bp->cache++;
@@ -1502,16 +1502,105 @@ bufcache_getcleanbuf(int cachenum, int discard)
 			TAILQ_INSERT_TAIL(&newcache->hotqueue, bp,
 			    b_freelist);
 		} else
-			/* buffer is cold - give it up */
+			/* Victim selected, give it up */
 			return bp;
 	}
-	if ((bp = TAILQ_FIRST(&cache->warmqueue)))
-		return bp;
-	if ((bp = TAILQ_FIRST(&cache->hotqueue)))
-		return bp;
 	return bp;
 }
 
+void
+discard_buffer(struct buf *bp) {
+	bufcache_take(bp);
+	if (bp->b_vp) {
+		RBT_REMOVE(buf_rb_bufs,
+		    &bp->b_vp->v_bufs_tree, bp);
+		brelvp(bp);
+	}
+	buf_put(bp);
+}
+
+int64_t
+bufcache_recover_dmapages(int discard, int64_t howmany)
+{
+	struct buf *bp = NULL;
+	struct bufcache *cache = &cleancache[DMA_CACHE];
+	struct bufqueue * queue;
+	int64_t recovered = 0;
+
+	splassert(IPL_BIO);
+
+	while ((recovered < howmany) &&
+	    ((bp = TAILQ_FIRST(&cache->coldqueue)) ||
+	    (bp = TAILQ_FIRST(&cache->warmqueue)) ||
+	    (bp = TAILQ_FIRST(&cache->hotqueue)))) {
+		if (!discard && DMA_CACHE < NUM_CACHES - 1) {
+			int64_t pages = atop(bp->b_bufsize);
+			struct bufcache *newcache;
+
+			KASSERT(bp->cache == DMA_CACHE);
+
+			/*
+			 * If this buffer was warm before, move it to
+			 * the hot queue in the next cache
+			 */
+
+			/*
+			 * One way or another, the pages for this
+			 * buffer are leaving DMA memory
+			 */
+			recovered += pages;
+
+			if (fliphigh) {
+				/*
+				 * If we are in the DMA cache, try to flip the
+				 * buffer up high to move it on to the other
+				 * caches. if we can't move the buffer to high
+				 * memory without sleeping, we give it up
+				 * now rather than fight for more memory
+				 * against non buffer cache competitors.
+				 */
+				SET(bp->b_flags, B_BUSY);
+				if (bp->cache == 0 && buf_flip_high(bp) == -1) {
+					CLR(bp->b_flags, B_BUSY);
+					discard_buffer(bp);
+				} else {
+					CLR(bp->b_flags, B_BUSY);
+
+					/*
+					 * Move the buffer to the hot queue in
+					 * the next cache
+					 */
+					if (ISSET(bp->b_flags, B_COLD)) {
+						queue = &cache->coldqueue;
+					} else if (ISSET(bp->b_flags, B_WARM)) {
+						queue = &cache->warmqueue;
+						cache->warmbufpages -= pages;
+					} else {
+						queue = &cache->hotqueue;
+						cache->hotbufpages -= pages;
+					}
+					TAILQ_REMOVE(queue, bp, b_freelist);
+					cache->cachepages -= pages;
+					CLR(bp->b_flags, B_WARM);
+					CLR(bp->b_flags, B_COLD);
+					bp->cache++;
+					newcache= &cleancache[bp->cache];
+					newcache->cachepages += pages;
+					newcache->hotbufpages += pages;
+					chillbufs(newcache, &newcache->hotqueue,
+					    &newcache->hotbufpages);
+					TAILQ_INSERT_TAIL(&newcache->hotqueue,
+					    bp, b_freelist);
+				}
+			} else
+				discard_buffer(bp);
+		} else
+			discard_buffer(bp);
+	}
+	return recovered;
+}
+
 struct buf *
 bufcache_getcleanbuf_range(int start, int end, int discard)
 {
@@ -1540,6 +1629,7 @@ bufcache_gethighcleanbuf(void)
 	return bufcache_getcleanbuf_range(DMA_CACHE + 1, NUM_CACHES - 1, 0);
 }
 
+
 struct buf *
 bufcache_getdmacleanbuf(void)
 {
@@ -1548,6 +1638,7 @@ bufcache_getdmacleanbuf(void)
 	return bufcache_getcleanbuf_range(DMA_CACHE, NUM_CACHES - 1, 0);
 }
 
+
 struct buf *
 bufcache_getdirtybuf(void)
 {
diff --git a/sys/kern/vfs_biomem.c b/sys/kern/vfs_biomem.c
index 2d3d0403288..0f761c976c2 100644
--- a/sys/kern/vfs_biomem.c
+++ b/sys/kern/vfs_biomem.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vfs_biomem.c,v 1.39 2018/03/29 01:43:41 mlarkin Exp $	*/
+/*	$OpenBSD: vfs_biomem.c,v 1.40 2019/05/08 12:40:57 beck Exp $	*/
 
 /*
  * Copyright (c) 2007 Artur Grabowski <art@openbsd.org>
@@ -291,7 +291,7 @@ buf_alloc_pages(struct buf *bp, vsize_t size)
 		    UVM_PLA_NOWAIT);
 		if (i == 0)
 			break;
-	} while (bufbackoff(&dma_constraint, 100) == 0);
+	} while (bufbackoff(&dma_constraint, size) == 0);
 	if (i != 0)
 		i = uvm_pagealloc_multi(buf_object, offs, size,
 		    UVM_PLA_WAITOK);
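The one-line vfs_biomem.c change is the second half of the commit message: buf_alloc_pages() now asks bufbackoff() to release the size of the failed allocation rather than a hard-coded 100 pages. Here is a rough userspace model of that retry loop; try_alloc() and cache_backoff() are invented stand-ins for uvm_pagealloc_multi() and bufbackoff(), and the page counts are made up for the example:

```c
#include <stdio.h>

static long cache_pages = 64; /* pages currently held by the buffer cache */
static long free_pages = 16;  /* pages available for allocation */

/* Stand-in for uvm_pagealloc_multi() with UVM_PLA_NOWAIT semantics. */
static int
try_alloc(long size)
{
	if (free_pages < size)
		return -1; /* would have to sleep */
	free_pages -= size;
	return 0;
}

/* Stand-in for bufbackoff(): give back up to "wanted" cache pages. */
static int
cache_backoff(long wanted)
{
	long give = wanted < cache_pages ? wanted : cache_pages;

	if (give == 0)
		return -1; /* nothing left to give back */
	cache_pages -= give;
	free_pages += give;
	return 0;
}

int
main(void)
{
	long size = 32; /* pages this allocation needs */

	/* The old code backed off a fixed 100 pages; now only "size". */
	while (try_alloc(size) == -1) {
		if (cache_backoff(size) == -1)
			break; /* cache exhausted; caller must wait */
	}
	printf("cache now holds %ld pages, %ld free\n",
	    cache_pages, free_pages);
	return 0;
}
```

Backing off exactly what is requested keeps a small allocation from evicting more of the cache than it needs, while a large allocation still frees enough pages for its retry to succeed.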