author	Bob Beck <beck@cvs.openbsd.org>	2019-05-08 12:40:58 +0000
committer	Bob Beck <beck@cvs.openbsd.org>	2019-05-08 12:40:58 +0000
commit	d94ed2f836cebac6cc321e51c6b339eed4120ac6 (patch)
tree	360827a43c5a4dd708966da0374877d952e4090f /sys/kern/vfs_bio.c
parent	32d816726178d61d54f24fa50aa06b6cbeef8dd2 (diff)
Modify the buffer cache to always flip recovered DMA buffers high.
This also modifies the backoff logic to only back off what is requested and not a "minimum" amount. Tested by me, benno@, tedu@ and a ports build by naddy@. ok tedu@
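
Note on the backoff change: callers now pass the number of DMA pages they want recovered, and recovery stops as soon as that amount has been freed, rather than draining buffers down to some fixed low-water mark. The following is a minimal standalone sketch of that stopping condition only; it is not the kernel code, and toy_recover_dmapages(), toy_atop() and the buffer sizes are illustrative stand-ins for bufcache_recover_dmapages() and atop().

	/*
	 * Standalone model of "recover only what is asked for".
	 * The clean queue is faked with a toy array of buffer sizes so
	 * the loop's stopping condition can be seen in isolation.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define TOY_PAGE_SIZE	4096
	#define toy_atop(sz)	((int64_t)((sz) / TOY_PAGE_SIZE))

	/* Pretend clean-queue buffer sizes, in bytes (illustrative only). */
	static const size_t toy_bufsizes[] = {
		16384, 65536, 8192, 32768, 65536, 4096
	};
	static const int toy_nbufs =
	    sizeof(toy_bufsizes) / sizeof(toy_bufsizes[0]);

	/*
	 * Recover pages from the toy queue until at least "howmany" have
	 * been freed, then stop, instead of always backing off a fixed
	 * minimum amount.
	 */
	static int64_t
	toy_recover_dmapages(int64_t howmany)
	{
		int64_t recovered = 0;
		int i;

		for (i = 0; i < toy_nbufs && recovered < howmany; i++)
			recovered += toy_atop(toy_bufsizes[i]);

		return recovered;
	}

	int
	main(void)
	{
		/* Ask for 10 pages; the loop stops once that is satisfied. */
		printf("recovered %lld pages\n",
		    (long long)toy_recover_dmapages(10));
		return 0;
	}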
Diffstat (limited to 'sys/kern/vfs_bio.c')
-rw-r--r--	sys/kern/vfs_bio.c	151
1 file changed, 121 insertions, 30 deletions
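
Note on flipping recovered buffers high: in the diff below, each buffer pulled off the DMA clean queues is either migrated to high memory and promoted to the next cache's hot queue, or, if buf_flip_high() cannot move it without sleeping, discarded rather than fought over. The following is a minimal standalone sketch of that decision; toy_flip_high() and toy_recover_one() are illustrative stand-ins, not the kernel interfaces.

	/*
	 * Standalone model of the promote-or-discard decision made for
	 * each buffer recovered from the DMA cache.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum toy_action { TOY_PROMOTED, TOY_DISCARDED };

	/*
	 * Stand-in for buf_flip_high(): returns -1 when the buffer cannot
	 * be moved to high memory without sleeping.
	 */
	static int
	toy_flip_high(bool can_move_without_sleeping)
	{
		return can_move_without_sleeping ? 0 : -1;
	}

	static enum toy_action
	toy_recover_one(bool can_move_without_sleeping)
	{
		if (toy_flip_high(can_move_without_sleeping) == -1) {
			/* Give the buffer up rather than fight for memory. */
			return TOY_DISCARDED;
		}
		/* Success: pages leave DMA memory; buffer joins the next
		 * cache's hot queue. */
		return TOY_PROMOTED;
	}

	int
	main(void)
	{
		printf("flip ok:   %s\n", toy_recover_one(true) ==
		    TOY_PROMOTED ? "promoted" : "discarded");
		printf("flip fail: %s\n", toy_recover_one(false) ==
		    TOY_PROMOTED ? "promoted" : "discarded");
		return 0;
	}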
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index a02bf70e354..f54dcb6b9de 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_bio.c,v 1.188 2019/02/17 22:17:28 tedu Exp $ */
+/* $OpenBSD: vfs_bio.c,v 1.189 2019/05/08 12:40:57 beck Exp $ */
/* $NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $ */
/*
@@ -85,6 +85,7 @@ void buf_put(struct buf *);
struct buf *bio_doread(struct vnode *, daddr_t, int, int);
struct buf *buf_get(struct vnode *, daddr_t, size_t);
void bread_cluster_callback(struct buf *);
+int64_t bufcache_recover_dmapages(int discard, int64_t howmany);
struct bcachestats bcstats; /* counters */
long lodirtypages; /* dirty page count low water mark */
@@ -250,8 +251,8 @@ bufinit(void)
void
bufadjust(int newbufpages)
{
- struct buf *bp;
int s;
+ int64_t npages;
if (newbufpages < buflowpages)
newbufpages = buflowpages;
@@ -264,20 +265,15 @@ bufadjust(int newbufpages)
*/
targetpages = bufpages - RESERVE_PAGES;
+ npages = bcstats.dmapages - targetpages;
+
/*
* Shrinking the cache happens here only if someone has manually
* adjusted bufcachepercent - or the pagedaemon has told us
* to give back memory *now* - so we give it all back.
*/
- while ((bp = bufcache_getdmacleanbuf()) &&
- (bcstats.dmapages > targetpages)) {
- bufcache_take(bp);
- if (bp->b_vp) {
- RBT_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree, bp);
- brelvp(bp);
- }
- buf_put(bp);
- }
+ if (bcstats.dmapages > targetpages)
+ (void) bufcache_recover_dmapages(0, bcstats.dmapages - targetpages);
bufcache_adjust();
/*
@@ -1071,16 +1067,8 @@ buf_get(struct vnode *vp, daddr_t blkno, size_t size)
* new allocation, free enough buffers first
* to stay at the target with our new allocation.
*/
- while ((bcstats.dmapages + npages > targetpages) &&
- (bp = bufcache_getdmacleanbuf())) {
- bufcache_take(bp);
- if (bp->b_vp) {
- RBT_REMOVE(buf_rb_bufs,
- &bp->b_vp->v_bufs_tree, bp);
- brelvp(bp);
- }
- buf_put(bp);
- }
+ (void) bufcache_recover_dmapages(0, npages);
+ bufcache_adjust();
/*
* If we get here, we tried to free the world down
@@ -1455,13 +1443,15 @@ bufcache_getcleanbuf(int cachenum, int discard)
{
struct buf *bp = NULL;
struct bufcache *cache = &cleancache[cachenum];
+ struct bufqueue * queue;
splassert(IPL_BIO);
/* try cold queue */
- while ((bp = TAILQ_FIRST(&cache->coldqueue))) {
- if ((!discard) &&
- cachenum < NUM_CACHES - 1 && ISSET(bp->b_flags, B_WARM)) {
+ while ((bp = TAILQ_FIRST(&cache->coldqueue)) ||
+ (bp = TAILQ_FIRST(&cache->warmqueue)) ||
+ (bp = TAILQ_FIRST(&cache->hotqueue))) {
+ if ((!discard) && cachenum < NUM_CACHES - 1) {
int64_t pages = atop(bp->b_bufsize);
struct bufcache *newcache;
@@ -1490,7 +1480,17 @@ bufcache_getcleanbuf(int cachenum, int discard)
}
/* Move the buffer to the hot queue in the next cache */
- TAILQ_REMOVE(&cache->coldqueue, bp, b_freelist);
+ if (ISSET(bp->b_flags, B_COLD)) {
+ queue = &cache->coldqueue;
+ } else if (ISSET(bp->b_flags, B_WARM)) {
+ queue = &cache->warmqueue;
+ cache->warmbufpages -= pages;
+ } else {
+ queue = &cache->hotqueue;
+ cache->hotbufpages -= pages;
+ }
+ TAILQ_REMOVE(queue, bp, b_freelist);
+ cache->cachepages -= pages;
CLR(bp->b_flags, B_WARM);
CLR(bp->b_flags, B_COLD);
bp->cache++;
@@ -1502,16 +1502,105 @@ bufcache_getcleanbuf(int cachenum, int discard)
TAILQ_INSERT_TAIL(&newcache->hotqueue, bp, b_freelist);
}
else
- /* buffer is cold - give it up */
+ /* Victim selected, give it up */
return bp;
}
- if ((bp = TAILQ_FIRST(&cache->warmqueue)))
- return bp;
- if ((bp = TAILQ_FIRST(&cache->hotqueue)))
- return bp;
return bp;
}
+
+void
+discard_buffer(struct buf *bp) {
+ bufcache_take(bp);
+ if (bp->b_vp) {
+ RBT_REMOVE(buf_rb_bufs,
+ &bp->b_vp->v_bufs_tree, bp);
+ brelvp(bp);
+ }
+ buf_put(bp);
+}
+
+int64_t
+bufcache_recover_dmapages(int discard, int64_t howmany)
+{
+ struct buf *bp = NULL;
+ struct bufcache *cache = &cleancache[DMA_CACHE];
+ struct bufqueue * queue;
+ int64_t recovered = 0;
+
+ splassert(IPL_BIO);
+
+ while ((recovered < howmany) &&
+ ((bp = TAILQ_FIRST(&cache->coldqueue)) ||
+ (bp = TAILQ_FIRST(&cache->warmqueue)) ||
+ (bp = TAILQ_FIRST(&cache->hotqueue)))) {
+ if (!discard && DMA_CACHE < NUM_CACHES - 1) {
+ int64_t pages = atop(bp->b_bufsize);
+ struct bufcache *newcache;
+
+ KASSERT(bp->cache == DMA_CACHE);
+
+ /*
+ * If this buffer was warm before, move it to
+ * the hot queue in the next cache
+ */
+
+ /*
+ * One way or another, the pages for this
+ * buffer are leaving DMA memory
+ */
+ recovered += pages;
+
+ if (fliphigh) {
+ /*
+ * If we are in the DMA cache, try to flip the
+ * buffer up high to move it on to the other
+ * caches. if we can't move the buffer to high
+ * memory without sleeping, we give it up
+ * now rather than fight for more memory
+ * against non buffer cache competitors.
+ */
+ SET(bp->b_flags, B_BUSY);
+ if (bp->cache == 0 && buf_flip_high(bp) == -1) {
+ CLR(bp->b_flags, B_BUSY);
+ discard_buffer(bp);
+ } else {
+ CLR(bp->b_flags, B_BUSY);
+
+ /*
+ * Move the buffer to the hot queue in
+ * the next cache
+ */
+ if (ISSET(bp->b_flags, B_COLD)) {
+ queue = &cache->coldqueue;
+ } else if (ISSET(bp->b_flags, B_WARM)) {
+ queue = &cache->warmqueue;
+ cache->warmbufpages -= pages;
+ } else {
+ queue = &cache->hotqueue;
+ cache->hotbufpages -= pages;
+ }
+ TAILQ_REMOVE(queue, bp, b_freelist);
+ cache->cachepages -= pages;
+ CLR(bp->b_flags, B_WARM);
+ CLR(bp->b_flags, B_COLD);
+ bp->cache++;
+ newcache= &cleancache[bp->cache];
+ newcache->cachepages += pages;
+ newcache->hotbufpages += pages;
+ chillbufs(newcache, &newcache->hotqueue,
+ &newcache->hotbufpages);
+ TAILQ_INSERT_TAIL(&newcache->hotqueue,
+ bp, b_freelist);
+ }
+ } else
+ discard_buffer(bp);
+ } else
+ discard_buffer(bp);
+ }
+ return recovered;
+}
+
struct buf *
bufcache_getcleanbuf_range(int start, int end, int discard)
{
@@ -1540,6 +1629,7 @@ bufcache_gethighcleanbuf(void)
return bufcache_getcleanbuf_range(DMA_CACHE + 1, NUM_CACHES - 1, 0);
}
+
struct buf *
bufcache_getdmacleanbuf(void)
{
@@ -1548,6 +1638,7 @@ bufcache_getdmacleanbuf(void)
return bufcache_getcleanbuf_range(DMA_CACHE, NUM_CACHES - 1, 0);
}
+
struct buf *
bufcache_getdirtybuf(void)
{