From a684b1d89f4b3459cc990a70c83867100bb5751d Mon Sep 17 00:00:00 2001
From: beck
Date: Mon, 24 Apr 2023 16:46:43 +0000
Subject: [PATCH] Remove recursive spl grabbing in vfs_bio.c in preparation for improvements.

ok claudio@
---
 sys/kern/vfs_bio.c | 36 +++++++++++++++++++++++-------------
 1 file changed, 23 insertions(+), 13 deletions(-)

diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 9729467b71b..c011580e119 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vfs_bio.c,v 1.210 2022/08/14 01:58:28 jsg Exp $	*/
+/*	$OpenBSD: vfs_bio.c,v 1.211 2023/04/24 16:46:43 beck Exp $	*/
 /*	$NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $	*/
 
 /*
@@ -84,6 +84,7 @@ struct buf *bio_doread(struct vnode *, daddr_t, int, int);
 struct buf *buf_get(struct vnode *, daddr_t, size_t);
 void bread_cluster_callback(struct buf *);
 int64_t bufcache_recover_dmapages(int discard, int64_t howmany);
+static struct buf *incore_locked(struct vnode *vp, daddr_t blkno);
 
 struct bcachestats bcstats;	/* counters */
 long lodirtypages;	/* dirty page count low water mark */
@@ -374,7 +375,6 @@ bufbackoff(struct uvm_constraint_range *range, long size)
 int
 buf_flip_high(struct buf *bp)
 {
-	int s;
 	int ret = -1;
 
 	KASSERT(ISSET(bp->b_flags, B_BC));
@@ -382,15 +382,15 @@ buf_flip_high(struct buf *bp)
 	KASSERT(bp->cache == DMA_CACHE);
 	KASSERT(fliphigh);
 
+	splassert(IPL_BIO);
+
 	/* Attempt to move the buffer to high memory if we can */
-	s = splbio();
 	if (buf_realloc_pages(bp, &high_constraint, UVM_PLA_NOWAIT) == 0) {
 		KASSERT(!ISSET(bp->b_flags, B_DMA));
 		bcstats.highflips++;
 		ret = 0;
 	} else
 		bcstats.highflops++;
-	splx(s);
 
 	return ret;
 }
@@ -407,14 +407,14 @@ buf_flip_dma(struct buf *bp)
 	KASSERT(ISSET(bp->b_flags, B_BUSY));
 	KASSERT(bp->cache < NUM_CACHES);
 
+	splassert(IPL_BIO);
+
 	if (!ISSET(bp->b_flags, B_DMA)) {
-		int s = splbio();
 
 		/* move buf to dma reachable memory */
 		(void) buf_realloc_pages(bp, &dma_constraint, UVM_PLA_WAITOK);
 		KASSERT(ISSET(bp->b_flags, B_DMA));
 		bcstats.dmaflips++;
-		splx(s);
 	}
 
 	if (bp->cache > DMA_CACHE) {
@@ -499,7 +499,7 @@ breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t rablks[],
 	 */
 	for (i = 0; i < nrablks; i++) {
 		/* If it's in the cache, just go on to next one. */
-		if (incore(vp, rablks[i]))
+		if (incore_locked(vp, rablks[i]))
 			continue;
 
 		/* Get a buffer for the read-ahead block */
@@ -747,9 +747,9 @@ bwrite(struct buf *bp)
 
 	/* Initiate disk write. Make sure the appropriate party is charged. */
 	bp->b_vp->v_numoutput++;
-	splx(s);
 	buf_flip_dma(bp);
 	SET(bp->b_flags, B_WRITEINPROG);
+	splx(s);
 	VOP_STRATEGY(bp->b_vp, bp);
 
 	/*
@@ -973,14 +973,13 @@ brelse(struct buf *bp)
  * Determine if a block is in the cache. Just look on what would be its hash
  * chain. If it's there, return a pointer to it, unless it's marked invalid.
  */
-struct buf *
-incore(struct vnode *vp, daddr_t blkno)
+static struct buf *
+incore_locked(struct vnode *vp, daddr_t blkno)
 {
 	struct buf *bp;
 	struct buf b;
-	int s;
 
-	s = splbio();
+	splassert(IPL_BIO);
 
 	/* Search buf lookup tree */
 	b.b_lblkno = blkno;
@@ -988,7 +987,18 @@
 	if (bp != NULL && ISSET(bp->b_flags, B_INVAL))
 		bp = NULL;
 
+	return (bp);
+}
+struct buf *
+incore(struct vnode *vp, daddr_t blkno)
+{
+	struct buf *bp;
+	int s;
+
+	s = splbio();
+	bp = incore_locked(vp, blkno);
 	splx(s);
+
 	return (bp);
 }
 
@@ -1154,7 +1164,7 @@ buf_get(struct vnode *vp, daddr_t blkno, size_t size)
 	 *
 	 * But first, we check if someone beat us to it.
 	 */
-	if (incore(vp, blkno)) {
+	if (incore_locked(vp, blkno)) {
 		pool_put(&bufpool, bp);
 		splx(s);
 		return (NULL);
-- 
2.20.1
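
The discipline this diff moves to is that buf_flip_high(), buf_flip_dma() and the new incore_locked() no longer raise the interrupt priority level themselves; the caller takes splbio() once and the helpers only splassert(IPL_BIO). Below is a minimal userland sketch of that pattern, not part of the commit: splbio(), splx() and splassert() here are simplified stand-ins for the kernel primitives, the IPL values are arbitrary, and helper_locked() is a hypothetical placeholder for the helpers touched above.

#include <assert.h>
#include <stdio.h>

/* Arbitrary level values for the sketch only. */
#define IPL_NONE	0
#define IPL_BIO		1

static int current_ipl = IPL_NONE;

/* Stand-in for splbio(): raise the level, return the previous one. */
static int
splbio(void)
{
	int s = current_ipl;

	current_ipl = IPL_BIO;
	return (s);
}

/* Stand-in for splx(): restore the saved level. */
static void
splx(int s)
{
	current_ipl = s;
}

/* Stand-in for splassert(): the caller must already hold the level. */
static void
splassert(int wantipl)
{
	assert(current_ipl >= wantipl);
}

/*
 * Old shape: the helper raised and dropped the level itself, so a caller
 * already at IPL_BIO grabbed it recursively.  New shape, as in the diff:
 * the helper only asserts and leaves the spl handling to its caller.
 */
static void
helper_locked(void)
{
	splassert(IPL_BIO);
	/* ... work that must run at IPL_BIO ... */
}

int
main(void)
{
	int s;

	s = splbio();		/* caller raises the level once */
	helper_locked();	/* helper asserts instead of re-raising */
	splx(s);

	printf("ok\n");
	return (0);
}

The point of the refactor is visible in main(): a caller that already runs at IPL_BIO no longer takes the level a second time inside the helper, which is the "recursive spl grabbing" the commit message removes.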