From 80cdbb01f3a3e08cfe11078e2cd2f226351b0bce Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Fri, 21 Feb 2020 23:17:33 +0000
Subject: [PATCH] WIP: lfs write flushbehind, like ffs does.

This is tuned to the lfs segment size.  It's kind of wrong because we
really want to start flushing when the file system as a whole has
accrued a segment's worth of data -- not just when a single file has.
Writing a large number of little files won't trigger this.

Currently WIP because it breaks tests/fs/vfs/t_full.
---
 sys/ufs/lfs/ulfs_readwrite.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/sys/ufs/lfs/ulfs_readwrite.c b/sys/ufs/lfs/ulfs_readwrite.c
index 12984c77a04f..01805dd18e4f 100644
--- a/sys/ufs/lfs/ulfs_readwrite.c
+++ b/sys/ufs/lfs/ulfs_readwrite.c
@@ -35,6 +35,8 @@
 #include <sys/cdefs.h>
 __KERNEL_RCSID(1, "$NetBSD: ulfs_readwrite.c,v 1.25 2019/06/20 00:49:11 christos Exp $");
 
+#include <sys/bitops.h>
+
 #define	FS			struct lfs
 #define	I_FS			i_lfs
 #define	READ			lfs_read
@@ -278,7 +280,6 @@ WRITE(void *v)
 
 	KASSERT(vp->v_type == VREG);
 
-	async = true;
 	lfs_availwait(fs, lfs_btofsb(fs, uio->uio_resid));
 	lfs_check(vp, LFS_UNUSED_LBN, 0);
 
@@ -395,7 +396,16 @@ WRITE(void *v)
 		 * XXXUBC simplistic async flushing.
 		 */
 
-		__USE(async);
+		unsigned shift = ilog2(lfs_segsize(fs)) - 1;
+		if (!async &&
+		    (oldoff >> shift) != (uio->uio_offset >> shift)) {
+			mutex_enter(vp->v_interlock);
+			error = VOP_PUTPAGES(vp, (oldoff >> shift) << shift,
+			    (uio->uio_offset >> shift) << shift,
+			    PGO_CLEANIT | PGO_LAZY);
+			if (error)
+				break;
+		}
 	}
 	if (error == 0 && ioflag & IO_SYNC) {
 		mutex_enter(vp->v_interlock);
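
The last hunk sizes the flushbehind window from the LFS segment size: shift =
ilog2(lfs_segsize(fs)) - 1 selects a window of half a segment (assuming the
segment size is a power of two), and whenever a write advances uio->uio_offset
across a window boundary, the pages behind that boundary are pushed out with
VOP_PUTPAGES(..., PGO_CLEANIT | PGO_LAZY).  The standalone userspace sketch
below illustrates only that window arithmetic; the 1 MB segment size, the
64 KB write size, ilog2_sketch() and flush() are made-up stand-ins, not kernel
interfaces or part of the patch.

/*
 * Illustrative userspace sketch of the flushbehind window arithmetic
 * in the last hunk.  ilog2_sketch() stands in for the kernel's
 * ilog2(), and flush() stands in for the lazy VOP_PUTPAGES() call.
 */
#include <stdint.h>
#include <stdio.h>

/* Index of the highest set bit, i.e. floor(log2(x)) for x > 0. */
static unsigned
ilog2_sketch(uint64_t x)
{
	unsigned i = 0;

	while (x >>= 1)
		i++;
	return i;
}

static void
flush(uint64_t lo, uint64_t hi)
{

	printf("flush [%ju, %ju)\n", (uintmax_t)lo, (uintmax_t)hi);
}

int
main(void)
{
	const uint64_t segsize = 1024 * 1024;	/* pretend lfs_segsize() = 1 MB */
	const unsigned shift = ilog2_sketch(segsize) - 1; /* half-segment window */
	uint64_t off = 0;

	for (int i = 0; i < 32; i++) {
		const uint64_t oldoff = off;

		off += 64 * 1024;		/* simulate a 64 KB write */

		/*
		 * Crossing a half-segment boundary means another window
		 * of dirty data has accumulated behind the write
		 * pointer, so lazily flush the completed window(s).
		 */
		if ((oldoff >> shift) != (off >> shift))
			flush((oldoff >> shift) << shift,
			    (off >> shift) << shift);
	}
	return 0;
}

Running this prints one flush per completed 512 KB window, mirroring how the
patch lets roughly half a segment of dirty data accumulate in a single file
before kicking off a lazy flush behind the write pointer.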