From d6fd9b3e4e16a6e1d158e085217f8c6ba50738f6 Mon Sep 17 00:00:00 2001 From: Fabian Keil Date: Fri, 27 May 2016 11:56:18 +0200 Subject: [PATCH 240/257] ZFS: Default to reducing the dirty data buffer to 50% of the maximum It's currently unclear whether or not dynamically scaling the buffer is a good idea at all. Obtained from: ElectroBSD --- .../contrib/opensolaris/uts/common/fs/zfs/arc.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c index 61b1bcdffef2..477a528e2411 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c +++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c @@ -6277,17 +6277,23 @@ arc_init(void) zfs_dirty_data_max_max); } /* - * If memory is tight, reduce the dynamic write buffer - * as low as 0.5% of the physical memory. This allows - * a system with 1 GB of physical memory to remain stable - * while building kernels in a loop for days. - * Using 1% (10 MB) proved to be too much. + * Calculate minimum amount of dirty data allowed per pool. * - * Systems with 2 GB of physical memory don't seem to - * reach the lower limit while building kernels. + * Initially the default was 0.5% of the system's memory + * and was supposed to prevent crashes when reproducing + * ElectroBSD on systems with 1 GB or less. + * + * While it made the crashes less common, it did not actually + * prevent them. + * + * The problem was eventually tracked down to a geli bug + * and fixed (#209759); therefore it's unclear if dynamically + * reducing the amount of dirty data allowed is still useful. + * + * For now it's set to 50% of the maximum. */ if (zfs_dirty_data_max_min == 0) { - zfs_dirty_data_max_min = ptob(physmem) / 200; + zfs_dirty_data_max_min = zfs_dirty_data_max / 2; } #ifdef _KERNEL -- 2.11.0