Diffstat (limited to 'sys/contrib/openzfs/module/os/freebsd/zfs')
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c     | 269
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c    |   3
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c  |  31
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c       |   1
4 files changed, 299 insertions, 5 deletions
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
index 393bfaa65ff5..ebc2c0eeb6d2 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
@@ -163,6 +163,13 @@ param_set_arc_int(SYSCTL_HANDLER_ARGS)
return (0);
}
+static void
+warn_deprecated_sysctl(const char *old, const char *new)
+{
+ printf("WARNING: sysctl vfs.zfs.%s is deprecated. Use vfs.zfs.%s instead.\n",
+ old, new);
+}
+
int
param_set_arc_max(SYSCTL_HANDLER_ARGS)
{
@@ -185,9 +192,17 @@ param_set_arc_max(SYSCTL_HANDLER_ARGS)
if (val != 0)
zfs_arc_max = arc_c_max;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("arc_max", "arc.max");
+
return (0);
}
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max,
+ CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+ NULL, 1, param_set_arc_max, "LU",
+ "Maximum ARC size in bytes (LEGACY)");
+
int
param_set_arc_min(SYSCTL_HANDLER_ARGS)
{
@@ -209,9 +224,17 @@ param_set_arc_min(SYSCTL_HANDLER_ARGS)
if (val != 0)
zfs_arc_min = arc_c_min;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("arc_min", "arc.min");
+
return (0);
}
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min,
+ CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+ NULL, 1, param_set_arc_min, "LU",
+ "Minimum ARC size in bytes (LEGACY)");
+
extern uint_t zfs_arc_free_target;
int
@@ -232,9 +255,22 @@ param_set_arc_free_target(SYSCTL_HANDLER_ARGS)
zfs_arc_free_target = val;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("arc_free_target", "arc.free_target");
+
return (0);
}
+/*
+ * NOTE: This sysctl is CTLFLAG_RW not CTLFLAG_RWTUN due to its dependency on
+ * pagedaemon initialization.
+ */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ NULL, 1, param_set_arc_free_target, "IU",
+ "Desired number of free pages below which ARC triggers reclaim"
+ " (LEGACY)");
+
int
param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
{
@@ -250,9 +286,193 @@ param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
arc_no_grow_shift = val;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("arc_no_grow_shift", "arc.no_grow_shift");
+
return (0);
}
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+ NULL, 1, param_set_arc_no_grow_shift, "I",
+ "log2(fraction of ARC which must be free to allow growing) (LEGACY)");
+
+extern uint64_t l2arc_write_max;
+
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max,
+ CTLFLAG_RWTUN, &l2arc_write_max, 0,
+ "Max write bytes per interval (LEGACY)");
+
+extern uint64_t l2arc_write_boost;
+
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost,
+ CTLFLAG_RWTUN, &l2arc_write_boost, 0,
+ "Extra write bytes during device warmup (LEGACY)");
+
+extern uint64_t l2arc_headroom;
+
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom,
+ CTLFLAG_RWTUN, &l2arc_headroom, 0,
+ "Number of max device writes to precache (LEGACY)");
+
+extern uint64_t l2arc_headroom_boost;
+
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom_boost,
+ CTLFLAG_RWTUN, &l2arc_headroom_boost, 0,
+ "Compressed l2arc_headroom multiplier (LEGACY)");
+
+extern uint64_t l2arc_feed_secs;
+
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs,
+ CTLFLAG_RWTUN, &l2arc_feed_secs, 0,
+ "Seconds between L2ARC writing (LEGACY)");
+
+extern uint64_t l2arc_feed_min_ms;
+
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms,
+ CTLFLAG_RWTUN, &l2arc_feed_min_ms, 0,
+ "Min feed interval in milliseconds (LEGACY)");
+
+extern int l2arc_noprefetch;
+
+SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch,
+ CTLFLAG_RWTUN, &l2arc_noprefetch, 0,
+ "Skip caching prefetched buffers (LEGACY)");
+
+extern int l2arc_feed_again;
+
+SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again,
+ CTLFLAG_RWTUN, &l2arc_feed_again, 0,
+ "Turbo L2ARC warmup (LEGACY)");
+
+extern int l2arc_norw;
+
+SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw,
+ CTLFLAG_RWTUN, &l2arc_norw, 0,
+ "No reads during writes (LEGACY)");
+
+static int
+param_get_arc_state_size(SYSCTL_HANDLER_ARGS)
+{
+ arc_state_t *state = (arc_state_t *)arg1;
+ int64_t val;
+
+ val = zfs_refcount_count(&state->arcs_size[ARC_BUFC_DATA]) +
+ zfs_refcount_count(&state->arcs_size[ARC_BUFC_METADATA]);
+ return (sysctl_handle_64(oidp, &val, 0, req));
+}
+
+extern arc_state_t ARC_anon;
+
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, anon_size,
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ &ARC_anon, 0, param_get_arc_state_size, "Q",
+ "size of anonymous state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD,
+ &ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
+ "size of evictable metadata in anonymous state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD,
+ &ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
+ "size of evictable data in anonymous state");
+
+extern arc_state_t ARC_mru;
+
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_size,
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ &ARC_mru, 0, param_get_arc_state_size, "Q",
+ "size of mru state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD,
+ &ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
+ "size of evictable metadata in mru state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD,
+ &ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
+ "size of evictable data in mru state");
+
+extern arc_state_t ARC_mru_ghost;
+
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_ghost_size,
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ &ARC_mru_ghost, 0, param_get_arc_state_size, "Q",
+ "size of mru ghost state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD,
+ &ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
+ "size of evictable metadata in mru ghost state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD,
+ &ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
+ "size of evictable data in mru ghost state");
+
+extern arc_state_t ARC_mfu;
+
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_size,
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ &ARC_mfu, 0, param_get_arc_state_size, "Q",
+ "size of mfu state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD,
+ &ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
+ "size of evictable metadata in mfu state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD,
+ &ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
+ "size of evictable data in mfu state");
+
+extern arc_state_t ARC_mfu_ghost;
+
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_ghost_size,
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ &ARC_mfu_ghost, 0, param_get_arc_state_size, "Q",
+ "size of mfu ghost state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD,
+ &ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
+ "size of evictable metadata in mfu ghost state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD,
+ &ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
+ "size of evictable data in mfu ghost state");
+
+extern arc_state_t ARC_uncached;
+
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, uncached_size,
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ &ARC_uncached, 0, param_get_arc_state_size, "Q",
+ "size of uncached state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_metadata_esize, CTLFLAG_RD,
+ &ARC_uncached.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
+ "size of evictable metadata in uncached state");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_data_esize, CTLFLAG_RD,
+ &ARC_uncached.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
+ "size of evictable data in uncached state");
+
+extern arc_state_t ARC_l2c_only;
+
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, l2c_only_size,
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ &ARC_l2c_only, 0, param_get_arc_state_size, "Q",
+ "size of l2c_only state");
+
+/* dbuf.c */
+
+/* dmu.c */
+
+/* dmu_zfetch.c */
+
+SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH (LEGACY)");
+
+extern uint32_t zfetch_max_distance;
+
+SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance,
+ CTLFLAG_RWTUN, &zfetch_max_distance, 0,
+ "Max bytes to prefetch per stream (LEGACY)");
+
+extern uint32_t zfetch_max_idistance;
+
+SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance,
+ CTLFLAG_RWTUN, &zfetch_max_idistance, 0,
+ "Max bytes to prefetch indirects for per stream (LEGACY)");
+
+/* dsl_pool.c */
+
+/* dnode.c */
+
+/* dsl_scan.c */
+
/* metaslab.c */
int
@@ -313,6 +533,19 @@ SYSCTL_UINT(_vfs_zfs, OID_AUTO, condense_pct,
"Condense on-disk spacemap when it is more than this many percents"
" of in-memory counterpart");
+extern uint_t zfs_remove_max_segment;
+
+SYSCTL_UINT(_vfs_zfs, OID_AUTO, remove_max_segment,
+ CTLFLAG_RWTUN, &zfs_remove_max_segment, 0,
+ "Largest contiguous segment ZFS will attempt to allocate when removing"
+ " a device");
+
+extern int zfs_removal_suspend_progress;
+
+SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress,
+ CTLFLAG_RWTUN, &zfs_removal_suspend_progress, 0,
+ "Ensures certain actions can happen while in the middle of a removal");
+
/*
* Minimum size which forces the dynamic allocator to change
* it's allocation strategy. Once the space map cannot satisfy
@@ -532,9 +765,18 @@ param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS)
zfs_vdev_min_auto_ashift = val;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("min_auto_ashift",
+ "vdev.min_auto_ashift");
+
return (0);
}
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
+ CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 1,
+ param_set_min_auto_ashift, "IU",
+ "Min ashift used when creating new top-level vdev. (LEGACY)");
+
int
param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
{
@@ -551,9 +793,19 @@ param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
zfs_vdev_max_auto_ashift = val;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("max_auto_ashift",
+ "vdev.max_auto_ashift");
+
return (0);
}
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
+ CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 1,
+ param_set_max_auto_ashift, "IU",
+ "Max ashift used when optimizing for logical -> physical sector size on"
+ " new top-level vdevs. (LEGACY)");
+
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
@@ -575,6 +827,23 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz,
CTLFLAG_RDTUN, &zfs_vdev_standard_sm_blksz, 0,
"Block size for standard space map. Power of 2 greater than 4096.");
+extern int vdev_validate_skip;
+
+SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip,
+ CTLFLAG_RDTUN, &vdev_validate_skip, 0,
+ "Enable to bypass vdev_validate().");
+
+/* vdev_mirror.c */
+
+/* vdev_queue.c */
+
+extern uint_t zfs_vdev_max_active;
+
+SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
+ CTLFLAG_RWTUN, &zfs_vdev_max_active, 0,
+ "The maximum number of I/Os of all types active for each device."
+ " (LEGACY)");
+
/* zio.c */
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata,
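The sysctl_os.c hunks above follow a single compatibility pattern: tunables that moved under sub-nodes (vfs.zfs.arc.*, vfs.zfs.vdev.*, vfs.zfs.zfetch.*) keep a legacy OID directly under vfs.zfs, and the shared handler is registered with arg2 = 1 for that legacy OID so warn_deprecated_sysctl() fires only when the old spelling is used. A minimal sketch of the pattern, assuming a hypothetical vfs.zfs.foo / vfs.zfs.bar.foo tunable rather than any real OpenZFS parameter:

/*
 * Hypothetical sketch of the legacy-alias pattern above ("foo" and
 * "bar.foo" are placeholders, not actual OpenZFS tunables).  One
 * handler serves both OIDs; arg2 distinguishes the legacy spelling so
 * only it prints the deprecation warning.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, bar, CTLFLAG_RW, 0, "new home of the tunable");

static uint64_t foo_value;

static int
sysctl_foo(SYSCTL_HANDLER_ARGS)
{
        uint64_t val = foo_value;
        int err;

        err = sysctl_handle_64(oidp, &val, 0, req);
        if (err != 0 || req->newptr == NULL)
                return (err);

        foo_value = val;

        /* Only the legacy OID is registered with arg2 == 1. */
        if (arg2 != 0)
                printf("WARNING: sysctl vfs.zfs.foo is deprecated. "
                    "Use vfs.zfs.bar.foo instead.\n");
        return (0);
}

/* New spelling: vfs.zfs.bar.foo, no warning. */
SYSCTL_PROC(_vfs_zfs_bar, OID_AUTO, foo,
    CTLTYPE_U64 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_foo, "QU", "foo tunable");

/* Legacy spelling: vfs.zfs.foo, warns on use. */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, foo,
    CTLTYPE_U64 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
    NULL, 1, sysctl_foo, "QU", "foo tunable (LEGACY)");

Writing either OID updates the same variable; only the legacy spelling logs the warning, which matches what the arc_max, arc_min, arc_free_target, arc_no_grow_shift and auto-ashift hunks above do via warn_deprecated_sysctl().
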
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
index 4de48e013ec4..d0a9c662e6f0 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
@@ -762,8 +762,7 @@ zfsctl_common_pathconf(struct vop_pathconf_args *ap)
return (0);
case _PC_MIN_HOLE_SIZE:
- *ap->a_retval = (int)SPA_MINBLOCKSIZE;
- return (0);
+ return (EINVAL);
case _PC_ACL_EXTENDED:
*ap->a_retval = 0;
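After the zfs_ctldir.c change, querying _PC_MIN_HOLE_SIZE on a snapshot control-directory vnode (.zfs and friends) fails with EINVAL rather than reporting SPA_MINBLOCKSIZE. A small userland illustration of what callers now see, assuming a placeholder mount point path:

/*
 * Userland view of the zfs_ctldir.c hunk above: _PC_MIN_HOLE_SIZE on a
 * .zfs control-directory path now fails with EINVAL.  "/pool/.zfs" is
 * a placeholder, not a required layout.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        long hole;

        errno = 0;
        hole = pathconf("/pool/.zfs", _PC_MIN_HOLE_SIZE);
        if (hole == -1) {
                /* With this change: errno == EINVAL for control-dir vnodes. */
                perror("pathconf(_PC_MIN_HOLE_SIZE)");
                return (1);
        }
        printf("minimum hole size: %ld bytes\n", hole);
        return (0);
}
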
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
index 411225786089..f34a2fd37a77 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -4116,6 +4116,7 @@ zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
{
znode_t *zp;
zfsvfs_t *zfsvfs;
+ uint_t blksize, iosize;
int error;
switch (cmd) {
@@ -4127,8 +4128,20 @@ zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
*valp = 64;
return (0);
case _PC_MIN_HOLE_SIZE:
- *valp = (int)SPA_MINBLOCKSIZE;
- return (0);
+ iosize = vp->v_mount->mnt_stat.f_iosize;
+ if (vp->v_type == VREG) {
+ zp = VTOZ(vp);
+ blksize = zp->z_blksz;
+ if (zp->z_size <= blksize)
+ blksize = MAX(blksize, iosize);
+ *valp = (int)blksize;
+ return (0);
+ }
+ if (vp->v_type == VDIR) {
+ *valp = (int)iosize;
+ return (0);
+ }
+ return (EINVAL);
case _PC_ACL_EXTENDED:
#if 0 /* POSIX ACLs are not implemented for ZFS on FreeBSD yet. */
zp = VTOZ(vp);
@@ -4210,8 +4223,20 @@ zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
zfs_vmobject_wlock(object);
(void) vm_page_grab_pages(object, OFF_TO_IDX(start),
- VM_ALLOC_NORMAL | VM_ALLOC_WAITOK | VM_ALLOC_ZERO,
+ VM_ALLOC_NORMAL | VM_ALLOC_WAITOK,
ma, count);
+ if (!vm_page_all_valid(ma[count - 1])) {
+ /*
+ * Later in this function, we copy DMU data to
+ * invalid pages only. The last page may not be
+ * entirely filled though, if the file does not
+ * end on a page boundary. Therefore, we zero
+ * that last page here to make sure it does not
+ * contain garbage after the end of file.
+ */
+ ASSERT(vm_page_none_valid(ma[count - 1]));
+ vm_page_zero_invalid(ma[count - 1], FALSE);
+ }
zfs_vmobject_wunlock(object);
}
if (blksz == zp->z_blksz)
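The zfs_pathconf() hunk above makes _PC_MIN_HOLE_SIZE a per-vnode answer: a regular file reports its znode block size (bumped to f_iosize while the file still fits within a single block), a directory reports f_iosize, and anything else returns EINVAL; the zfs_getpages() hunk separately stops requesting pre-zeroed pages and zeroes only a partially valid final page. A hedged sketch of how a sparse-aware copy tool might consume the pathconf value together with SEEK_HOLE, using a placeholder file path:

/*
 * Sketch only: use _PC_MIN_HOLE_SIZE as the hole-reporting granularity
 * and fall back to treating the file as fully allocated when the query
 * fails.  "/pool/dataset/file" is a placeholder.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        long granularity;
        off_t hole;
        int fd;

        fd = open("/pool/dataset/file", O_RDONLY);
        if (fd == -1)
                return (1);

        granularity = fpathconf(fd, _PC_MIN_HOLE_SIZE);
        if (granularity > 0) {
                /* Holes are reported at this granularity; probe for one. */
                hole = lseek(fd, 0, SEEK_HOLE);
                printf("hole granularity %ld bytes, first hole at %jd\n",
                    granularity, (intmax_t)hole);
        } else {
                /* EINVAL: no hole reporting for this vnode type. */
                printf("no usable hole reporting for this object\n");
        }
        close(fd);
        return (0);
}
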
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
index 0dd2ecd7fd8d..3ddbfcb97184 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
@@ -183,6 +183,7 @@ static struct filterops zvol_filterops_vnode = {
.f_isfd = 1,
.f_detach = zvol_filter_detach,
.f_event = zvol_filter_vnode,
+ .f_copy = knote_triv_copy,
};
extern uint_t zfs_geom_probe_vdev_key;
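The zvol_os.c hunk adds an f_copy callback to the existing vnode kqueue filter, reusing the stock knote_triv_copy helper. A generic filter definition of the same shape, with placeholder example_* names standing in for real attach/detach/event logic:

/*
 * Placeholder filter: mirrors the shape of zvol_filterops_vnode above.
 * The example_* callbacks do nothing useful; knote_triv_copy is the
 * stock helper assigned in the hunk above.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/event.h>

static void
example_filter_detach(struct knote *kn)
{
        /* Placeholder: drop whatever reference the attach path took. */
}

static int
example_filter_event(struct knote *kn, long hint)
{
        /* Placeholder: report "never ready". */
        return (0);
}

static struct filterops example_filterops = {
        .f_isfd = 1,
        .f_detach = example_filter_detach,
        .f_event = example_filter_event,
        .f_copy = knote_triv_copy,      /* stock helper, as in the hunk above */
};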