Diffstat (limited to 'module/os/linux/zfs')
-rw-r--r--  module/os/linux/zfs/Makefile.in  |    3
-rw-r--r--  module/os/linux/zfs/abd_os.c     |    2
-rw-r--r--  module/os/linux/zfs/arc_os.c     |   88
-rw-r--r--  module/os/linux/zfs/policy.c     |    5
-rw-r--r--  module/os/linux/zfs/vdev_disk.c  |   31
-rw-r--r--  module/os/linux/zfs/vdev_file.c  |   18
-rw-r--r--  module/os/linux/zfs/zfs_ctldir.c |    1
-rw-r--r--  module/os/linux/zfs/zfs_vfsops.c |    4
-rw-r--r--  module/os/linux/zfs/zfs_vnops.c  | 1091
-rw-r--r--  module/os/linux/zfs/zfs_znode.c  |    5
-rw-r--r--  module/os/linux/zfs/zio_crypt.c  |   15
-rw-r--r--  module/os/linux/zfs/zpl_ctldir.c |   25
-rw-r--r--  module/os/linux/zfs/zpl_file.c   |  354
-rw-r--r--  module/os/linux/zfs/zpl_inode.c  |   10
-rw-r--r--  module/os/linux/zfs/zpl_super.c  |   23
-rw-r--r--  module/os/linux/zfs/zpl_xattr.c  |   24
-rw-r--r--  module/os/linux/zfs/zvol_os.c    |  177
17 files changed, 471 insertions(+), 1405 deletions(-)
diff --git a/module/os/linux/zfs/Makefile.in b/module/os/linux/zfs/Makefile.in
index 87414d6eacc5..75bec52c94e2 100644
--- a/module/os/linux/zfs/Makefile.in
+++ b/module/os/linux/zfs/Makefile.in
@@ -23,8 +23,9 @@ $(MODULE)-objs += ../os/linux/zfs/zfs_dir.o
$(MODULE)-objs += ../os/linux/zfs/zfs_file_os.o
$(MODULE)-objs += ../os/linux/zfs/zfs_ioctl_os.o
$(MODULE)-objs += ../os/linux/zfs/zfs_sysfs.o
+$(MODULE)-objs += ../os/linux/zfs/zfs_uio.o
$(MODULE)-objs += ../os/linux/zfs/zfs_vfsops.o
-$(MODULE)-objs += ../os/linux/zfs/zfs_vnops.o
+$(MODULE)-objs += ../os/linux/zfs/zfs_vnops_os.o
$(MODULE)-objs += ../os/linux/zfs/zfs_znode.o
$(MODULE)-objs += ../os/linux/zfs/zio_crypt.o
$(MODULE)-objs += ../os/linux/zfs/zpl_ctldir.o
diff --git a/module/os/linux/zfs/abd_os.c b/module/os/linux/zfs/abd_os.c
index c2281449ed12..0abac228447f 100644
--- a/module/os/linux/zfs/abd_os.c
+++ b/module/os/linux/zfs/abd_os.c
@@ -178,7 +178,7 @@ static struct page *abd_zero_page = NULL;
static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;
-static size_t
+static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
diff --git a/module/os/linux/zfs/arc_os.c b/module/os/linux/zfs/arc_os.c
index 792c75d46ffe..83d4a3d8496c 100644
--- a/module/os/linux/zfs/arc_os.c
+++ b/module/os/linux/zfs/arc_os.c
@@ -48,6 +48,8 @@
#include <sys/vmsystm.h>
#include <sys/zpl.h>
#include <linux/page_compat.h>
+#include <linux/notifier.h>
+#include <linux/memory.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
@@ -73,6 +75,9 @@
*/
int zfs_arc_shrinker_limit = 10000;
+#ifdef CONFIG_MEMORY_HOTPLUG
+static struct notifier_block arc_hotplug_callback_mem_nb;
+#endif
/*
* Return a default max arc size based on the amount of physical memory.
@@ -278,18 +283,9 @@ arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
return (0);
}
-void
-arc_lowmem_init(void)
+static void
+arc_set_sys_free(uint64_t allmem)
{
- uint64_t allmem = arc_all_memory();
-
- /*
- * Register a shrinker to support synchronous (direct) memory
- * reclaim from the arc. This is done to prevent kswapd from
- * swapping out pages when it is preferable to shrink the arc.
- */
- spl_register_shrinker(&arc_shrinker);
-
/*
* The ARC tries to keep at least this much memory available for the
* system. This gives the ARC time to shrink in response to memory
@@ -343,6 +339,20 @@ arc_lowmem_init(void)
}
void
+arc_lowmem_init(void)
+{
+ uint64_t allmem = arc_all_memory();
+
+ /*
+ * Register a shrinker to support synchronous (direct) memory
+ * reclaim from the arc. This is done to prevent kswapd from
+ * swapping out pages when it is preferable to shrink the arc.
+ */
+ spl_register_shrinker(&arc_shrinker);
+ arc_set_sys_free(allmem);
+}
+
+void
arc_lowmem_fini(void)
{
spl_unregister_shrinker(&arc_shrinker);
@@ -375,6 +385,52 @@ param_set_arc_int(const char *buf, zfs_kernel_param_t *kp)
return (0);
}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+/* ARGSUSED */
+static int
+arc_hotplug_callback(struct notifier_block *self, unsigned long action,
+ void *arg)
+{
+ uint64_t allmem = arc_all_memory();
+ if (action != MEM_ONLINE)
+ return (NOTIFY_OK);
+
+ arc_set_limits(allmem);
+
+#ifdef __LP64__
+ if (zfs_dirty_data_max_max == 0)
+ zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
+ allmem * zfs_dirty_data_max_max_percent / 100);
+#else
+ if (zfs_dirty_data_max_max == 0)
+ zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
+ allmem * zfs_dirty_data_max_max_percent / 100);
+#endif
+
+ arc_set_sys_free(allmem);
+ return (NOTIFY_OK);
+}
+#endif
+
+void
+arc_register_hotplug(void)
+{
+#ifdef CONFIG_MEMORY_HOTPLUG
+ arc_hotplug_callback_mem_nb.notifier_call = arc_hotplug_callback;
+ /* There is no significance to the value 100 */
+ arc_hotplug_callback_mem_nb.priority = 100;
+ register_memory_notifier(&arc_hotplug_callback_mem_nb);
+#endif
+}
+
+void
+arc_unregister_hotplug(void)
+{
+#ifdef CONFIG_MEMORY_HOTPLUG
+ unregister_memory_notifier(&arc_hotplug_callback_mem_nb);
+#endif
+}
#else /* _KERNEL */
int64_t
arc_available_memory(void)
@@ -405,6 +461,16 @@ arc_free_memory(void)
{
return (spa_get_random(arc_all_memory() * 20 / 100));
}
+
+void
+arc_register_hotplug(void)
+{
+}
+
+void
+arc_unregister_hotplug(void)
+{
+}
#endif /* _KERNEL */
/*
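
Note: the hotplug support added above follows the stock Linux memory-notifier
pattern: populate a struct notifier_block, register it, and react to MEM_ONLINE
events. A minimal standalone sketch of that pattern, assuming
CONFIG_MEMORY_HOTPLUG (every name below is illustrative, not part of this
change):

#include <linux/memory.h>
#include <linux/notifier.h>

/* Hypothetical callback: recompute memory-derived limits on hotplug. */
static int
example_mem_callback(struct notifier_block *self, unsigned long action,
    void *arg)
{
	if (action != MEM_ONLINE)
		return (NOTIFY_OK);
	/*
	 * New memory is online; recompute limits here, just as
	 * arc_hotplug_callback() recomputes the ARC limits above.
	 */
	return (NOTIFY_OK);
}

static struct notifier_block example_mem_nb = {
	.notifier_call = example_mem_callback,
	.priority = 100,	/* arbitrary, as in arc_register_hotplug() */
};

/*
 * Call register_memory_notifier(&example_mem_nb) at init and
 * unregister_memory_notifier(&example_mem_nb) at teardown.
 */
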
diff --git a/module/os/linux/zfs/policy.c b/module/os/linux/zfs/policy.c
index 5267d67eea82..8780d7f6c70a 100644
--- a/module/os/linux/zfs/policy.c
+++ b/module/os/linux/zfs/policy.c
@@ -204,7 +204,8 @@ secpolicy_vnode_setdac(const cred_t *cr, uid_t owner)
* Enforced in the Linux VFS.
*/
int
-secpolicy_vnode_setid_retain(const cred_t *cr, boolean_t issuidroot)
+secpolicy_vnode_setid_retain(struct znode *zp __maybe_unused, const cred_t *cr,
+ boolean_t issuidroot)
{
return (priv_policy_user(cr, CAP_FSETID, EPERM));
}
@@ -271,7 +272,7 @@ void
secpolicy_setid_clear(vattr_t *vap, cred_t *cr)
{
if ((vap->va_mode & (S_ISUID | S_ISGID)) != 0 &&
- secpolicy_vnode_setid_retain(cr,
+ secpolicy_vnode_setid_retain(NULL, cr,
(vap->va_mode & S_ISUID) != 0 &&
(vap->va_mask & AT_UID) != 0 && vap->va_uid == 0) != 0) {
vap->va_mask |= AT_MODE;
diff --git a/module/os/linux/zfs/vdev_disk.c b/module/os/linux/zfs/vdev_disk.c
index a54961c76870..4bd27d1b516f 100644
--- a/module/os/linux/zfs/vdev_disk.c
+++ b/module/os/linux/zfs/vdev_disk.c
@@ -94,6 +94,14 @@ bdev_capacity(struct block_device *bdev)
return (i_size_read(bdev->bd_inode));
}
+#if !defined(HAVE_BDEV_WHOLE)
+static inline struct block_device *
+bdev_whole(struct block_device *bdev)
+{
+ return (bdev->bd_contains);
+}
+#endif
+
/*
* Returns the maximum expansion capacity of the block device (in bytes).
*
@@ -118,7 +126,7 @@ bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
uint64_t psize;
int64_t available;
- if (wholedisk && bdev->bd_part != NULL && bdev != bdev->bd_contains) {
+ if (wholedisk && bdev != bdev_whole(bdev)) {
/*
* When reporting maximum expansion capacity for a wholedisk
* deduct any capacity which is expected to be lost due to
@@ -132,7 +140,7 @@ bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
* "reserved" EFI partition: in such cases return the device
* usable capacity.
*/
- available = i_size_read(bdev->bd_contains->bd_inode) -
+ available = i_size_read(bdev_whole(bdev)->bd_inode) -
((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
PARTITION_END_ALIGNMENT) << SECTOR_BITS);
psize = MAX(available, bdev_capacity(bdev));
@@ -192,8 +200,8 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
vd->vd_bdev = NULL;
if (bdev) {
- if (v->vdev_expanding && bdev != bdev->bd_contains) {
- bdevname(bdev->bd_contains, disk_name + 5);
+ if (v->vdev_expanding && bdev != bdev_whole(bdev)) {
+ bdevname(bdev_whole(bdev), disk_name + 5);
/*
* If userland has BLKPG_RESIZE_PARTITION,
* then it should have updated the partition
@@ -468,7 +476,11 @@ vdev_blkg_tryget(struct blkcg_gq *blkg)
this_cpu_inc(*count);
rc = true;
} else {
+#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
+ rc = atomic_long_inc_not_zero(&ref->data->count);
+#else
rc = atomic_long_inc_not_zero(&ref->count);
+#endif
}
rcu_read_unlock_sched();
@@ -787,7 +799,7 @@ vdev_disk_io_done(zio_t *zio)
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
- if (check_disk_change(vd->vd_bdev)) {
+ if (zfs_check_media_change(vd->vd_bdev)) {
invalidate_bdev(vd->vd_bdev);
v->vdev_remove_wanted = B_TRUE;
spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
@@ -822,9 +834,13 @@ vdev_disk_rele(vdev_t *vd)
}
vdev_ops_t vdev_disk_ops = {
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
.vdev_op_open = vdev_disk_open,
.vdev_op_close = vdev_disk_close,
.vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_disk_io_start,
.vdev_op_io_done = vdev_disk_io_done,
.vdev_op_state_change = NULL,
@@ -833,6 +849,11 @@ vdev_ops_t vdev_disk_ops = {
.vdev_op_rele = vdev_disk_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
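
Note: the ZFS_PERCPU_REF_COUNT_IN_DATA branch in vdev_blkg_tryget() tracks a
kernel layout change: in older kernels the percpu_ref atomic count is embedded
in struct percpu_ref itself, while newer kernels (circa Linux 5.9) move it into
a separately allocated struct percpu_ref_data reached via ref->data. A hedged
sketch of the two layouts (fields abridged; the names are suffixed here only so
the sketch compiles, the kernel calls both "struct percpu_ref"):

/* Older layout: count embedded, so the code uses ref->count. */
struct percpu_ref_old {
	atomic_long_t count;
	unsigned long percpu_count_ptr;
};

/* Newer layout: count behind a pointer, so the code uses ref->data->count. */
struct percpu_ref_data_new {
	atomic_long_t count;
};

struct percpu_ref_new {
	unsigned long percpu_count_ptr;
	struct percpu_ref_data_new *data;
};
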
diff --git a/module/os/linux/zfs/vdev_file.c b/module/os/linux/zfs/vdev_file.c
index 423ce858144c..bf8a13ae6154 100644
--- a/module/os/linux/zfs/vdev_file.c
+++ b/module/os/linux/zfs/vdev_file.c
@@ -305,9 +305,13 @@ vdev_file_io_done(zio_t *zio)
}
vdev_ops_t vdev_file_ops = {
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
.vdev_op_open = vdev_file_open,
.vdev_op_close = vdev_file_close,
.vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_file_io_start,
.vdev_op_io_done = vdev_file_io_done,
.vdev_op_state_change = NULL,
@@ -316,6 +320,11 @@ vdev_ops_t vdev_file_ops = {
.vdev_op_rele = vdev_file_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_FILE, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
@@ -341,9 +350,13 @@ vdev_file_fini(void)
#ifndef _KERNEL
vdev_ops_t vdev_disk_ops = {
+ .vdev_op_init = NULL,
+ .vdev_op_fini = NULL,
.vdev_op_open = vdev_file_open,
.vdev_op_close = vdev_file_close,
.vdev_op_asize = vdev_default_asize,
+ .vdev_op_min_asize = vdev_default_min_asize,
+ .vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_file_io_start,
.vdev_op_io_done = vdev_file_io_done,
.vdev_op_state_change = NULL,
@@ -352,6 +365,11 @@ vdev_ops_t vdev_disk_ops = {
.vdev_op_rele = vdev_file_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
+ .vdev_op_rebuild_asize = NULL,
+ .vdev_op_metaslab_init = NULL,
+ .vdev_op_config_generate = NULL,
+ .vdev_op_nparity = NULL,
+ .vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
diff --git a/module/os/linux/zfs/zfs_ctldir.c b/module/os/linux/zfs/zfs_ctldir.c
index c13a9771235d..a1668e46e4f9 100644
--- a/module/os/linux/zfs/zfs_ctldir.c
+++ b/module/os/linux/zfs/zfs_ctldir.c
@@ -467,7 +467,6 @@ zfsctl_inode_alloc(zfsvfs_t *zfsvfs, uint64_t id,
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
zp->z_zn_prefetch = B_FALSE;
- zp->z_moved = B_FALSE;
zp->z_is_sa = B_FALSE;
zp->z_is_mapped = B_FALSE;
zp->z_is_ctldir = B_TRUE;
diff --git a/module/os/linux/zfs/zfs_vfsops.c b/module/os/linux/zfs/zfs_vfsops.c
index b218237d07ff..ef5927d4f155 100644
--- a/module/os/linux/zfs/zfs_vfsops.c
+++ b/module/os/linux/zfs/zfs_vfsops.c
@@ -294,7 +294,7 @@ zfs_sync(struct super_block *sb, int wait, cred_t *cr)
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
- * run sync(1M). Unlike other filesystems, ZFS honors the
+ * run sync(1). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
@@ -1451,7 +1451,7 @@ int
zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
{
const char *osname = zm->mnt_osname;
- struct inode *root_inode;
+ struct inode *root_inode = NULL;
uint64_t recordsize;
int error = 0;
zfsvfs_t *zfsvfs = NULL;
diff --git a/module/os/linux/zfs/zfs_vnops.c b/module/os/linux/zfs/zfs_vnops.c
index b668c7dff013..3be387a30e5c 100644
--- a/module/os/linux/zfs/zfs_vnops.c
+++ b/module/os/linux/zfs/zfs_vnops.c
@@ -240,78 +240,6 @@ zfs_close(struct inode *ip, int flag, cred_t *cr)
return (0);
}
-#if defined(SEEK_HOLE) && defined(SEEK_DATA)
-/*
- * Lseek support for finding holes (cmd == SEEK_HOLE) and
- * data (cmd == SEEK_DATA). "off" is an in/out parameter.
- */
-static int
-zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
-{
- znode_t *zp = ITOZ(ip);
- uint64_t noff = (uint64_t)*off; /* new offset */
- uint64_t file_sz;
- int error;
- boolean_t hole;
-
- file_sz = zp->z_size;
- if (noff >= file_sz) {
- return (SET_ERROR(ENXIO));
- }
-
- if (cmd == SEEK_HOLE)
- hole = B_TRUE;
- else
- hole = B_FALSE;
-
- error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
-
- if (error == ESRCH)
- return (SET_ERROR(ENXIO));
-
- /* file was dirty, so fall back to using generic logic */
- if (error == EBUSY) {
- if (hole)
- *off = file_sz;
-
- return (0);
- }
-
- /*
- * We could find a hole that begins after the logical end-of-file,
- * because dmu_offset_next() only works on whole blocks. If the
- * EOF falls mid-block, then indicate that the "virtual hole"
- * at the end of the file begins at the logical EOF, rather than
- * at the end of the last block.
- */
- if (noff > file_sz) {
- ASSERT(hole);
- noff = file_sz;
- }
-
- if (noff < *off)
- return (error);
- *off = noff;
- return (error);
-}
-
-int
-zfs_holey(struct inode *ip, int cmd, loff_t *off)
-{
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ITOZSB(ip);
- int error;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- error = zfs_holey_common(ip, cmd, off);
-
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-#endif /* SEEK_HOLE && SEEK_DATA */
-
#if defined(_KERNEL)
/*
* When a file is memory mapped, we must keep the IO data synchronized
@@ -320,10 +248,10 @@ zfs_holey(struct inode *ip, int cmd, loff_t *off)
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
-static void
-update_pages(struct inode *ip, int64_t start, int len,
- objset_t *os, uint64_t oid)
+void
+update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
+ struct inode *ip = ZTOI(zp);
struct address_space *mp = ip->i_mapping;
struct page *pp;
uint64_t nbytes;
@@ -340,8 +268,8 @@ update_pages(struct inode *ip, int64_t start, int len,
flush_dcache_page(pp);
pb = kmap(pp);
- (void) dmu_read(os, oid, start+off, nbytes, pb+off,
- DMU_READ_PREFETCH);
+ (void) dmu_read(os, zp->z_id, start + off, nbytes,
+ pb + off, DMU_READ_PREFETCH);
kunmap(pp);
if (mapping_writably_mapped(mp))
@@ -369,12 +297,12 @@ update_pages(struct inode *ip, int64_t start, int len,
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
-static int
-mappedread(struct inode *ip, int nbytes, uio_t *uio)
+int
+mappedread(znode_t *zp, int nbytes, uio_t *uio)
{
+ struct inode *ip = ZTOI(zp);
struct address_space *mp = ip->i_mapping;
struct page *pp;
- znode_t *zp = ITOZ(ip);
int64_t start, off;
uint64_t bytes;
int len = nbytes;
@@ -414,575 +342,9 @@ mappedread(struct inode *ip, int nbytes, uio_t *uio)
}
#endif /* _KERNEL */
-unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
/*
- * Read bytes from specified file into supplied buffer.
- *
- * IN: ip - inode of file to be read from.
- * uio - structure supplying read location, range info,
- * and return buffer.
- * ioflag - O_SYNC flags; used to provide FRSYNC semantics.
- * O_DIRECT flag; used to bypass page cache.
- * cr - credentials of caller.
- *
- * OUT: uio - updated offset and range, buffer filled.
- *
- * RETURN: 0 on success, error code on failure.
- *
- * Side Effects:
- * inode - atime updated if byte count > 0
- */
-/* ARGSUSED */
-int
-zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
-{
- int error = 0;
- boolean_t frsync = B_FALSE;
-
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ITOZSB(ip);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- if (zp->z_pflags & ZFS_AV_QUARANTINED) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EACCES));
- }
-
- /*
- * Validate file offset
- */
- if (uio->uio_loffset < (offset_t)0) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- /*
- * Fasttrack empty reads
- */
- if (uio->uio_resid == 0) {
- ZFS_EXIT(zfsvfs);
- return (0);
- }
-
-#ifdef FRSYNC
- /*
- * If we're in FRSYNC mode, sync out this znode before reading it.
- * Only do this for non-snapshots.
- *
- * Some platforms do not support FRSYNC and instead map it
- * to O_SYNC, which results in unnecessary calls to zil_commit. We
- * only honor FRSYNC requests on platforms which support it.
- */
- frsync = !!(ioflag & FRSYNC);
-#endif
- if (zfsvfs->z_log &&
- (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
- zil_commit(zfsvfs->z_log, zp->z_id);
-
- /*
- * Lock the range against changes.
- */
- zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
- uio->uio_loffset, uio->uio_resid, RL_READER);
-
- /*
- * If we are reading past end-of-file we can skip
- * to the end; but we might still need to set atime.
- */
- if (uio->uio_loffset >= zp->z_size) {
- error = 0;
- goto out;
- }
-
- ASSERT(uio->uio_loffset < zp->z_size);
- ssize_t n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
- ssize_t start_resid = n;
-
-#ifdef HAVE_UIO_ZEROCOPY
- xuio_t *xuio = NULL;
- if ((uio->uio_extflg == UIO_XUIO) &&
- (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
- int nblk;
- int blksz = zp->z_blksz;
- uint64_t offset = uio->uio_loffset;
-
- xuio = (xuio_t *)uio;
- if ((ISP2(blksz))) {
- nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
- blksz)) / blksz;
- } else {
- ASSERT(offset + n <= blksz);
- nblk = 1;
- }
- (void) dmu_xuio_init(xuio, nblk);
-
- if (vn_has_cached_data(ip)) {
- /*
- * For simplicity, we always allocate a full buffer
- * even if we only expect to read a portion of a block.
- */
- while (--nblk >= 0) {
- (void) dmu_xuio_add(xuio,
- dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- blksz), 0, blksz);
- }
- }
- }
-#endif /* HAVE_UIO_ZEROCOPY */
-
- while (n > 0) {
- ssize_t nbytes = MIN(n, zfs_read_chunk_size -
- P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
-
- if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
- error = mappedread(ip, nbytes, uio);
- } else {
- error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
- uio, nbytes);
- }
-
- if (error) {
- /* convert checksum errors into IO errors */
- if (error == ECKSUM)
- error = SET_ERROR(EIO);
- break;
- }
-
- n -= nbytes;
- }
-
- int64_t nread = start_resid - n;
- dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
- task_io_account_read(nread);
-out:
- zfs_rangelock_exit(lr);
-
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-
-/*
- * Write the bytes to a file.
- *
- * IN: ip - inode of file to be written to.
- * uio - structure supplying write location, range info,
- * and data buffer.
- * ioflag - O_APPEND flag set if in append mode.
- * O_DIRECT flag; used to bypass page cache.
- * cr - credentials of caller.
- *
- * OUT: uio - updated offset and range.
- *
- * RETURN: 0 if success
- * error code if failure
- *
- * Timestamps:
- * ip - ctime|mtime updated if byte count > 0
- */
-
-/* ARGSUSED */
-int
-zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
-{
- int error = 0;
- ssize_t start_resid = uio->uio_resid;
-
- /*
- * Fasttrack empty write
- */
- ssize_t n = start_resid;
- if (n == 0)
- return (0);
-
- rlim64_t limit = uio->uio_limit;
- if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
- limit = MAXOFFSET_T;
-
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ZTOZSB(zp);
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- sa_bulk_attr_t bulk[4];
- int count = 0;
- uint64_t mtime[2], ctime[2];
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
- &zp->z_size, 8);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
- &zp->z_pflags, 8);
-
- /*
- * Callers might not be able to detect properly that we are read-only,
- * so check it explicitly here.
- */
- if (zfs_is_readonly(zfsvfs)) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EROFS));
- }
-
- /*
- * If immutable or not appending then return EPERM
- */
- if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
- ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
- (uio->uio_loffset < zp->z_size))) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EPERM));
- }
-
- /*
- * Validate file offset
- */
- offset_t woff = ioflag & O_APPEND ? zp->z_size : uio->uio_loffset;
- if (woff < 0) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- int max_blksz = zfsvfs->z_max_blksz;
- xuio_t *xuio = NULL;
-
- /*
- * Pre-fault the pages to ensure slow (eg NFS) pages
- * don't hold up txg.
- * Skip this if uio contains loaned arc_buf.
- */
-#ifdef HAVE_UIO_ZEROCOPY
- if ((uio->uio_extflg == UIO_XUIO) &&
- (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
- xuio = (xuio_t *)uio;
- else
-#endif
- if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EFAULT));
- }
-
- /*
- * If in append mode, set the io offset pointer to eof.
- */
- zfs_locked_range_t *lr;
- if (ioflag & O_APPEND) {
- /*
- * Obtain an appending range lock to guarantee file append
- * semantics. We reset the write offset once we have the lock.
- */
- lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
- woff = lr->lr_offset;
- if (lr->lr_length == UINT64_MAX) {
- /*
- * We overlocked the file because this write will cause
- * the file block size to increase.
- * Note that zp_size cannot change with this lock held.
- */
- woff = zp->z_size;
- }
- uio->uio_loffset = woff;
- } else {
- /*
- * Note that if the file block size will change as a result of
- * this write, then this range lock will lock the entire file
- * so that we can re-write the block safely.
- */
- lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
- }
-
- if (woff >= limit) {
- zfs_rangelock_exit(lr);
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EFBIG));
- }
-
- if ((woff + n) > limit || woff > (limit - n))
- n = limit - woff;
-
- /* Will this write extend the file length? */
- int write_eof = (woff + n > zp->z_size);
-
- uint64_t end_size = MAX(zp->z_size, woff + n);
- zilog_t *zilog = zfsvfs->z_log;
-#ifdef HAVE_UIO_ZEROCOPY
- int i_iov = 0;
- const iovec_t *iovp = uio->uio_iov;
- int iovcnt __maybe_unused = uio->uio_iovcnt;
-#endif
-
-
- /*
- * Write the file in reasonable size chunks. Each chunk is written
- * in a separate transaction; this keeps the intent log records small
- * and allows us to do more fine-grained space accounting.
- */
- while (n > 0) {
- woff = uio->uio_loffset;
-
- if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
- KUID_TO_SUID(ip->i_uid)) ||
- zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
- KGID_TO_SGID(ip->i_gid)) ||
- (zp->z_projid != ZFS_DEFAULT_PROJID &&
- zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
- zp->z_projid))) {
- error = SET_ERROR(EDQUOT);
- break;
- }
-
- arc_buf_t *abuf = NULL;
- const iovec_t *aiov = NULL;
- if (xuio) {
-#ifdef HAVE_UIO_ZEROCOPY
- ASSERT(i_iov < iovcnt);
- ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
- aiov = &iovp[i_iov];
- abuf = dmu_xuio_arcbuf(xuio, i_iov);
- dmu_xuio_clear(xuio, i_iov);
- ASSERT((aiov->iov_base == abuf->b_data) ||
- ((char *)aiov->iov_base - (char *)abuf->b_data +
- aiov->iov_len == arc_buf_size(abuf)));
- i_iov++;
-#endif
- } else if (n >= max_blksz && woff >= zp->z_size &&
- P2PHASE(woff, max_blksz) == 0 &&
- zp->z_blksz == max_blksz) {
- /*
- * This write covers a full block. "Borrow" a buffer
- * from the dmu so that we can fill it before we enter
- * a transaction. This avoids the possibility of
- * holding up the transaction if the data copy hangs
- * up on a pagefault (e.g., from an NFS server mapping).
- */
- size_t cbytes;
-
- abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- max_blksz);
- ASSERT(abuf != NULL);
- ASSERT(arc_buf_size(abuf) == max_blksz);
- if ((error = uiocopy(abuf->b_data, max_blksz,
- UIO_WRITE, uio, &cbytes))) {
- dmu_return_arcbuf(abuf);
- break;
- }
- ASSERT(cbytes == max_blksz);
- }
-
- /*
- * Start a transaction.
- */
- dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
- dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
- dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
- DB_DNODE_ENTER(db);
- dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
- MIN(n, max_blksz));
- DB_DNODE_EXIT(db);
- zfs_sa_upgrade_txholds(tx, zp);
- error = dmu_tx_assign(tx, TXG_WAIT);
- if (error) {
- dmu_tx_abort(tx);
- if (abuf != NULL)
- dmu_return_arcbuf(abuf);
- break;
- }
-
- /*
- * If rangelock_enter() over-locked we grow the blocksize
- * and then reduce the lock range. This will only happen
- * on the first iteration since rangelock_reduce() will
- * shrink down lr_length to the appropriate size.
- */
- if (lr->lr_length == UINT64_MAX) {
- uint64_t new_blksz;
-
- if (zp->z_blksz > max_blksz) {
- /*
- * File's blocksize is already larger than the
- * "recordsize" property. Only let it grow to
- * the next power of 2.
- */
- ASSERT(!ISP2(zp->z_blksz));
- new_blksz = MIN(end_size,
- 1 << highbit64(zp->z_blksz));
- } else {
- new_blksz = MIN(end_size, max_blksz);
- }
- zfs_grow_blocksize(zp, new_blksz, tx);
- zfs_rangelock_reduce(lr, woff, n);
- }
-
- /*
- * XXX - should we really limit each write to z_max_blksz?
- * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
- */
- ssize_t nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
-
- ssize_t tx_bytes;
- if (abuf == NULL) {
- tx_bytes = uio->uio_resid;
- uio->uio_fault_disable = B_TRUE;
- error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
- uio, nbytes, tx);
- uio->uio_fault_disable = B_FALSE;
- if (error == EFAULT) {
- dmu_tx_commit(tx);
- /*
- * Account for partial writes before
- * continuing the loop.
- * Update needs to occur before the next
- * uio_prefaultpages, or prefaultpages may
- * error, and we may break the loop early.
- */
- if (tx_bytes != uio->uio_resid)
- n -= tx_bytes - uio->uio_resid;
- if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
- break;
- }
- continue;
- } else if (error != 0) {
- dmu_tx_commit(tx);
- break;
- }
- tx_bytes -= uio->uio_resid;
- } else {
- tx_bytes = nbytes;
- ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
- /*
- * If this is not a full block write, but we are
- * extending the file past EOF and this data starts
- * block-aligned, use assign_arcbuf(). Otherwise,
- * write via dmu_write().
- */
- if (tx_bytes < max_blksz && (!write_eof ||
- aiov->iov_base != abuf->b_data)) {
- ASSERT(xuio);
- dmu_write(zfsvfs->z_os, zp->z_id, woff,
- /* cppcheck-suppress nullPointer */
- aiov->iov_len, aiov->iov_base, tx);
- dmu_return_arcbuf(abuf);
- xuio_stat_wbuf_copied();
- } else {
- ASSERT(xuio || tx_bytes == max_blksz);
- error = dmu_assign_arcbuf_by_dbuf(
- sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
- if (error != 0) {
- dmu_return_arcbuf(abuf);
- dmu_tx_commit(tx);
- break;
- }
- }
- ASSERT(tx_bytes <= uio->uio_resid);
- uioskip(uio, tx_bytes);
- }
- if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) {
- update_pages(ip, woff,
- tx_bytes, zfsvfs->z_os, zp->z_id);
- }
-
- /*
- * If we made no progress, we're done. If we made even
- * partial progress, update the znode and ZIL accordingly.
- */
- if (tx_bytes == 0) {
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
- (void *)&zp->z_size, sizeof (uint64_t), tx);
- dmu_tx_commit(tx);
- ASSERT(error != 0);
- break;
- }
-
- /*
- * Clear Set-UID/Set-GID bits on successful write if not
- * privileged and at least one of the execute bits is set.
- *
- * It would be nice to do this after all writes have
- * been done, but that would still expose the ISUID/ISGID
- * to another app after the partial write is committed.
- *
- * Note: we don't call zfs_fuid_map_id() here because
- * user 0 is not an ephemeral uid.
- */
- mutex_enter(&zp->z_acl_lock);
- uint32_t uid = KUID_TO_SUID(ip->i_uid);
- if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
- (S_IXUSR >> 6))) != 0 &&
- (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
- secpolicy_vnode_setid_retain(cr,
- ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
- uint64_t newmode;
- zp->z_mode &= ~(S_ISUID | S_ISGID);
- ip->i_mode = newmode = zp->z_mode;
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
- (void *)&newmode, sizeof (uint64_t), tx);
- }
- mutex_exit(&zp->z_acl_lock);
-
- zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
-
- /*
- * Update the file size (zp_size) if it has changed;
- * account for possible concurrent updates.
- */
- while ((end_size = zp->z_size) < uio->uio_loffset) {
- (void) atomic_cas_64(&zp->z_size, end_size,
- uio->uio_loffset);
- ASSERT(error == 0);
- }
- /*
- * If we are replaying and eof is non zero then force
- * the file size to the specified eof. Note, there's no
- * concurrency during replay.
- */
- if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
- zp->z_size = zfsvfs->z_replay_eof;
-
- error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
-
- zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
- NULL, NULL);
- dmu_tx_commit(tx);
-
- if (error != 0)
- break;
- ASSERT(tx_bytes == nbytes);
- n -= nbytes;
-
- if (!xuio && n > 0) {
- if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
- error = EFAULT;
- break;
- }
- }
- }
-
- zfs_inode_update(zp);
- zfs_rangelock_exit(lr);
-
- /*
- * If we're in replay mode, or we made no progress, return error.
- * Otherwise, it's at least a partial write, so it's successful.
- */
- if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
- ZFS_EXIT(zfsvfs);
- return (error);
- }
-
- if (ioflag & (O_SYNC | O_DSYNC) ||
- zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, zp->z_id);
-
- int64_t nwritten = start_resid - uio->uio_resid;
- dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
- task_io_account_write(nwritten);
-
- ZFS_EXIT(zfsvfs);
- return (0);
-}
-
-/*
* Write the bytes to a file.
*
* IN: zp - znode of file to be written to
@@ -993,37 +355,40 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
* OUT: resid - remaining bytes to write
*
* RETURN: 0 if success
- * positive error code if failure
+ * positive error code if failure. EIO is returned
+ * for a short write when residp isn't provided.
*
* Timestamps:
* zp - ctime|mtime updated if byte count > 0
*/
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
- loff_t pos, size_t *resid)
+ loff_t pos, size_t *residp)
{
- ssize_t written;
- int error = 0;
+ fstrans_cookie_t cookie;
+ int error;
- written = zpl_write_common(ZTOI(zp), data, len, &pos,
- UIO_SYSSPACE, 0, kcred);
- if (written < 0) {
- error = -written;
- } else if (resid == NULL) {
- if (written < len)
- error = SET_ERROR(EIO); /* short write */
- } else {
- *resid = len - written;
+ struct iovec iov;
+ iov.iov_base = (void *)data;
+ iov.iov_len = len;
+
+ uio_t uio;
+ uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0);
+
+ cookie = spl_fstrans_mark();
+ error = zfs_write(zp, &uio, 0, kcred);
+ spl_fstrans_unmark(cookie);
+
+ if (error == 0) {
+ if (residp != NULL)
+ *residp = uio_resid(&uio);
+ else if (uio_resid(&uio) != 0)
+ error = SET_ERROR(EIO);
}
+
return (error);
}
-/*
- * Drop a reference on the passed inode asynchronously. This ensures
- * that the caller will never drop the last reference on an inode in
- * the current context. Doing so while holding open a tx could result
- * in a deadlock if iput_final() re-enters the filesystem code.
- */
void
zfs_zrele_async(znode_t *zp)
{
@@ -1040,179 +405,6 @@ zfs_zrele_async(znode_t *zp)
zrele(zp);
}
-/* ARGSUSED */
-static void
-zfs_get_done(zgd_t *zgd, int error)
-{
- znode_t *zp = zgd->zgd_private;
-
- if (zgd->zgd_db)
- dmu_buf_rele(zgd->zgd_db, zgd);
-
- zfs_rangelock_exit(zgd->zgd_lr);
-
- /*
- * Release the vnode asynchronously as we currently have the
- * txg stopped from syncing.
- */
- zfs_zrele_async(zp);
-
- kmem_free(zgd, sizeof (zgd_t));
-}
-
-#ifdef ZFS_DEBUG
-static int zil_fault_io = 0;
-#endif
-
-/*
- * Get data to generate a TX_WRITE intent log record.
- */
-int
-zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
-{
- zfsvfs_t *zfsvfs = arg;
- objset_t *os = zfsvfs->z_os;
- znode_t *zp;
- uint64_t object = lr->lr_foid;
- uint64_t offset = lr->lr_offset;
- uint64_t size = lr->lr_length;
- dmu_buf_t *db;
- zgd_t *zgd;
- int error = 0;
-
- ASSERT3P(lwb, !=, NULL);
- ASSERT3P(zio, !=, NULL);
- ASSERT3U(size, !=, 0);
-
- /*
- * Nothing to do if the file has been removed
- */
- if (zfs_zget(zfsvfs, object, &zp) != 0)
- return (SET_ERROR(ENOENT));
- if (zp->z_unlinked) {
- /*
- * Release the vnode asynchronously as we currently have the
- * txg stopped from syncing.
- */
- zfs_zrele_async(zp);
- return (SET_ERROR(ENOENT));
- }
-
- zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
- zgd->zgd_lwb = lwb;
- zgd->zgd_private = zp;
-
- /*
- * Write records come in two flavors: immediate and indirect.
- * For small writes it's cheaper to store the data with the
- * log record (immediate); for large writes it's cheaper to
- * sync the data and get a pointer to it (indirect) so that
- * we don't have to write the data twice.
- */
- if (buf != NULL) { /* immediate write */
- zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
- offset, size, RL_READER);
- /* test for truncation needs to be done while range locked */
- if (offset >= zp->z_size) {
- error = SET_ERROR(ENOENT);
- } else {
- error = dmu_read(os, object, offset, size, buf,
- DMU_READ_NO_PREFETCH);
- }
- ASSERT(error == 0 || error == ENOENT);
- } else { /* indirect write */
- /*
- * Have to lock the whole block to ensure when it's
- * written out and its checksum is being calculated
- * that no one can change the data. We need to re-check
- * blocksize after we get the lock in case it's changed!
- */
- for (;;) {
- uint64_t blkoff;
- size = zp->z_blksz;
- blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
- offset -= blkoff;
- zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
- offset, size, RL_READER);
- if (zp->z_blksz == size)
- break;
- offset += blkoff;
- zfs_rangelock_exit(zgd->zgd_lr);
- }
- /* test for truncation needs to be done while range locked */
- if (lr->lr_offset >= zp->z_size)
- error = SET_ERROR(ENOENT);
-#ifdef ZFS_DEBUG
- if (zil_fault_io) {
- error = SET_ERROR(EIO);
- zil_fault_io = 0;
- }
-#endif
- if (error == 0)
- error = dmu_buf_hold(os, object, offset, zgd, &db,
- DMU_READ_NO_PREFETCH);
-
- if (error == 0) {
- blkptr_t *bp = &lr->lr_blkptr;
-
- zgd->zgd_db = db;
- zgd->zgd_bp = bp;
-
- ASSERT(db->db_offset == offset);
- ASSERT(db->db_size == size);
-
- error = dmu_sync(zio, lr->lr_common.lrc_txg,
- zfs_get_done, zgd);
- ASSERT(error || lr->lr_length <= size);
-
- /*
- * On success, we need to wait for the write I/O
- * initiated by dmu_sync() to complete before we can
- * release this dbuf. We will finish everything up
- * in the zfs_get_done() callback.
- */
- if (error == 0)
- return (0);
-
- if (error == EALREADY) {
- lr->lr_common.lrc_txtype = TX_WRITE2;
- /*
- * TX_WRITE2 relies on the data previously
- * written by the TX_WRITE that caused
- * EALREADY. We zero out the BP because
- * it is the old, currently-on-disk BP.
- */
- zgd->zgd_bp = NULL;
- BP_ZERO(bp);
- error = 0;
- }
- }
- }
-
- zfs_get_done(zgd, error);
-
- return (error);
-}
-
-/*ARGSUSED*/
-int
-zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
-{
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ITOZSB(ip);
- int error;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- if (flag & V_ACE_MASK)
- error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
- else
- error = zfs_zaccess_rwx(zp, mode, flag, cr);
-
- ZFS_EXIT(zfsvfs);
- return (error);
-}
/*
* Lookup an entry in a directory, or an extended attribute directory.
@@ -2440,26 +1632,6 @@ out:
return (error);
}
-ulong_t zfs_fsync_sync_cnt = 4;
-
-int
-zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
-{
- zfsvfs_t *zfsvfs = ZTOZSB(zp);
-
- (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
-
- if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
- zil_commit(zfsvfs->z_log, zp->z_id);
- ZFS_EXIT(zfsvfs);
- }
- tsd_set(zfs_fsyncer_key, NULL);
-
- return (0);
-}
-
/*
* Get the basic file attributes and place them in the provided kstat
* structure. The inode is assumed to be the authoritative source
@@ -4796,207 +3968,9 @@ zfs_fid(struct inode *ip, fid_t *fidp)
return (0);
}
-/*ARGSUSED*/
-int
-zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
-{
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ITOZSB(ip);
- int error;
- boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
- error = zfs_getacl(zp, vsecp, skipaclchk, cr);
- ZFS_EXIT(zfsvfs);
-
- return (error);
-}
-
-/*ARGSUSED*/
-int
-zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
-{
- zfsvfs_t *zfsvfs = ZTOZSB(zp);
- int error;
- boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
- zilog_t *zilog = zfsvfs->z_log;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- error = zfs_setacl(zp, vsecp, skipaclchk, cr);
-
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
-
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-
-#ifdef HAVE_UIO_ZEROCOPY
-/*
- * The smallest read we may consider to loan out an arcbuf.
- * This must be a power of 2.
- */
-int zcr_blksz_min = (1 << 10); /* 1K */
-/*
- * If set to less than the file block size, allow loaning out of an
- * arcbuf for a partial block read. This must be a power of 2.
- */
-int zcr_blksz_max = (1 << 17); /* 128K */
-
-/*ARGSUSED*/
-static int
-zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
-{
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ITOZSB(ip);
- int max_blksz = zfsvfs->z_max_blksz;
- uio_t *uio = &xuio->xu_uio;
- ssize_t size = uio->uio_resid;
- offset_t offset = uio->uio_loffset;
- int blksz;
- int fullblk, i;
- arc_buf_t *abuf;
- ssize_t maxsize;
- int preamble, postamble;
-
- if (xuio->xu_type != UIOTYPE_ZEROCOPY)
- return (SET_ERROR(EINVAL));
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
- switch (ioflag) {
- case UIO_WRITE:
- /*
- * Loan out an arc_buf for write if write size is bigger than
- * max_blksz, and the file's block size is also max_blksz.
- */
- blksz = max_blksz;
- if (size < blksz || zp->z_blksz != blksz) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
- /*
- * Caller requests buffers for write before knowing where the
- * write offset might be (e.g. NFS TCP write).
- */
- if (offset == -1) {
- preamble = 0;
- } else {
- preamble = P2PHASE(offset, blksz);
- if (preamble) {
- preamble = blksz - preamble;
- size -= preamble;
- }
- }
-
- postamble = P2PHASE(size, blksz);
- size -= postamble;
-
- fullblk = size / blksz;
- (void) dmu_xuio_init(xuio,
- (preamble != 0) + fullblk + (postamble != 0));
-
- /*
- * Have to fix iov base/len for partial buffers. They
- * currently represent full arc_buf's.
- */
- if (preamble) {
- /* data begins in the middle of the arc_buf */
- abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- blksz);
- ASSERT(abuf);
- (void) dmu_xuio_add(xuio, abuf,
- blksz - preamble, preamble);
- }
-
- for (i = 0; i < fullblk; i++) {
- abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- blksz);
- ASSERT(abuf);
- (void) dmu_xuio_add(xuio, abuf, 0, blksz);
- }
-
- if (postamble) {
- /* data ends in the middle of the arc_buf */
- abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
- blksz);
- ASSERT(abuf);
- (void) dmu_xuio_add(xuio, abuf, 0, postamble);
- }
- break;
- case UIO_READ:
- /*
- * Loan out an arc_buf for read if the read size is larger than
- * the current file block size. Block alignment is not
- * considered. Partial arc_buf will be loaned out for read.
- */
- blksz = zp->z_blksz;
- if (blksz < zcr_blksz_min)
- blksz = zcr_blksz_min;
- if (blksz > zcr_blksz_max)
- blksz = zcr_blksz_max;
- /* avoid potential complexity of dealing with it */
- if (blksz > max_blksz) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- maxsize = zp->z_size - uio->uio_loffset;
- if (size > maxsize)
- size = maxsize;
-
- if (size < blksz) {
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
- break;
- default:
- ZFS_EXIT(zfsvfs);
- return (SET_ERROR(EINVAL));
- }
-
- uio->uio_extflg = UIO_XUIO;
- XUIO_XUZC_RW(xuio) = ioflag;
- ZFS_EXIT(zfsvfs);
- return (0);
-}
-
-/*ARGSUSED*/
-static int
-zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
-{
- int i;
- arc_buf_t *abuf;
- int ioflag = XUIO_XUZC_RW(xuio);
-
- ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
-
- i = dmu_xuio_cnt(xuio);
- while (i-- > 0) {
- abuf = dmu_xuio_arcbuf(xuio, i);
- /*
- * if abuf == NULL, it must be a write buffer
- * that has been returned in zfs_write().
- */
- if (abuf)
- dmu_return_arcbuf(abuf);
- ASSERT(abuf || ioflag == UIO_WRITE);
- }
-
- dmu_xuio_fini(xuio);
- return (0);
-}
-#endif /* HAVE_UIO_ZEROCOPY */
-
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_open);
EXPORT_SYMBOL(zfs_close);
-EXPORT_SYMBOL(zfs_read);
-EXPORT_SYMBOL(zfs_write);
-EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_lookup);
EXPORT_SYMBOL(zfs_create);
EXPORT_SYMBOL(zfs_tmpfile);
@@ -5004,7 +3978,6 @@ EXPORT_SYMBOL(zfs_remove);
EXPORT_SYMBOL(zfs_mkdir);
EXPORT_SYMBOL(zfs_rmdir);
EXPORT_SYMBOL(zfs_readdir);
-EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_getattr_fast);
EXPORT_SYMBOL(zfs_setattr);
EXPORT_SYMBOL(zfs_rename);
@@ -5014,8 +3987,6 @@ EXPORT_SYMBOL(zfs_link);
EXPORT_SYMBOL(zfs_inactive);
EXPORT_SYMBOL(zfs_space);
EXPORT_SYMBOL(zfs_fid);
-EXPORT_SYMBOL(zfs_getsecattr);
-EXPORT_SYMBOL(zfs_setsecattr);
EXPORT_SYMBOL(zfs_getpage);
EXPORT_SYMBOL(zfs_putpage);
EXPORT_SYMBOL(zfs_dirty_inode);
@@ -5024,8 +3995,6 @@ EXPORT_SYMBOL(zfs_map);
/* BEGIN CSTYLED */
module_param(zfs_delete_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
-module_param(zfs_read_chunk_size, ulong, 0644);
-MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
/* END CSTYLED */
#endif
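
Note: the bulk of the deletions above relocate zfs_read(), zfs_write(),
zfs_holey(), zfs_access(), zfs_fsync(), and the get/setsecattr entry points out
of this file (the rewritten zfs_write_simple() still calls zfs_write(), so the
implementations now live in common code, consistent with the Makefile's switch
to zfs_vnops_os.o). The reworked zfs_write_simple() also changes its
short-write contract: a short write is an error only when no resid pointer is
supplied. A hedged caller sketch (zp, buf, len, and off are illustrative):

size_t resid;

/* Tolerate short writes: on success, len - resid bytes reached the file. */
int error = zfs_write_simple(zp, buf, len, off, &resid);

/* Demand a full write: with residp == NULL a short write becomes EIO. */
error = zfs_write_simple(zp, buf, len, off, NULL);
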
diff --git a/module/os/linux/zfs/zfs_znode.c b/module/os/linux/zfs/zfs_znode.c
index a542c662cb15..b33594488ee0 100644
--- a/module/os/linux/zfs/zfs_znode.c
+++ b/module/os/linux/zfs/zfs_znode.c
@@ -134,7 +134,6 @@ zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
zp->z_acl_cached = NULL;
zp->z_xattr_cached = NULL;
zp->z_xattr_parent = 0;
- zp->z_moved = B_FALSE;
return (0);
}
@@ -505,6 +504,7 @@ zfs_inode_update(znode_t *zp)
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize, &i_blocks);
spin_lock(&ip->i_lock);
+ ip->i_mode = zp->z_mode;
ip->i_blocks = i_blocks;
i_size_write(ip, zp->z_size);
spin_unlock(&ip->i_lock);
@@ -546,7 +546,6 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
ASSERT3P(zp->z_xattr_cached, ==, NULL);
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
- zp->z_moved = B_FALSE;
zp->z_is_mapped = B_FALSE;
zp->z_is_ctldir = B_FALSE;
zp->z_is_stale = B_FALSE;
@@ -619,7 +618,6 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes++;
- membar_producer();
mutex_exit(&zfsvfs->z_znodes_lock);
unlock_new_inode(ip);
@@ -1901,7 +1899,6 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
rootzp->z_unlinked = B_FALSE;
rootzp->z_atime_dirty = B_FALSE;
- rootzp->z_moved = B_FALSE;
rootzp->z_is_sa = USE_SA(version, os);
rootzp->z_pflags = 0;
diff --git a/module/os/linux/zfs/zio_crypt.c b/module/os/linux/zfs/zio_crypt.c
index 96dabe55a138..8106359e1c77 100644
--- a/module/os/linux/zfs/zio_crypt.c
+++ b/module/os/linux/zfs/zio_crypt.c
@@ -1198,6 +1198,16 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
/*
+ * This is necessary here as we check next whether
+ * OBJSET_FLAG_USERACCOUNTING_COMPLETE or
+ * OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE are set in order to
+ * decide if the local_mac should be zeroed out.
+ */
+ intval = osp->os_flags;
+ if (should_bswap)
+ intval = BSWAP_64(intval);
+
+ /*
* The local MAC protects the user, group and project accounting.
* If these objects are not present, the local MAC is zeroed out.
*/
@@ -1208,7 +1218,10 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
(datalen >= OBJSET_PHYS_SIZE_V2 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
- (datalen <= OBJSET_PHYS_SIZE_V1)) {
+ (datalen <= OBJSET_PHYS_SIZE_V1) ||
+ (((intval & OBJSET_FLAG_USERACCOUNTING_COMPLETE) == 0 ||
+ (intval & OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE) == 0) &&
+ key->zk_version > 0)) {
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
}
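
Note: restating the widened condition above, for zk_version > 0 keys the local
MAC is now also zeroed out when either user-accounting-complete flag is clear.
A hedged condensation of the decision, where accounting_dnodes_absent is a
hypothetical stand-in for the dn_type == DMU_OT_NONE checks in the real code:

boolean_t zero_local_mac =
    accounting_dnodes_absent ||
    datalen <= OBJSET_PHYS_SIZE_V1 ||
    (key->zk_version > 0 &&
    ((intval & OBJSET_FLAG_USERACCOUNTING_COMPLETE) == 0 ||
    (intval & OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE) == 0));
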
diff --git a/module/os/linux/zfs/zpl_ctldir.c b/module/os/linux/zfs/zpl_ctldir.c
index fa4500f6f8d1..e6420f19ed87 100644
--- a/module/os/linux/zfs/zpl_ctldir.c
+++ b/module/os/linux/zfs/zpl_ctldir.c
@@ -55,7 +55,7 @@ zpl_root_iterate(struct file *filp, zpl_dir_context_t *ctx)
zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp));
int error = 0;
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
if (!zpl_dir_emit_dots(filp, ctx))
goto out;
@@ -76,7 +76,7 @@ zpl_root_iterate(struct file *filp, zpl_dir_context_t *ctx)
ctx->pos++;
}
out:
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
return (error);
}
@@ -242,13 +242,14 @@ zpl_snapdir_iterate(struct file *filp, zpl_dir_context_t *ctx)
uint64_t id, pos;
int error = 0;
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
cookie = spl_fstrans_mark();
if (!zpl_dir_emit_dots(filp, ctx))
goto out;
- pos = ctx->pos;
+ /* Start the position at 0 if it already emitted . and .. */
+ pos = (ctx->pos == 2 ? 0 : ctx->pos);
while (error == 0) {
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = -dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN,
@@ -265,7 +266,7 @@ zpl_snapdir_iterate(struct file *filp, zpl_dir_context_t *ctx)
}
out:
spl_fstrans_unmark(cookie);
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
if (error == -ENOENT)
return (0);
@@ -368,13 +369,13 @@ zpl_snapdir_getattr_impl(const struct path *path, struct kstat *stat,
struct inode *ip = path->dentry->d_inode;
zfsvfs_t *zfsvfs = ITOZSB(ip);
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
generic_fillattr(ip, stat);
stat->nlink = stat->size = 2;
stat->ctime = stat->mtime = dmu_objset_snap_cmtime(zfsvfs->z_os);
stat->atime = current_time(ip);
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
return (0);
}
@@ -452,7 +453,7 @@ zpl_shares_iterate(struct file *filp, zpl_dir_context_t *ctx)
znode_t *dzp;
int error = 0;
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
cookie = spl_fstrans_mark();
if (zfsvfs->z_shares_dir == 0) {
@@ -471,7 +472,7 @@ zpl_shares_iterate(struct file *filp, zpl_dir_context_t *ctx)
iput(ZTOI(dzp));
out:
spl_fstrans_unmark(cookie);
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
ASSERT3S(error, <=, 0);
return (error);
@@ -502,13 +503,13 @@ zpl_shares_getattr_impl(const struct path *path, struct kstat *stat,
znode_t *dzp;
int error;
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
if (zfsvfs->z_shares_dir == 0) {
generic_fillattr(path->dentry->d_inode, stat);
stat->nlink = stat->size = 2;
stat->atime = current_time(ip);
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
return (0);
}
@@ -518,7 +519,7 @@ zpl_shares_getattr_impl(const struct path *path, struct kstat *stat,
iput(ZTOI(dzp));
}
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
ASSERT3S(error, <=, 0);
return (error);
diff --git a/module/os/linux/zfs/zpl_file.c b/module/os/linux/zfs/zpl_file.c
index 51e189a87272..9e08c94e2147 100644
--- a/module/os/linux/zfs/zpl_file.c
+++ b/module/os/linux/zfs/zpl_file.c
@@ -212,244 +212,221 @@ zfs_io_flags(struct kiocb *kiocb)
return (flags);
}
-static ssize_t
-zpl_read_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
- unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags,
- cred_t *cr, size_t skip)
+/*
+ * If relatime is enabled, call file_accessed() if zfs_relatime_need_update()
+ * is true. This is needed since datasets with an inherited "relatime"
+ * property aren't necessarily mounted with the MNT_RELATIME flag (e.g.
+ * after `zfs set relatime=...`), which is what the VFS relatime test in
+ * relatime_need_update() is based on.
+ */
+static inline void
+zpl_file_accessed(struct file *filp)
{
- ssize_t read;
- uio_t uio = { { 0 }, 0 };
- int error;
- fstrans_cookie_t cookie;
-
- uio.uio_iov = iovp;
- uio.uio_iovcnt = nr_segs;
- uio.uio_loffset = *ppos;
- uio.uio_segflg = segment;
- uio.uio_limit = MAXOFFSET_T;
- uio.uio_resid = count;
- uio.uio_skip = skip;
-
- cookie = spl_fstrans_mark();
- error = -zfs_read(ip, &uio, flags, cr);
- spl_fstrans_unmark(cookie);
- if (error < 0)
- return (error);
-
- read = count - uio.uio_resid;
- *ppos += read;
+ struct inode *ip = filp->f_mapping->host;
- return (read);
+ if (!IS_NOATIME(ip) && ITOZSB(ip)->z_relatime) {
+ if (zfs_relatime_need_update(ip))
+ file_accessed(filp);
+ } else {
+ file_accessed(filp);
+ }
}
-inline ssize_t
-zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
- uio_seg_t segment, int flags, cred_t *cr)
-{
- struct iovec iov;
-
- iov.iov_base = (void *)buf;
- iov.iov_len = len;
+#if defined(HAVE_VFS_RW_ITERATE)
- return (zpl_read_common_iovec(ip, &iov, len, 1, ppos, segment,
- flags, cr, 0));
+/*
+ * When HAVE_VFS_IOV_ITER is defined the iov_iter structure supports
+ * iovecs, kvecs, bvecs and pipes, plus all the required interfaces to
+ * manipulate the iov_iter are available, in which case the full iov_iter
+ * can be attached to the uio and correctly handled in the lower layers.
+ * Otherwise, for older kernels extract the iovec and pass it instead.
+ */
+static void
+zpl_uio_init(uio_t *uio, struct kiocb *kiocb, struct iov_iter *to,
+ loff_t pos, ssize_t count, size_t skip)
+{
+#if defined(HAVE_VFS_IOV_ITER)
+ uio_iov_iter_init(uio, to, pos, count, skip);
+#else
+ uio_iovec_init(uio, to->iov, to->nr_segs, pos,
+ to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
+ count, skip);
+#endif
}
static ssize_t
-zpl_iter_read_common(struct kiocb *kiocb, const struct iovec *iovp,
- unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip)
+zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
cred_t *cr = CRED();
+ fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
- struct inode *ip = filp->f_mapping->host;
- zfsvfs_t *zfsvfs = ZTOZSB(ITOZ(ip));
- ssize_t read;
- unsigned int f_flags = filp->f_flags;
+ ssize_t count = iov_iter_count(to);
+ uio_t uio;
+
+ zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0);
- f_flags |= zfs_io_flags(kiocb);
crhold(cr);
- read = zpl_read_common_iovec(filp->f_mapping->host, iovp, count,
- nr_segs, &kiocb->ki_pos, seg, f_flags, cr, skip);
+ cookie = spl_fstrans_mark();
+
+ int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
+ filp->f_flags | zfs_io_flags(kiocb), cr);
+
+ spl_fstrans_unmark(cookie);
crfree(cr);
- /*
- * If relatime is enabled, call file_accessed() only if
- * zfs_relatime_need_update() is true. This is needed since datasets
- * with inherited "relatime" property aren't necessarily mounted with
- * MNT_RELATIME flag (e.g. after `zfs set relatime=...`), which is what
- * relatime test in VFS by relatime_need_update() is based on.
- */
- if (!IS_NOATIME(ip) && zfsvfs->z_relatime) {
- if (zfs_relatime_need_update(ip))
- file_accessed(filp);
- } else {
- file_accessed(filp);
- }
+ if (error < 0)
+ return (error);
+
+ ssize_t read = count - uio.uio_resid;
+ kiocb->ki_pos += read;
+
+ zpl_file_accessed(filp);
return (read);
}
-#if defined(HAVE_VFS_RW_ITERATE)
-static ssize_t
-zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
+static inline ssize_t
+zpl_generic_write_checks(struct kiocb *kiocb, struct iov_iter *from,
+ size_t *countp)
{
- ssize_t ret;
- uio_seg_t seg = UIO_USERSPACE;
- if (to->type & ITER_KVEC)
- seg = UIO_SYSSPACE;
- if (to->type & ITER_BVEC)
- seg = UIO_BVEC;
- ret = zpl_iter_read_common(kiocb, to->iov, to->nr_segs,
- iov_iter_count(to), seg, to->iov_offset);
- if (ret > 0)
- iov_iter_advance(to, ret);
- return (ret);
-}
+#ifdef HAVE_GENERIC_WRITE_CHECKS_KIOCB
+ ssize_t ret = generic_write_checks(kiocb, from);
+ if (ret <= 0)
+ return (ret);
+
+ *countp = ret;
#else
-static ssize_t
-zpl_aio_read(struct kiocb *kiocb, const struct iovec *iovp,
- unsigned long nr_segs, loff_t pos)
-{
- ssize_t ret;
- size_t count;
+ struct file *file = kiocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *ip = mapping->host;
+ int isblk = S_ISBLK(ip->i_mode);
- ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_WRITE);
+ *countp = iov_iter_count(from);
+ ssize_t ret = generic_write_checks(file, &kiocb->ki_pos, countp, isblk);
if (ret)
return (ret);
+#endif
- return (zpl_iter_read_common(kiocb, iovp, nr_segs, count,
- UIO_USERSPACE, 0));
+ return (0);
}
-#endif /* HAVE_VFS_RW_ITERATE */
static ssize_t
-zpl_write_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
- unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags,
- cred_t *cr, size_t skip)
+zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
- ssize_t wrote;
- uio_t uio = { { 0 }, 0 };
- int error;
+ cred_t *cr = CRED();
fstrans_cookie_t cookie;
+ struct file *filp = kiocb->ki_filp;
+ struct inode *ip = filp->f_mapping->host;
+ uio_t uio;
+ size_t count = 0;
+ ssize_t ret;
- if (flags & O_APPEND)
- *ppos = i_size_read(ip);
+ ret = zpl_generic_write_checks(kiocb, from, &count);
+ if (ret)
+ return (ret);
- uio.uio_iov = iovp;
- uio.uio_iovcnt = nr_segs;
- uio.uio_loffset = *ppos;
- uio.uio_segflg = segment;
- uio.uio_limit = MAXOFFSET_T;
- uio.uio_resid = count;
- uio.uio_skip = skip;
+ zpl_uio_init(&uio, kiocb, from, kiocb->ki_pos, count, from->iov_offset);
+ crhold(cr);
cookie = spl_fstrans_mark();
- error = -zfs_write(ip, &uio, flags, cr);
+
+ int error = -zfs_write(ITOZ(ip), &uio,
+ filp->f_flags | zfs_io_flags(kiocb), cr);
+
spl_fstrans_unmark(cookie);
+ crfree(cr);
+
if (error < 0)
return (error);
- wrote = count - uio.uio_resid;
- *ppos += wrote;
+ ssize_t wrote = count - uio.uio_resid;
+ kiocb->ki_pos += wrote;
+
+ if (wrote > 0)
+ iov_iter_advance(from, wrote);
return (wrote);
}
-inline ssize_t
-zpl_write_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
- uio_seg_t segment, int flags, cred_t *cr)
-{
- struct iovec iov;
-
- iov.iov_base = (void *)buf;
- iov.iov_len = len;
-
- return (zpl_write_common_iovec(ip, &iov, len, 1, ppos, segment,
- flags, cr, 0));
-}
+#else /* !HAVE_VFS_RW_ITERATE */
static ssize_t
-zpl_iter_write_common(struct kiocb *kiocb, const struct iovec *iovp,
- unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip)
+zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
cred_t *cr = CRED();
+ fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
- ssize_t wrote;
- unsigned int f_flags = filp->f_flags;
-
- f_flags |= zfs_io_flags(kiocb);
- crhold(cr);
- wrote = zpl_write_common_iovec(filp->f_mapping->host, iovp, count,
- nr_segs, &kiocb->ki_pos, seg, f_flags, cr, skip);
- crfree(cr);
-
- return (wrote);
-}
-
-#if defined(HAVE_VFS_RW_ITERATE)
-static ssize_t
-zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
-{
size_t count;
ssize_t ret;
- uio_seg_t seg = UIO_USERSPACE;
-#ifndef HAVE_GENERIC_WRITE_CHECKS_KIOCB
- struct file *file = kiocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *ip = mapping->host;
- int isblk = S_ISBLK(ip->i_mode);
-
- count = iov_iter_count(from);
- ret = generic_write_checks(file, &kiocb->ki_pos, &count, isblk);
+ ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (ret)
return (ret);
-#else
- /*
- * XXX - ideally this check should be in the same lock region with
- * write operations, so that there's no TOCTTOU race when doing
- * append and someone else grow the file.
- */
- ret = generic_write_checks(kiocb, from);
- if (ret <= 0)
- return (ret);
- count = ret;
-#endif
- if (from->type & ITER_KVEC)
- seg = UIO_SYSSPACE;
- if (from->type & ITER_BVEC)
- seg = UIO_BVEC;
+ uio_t uio;
+ uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
+ count, 0);
- ret = zpl_iter_write_common(kiocb, from->iov, from->nr_segs,
- count, seg, from->iov_offset);
- if (ret > 0)
- iov_iter_advance(from, ret);
+ crhold(cr);
+ cookie = spl_fstrans_mark();
+
+ int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
+ filp->f_flags | zfs_io_flags(kiocb), cr);
+
+ spl_fstrans_unmark(cookie);
+ crfree(cr);
+
+ if (error < 0)
+ return (error);
- return (ret);
+ ssize_t read = count - uio.uio_resid;
+ kiocb->ki_pos += read;
+
+ zpl_file_accessed(filp);
+
+ return (read);
}
-#else
+
static ssize_t
-zpl_aio_write(struct kiocb *kiocb, const struct iovec *iovp,
+zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
- struct file *file = kiocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *ip = mapping->host;
- int isblk = S_ISBLK(ip->i_mode);
+ cred_t *cr = CRED();
+ fstrans_cookie_t cookie;
+ struct file *filp = kiocb->ki_filp;
+ struct inode *ip = filp->f_mapping->host;
size_t count;
ssize_t ret;
- ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_READ);
+ ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
if (ret)
return (ret);
- ret = generic_write_checks(file, &pos, &count, isblk);
+ ret = generic_write_checks(filp, &pos, &count, S_ISBLK(ip->i_mode));
if (ret)
return (ret);
- return (zpl_iter_write_common(kiocb, iovp, nr_segs, count,
- UIO_USERSPACE, 0));
+ uio_t uio;
+ uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
+ count, 0);
+
+ crhold(cr);
+ cookie = spl_fstrans_mark();
+
+ int error = -zfs_write(ITOZ(ip), &uio,
+ filp->f_flags | zfs_io_flags(kiocb), cr);
+
+ spl_fstrans_unmark(cookie);
+ crfree(cr);
+
+ if (error < 0)
+ return (error);
+
+ ssize_t wrote = count - uio.uio_resid;
+ kiocb->ki_pos += wrote;
+
+ return (wrote);
}
#endif /* HAVE_VFS_RW_ITERATE */
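
On kernels without the iov_iter interface, both fallbacks now build the uio with uio_iovec_init() instead of the field-by-field initialization deleted above. Judging from that removed open-coded setup, the helper is assumed to do roughly the following (a sketch reconstructed from this diff, not a verified copy of the real function):

/*
 * Sketch of what uio_iovec_init() is assumed to do, mirroring the
 * uio field assignments removed earlier in this diff.
 */
static inline void
sketch_uio_iovec_init(uio_t *uio, struct iovec *iov, unsigned long nr_segs,
    loff_t offset, uio_seg_t seg, ssize_t resid, size_t skip)
{
	uio->uio_iov = iov;		/* caller's iovec array */
	uio->uio_iovcnt = nr_segs;	/* number of segments */
	uio->uio_loffset = offset;	/* starting file offset */
	uio->uio_segflg = seg;		/* UIO_USERSPACE / UIO_SYSSPACE */
	uio->uio_limit = MAXOFFSET_T;	/* no artificial size cap */
	uio->uio_resid = resid;		/* bytes left to transfer */
	uio->uio_skip = skip;		/* offset into the first iovec */
}
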
@@ -486,14 +463,27 @@ zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
#error "Unknown direct IO interface"
#endif
-#else
+#else /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
-zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iovp,
+zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov,
loff_t pos, unsigned long nr_segs)
{
if (rw == WRITE)
+ return (zpl_aio_write(kiocb, iov, nr_segs, pos));
+ else
+ return (zpl_aio_read(kiocb, iov, nr_segs, pos));
+}
+#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
+static ssize_t
+zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
+{
+ const struct iovec *iovp = iov_iter_iovec(iter);
+ unsigned long nr_segs = iter->nr_segs;
+
+ ASSERT3S(pos, ==, kiocb->ki_pos);
+ if (rw == WRITE)
return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
else
return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
@@ -517,7 +507,7 @@ zpl_llseek(struct file *filp, loff_t offset, int whence)
spl_inode_lock_shared(ip);
cookie = spl_fstrans_mark();
- error = -zfs_holey(ip, whence, &offset);
+ error = -zfs_holey(ITOZ(ip), whence, &offset);
spl_fstrans_unmark(cookie);
if (error == 0)
error = lseek_execute(filp, ip, offset, maxbytes);
@@ -603,10 +593,6 @@ zpl_mmap(struct file *filp, struct vm_area_struct *vma)
* Populate a page with data for the Linux page cache. This function is
* only used to support mmap(2). There will be an identical copy of the
* data in the ARC which is kept up to date via .write() and .writepage().
- *
- * Current this function relies on zpl_read_common() and the O_DIRECT
- * flag to read in a page. This works but the more correct way is to
- * update zfs_fillpage() to be Linux friendly and use that interface.
*/
static int
zpl_readpage(struct file *filp, struct page *pp)
@@ -675,10 +661,10 @@ zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
enum writeback_sync_modes sync_mode;
int result;
- ZFS_ENTER(zfsvfs);
+ ZPL_ENTER(zfsvfs);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
wbc->sync_mode = WB_SYNC_ALL;
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
sync_mode = wbc->sync_mode;
/*
@@ -691,11 +677,11 @@ zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
wbc->sync_mode = WB_SYNC_NONE;
result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
if (sync_mode != wbc->sync_mode) {
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
+ ZPL_ENTER(zfsvfs);
+ ZPL_VERIFY_ZP(zp);
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, zp->z_id);
- ZFS_EXIT(zfsvfs);
+ ZPL_EXIT(zfsvfs);
/*
* We need to call write_cache_pages() again (we can't just
@@ -1037,6 +1023,10 @@ const struct file_operations zpl_file_operations = {
#endif
.read_iter = zpl_iter_read,
.write_iter = zpl_iter_write,
+#ifdef HAVE_VFS_IOV_ITER
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+#endif
#else
.read = do_sync_read,
.write = do_sync_write,
diff --git a/module/os/linux/zfs/zpl_inode.c b/module/os/linux/zfs/zpl_inode.c
index f3b97a22074c..f336fbb1272b 100644
--- a/module/os/linux/zfs/zpl_inode.c
+++ b/module/os/linux/zfs/zpl_inode.c
@@ -490,19 +490,17 @@ zpl_get_link_common(struct dentry *dentry, struct inode *ip, char **link)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
- struct iovec iov;
- uio_t uio = { { 0 }, 0 };
int error;
crhold(cr);
*link = NULL;
+
+ struct iovec iov;
iov.iov_len = MAXPATHLEN;
iov.iov_base = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_resid = (MAXPATHLEN - 1);
+ uio_t uio;
+ uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, MAXPATHLEN - 1, 0);
cookie = spl_fstrans_mark();
error = -zfs_readlink(ip, &uio, cr);
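
The link buffer is zeroed by kmem_zalloc() and the uio residual is capped at MAXPATHLEN - 1, so the target string stays NUL-terminated regardless of how many bytes zfs_readlink() fills in. The same defensive pattern in userspace, where readlink(2) likewise does not terminate its result:

/*
 * Userspace analogue: zero the buffer and cap the read at one byte
 * less than its size so the result is always NUL-terminated.
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char link[PATH_MAX];

	memset(link, 0, sizeof (link));
	if (readlink("/proc/self/exe", link, sizeof (link) - 1) < 0) {
		perror("readlink");
		return (1);
	}
	printf("%s\n", link);
	return (0);
}
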
diff --git a/module/os/linux/zfs/zpl_super.c b/module/os/linux/zfs/zpl_super.c
index 9db8bda4cc66..c2fd3fee1401 100644
--- a/module/os/linux/zfs/zpl_super.c
+++ b/module/os/linux/zfs/zpl_super.c
@@ -185,14 +185,27 @@ zpl_remount_fs(struct super_block *sb, int *flags, char *data)
static int
__zpl_show_devname(struct seq_file *seq, zfsvfs_t *zfsvfs)
{
- char *fsname;
+ ZPL_ENTER(zfsvfs);
- ZFS_ENTER(zfsvfs);
- fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
+ char *fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dmu_objset_name(zfsvfs->z_os, fsname);
- seq_puts(seq, fsname);
+
+ for (int i = 0; fsname[i] != 0; i++) {
+ /*
+ * Spaces in the dataset name must be converted to their
+ * octal escape sequence for getmntent(3) to correctly
+	 * parse the fsname portion of /proc/self/mounts.
+ */
+ if (fsname[i] == ' ') {
+ seq_puts(seq, "\\040");
+ } else {
+ seq_putc(seq, fsname[i]);
+ }
+ }
+
kmem_free(fsname, ZFS_MAX_DATASET_NAME_LEN);
- ZFS_EXIT(zfsvfs);
+
+ ZPL_EXIT(zfsvfs);
return (0);
}
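
getmntent(3) splits /proc/self/mounts fields on whitespace, so a literal space in a dataset name would truncate the fsname field; emitting the octal escape \040 keeps the entry parseable, and getmntent() unescapes it on the way back. A userspace rendering of the same loop, with a hypothetical dataset name:

/*
 * Minimal userspace demo of the escaping above: "tank/my pool" is
 * emitted as "tank/my\040pool". The dataset name is made up.
 */
#include <stdio.h>

static void show_devname(const char *fsname)
{
	for (int i = 0; fsname[i] != 0; i++) {
		if (fsname[i] == ' ')
			fputs("\\040", stdout);
		else
			fputc(fsname[i], stdout);
	}
	fputc('\n', stdout);
}

int main(void)
{
	show_devname("tank/my pool");	/* hypothetical dataset name */
	return (0);
}
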
diff --git a/module/os/linux/zfs/zpl_xattr.c b/module/os/linux/zfs/zpl_xattr.c
index 9b5fd0fd397b..1ec3dae2bb81 100644
--- a/module/os/linux/zfs/zpl_xattr.c
+++ b/module/os/linux/zfs/zpl_xattr.c
@@ -274,10 +274,10 @@ static int
zpl_xattr_get_dir(struct inode *ip, const char *name, void *value,
size_t size, cred_t *cr)
{
+ fstrans_cookie_t cookie;
struct inode *xip = NULL;
znode_t *dxzp = NULL;
znode_t *xzp = NULL;
- loff_t pos = 0;
int error;
/* Lookup the xattr directory */
@@ -302,7 +302,19 @@ zpl_xattr_get_dir(struct inode *ip, const char *name, void *value,
goto out;
}
- error = zpl_read_common(xip, value, size, &pos, UIO_SYSSPACE, 0, cr);
+ struct iovec iov;
+ iov.iov_base = (void *)value;
+ iov.iov_len = size;
+
+ uio_t uio;
+ uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, size, 0);
+
+ cookie = spl_fstrans_mark();
+ error = -zfs_read(ITOZ(xip), &uio, 0, cr);
+ spl_fstrans_unmark(cookie);
+
+ if (error == 0)
+ error = size - uio_resid(&uio);
out:
if (xzp)
zrele(xzp);
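
As in the write path, the xattr read reports progress through the uio: zfs_read() returns a negated errno, and on success the handler rewrites the result to size - uio_resid(&uio), the byte count actually copied out, which is the return convention expected of xattr get handlers. A toy model of that convention:

/*
 * Toy model: negative errno on failure, otherwise bytes copied,
 * derived from the residual. Types and the 5-byte "stored value"
 * are stand-ins, not ZFS code.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_uio { size_t resid; };

static int toy_zfs_read(struct toy_uio *uio)
{
	size_t stored = 5;	/* pretend the xattr value is 5 bytes */

	uio->resid -= (uio->resid < stored) ? uio->resid : stored;
	return (0);		/* positive errno on failure */
}

int main(void)
{
	size_t size = 64;	/* caller's buffer size */
	struct toy_uio uio = { .resid = size };
	int error = -toy_zfs_read(&uio);

	if (error == 0)
		error = (int)(size - uio.resid);
	printf("result=%d\n", error);	/* prints 5: bytes "read" */
	return (0);
}
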
@@ -441,7 +453,6 @@ zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value,
znode_t *dxzp = NULL;
znode_t *xzp = NULL;
vattr_t *vap = NULL;
- ssize_t wrote;
int lookup_flags, error;
const int xattr_mode = S_IFREG | 0644;
loff_t pos = 0;
@@ -496,13 +507,8 @@ zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value,
if (error)
goto out;
- wrote = zpl_write_common(ZTOI(xzp), value, size, &pos,
- UIO_SYSSPACE, 0, cr);
- if (wrote < 0)
- error = wrote;
-
+ error = -zfs_write_simple(xzp, value, size, pos, NULL);
out:
-
if (error == 0) {
ip->i_ctime = current_time(ip);
zfs_mark_inode_dirty(ip);
diff --git a/module/os/linux/zfs/zvol_os.c b/module/os/linux/zfs/zvol_os.c
index 218e1101edf8..cdc2076702af 100644
--- a/module/os/linux/zfs/zvol_os.c
+++ b/module/os/linux/zfs/zvol_os.c
@@ -66,49 +66,33 @@ typedef struct zv_request {
* Given a path, return TRUE if path is a ZVOL.
*/
static boolean_t
-zvol_is_zvol_impl(const char *device)
+zvol_is_zvol_impl(const char *path)
{
- struct block_device *bdev;
- unsigned int major;
+ dev_t dev = 0;
- bdev = vdev_lookup_bdev(device);
- if (IS_ERR(bdev))
+ if (vdev_lookup_bdev(path, &dev) != 0)
return (B_FALSE);
- major = MAJOR(bdev->bd_dev);
- bdput(bdev);
-
- if (major == zvol_major)
+ if (MAJOR(dev) == zvol_major)
return (B_TRUE);
return (B_FALSE);
}
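
The rewritten check no longer takes a block_device reference at all: vdev_lookup_bdev() now resolves the path straight to a dev_t, returning 0 on success, so there is no bdput() to pair. A userspace analogue of the same major-number test via stat(2); the 230 below is the conventional default zvol major on Linux, used here as an assumption:

/*
 * Userspace analogue: resolve a path to a dev_t and compare its
 * major number. wanted_major is caller-supplied; 230 is assumed.
 */
#include <sys/stat.h>
#include <sys/sysmacros.h>	/* major() */
#include <stdio.h>

static int is_on_major(const char *path, unsigned int wanted_major)
{
	struct stat st;

	if (stat(path, &st) != 0 || !S_ISBLK(st.st_mode))
		return (0);
	return (major(st.st_rdev) == wanted_major);
}

int main(void)
{
	printf("%d\n", is_on_major("/dev/zd0", 230));
	return (0);
}
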
static void
-uio_from_bio(uio_t *uio, struct bio *bio)
-{
- uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
- uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
- uio->uio_loffset = BIO_BI_SECTOR(bio) << 9;
- uio->uio_segflg = UIO_BVEC;
- uio->uio_limit = MAXOFFSET_T;
- uio->uio_resid = BIO_BI_SIZE(bio);
- uio->uio_skip = BIO_BI_SKIP(bio);
-}
-
-static void
zvol_write(void *arg)
{
- int error = 0;
-
zv_request_t *zvr = arg;
struct bio *bio = zvr->bio;
- uio_t uio = { { 0 }, 0 };
- uio_from_bio(&uio, bio);
+ int error = 0;
+ uio_t uio;
+
+ uio_bvec_init(&uio, bio);
zvol_state_t *zv = zvr->zv;
- ASSERT(zv && zv->zv_open_count > 0);
- ASSERT(zv->zv_zilog != NULL);
+ ASSERT3P(zv, !=, NULL);
+ ASSERT3U(zv->zv_open_count, >, 0);
+ ASSERT3P(zv->zv_zilog, !=, NULL);
/* bio marked as FLUSH need to flush before write */
if (bio_is_flush(bio))
@@ -122,10 +106,14 @@ zvol_write(void *arg)
return;
}
+ struct request_queue *q = zv->zv_zso->zvo_queue;
+ struct gendisk *disk = zv->zv_zso->zvo_disk;
ssize_t start_resid = uio.uio_resid;
- unsigned long start_jif = jiffies;
- blk_generic_start_io_acct(zv->zv_zso->zvo_queue, WRITE,
- bio_sectors(bio), &zv->zv_zso->zvo_disk->part0);
+ unsigned long start_time;
+
+ boolean_t acct = blk_queue_io_stat(q);
+ if (acct)
+ start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);
boolean_t sync =
bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
@@ -169,8 +157,10 @@ zvol_write(void *arg)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
- blk_generic_end_io_acct(zv->zv_zso->zvo_queue,
- WRITE, &zv->zv_zso->zvo_disk->part0, start_jif);
+
+ if (acct)
+ blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
+
BIO_END_IO(bio, -error);
kmem_free(zvr, sizeof (zv_request_t));
}
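
The accounting changes in this hunk (and the matching read and discard hunks below) gate the per-disk statistics work on blk_queue_io_stat(q), sampling the flag once so the start and end calls always pair even if /sys/block/<dev>/queue/iostats is toggled mid-request. A userspace analogue of that gated-instrumentation shape:

/*
 * Userspace analogue: sample a runtime flag once, take a start
 * timestamp only if set, and pair it with the end call.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool stats_enabled = true;	/* stands in for blk_queue_io_stat() */

int main(void)
{
	struct timespec start, end;
	bool acct = stats_enabled;	/* sampled once, like 'acct' above */

	if (acct)
		clock_gettime(CLOCK_MONOTONIC, &start);

	/* ... the actual I/O work happens here ... */

	if (acct) {
		clock_gettime(CLOCK_MONOTONIC, &end);
		long long ns =
		    (long long)(end.tv_sec - start.tv_sec) * 1000000000LL +
		    (end.tv_nsec - start.tv_nsec);
		printf("elapsed: %lld ns\n", ns);
	}
	return (0);
}
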
@@ -187,14 +177,18 @@ zvol_discard(void *arg)
boolean_t sync;
int error = 0;
dmu_tx_t *tx;
- unsigned long start_jif;
- ASSERT(zv && zv->zv_open_count > 0);
- ASSERT(zv->zv_zilog != NULL);
+ ASSERT3P(zv, !=, NULL);
+ ASSERT3U(zv->zv_open_count, >, 0);
+ ASSERT3P(zv->zv_zilog, !=, NULL);
+
+ struct request_queue *q = zv->zv_zso->zvo_queue;
+ struct gendisk *disk = zv->zv_zso->zvo_disk;
+ unsigned long start_time;
- start_jif = jiffies;
- blk_generic_start_io_acct(zv->zv_zso->zvo_queue, WRITE,
- bio_sectors(bio), &zv->zv_zso->zvo_disk->part0);
+ boolean_t acct = blk_queue_io_stat(q);
+ if (acct)
+ start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);
sync = bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
@@ -239,8 +233,10 @@ zvol_discard(void *arg)
unlock:
rw_exit(&zv->zv_suspend_lock);
- blk_generic_end_io_acct(zv->zv_zso->zvo_queue, WRITE,
- &zv->zv_zso->zvo_disk->part0, start_jif);
+
+ if (acct)
+ blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
+
BIO_END_IO(bio, -error);
kmem_free(zvr, sizeof (zv_request_t));
}
@@ -248,20 +244,25 @@ unlock:
static void
zvol_read(void *arg)
{
- int error = 0;
-
zv_request_t *zvr = arg;
struct bio *bio = zvr->bio;
- uio_t uio = { { 0 }, 0 };
- uio_from_bio(&uio, bio);
+ int error = 0;
+ uio_t uio;
+
+ uio_bvec_init(&uio, bio);
zvol_state_t *zv = zvr->zv;
- ASSERT(zv && zv->zv_open_count > 0);
+ ASSERT3P(zv, !=, NULL);
+ ASSERT3U(zv->zv_open_count, >, 0);
+ struct request_queue *q = zv->zv_zso->zvo_queue;
+ struct gendisk *disk = zv->zv_zso->zvo_disk;
ssize_t start_resid = uio.uio_resid;
- unsigned long start_jif = jiffies;
- blk_generic_start_io_acct(zv->zv_zso->zvo_queue, READ, bio_sectors(bio),
- &zv->zv_zso->zvo_disk->part0);
+ unsigned long start_time;
+
+ boolean_t acct = blk_queue_io_stat(q);
+ if (acct)
+ start_time = blk_generic_start_io_acct(q, disk, READ, bio);
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
uio.uio_loffset, uio.uio_resid, RL_READER);
@@ -289,8 +290,10 @@ zvol_read(void *arg)
task_io_account_read(nread);
rw_exit(&zv->zv_suspend_lock);
- blk_generic_end_io_acct(zv->zv_zso->zvo_queue, READ,
- &zv->zv_zso->zvo_disk->part0, start_jif);
+
+ if (acct)
+ blk_generic_end_io_acct(q, disk, READ, bio, start_time);
+
BIO_END_IO(bio, -error);
kmem_free(zvr, sizeof (zv_request_t));
}
@@ -482,9 +485,9 @@ zvol_open(struct block_device *bdev, fmode_t flag)
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
- ASSERT(zv->zv_open_count != 0 || RW_READ_HELD(&zv->zv_suspend_lock));
if (zv->zv_open_count == 0) {
+ ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
error = -zvol_first_open(zv, !(flag & FMODE_WRITE));
if (error)
goto out_mutex;
@@ -501,7 +504,7 @@ zvol_open(struct block_device *bdev, fmode_t flag)
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
- check_disk_change(bdev);
+ zfs_check_media_change(bdev);
return (0);
@@ -530,7 +533,7 @@ zvol_release(struct gendisk *disk, fmode_t mode)
zv = disk->private_data;
mutex_enter(&zv->zv_state_lock);
- ASSERT(zv->zv_open_count > 0);
+ ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
@@ -553,11 +556,12 @@ zvol_release(struct gendisk *disk, fmode_t mode)
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
- ASSERT(zv->zv_open_count != 1 || RW_READ_HELD(&zv->zv_suspend_lock));
zv->zv_open_count--;
- if (zv->zv_open_count == 0)
+ if (zv->zv_open_count == 0) {
+ ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
+ }
mutex_exit(&zv->zv_state_lock);
@@ -652,8 +656,15 @@ zvol_revalidate_disk(struct gendisk *disk)
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
+ struct gendisk *disk = zv->zv_zso->zvo_disk;
- revalidate_disk(zv->zv_zso->zvo_disk);
+#if defined(HAVE_REVALIDATE_DISK_SIZE)
+ revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
+#elif defined(HAVE_REVALIDATE_DISK)
+ revalidate_disk(disk);
+#else
+ zvol_revalidate_disk(disk);
+#endif
return (0);
}
@@ -697,46 +708,6 @@ zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return (0);
}
-/*
- * Find a zvol_state_t given the full major+minor dev_t. If found,
- * return with zv_state_lock taken, otherwise, return (NULL) without
- * taking zv_state_lock.
- */
-static zvol_state_t *
-zvol_find_by_dev(dev_t dev)
-{
- zvol_state_t *zv;
-
- rw_enter(&zvol_state_lock, RW_READER);
- for (zv = list_head(&zvol_state_list); zv != NULL;
- zv = list_next(&zvol_state_list, zv)) {
- mutex_enter(&zv->zv_state_lock);
- if (zv->zv_zso->zvo_dev == dev) {
- rw_exit(&zvol_state_lock);
- return (zv);
- }
- mutex_exit(&zv->zv_state_lock);
- }
- rw_exit(&zvol_state_lock);
-
- return (NULL);
-}
-
-static struct kobject *
-zvol_probe(dev_t dev, int *part, void *arg)
-{
- zvol_state_t *zv;
- struct kobject *kobj;
-
- zv = zvol_find_by_dev(dev);
- kobj = zv ? get_disk_and_module(zv->zv_zso->zvo_disk) : NULL;
- ASSERT(zv == NULL || MUTEX_HELD(&zv->zv_state_lock));
- if (zv)
- mutex_exit(&zv->zv_state_lock);
-
- return (kobj);
-}
-
static struct block_device_operations zvol_ops = {
.open = zvol_open,
.release = zvol_release,
@@ -774,6 +745,7 @@ zvol_alloc(dev_t dev, const char *name)
zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
zv->zv_zso = zso;
+ zv->zv_volmode = volmode;
list_link_init(&zv->zv_next);
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -859,8 +831,8 @@ zvol_free(zvol_state_t *zv)
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
- ASSERT(zv->zv_open_count == 0);
- ASSERT(zv->zv_zso->zvo_disk->private_data == NULL);
+ ASSERT0(zv->zv_open_count);
+ ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
@@ -879,6 +851,11 @@ zvol_free(zvol_state_t *zv)
kmem_free(zv, sizeof (zvol_state_t));
}
+void
+zvol_wait_close(zvol_state_t *zv)
+{
+}
+
/*
* Create a block device minor node and setup the linkage between it
* and the specified volume. Once this function returns the block
@@ -1083,9 +1060,6 @@ zvol_init(void)
return (-ENOMEM);
}
zvol_init_impl();
- blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
- THIS_MODULE, zvol_probe, NULL, NULL);
-
ida_init(&zvol_ida);
zvol_register_ops(&zvol_linux_ops);
return (0);
@@ -1095,7 +1069,6 @@ void
zvol_fini(void)
{
zvol_fini_impl();
- blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
unregister_blkdev(zvol_major, ZVOL_DRIVER);
taskq_destroy(zvol_taskq);
ida_destroy(&zvol_ida);