Diffstat (limited to 'module')
 module/icp/algs/aes/aes_impl.c | 12
 module/icp/algs/edonr/edonr.c | 40
 module/icp/algs/modes/cbc.c | 30
 module/icp/algs/modes/ccm.c | 102
 module/icp/algs/modes/ctr.c | 21
 module/icp/algs/modes/ecb.c | 14
 module/icp/algs/modes/gcm.c | 86
 module/icp/algs/modes/modes.c | 2
 module/icp/algs/sha2/sha2.c | 18
 module/icp/algs/skein/skein.c | 136
 module/icp/algs/skein/skein_port.h | 4
 module/icp/api/kcf_ctxops.c | 2
 module/icp/core/kcf_mech_tabs.c | 3
 module/icp/core/kcf_prov_lib.c | 6
 module/icp/io/aes.c | 23
 module/icp/io/sha2_mod.c | 50
 module/icp/io/skein_mod.c | 26
 module/nvpair/nvpair.c | 50
 module/os/freebsd/spl/acl_common.c | 2
 module/os/freebsd/spl/sha256c.c | 4
 module/os/freebsd/spl/sha512c.c | 8
 module/os/freebsd/spl/spl_acl.c | 8
 module/os/freebsd/spl/spl_vfs.c | 2
 module/os/freebsd/spl/spl_zlib.c | 7
 module/os/freebsd/spl/spl_zone.c | 4
 module/os/freebsd/zfs/abd_os.c | 2
 module/os/freebsd/zfs/crypto_os.c | 32
 module/os/freebsd/zfs/dmu_os.c | 12
 module/os/freebsd/zfs/hkdf.c | 2
 module/os/freebsd/zfs/zfs_acl.c | 24
 module/os/freebsd/zfs/zfs_ctldir.c | 4
 module/os/freebsd/zfs/zfs_vnops_os.c | 6
 module/os/freebsd/zfs/zfs_znode.c | 2
 module/os/freebsd/zfs/zio_crypt.c | 148
 module/os/linux/spl/spl-generic.c | 1
 module/os/linux/zfs/qat_crypt.c | 10
 module/os/linux/zfs/zfs_acl.c | 24
 module/os/linux/zfs/zfs_dir.c | 4
 module/os/linux/zfs/zfs_uio.c | 10
 module/os/linux/zfs/zfs_vfsops.c | 2
 module/os/linux/zfs/zfs_znode.c | 6
 module/os/linux/zfs/zio_crypt.c | 132
 module/zcommon/zfs_fletcher.c | 4
 module/zcommon/zfs_fletcher_aarch64_neon.c | 2
 module/zcommon/zfs_fletcher_avx512.c | 2
 module/zcommon/zfs_fletcher_intel.c | 2
 module/zcommon/zfs_fletcher_sse.c | 2
 module/zcommon/zfs_fletcher_superscalar.c | 2
 module/zcommon/zfs_fletcher_superscalar4.c | 2
 module/zfs/aggsum.c | 2
 module/zfs/arc.c | 75
 module/zfs/blkptr.c | 2
 module/zfs/bpobj.c | 6
 module/zfs/btree.c | 2
 module/zfs/dataset_kstats.c | 2
 module/zfs/dbuf.c | 40
 module/zfs/ddt.c | 30
 module/zfs/dmu.c | 8
 module/zfs/dmu_objset.c | 6
 module/zfs/dmu_recv.c | 19
 module/zfs/dmu_send.c | 26
 module/zfs/dmu_traverse.c | 4
 module/zfs/dnode.c | 52
 module/zfs/dnode_sync.c | 14
 module/zfs/dsl_bookmark.c | 10
 module/zfs/dsl_crypt.c | 31
 module/zfs/dsl_dataset.c | 14
 module/zfs/dsl_deadlist.c | 2
 module/zfs/dsl_scan.c | 28
 module/zfs/edonr_zfs.c | 8
 module/zfs/gzip.c | 4
 module/zfs/hkdf.c | 2
 module/zfs/metaslab.c | 14
 module/zfs/range_tree.c | 8
 module/zfs/sa.c | 17
 module/zfs/skein_zfs.c | 13
 module/zfs/spa.c | 6
 module/zfs/spa_checkpoint.c | 2
 module/zfs/spa_misc.c | 5
 module/zfs/space_map.c | 5
 module/zfs/txg.c | 4
 module/zfs/vdev.c | 6
 module/zfs/vdev_draid.c | 2
 module/zfs/vdev_indirect.c | 6
 module/zfs/vdev_indirect_births.c | 2
 module/zfs/vdev_indirect_mapping.c | 4
 module/zfs/vdev_label.c | 2
 module/zfs/vdev_raidz.c | 4
 module/zfs/vdev_rebuild.c | 10
 module/zfs/vdev_trim.c | 12
 module/zfs/zap.c | 2
 module/zfs/zap_leaf.c | 9
 module/zfs/zap_micro.c | 6
 module/zfs/zfs_fm.c | 2
 module/zfs/zfs_log.c | 34
 module/zfs/zfs_replay.c | 15
 module/zfs/zfs_sa.c | 11
 module/zfs/zfs_vnops.c | 3
 module/zfs/zil.c | 33
 module/zfs/zio.c | 14
 module/zfs/zio_checksum.c | 2
 101 files changed, 859 insertions(+), 878 deletions(-)
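Every hunk below applies the same mechanical substitution, but the argument order flips between the BSD and ISO C calls, so a crib helps when reviewing. A minimal sketch, not part of the commit:

#include <string.h>

/*
 * bcopy(src, dst, n) == memmove(dst, src, n): note the swapped
 * src/dst order, and that bcopy() tolerates overlapping buffers.
 * The commit uses memcpy() where the buffers provably do not
 * overlap, and memmove() where they may (see edonr.c below).
 */
void
demo(unsigned char *dst, const unsigned char *src, size_t n)
{
	memcpy(dst, src, n);		/* was: bcopy(src, dst, n) */
	memset(dst, 0, n);		/* was: bzero(dst, n) */
	if (memcmp(dst, src, n) != 0)	/* was: if (bcmp(dst, src, n)) */
		return;
}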
diff --git a/module/icp/algs/aes/aes_impl.c b/module/icp/algs/aes/aes_impl.c
index c238bee2170b..f518a54a6185 100644
--- a/module/icp/algs/aes/aes_impl.c
+++ b/module/icp/algs/aes/aes_impl.c
@@ -47,7 +47,7 @@ aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
union {
uint64_t ka64[4];
uint32_t ka32[8];
- } keyarr;
+ } keyarr;
switch (keyBits) {
case 128:
@@ -81,7 +81,7 @@ aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
}
} else {
- bcopy(cipherKey, keyarr.ka32, keysize);
+ memcpy(keyarr.ka32, cipherKey, keysize);
}
} else {
/* byte swap */
@@ -132,7 +132,7 @@ aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
} else
- bcopy(pt, &buffer, AES_BLOCK_LEN);
+ memcpy(&buffer, pt, AES_BLOCK_LEN);
ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr, buffer, buffer);
@@ -143,7 +143,7 @@ aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
*(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
*(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
} else
- bcopy(&buffer, ct, AES_BLOCK_LEN);
+ memcpy(ct, &buffer, AES_BLOCK_LEN);
}
return (CRYPTO_SUCCESS);
}
@@ -179,7 +179,7 @@ aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
} else
- bcopy(ct, &buffer, AES_BLOCK_LEN);
+ memcpy(&buffer, ct, AES_BLOCK_LEN);
ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr, buffer, buffer);
@@ -190,7 +190,7 @@ aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
*(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
*(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
} else
- bcopy(&buffer, pt, AES_BLOCK_LEN);
+ memcpy(pt, &buffer, AES_BLOCK_LEN);
}
return (CRYPTO_SUCCESS);
}
diff --git a/module/icp/algs/edonr/edonr.c b/module/icp/algs/edonr/edonr.c
index 20418eaa73cf..dcf63fc18b20 100644
--- a/module/icp/algs/edonr/edonr.c
+++ b/module/icp/algs/edonr/edonr.c
@@ -470,32 +470,32 @@ EdonRInit(EdonRState *state, size_t hashbitlen)
state->hashbitlen = 224;
state->bits_processed = 0;
state->unprocessed_bits = 0;
- bcopy(i224p2, hashState224(state)->DoublePipe,
- 16 * sizeof (uint32_t));
+ memcpy(hashState224(state)->DoublePipe, i224p2,
+ sizeof (i224p2));
break;
case 256:
state->hashbitlen = 256;
state->bits_processed = 0;
state->unprocessed_bits = 0;
- bcopy(i256p2, hashState256(state)->DoublePipe,
- 16 * sizeof (uint32_t));
+ memcpy(hashState256(state)->DoublePipe, i256p2,
+ sizeof (i256p2));
break;
case 384:
state->hashbitlen = 384;
state->bits_processed = 0;
state->unprocessed_bits = 0;
- bcopy(i384p2, hashState384(state)->DoublePipe,
- 16 * sizeof (uint64_t));
+ memcpy(hashState384(state)->DoublePipe, i384p2,
+ sizeof (i384p2));
break;
case 512:
state->hashbitlen = 512;
state->bits_processed = 0;
state->unprocessed_bits = 0;
- bcopy(i512p2, hashState224(state)->DoublePipe,
- 16 * sizeof (uint64_t));
+ memcpy(hashState224(state)->DoublePipe, i512p2,
+ sizeof (i512p2));
break;
}
}
@@ -520,8 +520,9 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
ASSERT(state->unprocessed_bits + databitlen <=
EdonR256_BLOCK_SIZE * 8);
- bcopy(data, hashState256(state)->LastPart
- + (state->unprocessed_bits >> 3), LastBytes);
+ memcpy(hashState256(state)->LastPart
+ + (state->unprocessed_bits >> 3),
+ data, LastBytes);
state->unprocessed_bits += (int)databitlen;
databitlen = state->unprocessed_bits;
/* LINTED E_BAD_PTR_CAST_ALIGN */
@@ -542,7 +543,8 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
1) & 0x01ff;
data32 += bits_processed >> 5; /* byte size update */
- bcopy(data32, hashState256(state)->LastPart, LastBytes);
+ memmove(hashState256(state)->LastPart,
+ data32, LastBytes);
}
break;
@@ -555,8 +557,9 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
ASSERT(state->unprocessed_bits + databitlen <=
EdonR512_BLOCK_SIZE * 8);
- bcopy(data, hashState512(state)->LastPart
- + (state->unprocessed_bits >> 3), LastBytes);
+ memcpy(hashState512(state)->LastPart
+ + (state->unprocessed_bits >> 3),
+ data, LastBytes);
state->unprocessed_bits += (int)databitlen;
databitlen = state->unprocessed_bits;
/* LINTED E_BAD_PTR_CAST_ALIGN */
@@ -577,7 +580,8 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
1) & 0x03ff;
data64 += bits_processed >> 6; /* byte size update */
- bcopy(data64, hashState512(state)->LastPart, LastBytes);
+ memmove(hashState512(state)->LastPart,
+ data64, LastBytes);
}
break;
}
@@ -682,7 +686,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
for (j = 0; j < EdonR224_DIGEST_SIZE >> 2; j++)
st_swap32(s32[j], d32 + j);
#else
- bcopy(hashState256(state)->DoublePipe + 9, hashval,
+ memcpy(hashval, hashState256(state)->DoublePipe + 9,
EdonR224_DIGEST_SIZE);
#endif
break;
@@ -696,7 +700,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
for (j = 0; j < EdonR256_DIGEST_SIZE >> 2; j++)
st_swap32(s32[j], d32 + j);
#else
- bcopy(hashState256(state)->DoublePipe + 8, hashval,
+ memcpy(hashval, hashState256(state)->DoublePipe + 8,
EdonR256_DIGEST_SIZE);
#endif
break;
@@ -710,7 +714,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
for (j = 0; j < EdonR384_DIGEST_SIZE >> 3; j++)
st_swap64(s64[j], d64 + j);
#else
- bcopy(hashState384(state)->DoublePipe + 10, hashval,
+ memcpy(hashval, hashState384(state)->DoublePipe + 10,
EdonR384_DIGEST_SIZE);
#endif
break;
@@ -724,7 +728,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
for (j = 0; j < EdonR512_DIGEST_SIZE >> 3; j++)
st_swap64(s64[j], d64 + j);
#else
- bcopy(hashState512(state)->DoublePipe + 8, hashval,
+ memcpy(hashval, hashState512(state)->DoublePipe + 8,
EdonR512_DIGEST_SIZE);
#endif
break;
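Two details in the edonr.c conversion above deserve a note. First, the intra-buffer copies into LastPart can overlap their source (data32/data64 point into the same region after the pointer advance), so those become memmove() rather than memcpy(). Second, the hand-counted lengths such as `16 * sizeof (uint32_t)` become `sizeof (i224p2)` and friends, tying the copy length to the IV array's actual definition. A sketch of the latter idiom, with a hypothetical iv[] standing in for the real IV tables:

#include <string.h>
#include <stdint.h>

static const uint32_t iv[16] = { 0 };	/* stand-in for i224p2 et al. */

void
load_iv(uint32_t *pipe)
{
	/*
	 * sizeof on a true array yields its full byte size, so the
	 * length can never drift from the array definition the way
	 * a hand-written "16 * sizeof (uint32_t)" can.
	 */
	memcpy(pipe, iv, sizeof (iv));
}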
diff --git a/module/icp/algs/modes/cbc.c b/module/icp/algs/modes/cbc.c
index 73605f04d858..da3ff4e3595b 100644
--- a/module/icp/algs/modes/cbc.c
+++ b/module/icp/algs/modes/cbc.c
@@ -51,8 +51,8 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
if (length + ctx->cbc_remainder_len < block_size) {
/* accumulate bytes here and return */
- bcopy(datap,
- (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+ memcpy((uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+ datap,
length);
ctx->cbc_remainder_len += length;
ctx->cbc_copy_to = datap;
@@ -70,8 +70,8 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
- bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
- [ctx->cbc_remainder_len], need);
+ memcpy(&((uint8_t *)ctx->cbc_remainder)
+ [ctx->cbc_remainder_len], datap, need);
blockp = (uint8_t *)ctx->cbc_remainder;
} else {
@@ -91,10 +91,10 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
if (out_data_1_len == block_size) {
copy_block(lastp, out_data_1);
} else {
- bcopy(lastp, out_data_1, out_data_1_len);
+ memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
- bcopy(lastp + out_data_1_len,
- out_data_2,
+ memcpy(out_data_2,
+ lastp + out_data_1_len,
block_size - out_data_1_len);
}
}
@@ -113,7 +113,7 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
- bcopy(datap, ctx->cbc_remainder, remainder);
+ memcpy(ctx->cbc_remainder, datap, remainder);
ctx->cbc_remainder_len = remainder;
ctx->cbc_copy_to = datap;
goto out;
@@ -157,8 +157,8 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
if (length + ctx->cbc_remainder_len < block_size) {
/* accumulate bytes here and return */
- bcopy(datap,
- (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+ memcpy((uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+ datap,
length);
ctx->cbc_remainder_len += length;
ctx->cbc_copy_to = datap;
@@ -176,8 +176,8 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
- bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
- [ctx->cbc_remainder_len], need);
+ memcpy(&((uint8_t *)ctx->cbc_remainder)
+ [ctx->cbc_remainder_len], datap, need);
blockp = (uint8_t *)ctx->cbc_remainder;
} else {
@@ -203,9 +203,9 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
&out_data_1_len, &out_data_2, block_size);
- bcopy(blockp, out_data_1, out_data_1_len);
+ memcpy(out_data_1, blockp, out_data_1_len);
if (out_data_2 != NULL) {
- bcopy(blockp + out_data_1_len, out_data_2,
+ memcpy(out_data_2, blockp + out_data_1_len,
block_size - out_data_1_len);
}
@@ -224,7 +224,7 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
- bcopy(datap, ctx->cbc_remainder, remainder);
+ memcpy(ctx->cbc_remainder, datap, remainder);
ctx->cbc_remainder_len = remainder;
ctx->cbc_lastp = lastp;
ctx->cbc_copy_to = datap;
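The cbc.c hunks above, and the ccm/ctr/ecb/gcm hunks that follow, all rewrite the same buffering idiom: bytes that do not fill a cipher block are parked in a per-context remainder buffer until enough input arrives. A condensed sketch of that idiom, with hypothetical names:

#include <string.h>
#include <stdint.h>

#define	BLOCK_SIZE 16

struct mode_ctx {
	uint8_t	remainder[BLOCK_SIZE];	/* carried-over partial block */
	size_t	remainder_len;
};

/* Returns 1 when a full block is ready in ctx->remainder. */
static int
accumulate(struct mode_ctx *ctx, const uint8_t *data, size_t len)
{
	if (ctx->remainder_len + len < BLOCK_SIZE) {
		/* Not enough for a block yet: stash the bytes and return. */
		memcpy(ctx->remainder + ctx->remainder_len, data, len);
		ctx->remainder_len += len;
		return (0);
	}
	/* Top up the partial block; the caller processes it, then loops. */
	size_t need = BLOCK_SIZE - ctx->remainder_len;
	memcpy(ctx->remainder + ctx->remainder_len, data, need);
	ctx->remainder_len = BLOCK_SIZE;
	return (1);
}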
diff --git a/module/icp/algs/modes/ccm.c b/module/icp/algs/modes/ccm.c
index a41cbc395fd6..9fde2684a7c4 100644
--- a/module/icp/algs/modes/ccm.c
+++ b/module/icp/algs/modes/ccm.c
@@ -59,8 +59,8 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
if (length + ctx->ccm_remainder_len < block_size) {
/* accumulate bytes here and return */
- bcopy(datap,
- (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+ memcpy((uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+ datap,
length);
ctx->ccm_remainder_len += length;
ctx->ccm_copy_to = datap;
@@ -80,8 +80,8 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
- bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
- [ctx->ccm_remainder_len], need);
+ memcpy(&((uint8_t *)ctx->ccm_remainder)
+ [ctx->ccm_remainder_len], datap, need);
blockp = (uint8_t *)ctx->ccm_remainder;
} else {
@@ -132,10 +132,10 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
if (out_data_1_len == block_size) {
copy_block(lastp, out_data_1);
} else {
- bcopy(lastp, out_data_1, out_data_1_len);
+ memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
- bcopy(lastp + out_data_1_len,
- out_data_2,
+ memcpy(out_data_2,
+ lastp + out_data_1_len,
block_size - out_data_1_len);
}
}
@@ -154,7 +154,7 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
- bcopy(datap, ctx->ccm_remainder, remainder);
+ memcpy(ctx->ccm_remainder, datap, remainder);
ctx->ccm_remainder_len = remainder;
ctx->ccm_copy_to = datap;
goto out;
@@ -224,10 +224,10 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
/* ccm_mac_input_buf is not used for encryption */
macp = (uint8_t *)ctx->ccm_mac_input_buf;
- bzero(macp, block_size);
+ memset(macp, 0, block_size);
/* copy remainder to temporary buffer */
- bcopy(ctx->ccm_remainder, macp, ctx->ccm_remainder_len);
+ memcpy(macp, ctx->ccm_remainder, ctx->ccm_remainder_len);
/* calculate the CBC MAC */
xor_block(macp, mac_buf);
@@ -254,33 +254,32 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
ctx->ccm_remainder_len + ctx->ccm_mac_len);
if (ctx->ccm_remainder_len > 0) {
-
/* copy temporary block to where it belongs */
if (out_data_2 == NULL) {
/* everything will fit in out_data_1 */
- bcopy(macp, out_data_1, ctx->ccm_remainder_len);
- bcopy(ccm_mac_p, out_data_1 + ctx->ccm_remainder_len,
+ memcpy(out_data_1, macp, ctx->ccm_remainder_len);
+ memcpy(out_data_1 + ctx->ccm_remainder_len, ccm_mac_p,
ctx->ccm_mac_len);
} else {
-
if (out_data_1_len < ctx->ccm_remainder_len) {
-
size_t data_2_len_used;
- bcopy(macp, out_data_1, out_data_1_len);
+ memcpy(out_data_1, macp, out_data_1_len);
data_2_len_used = ctx->ccm_remainder_len
- out_data_1_len;
- bcopy((uint8_t *)macp + out_data_1_len,
- out_data_2, data_2_len_used);
- bcopy(ccm_mac_p, out_data_2 + data_2_len_used,
+ memcpy(out_data_2,
+ (uint8_t *)macp + out_data_1_len,
+ data_2_len_used);
+ memcpy(out_data_2 + data_2_len_used,
+ ccm_mac_p,
ctx->ccm_mac_len);
} else {
- bcopy(macp, out_data_1, out_data_1_len);
+ memcpy(out_data_1, macp, out_data_1_len);
if (out_data_1_len == ctx->ccm_remainder_len) {
/* mac will be in out_data_2 */
- bcopy(ccm_mac_p, out_data_2,
+ memcpy(out_data_2, ccm_mac_p,
ctx->ccm_mac_len);
} else {
size_t len_not_used = out_data_1_len -
@@ -290,11 +289,11 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
* out_data_1, part of the mac will be
* in out_data_2
*/
- bcopy(ccm_mac_p,
- out_data_1 + ctx->ccm_remainder_len,
- len_not_used);
- bcopy(ccm_mac_p + len_not_used,
- out_data_2,
+ memcpy(out_data_1 +
+ ctx->ccm_remainder_len,
+ ccm_mac_p, len_not_used);
+ memcpy(out_data_2,
+ ccm_mac_p + len_not_used,
ctx->ccm_mac_len - len_not_used);
}
@@ -302,9 +301,9 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
}
} else {
/* copy block to where it belongs */
- bcopy(ccm_mac_p, out_data_1, out_data_1_len);
+ memcpy(out_data_1, ccm_mac_p, out_data_1_len);
if (out_data_2 != NULL) {
- bcopy(ccm_mac_p + out_data_1_len, out_data_2,
+ memcpy(out_data_2, ccm_mac_p + out_data_1_len,
block_size - out_data_1_len);
}
}
@@ -372,7 +371,7 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
}
tmp = (uint8_t *)ctx->ccm_mac_input_buf;
- bcopy(datap, tmp + pm_len, length);
+ memcpy(tmp + pm_len, datap, length);
ctx->ccm_processed_mac_len += length;
return (CRYPTO_SUCCESS);
@@ -405,15 +404,15 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
mac_len = length - pt_part;
ctx->ccm_processed_mac_len = mac_len;
- bcopy(data + pt_part, ctx->ccm_mac_input_buf, mac_len);
+ memcpy(ctx->ccm_mac_input_buf, data + pt_part, mac_len);
if (pt_part + ctx->ccm_remainder_len < block_size) {
/*
* since this is last of the ciphertext, will
* just decrypt with it here
*/
- bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
- [ctx->ccm_remainder_len], pt_part);
+ memcpy(&((uint8_t *)ctx->ccm_remainder)
+ [ctx->ccm_remainder_len], datap, pt_part);
ctx->ccm_remainder_len += pt_part;
ccm_decrypt_incomplete_block(ctx, encrypt_block);
ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
@@ -424,9 +423,9 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
length = pt_part;
}
} else if (length + ctx->ccm_remainder_len < block_size) {
- /* accumulate bytes here and return */
- bcopy(datap,
- (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+ /* accumulate bytes here and return */
+ memcpy((uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+ datap,
length);
ctx->ccm_remainder_len += length;
ctx->ccm_copy_to = datap;
@@ -441,8 +440,8 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
- bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
- [ctx->ccm_remainder_len], need);
+ memcpy(&((uint8_t *)ctx->ccm_remainder)
+ [ctx->ccm_remainder_len], datap, need);
blockp = (uint8_t *)ctx->ccm_remainder;
} else {
@@ -492,7 +491,7 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block */
if (remainder > 0 && remainder < block_size) {
- bcopy(datap, ctx->ccm_remainder, remainder);
+ memcpy(ctx->ccm_remainder, datap, remainder);
ctx->ccm_remainder_len = remainder;
ctx->ccm_copy_to = datap;
if (ctx->ccm_processed_mac_len > 0) {
@@ -539,10 +538,9 @@ ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
macp = (uint8_t *)ctx->ccm_tmp;
while (mac_remain > 0) {
-
if (mac_remain < block_size) {
- bzero(macp, block_size);
- bcopy(pt, macp, mac_remain);
+ memset(macp, 0, block_size);
+ memcpy(macp, pt, mac_remain);
mac_remain = 0;
} else {
copy_block(pt, macp);
@@ -560,7 +558,7 @@ ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
calculate_ccm_mac((ccm_ctx_t *)ctx, ccm_mac_p, encrypt_block);
/* compare the input CCM MAC value with what we calculated */
- if (bcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
+ if (memcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
/* They don't match */
return (CRYPTO_INVALID_MAC);
} else {
@@ -654,10 +652,10 @@ ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
b0[0] = (have_adata << 6) | (((t - 2) / 2) << 3) | (q - 1);
/* copy the nonce value into b0 */
- bcopy(nonce, &(b0[1]), nonceSize);
+ memcpy(&(b0[1]), nonce, nonceSize);
/* store the length of the payload into b0 */
- bzero(&(b0[1+nonceSize]), q);
+ memset(&(b0[1+nonceSize]), 0, q);
payloadSize = aes_ctx->ccm_data_len;
limit = 8 < q ? 8 : q;
@@ -673,9 +671,9 @@ ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
cb[0] = 0x07 & (q-1); /* first byte */
/* copy the nonce value into the counter block */
- bcopy(nonce, &(cb[1]), nonceSize);
+ memcpy(&(cb[1]), nonce, nonceSize);
- bzero(&(cb[1+nonceSize]), q);
+ memset(&(cb[1+nonceSize]), 0, q);
/* Create the mask for the counter field based on the size of nonce */
q <<= 3;
@@ -782,7 +780,7 @@ ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
/* The IV for CBC MAC for AES CCM mode is always zero */
ivp = (uint8_t *)ctx->ccm_tmp;
- bzero(ivp, block_size);
+ memset(ivp, 0, block_size);
xor_block(ivp, mac_buf);
@@ -800,14 +798,14 @@ ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
/* 1st block: it contains encoded associated data, and some data */
authp = (uint8_t *)ctx->ccm_tmp;
- bzero(authp, block_size);
- bcopy(encoded_a, authp, encoded_a_len);
+ memset(authp, 0, block_size);
+ memcpy(authp, encoded_a, encoded_a_len);
processed = block_size - encoded_a_len;
if (processed > auth_data_len) {
/* in case auth_data is very small */
processed = auth_data_len;
}
- bcopy(auth_data, authp+encoded_a_len, processed);
+ memcpy(authp+encoded_a_len, auth_data, processed);
/* xor with previous buffer */
xor_block(authp, mac_buf);
encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
@@ -823,8 +821,8 @@ ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
* There's not a block full of data, pad rest of
* buffer with zero
*/
- bzero(authp, block_size);
- bcopy(&(auth_data[processed]), authp, remainder);
+ memset(authp, 0, block_size);
+ memcpy(authp, &(auth_data[processed]), remainder);
datap = (uint8_t *)authp;
remainder = 0;
} else {
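The CCM tag check above uses only the truth value of the comparison, which bcmp() and memcmp() share: both return zero exactly when all bytes match, so `if (memcmp(...))` preserves the old behavior. (Neither routine is constant-time; both may return at the first mismatching byte.) A sketch:

#include <string.h>

/* Nonzero means mismatch under both bcmp() and memcmp(). */
int
tag_matches(const unsigned char *a, const unsigned char *b, size_t n)
{
	return (memcmp(a, b, n) == 0);
}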
diff --git a/module/icp/algs/modes/ctr.c b/module/icp/algs/modes/ctr.c
index 82295cda877e..c31c6251624b 100644
--- a/module/icp/algs/modes/ctr.c
+++ b/module/icp/algs/modes/ctr.c
@@ -52,8 +52,8 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
if (length + ctx->ctr_remainder_len < block_size) {
/* accumulate bytes here and return */
- bcopy(datap,
- (uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len,
+ memcpy((uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len,
+ datap,
length);
ctx->ctr_remainder_len += length;
ctx->ctr_copy_to = datap;
@@ -71,8 +71,8 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
- bcopy(datap, &((uint8_t *)ctx->ctr_remainder)
- [ctx->ctr_remainder_len], need);
+ memcpy(&((uint8_t *)ctx->ctr_remainder)
+ [ctx->ctr_remainder_len], datap, need);
blockp = (uint8_t *)ctx->ctr_remainder;
} else {
@@ -114,9 +114,9 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
&out_data_1_len, &out_data_2, block_size);
/* copy block to where it belongs */
- bcopy(lastp, out_data_1, out_data_1_len);
+ memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
- bcopy(lastp + out_data_1_len, out_data_2,
+ memcpy(out_data_2, lastp + out_data_1_len,
block_size - out_data_1_len);
}
/* update offset */
@@ -134,7 +134,7 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
- bcopy(datap, ctx->ctr_remainder, remainder);
+ memcpy(ctx->ctr_remainder, datap, remainder);
ctx->ctr_remainder_len = remainder;
ctx->ctr_copy_to = datap;
goto out;
@@ -176,10 +176,11 @@ ctr_mode_final(ctr_ctx_t *ctx, crypto_data_t *out,
crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
&out_data_1_len, &out_data_2, ctx->ctr_remainder_len);
- bcopy(p, out_data_1, out_data_1_len);
+ memcpy(out_data_1, p, out_data_1_len);
if (out_data_2 != NULL) {
- bcopy((uint8_t *)p + out_data_1_len,
- out_data_2, ctx->ctr_remainder_len - out_data_1_len);
+ memcpy(out_data_2,
+ (uint8_t *)p + out_data_1_len,
+ ctx->ctr_remainder_len - out_data_1_len);
}
out->cd_offset += ctx->ctr_remainder_len;
ctx->ctr_remainder_len = 0;
diff --git a/module/icp/algs/modes/ecb.c b/module/icp/algs/modes/ecb.c
index ffbdb9d57d0a..e0b8ab15cdcf 100644
--- a/module/icp/algs/modes/ecb.c
+++ b/module/icp/algs/modes/ecb.c
@@ -49,8 +49,8 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
if (length + ctx->ecb_remainder_len < block_size) {
/* accumulate bytes here and return */
- bcopy(datap,
- (uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len,
+ memcpy((uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len,
+ datap,
length);
ctx->ecb_remainder_len += length;
ctx->ecb_copy_to = datap;
@@ -68,8 +68,8 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
- bcopy(datap, &((uint8_t *)ctx->ecb_remainder)
- [ctx->ecb_remainder_len], need);
+ memcpy(&((uint8_t *)ctx->ecb_remainder)
+ [ctx->ecb_remainder_len], datap, need);
blockp = (uint8_t *)ctx->ecb_remainder;
} else {
@@ -81,9 +81,9 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
&out_data_1_len, &out_data_2, block_size);
/* copy block to where it belongs */
- bcopy(lastp, out_data_1, out_data_1_len);
+ memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
- bcopy(lastp + out_data_1_len, out_data_2,
+ memcpy(out_data_2, lastp + out_data_1_len,
block_size - out_data_1_len);
}
/* update offset */
@@ -101,7 +101,7 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
- bcopy(datap, ctx->ecb_remainder, remainder);
+ memcpy(ctx->ecb_remainder, datap, remainder);
ctx->ecb_remainder_len = remainder;
ctx->ecb_copy_to = datap;
goto out;
diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c
index 7d34c2b040f6..e666b45b5f44 100644
--- a/module/icp/algs/modes/gcm.c
+++ b/module/icp/algs/modes/gcm.c
@@ -108,8 +108,8 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
if (length + ctx->gcm_remainder_len < block_size) {
/* accumulate bytes here and return */
- bcopy(datap,
- (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
+ memcpy((uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
+ datap,
length);
ctx->gcm_remainder_len += length;
if (ctx->gcm_copy_to == NULL) {
@@ -130,8 +130,8 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
- bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
- [ctx->gcm_remainder_len], need);
+ memcpy(&((uint8_t *)ctx->gcm_remainder)
+ [ctx->gcm_remainder_len], datap, need);
blockp = (uint8_t *)ctx->gcm_remainder;
} else {
@@ -162,10 +162,10 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
if (out_data_1_len == block_size) {
copy_block(lastp, out_data_1);
} else {
- bcopy(lastp, out_data_1, out_data_1_len);
+ memcpy(out_data_1, lastp, out_data_1_len);
if (out_data_2 != NULL) {
- bcopy(lastp + out_data_1_len,
- out_data_2,
+ memcpy(out_data_2,
+ lastp + out_data_1_len,
block_size - out_data_1_len);
}
}
@@ -187,7 +187,7 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
- bcopy(datap, ctx->gcm_remainder, remainder);
+ memcpy(ctx->gcm_remainder, datap, remainder);
ctx->gcm_remainder_len = remainder;
ctx->gcm_copy_to = datap;
goto out;
@@ -245,7 +245,7 @@ gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
(uint8_t *)ctx->gcm_tmp);
macp = (uint8_t *)ctx->gcm_remainder;
- bzero(macp + ctx->gcm_remainder_len,
+ memset(macp + ctx->gcm_remainder_len, 0,
block_size - ctx->gcm_remainder_len);
/* XOR with counter block */
@@ -309,8 +309,8 @@ gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
counterp = (uint8_t *)ctx->gcm_tmp;
/* authentication tag */
- bzero((uint8_t *)ctx->gcm_tmp, block_size);
- bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);
+ memset((uint8_t *)ctx->gcm_tmp, 0, block_size);
+ memcpy((uint8_t *)ctx->gcm_tmp, datap, ctx->gcm_remainder_len);
/* add ciphertext to the hash */
GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gcm_impl_get_ops());
@@ -350,7 +350,7 @@ gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
}
if (ctx->gcm_pt_buf != NULL) {
- bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
+ memcpy(new, ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
} else {
ASSERT0(ctx->gcm_pt_buf_len);
@@ -358,7 +358,7 @@ gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
ctx->gcm_pt_buf = new;
ctx->gcm_pt_buf_len = new_len;
- bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
+ memcpy(&ctx->gcm_pt_buf[ctx->gcm_processed_data_len], data,
length);
ctx->gcm_processed_data_len += length;
}
@@ -397,7 +397,7 @@ gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
while (remainder > 0) {
/* Incomplete last block */
if (remainder < block_size) {
- bcopy(blockp, ctx->gcm_remainder, remainder);
+ memcpy(ctx->gcm_remainder, blockp, remainder);
ctx->gcm_remainder_len = remainder;
/*
* not expecting anymore ciphertext, just
@@ -438,7 +438,7 @@ out:
xor_block((uint8_t *)ctx->gcm_J0, ghash);
/* compare the input authentication tag with what we calculated */
- if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
+ if (memcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
/* They don't match */
return (CRYPTO_INVALID_MAC);
} else {
@@ -495,7 +495,7 @@ gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
ghash = (uint8_t *)ctx->gcm_ghash;
cb = (uint8_t *)ctx->gcm_cb;
if (iv_len == 12) {
- bcopy(iv, cb, 12);
+ memcpy(cb, iv, 12);
cb[12] = 0;
cb[13] = 0;
cb[14] = 0;
@@ -506,8 +506,8 @@ gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
/* GHASH the IV */
do {
if (remainder < block_size) {
- bzero(cb, block_size);
- bcopy(&(iv[processed]), cb, remainder);
+ memset(cb, 0, block_size);
+ memcpy(cb, &(iv[processed]), remainder);
datap = (uint8_t *)cb;
remainder = 0;
} else {
@@ -539,7 +539,7 @@ gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
size_t remainder, processed;
/* encrypt zero block to get subkey H */
- bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
+ memset(ctx->gcm_H, 0, sizeof (ctx->gcm_H));
encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
(uint8_t *)ctx->gcm_H);
@@ -549,8 +549,8 @@ gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
gops = gcm_impl_get_ops();
authp = (uint8_t *)ctx->gcm_tmp;
ghash = (uint8_t *)ctx->gcm_ghash;
- bzero(authp, block_size);
- bzero(ghash, block_size);
+ memset(authp, 0, block_size);
+ memset(ghash, 0, block_size);
processed = 0;
remainder = auth_data_len;
@@ -562,9 +562,9 @@ gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
*/
if (auth_data != NULL) {
- bzero(authp, block_size);
- bcopy(&(auth_data[processed]),
- authp, remainder);
+ memset(authp, 0, block_size);
+ memcpy(authp, &(auth_data[processed]),
+ remainder);
} else {
ASSERT0(remainder);
}
@@ -1139,10 +1139,10 @@ gcm_simd_get_htab_size(boolean_t simd_mode)
static inline void
gcm_clear_ctx(gcm_ctx_t *ctx)
{
- bzero(ctx->gcm_remainder, sizeof (ctx->gcm_remainder));
- bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
- bzero(ctx->gcm_J0, sizeof (ctx->gcm_J0));
- bzero(ctx->gcm_tmp, sizeof (ctx->gcm_tmp));
+ memset(ctx->gcm_remainder, 0, sizeof (ctx->gcm_remainder));
+ memset(ctx->gcm_H, 0, sizeof (ctx->gcm_H));
+ memset(ctx->gcm_J0, 0, sizeof (ctx->gcm_J0));
+ memset(ctx->gcm_tmp, 0, sizeof (ctx->gcm_tmp));
}
/* Increment the GCM counter block by n. */
@@ -1187,8 +1187,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
need = block_size - ctx->gcm_remainder_len;
if (length < need) {
/* Accumulate bytes here and return. */
- bcopy(datap, (uint8_t *)ctx->gcm_remainder +
- ctx->gcm_remainder_len, length);
+ memcpy((uint8_t *)ctx->gcm_remainder +
+ ctx->gcm_remainder_len, datap, length);
ctx->gcm_remainder_len += length;
if (ctx->gcm_copy_to == NULL) {
@@ -1197,8 +1197,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
return (CRYPTO_SUCCESS);
} else {
/* Complete incomplete block. */
- bcopy(datap, (uint8_t *)ctx->gcm_remainder +
- ctx->gcm_remainder_len, need);
+ memcpy((uint8_t *)ctx->gcm_remainder +
+ ctx->gcm_remainder_len, datap, need);
ctx->gcm_copy_to = NULL;
}
@@ -1276,7 +1276,7 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
/* Less than GCM_AVX_MIN_ENCRYPT_BYTES remain, operate on blocks. */
while (bleft > 0) {
if (bleft < block_size) {
- bcopy(datap, ctx->gcm_remainder, bleft);
+ memcpy(ctx->gcm_remainder, datap, bleft);
ctx->gcm_remainder_len = bleft;
ctx->gcm_copy_to = datap;
goto out;
@@ -1335,7 +1335,7 @@ gcm_encrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
const uint32_t *cb = (uint32_t *)ctx->gcm_cb;
aes_encrypt_intel(keysched, aes_rounds, cb, (uint32_t *)tmp);
- bzero(remainder + rem_len, block_size - rem_len);
+ memset(remainder + rem_len, 0, block_size - rem_len);
for (int i = 0; i < rem_len; i++) {
remainder[i] ^= tmp[i];
}
@@ -1431,8 +1431,8 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
if (bleft < block_size) {
uint8_t *lastb = (uint8_t *)ctx->gcm_remainder;
- bzero(lastb, block_size);
- bcopy(datap, lastb, bleft);
+ memset(lastb, 0, block_size);
+ memcpy(lastb, datap, bleft);
/* The GCM processing. */
GHASH_AVX(ctx, lastb, block_size);
aes_encrypt_intel(key->encr_ks.ks32, key->nr, cb, tmp);
@@ -1468,7 +1468,7 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
kfpu_end();
/* Compare the input authentication tag with what we calculated. */
- if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
+ if (memcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
/* They don't match. */
return (CRYPTO_INVALID_MAC);
}
@@ -1500,8 +1500,8 @@ gcm_init_avx(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
ASSERT(block_size == GCM_BLOCK_LEN);
/* Init H (encrypt zero block) and create the initial counter block. */
- bzero(ctx->gcm_ghash, sizeof (ctx->gcm_ghash));
- bzero(H, sizeof (ctx->gcm_H));
+ memset(ctx->gcm_ghash, 0, sizeof (ctx->gcm_ghash));
+ memset(H, 0, sizeof (ctx->gcm_H));
kfpu_begin();
aes_encrypt_intel(keysched, aes_rounds,
(const uint32_t *)H, (uint32_t *)H);
@@ -1509,13 +1509,13 @@ gcm_init_avx(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
gcm_init_htab_avx(ctx->gcm_Htable, H);
if (iv_len == 12) {
- bcopy(iv, cb, 12);
+ memcpy(cb, iv, 12);
cb[12] = 0;
cb[13] = 0;
cb[14] = 0;
cb[15] = 1;
/* We need the ICB later. */
- bcopy(cb, ctx->gcm_J0, sizeof (ctx->gcm_J0));
+ memcpy(ctx->gcm_J0, cb, sizeof (ctx->gcm_J0));
} else {
/*
* Most consumers use 12 byte IVs, so it's OK to use the
@@ -1553,8 +1553,8 @@ gcm_init_avx(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
/* Zero pad and hash incomplete last block. */
uint8_t *authp = (uint8_t *)ctx->gcm_tmp;
- bzero(authp, block_size);
- bcopy(datap, authp, incomp);
+ memset(authp, 0, block_size);
+ memcpy(authp, datap, incomp);
GHASH_AVX(ctx, authp, block_size);
}
}
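The 12-byte IV path above encodes the GCM rule that a 96-bit IV forms the pre-counter block J0 directly as IV || 0x00000001, with no GHASH pass; longer IVs take the hashing path instead. A standalone sketch of that fast path:

#include <string.h>
#include <stdint.h>

void
make_j0(uint8_t cb[16], const uint8_t iv[12])
{
	memcpy(cb, iv, 12);	/* was: bcopy(iv, cb, 12) */
	cb[12] = 0;
	cb[13] = 0;
	cb[14] = 0;
	cb[15] = 1;		/* 32-bit big-endian block counter = 1 */
}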
diff --git a/module/icp/algs/modes/modes.c b/module/icp/algs/modes/modes.c
index 59743c7d6829..d505de40ee2a 100644
--- a/module/icp/algs/modes/modes.c
+++ b/module/icp/algs/modes/modes.c
@@ -155,7 +155,7 @@ crypto_free_mode_ctx(void *ctx)
#ifdef CAN_USE_GCM_ASM
if (((gcm_ctx_t *)ctx)->gcm_Htable != NULL) {
gcm_ctx_t *gcm_ctx = (gcm_ctx_t *)ctx;
- bzero(gcm_ctx->gcm_Htable, gcm_ctx->gcm_htab_len);
+ memset(gcm_ctx->gcm_Htable, 0, gcm_ctx->gcm_htab_len);
kmem_free(gcm_ctx->gcm_Htable, gcm_ctx->gcm_htab_len);
}
#endif
diff --git a/module/icp/algs/sha2/sha2.c b/module/icp/algs/sha2/sha2.c
index 6f1e9b7193d4..151432f1a5df 100644
--- a/module/icp/algs/sha2/sha2.c
+++ b/module/icp/algs/sha2/sha2.c
@@ -190,7 +190,7 @@ SHA256Transform(SHA2_CTX *ctx, const uint8_t *blk)
#endif /* __sparc */
if ((uintptr_t)blk & 0x3) { /* not 4-byte aligned? */
- bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
+ memcpy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32));
blk = (uint8_t *)ctx->buf_un.buf32;
}
@@ -406,7 +406,7 @@ SHA512Transform(SHA2_CTX *ctx, const uint8_t *blk)
if ((uintptr_t)blk & 0x7) { /* not 8-byte aligned? */
- bcopy(blk, ctx->buf_un.buf64, sizeof (ctx->buf_un.buf64));
+ memcpy(ctx->buf_un.buf64, blk, sizeof (ctx->buf_un.buf64));
blk = (uint8_t *)ctx->buf_un.buf64;
}
@@ -823,14 +823,14 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
/*
* general optimization:
*
- * only do initial bcopy() and SHA2Transform() if
+ * only do initial memcpy() and SHA2Transform() if
* buf_index != 0. if buf_index == 0, we're just
- * wasting our time doing the bcopy() since there
+ * wasting our time doing the memcpy() since there
* wasn't any data left over from a previous call to
* SHA2Update().
*/
if (buf_index) {
- bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
+ memcpy(&ctx->buf_un.buf8[buf_index], input, buf_len);
if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE)
SHA256Transform(ctx, ctx->buf_un.buf8);
else
@@ -873,7 +873,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
* general optimization:
*
* if i and input_len are the same, return now instead
- * of calling bcopy(), since the bcopy() in this case
+ * of calling memcpy(), since the memcpy() in this case
* will be an expensive noop.
*/
@@ -884,7 +884,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
}
/* buffer remaining input */
- bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
+ memcpy(&ctx->buf_un.buf8[buf_index], &input[i], input_len - i);
}
@@ -936,7 +936,7 @@ SHA2Final(void *digest, SHA2_CTX *ctx)
*/
Encode64(digest, ctx->state.s64, sizeof (uint64_t) * 3);
Encode64(last, &ctx->state.s64[3], sizeof (uint64_t));
- bcopy(last, (uint8_t *)digest + 24, 4);
+ memcpy((uint8_t *)digest + 24, last, 4);
} else if (algotype == SHA512_256_MECH_INFO_TYPE) {
Encode64(digest, ctx->state.s64, sizeof (uint64_t) * 4);
} else {
@@ -946,7 +946,7 @@ SHA2Final(void *digest, SHA2_CTX *ctx)
}
/* zeroize sensitive information */
- bzero(ctx, sizeof (*ctx));
+ memset(ctx, 0, sizeof (*ctx));
}
#ifdef _KERNEL
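The comments above describe why SHA2Update() copies into the context buffer only when it must: a pending partial block is topped up and flushed, full blocks are hashed straight from the caller's memory, and the trailing memcpy() is skipped when it would move zero bytes. A simplified sketch of that flow, with names condensed:

#include <string.h>
#include <stdint.h>

#define	BLK 64	/* SHA-256 block size */

struct sha_ctx {
	uint8_t	buf[BLK];
	size_t	buf_index;	/* bytes pending in buf */
};

static void
transform(struct sha_ctx *ctx, const uint8_t *blk)
{
	(void) ctx; (void) blk;	/* compression function elided in sketch */
}

static void
update(struct sha_ctx *ctx, const uint8_t *in, size_t len)
{
	size_t i = 0;

	/* Top up and flush a pending partial block, if any. */
	if (ctx->buf_index) {
		size_t need = BLK - ctx->buf_index;
		if (need > len)
			need = len;
		memcpy(&ctx->buf[ctx->buf_index], in, need);
		ctx->buf_index += need;
		i = need;
		if (ctx->buf_index == BLK) {
			transform(ctx, ctx->buf);
			ctx->buf_index = 0;
		}
	}
	/* Hash full blocks straight from the caller's buffer. */
	for (; len - i >= BLK; i += BLK)
		transform(ctx, &in[i]);
	/* Buffer the tail; skip the memcpy() when it would be a no-op. */
	if (i < len) {
		memcpy(&ctx->buf[ctx->buf_index], &in[i], len - i);
		ctx->buf_index += len - i;
	}
}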
diff --git a/module/icp/algs/skein/skein.c b/module/icp/algs/skein/skein.c
index 83fe84260307..41ed2dd44e9e 100644
--- a/module/icp/algs/skein/skein.c
+++ b/module/icp/algs/skein/skein.c
@@ -26,16 +26,16 @@ Skein_256_Init(Skein_256_Ctxt_t *ctx, size_t hashBitLen)
switch (hashBitLen) { /* use pre-computed values, where available */
#ifndef SKEIN_NO_PRECOMP
case 256:
- bcopy(SKEIN_256_IV_256, ctx->X, sizeof (ctx->X));
+ memcpy(ctx->X, SKEIN_256_IV_256, sizeof (ctx->X));
break;
case 224:
- bcopy(SKEIN_256_IV_224, ctx->X, sizeof (ctx->X));
+ memcpy(ctx->X, SKEIN_256_IV_224, sizeof (ctx->X));
break;
case 160:
- bcopy(SKEIN_256_IV_160, ctx->X, sizeof (ctx->X));
+ memcpy(ctx->X, SKEIN_256_IV_160, sizeof (ctx->X));
break;
case 128:
- bcopy(SKEIN_256_IV_128, ctx->X, sizeof (ctx->X));
+ memcpy(ctx->X, SKEIN_256_IV_128, sizeof (ctx->X));
break;
#endif
default:
@@ -53,11 +53,11 @@ Skein_256_Init(Skein_256_Ctxt_t *ctx, size_t hashBitLen)
cfg.w[1] = Skein_Swap64(hashBitLen);
cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof (cfg) - 3 * sizeof (cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
- bzero(ctx->X, sizeof (ctx->X));
+ memset(ctx->X, 0, sizeof (ctx->X));
Skein_256_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
break;
}
@@ -91,7 +91,7 @@ Skein_256_InitExt(Skein_256_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* compute the initial chaining values ctx->X[], based on key */
if (keyBytes == 0) { /* is there a key? */
/* no key: use all zeroes as key for config block */
- bzero(ctx->X, sizeof (ctx->X));
+ memset(ctx->X, 0, sizeof (ctx->X));
} else { /* here to pre-process a key */
Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X));
@@ -101,13 +101,13 @@ Skein_256_InitExt(Skein_256_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* set tweaks: T0 = 0; T1 = KEY type */
Skein_Start_New_Type(ctx, KEY);
/* zero the initial chaining variables */
- bzero(ctx->X, sizeof (ctx->X));
+ memset(ctx->X, 0, sizeof (ctx->X));
/* hash the key */
(void) Skein_256_Update(ctx, key, keyBytes);
/* put result into cfg.b[] */
(void) Skein_256_Final_Pad(ctx, cfg.b);
/* copy over into ctx->X[] */
- bcopy(cfg.b, ctx->X, sizeof (cfg.b));
+ memcpy(ctx->X, cfg.b, sizeof (cfg.b));
#if SKEIN_NEED_SWAP
{
uint_t i;
@@ -124,7 +124,7 @@ Skein_256_InitExt(Skein_256_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
ctx->h.hashBitLen = hashBitLen; /* output hash bit count */
Skein_Start_New_Type(ctx, CFG_FINAL);
- bzero(&cfg.w, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
+ memset(&cfg.w, 0, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER);
cfg.w[1] = Skein_Swap64(hashBitLen); /* hash result length in bits */
/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
@@ -161,7 +161,7 @@ Skein_256_Update(Skein_256_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
if (n) {
/* check on our logic here */
Skein_assert(n < msgByteCnt);
- bcopy(msg, &ctx->b[ctx->h.bCnt], n);
+ memcpy(&ctx->b[ctx->h.bCnt], msg, n);
msgByteCnt -= n;
msg += n;
ctx->h.bCnt += n;
@@ -189,7 +189,7 @@ Skein_256_Update(Skein_256_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
/* copy any remaining source message data bytes into b[] */
if (msgByteCnt) {
Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN_256_BLOCK_BYTES);
- bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt);
+ memcpy(&ctx->b[ctx->h.bCnt], msg, msgByteCnt);
ctx->h.bCnt += msgByteCnt;
}
@@ -209,7 +209,7 @@ Skein_256_Final(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN_256_BLOCK_BYTES)
- bzero(&ctx->b[ctx->h.bCnt],
+ memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN_256_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
@@ -221,13 +221,12 @@ Skein_256_Final(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
- bzero(ctx->b, sizeof (ctx->b));
+ memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
- bcopy(ctx->X, X, sizeof (X));
+ memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN_256_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
- uint64_t tmp = Skein_Swap64((uint64_t)i);
- bcopy(&tmp, ctx->b, sizeof (tmp));
+ *(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein_256_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -240,7 +239,7 @@ Skein_256_Final(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(256, &ctx->h, n,
hashVal + i * SKEIN_256_BLOCK_BYTES);
/* restore the counter mode key for next time */
- bcopy(X, ctx->X, sizeof (X));
+ memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}
@@ -262,16 +261,16 @@ Skein_512_Init(Skein_512_Ctxt_t *ctx, size_t hashBitLen)
switch (hashBitLen) { /* use pre-computed values, where available */
#ifndef SKEIN_NO_PRECOMP
case 512:
- bcopy(SKEIN_512_IV_512, ctx->X, sizeof (ctx->X));
+ memcpy(ctx->X, SKEIN_512_IV_512, sizeof (ctx->X));
break;
case 384:
- bcopy(SKEIN_512_IV_384, ctx->X, sizeof (ctx->X));
+ memcpy(ctx->X, SKEIN_512_IV_384, sizeof (ctx->X));
break;
case 256:
- bcopy(SKEIN_512_IV_256, ctx->X, sizeof (ctx->X));
+ memcpy(ctx->X, SKEIN_512_IV_256, sizeof (ctx->X));
break;
case 224:
- bcopy(SKEIN_512_IV_224, ctx->X, sizeof (ctx->X));
+ memcpy(ctx->X, SKEIN_512_IV_224, sizeof (ctx->X));
break;
#endif
default:
@@ -289,11 +288,11 @@ Skein_512_Init(Skein_512_Ctxt_t *ctx, size_t hashBitLen)
cfg.w[1] = Skein_Swap64(hashBitLen);
cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof (cfg) - 3 * sizeof (cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
- bzero(ctx->X, sizeof (ctx->X));
+ memset(ctx->X, 0, sizeof (ctx->X));
Skein_512_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
break;
}
@@ -328,7 +327,7 @@ Skein_512_InitExt(Skein_512_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* compute the initial chaining values ctx->X[], based on key */
if (keyBytes == 0) { /* is there a key? */
/* no key: use all zeroes as key for config block */
- bzero(ctx->X, sizeof (ctx->X));
+ memset(ctx->X, 0, sizeof (ctx->X));
} else { /* here to pre-process a key */
Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X));
@@ -338,12 +337,12 @@ Skein_512_InitExt(Skein_512_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* set tweaks: T0 = 0; T1 = KEY type */
Skein_Start_New_Type(ctx, KEY);
/* zero the initial chaining variables */
- bzero(ctx->X, sizeof (ctx->X));
+ memset(ctx->X, 0, sizeof (ctx->X));
(void) Skein_512_Update(ctx, key, keyBytes); /* hash the key */
/* put result into cfg.b[] */
(void) Skein_512_Final_Pad(ctx, cfg.b);
/* copy over into ctx->X[] */
- bcopy(cfg.b, ctx->X, sizeof (cfg.b));
+ memcpy(ctx->X, cfg.b, sizeof (cfg.b));
#if SKEIN_NEED_SWAP
{
uint_t i;
@@ -360,7 +359,7 @@ Skein_512_InitExt(Skein_512_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
ctx->h.hashBitLen = hashBitLen; /* output hash bit count */
Skein_Start_New_Type(ctx, CFG_FINAL);
- bzero(&cfg.w, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
+ memset(&cfg.w, 0, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER);
cfg.w[1] = Skein_Swap64(hashBitLen); /* hash result length in bits */
/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
@@ -397,7 +396,7 @@ Skein_512_Update(Skein_512_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
if (n) {
/* check on our logic here */
Skein_assert(n < msgByteCnt);
- bcopy(msg, &ctx->b[ctx->h.bCnt], n);
+ memcpy(&ctx->b[ctx->h.bCnt], msg, n);
msgByteCnt -= n;
msg += n;
ctx->h.bCnt += n;
@@ -425,7 +424,7 @@ Skein_512_Update(Skein_512_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
/* copy any remaining source message data bytes into b[] */
if (msgByteCnt) {
Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN_512_BLOCK_BYTES);
- bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt);
+ memcpy(&ctx->b[ctx->h.bCnt], msg, msgByteCnt);
ctx->h.bCnt += msgByteCnt;
}
@@ -445,7 +444,7 @@ Skein_512_Final(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN_512_BLOCK_BYTES)
- bzero(&ctx->b[ctx->h.bCnt],
+ memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN_512_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
@@ -457,13 +456,12 @@ Skein_512_Final(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
- bzero(ctx->b, sizeof (ctx->b));
+ memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
- bcopy(ctx->X, X, sizeof (X));
+ memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN_512_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
- uint64_t tmp = Skein_Swap64((uint64_t)i);
- bcopy(&tmp, ctx->b, sizeof (tmp));
+ *(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein_512_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -476,7 +474,7 @@ Skein_512_Final(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(512, &ctx->h, n,
hashVal + i * SKEIN_512_BLOCK_BYTES);
/* restore the counter mode key for next time */
- bcopy(X, ctx->X, sizeof (X));
+ memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}
@@ -498,13 +496,13 @@ Skein1024_Init(Skein1024_Ctxt_t *ctx, size_t hashBitLen)
switch (hashBitLen) { /* use pre-computed values, where available */
#ifndef SKEIN_NO_PRECOMP
case 512:
- bcopy(SKEIN1024_IV_512, ctx->X, sizeof (ctx->X));
+ memcpy(ctx->X, SKEIN1024_IV_512, sizeof (ctx->X));
break;
case 384:
- bcopy(SKEIN1024_IV_384, ctx->X, sizeof (ctx->X));
+ memcpy(ctx->X, SKEIN1024_IV_384, sizeof (ctx->X));
break;
case 1024:
- bcopy(SKEIN1024_IV_1024, ctx->X, sizeof (ctx->X));
+ memcpy(ctx->X, SKEIN1024_IV_1024, sizeof (ctx->X));
break;
#endif
default:
@@ -522,11 +520,11 @@ Skein1024_Init(Skein1024_Ctxt_t *ctx, size_t hashBitLen)
cfg.w[1] = Skein_Swap64(hashBitLen);
cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof (cfg) - 3 * sizeof (cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
- bzero(ctx->X, sizeof (ctx->X));
+ memset(ctx->X, 0, sizeof (ctx->X));
Skein1024_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
break;
}
@@ -561,7 +559,7 @@ Skein1024_InitExt(Skein1024_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* compute the initial chaining values ctx->X[], based on key */
if (keyBytes == 0) { /* is there a key? */
/* no key: use all zeroes as key for config block */
- bzero(ctx->X, sizeof (ctx->X));
+ memset(ctx->X, 0, sizeof (ctx->X));
} else { /* here to pre-process a key */
Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X));
/* do a mini-Init right here */
@@ -570,12 +568,12 @@ Skein1024_InitExt(Skein1024_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
/* set tweaks: T0 = 0; T1 = KEY type */
Skein_Start_New_Type(ctx, KEY);
/* zero the initial chaining variables */
- bzero(ctx->X, sizeof (ctx->X));
+ memset(ctx->X, 0, sizeof (ctx->X));
(void) Skein1024_Update(ctx, key, keyBytes); /* hash the key */
/* put result into cfg.b[] */
(void) Skein1024_Final_Pad(ctx, cfg.b);
/* copy over into ctx->X[] */
- bcopy(cfg.b, ctx->X, sizeof (cfg.b));
+ memcpy(ctx->X, cfg.b, sizeof (cfg.b));
#if SKEIN_NEED_SWAP
{
uint_t i;
@@ -592,7 +590,7 @@ Skein1024_InitExt(Skein1024_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
ctx->h.hashBitLen = hashBitLen; /* output hash bit count */
Skein_Start_New_Type(ctx, CFG_FINAL);
- bzero(&cfg.w, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
+ memset(&cfg.w, 0, sizeof (cfg.w)); /* pre-pad cfg.w[] with zeroes */
cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER);
/* hash result length in bits */
cfg.w[1] = Skein_Swap64(hashBitLen);
@@ -630,7 +628,7 @@ Skein1024_Update(Skein1024_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
if (n) {
/* check on our logic here */
Skein_assert(n < msgByteCnt);
- bcopy(msg, &ctx->b[ctx->h.bCnt], n);
+ memcpy(&ctx->b[ctx->h.bCnt], msg, n);
msgByteCnt -= n;
msg += n;
ctx->h.bCnt += n;
@@ -658,7 +656,7 @@ Skein1024_Update(Skein1024_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
/* copy any remaining source message data bytes into b[] */
if (msgByteCnt) {
Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN1024_BLOCK_BYTES);
- bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt);
+ memcpy(&ctx->b[ctx->h.bCnt], msg, msgByteCnt);
ctx->h.bCnt += msgByteCnt;
}
@@ -678,7 +676,7 @@ Skein1024_Final(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN1024_BLOCK_BYTES)
- bzero(&ctx->b[ctx->h.bCnt],
+ memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN1024_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
@@ -690,13 +688,12 @@ Skein1024_Final(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
- bzero(ctx->b, sizeof (ctx->b));
+ memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
- bcopy(ctx->X, X, sizeof (X));
+ memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN1024_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
- uint64_t tmp = Skein_Swap64((uint64_t)i);
- bcopy(&tmp, ctx->b, sizeof (tmp));
+ *(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein1024_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -709,7 +706,7 @@ Skein1024_Final(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(1024, &ctx->h, n,
hashVal + i * SKEIN1024_BLOCK_BYTES);
/* restore the counter mode key for next time */
- bcopy(X, ctx->X, sizeof (X));
+ memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}
@@ -727,7 +724,7 @@ Skein_256_Final_Pad(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN_256_BLOCK_BYTES)
- bzero(&ctx->b[ctx->h.bCnt],
+ memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN_256_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
Skein_256_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt);
@@ -748,7 +745,7 @@ Skein_512_Final_Pad(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; /* tag as the final block */
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN_512_BLOCK_BYTES)
- bzero(&ctx->b[ctx->h.bCnt],
+ memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN_512_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
Skein_512_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt);
@@ -770,7 +767,7 @@ Skein1024_Final_Pad(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL;
/* zero pad b[] if necessary */
if (ctx->h.bCnt < SKEIN1024_BLOCK_BYTES)
- bzero(&ctx->b[ctx->h.bCnt],
+ memset(&ctx->b[ctx->h.bCnt], 0,
SKEIN1024_BLOCK_BYTES - ctx->h.bCnt);
/* process the final block */
Skein1024_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt);
@@ -798,13 +795,12 @@ Skein_256_Output(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
- bzero(ctx->b, sizeof (ctx->b));
+ memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
- bcopy(ctx->X, X, sizeof (X));
+ memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN_256_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
- uint64_t tmp = Skein_Swap64((uint64_t)i);
- bcopy(&tmp, ctx->b, sizeof (tmp));
+ *(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein_256_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -817,7 +813,7 @@ Skein_256_Output(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(256, &ctx->h, n,
hashVal + i * SKEIN_256_BLOCK_BYTES);
/* restore the counter mode key for next time */
- bcopy(X, ctx->X, sizeof (X));
+ memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}
@@ -838,13 +834,12 @@ Skein_512_Output(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
- bzero(ctx->b, sizeof (ctx->b));
+ memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
- bcopy(ctx->X, X, sizeof (X));
+ memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN_512_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
- uint64_t tmp = Skein_Swap64((uint64_t)i);
- bcopy(&tmp, ctx->b, sizeof (tmp));
+ *(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein_512_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -857,7 +852,7 @@ Skein_512_Output(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(256, &ctx->h, n,
hashVal + i * SKEIN_512_BLOCK_BYTES);
/* restore the counter mode key for next time */
- bcopy(X, ctx->X, sizeof (X));
+ memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}
@@ -878,13 +873,12 @@ Skein1024_Output(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
/* run Threefish in "counter mode" to generate output */
/* zero out b[], so it can hold the counter */
- bzero(ctx->b, sizeof (ctx->b));
+ memset(ctx->b, 0, sizeof (ctx->b));
/* keep a local copy of counter mode "key" */
- bcopy(ctx->X, X, sizeof (X));
+ memcpy(X, ctx->X, sizeof (X));
for (i = 0; i * SKEIN1024_BLOCK_BYTES < byteCnt; i++) {
/* build the counter block */
- uint64_t tmp = Skein_Swap64((uint64_t)i);
- bcopy(&tmp, ctx->b, sizeof (tmp));
+ *(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
Skein_Start_New_Type(ctx, OUT_FINAL);
/* run "counter mode" */
Skein1024_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -897,7 +891,7 @@ Skein1024_Output(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
Skein_Show_Final(256, &ctx->h, n,
hashVal + i * SKEIN1024_BLOCK_BYTES);
/* restore the counter mode key for next time */
- bcopy(X, ctx->X, sizeof (X));
+ memcpy(ctx->X, X, sizeof (X));
}
return (SKEIN_SUCCESS);
}
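
Note on the three *_Output hunks above: each runs Threefish in counter mode, zeroing b[] once, writing the byteswapped block index into its first eight bytes, processing that one-word block, and restoring the saved chaining value X before the next iteration. The direct store that replaces the bcopy() relies on b[] sitting right after the uint64_t X[] array and therefore being 8-byte aligned. A minimal runnable sketch of the loop, where sketch_ctx and mix_block() are stand-ins for the real Skein context and Skein_256_Process_Block(), and the big-endian byteswap is elided:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define	BLOCK_BYTES	32		/* Skein-256 block size */
#define	WORDS		(BLOCK_BYTES / 8)

struct sketch_ctx {
	uint64_t X[WORDS];		/* chaining value ("key") */
	uint8_t b[BLOCK_BYTES];		/* block buffer; 8-byte aligned */
};

/* Placeholder for Threefish/Skein_256_Process_Block(): just mixes. */
static void
mix_block(struct sketch_ctx *ctx)
{
	for (int w = 0; w < WORDS; w++)
		ctx->X[w] ^= ((uint64_t *)ctx->b)[w] + 0x9e3779b97f4a7c15ULL;
}

static void
counter_mode_output(struct sketch_ctx *ctx, uint8_t *out, size_t byte_cnt)
{
	uint64_t X[WORDS];
	size_t i, n;

	memset(ctx->b, 0, sizeof (ctx->b));	/* b[] holds the counter */
	memcpy(X, ctx->X, sizeof (X));		/* keep the "key" */
	for (i = 0; i * BLOCK_BYTES < byte_cnt; i++) {
		/* Skein_Swap64() would byteswap this on big-endian CPUs */
		*(uint64_t *)ctx->b = (uint64_t)i;
		mix_block(ctx);
		n = byte_cnt - i * BLOCK_BYTES;
		if (n > BLOCK_BYTES)
			n = BLOCK_BYTES;
		memcpy(out + i * BLOCK_BYTES, ctx->X, n);
		memcpy(ctx->X, X, sizeof (X));	/* restore for next block */
	}
}

int
main(void)
{
	struct sketch_ctx ctx = {{0}};
	uint8_t out[40];

	counter_mode_output(&ctx, out, sizeof (out));
	printf("%02x\n", (unsigned)out[0]);
	return (0);
}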
diff --git a/module/icp/algs/skein/skein_port.h b/module/icp/algs/skein/skein_port.h
index ce4353082552..96d1266d019e 100644
--- a/module/icp/algs/skein/skein_port.h
+++ b/module/icp/algs/skein/skein_port.h
@@ -50,9 +50,9 @@
#else
/* here for x86 and x86-64 CPUs (and other detected little-endian CPUs) */
#define SKEIN_NEED_SWAP (0)
-#define Skein_Put64_LSB_First(dst08, src64, bCnt) bcopy(src64, dst08, bCnt)
+#define Skein_Put64_LSB_First(dst08, src64, bCnt) memcpy(dst08, src64, bCnt)
#define Skein_Get64_LSB_First(dst64, src08, wCnt) \
- bcopy(src08, dst64, 8 * (wCnt))
+ memcpy(dst64, src08, 8 * (wCnt))
#endif
#endif /* ifndef SKEIN_NEED_SWAP */
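
These two macros make explicit the rule every hunk in this commit follows: bcopy(src, dst, len) becomes memcpy(dst, src, len), while bzero()/bcmp() become memset()/memcmp() with the extra 0 argument. A two-line refresher on the argument-order swap:

#include <stdio.h>
#include <string.h>
#include <strings.h>	/* bcopy() on platforms that still ship it */

int
main(void)
{
	char src[8] = "skein", a[8], b[8];

	bcopy(src, a, sizeof (src));	/* legacy: (src, dst, len) */
	memcpy(b, src, sizeof (src));	/* standard: (dst, src, len) */
	printf("%s %s\n", a, b);	/* prints: skein skein */
	return (0);
}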
diff --git a/module/icp/api/kcf_ctxops.c b/module/icp/api/kcf_ctxops.c
index 67bf76a8f1fc..25ed94fe8bdf 100644
--- a/module/icp/api/kcf_ctxops.c
+++ b/module/icp/api/kcf_ctxops.c
@@ -138,7 +138,7 @@ crypto_destroy_ctx_template(crypto_ctx_template_t tmpl)
ASSERT(ctx_tmpl->ct_prov_tmpl != NULL);
- bzero(ctx_tmpl->ct_prov_tmpl, ctx_tmpl->ct_size);
+ memset(ctx_tmpl->ct_prov_tmpl, 0, ctx_tmpl->ct_size);
kmem_free(ctx_tmpl->ct_prov_tmpl, ctx_tmpl->ct_size);
kmem_free(ctx_tmpl, sizeof (kcf_ctx_template_t));
}
diff --git a/module/icp/core/kcf_mech_tabs.c b/module/icp/core/kcf_mech_tabs.c
index 347190aa7f8b..ec43d53dc3ff 100644
--- a/module/icp/core/kcf_mech_tabs.c
+++ b/module/icp/core/kcf_mech_tabs.c
@@ -250,7 +250,8 @@ kcf_add_mech_provider(short mech_indx,
/* allocate and initialize new kcf_prov_mech_desc */
prov_mech = kmem_zalloc(sizeof (kcf_prov_mech_desc_t), KM_SLEEP);
- bcopy(mech_info, &prov_mech->pm_mech_info, sizeof (crypto_mech_info_t));
+ memcpy(&prov_mech->pm_mech_info, mech_info,
+ sizeof (crypto_mech_info_t));
prov_mech->pm_prov_desc = prov_desc;
prov_desc->pd_mech_indx[KCF_MECH2CLASS(kcf_mech_type)]
[KCF_MECH2INDEX(kcf_mech_type)] = mech_indx;
diff --git a/module/icp/core/kcf_prov_lib.c b/module/icp/core/kcf_prov_lib.c
index 505dbec313de..4bc99a8eb84c 100644
--- a/module/icp/core/kcf_prov_lib.c
+++ b/module/icp/core/kcf_prov_lib.c
@@ -70,7 +70,7 @@ crypto_uio_copy_to_data(crypto_data_t *data, uchar_t *buf, int len)
offset, length);
datap = (uchar_t *)(zfs_uio_iovbase(uiop, vec_idx) + offset);
- bcopy(buf, datap, cur_len);
+ memcpy(datap, buf, cur_len);
buf += cur_len;
length -= cur_len;
@@ -99,8 +99,8 @@ crypto_put_output_data(uchar_t *buf, crypto_data_t *output, int len)
output->cd_length = len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
- bcopy(buf, (uchar_t *)(output->cd_raw.iov_base +
- output->cd_offset), len);
+ memcpy((uchar_t *)(output->cd_raw.iov_base +
+ output->cd_offset), buf, len);
break;
case CRYPTO_DATA_UIO:
diff --git a/module/icp/io/aes.c b/module/icp/io/aes.c
index b0f51262dd07..945d560ebe57 100644
--- a/module/icp/io/aes.c
+++ b/module/icp/io/aes.c
@@ -832,7 +832,7 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
crypto_spi_ctx_template_t template)
{
- aes_ctx_t aes_ctx; /* on the stack */
+ aes_ctx_t aes_ctx = {{{{0}}}};
off_t saved_offset;
size_t saved_length;
size_t length_needed;
@@ -858,8 +858,6 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS)
return (ret);
- bzero(&aes_ctx, sizeof (aes_ctx_t));
-
ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
KM_SLEEP, B_TRUE);
if (ret != CRYPTO_SUCCESS)
@@ -944,7 +942,7 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
out:
if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
- bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len);
kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
}
#ifdef CAN_USE_GCM_ASM
@@ -953,7 +951,7 @@ out:
gcm_ctx_t *ctx = (gcm_ctx_t *)&aes_ctx;
- bzero(ctx->gcm_Htable, ctx->gcm_htab_len);
+ memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len);
kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
}
#endif
@@ -966,7 +964,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
crypto_spi_ctx_template_t template)
{
- aes_ctx_t aes_ctx; /* on the stack */
+ aes_ctx_t aes_ctx = {{{{0}}}};
off_t saved_offset;
size_t saved_length;
size_t length_needed;
@@ -992,8 +990,6 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS)
return (ret);
- bzero(&aes_ctx, sizeof (aes_ctx_t));
-
ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
KM_SLEEP, B_FALSE);
if (ret != CRYPTO_SUCCESS)
@@ -1096,7 +1092,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
out:
if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
- bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len);
kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
}
@@ -1113,7 +1109,7 @@ out:
if (((gcm_ctx_t *)&aes_ctx)->gcm_Htable != NULL) {
gcm_ctx_t *ctx = (gcm_ctx_t *)&aes_ctx;
- bzero(ctx->gcm_Htable, ctx->gcm_htab_len);
+ memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len);
kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
}
#endif
@@ -1150,7 +1146,7 @@ aes_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
* in the key.
*/
if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
- bzero(keysched, size);
+ memset(keysched, 0, size);
kmem_free(keysched, size);
return (rv);
}
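
Worth noting across aes.c (and in the sha2/skein providers below): buffers holding key material are zeroed immediately before kmem_free(), so secrets do not linger in freed kernel memory. A user-space analogue, with free_key_material() as a hypothetical helper name:

#include <stdlib.h>

/* Hypothetical helper mirroring the memset-then-kmem_free idiom above. */
static void
free_key_material(void *buf, size_t len)
{
	volatile unsigned char *p = buf;

	/*
	 * volatile stores cannot be elided.  With a plain memset()
	 * followed by libc free(), the compiler may drop the zeroing
	 * as a dead store -- the problem explicit_bzero(3) exists to
	 * prevent.  kmem_free() is opaque to the compiler, so the
	 * kernel code above generally keeps its memset().
	 */
	while (len--)
		*p++ = 0;
	free(buf);
}

int
main(void)
{
	unsigned char *key = malloc(32);

	if (key != NULL)
		free_key_material(key, 32);
	return (0);
}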
@@ -1170,7 +1166,8 @@ aes_free_context(crypto_ctx_t *ctx)
if (aes_ctx != NULL) {
if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
ASSERT(aes_ctx->ac_keysched_len != 0);
- bzero(aes_ctx->ac_keysched, aes_ctx->ac_keysched_len);
+ memset(aes_ctx->ac_keysched, 0,
+ aes_ctx->ac_keysched_len);
kmem_free(aes_ctx->ac_keysched,
aes_ctx->ac_keysched_len);
}
@@ -1260,7 +1257,7 @@ aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
if (rv != CRYPTO_SUCCESS) {
if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
- bzero(keysched, size);
+ memset(keysched, 0, size);
kmem_free(keysched, size);
}
}
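
aes_encrypt_atomic() and aes_decrypt_atomic() now zero their context at the definition instead of calling bzero() afterwards; the quadruple braces reflect aes_ctx_t's nesting (a plain = {0} is equally valid C but draws missing-braces warnings from some compilers). A compilable illustration with a hypothetical layout:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical nesting that makes {{{{0}}}} the fully-braced zero init. */
typedef struct {
	struct {
		struct {
			uint64_t k[4];
		} inner;
	} cc;
	int flags;
} fake_ctx_t;

int
main(void)
{
	fake_ctx_t a = {{{{0}}}};	/* all members zero at definition */
	fake_ctx_t b;

	memset(&b, 0, sizeof (b));	/* the pattern being replaced */
	printf("%d %d\n", a.flags, b.flags);	/* 0 0 */
	return (0);
}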
diff --git a/module/icp/io/sha2_mod.c b/module/icp/io/sha2_mod.c
index c586c3272647..4a218b500325 100644
--- a/module/icp/io/sha2_mod.c
+++ b/module/icp/io/sha2_mod.c
@@ -46,7 +46,7 @@
(len) = (uint32_t)*((ulong_t *)(m)->cm_param); \
else { \
ulong_t tmp_ulong; \
- bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t)); \
+ memcpy(&tmp_ulong, (m)->cm_param, sizeof (ulong_t)); \
(len) = (uint32_t)tmp_ulong; \
} \
}
@@ -309,9 +309,9 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
*/
SHA2Final(digest_scratch, sha2_ctx);
- bcopy(digest_scratch, (uchar_t *)
+ memcpy((uchar_t *)
zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
- digest_len);
+ digest_scratch, digest_len);
} else {
SHA2Final((uchar_t *)zfs_uio_iovbase(digest->
cd_uio, vec_idx) + offset,
@@ -336,8 +336,9 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
cur_len =
MIN(zfs_uio_iovlen(digest->cd_uio, vec_idx) -
offset, length);
- bcopy(digest_tmp + scratch_offset,
+ memcpy(
zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
+ digest_tmp + scratch_offset,
cur_len);
length -= cur_len;
@@ -630,8 +631,8 @@ sha2_digest_atomic(crypto_mechanism_t *mechanism, crypto_data_t *data,
static void
sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
{
- uint64_t ipad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)];
- uint64_t opad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)];
+ uint64_t ipad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0};
+ uint64_t opad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0};
int i, block_size, blocks_per_int64;
/* Determine the block size */
@@ -643,12 +644,12 @@ sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
blocks_per_int64 = SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t);
}
- (void) bzero(ipad, block_size);
- (void) bzero(opad, block_size);
+ (void) memset(ipad, 0, block_size);
+ (void) memset(opad, 0, block_size);
if (keyval != NULL) {
- (void) bcopy(keyval, ipad, length_in_bytes);
- (void) bcopy(keyval, opad, length_in_bytes);
+ (void) memcpy(ipad, keyval, length_in_bytes);
+ (void) memcpy(opad, keyval, length_in_bytes);
} else {
ASSERT0(length_in_bytes);
}
@@ -666,7 +667,6 @@ sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
/* perform SHA2 on opad */
SHA2Init(ctx->hc_mech_type, &ctx->hc_ocontext);
SHA2Update(&ctx->hc_ocontext, (uint8_t *)opad, block_size);
-
}
/*
@@ -708,7 +708,7 @@ sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
if (ctx_template != NULL) {
/* reuse context template */
- bcopy(ctx_template, PROV_SHA2_HMAC_CTX(ctx),
+ memcpy(PROV_SHA2_HMAC_CTX(ctx), ctx_template,
sizeof (sha2_hmac_ctx_t));
} else {
/* no context template, compute context */
@@ -746,7 +746,7 @@ sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
}
if (ret != CRYPTO_SUCCESS) {
- bzero(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
+ memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t));
kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
ctx->cc_provider_private = NULL;
}
@@ -850,8 +850,8 @@ sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac)
*/
SHA2Final(digest,
&PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext);
- bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
- mac->cd_offset, digest_len);
+ memcpy((unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, digest, digest_len);
} else {
SHA2Final((unsigned char *)mac->cd_raw.iov_base +
mac->cd_offset,
@@ -872,7 +872,7 @@ sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac)
else
mac->cd_length = 0;
- bzero(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
+ memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t));
kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
ctx->cc_provider_private = NULL;
@@ -928,7 +928,7 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
if (ctx_template != NULL) {
/* reuse context template */
- bcopy(ctx_template, &sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+ memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t));
} else {
sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
/* no context template, initialize context */
@@ -1001,8 +1001,8 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
* the user only what was requested.
*/
SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext);
- bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
- mac->cd_offset, digest_len);
+ memcpy((unsigned char *)mac->cd_raw.iov_base +
+ mac->cd_offset, digest, digest_len);
} else {
SHA2Final((unsigned char *)mac->cd_raw.iov_base +
mac->cd_offset, &sha2_hmac_ctx.hc_ocontext);
@@ -1021,7 +1021,7 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
return (CRYPTO_SUCCESS);
}
bail:
- bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+ memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t));
mac->cd_length = 0;
return (ret);
}
@@ -1060,7 +1060,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
if (ctx_template != NULL) {
/* reuse context template */
- bcopy(ctx_template, &sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+ memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t));
} else {
sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
/* no context template, initialize context */
@@ -1137,7 +1137,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
switch (mac->cd_format) {
case CRYPTO_DATA_RAW:
- if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
+ if (memcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
mac->cd_offset, digest_len) != 0)
ret = CRYPTO_INVALID_MAC;
break;
@@ -1170,7 +1170,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
cur_len = MIN(zfs_uio_iovlen(mac->cd_uio, vec_idx) -
offset, length);
- if (bcmp(digest + scratch_offset,
+ if (memcmp(digest + scratch_offset,
zfs_uio_iovbase(mac->cd_uio, vec_idx) + offset,
cur_len) != 0) {
ret = CRYPTO_INVALID_MAC;
@@ -1191,7 +1191,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
return (ret);
bail:
- bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+ memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t));
mac->cd_length = 0;
return (ret);
}
@@ -1282,7 +1282,7 @@ sha2_free_context(crypto_ctx_t *ctx)
else
ctx_len = sizeof (sha2_hmac_ctx_t);
- bzero(ctx->cc_provider_private, ctx_len);
+ memset(ctx->cc_provider_private, 0, ctx_len);
kmem_free(ctx->cc_provider_private, ctx_len);
ctx->cc_provider_private = NULL;
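
sha2_mac_init_ctx() above is the textbook HMAC key schedule: the key, zero-padded to the hash block size, is XORed with 0x36 into ipad and with 0x5c into opad, and one block of each is pre-hashed into the inner and outer contexts. The same construction reappears in crypto_mac_init() in the FreeBSD crypto_os.c hunks further down. A self-contained sketch of the pad derivation alone (hmac_pads() is a made-up name; keys longer than a block are hashed first, which is omitted here):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define	BLOCK	128	/* SHA-512 HMAC block size in bytes */

/*
 * Derive the two HMAC pad blocks the way sha2_mac_init_ctx() does:
 * zero-pad the key to BLOCK bytes, then XOR with 0x36 / 0x5c.
 */
static void
hmac_pads(const uint8_t *key, size_t keylen,
    uint8_t ipad[BLOCK], uint8_t opad[BLOCK])
{
	memset(ipad, 0, BLOCK);
	memset(opad, 0, BLOCK);
	memcpy(ipad, key, keylen);	/* keylen must be <= BLOCK */
	memcpy(opad, key, keylen);
	for (int i = 0; i < BLOCK; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}

int
main(void)
{
	uint8_t ipad[BLOCK], opad[BLOCK];

	hmac_pads((const uint8_t *)"key", 3, ipad, opad);
	printf("%02x %02x\n", ipad[0], opad[0]);	/* 5d 37 */
	return (0);
}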
diff --git a/module/icp/io/skein_mod.c b/module/icp/io/skein_mod.c
index 1d6969e68862..a2ed6cedd8c6 100644
--- a/module/icp/io/skein_mod.c
+++ b/module/icp/io/skein_mod.c
@@ -292,8 +292,8 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest)
while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) {
cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset,
length);
- bcopy(digest_tmp + scratch_offset,
- zfs_uio_iovbase(uio, vec_idx) + offset, cur_len);
+ memcpy(zfs_uio_iovbase(uio, vec_idx) + offset,
+ digest_tmp + scratch_offset, cur_len);
length -= cur_len;
vec_idx++;
@@ -349,7 +349,7 @@ skein_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism)
return (CRYPTO_SUCCESS);
errout:
- bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
+ memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
return (error);
@@ -376,7 +376,7 @@ skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest)
error = skein_update(ctx, data);
if (error != CRYPTO_SUCCESS) {
- bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
+ memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
digest->cd_length = 0;
@@ -452,7 +452,7 @@ skein_final(crypto_ctx_t *ctx, crypto_data_t *digest)
else
digest->cd_length = 0;
- bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
+ memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*(SKEIN_CTX(ctx))));
SKEIN_CTX_LVALUE(ctx) = NULL;
@@ -494,7 +494,7 @@ out:
CRYPTO_BITS2BYTES(skein_ctx.sc_digest_bitlen);
else
digest->cd_length = 0;
- bzero(&skein_ctx, sizeof (skein_ctx));
+ memset(&skein_ctx, 0, sizeof (skein_ctx));
return (error);
}
@@ -543,7 +543,7 @@ skein_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
return (CRYPTO_HOST_MEMORY);
if (ctx_template != NULL) {
- bcopy(ctx_template, SKEIN_CTX(ctx),
+ memcpy(SKEIN_CTX(ctx), ctx_template,
sizeof (*SKEIN_CTX(ctx)));
} else {
error = skein_mac_ctx_build(SKEIN_CTX(ctx), mechanism, key);
@@ -553,7 +553,7 @@ skein_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
return (CRYPTO_SUCCESS);
errout:
- bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
+ memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
return (error);
}
@@ -573,13 +573,13 @@ skein_mac_atomic(crypto_mechanism_t *mechanism,
crypto_spi_ctx_template_t ctx_template)
{
/* faux crypto context just for skein_digest_{update,final} */
- int error;
+ int error;
crypto_ctx_t ctx;
skein_ctx_t skein_ctx;
SKEIN_CTX_LVALUE(&ctx) = &skein_ctx;
if (ctx_template != NULL) {
- bcopy(ctx_template, &skein_ctx, sizeof (skein_ctx));
+ memcpy(&skein_ctx, ctx_template, sizeof (skein_ctx));
} else {
error = skein_mac_ctx_build(&skein_ctx, mechanism, key);
if (error != CRYPTO_SUCCESS)
@@ -593,7 +593,7 @@ skein_mac_atomic(crypto_mechanism_t *mechanism,
return (CRYPTO_SUCCESS);
errout:
- bzero(&skein_ctx, sizeof (skein_ctx));
+ memset(&skein_ctx, 0, sizeof (skein_ctx));
return (error);
}
@@ -624,7 +624,7 @@ skein_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
return (CRYPTO_SUCCESS);
errout:
- bzero(ctx_tmpl, sizeof (*ctx_tmpl));
+ memset(ctx_tmpl, 0, sizeof (*ctx_tmpl));
kmem_free(ctx_tmpl, sizeof (*ctx_tmpl));
return (error);
}
@@ -636,7 +636,7 @@ static int
skein_free_context(crypto_ctx_t *ctx)
{
if (SKEIN_CTX(ctx) != NULL) {
- bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
+ memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
SKEIN_CTX_LVALUE(ctx) = NULL;
}
diff --git a/module/nvpair/nvpair.c b/module/nvpair/nvpair.c
index b4463dd7308f..53b4f7e5c644 100644
--- a/module/nvpair/nvpair.c
+++ b/module/nvpair/nvpair.c
@@ -203,7 +203,7 @@ nv_mem_zalloc(nvpriv_t *nvp, size_t size)
void *buf;
if ((buf = nva->nva_ops->nv_ao_alloc(nva, size)) != NULL)
- bzero(buf, size);
+ memset(buf, 0, size);
return (buf);
}
@@ -219,7 +219,7 @@ nv_mem_free(nvpriv_t *nvp, void *buf, size_t size)
static void
nv_priv_init(nvpriv_t *priv, nv_alloc_t *nva, uint32_t stat)
{
- bzero(priv, sizeof (nvpriv_t));
+ memset(priv, 0, sizeof (nvpriv_t));
priv->nvp_nva = nva;
priv->nvp_stat = stat;
@@ -1203,7 +1203,7 @@ nvlist_add_common(nvlist_t *nvl, const char *name,
nvp->nvp_name_sz = name_sz;
nvp->nvp_value_elem = nelem;
nvp->nvp_type = type;
- bcopy(name, NVP_NAME(nvp), name_sz);
+ memcpy(NVP_NAME(nvp), name, name_sz);
switch (type) {
case DATA_TYPE_BOOLEAN:
@@ -1217,7 +1217,7 @@ nvlist_add_common(nvlist_t *nvl, const char *name,
buf += nelem * sizeof (uint64_t);
for (i = 0; i < nelem; i++) {
int slen = strlen(strs[i]) + 1;
- bcopy(strs[i], buf, slen);
+ memcpy(buf, strs[i], slen);
cstrs[i] = buf;
buf += slen;
}
@@ -1255,7 +1255,7 @@ nvlist_add_common(nvlist_t *nvl, const char *name,
break;
}
default:
- bcopy(data, NVP_VALUE(nvp), value_sz);
+ memcpy(NVP_VALUE(nvp), data, value_sz);
}
/* if unique name, remove before add */
@@ -1588,7 +1588,7 @@ nvpair_value_common(const nvpair_t *nvp, data_type_t type, uint_t *nelem,
return (EINVAL);
if ((value_sz = i_get_value_size(type, NULL, 1)) < 0)
return (EINVAL);
- bcopy(NVP_VALUE(nvp), data, (size_t)value_sz);
+ memcpy(data, NVP_VALUE(nvp), (size_t)value_sz);
if (nelem != NULL)
*nelem = 1;
break;
@@ -2540,7 +2540,7 @@ nvs_embedded_nvl_array(nvstream_t *nvs, nvpair_t *nvp, size_t *size)
size_t len = nelem * sizeof (uint64_t);
nvlist_t *embedded = (nvlist_t *)((uintptr_t)nvlp + len);
- bzero(nvlp, len); /* don't trust packed data */
+ memset(nvlp, 0, len); /* don't trust packed data */
for (i = 0; i < nelem; i++) {
if (nvs_embedded(nvs, embedded) != 0) {
nvpair_free(nvp);
@@ -2820,15 +2820,15 @@ native_cp(nvstream_t *nvs, void *buf, size_t size)
return (EFAULT);
/*
- * The bcopy() below eliminates alignment requirement
+ * The memcpy() below eliminates alignment requirement
* on the buffer (stream) and is preferred over direct access.
*/
switch (nvs->nvs_op) {
case NVS_OP_ENCODE:
- bcopy(buf, native->n_curr, size);
+ memcpy(native->n_curr, buf, size);
break;
case NVS_OP_DECODE:
- bcopy(native->n_curr, buf, size);
+ memcpy(buf, native->n_curr, size);
break;
default:
return (EINVAL);
@@ -2895,7 +2895,7 @@ nvs_native_nvl_fini(nvstream_t *nvs)
if (native->n_curr + sizeof (int) > native->n_end)
return (EFAULT);
- bzero(native->n_curr, sizeof (int));
+ memset(native->n_curr, 0, sizeof (int));
native->n_curr += sizeof (int);
}
@@ -2912,10 +2912,10 @@ nvpair_native_embedded(nvstream_t *nvs, nvpair_t *nvp)
/*
* Null out the pointer that is meaningless in the packed
* structure. The address may not be aligned, so we have
- * to use bzero.
+ * to use memset.
*/
- bzero((char *)packed + offsetof(nvlist_t, nvl_priv),
- sizeof (uint64_t));
+ memset((char *)packed + offsetof(nvlist_t, nvl_priv),
+ 0, sizeof (uint64_t));
}
return (nvs_embedded(nvs, EMBEDDED_NVL(nvp)));
@@ -2933,18 +2933,18 @@ nvpair_native_embedded_array(nvstream_t *nvs, nvpair_t *nvp)
/*
* Null out pointers that are meaningless in the packed
* structure. The addresses may not be aligned, so we have
- * to use bzero.
+ * to use memset.
*/
- bzero(value, len);
+ memset(value, 0, len);
for (i = 0; i < NVP_NELEM(nvp); i++, packed++)
/*
* Null out the pointer that is meaningless in the
* packed structure. The address may not be aligned,
- * so we have to use bzero.
+ * so we have to use memset.
*/
- bzero((char *)packed + offsetof(nvlist_t, nvl_priv),
- sizeof (uint64_t));
+ memset((char *)packed + offsetof(nvlist_t, nvl_priv),
+ 0, sizeof (uint64_t));
}
return (nvs_embedded_nvl_array(nvs, nvp, NULL));
@@ -2961,9 +2961,9 @@ nvpair_native_string_array(nvstream_t *nvs, nvpair_t *nvp)
/*
* Null out pointers that are meaningless in the packed
* structure. The addresses may not be aligned, so we have
- * to use bzero.
+ * to use memset.
*/
- bzero(strp, NVP_NELEM(nvp) * sizeof (uint64_t));
+ memset(strp, 0, NVP_NELEM(nvp) * sizeof (uint64_t));
break;
}
case NVS_OP_DECODE: {
@@ -2988,9 +2988,9 @@ nvs_native_nvp_op(nvstream_t *nvs, nvpair_t *nvp)
int ret = 0;
/*
- * We do the initial bcopy of the data before we look at
+ * We do the initial memcpy of the data before we look at
* the nvpair type, because when we're decoding, we won't
- * have the correct values for the pair until we do the bcopy.
+ * have the correct values for the pair until we do the memcpy.
*/
switch (nvs->nvs_op) {
case NVS_OP_ENCODE:
@@ -3086,7 +3086,7 @@ nvs_native_nvpair(nvstream_t *nvs, nvpair_t *nvp, size_t *size)
/* try to read the size value from the stream */
if (native->n_curr + sizeof (int32_t) > native->n_end)
return (EFAULT);
- bcopy(native->n_curr, &decode_len, sizeof (int32_t));
+ memcpy(&decode_len, native->n_curr, sizeof (int32_t));
/* sanity check the size value */
if (decode_len < 0 ||
@@ -3451,7 +3451,7 @@ nvs_xdr_nvp_op(nvstream_t *nvs, nvpair_t *nvp)
int i;
if (nvs->nvs_op == NVS_OP_DECODE)
- bzero(buf, len); /* don't trust packed data */
+ memset(buf, 0, len); /* don't trust packed data */
for (i = 0; i < nelem; i++) {
if (buflen <= len)
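
The comment retained in native_cp() is the key to most hunks in this file: the packed nvlist stream guarantees no alignment, so fixed-width values are moved with memcpy()/memset() instead of being dereferenced through casted pointers (the decode_len read in nvs_native_nvpair() is the same idiom). A minimal demonstration:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int
main(void)
{
	uint8_t stream[16] = {0};
	int32_t v = 1234, len;

	/*
	 * *(int32_t *)(stream + 1) would be undefined behavior on
	 * strict-alignment machines; memcpy is safe at any offset.
	 */
	memcpy(stream + 1, &v, sizeof (v));	/* encode */
	memcpy(&len, stream + 1, sizeof (len));	/* decode */
	printf("%d\n", len);			/* 1234 */
	return (0);
}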
diff --git a/module/os/freebsd/spl/acl_common.c b/module/os/freebsd/spl/acl_common.c
index 7fd0e36e1ba7..718d0de8b31a 100644
--- a/module/os/freebsd/spl/acl_common.c
+++ b/module/os/freebsd/spl/acl_common.c
@@ -738,7 +738,7 @@ out:
static void
acevals_init(acevals_t *vals, uid_t key)
{
- bzero(vals, sizeof (*vals));
+ memset(vals, 0, sizeof (*vals));
vals->allowed = ACE_MASK_UNDEFINED;
vals->denied = ACE_MASK_UNDEFINED;
vals->mask = ACE_MASK_UNDEFINED;
diff --git a/module/os/freebsd/spl/sha256c.c b/module/os/freebsd/spl/sha256c.c
index 241cf8c9ae76..52cf0df6c99d 100644
--- a/module/os/freebsd/spl/sha256c.c
+++ b/module/os/freebsd/spl/sha256c.c
@@ -301,7 +301,7 @@ SHA256_Final(unsigned char digest[static SHA256_DIGEST_LENGTH], SHA256_CTX *ctx)
be32enc_vect(digest, ctx->state, SHA256_DIGEST_LENGTH);
/* Clear the context state */
- explicit_bzero(ctx, sizeof (*ctx));
+ memset(ctx, 0, sizeof (*ctx));
}
/* SHA-224: ******************************************************* */
@@ -351,7 +351,7 @@ SHA224_Final(unsigned char digest[static SHA224_DIGEST_LENGTH], SHA224_CTX *ctx)
be32enc_vect(digest, ctx->state, SHA224_DIGEST_LENGTH);
/* Clear the context state */
- explicit_bzero(ctx, sizeof (*ctx));
+ memset(ctx, 0, sizeof (*ctx));
}
#ifdef WEAK_REFS
diff --git a/module/os/freebsd/spl/sha512c.c b/module/os/freebsd/spl/sha512c.c
index 146f338f0ed4..254cc21565c1 100644
--- a/module/os/freebsd/spl/sha512c.c
+++ b/module/os/freebsd/spl/sha512c.c
@@ -333,7 +333,7 @@ SHA512_Final(unsigned char digest[static SHA512_DIGEST_LENGTH], SHA512_CTX *ctx)
be64enc_vect(digest, ctx->state, SHA512_DIGEST_LENGTH);
/* Clear the context state */
- explicit_bzero(ctx, sizeof (*ctx));
+ memset(ctx, 0, sizeof (*ctx));
}
/* SHA-512t: ******************************************************** */
@@ -377,7 +377,7 @@ SHA512_224_Final(unsigned char digest[static SHA512_224_DIGEST_LENGTH],
be64enc_vect(digest, ctx->state, SHA512_224_DIGEST_LENGTH);
/* Clear the context state */
- explicit_bzero(ctx, sizeof (*ctx));
+ memset(ctx, 0, sizeof (*ctx));
}
void
@@ -417,7 +417,7 @@ SHA512_256_Final(unsigned char digest[static SHA512_256_DIGEST_LENGTH],
be64enc_vect(digest, ctx->state, SHA512_256_DIGEST_LENGTH);
/* Clear the context state */
- explicit_bzero(ctx, sizeof (*ctx));
+ memset(ctx, 0, sizeof (*ctx));
}
/* ** SHA-384: ******************************************************** */
@@ -467,7 +467,7 @@ SHA384_Final(unsigned char digest[static SHA384_DIGEST_LENGTH], SHA384_CTX *ctx)
be64enc_vect(digest, ctx->state, SHA384_DIGEST_LENGTH);
/* Clear the context state */
- explicit_bzero(ctx, sizeof (*ctx));
+ memset(ctx, 0, sizeof (*ctx));
}
#if 0
diff --git a/module/os/freebsd/spl/spl_acl.c b/module/os/freebsd/spl/spl_acl.c
index 74c26d03f87f..4d67cbb183ec 100644
--- a/module/os/freebsd/spl/spl_acl.c
+++ b/module/os/freebsd/spl/spl_acl.c
@@ -40,7 +40,7 @@ struct zfs2bsd {
int zb_bsd;
};
-struct zfs2bsd perms[] = {{ACE_READ_DATA, ACL_READ_DATA},
+static const struct zfs2bsd perms[] = {{ACE_READ_DATA, ACL_READ_DATA},
{ACE_WRITE_DATA, ACL_WRITE_DATA},
{ACE_EXECUTE, ACL_EXECUTE},
{ACE_APPEND_DATA, ACL_APPEND_DATA},
@@ -56,7 +56,7 @@ struct zfs2bsd perms[] = {{ACE_READ_DATA, ACL_READ_DATA},
{ACE_SYNCHRONIZE, ACL_SYNCHRONIZE},
{0, 0}};
-struct zfs2bsd flags[] = {{ACE_FILE_INHERIT_ACE,
+static const struct zfs2bsd flags[] = {{ACE_FILE_INHERIT_ACE,
ACL_ENTRY_FILE_INHERIT},
{ACE_DIRECTORY_INHERIT_ACE,
ACL_ENTRY_DIRECTORY_INHERIT},
@@ -122,7 +122,7 @@ acl_from_aces(struct acl *aclp, const ace_t *aces, int nentries)
return (EINVAL);
}
- bzero(aclp, sizeof (*aclp));
+ memset(aclp, 0, sizeof (*aclp));
aclp->acl_maxcnt = ACL_MAX_ENTRIES;
aclp->acl_cnt = nentries;
@@ -177,7 +177,7 @@ aces_from_acl(ace_t *aces, int *nentries, const struct acl *aclp)
const struct acl_entry *entry;
ace_t *ace;
- bzero(aces, sizeof (*aces) * aclp->acl_cnt);
+ memset(aces, 0, sizeof (*aces) * aclp->acl_cnt);
*nentries = aclp->acl_cnt;
diff --git a/module/os/freebsd/spl/spl_vfs.c b/module/os/freebsd/spl/spl_vfs.c
index 3f4feb140d5e..ff11f5d7acb8 100644
--- a/module/os/freebsd/spl/spl_vfs.c
+++ b/module/os/freebsd/spl/spl_vfs.c
@@ -85,7 +85,7 @@ vfs_setmntopt(vfs_t *vfsp, const char *name, const char *arg,
} else {
opt->len = strlen(arg) + 1;
opt->value = malloc(opt->len, M_MOUNT, M_WAITOK);
- bcopy(arg, opt->value, opt->len);
+ memcpy(opt->value, arg, opt->len);
}
MNT_ILOCK(vfsp);
diff --git a/module/os/freebsd/spl/spl_zlib.c b/module/os/freebsd/spl/spl_zlib.c
index fa2b0a2b6903..8bd3bdedf268 100644
--- a/module/os/freebsd/spl/spl_zlib.c
+++ b/module/os/freebsd/spl/spl_zlib.c
@@ -141,10 +141,9 @@ int
z_compress_level(void *dest, size_t *destLen, const void *source,
size_t sourceLen, int level)
{
- z_stream stream;
+ z_stream stream = {0};
int err;
- bzero(&stream, sizeof (stream));
stream.next_in = (Byte *)source;
stream.avail_in = (uInt)sourceLen;
stream.next_out = dest;
@@ -196,11 +195,9 @@ z_compress_level(void *dest, size_t *destLen, const void *source,
int
z_uncompress(void *dest, size_t *destLen, const void *source, size_t sourceLen)
{
- z_stream stream;
+ z_stream stream = {0};
int err;
- bzero(&stream, sizeof (stream));
-
stream.next_in = (Byte *)source;
stream.avail_in = (uInt)sourceLen;
stream.next_out = dest;
diff --git a/module/os/freebsd/spl/spl_zone.c b/module/os/freebsd/spl/spl_zone.c
index bd3f019b2fa6..658ef0bf056d 100644
--- a/module/os/freebsd/spl/spl_zone.c
+++ b/module/os/freebsd/spl/spl_zone.c
@@ -184,7 +184,7 @@ zone_dataset_visible(const char *dataset, int *write)
LIST_FOREACH(zd, head, zd_next) {
len = strlen(zd->zd_dataset);
if (strlen(dataset) >= len &&
- bcmp(dataset, zd->zd_dataset, len) == 0 &&
+ memcmp(dataset, zd->zd_dataset, len) == 0 &&
(dataset[len] == '\0' || dataset[len] == '/' ||
dataset[len] == '@')) {
if (write)
@@ -206,7 +206,7 @@ zone_dataset_visible(const char *dataset, int *write)
if (dataset[len - 1] == '/')
len--; /* Ignore trailing slash */
if (len < strlen(zd->zd_dataset) &&
- bcmp(dataset, zd->zd_dataset, len) == 0 &&
+ memcmp(dataset, zd->zd_dataset, len) == 0 &&
zd->zd_dataset[len] == '/') {
if (write)
*write = 0;
diff --git a/module/os/freebsd/zfs/abd_os.c b/module/os/freebsd/zfs/abd_os.c
index 722a8898cde8..5d2bae5cfeb4 100644
--- a/module/os/freebsd/zfs/abd_os.c
+++ b/module/os/freebsd/zfs/abd_os.c
@@ -250,7 +250,7 @@ abd_alloc_zero_scatter(void)
n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
abd_zero_buf = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
- bzero(abd_zero_buf, PAGE_SIZE);
+ memset(abd_zero_buf, 0, PAGE_SIZE);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER | ABD_FLAG_ZEROS;
diff --git a/module/os/freebsd/zfs/crypto_os.c b/module/os/freebsd/zfs/crypto_os.c
index 73083f59f532..ddb95b0d8d19 100644
--- a/module/os/freebsd/zfs/crypto_os.c
+++ b/module/os/freebsd/zfs/crypto_os.c
@@ -69,11 +69,11 @@ crypto_mac_init(struct hmac_ctx *ctx, const crypto_key_t *c_key)
/*
* This code is based on the similar code in geom/eli/g_eli_hmac.c
*/
- explicit_bzero(key, sizeof (key));
+ memset(key, 0, sizeof (key));
if (c_key->ck_length == 0)
/* do nothing */;
else if (cl_bytes <= SHA512_HMAC_BLOCK_SIZE)
- bcopy(c_key->ck_data, key, cl_bytes);
+ memcpy(key, c_key->ck_data, cl_bytes);
else {
/*
* If key is longer than 128 bytes reset it to
@@ -89,16 +89,16 @@ crypto_mac_init(struct hmac_ctx *ctx, const crypto_key_t *c_key)
k_ipad[i] = key[i] ^ 0x36;
k_opad[i] = key[i] ^ 0x5c;
}
- explicit_bzero(key, sizeof (key));
+ memset(key, 0, sizeof (key));
/* Start inner SHA512. */
SHA512_Init(&ctx->innerctx);
SHA512_Update(&ctx->innerctx, k_ipad, sizeof (k_ipad));
- explicit_bzero(k_ipad, sizeof (k_ipad));
+ memset(k_ipad, 0, sizeof (k_ipad));
/* Start outer SHA512. */
SHA512_Init(&ctx->outerctx);
SHA512_Update(&ctx->outerctx, k_opad, sizeof (k_opad));
- explicit_bzero(k_opad, sizeof (k_opad));
+ memset(k_opad, 0, sizeof (k_opad));
}
void
@@ -119,12 +119,12 @@ crypto_mac_final(struct hmac_ctx *ctx, void *md, size_t mdsize)
SHA512_Update(&ctx->outerctx, digest, sizeof (digest));
SHA512_Final(digest, &ctx->outerctx);
- explicit_bzero(ctx, sizeof (*ctx));
+ memset(ctx, 0, sizeof (*ctx));
/* mdsize == 0 means "Give me the whole hash!" */
if (mdsize == 0)
mdsize = SHA512_DIGEST_LENGTH;
- bcopy(digest, md, mdsize);
- explicit_bzero(digest, sizeof (digest));
+ memcpy(md, digest, mdsize);
+ memset(digest, 0, sizeof (digest));
}
void
@@ -156,7 +156,7 @@ freebsd_crypt_freesession(freebsd_crypt_session_t *sess)
{
mtx_destroy(&sess->fs_lock);
crypto_freesession(sess->fs_sid);
- explicit_bzero(sess, sizeof (*sess));
+ memset(sess, 0, sizeof (*sess));
}
static int
@@ -243,7 +243,7 @@ int
freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
const struct zio_crypt_info *c_info, crypto_key_t *key)
{
- struct crypto_session_params csp;
+ struct crypto_session_params csp = {0};
int error = 0;
#ifdef FCRYPTO_DEBUG
@@ -259,7 +259,6 @@ freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
}
printf("}\n");
#endif
- bzero(&csp, sizeof (csp));
csp.csp_mode = CSP_MODE_AEAD;
csp.csp_cipher_key = key->ck_data;
csp.csp_cipher_klen = key->ck_length / 8;
@@ -364,7 +363,7 @@ freebsd_crypt_uio(boolean_t encrypt,
crp->crp_payload_length = datalen;
crp->crp_digest_start = auth_len + datalen;
- bcopy(ivbuf, crp->crp_iv, ZIO_DATA_IV_LEN);
+ memcpy(crp->crp_iv, ivbuf, ZIO_DATA_IV_LEN);
error = zfs_crypto_dispatch(session, crp);
crypto_freereq(crp);
out:
@@ -384,7 +383,7 @@ int
freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
const struct zio_crypt_info *c_info, crypto_key_t *key)
{
- struct cryptoini cria, crie, *crip;
+ struct cryptoini cria = {0}, crie = {0}, *crip;
struct enc_xform *xform;
struct auth_hash *xauth;
int error = 0;
@@ -452,9 +451,6 @@ freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
xauth->name, xauth->keysize);
#endif
- bzero(&crie, sizeof (crie));
- bzero(&cria, sizeof (cria));
-
crie.cri_alg = xform->type;
crie.cri_key = key->ck_data;
crie.cri_klen = key->ck_length;
@@ -466,7 +462,7 @@ freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
cria.cri_next = &crie;
crie.cri_next = NULL;
crip = &cria;
- // Everything else is bzero'd
+ // Everything else is zero-initialised
error = crypto_newsession(&sid, crip,
CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
@@ -595,7 +591,7 @@ freebsd_crypt_uio(boolean_t encrypt,
enc_desc->crd_inject = auth_len;
enc_desc->crd_alg = xform->type;
enc_desc->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
- bcopy(ivbuf, enc_desc->crd_iv, ZIO_DATA_IV_LEN);
+ memcpy(enc_desc->crd_iv, ivbuf, ZIO_DATA_IV_LEN);
enc_desc->crd_next = NULL;
#ifdef FCRYPTO_DEBUG
diff --git a/module/os/freebsd/zfs/dmu_os.c b/module/os/freebsd/zfs/dmu_os.c
index 38488dbda6f4..a5f486b95db4 100644
--- a/module/os/freebsd/zfs/dmu_os.c
+++ b/module/os/freebsd/zfs/dmu_os.c
@@ -119,7 +119,7 @@ dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
db->db_offset + bufoff);
thiscpy = MIN(PAGESIZE, tocpy - copied);
va = zfs_map_page(*ma, &sf);
- bcopy(va, (char *)db->db_data + bufoff, thiscpy);
+ memcpy((char *)db->db_data + bufoff, va, thiscpy);
zfs_unmap_page(sf);
ma += 1;
bufoff += PAGESIZE;
@@ -189,7 +189,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
ASSERT3U(db->db_size, >, PAGE_SIZE);
bufoff = IDX_TO_OFF(m->pindex) % db->db_size;
va = zfs_map_page(m, &sf);
- bcopy((char *)db->db_data + bufoff, va, PAGESIZE);
+ memcpy(va, (char *)db->db_data + bufoff, PAGESIZE);
zfs_unmap_page(sf);
vm_page_valid(m);
dmu_page_lock(m);
@@ -231,7 +231,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
tocpy = MIN(db->db_size - bufoff, PAGESIZE - pgoff);
ASSERT3S(tocpy, >=, 0);
if (m != bogus_page)
- bcopy((char *)db->db_data + bufoff, va + pgoff, tocpy);
+ memcpy(va + pgoff, (char *)db->db_data + bufoff, tocpy);
pgoff += tocpy;
ASSERT3S(pgoff, >=, 0);
@@ -287,7 +287,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
#endif
if (pgoff != 0) {
ASSERT3P(m, !=, bogus_page);
- bzero(va + pgoff, PAGESIZE - pgoff);
+ memset(va + pgoff, 0, PAGESIZE - pgoff);
zfs_unmap_page(sf);
vm_page_valid(m);
}
@@ -309,11 +309,11 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
bufoff = IDX_TO_OFF(m->pindex) % db->db_size;
tocpy = MIN(db->db_size - bufoff, PAGESIZE);
va = zfs_map_page(m, &sf);
- bcopy((char *)db->db_data + bufoff, va, tocpy);
+ memcpy(va, (char *)db->db_data + bufoff, tocpy);
if (tocpy < PAGESIZE) {
ASSERT3S(i, ==, *rahead - 1);
ASSERT3U((db->db_size & PAGE_MASK), !=, 0);
- bzero(va + tocpy, PAGESIZE - tocpy);
+ memset(va + tocpy, 0, PAGESIZE - tocpy);
}
zfs_unmap_page(sf);
vm_page_valid(m);
diff --git a/module/os/freebsd/zfs/hkdf.c b/module/os/freebsd/zfs/hkdf.c
index ad5d67541ad2..3a3c426fa48a 100644
--- a/module/os/freebsd/zfs/hkdf.c
+++ b/module/os/freebsd/zfs/hkdf.c
@@ -63,7 +63,7 @@ hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len,
crypto_mac_update(&ctx, info, info_len);
crypto_mac_update(&ctx, &c, 1);
crypto_mac_final(&ctx, T, SHA512_DIGEST_LENGTH);
- bcopy(T, out_buf + pos,
+ memcpy(out_buf + pos, T,
(i != N) ? SHA512_DIGEST_LENGTH : (out_len - pos));
pos += SHA512_DIGEST_LENGTH;
}
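
This tail is the output step of HKDF-Expand (RFC 5869): round i MACs T(i-1) || info || i and emits a full SHA-512 digest, except for the final round, which emits only the out_len - pos remainder. A runnable sketch of the loop shape, with the extract key omitted and prf() a deliberately fake stand-in for HMAC-SHA512:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define	DIGEST_LEN	64	/* SHA-512 */

/* NOT a real PRF: a stand-in so the loop structure below runs. */
static void
prf(const uint8_t *t, size_t tlen, const uint8_t *info, size_t infolen,
    uint8_t c, uint8_t out[DIGEST_LEN])
{
	(void) t; (void) tlen; (void) info; (void) infolen;
	memset(out, c, DIGEST_LEN);
}

static void
hkdf_expand_sketch(const uint8_t *info, size_t infolen,
    uint8_t *out, size_t outlen)
{
	uint8_t T[DIGEST_LEN] = {0};
	size_t pos = 0, tlen = 0, i;
	size_t N = (outlen + DIGEST_LEN - 1) / DIGEST_LEN;

	for (i = 1; i <= N; i++) {
		prf(T, tlen, info, infolen, (uint8_t)i, T);
		/* full digest each round, partial copy on the last */
		memcpy(out + pos, T,
		    (i != N) ? DIGEST_LEN : (outlen - pos));
		pos += DIGEST_LEN;
		tlen = DIGEST_LEN;
	}
}

int
main(void)
{
	uint8_t out[100];

	hkdf_expand_sketch((const uint8_t *)"ctx", 3, out, sizeof (out));
	printf("%u %u\n", out[0], out[99]);	/* 1 2 */
	return (0);
}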
diff --git a/module/os/freebsd/zfs/zfs_acl.c b/module/os/freebsd/zfs/zfs_acl.c
index c0aa6732717c..aec8cb02d62b 100644
--- a/module/os/freebsd/zfs/zfs_acl.c
+++ b/module/os/freebsd/zfs/zfs_acl.c
@@ -689,10 +689,10 @@ zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
zobjacep = (zfs_object_ace_t *)aceptr;
aceobjp = (ace_object_t *)acep;
- bcopy(aceobjp->a_obj_type, zobjacep->z_object_type,
+ memcpy(zobjacep->z_object_type, aceobjp->a_obj_type,
sizeof (aceobjp->a_obj_type));
- bcopy(aceobjp->a_inherit_obj_type,
- zobjacep->z_inherit_type,
+ memcpy(zobjacep->z_inherit_type,
+ aceobjp->a_inherit_obj_type,
sizeof (aceobjp->a_inherit_obj_type));
acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
break;
@@ -739,11 +739,11 @@ zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
}
zobjacep = (zfs_object_ace_t *)zacep;
objacep = (ace_object_t *)acep;
- bcopy(zobjacep->z_object_type,
- objacep->a_obj_type,
+ memcpy(objacep->a_obj_type,
+ zobjacep->z_object_type,
sizeof (zobjacep->z_object_type));
- bcopy(zobjacep->z_inherit_type,
- objacep->a_inherit_obj_type,
+ memcpy(objacep->a_inherit_obj_type,
+ zobjacep->z_inherit_type,
sizeof (zobjacep->z_inherit_type));
ace_size = sizeof (ace_object_t);
break;
@@ -1094,7 +1094,7 @@ zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
- bcopy(znode_acl.z_ace_data, aclnode->z_acldata,
+ memcpy(aclnode->z_acldata, znode_acl.z_ace_data,
aclnode->z_size);
}
} else {
@@ -1282,7 +1282,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
- bcopy(aclnode->z_acldata, start,
+ memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
@@ -1564,7 +1564,7 @@ zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
data2sz = aclp->z_ops->ace_data(acep, &data2);
VERIFY3U(data2sz, ==, data1sz);
- bcopy(data1, data2, data2sz);
+ memcpy(data2, data1, data2sz);
}
aclp->z_acl_count++;
@@ -1633,7 +1633,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__);
} else
ASSERT3P(dzp->z_vnode, ==, NULL);
- bzero(acl_ids, sizeof (zfs_acl_ids_t));
+ memset(acl_ids, 0, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);
if (vsecp)
@@ -1847,7 +1847,7 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
- bcopy(aclnode->z_acldata, start,
+ memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
diff --git a/module/os/freebsd/zfs/zfs_ctldir.c b/module/os/freebsd/zfs/zfs_ctldir.c
index 6692e7b317ba..4db5cb583327 100644
--- a/module/os/freebsd/zfs/zfs_ctldir.c
+++ b/module/os/freebsd/zfs/zfs_ctldir.c
@@ -721,7 +721,7 @@ zfsctl_root_vptocnp(struct vop_vptocnp_args *ap)
VOP_UNLOCK1(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= sizeof (dotzfs_name);
- bcopy(dotzfs_name, ap->a_buf + *ap->a_buflen, sizeof (dotzfs_name));
+ memcpy(ap->a_buf + *ap->a_buflen, dotzfs_name, sizeof (dotzfs_name));
return (0);
}
@@ -1214,7 +1214,7 @@ zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
VOP_UNLOCK1(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= len;
- bcopy(node->sn_name, ap->a_buf + *ap->a_buflen, len);
+ memcpy(ap->a_buf + *ap->a_buflen, node->sn_name, len);
}
vfs_unbusy(mp);
#if __FreeBSD_version >= 1300045
diff --git a/module/os/freebsd/zfs/zfs_vnops_os.c b/module/os/freebsd/zfs/zfs_vnops_os.c
index 21d121a15846..256f495eca58 100644
--- a/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -539,7 +539,7 @@ mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio)
error = dmu_read(os, zp->z_id, start, bytes, va,
DMU_READ_PREFETCH);
if (bytes != PAGESIZE && error == 0)
- bzero(va + bytes, PAGESIZE - bytes);
+ memset(va + bytes, 0, PAGESIZE - bytes);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300081
@@ -5273,7 +5273,7 @@ zfs_create_attrname(int attrnamespace, const char *name, char *attrname,
{
const char *namespace, *prefix, *suffix;
- bzero(attrname, size);
+ memset(attrname, 0, size);
switch (attrnamespace) {
case EXTATTR_NAMESPACE_USER:
@@ -6142,7 +6142,7 @@ zfs_vptocnp(struct vop_vptocnp_args *ap)
}
if (error == 0) {
*ap->a_buflen -= len;
- bcopy(name, ap->a_buf + *ap->a_buflen, len);
+ memcpy(ap->a_buf + *ap->a_buflen, name, len);
*ap->a_vpp = ZTOV(dzp);
}
ZFS_EXIT(zfsvfs);
diff --git a/module/os/freebsd/zfs/zfs_znode.c b/module/os/freebsd/zfs/zfs_znode.c
index c9efdfc5b267..fe73d2af4918 100644
--- a/module/os/freebsd/zfs/zfs_znode.c
+++ b/module/os/freebsd/zfs/zfs_znode.c
@@ -1975,7 +1975,7 @@ zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
complen = strlen(component);
path -= complen;
ASSERT3P(path, >=, buf);
- bcopy(component, path, complen);
+ memcpy(path, component, complen);
obj = pobj;
if (sa_hdl != hdl) {
diff --git a/module/os/freebsd/zfs/zio_crypt.c b/module/os/freebsd/zfs/zio_crypt.c
index a50b8058a945..0410ddd65a5c 100644
--- a/module/os/freebsd/zfs/zio_crypt.c
+++ b/module/os/freebsd/zfs/zio_crypt.c
@@ -211,10 +211,10 @@ zio_crypt_key_destroy_early(zio_crypt_key_t *key)
rw_destroy(&key->zk_salt_lock);
/* free crypto templates */
- bzero(&key->zk_session, sizeof (key->zk_session));
+ memset(&key->zk_session, 0, sizeof (key->zk_session));
/* zero out sensitive data */
- bzero(key, sizeof (zio_crypt_key_t));
+ memset(key, 0, sizeof (zio_crypt_key_t));
}
void
@@ -242,7 +242,7 @@ zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
return (ENOTSUP);
keydata_len = zio_crypt_table[crypt].ci_keylen;
- bzero(key, sizeof (zio_crypt_key_t));
+ memset(key, 0, sizeof (zio_crypt_key_t));
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
/* fill keydata buffers and salt with random data */
@@ -324,7 +324,7 @@ zio_crypt_key_change_salt(zio_crypt_key_t *key)
goto out_unlock;
/* assign the salt and reset the usage count */
- bcopy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
+ memcpy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
key->zk_salt_count = 0;
freebsd_crypt_freesession(&key->zk_session);
@@ -352,7 +352,7 @@ zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
rw_enter(&key->zk_salt_lock, RW_READER);
- bcopy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
+ memcpy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
ZFS_CURRENT_MAX_SALT_USES);
@@ -450,9 +450,8 @@ zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
* the plain text (source) to the cipher buffer (dest).
* We set iovecs[0] -- the authentication data -- below.
*/
- bcopy((void*)key->zk_master_keydata, keydata_out, keydata_len);
- bcopy((void*)key->zk_hmac_keydata, hmac_keydata_out,
- SHA512_HMAC_KEYLEN);
+ memcpy(keydata_out, key->zk_master_keydata, keydata_len);
+ memcpy(hmac_keydata_out, key->zk_hmac_keydata, SHA512_HMAC_KEYLEN);
iovecs[1].iov_base = keydata_out;
iovecs[1].iov_len = keydata_len;
iovecs[2].iov_base = hmac_keydata_out;
@@ -529,12 +528,11 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
*/
dst = key->zk_master_keydata;
src = keydata;
-
- bcopy(src, dst, keydata_len);
+ memcpy(dst, src, keydata_len);
dst = key->zk_hmac_keydata;
src = hmac_keydata;
- bcopy(src, dst, SHA512_HMAC_KEYLEN);
+ memcpy(dst, src, SHA512_HMAC_KEYLEN);
iovecs[1].iov_base = key->zk_master_keydata;
iovecs[1].iov_len = keydata_len;
@@ -618,7 +616,7 @@ zio_crypt_generate_iv(uint8_t *ivbuf)
return (0);
error:
- bzero(ivbuf, ZIO_DATA_IV_LEN);
+ memset(ivbuf, 0, ZIO_DATA_IV_LEN);
return (ret);
}
@@ -633,7 +631,7 @@ zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
crypto_mac(&key->zk_hmac_key, data, datalen,
raw_digestbuf, SHA512_DIGEST_LENGTH);
- bcopy(raw_digestbuf, digestbuf, digestlen);
+ memcpy(digestbuf, raw_digestbuf, digestlen);
return (0);
}
@@ -650,8 +648,8 @@ zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
if (ret != 0)
return (ret);
- bcopy(digestbuf, salt, ZIO_DATA_SALT_LEN);
- bcopy(digestbuf + ZIO_DATA_SALT_LEN, ivbuf, ZIO_DATA_IV_LEN);
+ memcpy(salt, digestbuf, ZIO_DATA_SALT_LEN);
+ memcpy(ivbuf, digestbuf + ZIO_DATA_SALT_LEN, ZIO_DATA_IV_LEN);
return (0);
}
@@ -674,18 +672,18 @@ zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv)
ASSERT(BP_IS_ENCRYPTED(bp));
if (!BP_SHOULD_BYTESWAP(bp)) {
- bcopy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
- bcopy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
- bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
+ memcpy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
+ memcpy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
+ memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, val32);
} else {
- bcopy(salt, &val64, sizeof (uint64_t));
+ memcpy(&val64, salt, sizeof (uint64_t));
bp->blk_dva[2].dva_word[0] = BSWAP_64(val64);
- bcopy(iv, &val64, sizeof (uint64_t));
+ memcpy(&val64, iv, sizeof (uint64_t));
bp->blk_dva[2].dva_word[1] = BSWAP_64(val64);
- bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
+ memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, BSWAP_32(val32));
}
}
@@ -700,26 +698,26 @@ zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv)
/* for convenience, so callers don't need to check */
if (BP_IS_AUTHENTICATED(bp)) {
- bzero(salt, ZIO_DATA_SALT_LEN);
- bzero(iv, ZIO_DATA_IV_LEN);
+ memset(salt, 0, ZIO_DATA_SALT_LEN);
+ memset(iv, 0, ZIO_DATA_IV_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
- bcopy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
- bcopy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
+ memcpy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
+ memcpy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
val32 = (uint32_t)BP_GET_IV2(bp);
- bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
+ memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
} else {
val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]);
- bcopy(&val64, salt, sizeof (uint64_t));
+ memcpy(salt, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]);
- bcopy(&val64, iv, sizeof (uint64_t));
+ memcpy(iv, &val64, sizeof (uint64_t));
val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp));
- bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
+ memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
}
}
@@ -732,14 +730,14 @@ zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac)
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET);
if (!BP_SHOULD_BYTESWAP(bp)) {
- bcopy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
- bcopy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
+ memcpy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
+ memcpy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
} else {
- bcopy(mac, &val64, sizeof (uint64_t));
+ memcpy(&val64, mac, sizeof (uint64_t));
bp->blk_cksum.zc_word[2] = BSWAP_64(val64);
- bcopy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
+ memcpy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
bp->blk_cksum.zc_word[3] = BSWAP_64(val64);
}
}
@@ -753,20 +751,20 @@ zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac)
/* for convenience, so callers don't need to check */
if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
- bzero(mac, ZIO_DATA_MAC_LEN);
+ memset(mac, 0, ZIO_DATA_MAC_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
- bcopy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
- bcopy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
+ memcpy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
+ memcpy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
sizeof (uint64_t));
} else {
val64 = BSWAP_64(bp->blk_cksum.zc_word[2]);
- bcopy(&val64, mac, sizeof (uint64_t));
+ memcpy(mac, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_cksum.zc_word[3]);
- bcopy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
+ memcpy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
}
}
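
zio_crypt_encode_mac_bp()/zio_crypt_decode_mac_bp() above, like the params variants before them, keep one invariant: with a native-order block pointer the MAC/salt/IV bytes are memcpy()'d straight into the 64-bit words, while a byteswapped pointer bounces them through a local uint64_t and BSWAP_64() so the on-disk byte layout comes out identical either way. Reduced to a single word (BSWAP_64 mapped here to a compiler builtin, not the ZFS macro):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define	BSWAP_64(x)	__builtin_bswap64(x)	/* stand-in for ZFS's macro */

/* Store 8 MAC bytes into one blkptr-style word, honoring byteswap state. */
static void
encode_word(uint64_t *word, const uint8_t mac[8], int should_byteswap)
{
	uint64_t val;

	if (!should_byteswap) {
		memcpy(word, mac, sizeof (uint64_t));
	} else {
		memcpy(&val, mac, sizeof (uint64_t));	/* alignment-safe */
		*word = BSWAP_64(val);
	}
}

int
main(void)
{
	const uint8_t mac[8] = {1, 2, 3, 4, 5, 6, 7, 8};
	uint64_t native, swapped;

	encode_word(&native, mac, 0);
	encode_word(&swapped, mac, 1);
	printf("%016llx %016llx\n",
	    (unsigned long long)native, (unsigned long long)swapped);
	return (0);
}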
@@ -775,8 +773,8 @@ zio_crypt_encode_mac_zil(void *data, uint8_t *mac)
{
zil_chain_t *zilc = data;
- bcopy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
- bcopy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
+ memcpy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
+ memcpy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
}
@@ -790,8 +788,8 @@ zio_crypt_decode_mac_zil(const void *data, uint8_t *mac)
*/
const zil_chain_t *zilc = data;
- bcopy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
- bcopy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
+ memcpy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
+ memcpy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
sizeof (uint64_t));
}
@@ -818,7 +816,7 @@ zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen)
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
- bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]),
+ memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp),
DN_MAX_BONUS_LEN(dnp));
}
}
@@ -946,7 +944,7 @@ zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version,
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
- bcopy(&bab, *aadp, bab_len);
+ memcpy(*aadp, &bab, bab_len);
*aadp += bab_len;
*aad_len += bab_len;
}
@@ -961,7 +959,7 @@ zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
uint8_t tmp_dncore[offsetof(dnode_phys_t, dn_blkptr)];
/* authenticate the core dnode (masking out non-portable bits) */
- bcopy(dnp, tmp_dncore, sizeof (tmp_dncore));
+ memcpy(tmp_dncore, dnp, sizeof (tmp_dncore));
adnp = (dnode_phys_t *)tmp_dncore;
if (le_bswap) {
adnp->dn_datablkszsec = BSWAP_16(adnp->dn_datablkszsec);
@@ -1057,7 +1055,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
crypto_mac_final(ctx, raw_portable_mac, SHA512_DIGEST_LENGTH);
- bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
+ memcpy(portable_mac, raw_portable_mac, ZIO_OBJSET_MAC_LEN);
/*
* This is necessary here as we check next whether
@@ -1086,7 +1084,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
(datalen <= OBJSET_PHYS_SIZE_V1)) {
- bzero(local_mac, ZIO_OBJSET_MAC_LEN);
+ memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (0);
}
@@ -1129,13 +1127,13 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
crypto_mac_final(ctx, raw_local_mac, SHA512_DIGEST_LENGTH);
- bcopy(raw_local_mac, local_mac, ZIO_OBJSET_MAC_LEN);
+ memcpy(local_mac, raw_local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
error:
- bzero(portable_mac, ZIO_OBJSET_MAC_LEN);
- bzero(local_mac, ZIO_OBJSET_MAC_LEN);
+ memset(portable_mac, 0, ZIO_OBJSET_MAC_LEN);
+ memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (ret);
}
@@ -1172,11 +1170,11 @@ zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf,
SHA2Final(digestbuf, &ctx);
if (generate) {
- bcopy(digestbuf, cksum, ZIO_DATA_MAC_LEN);
+ memcpy(cksum, digestbuf, ZIO_DATA_MAC_LEN);
return (0);
}
- if (bcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0) {
+ if (memcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0) {
#ifdef FCRYPTO_DEBUG
printf("%s(%d): Setting ECKSUM\n", __FUNCTION__, __LINE__);
#endif
@@ -1264,7 +1262,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
src = cipherbuf;
dst = plainbuf;
}
- bcopy(src, dst, datalen);
+ memcpy(dst, src, datalen);
/* Find the start and end record of the log block. */
zilc = (zil_chain_t *)src;
@@ -1303,7 +1301,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
* the embedded checksum will not have been calculated yet, so we don't
* authenticate that.
*/
- bcopy(src, aadp, sizeof (zil_chain_t) - sizeof (zio_eck_t));
+ memcpy(aadp, src, sizeof (zil_chain_t) - sizeof (zio_eck_t));
aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t);
aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t);
@@ -1329,8 +1327,8 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
}
/* copy the common lr_t */
- bcopy(slrp, dlrp, sizeof (lr_t));
- bcopy(slrp, aadp, sizeof (lr_t));
+ memcpy(dlrp, slrp, sizeof (lr_t));
+ memcpy(aadp, slrp, sizeof (lr_t));
aadp += sizeof (lr_t);
aad_len += sizeof (lr_t);
@@ -1347,11 +1345,12 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
dst_iovecs[vec].iov_len = crypt_len;
/* copy the bp now since it will not be encrypted */
- bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
- dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
+ memcpy(dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
+ slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
+ sizeof (blkptr_t));
+ memcpy(aadp,
+ slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
- bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
- aadp, sizeof (blkptr_t));
aadp += sizeof (blkptr_t);
aad_len += sizeof (blkptr_t);
vec++;
@@ -1419,7 +1418,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
src = cipherbuf;
dst = plainbuf;
}
- bcopy(src, dst, datalen);
+ memcpy(dst, src, datalen);
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
@@ -1462,10 +1461,11 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
dnp = &sdnp[i];
/* copy over the core fields and blkptrs (kept as plaintext) */
- bcopy(dnp, &ddnp[i], (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
+ memcpy(&ddnp[i], dnp,
+ (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
- bcopy(DN_SPILL_BLKPTR(dnp), DN_SPILL_BLKPTR(&ddnp[i]),
+ memcpy(DN_SPILL_BLKPTR(&ddnp[i]), DN_SPILL_BLKPTR(dnp),
sizeof (blkptr_t));
}
@@ -1480,7 +1480,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
* authenticated data.
*/
crypt_len = offsetof(dnode_phys_t, dn_blkptr);
- bcopy(dnp, aadp, crypt_len);
+ memcpy(aadp, dnp, crypt_len);
adnp = (dnode_phys_t *)aadp;
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
@@ -1517,8 +1517,8 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
vec++;
total_len += crypt_len;
} else {
- bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]), crypt_len);
- bcopy(DN_BONUS(dnp), aadp, crypt_len);
+ memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp), crypt_len);
+ memcpy(aadp, DN_BONUS(dnp), crypt_len);
aadp += crypt_len;
aad_len += crypt_len;
}
@@ -1561,7 +1561,7 @@ zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
ret = SET_ERROR(ENOMEM);
goto error;
}
- bzero(cipher_iovecs, nr_cipher * sizeof (iovec_t));
+ memset(cipher_iovecs, 0, nr_cipher * sizeof (iovec_t));
if (encrypt) {
src = plainbuf;
@@ -1570,7 +1570,7 @@ zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
src = cipherbuf;
dst = plainbuf;
}
- bcopy(src, dst, datalen);
+ memcpy(dst, src, datalen);
cipher_iovecs[0].iov_base = dst;
cipher_iovecs[0].iov_len = datalen;
@@ -1678,8 +1678,8 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
zfs_uio_init(&puio, &puio_s);
zfs_uio_init(&cuio, &cuio_s);
- bzero(GET_UIO_STRUCT(&puio), sizeof (struct uio));
- bzero(GET_UIO_STRUCT(&cuio), sizeof (struct uio));
+ memset(GET_UIO_STRUCT(&puio), 0, sizeof (struct uio));
+ memset(GET_UIO_STRUCT(&cuio), 0, sizeof (struct uio));
#ifdef FCRYPTO_DEBUG
printf("%s(%s, %p, %p, %d, %p, %p, %u, %s, %p, %p, %p)\n",
@@ -1710,7 +1710,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
rw_enter(&key->zk_salt_lock, RW_READER);
locked = B_TRUE;
- if (bcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
+ if (memcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
ckey = &key->zk_current_key;
tmpl = &key->zk_session;
} else {
@@ -1741,7 +1741,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
- bzero(enc_keydata, keydata_len);
+ memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
@@ -1753,14 +1753,14 @@ error:
kmem_free(failed_decrypt_buf, failed_decrypt_size);
failed_decrypt_buf = kmem_alloc(datalen, KM_SLEEP);
failed_decrypt_size = datalen;
- bcopy(cipherbuf, failed_decrypt_buf, datalen);
+ memcpy(failed_decrypt_buf, cipherbuf, datalen);
}
if (locked)
rw_exit(&key->zk_salt_lock);
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
- bzero(enc_keydata, keydata_len);
+ memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (SET_ERROR(ret));
diff --git a/module/os/linux/spl/spl-generic.c b/module/os/linux/spl/spl-generic.c
index 5bf2f6912457..bf83d2fc23ac 100644
--- a/module/os/linux/spl/spl-generic.c
+++ b/module/os/linux/spl/spl-generic.c
@@ -771,7 +771,6 @@ spl_init(void)
{
int rc = 0;
- bzero(&p0, sizeof (proc_t));
spl_random_init();
if ((rc = spl_kvmem_init()))
diff --git a/module/os/linux/zfs/qat_crypt.c b/module/os/linux/zfs/qat_crypt.c
index 4771b2f3bec5..18b6e38d1a6e 100644
--- a/module/os/linux/zfs/qat_crypt.c
+++ b/module/os/linux/zfs/qat_crypt.c
@@ -367,7 +367,7 @@ qat_crypt(qat_encrypt_dir_t dir, uint8_t *src_buf, uint8_t *dst_buf,
aad_len);
if (status != CPA_STATUS_SUCCESS)
goto fail;
- bcopy(aad_buf, op_data.pAdditionalAuthData, aad_len);
+ memcpy(op_data.pAdditionalAuthData, aad_buf, aad_len);
}
bytes_left = enc_len;
@@ -413,10 +413,10 @@ qat_crypt(qat_encrypt_dir_t dir, uint8_t *src_buf, uint8_t *dst_buf,
op_data.messageLenToHashInBytes = 0;
op_data.messageLenToCipherInBytes = enc_len;
op_data.ivLenInBytes = ZIO_DATA_IV_LEN;
- bcopy(iv_buf, op_data.pIv, ZIO_DATA_IV_LEN);
+ memcpy(op_data.pIv, iv_buf, ZIO_DATA_IV_LEN);
/* if dir is QAT_DECRYPT, copy digest_buf to pDigestResult */
if (dir == QAT_DECRYPT)
- bcopy(digest_buf, op_data.pDigestResult, ZIO_DATA_MAC_LEN);
+ memcpy(op_data.pDigestResult, digest_buf, ZIO_DATA_MAC_LEN);
cb.verify_result = CPA_FALSE;
init_completion(&cb.complete);
@@ -435,7 +435,7 @@ qat_crypt(qat_encrypt_dir_t dir, uint8_t *src_buf, uint8_t *dst_buf,
if (dir == QAT_ENCRYPT) {
/* if dir is QAT_ENCRYPT, save pDigestResult to digest_buf */
- bcopy(op_data.pDigestResult, digest_buf, ZIO_DATA_MAC_LEN);
+ memcpy(digest_buf, op_data.pDigestResult, ZIO_DATA_MAC_LEN);
QAT_STAT_INCR(encrypt_total_out_bytes, enc_len);
} else {
QAT_STAT_INCR(decrypt_total_out_bytes, enc_len);
@@ -557,7 +557,7 @@ qat_checksum(uint64_t cksum, uint8_t *buf, uint64_t size, zio_cksum_t *zcp)
goto fail;
}
- bcopy(digest_buffer, zcp, sizeof (zio_cksum_t));
+ memcpy(zcp, digest_buffer, sizeof (zio_cksum_t));
fail:
if (status != CPA_STATUS_SUCCESS)
diff --git a/module/os/linux/zfs/zfs_acl.c b/module/os/linux/zfs/zfs_acl.c
index 1859ecd9913b..351e4dad799c 100644
--- a/module/os/linux/zfs/zfs_acl.c
+++ b/module/os/linux/zfs/zfs_acl.c
@@ -687,10 +687,10 @@ zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, umode_t obj_mode, zfs_acl_t *aclp,
zobjacep = (zfs_object_ace_t *)aceptr;
aceobjp = (ace_object_t *)acep;
- bcopy(aceobjp->a_obj_type, zobjacep->z_object_type,
+ memcpy(zobjacep->z_object_type, aceobjp->a_obj_type,
sizeof (aceobjp->a_obj_type));
- bcopy(aceobjp->a_inherit_obj_type,
- zobjacep->z_inherit_type,
+ memcpy(zobjacep->z_inherit_type,
+ aceobjp->a_inherit_obj_type,
sizeof (aceobjp->a_inherit_obj_type));
acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
break;
@@ -737,11 +737,11 @@ zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
}
zobjacep = (zfs_object_ace_t *)zacep;
objacep = (ace_object_t *)acep;
- bcopy(zobjacep->z_object_type,
- objacep->a_obj_type,
+ memcpy(objacep->a_obj_type,
+ zobjacep->z_object_type,
sizeof (zobjacep->z_object_type));
- bcopy(zobjacep->z_inherit_type,
- objacep->a_inherit_obj_type,
+ memcpy(objacep->a_inherit_obj_type,
+ zobjacep->z_inherit_type,
sizeof (zobjacep->z_inherit_type));
ace_size = sizeof (ace_object_t);
break;
@@ -1102,7 +1102,7 @@ zfs_acl_node_read(struct znode *zp, boolean_t have_lock, zfs_acl_t **aclpp,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
- bcopy(znode_acl.z_ace_data, aclnode->z_acldata,
+ memcpy(aclnode->z_acldata, znode_acl.z_ace_data,
aclnode->z_size);
}
} else {
@@ -1447,7 +1447,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
- bcopy(aclnode->z_acldata, start,
+ memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
@@ -1727,7 +1727,7 @@ zfs_acl_inherit(zfsvfs_t *zfsvfs, umode_t va_mode, zfs_acl_t *paclp,
if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
VERIFY((data2sz = aclp->z_ops->ace_data(acep,
&data2)) == data1sz);
- bcopy(data1, data2, data2sz);
+ memcpy(data2, data1, data2sz);
}
aclp->z_acl_count++;
@@ -1791,7 +1791,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
boolean_t trim = B_FALSE;
boolean_t inherited = B_FALSE;
- bzero(acl_ids, sizeof (zfs_acl_ids_t));
+ memset(acl_ids, 0, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = vap->va_mode;
if (vsecp)
@@ -2016,7 +2016,7 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
- bcopy(aclnode->z_acldata, start,
+ memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
diff --git a/module/os/linux/zfs/zfs_dir.c b/module/os/linux/zfs/zfs_dir.c
index 82b32d1cc3fa..c5b3b5ce7fc0 100644
--- a/module/os/linux/zfs/zfs_dir.c
+++ b/module/os/linux/zfs/zfs_dir.c
@@ -297,7 +297,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name,
*/
dl->dl_namesize = strlen(dl->dl_name) + 1;
name = kmem_alloc(dl->dl_namesize, KM_SLEEP);
- bcopy(dl->dl_name, name, dl->dl_namesize);
+ memcpy(name, dl->dl_name, dl->dl_namesize);
dl->dl_name = name;
}
@@ -625,7 +625,7 @@ zfs_purgedir(znode_t *dzp)
skipped += 1;
continue;
}
- bzero(&dl, sizeof (dl));
+ memset(&dl, 0, sizeof (dl));
dl.dl_dzp = dzp;
dl.dl_name = zap.za_name;
diff --git a/module/os/linux/zfs/zfs_uio.c b/module/os/linux/zfs/zfs_uio.c
index a3d5d5f83b6f..1b315c27016d 100644
--- a/module/os/linux/zfs/zfs_uio.c
+++ b/module/os/linux/zfs/zfs_uio.c
@@ -103,9 +103,9 @@ zfs_uiomove_iov(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
break;
case UIO_SYSSPACE:
if (rw == UIO_READ)
- bcopy(p, iov->iov_base + skip, cnt);
+ memcpy(iov->iov_base + skip, p, cnt);
else
- bcopy(iov->iov_base + skip, p, cnt);
+ memcpy(p, iov->iov_base + skip, cnt);
break;
default:
ASSERT(0);
@@ -138,9 +138,9 @@ zfs_uiomove_bvec(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
paddr = zfs_kmap_atomic(bv->bv_page);
if (rw == UIO_READ)
- bcopy(p, paddr + bv->bv_offset + skip, cnt);
+ memcpy(paddr + bv->bv_offset + skip, p, cnt);
else
- bcopy(paddr + bv->bv_offset + skip, p, cnt);
+ memcpy(p, paddr + bv->bv_offset + skip, cnt);
zfs_kunmap_atomic(paddr);
skip += cnt;
@@ -275,7 +275,7 @@ zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes)
zfs_uio_t uio_copy;
int ret;
- bcopy(uio, &uio_copy, sizeof (zfs_uio_t));
+ memcpy(&uio_copy, uio, sizeof (zfs_uio_t));
if (uio->uio_segflg == UIO_BVEC)
ret = zfs_uiomove_bvec(p, n, rw, &uio_copy);
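The zfs_uiomove_*() hunks above preserve a direction convention that is easy to misread: UIO_READ moves kernel data out to the iovec, UIO_WRITE moves iovec data into the kernel buffer. A reduced sketch with a hypothetical helper:

#include <string.h>

typedef enum { UIO_READ, UIO_WRITE } zfs_uio_rw_t;

static void
uiomove_one(void *p, size_t cnt, zfs_uio_rw_t rw, void *iov_base)
{
	if (rw == UIO_READ)
		memcpy(iov_base, p, cnt);	/* kernel buffer -> iovec */
	else
		memcpy(p, iov_base, cnt);	/* iovec -> kernel buffer */
}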
diff --git a/module/os/linux/zfs/zfs_vfsops.c b/module/os/linux/zfs/zfs_vfsops.c
index 412af0e88931..81a059651e8a 100644
--- a/module/os/linux/zfs/zfs_vfsops.c
+++ b/module/os/linux/zfs/zfs_vfsops.c
@@ -1137,7 +1137,7 @@ zfs_statvfs(struct inode *ip, struct kstatfs *statp)
* We have all of 40 characters to stuff a string here.
* Is there anything useful we could/should provide?
*/
- bzero(statp->f_spare, sizeof (statp->f_spare));
+ memset(statp->f_spare, 0, sizeof (statp->f_spare));
if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
dmu_objset_projectquota_present(zfsvfs->z_os)) {
diff --git a/module/os/linux/zfs/zfs_znode.c b/module/os/linux/zfs/zfs_znode.c
index d65d75312828..b76e65d16822 100644
--- a/module/os/linux/zfs/zfs_znode.c
+++ b/module/os/linux/zfs/zfs_znode.c
@@ -1581,7 +1581,7 @@ zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
flush_dcache_page(pp);
pb = kmap(pp);
- bzero(pb + off, len);
+ memset(pb + off, 0, len);
kunmap(pp);
if (mapping_writably_mapped(mp))
@@ -2153,7 +2153,7 @@ zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
component[0] = '/';
if (is_xattrdir) {
- (void) sprintf(component + 1, "<xattrdir>");
+ strcpy(component + 1, "<xattrdir>");
} else {
error = zap_value_search(osp, pobj, obj,
ZFS_DIRENT_OBJ(-1ULL), component + 1);
@@ -2164,7 +2164,7 @@ zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
complen = strlen(component);
path -= complen;
ASSERT(path >= buf);
- bcopy(component, path, complen);
+ memcpy(path, component, complen);
obj = pobj;
if (sa_hdl != hdl) {
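A standalone sketch of the right-to-left path assembly in zfs_obj_to_path_impl() above: components are discovered leaf-first, so each one is copied in front of the path built so far.

#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *components[] = { "file", "dir2", "dir1" };	/* leaf first */
	char buf[64];
	char *path = buf + sizeof (buf) - 1;

	*path = '\0';
	for (int i = 0; i < 3; i++) {
		size_t complen = strlen(components[i]) + 1;	/* +1 for '/' */
		path -= complen;
		*path = '/';
		memcpy(path + 1, components[i], complen - 1);
	}
	printf("%s\n", path);	/* prints "/dir1/dir2/file" */
	return (0);
}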
diff --git a/module/os/linux/zfs/zio_crypt.c b/module/os/linux/zfs/zio_crypt.c
index 099d23484d32..dcab02b07894 100644
--- a/module/os/linux/zfs/zio_crypt.c
+++ b/module/os/linux/zfs/zio_crypt.c
@@ -216,7 +216,7 @@ zio_crypt_key_destroy(zio_crypt_key_t *key)
crypto_destroy_ctx_template(key->zk_hmac_tmpl);
/* zero out sensitive data */
- bzero(key, sizeof (zio_crypt_key_t));
+ memset(key, 0, sizeof (zio_crypt_key_t));
}
int
@@ -230,7 +230,7 @@ zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
keydata_len = zio_crypt_table[crypt].ci_keylen;
- bzero(key, sizeof (zio_crypt_key_t));
+ memset(key, 0, sizeof (zio_crypt_key_t));
/* fill keydata buffers and salt with random data */
ret = random_get_bytes((uint8_t *)&key->zk_guid, sizeof (uint64_t));
@@ -317,7 +317,7 @@ zio_crypt_key_change_salt(zio_crypt_key_t *key)
goto out_unlock;
/* assign the salt and reset the usage count */
- bcopy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
+ memcpy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
key->zk_salt_count = 0;
/* destroy the old context template and create the new one */
@@ -346,7 +346,7 @@ zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
rw_enter(&key->zk_salt_lock, RW_READER);
- bcopy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
+ memcpy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
ZFS_CURRENT_MAX_SALT_USES);
@@ -652,7 +652,7 @@ zio_crypt_generate_iv(uint8_t *ivbuf)
return (0);
error:
- bzero(ivbuf, ZIO_DATA_IV_LEN);
+ memset(ivbuf, 0, ZIO_DATA_IV_LEN);
return (ret);
}
@@ -693,12 +693,12 @@ zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
goto error;
}
- bcopy(raw_digestbuf, digestbuf, digestlen);
+ memcpy(digestbuf, raw_digestbuf, digestlen);
return (0);
error:
- bzero(digestbuf, digestlen);
+ memset(digestbuf, 0, digestlen);
return (ret);
}
@@ -714,8 +714,8 @@ zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
if (ret != 0)
return (ret);
- bcopy(digestbuf, salt, ZIO_DATA_SALT_LEN);
- bcopy(digestbuf + ZIO_DATA_SALT_LEN, ivbuf, ZIO_DATA_IV_LEN);
+ memcpy(salt, digestbuf, ZIO_DATA_SALT_LEN);
+ memcpy(ivbuf, digestbuf + ZIO_DATA_SALT_LEN, ZIO_DATA_IV_LEN);
return (0);
}
@@ -738,18 +738,18 @@ zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv)
ASSERT(BP_IS_ENCRYPTED(bp));
if (!BP_SHOULD_BYTESWAP(bp)) {
- bcopy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
- bcopy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
- bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
+ memcpy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
+ memcpy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
+ memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, val32);
} else {
- bcopy(salt, &val64, sizeof (uint64_t));
+ memcpy(&val64, salt, sizeof (uint64_t));
bp->blk_dva[2].dva_word[0] = BSWAP_64(val64);
- bcopy(iv, &val64, sizeof (uint64_t));
+ memcpy(&val64, iv, sizeof (uint64_t));
bp->blk_dva[2].dva_word[1] = BSWAP_64(val64);
- bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
+ memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
BP_SET_IV2(bp, BSWAP_32(val32));
}
}
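The encode/decode hunks pack the salt and IV into blkptr words by memcpy()ing through a temporary integer, which avoids the alignment and strict-aliasing hazards of casting a uint8_t pointer to uint64_t *. A standalone sketch (BSWAP_64 stands in for the ZFS macro via a GCC/Clang builtin):

#include <stdint.h>
#include <string.h>

#define	BSWAP_64(x)	__builtin_bswap64(x)

static uint64_t
pack8(const uint8_t *bytes, int byteswap)
{
	uint64_t val;

	memcpy(&val, bytes, sizeof (uint64_t));	/* first 8 bytes */
	return (byteswap ? BSWAP_64(val) : val);
}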
@@ -764,26 +764,26 @@ zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv)
/* for convenience, so callers don't need to check */
if (BP_IS_AUTHENTICATED(bp)) {
- bzero(salt, ZIO_DATA_SALT_LEN);
- bzero(iv, ZIO_DATA_IV_LEN);
+ memset(salt, 0, ZIO_DATA_SALT_LEN);
+ memset(iv, 0, ZIO_DATA_IV_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
- bcopy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
- bcopy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
+ memcpy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
+ memcpy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
val32 = (uint32_t)BP_GET_IV2(bp);
- bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
+ memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
} else {
val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]);
- bcopy(&val64, salt, sizeof (uint64_t));
+ memcpy(salt, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]);
- bcopy(&val64, iv, sizeof (uint64_t));
+ memcpy(iv, &val64, sizeof (uint64_t));
val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp));
- bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
+ memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
}
}
@@ -796,14 +796,14 @@ zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac)
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET);
if (!BP_SHOULD_BYTESWAP(bp)) {
- bcopy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
- bcopy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
+ memcpy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
+ memcpy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
} else {
- bcopy(mac, &val64, sizeof (uint64_t));
+ memcpy(&val64, mac, sizeof (uint64_t));
bp->blk_cksum.zc_word[2] = BSWAP_64(val64);
- bcopy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
+ memcpy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
bp->blk_cksum.zc_word[3] = BSWAP_64(val64);
}
}
@@ -817,20 +817,20 @@ zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac)
/* for convenience, so callers don't need to check */
if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
- bzero(mac, ZIO_DATA_MAC_LEN);
+ memset(mac, 0, ZIO_DATA_MAC_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
- bcopy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
- bcopy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
+ memcpy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
+ memcpy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
sizeof (uint64_t));
} else {
val64 = BSWAP_64(bp->blk_cksum.zc_word[2]);
- bcopy(&val64, mac, sizeof (uint64_t));
+ memcpy(mac, &val64, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_cksum.zc_word[3]);
- bcopy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
+ memcpy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
}
}
@@ -839,8 +839,8 @@ zio_crypt_encode_mac_zil(void *data, uint8_t *mac)
{
zil_chain_t *zilc = data;
- bcopy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
- bcopy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
+ memcpy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
+ memcpy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
}
@@ -854,8 +854,8 @@ zio_crypt_decode_mac_zil(const void *data, uint8_t *mac)
*/
const zil_chain_t *zilc = data;
- bcopy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
- bcopy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
+ memcpy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
+ memcpy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
sizeof (uint64_t));
}
@@ -882,7 +882,7 @@ zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen)
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
- bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]),
+ memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp),
DN_MAX_BONUS_LEN(dnp));
}
}
@@ -1024,7 +1024,7 @@ zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version,
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
- bcopy(&bab, *aadp, bab_len);
+ memcpy(*aadp, &bab, bab_len);
*aadp += bab_len;
*aad_len += bab_len;
}
@@ -1048,7 +1048,7 @@ zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
* of copying 512-64 unneeded bytes. The compiler seems to be fine
* with that.
*/
- bcopy(dnp, &tmp_dncore, dn_core_size);
+ memcpy(&tmp_dncore, dnp, dn_core_size);
adnp = &tmp_dncore;
if (le_bswap) {
@@ -1190,7 +1190,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
goto error;
}
- bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
+ memcpy(portable_mac, raw_portable_mac, ZIO_OBJSET_MAC_LEN);
/*
* This is necessary here as we check next whether
@@ -1219,7 +1219,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
(datalen <= OBJSET_PHYS_SIZE_V1)) {
- bzero(local_mac, ZIO_OBJSET_MAC_LEN);
+ memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (0);
}
@@ -1282,13 +1282,13 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
goto error;
}
- bcopy(raw_local_mac, local_mac, ZIO_OBJSET_MAC_LEN);
+ memcpy(local_mac, raw_local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
error:
- bzero(portable_mac, ZIO_OBJSET_MAC_LEN);
- bzero(local_mac, ZIO_OBJSET_MAC_LEN);
+ memset(portable_mac, 0, ZIO_OBJSET_MAC_LEN);
+ memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
return (ret);
}
@@ -1324,11 +1324,11 @@ zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf,
SHA2Final(digestbuf, &ctx);
if (generate) {
- bcopy(digestbuf, cksum, ZIO_DATA_MAC_LEN);
+ memcpy(cksum, digestbuf, ZIO_DATA_MAC_LEN);
return (0);
}
- if (bcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0)
+ if (memcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0)
return (SET_ERROR(ECKSUM));
return (0);
@@ -1409,7 +1409,7 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
nr_src = 1;
nr_dst = 0;
}
- bzero(dst, datalen);
+ memset(dst, 0, datalen);
/* find the start and end record of the log block */
zilc = (zil_chain_t *)src;
@@ -1460,8 +1460,8 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
* the embedded checksum will not have been calculated yet, so we don't
* authenticate that.
*/
- bcopy(src, dst, sizeof (zil_chain_t));
- bcopy(src, aadp, sizeof (zil_chain_t) - sizeof (zio_eck_t));
+ memcpy(dst, src, sizeof (zil_chain_t));
+ memcpy(aadp, src, sizeof (zil_chain_t) - sizeof (zio_eck_t));
aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t);
aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t);
@@ -1482,8 +1482,8 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
}
/* copy the common lr_t */
- bcopy(slrp, dlrp, sizeof (lr_t));
- bcopy(slrp, aadp, sizeof (lr_t));
+ memcpy(dlrp, slrp, sizeof (lr_t));
+ memcpy(aadp, slrp, sizeof (lr_t));
aadp += sizeof (lr_t);
aad_len += sizeof (lr_t);
@@ -1504,11 +1504,12 @@ zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
dst_iovecs[nr_iovecs].iov_len = crypt_len;
/* copy the bp now since it will not be encrypted */
- bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
- dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
+ memcpy(dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
+ slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
+ sizeof (blkptr_t));
+ memcpy(aadp,
+ slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
- bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
- aadp, sizeof (blkptr_t));
aadp += sizeof (blkptr_t);
aad_len += sizeof (blkptr_t);
nr_iovecs++;
@@ -1655,10 +1656,11 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
dnp = &sdnp[i];
/* copy over the core fields and blkptrs (kept as plaintext) */
- bcopy(dnp, &ddnp[i], (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
+ memcpy(&ddnp[i], dnp,
+ (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
- bcopy(DN_SPILL_BLKPTR(dnp), DN_SPILL_BLKPTR(&ddnp[i]),
+ memcpy(DN_SPILL_BLKPTR(&ddnp[i]), DN_SPILL_BLKPTR(dnp),
sizeof (blkptr_t));
}
@@ -1673,7 +1675,7 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
* authenticated data.
*/
crypt_len = offsetof(dnode_phys_t, dn_blkptr);
- bcopy(dnp, aadp, crypt_len);
+ memcpy(aadp, dnp, crypt_len);
adnp = (dnode_phys_t *)aadp;
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
@@ -1716,8 +1718,8 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
nr_iovecs++;
total_len += crypt_len;
} else {
- bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]), crypt_len);
- bcopy(DN_BONUS(dnp), aadp, crypt_len);
+ memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp), crypt_len);
+ memcpy(aadp, DN_BONUS(dnp), crypt_len);
aadp += crypt_len;
aad_len += crypt_len;
}
@@ -1898,7 +1900,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
rw_enter(&key->zk_salt_lock, RW_READER);
locked = B_TRUE;
- if (bcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
+ if (memcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
ckey = &key->zk_current_key;
tmpl = key->zk_current_tmpl;
} else {
@@ -1948,8 +1950,8 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
/* If the hardware implementation fails fall back to software */
}
- bzero(&puio, sizeof (zfs_uio_t));
- bzero(&cuio, sizeof (zfs_uio_t));
+ memset(&puio, 0, sizeof (puio));
+ memset(&cuio, 0, sizeof (cuio));
/* create uios for encryption */
ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf,
@@ -1972,7 +1974,7 @@ zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
- bzero(enc_keydata, keydata_len);
+ memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
@@ -1984,7 +1986,7 @@ error:
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
- bzero(enc_keydata, keydata_len);
+ memset(enc_keydata, 0, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
diff --git a/module/zcommon/zfs_fletcher.c b/module/zcommon/zfs_fletcher.c
index 16773d4de077..4606b5491daa 100644
--- a/module/zcommon/zfs_fletcher.c
+++ b/module/zcommon/zfs_fletcher.c
@@ -126,8 +126,8 @@
* which has been filled either by:
*
* 1. a compression step, which will be mostly cached, or
- * 2. a bcopy() or copyin(), which will be uncached (because the
- * copy is cache-bypassing).
+ * 2. a memcpy() or copyin(), which will be uncached
+ * (because the copy is cache-bypassing).
*
* For both cached and uncached data, both fletcher checksums are much faster
* than sha-256, and slower than 'off', which doesn't touch the data at all.
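For context on the comment above, fletcher-4 is a four-accumulator running sum over 32-bit words; a minimal scalar form, mirroring the well-known structure rather than any particular ZFS implementation:

#include <stddef.h>
#include <stdint.h>

static void
fletcher_4_scalar(const uint32_t *ip, size_t nwords, uint64_t sum[4])
{
	uint64_t a = 0, b = 0, c = 0, d = 0;

	for (size_t i = 0; i < nwords; i++) {
		a += ip[i];	/* each sum feeds the next */
		b += a;
		c += b;
		d += c;
	}
	sum[0] = a; sum[1] = b; sum[2] = c; sum[3] = d;
}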
diff --git a/module/zcommon/zfs_fletcher_aarch64_neon.c b/module/zcommon/zfs_fletcher_aarch64_neon.c
index e84d69eb3415..319c5332d5a9 100644
--- a/module/zcommon/zfs_fletcher_aarch64_neon.c
+++ b/module/zcommon/zfs_fletcher_aarch64_neon.c
@@ -52,7 +52,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_aarch64_neon_init(fletcher_4_ctx_t *ctx)
{
- bzero(ctx->aarch64_neon, 4 * sizeof (zfs_fletcher_aarch64_neon_t));
+ memset(ctx->aarch64_neon, 0, 4 * sizeof (zfs_fletcher_aarch64_neon_t));
}
ZFS_NO_SANITIZE_UNDEFINED
diff --git a/module/zcommon/zfs_fletcher_avx512.c b/module/zcommon/zfs_fletcher_avx512.c
index 8ee438ab9325..9166834f536e 100644
--- a/module/zcommon/zfs_fletcher_avx512.c
+++ b/module/zcommon/zfs_fletcher_avx512.c
@@ -39,7 +39,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_avx512f_init(fletcher_4_ctx_t *ctx)
{
- bzero(ctx->avx512, 4 * sizeof (zfs_fletcher_avx512_t));
+ memset(ctx->avx512, 0, 4 * sizeof (zfs_fletcher_avx512_t));
}
ZFS_NO_SANITIZE_UNDEFINED
diff --git a/module/zcommon/zfs_fletcher_intel.c b/module/zcommon/zfs_fletcher_intel.c
index 16e61a96f8b1..15bfe3cc3c76 100644
--- a/module/zcommon/zfs_fletcher_intel.c
+++ b/module/zcommon/zfs_fletcher_intel.c
@@ -51,7 +51,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_avx2_init(fletcher_4_ctx_t *ctx)
{
- bzero(ctx->avx, 4 * sizeof (zfs_fletcher_avx_t));
+ memset(ctx->avx, 0, 4 * sizeof (zfs_fletcher_avx_t));
}
ZFS_NO_SANITIZE_UNDEFINED
diff --git a/module/zcommon/zfs_fletcher_sse.c b/module/zcommon/zfs_fletcher_sse.c
index fc5938488e61..3ee67996523b 100644
--- a/module/zcommon/zfs_fletcher_sse.c
+++ b/module/zcommon/zfs_fletcher_sse.c
@@ -53,7 +53,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_sse2_init(fletcher_4_ctx_t *ctx)
{
- bzero(ctx->sse, 4 * sizeof (zfs_fletcher_sse_t));
+ memset(ctx->sse, 0, 4 * sizeof (zfs_fletcher_sse_t));
}
ZFS_NO_SANITIZE_UNDEFINED
diff --git a/module/zcommon/zfs_fletcher_superscalar.c b/module/zcommon/zfs_fletcher_superscalar.c
index 73a74b9ae0ab..dec3c17506f4 100644
--- a/module/zcommon/zfs_fletcher_superscalar.c
+++ b/module/zcommon/zfs_fletcher_superscalar.c
@@ -51,7 +51,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_superscalar_init(fletcher_4_ctx_t *ctx)
{
- bzero(ctx->superscalar, 4 * sizeof (zfs_fletcher_superscalar_t));
+ memset(ctx->superscalar, 0, 4 * sizeof (zfs_fletcher_superscalar_t));
}
ZFS_NO_SANITIZE_UNDEFINED
diff --git a/module/zcommon/zfs_fletcher_superscalar4.c b/module/zcommon/zfs_fletcher_superscalar4.c
index 2dbf8bbb8146..9e69dc30eb0e 100644
--- a/module/zcommon/zfs_fletcher_superscalar4.c
+++ b/module/zcommon/zfs_fletcher_superscalar4.c
@@ -51,7 +51,7 @@ ZFS_NO_SANITIZE_UNDEFINED
static void
fletcher_4_superscalar4_init(fletcher_4_ctx_t *ctx)
{
- bzero(ctx->superscalar, 4 * sizeof (zfs_fletcher_superscalar_t));
+ memset(ctx->superscalar, 0, 4 * sizeof (zfs_fletcher_superscalar_t));
}
ZFS_NO_SANITIZE_UNDEFINED
diff --git a/module/zfs/aggsum.c b/module/zfs/aggsum.c
index c4ea4f86fc5f..488c6ef3b6fc 100644
--- a/module/zfs/aggsum.c
+++ b/module/zfs/aggsum.c
@@ -87,7 +87,7 @@ static uint_t aggsum_borrow_shift = 4;
void
aggsum_init(aggsum_t *as, uint64_t value)
{
- bzero(as, sizeof (*as));
+ memset(as, 0, sizeof (*as));
as->as_lower_bound = as->as_upper_bound = value;
mutex_init(&as->as_lock, NULL, MUTEX_DEFAULT, NULL);
/*
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 744df24235e4..9e32aaaea8b6 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -250,7 +250,7 @@
* since the physical block is about to be rewritten. The new data contents
* will be contained in the arc_buf_t. As the I/O pipeline performs the write,
* it may compress the data before writing it to disk. The ARC will be called
- * with the transformed data and will bcopy the transformed on-disk block into
+ * with the transformed data and will memcpy the transformed on-disk block into
* a newly allocated b_pabd. Writes are always done into buffers which have
* either been loaned (and hence are new and don't have other readers) or
* buffers which have been released (and hence have their own hdr, if there
@@ -1132,7 +1132,7 @@ hdr_full_cons(void *vbuf, void *unused, int kmflag)
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
- bzero(hdr, HDR_FULL_SIZE);
+ memset(hdr, 0, HDR_FULL_SIZE);
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
@@ -1152,7 +1152,7 @@ hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag)
arc_buf_hdr_t *hdr = vbuf;
hdr_full_cons(vbuf, unused, kmflag);
- bzero(&hdr->b_crypt_hdr, sizeof (hdr->b_crypt_hdr));
+ memset(&hdr->b_crypt_hdr, 0, sizeof (hdr->b_crypt_hdr));
arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
return (0);
@@ -1164,7 +1164,7 @@ hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
- bzero(hdr, HDR_L2ONLY_SIZE);
+ memset(hdr, 0, HDR_L2ONLY_SIZE);
arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
return (0);
@@ -1176,7 +1176,7 @@ buf_cons(void *vbuf, void *unused, int kmflag)
(void) unused, (void) kmflag;
arc_buf_t *buf = vbuf;
- bzero(buf, sizeof (arc_buf_t));
+ memset(buf, 0, sizeof (arc_buf_t));
mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
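The *_cons() hunks follow the usual slab-constructor pattern: objects arrive as raw slab memory, so the constructor zeroes the whole object before building its locks and refcounts. A reduced sketch with illustrative names:

#include <string.h>

typedef struct { int refs; /* lock would live here */ } obj_t;

static int
obj_cons(void *vbuf, void *unused, int kmflag)
{
	obj_t *obj = vbuf;

	(void) unused, (void) kmflag;
	memset(obj, 0, sizeof (obj_t));	/* wipe stale slab contents */
	/* mutex_init(&obj->lock, NULL, MUTEX_DEFAULT, NULL); */
	return (0);
}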
@@ -1332,9 +1332,9 @@ arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
ASSERT(HDR_PROTECTED(hdr));
- bcopy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
- bcopy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
- bcopy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
+ memcpy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
+ memcpy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
+ memcpy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
}
@@ -1692,7 +1692,7 @@ arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
}
if (!ARC_BUF_COMPRESSED(from)) {
- bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
+ memcpy(buf->b_data, from->b_data, arc_buf_size(buf));
copied = B_TRUE;
break;
}
@@ -3349,7 +3349,7 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
buf_hash_remove(hdr);
- bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
+ memcpy(nhdr, hdr, HDR_L2ONLY_SIZE);
if (new == hdr_full_cache || new == hdr_full_crypt_cache) {
arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
@@ -3512,7 +3512,7 @@ arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
}
/* unset all members of the original hdr */
- bzero(&hdr->b_dva, sizeof (dva_t));
+ memset(&hdr->b_dva, 0, sizeof (dva_t));
hdr->b_birth = 0;
hdr->b_type = ARC_BUFC_INVALID;
hdr->b_flags = 0;
@@ -3537,9 +3537,9 @@ arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
hdr->b_crypt_hdr.b_ot = DMU_OT_NONE;
hdr->b_crypt_hdr.b_ebufcnt = 0;
hdr->b_crypt_hdr.b_dsobj = 0;
- bzero(hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
- bzero(hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
- bzero(hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
+ memset(hdr->b_crypt_hdr.b_salt, 0, ZIO_DATA_SALT_LEN);
+ memset(hdr->b_crypt_hdr.b_iv, 0, ZIO_DATA_IV_LEN);
+ memset(hdr->b_crypt_hdr.b_mac, 0, ZIO_DATA_MAC_LEN);
}
buf_discard_identity(hdr);
@@ -3577,11 +3577,11 @@ arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
arc_cksum_free(hdr);
if (salt != NULL)
- bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
+ memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
if (iv != NULL)
- bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
+ memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
if (mac != NULL)
- bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
+ memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
}
/*
@@ -3657,9 +3657,9 @@ arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj, boolean_t byteorder,
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
- bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
- bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
- bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
+ memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
+ memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
+ memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
/*
* This buffer will be considered encrypted even if the ot is not an
@@ -5643,7 +5643,7 @@ arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
if (buf == NULL)
return;
- bcopy(buf->b_data, arg, arc_buf_size(buf));
+ memcpy(arg, buf->b_data, arc_buf_size(buf));
arc_buf_destroy(buf, arg);
}
@@ -7106,11 +7106,11 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
localprop.zp_byteorder =
(hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
- bcopy(hdr->b_crypt_hdr.b_salt, localprop.zp_salt,
+ memcpy(localprop.zp_salt, hdr->b_crypt_hdr.b_salt,
ZIO_DATA_SALT_LEN);
- bcopy(hdr->b_crypt_hdr.b_iv, localprop.zp_iv,
+ memcpy(localprop.zp_iv, hdr->b_crypt_hdr.b_iv,
ZIO_DATA_IV_LEN);
- bcopy(hdr->b_crypt_hdr.b_mac, localprop.zp_mac,
+ memcpy(localprop.zp_mac, hdr->b_crypt_hdr.b_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
localprop.zp_nopwrite = B_FALSE;
@@ -8722,14 +8722,15 @@ top:
* block pointer in the header.
*/
if (i == 0) {
- bzero(l2dhdr, dev->l2ad_dev_hdr_asize);
+ memset(l2dhdr, 0,
+ dev->l2ad_dev_hdr_asize);
} else {
- bzero(&l2dhdr->dh_start_lbps[i],
+ memset(&l2dhdr->dh_start_lbps[i], 0,
sizeof (l2arc_log_blkptr_t));
}
break;
}
- bcopy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[i],
+ memcpy(&l2dhdr->dh_start_lbps[i], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
lb_ptr_buf = list_next(&dev->l2ad_lbptr_list,
lb_ptr_buf);
@@ -9353,7 +9354,7 @@ l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
}
ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
if (psize < asize)
- bzero((char *)tmp + psize, asize - psize);
+ memset((char *)tmp + psize, 0, asize - psize);
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, asize);
to_write = cabd;
@@ -9388,7 +9389,7 @@ encrypt:
abd_zero_off(eabd, psize, asize - psize);
/* assert that the MAC we got here matches the one we saved */
- ASSERT0(bcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
+ ASSERT0(memcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (to_write == cabd)
@@ -9897,7 +9898,7 @@ l2arc_rebuild_dev(l2arc_dev_t *dev, boolean_t reopen)
if (l2arc_trim_ahead > 0) {
dev->l2ad_trim_all = B_TRUE;
} else {
- bzero(l2dhdr, l2dhdr_asize);
+ memset(l2dhdr, 0, l2dhdr_asize);
l2arc_dev_hdr_update(dev);
}
}
@@ -10218,7 +10219,7 @@ l2arc_rebuild(l2arc_dev_t *dev)
goto out;
/* Prepare the rebuild process */
- bcopy(l2dhdr->dh_start_lbps, lbps, sizeof (lbps));
+ memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps));
/* Start the rebuild process */
for (;;) {
@@ -10264,7 +10265,7 @@ l2arc_rebuild(l2arc_dev_t *dev)
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t),
KM_SLEEP);
- bcopy(&lbps[0], lb_ptr_buf->lb_ptr,
+ memcpy(lb_ptr_buf->lb_ptr, &lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf);
@@ -10362,7 +10363,7 @@ out:
*/
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"no valid log blocks");
- bzero(l2dhdr, dev->l2ad_dev_hdr_asize);
+ memset(l2dhdr, 0, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
} else if (err == ECANCELED) {
/*
@@ -10853,13 +10854,13 @@ l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
ZIO_CHECKSUM_FLETCHER_4);
if (asize < sizeof (*lb)) {
/* compression succeeded */
- bzero(tmpbuf + psize, asize - psize);
+ memset(tmpbuf + psize, 0, asize - psize);
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_LZ4);
} else {
/* compression failed */
- bcopy(lb, tmpbuf, sizeof (*lb));
+ memcpy(tmpbuf, lb, sizeof (*lb));
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_OFF);
@@ -10885,7 +10886,7 @@ l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
* Include the committed log block's pointer in the list of pointers
* to log blocks present in the L2ARC device.
*/
- bcopy(&l2dhdr->dh_start_lbps[0], lb_ptr_buf->lb_ptr,
+ memcpy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf);
@@ -10974,7 +10975,7 @@ l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr)
ASSERT(HDR_HAS_L2HDR(hdr));
le = &lb->lb_entries[index];
- bzero(le, sizeof (*le));
+ memset(le, 0, sizeof (*le));
le->le_dva = hdr->b_dva;
le->le_birth = hdr->b_birth;
le->le_daddr = hdr->b_l2hdr.b_daddr;
diff --git a/module/zfs/blkptr.c b/module/zfs/blkptr.c
index aa09ded8dba3..d85f0737f6f6 100644
--- a/module/zfs/blkptr.c
+++ b/module/zfs/blkptr.c
@@ -58,7 +58,7 @@ encode_embedded_bp_compressed(blkptr_t *bp, void *data,
ASSERT3U(comp, >=, ZIO_COMPRESS_OFF);
ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
- bzero(bp, sizeof (*bp));
+ memset(bp, 0, sizeof (*bp));
BP_SET_EMBEDDED(bp, B_TRUE);
BP_SET_COMPRESS(bp, comp);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
diff --git a/module/zfs/bpobj.c b/module/zfs/bpobj.c
index 68f534c6b197..fb29ccfe31eb 100644
--- a/module/zfs/bpobj.c
+++ b/module/zfs/bpobj.c
@@ -156,7 +156,7 @@ bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
if (err)
return (err);
- bzero(bpo, sizeof (*bpo));
+ memset(bpo, 0, sizeof (*bpo));
mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);
ASSERT(bpo->bpo_dbuf == NULL);
@@ -805,12 +805,12 @@ bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, boolean_t bp_freed,
* set of BP's stored, and bpobj_iterate() wouldn't visit
* all the space accounted for in the bpobj.
*/
- bzero(&stored_bp, sizeof (stored_bp));
+ memset(&stored_bp, 0, sizeof (stored_bp));
stored_bp.blk_prop = bp->blk_prop;
stored_bp.blk_birth = bp->blk_birth;
} else if (!BP_GET_DEDUP(bp)) {
/* The bpobj will compress better without the checksum */
- bzero(&stored_bp.blk_cksum, sizeof (stored_bp.blk_cksum));
+ memset(&stored_bp.blk_cksum, 0, sizeof (stored_bp.blk_cksum));
}
stored_bp.blk_fill = 0;
diff --git a/module/zfs/btree.c b/module/zfs/btree.c
index 5bcf621d5994..a079929b5bc8 100644
--- a/module/zfs/btree.c
+++ b/module/zfs/btree.c
@@ -159,7 +159,7 @@ zfs_btree_create(zfs_btree_t *tree, int (*compar) (const void *, const void *),
*/
ASSERT3U(size, <=, (BTREE_LEAF_SIZE - sizeof (zfs_btree_hdr_t)) / 4);
- bzero(tree, sizeof (*tree));
+ memset(tree, 0, sizeof (*tree));
tree->bt_compar = compar;
tree->bt_elem_size = size;
tree->bt_height = -1;
diff --git a/module/zfs/dataset_kstats.c b/module/zfs/dataset_kstats.c
index 3fbb24ddef5e..6b4b31e8c9c6 100644
--- a/module/zfs/dataset_kstats.c
+++ b/module/zfs/dataset_kstats.c
@@ -123,7 +123,7 @@ dataset_kstats_create(dataset_kstats_t *dk, objset_t *objset)
dataset_kstat_values_t *dk_kstats =
kmem_alloc(sizeof (empty_dataset_kstats), KM_SLEEP);
- bcopy(&empty_dataset_kstats, dk_kstats,
+ memcpy(dk_kstats, &empty_dataset_kstats,
sizeof (empty_dataset_kstats));
char *ds_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index cb2b7e5a1def..c328431415ed 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -280,7 +280,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
dmu_buf_impl_t *db = vdb;
- bzero(db, sizeof (dmu_buf_impl_t));
+ memset(db, 0, sizeof (dmu_buf_impl_t));
mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
@@ -1235,7 +1235,7 @@ dbuf_loan_arcbuf(dmu_buf_impl_t *db)
mutex_exit(&db->db_mtx);
abuf = arc_loan_buf(spa, B_FALSE, blksz);
- bcopy(db->db.db_data, abuf->b_data, blksz);
+ memcpy(abuf->b_data, db->db.db_data, blksz);
} else {
abuf = db->db_buf;
arc_loan_inuse_buf(abuf, db);
@@ -1356,7 +1356,7 @@ dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
/* freed in flight */
ASSERT(zio == NULL || zio->io_error == 0);
arc_release(buf, db);
- bzero(buf->b_data, db->db.db_size);
+ memset(buf->b_data, 0, db->db.db_size);
arc_buf_freeze(buf);
db->db_freed_in_flight = FALSE;
dbuf_set_data(db, buf);
@@ -1395,9 +1395,9 @@ dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
if (bonuslen < max_bonuslen)
- bzero(db->db.db_data, max_bonuslen);
+ memset(db->db.db_data, 0, max_bonuslen);
if (bonuslen)
- bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
+ memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "bonus buffer filled");
return (0);
@@ -1446,7 +1446,7 @@ dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn)
if (is_hole) {
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
- bzero(db->db.db_data, db->db.db_size);
+ memset(db->db.db_data, 0, db->db.db_size);
if (db->db_blkptr != NULL && db->db_level > 0 &&
BP_IS_HOLE(db->db_blkptr) &&
@@ -1657,7 +1657,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
arc_space_consume(bonuslen, ARC_SPACE_BONUS);
- bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
+ memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
dnode_t *dn = DB_DNODE(db);
int size = arc_buf_size(db->db_buf);
@@ -1687,7 +1687,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
} else {
dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
}
- bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
+ memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
} else {
db->db_buf = NULL;
dbuf_clear_data(db);
@@ -1985,7 +1985,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
ASSERT(db->db.db_data != NULL);
arc_release(db->db_buf, db);
rw_enter(&db->db_rwlock, RW_WRITER);
- bzero(db->db.db_data, db->db.db_size);
+ memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
arc_buf_freeze(db->db_buf);
}
@@ -2022,10 +2022,10 @@ dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
/* copy old block data to the new block */
old_buf = db->db_buf;
- bcopy(old_buf->b_data, buf->b_data, MIN(osize, size));
+ memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
/* zero the remainder */
if (size > osize)
- bzero((uint8_t *)buf->b_data + osize, size - osize);
+ memset((uint8_t *)buf->b_data + osize, 0, size - osize);
mutex_enter(&db->db_mtx);
dbuf_set_data(db, buf);
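The dbuf_new_size() hunk above is the classic grow-and-zero-tail move: copy the smaller of the two sizes, then clear whatever the new buffer gained. Standalone:

#include <string.h>

static void
grow_copy(void *nbuf, const void *obuf, size_t nsize, size_t osize)
{
	size_t keep = osize < nsize ? osize : nsize;

	memcpy(nbuf, obuf, keep);	/* preserve old contents */
	if (nsize > osize)
		memset((char *)nbuf + keep, 0, nsize - keep);	/* zero tail */
}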
@@ -2655,9 +2655,9 @@ dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
dr->dt.dl.dr_has_raw_params = B_TRUE;
dr->dt.dl.dr_byteorder = byteorder;
- bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN);
- bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN);
- bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN);
+ memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
+ memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
+ memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
}
static void
@@ -2690,7 +2690,7 @@ dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
/* we were freed while filling */
/* XXX dbuf_undirty? */
- bzero(db->db.db_data, db->db.db_size);
+ memset(db->db.db_data, 0, db->db.db_size);
db->db_freed_in_flight = FALSE;
DTRACE_SET_STATE(db,
"fill done handling freed in flight");
@@ -2802,7 +2802,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
ASSERT(!arc_is_encrypted(buf));
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
- bcopy(buf->b_data, db->db.db_data, db->db.db_size);
+ memcpy(db->db.db_data, buf->b_data, db->db.db_size);
arc_buf_destroy(buf, db);
return;
}
@@ -3516,7 +3516,7 @@ dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
}
rw_enter(&db->db_rwlock, RW_WRITER);
- bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
+ memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
rw_exit(&db->db_rwlock);
}
@@ -4040,7 +4040,7 @@ dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
dnode_t *dn = dr->dr_dnode;
ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
- bcopy(data, DN_BONUS(dn->dn_phys), DN_MAX_BONUS_LEN(dn->dn_phys));
+ memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
dbuf_sync_leaf_verify_bonus_dnode(dr);
@@ -4460,7 +4460,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
} else {
*datap = arc_alloc_buf(os->os_spa, db, type, psize);
}
- bcopy(db->db.db_data, (*datap)->b_data, psize);
+ memcpy((*datap)->b_data, db->db.db_data, psize);
}
db->db_data_pending = dr;
@@ -4640,7 +4640,7 @@ dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
* zero out.
*/
rw_enter(&db->db_rwlock, RW_WRITER);
- bzero(db->db.db_data, db->db.db_size);
+ memset(db->db.db_data, 0, db->db.db_size);
rw_exit(&db->db_rwlock);
}
DB_DNODE_EXIT(db);
diff --git a/module/zfs/ddt.c b/module/zfs/ddt.c
index f1415353f4b2..61a9c8f0eaed 100644
--- a/module/zfs/ddt.c
+++ b/module/zfs/ddt.c
@@ -46,11 +46,11 @@ static kmem_cache_t *ddt_entry_cache;
*/
int zfs_dedup_prefetch = 0;
-static const ddt_ops_t *ddt_ops[DDT_TYPES] = {
+static const ddt_ops_t *const ddt_ops[DDT_TYPES] = {
&ddt_zap_ops,
};
-static const char *ddt_class_name[DDT_CLASSES] = {
+static const char *const ddt_class_name[DDT_CLASSES] = {
"ditto",
"duplicate",
"unique",
@@ -99,7 +99,7 @@ ddt_object_destroy(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
VERIFY(zap_remove(os, DMU_POOL_DIRECTORY_OBJECT, name, tx) == 0);
VERIFY(zap_remove(os, spa->spa_ddt_stat_object, name, tx) == 0);
VERIFY(ddt_ops[type]->ddt_op_destroy(os, *objectp, tx) == 0);
- bzero(&ddt->ddt_object_stats[type][class], sizeof (ddt_object_t));
+ memset(&ddt->ddt_object_stats[type][class], 0, sizeof (ddt_object_t));
*objectp = 0;
}
@@ -322,7 +322,7 @@ ddt_phys_fill(ddt_phys_t *ddp, const blkptr_t *bp)
void
ddt_phys_clear(ddt_phys_t *ddp)
{
- bzero(ddp, sizeof (*ddp));
+ memset(ddp, 0, sizeof (*ddp));
}
void
@@ -390,7 +390,7 @@ ddt_stat_generate(ddt_t *ddt, ddt_entry_t *dde, ddt_stat_t *dds)
uint64_t lsize = DDK_GET_LSIZE(ddk);
uint64_t psize = DDK_GET_PSIZE(ddk);
- bzero(dds, sizeof (*dds));
+ memset(dds, 0, sizeof (*dds));
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
uint64_t dsize = 0;
@@ -454,7 +454,7 @@ ddt_histogram_add(ddt_histogram_t *dst, const ddt_histogram_t *src)
void
ddt_histogram_stat(ddt_stat_t *dds, const ddt_histogram_t *ddh)
{
- bzero(dds, sizeof (*dds));
+ memset(dds, 0, sizeof (*dds));
for (int h = 0; h < 64; h++)
ddt_stat_add(dds, &ddh->ddh_stat[h], 0);
@@ -532,7 +532,7 @@ ddt_get_dedup_dspace(spa_t *spa)
if (spa->spa_dedup_dspace != ~0ULL)
return (spa->spa_dedup_dspace);
- bzero(&dds_total, sizeof (ddt_stat_t));
+ memset(&dds_total, 0, sizeof (ddt_stat_t));
/* Calculate and cache the stats */
ddt_get_dedup_stats(spa, &dds_total);
@@ -566,7 +566,7 @@ ddt_compress(void *src, uchar_t *dst, size_t s_len, size_t d_len)
if (c_len == s_len) {
cpfunc = ZIO_COMPRESS_OFF;
- bcopy(src, dst, s_len);
+ memcpy(dst, src, s_len);
}
*version = cpfunc;
@@ -586,7 +586,7 @@ ddt_decompress(uchar_t *src, void *dst, size_t s_len, size_t d_len)
if (ci->ci_decompress != NULL)
(void) ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level);
else
- bcopy(src, dst, d_len);
+ memcpy(dst, src, d_len);
if (((version & DDT_COMPRESS_BYTEORDER_MASK) != 0) !=
(ZFS_HOST_BYTEORDER != 0))
@@ -633,7 +633,7 @@ ddt_alloc(const ddt_key_t *ddk)
ddt_entry_t *dde;
dde = kmem_cache_alloc(ddt_entry_cache, KM_SLEEP);
- bzero(dde, sizeof (ddt_entry_t));
+ memset(dde, 0, sizeof (ddt_entry_t));
cv_init(&dde->dde_cv, NULL, CV_DEFAULT, NULL);
dde->dde_key = *ddk;
@@ -785,7 +785,7 @@ ddt_table_alloc(spa_t *spa, enum zio_checksum c)
ddt_t *ddt;
ddt = kmem_cache_alloc(ddt_cache, KM_SLEEP);
- bzero(ddt, sizeof (ddt_t));
+ memset(ddt, 0, sizeof (ddt_t));
mutex_init(&ddt->ddt_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&ddt->ddt_tree, ddt_entry_compare,
@@ -847,7 +847,7 @@ ddt_load(spa_t *spa)
/*
* Seed the cached histograms.
*/
- bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
+ memcpy(&ddt->ddt_histogram_cache, ddt->ddt_histogram,
sizeof (ddt->ddt_histogram));
spa->spa_dedup_dspace = ~0ULL;
}
@@ -919,7 +919,7 @@ ddt_repair_start(ddt_t *ddt, const blkptr_t *bp)
}
}
- bzero(dde->dde_phys, sizeof (dde->dde_phys));
+ memset(dde->dde_phys, 0, sizeof (dde->dde_phys));
return (dde);
}
@@ -964,7 +964,7 @@ ddt_repair_entry(ddt_t *ddt, ddt_entry_t *dde, ddt_entry_t *rdde, zio_t *rio)
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++, rddp++) {
if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth != rddp->ddp_phys_birth ||
- bcmp(ddp->ddp_dva, rddp->ddp_dva, sizeof (ddp->ddp_dva)))
+ memcmp(ddp->ddp_dva, rddp->ddp_dva, sizeof (ddp->ddp_dva)))
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
zio_nowait(zio_rewrite(zio, zio->io_spa, 0, &blk,
@@ -1108,7 +1108,7 @@ ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
}
}
- bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
+ memcpy(&ddt->ddt_histogram_cache, ddt->ddt_histogram,
sizeof (ddt->ddt_histogram));
spa->spa_dedup_dspace = ~0ULL;
}
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 874ddc800870..461feeffb6a3 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -1012,7 +1012,7 @@ dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
if (dn->dn_maxblkid == 0) {
uint64_t newsz = offset > dn->dn_datablksz ? 0 :
MIN(size, dn->dn_datablksz - offset);
- bzero((char *)buf + newsz, size - newsz);
+ memset((char *)buf + newsz, 0, size - newsz);
size = newsz;
}
@@ -2077,9 +2077,9 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
zp->zp_nopwrite = nopwrite;
zp->zp_encrypt = encrypt;
zp->zp_byteorder = ZFS_HOST_BYTEORDER;
- bzero(zp->zp_salt, ZIO_DATA_SALT_LEN);
- bzero(zp->zp_iv, ZIO_DATA_IV_LEN);
- bzero(zp->zp_mac, ZIO_DATA_MAC_LEN);
+ memset(zp->zp_salt, 0, ZIO_DATA_SALT_LEN);
+ memset(zp->zp_iv, 0, ZIO_DATA_IV_LEN);
+ memset(zp->zp_mac, 0, ZIO_DATA_MAC_LEN);
zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
os->os_zpl_special_smallblock : 0;
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 9a74fa9ce360..8c2e75fc9306 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -516,8 +516,8 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
if (arc_buf_size(os->os_phys_buf) < size) {
arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
- bzero(buf->b_data, size);
- bcopy(os->os_phys_buf->b_data, buf->b_data,
+ memset(buf->b_data, 0, size);
+ memcpy(buf->b_data, os->os_phys_buf->b_data,
arc_buf_size(os->os_phys_buf));
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
os->os_phys_buf = buf;
@@ -531,7 +531,7 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
os->os_phys = os->os_phys_buf->b_data;
- bzero(os->os_phys, size);
+ memset(os->os_phys, 0, size);
}
/*
* These properties will be filled in by the logic in zfs_get_zplprop()
diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c
index b34c1bc6934e..3c5cd36714bd 100644
--- a/module/zfs/dmu_recv.c
+++ b/module/zfs/dmu_recv.c
@@ -1148,7 +1148,7 @@ dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
dmu_recv_begin_arg_t drba = { 0 };
int err;
- bzero(drc, sizeof (dmu_recv_cookie_t));
+ memset(drc, 0, sizeof (dmu_recv_cookie_t));
drc->drc_drr_begin = drr_begin;
drc->drc_drrb = &drr_begin->drr_u.drr_begin;
drc->drc_tosnap = tosnap;
@@ -1211,7 +1211,6 @@ dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
&drba, 5, ZFS_SPACE_CHECK_NORMAL);
} else {
-
/*
* For non-raw, non-incremental, non-resuming receives the
* user can specify encryption parameters on the command line
@@ -1808,7 +1807,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
dmu_buf_will_dirty(db, tx);
ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
- bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro));
+ memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro));
/*
* Raw bonus buffers have their byteorder determined by the
@@ -1949,11 +1948,11 @@ flush_write_batch_impl(struct receive_writer_arg *rwa)
zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
rwa->byteswap;
- bcopy(drrw->drr_salt, zp.zp_salt,
+ memcpy(zp.zp_salt, drrw->drr_salt,
ZIO_DATA_SALT_LEN);
- bcopy(drrw->drr_iv, zp.zp_iv,
+ memcpy(zp.zp_iv, drrw->drr_iv,
ZIO_DATA_IV_LEN);
- bcopy(drrw->drr_mac, zp.zp_mac,
+ memcpy(zp.zp_mac, drrw->drr_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
zp.zp_nopwrite = B_FALSE;
@@ -2218,7 +2217,7 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
}
}
- bcopy(abd_to_buf(abd), abuf->b_data, DRR_SPILL_PAYLOAD_SIZE(drrs));
+ memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs));
abd_free(abd);
dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
@@ -2291,9 +2290,9 @@ receive_object_range(struct receive_writer_arg *rwa,
rwa->or_crypt_params_present = B_TRUE;
rwa->or_firstobj = drror->drr_firstobj;
rwa->or_numslots = drror->drr_numslots;
- bcopy(drror->drr_salt, rwa->or_salt, ZIO_DATA_SALT_LEN);
- bcopy(drror->drr_iv, rwa->or_iv, ZIO_DATA_IV_LEN);
- bcopy(drror->drr_mac, rwa->or_mac, ZIO_DATA_MAC_LEN);
+ memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN);
+ memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN);
+ memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN);
rwa->or_byteorder = byteorder;
return (0);
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index 021dffefa141..d3567d1efe69 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -379,7 +379,7 @@ dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
}
}
/* create a FREE record and make it pending */
- bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREE;
drrf->drr_object = object;
drrf->drr_offset = offset;
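Each dump_*() writer above re-zeroes the shared dmu_replay_record_t before filling the type-specific fields, so nothing from the previous record (or from struct padding) can reach the stream. A reduced reset-then-fill sketch with hypothetical types:

#include <stdint.h>
#include <string.h>

typedef struct { int drr_type; uint64_t drr_object; } record_t;

static void
emit_free_record(record_t *drr, uint64_t object)
{
	memset(drr, 0, sizeof (*drr));	/* wipe the previous record */
	drr->drr_type = 1;		/* stand-in for DRR_FREE */
	drr->drr_object = object;
}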
@@ -438,7 +438,7 @@ dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
}
}
/* create a REDACT record and make it pending */
- bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_REDACT;
drrr->drr_object = object;
drrr->drr_offset = offset;
@@ -480,7 +480,7 @@ dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a WRITE record */
- bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE;
drrw->drr_object = object;
drrw->drr_type = type;
@@ -571,7 +571,7 @@ dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
ASSERT(BP_IS_EMBEDDED(bp));
- bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
drrw->drr_object = object;
drrw->drr_offset = offset;
@@ -604,7 +604,7 @@ dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
}
/* write a SPILL record */
- bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_SPILL;
drrs->drr_object = object;
drrs->drr_length = blksz;
@@ -686,7 +686,7 @@ dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
}
/* write a FREEOBJECTS record */
- bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
drrfo->drr_firstobj = firstobj;
drrfo->drr_numobjs = numobjs;
@@ -727,7 +727,7 @@ dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
}
/* write an OBJECT record */
- bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT;
drro->drr_object = object;
drro->drr_type = dnp->dn_type;
@@ -801,7 +801,7 @@ dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
struct send_range record;
blkptr_t *bp = DN_SPILL_BLKPTR(dnp);
- bzero(&record, sizeof (struct send_range));
+ memset(&record, 0, sizeof (struct send_range));
record.type = DATA;
record.object = object;
record.eos_marker = B_FALSE;
@@ -841,7 +841,7 @@ dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
dscp->dsc_pending_op = PENDING_NONE;
}
- bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
drror->drr_firstobj = firstobj;
drror->drr_numslots = numslots;
@@ -1136,7 +1136,7 @@ send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
record->sru.object.bp = *bp;
size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
- bcopy(dnp, record->sru.object.dnp, size);
+ memcpy(record->sru.object.dnp, dnp, size);
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
@@ -2597,7 +2597,7 @@ dmu_send_impl(struct dmu_send_params *dspp)
* the receive side that the stream is incomplete.
*/
if (!dspp->savedok) {
- bzero(drr, sizeof (dmu_replay_record_t));
+ memset(drr, 0, sizeof (dmu_replay_record_t));
drr->drr_type = DRR_END;
drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc;
drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid;
@@ -2698,7 +2698,7 @@ dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
uint64_t size = dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP);
- bcopy(fromredact, dspp.fromredactsnaps, size);
+ memcpy(dspp.fromredactsnaps, fromredact, size);
}
boolean_t is_before =
@@ -2883,7 +2883,7 @@ dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size,
KM_SLEEP);
- bcopy(fromredact, dspp.fromredactsnaps,
+ memcpy(dspp.fromredactsnaps, fromredact,
size);
}
if (!dsl_dataset_is_before(dspp.to_ds, fromds,
diff --git a/module/zfs/dmu_traverse.c b/module/zfs/dmu_traverse.c
index 8afcd776a9eb..41baff73a172 100644
--- a/module/zfs/dmu_traverse.c
+++ b/module/zfs/dmu_traverse.c
@@ -168,8 +168,8 @@ resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
* If we found the block we're trying to resume from, zero
* the bookmark out to indicate that we have resumed.
*/
- if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
- bzero(td->td_resume, sizeof (*zb));
+ if (memcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
+ memset(td->td_resume, 0, sizeof (*zb));
if (td->td_flags & TRAVERSE_POST)
return (RESUME_SKIP_CHILDREN);
}
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index 7b53b7cd0534..af0ee1b0f8b1 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -128,15 +128,15 @@ dnode_cons(void *arg, void *unused, int kmflag)
zfs_refcount_create(&dn->dn_tx_holds);
list_link_init(&dn->dn_link);
- bzero(&dn->dn_next_type[0], sizeof (dn->dn_next_type));
- bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
- bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
- bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
- bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
- bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
- bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
- bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));
- bzero(&dn->dn_next_maxblkid[0], sizeof (dn->dn_next_maxblkid));
+ memset(dn->dn_next_type, 0, sizeof (dn->dn_next_type));
+ memset(dn->dn_next_nblkptr, 0, sizeof (dn->dn_next_nblkptr));
+ memset(dn->dn_next_nlevels, 0, sizeof (dn->dn_next_nlevels));
+ memset(dn->dn_next_indblkshift, 0, sizeof (dn->dn_next_indblkshift));
+ memset(dn->dn_next_bonustype, 0, sizeof (dn->dn_next_bonustype));
+ memset(dn->dn_rm_spillblk, 0, sizeof (dn->dn_rm_spillblk));
+ memset(dn->dn_next_bonuslen, 0, sizeof (dn->dn_next_bonuslen));
+ memset(dn->dn_next_blksz, 0, sizeof (dn->dn_next_blksz));
+ memset(dn->dn_next_maxblkid, 0, sizeof (dn->dn_next_maxblkid));
for (int i = 0; i < TXG_SIZE; i++) {
multilist_link_init(&dn->dn_dirty_link[i]);
@@ -317,7 +317,7 @@ dnode_byteswap(dnode_phys_t *dnp)
int i;
if (dnp->dn_type == DMU_OT_NONE) {
- bzero(dnp, sizeof (dnode_phys_t));
+ memset(dnp, 0, sizeof (dnode_phys_t));
return;
}
@@ -395,7 +395,7 @@ dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
/* clear any data after the end of the new size */
size_t diff = dn->dn_bonuslen - newsize;
char *data_end = ((char *)dn->dn_bonus->db.db_data) + newsize;
- bzero(data_end, diff);
+ memset(data_end, 0, diff);
}
dn->dn_bonuslen = newsize;
@@ -596,7 +596,7 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
DNODE_STAT_BUMP(dnode_allocate);
ASSERT(dn->dn_type == DMU_OT_NONE);
- ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
+ ASSERT0(memcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)));
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
ASSERT(ot != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(ot));
@@ -749,8 +749,6 @@ dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
static void
dnode_move_impl(dnode_t *odn, dnode_t *ndn)
{
- int i;
-
ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
@@ -774,29 +772,29 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
ndn->dn_datablksz = odn->dn_datablksz;
ndn->dn_maxblkid = odn->dn_maxblkid;
ndn->dn_num_slots = odn->dn_num_slots;
- bcopy(&odn->dn_next_type[0], &ndn->dn_next_type[0],
+ memcpy(ndn->dn_next_type, odn->dn_next_type,
sizeof (odn->dn_next_type));
- bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
+ memcpy(ndn->dn_next_nblkptr, odn->dn_next_nblkptr,
sizeof (odn->dn_next_nblkptr));
- bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
+ memcpy(ndn->dn_next_nlevels, odn->dn_next_nlevels,
sizeof (odn->dn_next_nlevels));
- bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
+ memcpy(ndn->dn_next_indblkshift, odn->dn_next_indblkshift,
sizeof (odn->dn_next_indblkshift));
- bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
+ memcpy(ndn->dn_next_bonustype, odn->dn_next_bonustype,
sizeof (odn->dn_next_bonustype));
- bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
+ memcpy(ndn->dn_rm_spillblk, odn->dn_rm_spillblk,
sizeof (odn->dn_rm_spillblk));
- bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
+ memcpy(ndn->dn_next_bonuslen, odn->dn_next_bonuslen,
sizeof (odn->dn_next_bonuslen));
- bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
+ memcpy(ndn->dn_next_blksz, odn->dn_next_blksz,
sizeof (odn->dn_next_blksz));
- bcopy(&odn->dn_next_maxblkid[0], &ndn->dn_next_maxblkid[0],
+ memcpy(ndn->dn_next_maxblkid, odn->dn_next_maxblkid,
sizeof (odn->dn_next_maxblkid));
- for (i = 0; i < TXG_SIZE; i++) {
+ for (int i = 0; i < TXG_SIZE; i++) {
list_move_tail(&ndn->dn_dirty_records[i],
&odn->dn_dirty_records[i]);
}
- bcopy(&odn->dn_free_ranges[0], &ndn->dn_free_ranges[0],
+ memcpy(ndn->dn_free_ranges, odn->dn_free_ranges,
sizeof (odn->dn_free_ranges));
ndn->dn_allocated_txg = odn->dn_allocated_txg;
ndn->dn_free_txg = odn->dn_free_txg;
@@ -850,7 +848,7 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
/*
* Satisfy the destructor.
*/
- for (i = 0; i < TXG_SIZE; i++) {
+ for (int i = 0; i < TXG_SIZE; i++) {
list_create(&odn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
@@ -2081,7 +2079,7 @@ dnode_partial_zero(dnode_t *dn, uint64_t off, uint64_t blkoff, uint64_t len,
dmu_buf_will_dirty(&db->db, tx);
data = db->db.db_data;
- bzero(data + blkoff, len);
+ memset(data + blkoff, 0, len);
}
dbuf_rele(db, FTAG);
}
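Several dnode.c hunks also drop the old &arr[0] spelling: for a true array, memset(arr, 0, sizeof (arr)) already covers the whole object. A sketch (TXG_SIZE and the field name are borrowed for illustration); note this only works where the array itself is in scope, since sizeof on a pointer parameter would yield the pointer size instead:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TXG_SIZE        4

int
main(void)
{
        uint64_t next_blksz[TXG_SIZE] = { 1, 2, 3, 4 };

        /*
         * sizeof on the array names the whole object (32 bytes here),
         * so one memset() zeroes every element.
         */
        memset(next_blksz, 0, sizeof (next_blksz));

        for (int i = 0; i < TXG_SIZE; i++)
                printf("%llu ", (unsigned long long)next_blksz[i]);
        printf("\n");
        return (0);
}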
diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c
index 12ab4bea145f..973f93c4ec01 100644
--- a/module/zfs/dnode_sync.c
+++ b/module/zfs/dnode_sync.c
@@ -82,7 +82,7 @@ dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
ASSERT(db->db.db_data);
ASSERT(arc_released(db->db_buf));
ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
- bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
+ memcpy(db->db.db_data, dn->dn_phys->dn_blkptr,
sizeof (blkptr_t) * nblkptr);
arc_buf_freeze(db->db_buf);
@@ -119,7 +119,7 @@ dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
mutex_exit(&child->db_mtx);
}
- bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);
+ memset(dn->dn_phys->dn_blkptr, 0, sizeof (blkptr_t) * nblkptr);
rw_exit(&db->db_rwlock);
if (dn->dn_dbuf != NULL)
@@ -158,7 +158,7 @@ free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
dmu_object_type_t type = BP_GET_TYPE(bp);
uint64_t lvl = BP_GET_LEVEL(bp);
- bzero(bp, sizeof (blkptr_t));
+ memset(bp, 0, sizeof (blkptr_t));
if (spa_feature_is_active(dn->dn_objset->os_spa,
SPA_FEATURE_HOLE_BIRTH)) {
@@ -347,7 +347,7 @@ free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
rw_enter(&db->db_rwlock, RW_WRITER);
for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
ASSERT(BP_IS_HOLE(bp));
- bzero(db->db.db_data, db->db.db_size);
+ memset(db->db.db_data, 0, db->db.db_size);
free_blocks(dn, db->db_blkptr, 1, tx);
rw_exit(&db->db_rwlock);
}
@@ -597,7 +597,7 @@ dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
ASSERT(dn->dn_free_txg > 0);
if (dn->dn_allocated_txg != dn->dn_free_txg)
dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
- bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);
+ memset(dn->dn_phys, 0, sizeof (dnode_phys_t) * dn->dn_num_slots);
dnode_free_interior_slots(dn);
mutex_enter(&dn->dn_mtx);
@@ -634,7 +634,7 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
ASSERT(dnp->dn_type != DMU_OT_NONE ||
- bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
+ memcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
DNODE_VERIFY(dn);
ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));
@@ -827,7 +827,7 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
ASSERT(dn->dn_allocated_txg == tx->tx_txg);
if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
/* zero the new blkptrs we are gaining */
- bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
+ memset(dnp->dn_blkptr + dnp->dn_nblkptr, 0,
sizeof (blkptr_t) *
(dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
diff --git a/module/zfs/dsl_bookmark.c b/module/zfs/dsl_bookmark.c
index b8e3523ffc2d..df43ff2180ac 100644
--- a/module/zfs/dsl_bookmark.c
+++ b/module/zfs/dsl_bookmark.c
@@ -82,7 +82,7 @@ dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname,
* Zero out the bookmark in case the one stored on disk
* is in an older, shorter format.
*/
- bzero(bmark_phys, sizeof (*bmark_phys));
+ memset(bmark_phys, 0, sizeof (*bmark_phys));
err = zap_lookup_norm(mos, bmark_zapobj, shortname, sizeof (uint64_t),
sizeof (*bmark_phys) / sizeof (uint64_t), bmark_phys, mt, NULL, 0,
@@ -381,7 +381,7 @@ dsl_bookmark_set_phys(zfs_bookmark_phys_t *zbm, dsl_dataset_t *snap)
&zbm->zbm_uncompressed_freed_before_next_snap);
dsl_dataset_rele(nextds, FTAG);
} else {
- bzero(&zbm->zbm_flags,
+ memset(&zbm->zbm_flags, 0,
sizeof (zfs_bookmark_phys_t) -
offsetof(zfs_bookmark_phys_t, zbm_flags));
}
@@ -426,8 +426,8 @@ dsl_bookmark_node_add(dsl_dataset_t *hds, dsl_bookmark_node_t *dbn,
spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2, tx);
}
- __attribute__((unused)) zfs_bookmark_phys_t zero_phys = { 0 };
- ASSERT0(bcmp(((char *)&dbn->dbn_phys) + bookmark_phys_size,
+ zfs_bookmark_phys_t zero_phys = { 0 };
+ ASSERT0(memcmp(((char *)&dbn->dbn_phys) + bookmark_phys_size,
&zero_phys, sizeof (zfs_bookmark_phys_t) - bookmark_phys_size));
VERIFY0(zap_add(mos, hds->ds_bookmarks_obj, dbn->dbn_name,
@@ -482,7 +482,7 @@ dsl_bookmark_create_sync_impl_snap(const char *bookmark, const char *snapshot,
sizeof (redaction_list_phys_t) + num_redact_snaps *
sizeof (uint64_t));
dmu_buf_will_dirty(local_rl->rl_dbuf, tx);
- bcopy(redact_snaps, local_rl->rl_phys->rlp_snaps,
+ memcpy(local_rl->rl_phys->rlp_snaps, redact_snaps,
sizeof (uint64_t) * num_redact_snaps);
local_rl->rl_phys->rlp_num_snaps = num_redact_snaps;
if (bookmark_redacted) {
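dsl_bookmark_set_phys() zeroes only the tail of the structure, from zbm_flags onward. The offsetof() arithmetic generalizes like this (a hypothetical layout standing in for zfs_bookmark_phys_t):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct bmark_phys {
        uint64_t bp_guid;
        uint64_t bp_createtxg;
        uint64_t bp_creation_time;
        uint64_t bp_flags;              /* first "v2" field */
        uint64_t bp_referenced_bytes;
} bmark_phys_t;

int
main(void)
{
        bmark_phys_t b = { 1, 2, 3, 4, 5 };

        /* Zero everything from bp_flags to the end of the struct. */
        memset(&b.bp_flags, 0,
            sizeof (bmark_phys_t) - offsetof(bmark_phys_t, bp_flags));

        printf("%llu %llu\n", (unsigned long long)b.bp_createtxg,
            (unsigned long long)b.bp_flags);    /* prints "2 0" */
        return (0);
}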
diff --git a/module/zfs/dsl_crypt.c b/module/zfs/dsl_crypt.c
index 6330a44b4c39..d802eb6b68c4 100644
--- a/module/zfs/dsl_crypt.c
+++ b/module/zfs/dsl_crypt.c
@@ -97,7 +97,7 @@ dsl_wrapping_key_free(dsl_wrapping_key_t *wkey)
ASSERT0(zfs_refcount_count(&wkey->wk_refcnt));
if (wkey->wk_key.ck_data) {
- bzero(wkey->wk_key.ck_data,
+ memset(wkey->wk_key.ck_data, 0,
CRYPTO_BITS2BYTES(wkey->wk_key.ck_length));
kmem_free(wkey->wk_key.ck_data,
CRYPTO_BITS2BYTES(wkey->wk_key.ck_length));
@@ -120,7 +120,7 @@ dsl_wrapping_key_create(uint8_t *wkeydata, zfs_keyformat_t keyformat,
wkey->wk_key.ck_data = kmem_alloc(WRAPPING_KEY_LEN, KM_SLEEP);
wkey->wk_key.ck_length = CRYPTO_BYTES2BITS(WRAPPING_KEY_LEN);
- bcopy(wkeydata, wkey->wk_key.ck_data, WRAPPING_KEY_LEN);
+ memcpy(wkey->wk_key.ck_data, wkeydata, WRAPPING_KEY_LEN);
/* initialize the rest of the struct */
zfs_refcount_create(&wkey->wk_refcnt);
@@ -591,7 +591,7 @@ dsl_crypto_key_open(objset_t *mos, dsl_wrapping_key_t *wkey,
error:
if (dck != NULL) {
- bzero(dck, sizeof (dsl_crypto_key_t));
+ memset(dck, 0, sizeof (dsl_crypto_key_t));
kmem_free(dck, sizeof (dsl_crypto_key_t));
}
@@ -2095,8 +2095,8 @@ dsl_crypto_recv_raw_objset_sync(dsl_dataset_t *ds, dmu_objset_type_t ostype,
* written out raw next time.
*/
arc_release(os->os_phys_buf, &os->os_phys_buf);
- bcopy(portable_mac, os->os_phys->os_portable_mac, ZIO_OBJSET_MAC_LEN);
- bzero(os->os_phys->os_local_mac, ZIO_OBJSET_MAC_LEN);
+ memcpy(os->os_phys->os_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
+ memset(os->os_phys->os_local_mac, 0, ZIO_OBJSET_MAC_LEN);
os->os_flags &= ~OBJSET_FLAG_USERACCOUNTING_COMPLETE;
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
@@ -2547,7 +2547,7 @@ dsl_crypto_key_create_sync(uint64_t crypt, dsl_wrapping_key_t *wkey,
DSL_CRYPTO_KEY_VERSION, sizeof (uint64_t), 1, &version, tx));
zio_crypt_key_destroy(&dck.dck_key);
- bzero(&dck.dck_key, sizeof (zio_crypt_key_t));
+ memset(&dck.dck_key, 0, sizeof (zio_crypt_key_t));
return (dck.dck_obj);
}
@@ -2687,14 +2687,15 @@ spa_do_crypt_objset_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj,
/* if we are generating encode the HMACs in the objset_phys_t */
if (generate) {
- bcopy(portable_mac, osp->os_portable_mac, ZIO_OBJSET_MAC_LEN);
- bcopy(local_mac, osp->os_local_mac, ZIO_OBJSET_MAC_LEN);
+ memcpy(osp->os_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
+ memcpy(osp->os_local_mac, local_mac, ZIO_OBJSET_MAC_LEN);
abd_return_buf_copy(abd, buf, datalen);
return (0);
}
- if (bcmp(portable_mac, osp->os_portable_mac, ZIO_OBJSET_MAC_LEN) != 0 ||
- bcmp(local_mac, osp->os_local_mac, ZIO_OBJSET_MAC_LEN) != 0) {
+ if (memcmp(portable_mac, osp->os_portable_mac,
+ ZIO_OBJSET_MAC_LEN) != 0 ||
+ memcmp(local_mac, osp->os_local_mac, ZIO_OBJSET_MAC_LEN) != 0) {
abd_return_buf(abd, buf, datalen);
return (SET_ERROR(ECKSUM));
}
@@ -2738,11 +2739,11 @@ spa_do_crypt_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj, abd_t *abd,
* Otherwise verify that the MAC matched what we expected.
*/
if (generate) {
- bcopy(digestbuf, mac, ZIO_DATA_MAC_LEN);
+ memcpy(mac, digestbuf, ZIO_DATA_MAC_LEN);
return (0);
}
- if (bcmp(digestbuf, mac, ZIO_DATA_MAC_LEN) != 0)
+ if (memcmp(digestbuf, mac, ZIO_DATA_MAC_LEN) != 0)
return (SET_ERROR(ECKSUM));
return (0);
@@ -2841,9 +2842,9 @@ spa_do_crypt_abd(boolean_t encrypt, spa_t *spa, const zbookmark_phys_t *zb,
error:
if (encrypt) {
/* zero out any state we might have changed while encrypting */
- bzero(salt, ZIO_DATA_SALT_LEN);
- bzero(iv, ZIO_DATA_IV_LEN);
- bzero(mac, ZIO_DATA_MAC_LEN);
+ memset(salt, 0, ZIO_DATA_SALT_LEN);
+ memset(iv, 0, ZIO_DATA_IV_LEN);
+ memset(mac, 0, ZIO_DATA_MAC_LEN);
abd_return_buf(pabd, plainbuf, datalen);
abd_return_buf_copy(cabd, cipherbuf, datalen);
} else {
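In dsl_crypt.c the memset() calls scrub wrapping-key bytes and crypto state before the backing memory is freed. A userland sketch of the wipe-before-free idiom; note that outside the kernel an optimizing compiler may elide a memset() whose buffer is never read again, which is why portable hardened code often prefers explicit_bzero(3) where available:

#include <stdlib.h>
#include <string.h>

#define KEYLEN  32

int
main(void)
{
        unsigned char *key = malloc(KEYLEN);

        if (key == NULL)
                return (1);
        memset(key, 0xA5, KEYLEN);      /* stand-in for key material */

        /* Wipe before releasing, as the dsl_crypt paths do. */
        memset(key, 0, KEYLEN);
        free(key);
        return (0);
}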
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index 85b48fd12b63..e836d681e920 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -1148,7 +1148,7 @@ dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
- bzero(dsphys, sizeof (dsl_dataset_phys_t));
+ memset(dsphys, 0, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = dd->dd_object;
dsphys->ds_flags = flags;
dsphys->ds_fsid_guid = unique_create();
@@ -1248,11 +1248,11 @@ dsl_dataset_zero_zil(dsl_dataset_t *ds, dmu_tx_t *tx)
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
- if (bcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
+ if (memcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
dsl_pool_t *dp = ds->ds_dir->dd_pool;
zio_t *zio;
- bzero(&os->os_zil_header, sizeof (os->os_zil_header));
+ memset(&os->os_zil_header, 0, sizeof (os->os_zil_header));
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
@@ -1696,7 +1696,7 @@ dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
*/
ASSERT(spa_version(dmu_tx_pool(tx)->dp_spa) >= SPA_VERSION_FAST_SNAP ||
dmu_objset_from_ds(ds, &os) != 0 ||
- bcmp(&os->os_phys->os_zil_header, &zero_zil,
+ memcmp(&os->os_phys->os_zil_header, &zero_zil,
sizeof (zero_zil)) == 0);
/* Should not snapshot a dirty dataset. */
@@ -1718,7 +1718,7 @@ dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
- bzero(dsphys, sizeof (dsl_dataset_phys_t));
+ memset(dsphys, 0, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = ds->ds_dir->dd_object;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
@@ -2895,7 +2895,7 @@ dsl_dataset_modified_since_snap(dsl_dataset_t *ds, dsl_dataset_t *snap)
return (B_TRUE);
if (dmu_objset_from_ds(snap, &os_snap) != 0)
return (B_TRUE);
- return (bcmp(&os->os_phys->os_meta_dnode,
+ return (memcmp(&os->os_phys->os_meta_dnode,
&os_snap->os_phys->os_meta_dnode,
sizeof (os->os_phys->os_meta_dnode)) != 0);
}
@@ -4916,7 +4916,7 @@ dsl_dataset_activate_redaction(dsl_dataset_t *ds, uint64_t *redact_snaps,
if (num_redact_snaps > 0) {
ftuaa->array = kmem_alloc(num_redact_snaps * sizeof (uint64_t),
KM_SLEEP);
- bcopy(redact_snaps, ftuaa->array, num_redact_snaps *
+ memcpy(ftuaa->array, redact_snaps, num_redact_snaps *
sizeof (uint64_t));
}
dsl_dataset_activate_feature(dsobj, SPA_FEATURE_REDACTED_DATASETS,
diff --git a/module/zfs/dsl_deadlist.c b/module/zfs/dsl_deadlist.c
index e620510be6b1..f516b1c3111e 100644
--- a/module/zfs/dsl_deadlist.c
+++ b/module/zfs/dsl_deadlist.c
@@ -855,7 +855,7 @@ dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
VERIFY0(dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
dlp = bonus->db_data;
dmu_buf_will_dirty(bonus, tx);
- bzero(dlp, sizeof (*dlp));
+ memset(dlp, 0, sizeof (*dlp));
dmu_buf_rele(bonus, FTAG);
mutex_exit(&dl->dl_lock);
}
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index c6a5807c92f5..dedf9c8a669a 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -393,7 +393,7 @@ dsl_scan_resilvering(dsl_pool_t *dp)
static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
- bzero(bp, sizeof (*bp));
+ memset(bp, 0, sizeof (*bp));
bp->blk_prop = sio->sio_blk_prop;
bp->blk_phys_birth = sio->sio_phys_birth;
bp->blk_birth = sio->sio_birth;
@@ -403,7 +403,7 @@ sio2bp(const scan_io_t *sio, blkptr_t *bp)
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
- bcopy(sio->sio_dva, bp->blk_dva, sio->sio_nr_dvas * sizeof (dva_t));
+ memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
}
static inline void
@@ -508,7 +508,7 @@ dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
return (EOVERFLOW);
}
- bcopy(zaptmp, &scn->scn_phys,
+ memcpy(&scn->scn_phys, zaptmp,
SCAN_PHYS_NUMINTS * sizeof (uint64_t));
scn->scn_phys.scn_flags = overflow;
@@ -567,7 +567,7 @@ dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
}
}
- bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
+ memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
/* reload the queue into the in-core state */
if (scn->scn_phys.scn_queue_obj != 0) {
@@ -689,7 +689,7 @@ dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys, tx));
- bcopy(&scn->scn_phys, &scn->scn_phys_cached,
+ memcpy(&scn->scn_phys_cached, &scn->scn_phys,
sizeof (scn->scn_phys));
if (scn->scn_checkpointing)
@@ -730,7 +730,7 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
ASSERT(!dsl_scan_is_running(scn));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
- bzero(&scn->scn_phys, sizeof (scn->scn_phys));
+ memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));
scn->scn_phys.scn_func = *funcp;
scn->scn_phys.scn_state = DSS_SCANNING;
scn->scn_phys.scn_min_txg = 0;
@@ -798,7 +798,8 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
mutex_init(&dp->dp_blkstats->zab_lock, NULL,
MUTEX_DEFAULT, NULL);
}
- bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));
+ memset(&dp->dp_blkstats->zab_type, 0,
+ sizeof (dp->dp_blkstats->zab_type));
if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
ot = DMU_OT_ZAP_OTHER;
@@ -806,7 +807,7 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
- bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
+ memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
@@ -1792,14 +1793,15 @@ dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
* indicate that it's OK to start checking for suspending
* again.
*/
- if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
+ if (memcmp(zb, &scn->scn_phys.scn_bookmark,
+ sizeof (*zb)) == 0 ||
zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
dprintf("resuming at %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
- bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
+ memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb));
}
}
return (B_FALSE);
@@ -2651,12 +2653,10 @@ static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
- ddt_entry_t dde;
+ ddt_entry_t dde = {{{{0}}}};
int error;
uint64_t n = 0;
- bzero(&dde, sizeof (ddt_entry_t));
-
while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
ddt_t *ddt;
@@ -2749,7 +2749,7 @@ dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
* In case we suspended right at the end of the ds, zero the
* bookmark so we don't think that we're still trying to resume.
*/
- bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));
+ memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t));
/*
* Keep pulling things out of the dataset avl queue. Updates to the
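dsl_scan_ddt() trades a runtime bzero() for a braced zero initializer; the extra brace levels in {{{{0}}}} simply track the nesting of ddt_entry_t's first member down to its first scalar, keeping -Wmissing-braces quiet. A sketch with a hypothetical two-level aggregate:

#include <assert.h>

struct key {
        unsigned char k_bytes[8];
};
struct entry {
        struct key e_key;               /* aggregate first member */
        unsigned long e_refcnt;
};

int
main(void)
{
        /*
         * One brace level per aggregate down to the first scalar:
         * entry -> key -> k_bytes -> 0.  The whole object is zeroed
         * at its definition, so no bzero()/memset() call is needed.
         */
        struct entry e = {{{0}}};

        assert(e.e_key.k_bytes[0] == 0 && e.e_refcnt == 0);
        return (0);
}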
diff --git a/module/zfs/edonr_zfs.c b/module/zfs/edonr_zfs.c
index 7c62e0be07eb..e56b4054c67e 100644
--- a/module/zfs/edonr_zfs.c
+++ b/module/zfs/edonr_zfs.c
@@ -53,10 +53,10 @@ abd_checksum_edonr_native(abd_t *abd, uint64_t size,
EdonRState ctx;
ASSERT(ctx_template != NULL);
- bcopy(ctx_template, &ctx, sizeof (ctx));
+ memcpy(&ctx, ctx_template, sizeof (ctx));
(void) abd_iterate_func(abd, 0, size, edonr_incremental, &ctx);
EdonRFinal(&ctx, digest);
- bcopy(digest, zcp->zc_word, sizeof (zcp->zc_word));
+ memcpy(zcp->zc_word, digest, sizeof (zcp->zc_word));
}
/*
@@ -108,8 +108,8 @@ abd_checksum_edonr_tmpl_init(const zio_cksum_salt_t *salt)
void
abd_checksum_edonr_tmpl_free(void *ctx_template)
{
- EdonRState *ctx = ctx_template;
+ EdonRState *ctx = ctx_template;
- bzero(ctx, sizeof (*ctx));
+ memset(ctx, 0, sizeof (*ctx));
kmem_free(ctx, sizeof (*ctx));
}
diff --git a/module/zfs/gzip.c b/module/zfs/gzip.c
index 48191241bd7d..f2a7b6d839ef 100644
--- a/module/zfs/gzip.c
+++ b/module/zfs/gzip.c
@@ -66,7 +66,7 @@ gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
if (d_len != s_len)
return (s_len);
- bcopy(s_start, d_start, s_len);
+ memcpy(d_start, s_start, s_len);
return (s_len);
}
/* if hardware compression fails, do it again with software */
@@ -76,7 +76,7 @@ gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
if (d_len != s_len)
return (s_len);
- bcopy(s_start, d_start, s_len);
+ memcpy(d_start, s_start, s_len);
return (s_len);
}
diff --git a/module/zfs/hkdf.c b/module/zfs/hkdf.c
index 2c91401d5b4f..580544c8ac1a 100644
--- a/module/zfs/hkdf.c
+++ b/module/zfs/hkdf.c
@@ -132,7 +132,7 @@ hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len,
if (ret != CRYPTO_SUCCESS)
return (SET_ERROR(EIO));
- bcopy(T, out_buf + pos,
+ memcpy(out_buf + pos, T,
(i != N) ? SHA512_DIGEST_LENGTH : (out_len - pos));
pos += SHA512_DIGEST_LENGTH;
}
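The hkdf.c hunk is the classic last-partial-block copy of HKDF-Expand: every round emits a full digest except possibly the final one, so the memcpy() length is clamped to what remains of the output buffer. A standalone sketch with a toy 8-byte digest standing in for SHA-512:

#include <stdio.h>
#include <string.h>

#define DLEN    8       /* stand-in for SHA512_DIGEST_LENGTH */

int
main(void)
{
        unsigned char T[DLEN] = "ABCDEFG";      /* one PRF output block */
        unsigned char out[20];
        size_t out_len = sizeof (out), pos = 0;
        size_t N = (out_len + DLEN - 1) / DLEN;

        for (size_t i = 1; i <= N; i++) {
                /* The last block may be partial: copy only what remains. */
                memcpy(out + pos, T,
                    (i != N) ? DLEN : (out_len - pos));
                pos += DLEN;
        }
        printf("%zu bytes expanded\n", out_len);
        return (0);
}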
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index bd17c1fe862a..7ed83b305db7 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -1952,9 +1952,9 @@ metaslab_aux_histograms_clear(metaslab_t *msp)
*/
ASSERT(msp->ms_loaded);
- bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
+ memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
for (int t = 0; t < TXG_DEFER_SIZE; t++)
- bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t]));
+ memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
}
static void
@@ -2044,13 +2044,13 @@ metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
*/
uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
if (defer_allowed) {
- bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index],
+ memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
sizeof (msp->ms_synchist));
} else {
- bzero(msp->ms_deferhist[hist_index],
+ memset(msp->ms_deferhist[hist_index], 0,
sizeof (msp->ms_deferhist[hist_index]));
}
- bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
+ memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
}
/*
@@ -5296,7 +5296,7 @@ next:
goto top;
}
- bzero(&dva[d], sizeof (dva_t));
+ memset(&dva[d], 0, sizeof (dva_t));
metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
return (SET_ERROR(ENOSPC));
@@ -5809,7 +5809,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
metaslab_group_alloc_decrement(spa,
DVA_GET_VDEV(&dva[d]), zio, flags,
allocator, B_FALSE);
- bzero(&dva[d], sizeof (dva_t));
+ memset(&dva[d], 0, sizeof (dva_t));
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (error);
diff --git a/module/zfs/range_tree.c b/module/zfs/range_tree.c
index f9fb97476b64..fe4bf616c479 100644
--- a/module/zfs/range_tree.c
+++ b/module/zfs/range_tree.c
@@ -78,7 +78,7 @@
static inline void
rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt)
{
- ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
+ ASSERT3U(rt->rt_type, <, RANGE_SEG_NUM_TYPES);
size_t size = 0;
switch (rt->rt_type) {
case RANGE_SEG32:
@@ -91,9 +91,9 @@ rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt)
size = sizeof (range_seg_gap_t);
break;
default:
- VERIFY(0);
+ __builtin_unreachable();
}
- bcopy(src, dest, size);
+ memcpy(dest, src, size);
}
void
@@ -701,7 +701,7 @@ range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
zfs_btree_clear(&rt->rt_root);
}
- bzero(rt->rt_histogram, sizeof (rt->rt_histogram));
+ memset(rt->rt_histogram, 0, sizeof (rt->rt_histogram));
rt->rt_space = 0;
}
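The range_tree.c hunk does more than rename: tightening ASSERT3U from <= to < rejects the out-of-range sentinel RANGE_SEG_NUM_TYPES, and the impossible default arm becomes __builtin_unreachable() (a GCC/Clang extension) instead of VERIFY(0), telling the optimizer no fall-through value is needed. A sketch of the pattern:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum seg_type { SEG32, SEG64, SEG_GAP, SEG_NUM_TYPES };

struct seg32 { uint32_t s, e; };
struct seg64 { uint64_t s, e; };
struct seg_gap { uint64_t s, e, fill; };

static size_t
seg_size(enum seg_type t)
{
        switch (t) {
        case SEG32:
                return (sizeof (struct seg32));
        case SEG64:
                return (sizeof (struct seg64));
        case SEG_GAP:
                return (sizeof (struct seg_gap));
        default:
                /*
                 * Telling the compiler this arm cannot happen lets it
                 * drop the check; correctness now rests on the
                 * caller-side assertion (t < SEG_NUM_TYPES).
                 */
                __builtin_unreachable();
        }
}

int
main(void)
{
        printf("%zu\n", seg_size(SEG64));
        return (0);
}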
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index a078af159c1f..2b6776581a47 100644
--- a/module/zfs/sa.c
+++ b/module/zfs/sa.c
@@ -160,7 +160,7 @@ do { \
*(uint64_t *)((uintptr_t)t + 8) = \
*(uint64_t *)((uintptr_t)s + 8); \
} else { \
- bcopy(s, t, l); \
+ memcpy(t, s, l); \
} \
} else { \
sa_copy_data(f, s, t, l); \
@@ -414,7 +414,7 @@ sa_add_layout_entry(objset_t *os, const sa_attr_type_t *attrs, int attr_count,
tb->lot_attr_count = attr_count;
tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
KM_SLEEP);
- bcopy(attrs, tb->lot_attrs, sizeof (sa_attr_type_t) * attr_count);
+ memcpy(tb->lot_attrs, attrs, sizeof (sa_attr_type_t) * attr_count);
tb->lot_num = lot_num;
tb->lot_hash = hash;
tb->lot_instance = 0;
@@ -511,7 +511,7 @@ static void
sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
{
if (func == NULL) {
- bcopy(datastart, target, buflen);
+ memcpy(target, datastart, buflen);
} else {
boolean_t start;
int bytes;
@@ -523,7 +523,7 @@ sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
bytes = 0;
while (bytes < buflen) {
func(&dataptr, &length, buflen, start, datastart);
- bcopy(dataptr, saptr, length);
+ memcpy(saptr, dataptr, length);
saptr = (void *)((caddr_t)saptr + length);
bytes += length;
start = B_FALSE;
@@ -1664,8 +1664,9 @@ sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
&xattr, 8);
if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
- bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
- scanstamp, AV_SCANSTAMP_SZ);
+ memcpy(scanstamp,
+ (caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
+ AV_SCANSTAMP_SZ);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SCANSTAMP(zfsvfs), NULL,
scanstamp, AV_SCANSTAMP_SZ);
zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
@@ -1873,7 +1874,7 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
if (dn->dn_bonuslen != 0) {
bonus_data_size = hdl->sa_bonus->db_size;
old_data[0] = kmem_alloc(bonus_data_size, KM_SLEEP);
- bcopy(hdl->sa_bonus->db_data, old_data[0],
+ memcpy(old_data[0], hdl->sa_bonus->db_data,
hdl->sa_bonus->db_size);
bonus_attr_count = hdl->sa_bonus_tab->sa_layout->lot_attr_count;
} else {
@@ -1886,7 +1887,7 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
if ((error = sa_get_spill(hdl)) == 0) {
spill_data_size = hdl->sa_spill->db_size;
old_data[1] = vmem_alloc(spill_data_size, KM_SLEEP);
- bcopy(hdl->sa_spill->db_data, old_data[1],
+ memcpy(old_data[1], hdl->sa_spill->db_data,
hdl->sa_spill->db_size);
spill_attr_count =
hdl->sa_spill_tab->sa_layout->lot_attr_count;
diff --git a/module/zfs/skein_zfs.c b/module/zfs/skein_zfs.c
index 55c7ac2a50ad..4b2aca888eee 100644
--- a/module/zfs/skein_zfs.c
+++ b/module/zfs/skein_zfs.c
@@ -45,13 +45,13 @@ void
abd_checksum_skein_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
- Skein_512_Ctxt_t ctx;
+ Skein_512_Ctxt_t ctx;
ASSERT(ctx_template != NULL);
- bcopy(ctx_template, &ctx, sizeof (ctx));
+ memcpy(&ctx, ctx_template, sizeof (ctx));
(void) abd_iterate_func(abd, 0, size, skein_incremental, &ctx);
(void) Skein_512_Final(&ctx, (uint8_t *)zcp);
- bzero(&ctx, sizeof (ctx));
+ memset(&ctx, 0, sizeof (ctx));
}
/*
@@ -79,9 +79,8 @@ abd_checksum_skein_byteswap(abd_t *abd, uint64_t size,
void *
abd_checksum_skein_tmpl_init(const zio_cksum_salt_t *salt)
{
- Skein_512_Ctxt_t *ctx;
+ Skein_512_Ctxt_t *ctx = kmem_zalloc(sizeof (*ctx), KM_SLEEP);
- ctx = kmem_zalloc(sizeof (*ctx), KM_SLEEP);
(void) Skein_512_InitExt(ctx, sizeof (zio_cksum_t) * 8, 0,
salt->zcs_bytes, sizeof (salt->zcs_bytes));
return (ctx);
@@ -94,8 +93,8 @@ abd_checksum_skein_tmpl_init(const zio_cksum_salt_t *salt)
void
abd_checksum_skein_tmpl_free(void *ctx_template)
{
- Skein_512_Ctxt_t *ctx = ctx_template;
+ Skein_512_Ctxt_t *ctx = ctx_template;
- bzero(ctx, sizeof (*ctx));
+ memset(ctx, 0, sizeof (*ctx));
kmem_free(ctx, sizeof (*ctx));
}
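The Skein and Edon-R paths share one pattern: a salted context template is built once, each checksum call memcpy()s it into a stack copy, and both the per-call copy and the freed template are scrubbed with memset(). A toy userland sketch of that lifecycle (ctx_t and its "rounds" are stand-ins, not real Skein):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct ctx { unsigned long c_state; } ctx_t;

static void *
tmpl_init(unsigned long salt)
{
        ctx_t *t = calloc(1, sizeof (*t));

        if (t != NULL)
                t->c_state = salt;      /* pre-keyed state, computed once */
        return (t);
}

static unsigned long
checksum(const void *tmpl, unsigned long data)
{
        ctx_t c;

        memcpy(&c, tmpl, sizeof (c));   /* clone the template per call */
        c.c_state ^= data;              /* stand-in for the real rounds */
        unsigned long out = c.c_state;
        memset(&c, 0, sizeof (c));      /* scrub the stack copy */
        return (out);
}

static void
tmpl_free(void *tmpl)
{
        memset(tmpl, 0, sizeof (ctx_t));        /* scrub before freeing */
        free(tmpl);
}

int
main(void)
{
        void *t = tmpl_init(0x5A5A);

        if (t == NULL)
                return (1);
        printf("%lx\n", checksum(t, 0xFF));
        tmpl_free(t);
        return (0);
}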
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 744bcb434f06..174879321a94 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -947,8 +947,8 @@ spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
- bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
- bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
+ memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t));
+ memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
@@ -8495,7 +8495,7 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
KM_SLEEP) == 0);
- bzero(packed + nvsize, bufsize - nvsize);
+ memset(packed + nvsize, 0, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
diff --git a/module/zfs/spa_checkpoint.c b/module/zfs/spa_checkpoint.c
index 68c3ae2e0c31..7c543caaa272 100644
--- a/module/zfs/spa_checkpoint.c
+++ b/module/zfs/spa_checkpoint.c
@@ -166,7 +166,7 @@ spa_checkpoint_get_stats(spa_t *spa, pool_checkpoint_stat_t *pcs)
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ZFS_ERR_NO_CHECKPOINT));
- bzero(pcs, sizeof (pool_checkpoint_stat_t));
+ memset(pcs, 0, sizeof (pool_checkpoint_stat_t));
int error = zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT);
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index a04766e7e33b..12aec4a568eb 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -1477,8 +1477,7 @@ spa_strdup(const char *s)
len = strlen(s);
new = kmem_alloc(len + 1, KM_SLEEP);
- bcopy(s, new, len);
- new[len] = '\0';
+ memcpy(new, s, len + 1);
return (new);
}
@@ -2566,7 +2565,7 @@ spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
return (SET_ERROR(ENOENT));
- bzero(ps, sizeof (pool_scan_stat_t));
+ memset(ps, 0, sizeof (pool_scan_stat_t));
/* data stored on disk */
ps->pss_func = scn->scn_phys.scn_func;
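spa_strdup() gets a small cleanup as well: copying len + 1 bytes picks up the NUL terminator in the same memcpy(), where the old code bcopy()ed len bytes and stored the '\0' separately. A userland sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* userland stand-in for the reworked spa_strdup() */
static char *
xstrdup(const char *s)
{
        size_t len = strlen(s);
        char *new = malloc(len + 1);

        if (new != NULL)
                memcpy(new, s, len + 1);        /* copies the NUL too */
        return (new);
}

int
main(void)
{
        char *d = xstrdup("tank/home");

        if (d == NULL)
                return (1);
        printf("%s\n", d);
        free(d);
        return (0);
}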
diff --git a/module/zfs/space_map.c b/module/zfs/space_map.c
index 11d4798925e4..61282f693c2c 100644
--- a/module/zfs/space_map.c
+++ b/module/zfs/space_map.c
@@ -455,7 +455,8 @@ space_map_histogram_clear(space_map_t *sm)
if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
return;
- bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
+ memset(sm->sm_phys->smp_histogram, 0,
+ sizeof (sm->sm_phys->smp_histogram));
}
boolean_t
@@ -896,7 +897,7 @@ space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
* will be reset. Do the same in the common case so that
* bugs related to the uncommon case do not go unnoticed.
*/
- bzero(sm->sm_phys->smp_histogram,
+ memset(sm->sm_phys->smp_histogram, 0,
sizeof (sm->sm_phys->smp_histogram));
}
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index 7908183caee7..aa72758a3b2e 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -121,7 +121,7 @@ txg_init(dsl_pool_t *dp, uint64_t txg)
{
tx_state_t *tx = &dp->dp_tx;
int c;
- bzero(tx, sizeof (tx_state_t));
+ memset(tx, 0, sizeof (tx_state_t));
tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);
@@ -186,7 +186,7 @@ txg_fini(dsl_pool_t *dp)
vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));
- bzero(tx, sizeof (tx_state_t));
+ memset(tx, 0, sizeof (tx_state_t));
}
/*
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 7bc79a2259df..db2d2c5e44fb 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -475,7 +475,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
newchild = kmem_alloc(newsize, KM_SLEEP);
if (pvd->vdev_child != NULL) {
- bcopy(pvd->vdev_child, newchild, oldsize);
+ memcpy(newchild, pvd->vdev_child, oldsize);
kmem_free(pvd->vdev_child, oldsize);
}
@@ -1426,7 +1426,7 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
if (expanding) {
- bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
+ memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp));
vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
}
@@ -4418,7 +4418,7 @@ vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
vdev_t *tvd = vd->vdev_top;
mutex_enter(&vd->vdev_stat_lock);
if (vs) {
- bcopy(&vd->vdev_stat, vs, sizeof (*vs));
+ memcpy(vs, &vd->vdev_stat, sizeof (*vs));
vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
vs->vs_state = vd->vdev_state;
vs->vs_rsize = vdev_get_min_asize(vd);
diff --git a/module/zfs/vdev_draid.c b/module/zfs/vdev_draid.c
index 6c765d984585..fa8daf57b2eb 100644
--- a/module/zfs/vdev_draid.c
+++ b/module/zfs/vdev_draid.c
@@ -1725,7 +1725,7 @@ vdev_draid_spare_create(nvlist_t *nvroot, vdev_t *vd, uint64_t *ndraidp,
uint64_t nparity = vdc->vdc_nparity;
for (uint64_t spare_id = 0; spare_id < nspares; spare_id++) {
- bzero(path, sizeof (path));
+ memset(path, 0, sizeof (path));
(void) snprintf(path, sizeof (path) - 1,
"%s%llu-%llu-%llu", VDEV_TYPE_DRAID,
(u_longlong_t)nparity,
diff --git a/module/zfs/vdev_indirect.c b/module/zfs/vdev_indirect.c
index aeba1e99e6e5..9189d3f31241 100644
--- a/module/zfs/vdev_indirect.c
+++ b/module/zfs/vdev_indirect.c
@@ -48,8 +48,8 @@
* "vdev_remap" operation that executes a callback on each contiguous
* segment of the new location. This function is used in multiple ways:
*
- * - i/os to this vdev use the callback to determine where the
- * data is now located, and issue child i/os for each segment's new
+ * - I/Os to this vdev use the callback to determine where the
+ * data is now located, and issue child I/Os for each segment's new
* location.
*
* - frees and claims to this vdev use the callback to free or claim
@@ -1021,7 +1021,7 @@ vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
size_t copy_length = entries * sizeof (*first_mapping);
duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
- bcopy(first_mapping, duplicate_mappings, copy_length);
+ memcpy(duplicate_mappings, first_mapping, copy_length);
*copied_entries = entries;
return (duplicate_mappings);
diff --git a/module/zfs/vdev_indirect_births.c b/module/zfs/vdev_indirect_births.c
index e8f925628d04..65a57e73604f 100644
--- a/module/zfs/vdev_indirect_births.c
+++ b/module/zfs/vdev_indirect_births.c
@@ -152,7 +152,7 @@ vdev_indirect_births_add_entry(vdev_indirect_births_t *vib,
new_entries = vmem_alloc(new_size, KM_SLEEP);
if (old_size > 0) {
- bcopy(vib->vib_entries, new_entries, old_size);
+ memcpy(new_entries, vib->vib_entries, old_size);
vmem_free(vib->vib_entries, old_size);
}
new_entries[vib->vib_phys->vib_count - 1] = vibe;
diff --git a/module/zfs/vdev_indirect_mapping.c b/module/zfs/vdev_indirect_mapping.c
index 4ade56e062f7..e92495f2dd34 100644
--- a/module/zfs/vdev_indirect_mapping.c
+++ b/module/zfs/vdev_indirect_mapping.c
@@ -482,7 +482,7 @@ vdev_indirect_mapping_add_entries(vdev_indirect_mapping_t *vim,
entries_written * sizeof (vdev_indirect_mapping_entry_phys_t));
vim->vim_entries = vmem_alloc(new_size, KM_SLEEP);
if (old_size > 0) {
- bcopy(old_entries, vim->vim_entries, old_size);
+ memcpy(vim->vim_entries, old_entries, old_size);
vmem_free(old_entries, old_size);
}
VERIFY0(dmu_read(vim->vim_objset, vim->vim_object, old_size,
@@ -584,7 +584,7 @@ vdev_indirect_mapping_load_obsolete_counts(vdev_indirect_mapping_t *vim)
0, counts_size,
counts, DMU_READ_PREFETCH));
} else {
- bzero(counts, counts_size);
+ memset(counts, 0, counts_size);
}
return (counts);
}
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index 29391af820ed..865710337e63 100644
--- a/module/zfs/vdev_label.c
+++ b/module/zfs/vdev_label.c
@@ -1565,7 +1565,7 @@ vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
ASSERT(ub);
ASSERT(config);
- bzero(ub, sizeof (uberblock_t));
+ memset(ub, 0, sizeof (uberblock_t));
*config = NULL;
cb.ubl_ubbest = ub;
diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c
index 1d691c81b5d5..ae0777b3dcc1 100644
--- a/module/zfs/vdev_raidz.c
+++ b/module/zfs/vdev_raidz.c
@@ -1779,11 +1779,9 @@ vdev_raidz_checksum_error(zio_t *zio, raidz_col_t *rc, abd_t *bad_data)
static int
raidz_checksum_verify(zio_t *zio)
{
- zio_bad_cksum_t zbc;
+ zio_bad_cksum_t zbc = {{{0}}};
raidz_map_t *rm = zio->io_vsd;
- bzero(&zbc, sizeof (zio_bad_cksum_t));
-
int ret = zio_checksum_error(zio, &zbc);
if (ret != 0 && zbc.zbc_injected != 0)
rm->rm_ecksuminjected = 1;
diff --git a/module/zfs/vdev_rebuild.c b/module/zfs/vdev_rebuild.c
index 510463b1f970..6ec3a9256950 100644
--- a/module/zfs/vdev_rebuild.c
+++ b/module/zfs/vdev_rebuild.c
@@ -227,7 +227,7 @@ vdev_rebuild_initiate_sync(void *arg, dmu_tx_t *tx)
spa_feature_incr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);
mutex_enter(&vd->vdev_rebuild_lock);
- bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
+ memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
vrp->vrp_rebuild_state = VDEV_REBUILD_ACTIVE;
vrp->vrp_min_txg = 0;
vrp->vrp_max_txg = dmu_tx_get_txg(tx);
@@ -448,7 +448,7 @@ vdev_rebuild_clear_sync(void *arg, dmu_tx_t *tx)
}
clear_rebuild_bytes(vd);
- bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
+ memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
if (vd->vdev_top_zap != 0 && zap_contains(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_VDEV_REBUILD_PHYS) == 0) {
@@ -701,7 +701,7 @@ vdev_rebuild_load(vdev_t *vd)
vd->vdev_rebuilding = B_FALSE;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) {
- bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
+ memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
mutex_exit(&vd->vdev_rebuild_lock);
return (SET_ERROR(ENOTSUP));
}
@@ -718,7 +718,7 @@ vdev_rebuild_load(vdev_t *vd)
* status allowing a new resilver/rebuild to be started.
*/
if (err == ENOENT || err == EOVERFLOW || err == ECKSUM) {
- bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
+ memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
} else if (err) {
mutex_exit(&vd->vdev_rebuild_lock);
return (err);
@@ -1111,7 +1111,7 @@ vdev_rebuild_get_stats(vdev_t *tvd, vdev_rebuild_stat_t *vrs)
tvd->vdev_top_zap, VDEV_TOP_ZAP_VDEV_REBUILD_PHYS);
if (error == ENOENT) {
- bzero(vrs, sizeof (vdev_rebuild_stat_t));
+ memset(vrs, 0, sizeof (vdev_rebuild_stat_t));
vrs->vrs_state = VDEV_REBUILD_NONE;
error = 0;
} else if (error == 0) {
diff --git a/module/zfs/vdev_trim.c b/module/zfs/vdev_trim.c
index 77f27406ea01..43027f136898 100644
--- a/module/zfs/vdev_trim.c
+++ b/module/zfs/vdev_trim.c
@@ -1520,7 +1520,7 @@ vdev_trim_l2arc_thread(void *arg)
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
l2arc_dev_t *dev = l2arc_vdev_get(vd);
- trim_args_t ta;
+ trim_args_t ta = {0};
range_seg64_t physical_rs;
ASSERT(vdev_is_concrete(vd));
@@ -1531,7 +1531,6 @@ vdev_trim_l2arc_thread(void *arg)
vd->vdev_trim_partial = 0;
vd->vdev_trim_secure = 0;
- bzero(&ta, sizeof (ta));
ta.trim_vdev = vd;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_MANUAL;
@@ -1591,7 +1590,7 @@ vdev_trim_l2arc_thread(void *arg)
*/
spa_config_enter(vd->vdev_spa, SCL_L2ARC, vd,
RW_READER);
- bzero(dev->l2ad_dev_hdr, dev->l2ad_dev_hdr_asize);
+ memset(dev->l2ad_dev_hdr, 0, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
spa_config_exit(vd->vdev_spa, SCL_L2ARC, vd);
@@ -1655,9 +1654,9 @@ vdev_trim_l2arc(spa_t *spa)
int
vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
{
- trim_args_t ta;
- range_seg64_t physical_rs;
- int error;
+ trim_args_t ta = {0};
+ range_seg64_t physical_rs;
+ int error;
physical_rs.rs_start = start;
physical_rs.rs_end = start + size;
@@ -1666,7 +1665,6 @@ vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_top->vdev_removing);
- bzero(&ta, sizeof (ta));
ta.trim_vdev = vd;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_SIMPLE;
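vdev_trim.c moves the zeroing of trim_args_t to its definition with = {0}, which lets the later bzero() at the use site disappear entirely. In miniature (a hypothetical cut-down trim_args_t):

#include <stdio.h>

typedef struct trim_args {
        void *ta_vdev;
        int ta_type;
        unsigned long ta_extent_bytes_max;
} trim_args_t;

int
main(void)
{
        /* Zeroed at its definition, so no bzero() before first use. */
        trim_args_t ta = {0};

        ta.ta_type = 1;
        printf("%lu\n", ta.ta_extent_bytes_max);        /* prints 0 */
        return (0);
}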
diff --git a/module/zfs/zap.c b/module/zfs/zap.c
index d1d07f9fc804..b2b9dc27f1b6 100644
--- a/module/zfs/zap.c
+++ b/module/zfs/zap.c
@@ -112,7 +112,7 @@ fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
* explicitly zero it since it might be coming from an
* initialized microzap
*/
- bzero(zap->zap_dbuf->db_data, zap->zap_dbuf->db_size);
+ memset(zap->zap_dbuf->db_data, 0, zap->zap_dbuf->db_size);
zp->zap_block_type = ZBT_HEADER;
zp->zap_magic = ZAP_MAGIC;
diff --git a/module/zfs/zap_leaf.c b/module/zfs/zap_leaf.c
index aad923d512df..d161c19c9cbe 100644
--- a/module/zfs/zap_leaf.c
+++ b/module/zfs/zap_leaf.c
@@ -207,7 +207,7 @@ zap_leaf_chunk_free(zap_leaf_t *l, uint16_t chunk)
zlf->lf_type = ZAP_CHUNK_FREE;
zlf->lf_next = zap_leaf_phys(l)->l_hdr.lh_freelist;
- bzero(zlf->lf_pad, sizeof (zlf->lf_pad)); /* help it to compress */
+ memset(zlf->lf_pad, 0, sizeof (zlf->lf_pad)); /* help it to compress */
zap_leaf_phys(l)->l_hdr.lh_freelist = chunk;
zap_leaf_phys(l)->l_hdr.lh_nfree++;
@@ -304,7 +304,7 @@ zap_leaf_array_read(zap_leaf_t *l, uint16_t chunk,
while (chunk != CHAIN_END) {
struct zap_leaf_array *la =
&ZAP_LEAF_CHUNK(l, chunk).l_array;
- bcopy(la->la_array, p, ZAP_LEAF_ARRAY_BYTES);
+ memcpy(p, la->la_array, ZAP_LEAF_ARRAY_BYTES);
p += ZAP_LEAF_ARRAY_BYTES;
chunk = la->la_next;
}
@@ -344,7 +344,7 @@ zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn,
zap_leaf_array_read(l, chunk, sizeof (*thiskey), array_numints,
sizeof (*thiskey), array_numints, thiskey);
- boolean_t match = bcmp(thiskey, zn->zn_key_orig,
+ boolean_t match = memcmp(thiskey, zn->zn_key_orig,
array_numints * sizeof (*thiskey)) == 0;
kmem_free(thiskey, array_numints * sizeof (*thiskey));
return (match);
@@ -372,7 +372,8 @@ zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn,
struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
int toread = MIN(array_numints - bseen, ZAP_LEAF_ARRAY_BYTES);
ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
- if (bcmp(la->la_array, (char *)zn->zn_key_orig + bseen, toread))
+ if (memcmp(la->la_array, (char *)zn->zn_key_orig + bseen,
+ toread))
break;
chunk = la->la_next;
bseen += toread;
diff --git a/module/zfs/zap_micro.c b/module/zfs/zap_micro.c
index 1f32e4450522..85134e999bea 100644
--- a/module/zfs/zap_micro.c
+++ b/module/zfs/zap_micro.c
@@ -641,7 +641,7 @@ mzap_upgrade(zap_t **zapp, void *tag, dmu_tx_t *tx, zap_flags_t flags)
int sz = zap->zap_dbuf->db_size;
mzap_phys_t *mzp = vmem_alloc(sz, KM_SLEEP);
- bcopy(zap->zap_dbuf->db_data, mzp, sz);
+ memcpy(mzp, zap->zap_dbuf->db_data, sz);
int nchunks = zap->zap_m.zap_num_chunks;
if (!flags) {
@@ -1407,7 +1407,7 @@ zap_remove_impl(zap_t *zap, const char *name,
err = SET_ERROR(ENOENT);
} else {
zap->zap_m.zap_num_entries--;
- bzero(&zap_m_phys(zap)->mz_chunk[mze->mze_chunkid],
+ memset(&zap_m_phys(zap)->mz_chunk[mze->mze_chunkid], 0,
sizeof (mzap_ent_phys_t));
mze_remove(zap, mze);
}
@@ -1632,7 +1632,7 @@ zap_get_stats(objset_t *os, uint64_t zapobj, zap_stats_t *zs)
if (err != 0)
return (err);
- bzero(zs, sizeof (zap_stats_t));
+ memset(zs, 0, sizeof (zap_stats_t));
if (zap->zap_ismicro) {
zs->zs_blocksize = zap->zap_dbuf->db_size;
diff --git a/module/zfs/zfs_fm.c b/module/zfs/zfs_fm.c
index 828385b430b4..adc9f4bc5ecb 100644
--- a/module/zfs/zfs_fm.c
+++ b/module/zfs/zfs_fm.c
@@ -1150,7 +1150,7 @@ zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
/* copy the checksum failure information if it was provided */
if (info != NULL) {
report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
- bcopy(info, report->zcr_ckinfo, sizeof (*info));
+ memcpy(report->zcr_ckinfo, info, sizeof (*info));
}
report->zcr_sector = 1ULL << vd->vdev_top->vdev_ashift;
diff --git a/module/zfs/zfs_log.c b/module/zfs/zfs_log.c
index 9df801870a4f..1d4f5aa79a85 100644
--- a/module/zfs/zfs_log.c
+++ b/module/zfs/zfs_log.c
@@ -127,9 +127,9 @@ zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
attrs = (uint64_t *)bitmap;
*attrs = 0;
crtime = attrs + 1;
- bzero(crtime, 2 * sizeof (uint64_t));
+ memset(crtime, 0, 2 * sizeof (uint64_t));
scanstamp = (caddr_t)(crtime + 2);
- bzero(scanstamp, AV_SCANSTAMP_SZ);
+ memset(scanstamp, 0, AV_SCANSTAMP_SZ);
if (XVA_ISSET_REQ(xvap, XAT_READONLY))
*attrs |= (xoap->xoa_readonly == 0) ? 0 :
XAT0_READONLY;
@@ -168,13 +168,13 @@ zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));
- bcopy(xoap->xoa_av_scanstamp, scanstamp, AV_SCANSTAMP_SZ);
+ memcpy(scanstamp, xoap->xoa_av_scanstamp, AV_SCANSTAMP_SZ);
} else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
/*
* XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
* at the same time, so we can share the same space.
*/
- bcopy(&xoap->xoa_projid, scanstamp, sizeof (uint64_t));
+ memcpy(scanstamp, &xoap->xoa_projid, sizeof (uint64_t));
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
*attrs |= (xoap->xoa_reparse == 0) ? 0 :
@@ -214,7 +214,7 @@ zfs_log_fuid_domains(zfs_fuid_info_t *fuidp, void *start)
if (fuidp->z_domain_str_sz != 0) {
for (zdomain = list_head(&fuidp->z_domains); zdomain;
zdomain = list_next(&fuidp->z_domains, zdomain)) {
- bcopy((void *)zdomain->z_domain, start,
+ memcpy(start, zdomain->z_domain,
strlen(zdomain->z_domain) + 1);
start = (caddr_t)start +
strlen(zdomain->z_domain) + 1;
@@ -392,7 +392,7 @@ zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
else
lracl->lr_acl_flags = 0;
- bcopy(vsecp->vsa_aclentp, end, aclsize);
+ memcpy(end, vsecp->vsa_aclentp, aclsize);
end = (caddr_t)end + ZIL_ACE_LENGTH(aclsize);
}
@@ -404,7 +404,7 @@ zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
/*
* Now place file name in log record
*/
- bcopy(name, end, namesize);
+ memcpy(end, name, namesize);
zil_itx_assign(zilog, itx, tx);
}
@@ -426,7 +426,7 @@ zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
lr = (lr_remove_t *)&itx->itx_lr;
lr->lr_doid = dzp->z_id;
- bcopy(name, (char *)(lr + 1), namesize);
+ memcpy(lr + 1, name, namesize);
itx->itx_oid = foid;
@@ -462,7 +462,7 @@ zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
lr = (lr_link_t *)&itx->itx_lr;
lr->lr_doid = dzp->z_id;
lr->lr_link_obj = zp->z_id;
- bcopy(name, (char *)(lr + 1), namesize);
+ memcpy(lr + 1, name, namesize);
zil_itx_assign(zilog, itx, tx);
}
@@ -493,8 +493,8 @@ zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
sizeof (uint64_t));
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
lr->lr_crtime, sizeof (uint64_t) * 2);
- bcopy(name, (char *)(lr + 1), namesize);
- bcopy(link, (char *)(lr + 1) + namesize, linksize);
+ memcpy((char *)(lr + 1), name, namesize);
+ memcpy((char *)(lr + 1) + namesize, link, linksize);
zil_itx_assign(zilog, itx, tx);
}
@@ -518,8 +518,8 @@ zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, znode_t *sdzp,
lr = (lr_rename_t *)&itx->itx_lr;
lr->lr_sdoid = sdzp->z_id;
lr->lr_tdoid = tdzp->z_id;
- bcopy(sname, (char *)(lr + 1), snamesize);
- bcopy(dname, (char *)(lr + 1) + snamesize, dnamesize);
+ memcpy((char *)(lr + 1), sname, snamesize);
+ memcpy((char *)(lr + 1) + snamesize, dname, dnamesize);
itx->itx_oid = szp->z_id;
zil_itx_assign(zilog, itx, tx);
@@ -742,9 +742,9 @@ zfs_log_setsaxattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
lr = (lr_setsaxattr_t *)&itx->itx_lr;
lr->lr_foid = zp->z_id;
xattrstart = (char *)(lr + 1);
- bcopy(name, xattrstart, namelen);
+ memcpy(xattrstart, name, namelen);
if (value != NULL) {
- bcopy(value, (char *)xattrstart + namelen, size);
+ memcpy((char *)xattrstart + namelen, value, size);
lr->lr_size = size;
} else {
lr->lr_size = 0;
@@ -802,11 +802,11 @@ zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
if (txtype == TX_ACL_V0) {
lrv0 = (lr_acl_v0_t *)lr;
- bcopy(vsecp->vsa_aclentp, (ace_t *)(lrv0 + 1), aclbytes);
+ memcpy(lrv0 + 1, vsecp->vsa_aclentp, aclbytes);
} else {
void *start = (ace_t *)(lr + 1);
- bcopy(vsecp->vsa_aclentp, start, aclbytes);
+ memcpy(start, vsecp->vsa_aclentp, aclbytes);
start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes);
diff --git a/module/zfs/zfs_replay.c b/module/zfs/zfs_replay.c
index 3ccd96dc256d..e9c5eeb55b07 100644
--- a/module/zfs/zfs_replay.c
+++ b/module/zfs/zfs_replay.c
@@ -70,7 +70,7 @@ static void
zfs_init_vattr(vattr_t *vap, uint64_t mask, uint64_t mode,
uint64_t uid, uint64_t gid, uint64_t rdev, uint64_t nodeid)
{
- bzero(vap, sizeof (*vap));
+ memset(vap, 0, sizeof (*vap));
vap->va_mask = (uint_t)mask;
vap->va_mode = mode;
#if defined(__FreeBSD__) || defined(__APPLE__)
@@ -143,13 +143,13 @@ zfs_replay_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));
- bcopy(scanstamp, xoap->xoa_av_scanstamp, AV_SCANSTAMP_SZ);
+ memcpy(xoap->xoa_av_scanstamp, scanstamp, AV_SCANSTAMP_SZ);
} else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
/*
* XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
* at the same time, so we can share the same space.
*/
- bcopy(scanstamp, &xoap->xoa_projid, sizeof (uint64_t));
+ memcpy(&xoap->xoa_projid, scanstamp, sizeof (uint64_t));
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
xoap->xoa_reparse = ((*attrs & XAT0_REPARSE) != 0);
@@ -791,7 +791,7 @@ zfs_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
zfsvfs_t *zfsvfs = arg1;
lr_truncate_t *lr = arg2;
znode_t *zp;
- flock64_t fl;
+ flock64_t fl = {0};
int error;
if (byteswap)
@@ -800,7 +800,6 @@ zfs_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
- bzero(&fl, sizeof (fl));
fl.l_type = F_WRLCK;
fl.l_whence = SEEK_SET;
fl.l_start = lr->lr_offset;
@@ -956,7 +955,7 @@ zfs_replay_acl_v0(void *arg1, void *arg2, boolean_t byteswap)
zfsvfs_t *zfsvfs = arg1;
lr_acl_v0_t *lr = arg2;
ace_t *ace = (ace_t *)(lr + 1); /* ace array follows lr_acl_t */
- vsecattr_t vsa;
+ vsecattr_t vsa = {0};
znode_t *zp;
int error;
@@ -968,7 +967,6 @@ zfs_replay_acl_v0(void *arg1, void *arg2, boolean_t byteswap)
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
- bzero(&vsa, sizeof (vsa));
vsa.vsa_mask = VSA_ACE | VSA_ACECNT;
vsa.vsa_aclcnt = lr->lr_aclcnt;
vsa.vsa_aclentsz = sizeof (ace_t) * vsa.vsa_aclcnt;
@@ -1002,7 +1000,7 @@ zfs_replay_acl(void *arg1, void *arg2, boolean_t byteswap)
zfsvfs_t *zfsvfs = arg1;
lr_acl_t *lr = arg2;
ace_t *ace = (ace_t *)(lr + 1);
- vsecattr_t vsa;
+ vsecattr_t vsa = {0};
znode_t *zp;
int error;
@@ -1019,7 +1017,6 @@ zfs_replay_acl(void *arg1, void *arg2, boolean_t byteswap)
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
- bzero(&vsa, sizeof (vsa));
vsa.vsa_mask = VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS;
vsa.vsa_aclcnt = lr->lr_aclcnt;
vsa.vsa_aclentp = ace;
diff --git a/module/zfs/zfs_sa.c b/module/zfs/zfs_sa.c
index 1f15cae00da8..d0c7e169fde4 100644
--- a/module/zfs/zfs_sa.c
+++ b/module/zfs/zfs_sa.c
@@ -107,8 +107,8 @@ zfs_sa_symlink(znode_t *zp, char *link, int len, dmu_tx_t *tx)
if (ZFS_OLD_ZNODE_PHYS_SIZE + len <= dmu_bonus_max()) {
VERIFY0(dmu_set_bonus(db, len + ZFS_OLD_ZNODE_PHYS_SIZE, tx));
if (len) {
- bcopy(link, (caddr_t)db->db_data +
- ZFS_OLD_ZNODE_PHYS_SIZE, len);
+ memcpy((caddr_t)db->db_data +
+ ZFS_OLD_ZNODE_PHYS_SIZE, link, len);
}
} else {
dmu_buf_t *dbp;
@@ -120,7 +120,7 @@ zfs_sa_symlink(znode_t *zp, char *link, int len, dmu_tx_t *tx)
dmu_buf_will_dirty(dbp, tx);
ASSERT3U(len, <=, dbp->db_size);
- bcopy(link, dbp->db_data, len);
+ memcpy(dbp->db_data, link, len);
dmu_buf_rele(dbp, FTAG);
}
}
@@ -418,8 +418,9 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
/* if scanstamp then add scanstamp */
if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
- bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
- scanstamp, AV_SCANSTAMP_SZ);
+ memcpy(scanstamp,
+ (caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
+ AV_SCANSTAMP_SZ);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zfsvfs),
NULL, scanstamp, AV_SCANSTAMP_SZ);
zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 5d5b5f29a71d..62806e9fe8b1 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -355,9 +355,8 @@ zfs_clear_setid_bits_if_necessary(zfsvfs_t *zfsvfs, znode_t *zp, cred_t *cr,
* than one TX_SETATTR per transaction group.
*/
if (*clear_setid_bits_txgp != dmu_tx_get_txg(tx)) {
- vattr_t va;
+ vattr_t va = {0};
- bzero(&va, sizeof (va));
va.va_mask = AT_MODE;
va.va_nodeid = zp->z_id;
va.va_mode = newmode;
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index 10f89c916421..161ce3c97e71 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -259,12 +259,12 @@ zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
char *lr = (char *)(zilc + 1);
uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
- if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
+ if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
error = SET_ERROR(ECKSUM);
} else {
ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
- bcopy(lr, dst, len);
+ memcpy(dst, lr, len);
*end = (char *)dst + len;
*nbp = zilc->zc_next_blk;
}
@@ -273,14 +273,14 @@ zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
uint64_t size = BP_GET_LSIZE(bp);
zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
- if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
+ if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
(zilc->zc_nused > (size - sizeof (*zilc)))) {
error = SET_ERROR(ECKSUM);
} else {
ASSERT3U(zilc->zc_nused, <=,
SPA_OLD_MAXBLOCKSIZE);
- bcopy(lr, dst, zilc->zc_nused);
+ memcpy(dst, lr, zilc->zc_nused);
*end = (char *)dst + zilc->zc_nused;
*nbp = zilc->zc_next_blk;
}
@@ -307,7 +307,7 @@ zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
if (BP_IS_HOLE(bp)) {
if (wbuf != NULL)
- bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
+ memset(wbuf, 0, MAX(BP_GET_LSIZE(bp), lr->lr_length));
return (0);
}
@@ -330,7 +330,7 @@ zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
if (error == 0) {
if (wbuf != NULL)
- bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
+ memcpy(wbuf, abuf->b_data, arc_buf_size(abuf));
arc_buf_destroy(abuf, &abuf);
}
@@ -353,12 +353,10 @@ zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
uint64_t max_lr_seq = 0;
uint64_t blk_count = 0;
uint64_t lr_count = 0;
- blkptr_t blk, next_blk;
+ blkptr_t blk, next_blk = {{{{0}}}};
char *lrbuf, *lrp;
int error = 0;
- bzero(&next_blk, sizeof (blkptr_t));
-
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
@@ -786,7 +784,7 @@ zil_create(zilog_t *zilog)
dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL,
dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR));
- ASSERT(error != 0 || bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
+ ASSERT(error != 0 || memcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
IMPLY(error == 0, lwb != NULL);
return (lwb);
@@ -1648,7 +1646,7 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
/*
* clear unused data for security
*/
- bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
+ memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused);
spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER);
@@ -1782,7 +1780,7 @@ cont:
dnow = MIN(dlen, lwb_sp - reclen);
lr_buf = lwb->lwb_buf + lwb->lwb_nused;
- bcopy(lrc, lr_buf, reclen);
+ memcpy(lr_buf, lrc, reclen);
lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */
lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */
@@ -1838,7 +1836,7 @@ cont:
lwb->lwb_write_zio);
if (dbuf != NULL && error == 0 && dnow == dlen)
/* Zero any padding bytes in the last block. */
- bzero((char *)dbuf + lrwb->lr_length, dpad);
+ memset((char *)dbuf + lrwb->lr_length, 0, dpad);
if (error == EIO) {
txg_wait_synced(zilog->zl_dmu_pool, txg);
@@ -1888,7 +1886,7 @@ zil_itx_create(uint64_t txtype, size_t olrsize)
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_lr.lrc_seq = 0; /* defensive */
- bzero((char *)&itx->itx_lr + olrsize, lrsize - olrsize);
+ memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize);
itx->itx_sync = B_TRUE; /* default is synchronous */
itx->itx_callback = NULL;
itx->itx_callback_data = NULL;
@@ -3142,8 +3140,9 @@ zil_sync(zilog_t *zilog, dmu_tx_t *tx)
ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
- bzero(zh, sizeof (zil_header_t));
- bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
+ memset(zh, 0, sizeof (zil_header_t));
+ memset(zilog->zl_replayed_seq, 0,
+ sizeof (zilog->zl_replayed_seq));
if (zilog->zl_keep_first) {
/*
@@ -3645,7 +3644,7 @@ zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra,
/*
* Make a copy of the data so we can revise and extend it.
*/
- bcopy(lr, zr->zr_lr, reclen);
+ memcpy(zr->zr_lr, lr, reclen);
/*
* If this is a TX_WRITE with a blkptr, suck in the data.
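zil_lwb_write_issue() clears the unused tail of each log-write block so stale heap contents never reach disk, and the record-padding hunk has the same memset()-past-the-valid-bytes shape. A standalone sketch:

#include <stdio.h>
#include <string.h>

#define BLKSZ   128

int
main(void)
{
        char lwb_buf[BLKSZ];
        size_t nused;

        memset(lwb_buf, 0xEE, sizeof (lwb_buf));        /* stale contents */
        nused = (size_t)snprintf(lwb_buf, sizeof (lwb_buf), "lr: create");
        nused++;                                        /* keep the NUL */

        /*
         * Clear everything past the valid records so stale memory is
         * never written out, mirroring zil_lwb_write_issue().
         */
        memset(lwb_buf + nused, 0, sizeof (lwb_buf) - nused);

        printf("tail byte: %d\n", lwb_buf[BLKSZ - 1]);  /* prints 0 */
        return (0);
}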
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 04a76c682045..f6adea572418 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -822,7 +822,7 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
- bzero(zio, sizeof (zio_t));
+ memset(zio, 0, sizeof (zio_t));
mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
@@ -2883,7 +2883,7 @@ zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
gn = zio_gang_node_alloc(gnpp);
gbh = gn->gn_gbh;
- bzero(gbh, SPA_GANGBLOCKSIZE);
+ memset(gbh, 0, SPA_GANGBLOCKSIZE);
gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
/*
@@ -2912,9 +2912,9 @@ zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
zp.zp_nopwrite = B_FALSE;
zp.zp_encrypt = gio->io_prop.zp_encrypt;
zp.zp_byteorder = gio->io_prop.zp_byteorder;
- bzero(zp.zp_salt, ZIO_DATA_SALT_LEN);
- bzero(zp.zp_iv, ZIO_DATA_IV_LEN);
- bzero(zp.zp_mac, ZIO_DATA_MAC_LEN);
+ memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
+ memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
+ memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
has_data ? abd_get_offset(pio->io_abd, pio->io_size -
@@ -3011,7 +3011,7 @@ zio_nop_write(zio_t *zio)
ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
- ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
+ ASSERT(memcmp(&bp->blk_prop, &bp_orig->blk_prop,
sizeof (uint64_t)) == 0);
/*
@@ -4561,7 +4561,7 @@ zio_done(zio_t *zio)
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
ASSERT(zio->io_bp->blk_pad[0] == 0);
ASSERT(zio->io_bp->blk_pad[1] == 0);
- ASSERT(bcmp(zio->io_bp, &zio->io_bp_copy,
+ ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
sizeof (blkptr_t)) == 0 ||
(zio->io_bp == zio_unique_parent(zio)->io_bp));
if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
diff --git a/module/zfs/zio_checksum.c b/module/zfs/zio_checksum.c
index 4dbab68dd7aa..d89e5765326f 100644
--- a/module/zfs/zio_checksum.c
+++ b/module/zfs/zio_checksum.c
@@ -351,7 +351,7 @@ zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
zio_eck_t eck;
size_t eck_offset;
- bzero(&saved, sizeof (zio_cksum_t));
+ memset(&saved, 0, sizeof (zio_cksum_t));
if (checksum == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t zilc;