Diffstat (limited to 'module/icp')
-rw-r--r--	module/icp/algs/aes/aes_impl.c      |  12
-rw-r--r--	module/icp/algs/edonr/edonr.c       |  40
-rw-r--r--	module/icp/algs/modes/cbc.c         |  30
-rw-r--r--	module/icp/algs/modes/ccm.c         | 102
-rw-r--r--	module/icp/algs/modes/ctr.c         |  21
-rw-r--r--	module/icp/algs/modes/ecb.c         |  14
-rw-r--r--	module/icp/algs/modes/gcm.c         |  86
-rw-r--r--	module/icp/algs/modes/modes.c       |   2
-rw-r--r--	module/icp/algs/sha2/sha2.c         |  18
-rw-r--r--	module/icp/algs/skein/skein.c       | 136
-rw-r--r--	module/icp/algs/skein/skein_port.h  |   4
-rw-r--r--	module/icp/api/kcf_ctxops.c         |   2
-rw-r--r--	module/icp/core/kcf_mech_tabs.c     |   3
-rw-r--r--	module/icp/core/kcf_prov_lib.c      |   6
-rw-r--r--	module/icp/io/aes.c                 |  23
-rw-r--r--	module/icp/io/sha2_mod.c            |  50
-rw-r--r--	module/icp/io/skein_mod.c           |  26
17 files changed, 285 insertions, 290 deletions
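
Editorial note, not part of the commit: every hunk below applies the same mechanical mapping, with source and destination swapped between the two APIs — bcopy(src, dst, n) becomes memcpy(dst, src, n), bzero(buf, n) becomes memset(buf, 0, n), and bcmp(a, b, n) becomes memcmp(a, b, n). A minimal self-contained userspace sketch of the three mappings (hypothetical buffers, not ICP code):

#include <assert.h>
#include <string.h>

int
main(void)
{
	unsigned char src[16] = { 1, 2, 3 }, dst[16];

	memcpy(dst, src, sizeof (dst));		/* was: bcopy(src, dst, n) */
	assert(memcmp(src, dst, sizeof (dst)) == 0);	/* was: bcmp(a, b, n) */
	memset(dst, 0, sizeof (dst));		/* was: bzero(dst, n) */
	assert(dst[0] == 0);
	return (0);
}
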
diff --git a/module/icp/algs/aes/aes_impl.c b/module/icp/algs/aes/aes_impl.c
index c238bee2170b..f518a54a6185 100644
--- a/module/icp/algs/aes/aes_impl.c
+++ b/module/icp/algs/aes/aes_impl.c
@@ -47,7 +47,7 @@ aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
 	union {
 		uint64_t ka64[4];
 		uint32_t ka32[8];
-	} keyarr;
+	} keyarr;
 
 	switch (keyBits) {
 	case 128:
@@ -81,7 +81,7 @@ aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
 				keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
 			}
 		} else {
-			bcopy(cipherKey, keyarr.ka32, keysize);
+			memcpy(keyarr.ka32, cipherKey, keysize);
 		}
 	} else {
 		/* byte swap */
@@ -132,7 +132,7 @@ aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
 		buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
 		buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
 	} else
-		bcopy(pt, &buffer, AES_BLOCK_LEN);
+		memcpy(&buffer, pt, AES_BLOCK_LEN);
 
 	ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr, buffer, buffer);
 
@@ -143,7 +143,7 @@ aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
 		*(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
 		*(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
 	} else
-		bcopy(&buffer, ct, AES_BLOCK_LEN);
+		memcpy(ct, &buffer, AES_BLOCK_LEN);
 	}
 	return (CRYPTO_SUCCESS);
 }
@@ -179,7 +179,7 @@ aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
 		buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
 		buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
 	} else
-		bcopy(ct, &buffer, AES_BLOCK_LEN);
+		memcpy(&buffer, ct, AES_BLOCK_LEN);
 
 	ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr, buffer, buffer);
 
@@ -190,7 +190,7 @@ aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
 		*(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
 		*(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
 	} else
-		bcopy(&buffer, pt, AES_BLOCK_LEN);
+		memcpy(pt, &buffer, AES_BLOCK_LEN);
 	}
 	return (CRYPTO_SUCCESS);
 }
diff --git a/module/icp/algs/edonr/edonr.c b/module/icp/algs/edonr/edonr.c
index 20418eaa73cf..dcf63fc18b20 100644
--- a/module/icp/algs/edonr/edonr.c
+++ b/module/icp/algs/edonr/edonr.c
@@ -470,32 +470,32 @@ EdonRInit(EdonRState *state, size_t hashbitlen)
 		state->hashbitlen = 224;
 		state->bits_processed = 0;
 		state->unprocessed_bits = 0;
-		bcopy(i224p2, hashState224(state)->DoublePipe,
-		    16 * sizeof (uint32_t));
+		memcpy(hashState224(state)->DoublePipe, i224p2,
+		    sizeof (i224p2));
 		break;
 
 	case 256:
 		state->hashbitlen = 256;
 		state->bits_processed = 0;
 		state->unprocessed_bits = 0;
-		bcopy(i256p2, hashState256(state)->DoublePipe,
-		    16 * sizeof (uint32_t));
+		memcpy(hashState256(state)->DoublePipe, i256p2,
+		    sizeof (i256p2));
 		break;
 
 	case 384:
 		state->hashbitlen = 384;
 		state->bits_processed = 0;
 		state->unprocessed_bits = 0;
-		bcopy(i384p2, hashState384(state)->DoublePipe,
-		    16 * sizeof (uint64_t));
+		memcpy(hashState384(state)->DoublePipe, i384p2,
+		    sizeof (i384p2));
 		break;
 
 	case 512:
 		state->hashbitlen = 512;
 		state->bits_processed = 0;
 		state->unprocessed_bits = 0;
-		bcopy(i512p2, hashState224(state)->DoublePipe,
-		    16 * sizeof (uint64_t));
+		memcpy(hashState224(state)->DoublePipe, i512p2,
+		    sizeof (i512p2));
 		break;
 	}
 }
@@ -520,8 +520,9 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
 			ASSERT(state->unprocessed_bits + databitlen <=
 			    EdonR256_BLOCK_SIZE * 8);
-			bcopy(data, hashState256(state)->LastPart
-			    + (state->unprocessed_bits >> 3), LastBytes);
+			memcpy(hashState256(state)->LastPart
+			    + (state->unprocessed_bits >> 3),
+			    data, LastBytes);
 			state->unprocessed_bits += (int)databitlen;
 			databitlen = state->unprocessed_bits;
 			/* LINTED E_BAD_PTR_CAST_ALIGN */
@@ -542,7 +543,8 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
 			    1) & 0x01ff;
 			data32 += bits_processed >> 5;	/* byte size update */
-			bcopy(data32, hashState256(state)->LastPart, LastBytes);
+			memmove(hashState256(state)->LastPart,
+			    data32, LastBytes);
 		}
 		break;
 
@@ -555,8 +557,9 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
 			ASSERT(state->unprocessed_bits + databitlen <=
 			    EdonR512_BLOCK_SIZE * 8);
-			bcopy(data, hashState512(state)->LastPart
-			    + (state->unprocessed_bits >> 3), LastBytes);
+			memcpy(hashState512(state)->LastPart
+			    + (state->unprocessed_bits >> 3),
+			    data, LastBytes);
 			state->unprocessed_bits += (int)databitlen;
 			databitlen = state->unprocessed_bits;
 			/* LINTED E_BAD_PTR_CAST_ALIGN */
@@ -577,7 +580,8 @@ EdonRUpdate(EdonRState *state, const uint8_t *data, size_t databitlen)
 			    1) & 0x03ff;
 			data64 += bits_processed >> 6;	/* byte size update */
-			bcopy(data64, hashState512(state)->LastPart, LastBytes);
+			memmove(hashState512(state)->LastPart,
+			    data64, LastBytes);
 		}
 		break;
 	}
@@ -682,7 +686,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
 		for (j = 0; j < EdonR224_DIGEST_SIZE >> 2; j++)
 			st_swap32(s32[j], d32 + j);
 #else
-		bcopy(hashState256(state)->DoublePipe + 9, hashval,
+		memcpy(hashval, hashState256(state)->DoublePipe + 9,
 		    EdonR224_DIGEST_SIZE);
 #endif
 		break;
@@ -696,7 +700,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
 		for (j = 0; j < EdonR256_DIGEST_SIZE >> 2; j++)
 			st_swap32(s32[j], d32 + j);
 #else
-		bcopy(hashState256(state)->DoublePipe + 8, hashval,
+		memcpy(hashval, hashState256(state)->DoublePipe + 8,
 		    EdonR256_DIGEST_SIZE);
 #endif
 		break;
@@ -710,7 +714,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
 		for (j = 0; j < EdonR384_DIGEST_SIZE >> 3; j++)
 			st_swap64(s64[j], d64 + j);
 #else
-		bcopy(hashState384(state)->DoublePipe + 10, hashval,
+		memcpy(hashval, hashState384(state)->DoublePipe + 10,
 		    EdonR384_DIGEST_SIZE);
 #endif
 		break;
@@ -724,7 +728,7 @@ EdonRFinal(EdonRState *state, uint8_t *hashval)
 		for (j = 0; j < EdonR512_DIGEST_SIZE >> 3; j++)
 			st_swap64(s64[j], d64 + j);
 #else
-		bcopy(hashState512(state)->DoublePipe + 8, hashval,
+		memcpy(hashval, hashState512(state)->DoublePipe + 8,
 		    EdonR512_DIGEST_SIZE);
 #endif
 		break;
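
Editorial note, not part of the commit: the two EdonRUpdate() hunks above deliberately use memmove() rather than memcpy(), because data32/data64 can point into the very LastPart buffer being written (leftover bits are shifted to the front of the block). memcpy() on overlapping regions is undefined behavior; memmove() is specified to handle it. A sketch with a hypothetical buffer:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char last_part[16] = "ABCDEFGH";

	/* move the unprocessed tail to the front of the same buffer */
	memmove(last_part, last_part + 4, 5);	/* "EFGH" plus its NUL */
	printf("%s\n", last_part);		/* prints EFGH */
	return (0);
}
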
diff --git a/module/icp/algs/modes/cbc.c b/module/icp/algs/modes/cbc.c
index 73605f04d858..da3ff4e3595b 100644
--- a/module/icp/algs/modes/cbc.c
+++ b/module/icp/algs/modes/cbc.c
@@ -51,8 +51,8 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
 
 	if (length + ctx->cbc_remainder_len < block_size) {
 		/* accumulate bytes here and return */
-		bcopy(datap,
-		    (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+		memcpy((uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+		    datap,
 		    length);
 		ctx->cbc_remainder_len += length;
 		ctx->cbc_copy_to = datap;
@@ -70,8 +70,8 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
 			if (need > remainder)
 				return (CRYPTO_DATA_LEN_RANGE);
 
-			bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
-			    [ctx->cbc_remainder_len], need);
+			memcpy(&((uint8_t *)ctx->cbc_remainder)
+			    [ctx->cbc_remainder_len], datap, need);
 
 			blockp = (uint8_t *)ctx->cbc_remainder;
 		} else {
@@ -91,10 +91,10 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
 			if (out_data_1_len == block_size) {
 				copy_block(lastp, out_data_1);
 			} else {
-				bcopy(lastp, out_data_1, out_data_1_len);
+				memcpy(out_data_1, lastp, out_data_1_len);
 				if (out_data_2 != NULL) {
-					bcopy(lastp + out_data_1_len,
-					    out_data_2,
+					memcpy(out_data_2,
+					    lastp + out_data_1_len,
 					    block_size - out_data_1_len);
 				}
 			}
@@ -113,7 +113,7 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
 
 	/* Incomplete last block. */
 	if (remainder > 0 && remainder < block_size) {
-		bcopy(datap, ctx->cbc_remainder, remainder);
+		memcpy(ctx->cbc_remainder, datap, remainder);
 		ctx->cbc_remainder_len = remainder;
 		ctx->cbc_copy_to = datap;
 		goto out;
@@ -157,8 +157,8 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
 
 	if (length + ctx->cbc_remainder_len < block_size) {
 		/* accumulate bytes here and return */
-		bcopy(datap,
-		    (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+		memcpy((uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
+		    datap,
 		    length);
 		ctx->cbc_remainder_len += length;
 		ctx->cbc_copy_to = datap;
@@ -176,8 +176,8 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
 			if (need > remainder)
 				return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
 
-			bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
-			    [ctx->cbc_remainder_len], need);
+			memcpy(&((uint8_t *)ctx->cbc_remainder)
+			    [ctx->cbc_remainder_len], datap, need);
 
 			blockp = (uint8_t *)ctx->cbc_remainder;
 		} else {
@@ -203,9 +203,9 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
 		crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
 		    &out_data_1_len, &out_data_2, block_size);
 
-		bcopy(blockp, out_data_1, out_data_1_len);
+		memcpy(out_data_1, blockp, out_data_1_len);
 		if (out_data_2 != NULL) {
-			bcopy(blockp + out_data_1_len, out_data_2,
+			memcpy(out_data_2, blockp + out_data_1_len,
 			    block_size - out_data_1_len);
 		}
 
@@ -224,7 +224,7 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
 
 	/* Incomplete last block. */
 	if (remainder > 0 && remainder < block_size) {
-		bcopy(datap, ctx->cbc_remainder, remainder);
+		memcpy(ctx->cbc_remainder, datap, remainder);
 		ctx->cbc_remainder_len = remainder;
 		ctx->cbc_lastp = lastp;
 		ctx->cbc_copy_to = datap;
diff --git a/module/icp/algs/modes/ccm.c b/module/icp/algs/modes/ccm.c
index a41cbc395fd6..9fde2684a7c4 100644
--- a/module/icp/algs/modes/ccm.c
+++ b/module/icp/algs/modes/ccm.c
@@ -59,8 +59,8 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
 
 	if (length + ctx->ccm_remainder_len < block_size) {
 		/* accumulate bytes here and return */
-		bcopy(datap,
-		    (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+		memcpy((uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+		    datap,
 		    length);
 		ctx->ccm_remainder_len += length;
 		ctx->ccm_copy_to = datap;
@@ -80,8 +80,8 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
 			if (need > remainder)
 				return (CRYPTO_DATA_LEN_RANGE);
 
-			bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
-			    [ctx->ccm_remainder_len], need);
+			memcpy(&((uint8_t *)ctx->ccm_remainder)
+			    [ctx->ccm_remainder_len], datap, need);
 
 			blockp = (uint8_t *)ctx->ccm_remainder;
 		} else {
@@ -132,10 +132,10 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
 			if (out_data_1_len == block_size) {
 				copy_block(lastp, out_data_1);
 			} else {
-				bcopy(lastp, out_data_1, out_data_1_len);
+				memcpy(out_data_1, lastp, out_data_1_len);
 				if (out_data_2 != NULL) {
-					bcopy(lastp + out_data_1_len,
-					    out_data_2,
+					memcpy(out_data_2,
+					    lastp + out_data_1_len,
 					    block_size - out_data_1_len);
 				}
 			}
@@ -154,7 +154,7 @@ ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
 
 	/* Incomplete last block. */
 	if (remainder > 0 && remainder < block_size) {
-		bcopy(datap, ctx->ccm_remainder, remainder);
+		memcpy(ctx->ccm_remainder, datap, remainder);
 		ctx->ccm_remainder_len = remainder;
 		ctx->ccm_copy_to = datap;
 		goto out;
@@ -224,10 +224,10 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
 
 		/* ccm_mac_input_buf is not used for encryption */
 		macp = (uint8_t *)ctx->ccm_mac_input_buf;
-		bzero(macp, block_size);
+		memset(macp, 0, block_size);
 
 		/* copy remainder to temporary buffer */
-		bcopy(ctx->ccm_remainder, macp, ctx->ccm_remainder_len);
+		memcpy(macp, ctx->ccm_remainder, ctx->ccm_remainder_len);
 
 		/* calculate the CBC MAC */
 		xor_block(macp, mac_buf);
@@ -254,33 +254,32 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
 	    ctx->ccm_remainder_len + ctx->ccm_mac_len);
 
 	if (ctx->ccm_remainder_len > 0) {
-
 		/* copy temporary block to where it belongs */
 		if (out_data_2 == NULL) {
 			/* everything will fit in out_data_1 */
-			bcopy(macp, out_data_1, ctx->ccm_remainder_len);
-			bcopy(ccm_mac_p, out_data_1 + ctx->ccm_remainder_len,
+			memcpy(out_data_1, macp, ctx->ccm_remainder_len);
+			memcpy(out_data_1 + ctx->ccm_remainder_len, ccm_mac_p,
 			    ctx->ccm_mac_len);
 		} else {
-
 			if (out_data_1_len < ctx->ccm_remainder_len) {
-
 				size_t data_2_len_used;
-				bcopy(macp, out_data_1, out_data_1_len);
+				memcpy(out_data_1, macp, out_data_1_len);
 				data_2_len_used = ctx->ccm_remainder_len
 				    - out_data_1_len;
-				bcopy((uint8_t *)macp + out_data_1_len,
-				    out_data_2, data_2_len_used);
-				bcopy(ccm_mac_p, out_data_2 + data_2_len_used,
+				memcpy(out_data_2,
+				    (uint8_t *)macp + out_data_1_len,
+				    data_2_len_used);
+				memcpy(out_data_2 + data_2_len_used,
+				    ccm_mac_p,
 				    ctx->ccm_mac_len);
 			} else {
-				bcopy(macp, out_data_1, out_data_1_len);
+				memcpy(out_data_1, macp, out_data_1_len);
 				if (out_data_1_len == ctx->ccm_remainder_len) {
 					/* mac will be in out_data_2 */
-					bcopy(ccm_mac_p, out_data_2,
+					memcpy(out_data_2, ccm_mac_p,
 					    ctx->ccm_mac_len);
 				} else {
 					size_t len_not_used = out_data_1_len -
@@ -290,11 +289,11 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
 					 * out_data_1, part of the mac will be
 					 * in out_data_2
 					 */
-					bcopy(ccm_mac_p,
-					    out_data_1 + ctx->ccm_remainder_len,
-					    len_not_used);
-					bcopy(ccm_mac_p + len_not_used,
-					    out_data_2,
+					memcpy(out_data_1 +
+					    ctx->ccm_remainder_len,
+					    ccm_mac_p, len_not_used);
+					memcpy(out_data_2,
+					    ccm_mac_p + len_not_used,
 					    ctx->ccm_mac_len - len_not_used);
 				}
@@ -302,9 +301,9 @@ ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
 		}
 	} else {
 		/* copy block to where it belongs */
-		bcopy(ccm_mac_p, out_data_1, out_data_1_len);
+		memcpy(out_data_1, ccm_mac_p, out_data_1_len);
 		if (out_data_2 != NULL) {
-			bcopy(ccm_mac_p + out_data_1_len, out_data_2,
+			memcpy(out_data_2, ccm_mac_p + out_data_1_len,
 			    block_size - out_data_1_len);
 		}
 	}
@@ -372,7 +371,7 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
 		}
 
 		tmp = (uint8_t *)ctx->ccm_mac_input_buf;
-		bcopy(datap, tmp + pm_len, length);
+		memcpy(tmp + pm_len, datap, length);
 
 		ctx->ccm_processed_mac_len += length;
 		return (CRYPTO_SUCCESS);
@@ -405,15 +404,15 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
 			mac_len = length - pt_part;
 			ctx->ccm_processed_mac_len = mac_len;
-			bcopy(data + pt_part, ctx->ccm_mac_input_buf, mac_len);
+			memcpy(ctx->ccm_mac_input_buf, data + pt_part, mac_len);
 
 			if (pt_part + ctx->ccm_remainder_len < block_size) {
 				/*
 				 * since this is last of the ciphertext, will
 				 * just decrypt with it here
 				 */
-				bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
-				    [ctx->ccm_remainder_len], pt_part);
+				memcpy(&((uint8_t *)ctx->ccm_remainder)
+				    [ctx->ccm_remainder_len], datap, pt_part);
 				ctx->ccm_remainder_len += pt_part;
 				ccm_decrypt_incomplete_block(ctx,
 				    encrypt_block);
 				ctx->ccm_processed_data_len +=
 				    ctx->ccm_remainder_len;
@@ -424,9 +423,9 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
 			length = pt_part;
 		}
 	} else if (length + ctx->ccm_remainder_len < block_size) {
-		/* accumulate bytes here and return */
-		bcopy(datap,
-		    (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+		/* accumulate bytes here and return */
+		memcpy((uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
+		    datap,
 		    length);
 		ctx->ccm_remainder_len += length;
 		ctx->ccm_copy_to = datap;
@@ -441,8 +440,8 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
 			if (need > remainder)
 				return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
 
-			bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
-			    [ctx->ccm_remainder_len], need);
+			memcpy(&((uint8_t *)ctx->ccm_remainder)
+			    [ctx->ccm_remainder_len], datap, need);
 
 			blockp = (uint8_t *)ctx->ccm_remainder;
 		} else {
@@ -492,7 +491,7 @@ ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
 
 	/* Incomplete last block */
 	if (remainder > 0 && remainder < block_size) {
-		bcopy(datap, ctx->ccm_remainder, remainder);
+		memcpy(ctx->ccm_remainder, datap, remainder);
 		ctx->ccm_remainder_len = remainder;
 		ctx->ccm_copy_to = datap;
 		if (ctx->ccm_processed_mac_len > 0) {
@@ -539,10 +538,9 @@ ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
 	macp = (uint8_t *)ctx->ccm_tmp;
 
 	while (mac_remain > 0) {
-
 		if (mac_remain < block_size) {
-			bzero(macp, block_size);
-			bcopy(pt, macp, mac_remain);
+			memset(macp, 0, block_size);
+			memcpy(macp, pt, mac_remain);
 			mac_remain = 0;
 		} else {
 			copy_block(pt, macp);
@@ -560,7 +558,7 @@ ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
 	calculate_ccm_mac((ccm_ctx_t *)ctx, ccm_mac_p, encrypt_block);
 
 	/* compare the input CCM MAC value with what we calculated */
-	if (bcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
+	if (memcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
 		/* They don't match */
 		return (CRYPTO_INVALID_MAC);
 	} else {
@@ -654,10 +652,10 @@ ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
 	b0[0] = (have_adata << 6) | (((t - 2) / 2) << 3) | (q - 1);
 
 	/* copy the nonce value into b0 */
-	bcopy(nonce, &(b0[1]), nonceSize);
+	memcpy(&(b0[1]), nonce, nonceSize);
 
 	/* store the length of the payload into b0 */
-	bzero(&(b0[1+nonceSize]), q);
+	memset(&(b0[1+nonceSize]), 0, q);
 
 	payloadSize = aes_ctx->ccm_data_len;
 	limit = 8 < q ? 8 : q;
@@ -673,9 +671,9 @@ ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
 	cb[0] = 0x07 & (q-1); /* first byte */
 
 	/* copy the nonce value into the counter block */
-	bcopy(nonce, &(cb[1]), nonceSize);
+	memcpy(&(cb[1]), nonce, nonceSize);
 
-	bzero(&(cb[1+nonceSize]), q);
+	memset(&(cb[1+nonceSize]), 0, q);
 
 	/* Create the mask for the counter field based on the size of nonce */
 	q <<= 3;
@@ -782,7 +780,7 @@ ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
 
 	/* The IV for CBC MAC for AES CCM mode is always zero */
 	ivp = (uint8_t *)ctx->ccm_tmp;
-	bzero(ivp, block_size);
+	memset(ivp, 0, block_size);
 
 	xor_block(ivp, mac_buf);
@@ -800,14 +798,14 @@ ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
 	/* 1st block: it contains encoded associated data, and some data */
 	authp = (uint8_t *)ctx->ccm_tmp;
-	bzero(authp, block_size);
-	bcopy(encoded_a, authp, encoded_a_len);
+	memset(authp, 0, block_size);
+	memcpy(authp, encoded_a, encoded_a_len);
 	processed = block_size - encoded_a_len;
 	if (processed > auth_data_len) {
 		/* in case auth_data is very small */
 		processed = auth_data_len;
 	}
-	bcopy(auth_data, authp+encoded_a_len, processed);
+	memcpy(authp+encoded_a_len, auth_data, processed);
 	/* xor with previous buffer */
 	xor_block(authp, mac_buf);
 	encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
@@ -823,8 +821,8 @@ ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
 			 * There's not a block full of data, pad rest of
 			 * buffer with zero
 			 */
-			bzero(authp, block_size);
-			bcopy(&(auth_data[processed]), authp, remainder);
+			memset(authp, 0, block_size);
+			memcpy(authp, &(auth_data[processed]), remainder);
 			datap = (uint8_t *)authp;
 			remainder = 0;
 		} else {
diff --git a/module/icp/algs/modes/ctr.c b/module/icp/algs/modes/ctr.c
index 82295cda877e..c31c6251624b 100644
--- a/module/icp/algs/modes/ctr.c
+++ b/module/icp/algs/modes/ctr.c
@@ -52,8 +52,8 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
 
 	if (length + ctx->ctr_remainder_len < block_size) {
 		/* accumulate bytes here and return */
-		bcopy(datap,
-		    (uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len,
+		memcpy((uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len,
+		    datap,
 		    length);
 		ctx->ctr_remainder_len += length;
 		ctx->ctr_copy_to = datap;
@@ -71,8 +71,8 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
 			if (need > remainder)
 				return (CRYPTO_DATA_LEN_RANGE);
 
-			bcopy(datap, &((uint8_t *)ctx->ctr_remainder)
-			    [ctx->ctr_remainder_len], need);
+			memcpy(&((uint8_t *)ctx->ctr_remainder)
+			    [ctx->ctr_remainder_len], datap, need);
 
 			blockp = (uint8_t *)ctx->ctr_remainder;
 		} else {
@@ -114,9 +114,9 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
 		    &out_data_1_len, &out_data_2, block_size);
 
 		/* copy block to where it belongs */
-		bcopy(lastp, out_data_1, out_data_1_len);
+		memcpy(out_data_1, lastp, out_data_1_len);
 		if (out_data_2 != NULL) {
-			bcopy(lastp + out_data_1_len, out_data_2,
+			memcpy(out_data_2, lastp + out_data_1_len,
 			    block_size - out_data_1_len);
 		}
 		/* update offset */
@@ -134,7 +134,7 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
 
 	/* Incomplete last block. */
 	if (remainder > 0 && remainder < block_size) {
-		bcopy(datap, ctx->ctr_remainder, remainder);
+		memcpy(ctx->ctr_remainder, datap, remainder);
 		ctx->ctr_remainder_len = remainder;
 		ctx->ctr_copy_to = datap;
 		goto out;
@@ -176,10 +176,11 @@ ctr_mode_final(ctr_ctx_t *ctx, crypto_data_t *out,
 	crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
 	    &out_data_1_len, &out_data_2, ctx->ctr_remainder_len);
-	bcopy(p, out_data_1, out_data_1_len);
+	memcpy(out_data_1, p, out_data_1_len);
 	if (out_data_2 != NULL) {
-		bcopy((uint8_t *)p + out_data_1_len,
-		    out_data_2, ctx->ctr_remainder_len - out_data_1_len);
+		memcpy(out_data_2,
+		    (uint8_t *)p + out_data_1_len,
+		    ctx->ctr_remainder_len - out_data_1_len);
 	}
 	out->cd_offset += ctx->ctr_remainder_len;
 	ctx->ctr_remainder_len = 0;
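
Editorial note, not part of the commit: cbc.c, ccm.c, and ctr.c above all repeat one pattern — crypto_get_ptrs() may hand back an output block split across two uio segments, so the first out_data_1_len bytes go to out_data_1 and the remainder, if any, to out_data_2. A hypothetical sketch of that split copy (put_block() and the segment buffers are illustrative names only):

#include <assert.h>
#include <stddef.h>
#include <string.h>

static void
put_block(const unsigned char *block, size_t block_size,
    unsigned char *out_data_1, size_t out_data_1_len,
    unsigned char *out_data_2)
{
	memcpy(out_data_1, block, out_data_1_len);
	if (out_data_2 != NULL) {
		memcpy(out_data_2, block + out_data_1_len,
		    block_size - out_data_1_len);
	}
}

int
main(void)
{
	unsigned char block[16] = "0123456789abcde";
	unsigned char seg1[10], seg2[6];

	put_block(block, sizeof (block), seg1, sizeof (seg1), seg2);
	assert(seg2[4] == 'e');	/* bytes 10..15 landed in seg2 */
	return (0);
}
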
diff --git a/module/icp/algs/modes/ecb.c b/module/icp/algs/modes/ecb.c
index ffbdb9d57d0a..e0b8ab15cdcf 100644
--- a/module/icp/algs/modes/ecb.c
+++ b/module/icp/algs/modes/ecb.c
@@ -49,8 +49,8 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
 
 	if (length + ctx->ecb_remainder_len < block_size) {
 		/* accumulate bytes here and return */
-		bcopy(datap,
-		    (uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len,
+		memcpy((uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len,
+		    datap,
 		    length);
 		ctx->ecb_remainder_len += length;
 		ctx->ecb_copy_to = datap;
@@ -68,8 +68,8 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
 			if (need > remainder)
 				return (CRYPTO_DATA_LEN_RANGE);
 
-			bcopy(datap, &((uint8_t *)ctx->ecb_remainder)
-			    [ctx->ecb_remainder_len], need);
+			memcpy(&((uint8_t *)ctx->ecb_remainder)
+			    [ctx->ecb_remainder_len], datap, need);
 
 			blockp = (uint8_t *)ctx->ecb_remainder;
 		} else {
@@ -81,9 +81,9 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
 		    &out_data_1_len, &out_data_2, block_size);
 
 		/* copy block to where it belongs */
-		bcopy(lastp, out_data_1, out_data_1_len);
+		memcpy(out_data_1, lastp, out_data_1_len);
 		if (out_data_2 != NULL) {
-			bcopy(lastp + out_data_1_len, out_data_2,
+			memcpy(out_data_2, lastp + out_data_1_len,
 			    block_size - out_data_1_len);
 		}
 		/* update offset */
@@ -101,7 +101,7 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
 
 	/* Incomplete last block. */
 	if (remainder > 0 && remainder < block_size) {
-		bcopy(datap, ctx->ecb_remainder, remainder);
+		memcpy(ctx->ecb_remainder, datap, remainder);
 		ctx->ecb_remainder_len = remainder;
 		ctx->ecb_copy_to = datap;
 		goto out;
diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c
index 7d34c2b040f6..e666b45b5f44 100644
--- a/module/icp/algs/modes/gcm.c
+++ b/module/icp/algs/modes/gcm.c
@@ -108,8 +108,8 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
 
 	if (length + ctx->gcm_remainder_len < block_size) {
 		/* accumulate bytes here and return */
-		bcopy(datap,
-		    (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
+		memcpy((uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
+		    datap,
 		    length);
 		ctx->gcm_remainder_len += length;
 		if (ctx->gcm_copy_to == NULL) {
@@ -130,8 +130,8 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
 			if (need > remainder)
 				return (CRYPTO_DATA_LEN_RANGE);
 
-			bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
-			    [ctx->gcm_remainder_len], need);
+			memcpy(&((uint8_t *)ctx->gcm_remainder)
+			    [ctx->gcm_remainder_len], datap, need);
 
 			blockp = (uint8_t *)ctx->gcm_remainder;
 		} else {
@@ -162,10 +162,10 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
 			if (out_data_1_len == block_size) {
 				copy_block(lastp, out_data_1);
 			} else {
-				bcopy(lastp, out_data_1, out_data_1_len);
+				memcpy(out_data_1, lastp, out_data_1_len);
 				if (out_data_2 != NULL) {
-					bcopy(lastp + out_data_1_len,
-					    out_data_2,
+					memcpy(out_data_2,
+					    lastp + out_data_1_len,
 					    block_size - out_data_1_len);
 				}
 			}
@@ -187,7 +187,7 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
 
 	/* Incomplete last block. */
 	if (remainder > 0 && remainder < block_size) {
-		bcopy(datap, ctx->gcm_remainder, remainder);
+		memcpy(ctx->gcm_remainder, datap, remainder);
 		ctx->gcm_remainder_len = remainder;
 		ctx->gcm_copy_to = datap;
 		goto out;
@@ -245,7 +245,7 @@ gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
 		    (uint8_t *)ctx->gcm_tmp);
 
 		macp = (uint8_t *)ctx->gcm_remainder;
-		bzero(macp + ctx->gcm_remainder_len,
+		memset(macp + ctx->gcm_remainder_len, 0,
 		    block_size - ctx->gcm_remainder_len);
 
 		/* XOR with counter block */
@@ -309,8 +309,8 @@ gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
 	counterp = (uint8_t *)ctx->gcm_tmp;
 
 	/* authentication tag */
-	bzero((uint8_t *)ctx->gcm_tmp, block_size);
-	bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);
+	memset((uint8_t *)ctx->gcm_tmp, 0, block_size);
+	memcpy((uint8_t *)ctx->gcm_tmp, datap, ctx->gcm_remainder_len);
 
 	/* add ciphertext to the hash */
 	GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gcm_impl_get_ops());
@@ -350,7 +350,7 @@ gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
 		}
 
 		if (ctx->gcm_pt_buf != NULL) {
-			bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
+			memcpy(new, ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
 			vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
 		} else {
 			ASSERT0(ctx->gcm_pt_buf_len);
@@ -358,7 +358,7 @@ gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
 
 	ctx->gcm_pt_buf = new;
 	ctx->gcm_pt_buf_len = new_len;
-	bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
+	memcpy(&ctx->gcm_pt_buf[ctx->gcm_processed_data_len], data,
 	    length);
 	ctx->gcm_processed_data_len += length;
 }
@@ -397,7 +397,7 @@ gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
 	while (remainder > 0) {
 		/* Incomplete last block */
 		if (remainder < block_size) {
-			bcopy(blockp, ctx->gcm_remainder, remainder);
+			memcpy(ctx->gcm_remainder, blockp, remainder);
 			ctx->gcm_remainder_len = remainder;
 			/*
 			 * not expecting anymore ciphertext, just
@@ -438,7 +438,7 @@ out:
 	xor_block((uint8_t *)ctx->gcm_J0, ghash);
 
 	/* compare the input authentication tag with what we calculated */
-	if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
+	if (memcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
 		/* They don't match */
 		return (CRYPTO_INVALID_MAC);
 	} else {
@@ -495,7 +495,7 @@ gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
 	ghash = (uint8_t *)ctx->gcm_ghash;
 	cb = (uint8_t *)ctx->gcm_cb;
 	if (iv_len == 12) {
-		bcopy(iv, cb, 12);
+		memcpy(cb, iv, 12);
 		cb[12] = 0;
 		cb[13] = 0;
 		cb[14] = 0;
@@ -506,8 +506,8 @@ gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
 		/* GHASH the IV */
 		do {
 			if (remainder < block_size) {
-				bzero(cb, block_size);
-				bcopy(&(iv[processed]), cb, remainder);
+				memset(cb, 0, block_size);
+				memcpy(cb, &(iv[processed]), remainder);
 				datap = (uint8_t *)cb;
 				remainder = 0;
 			} else {
@@ -539,7 +539,7 @@ gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
 	size_t remainder, processed;
 
 	/* encrypt zero block to get subkey H */
-	bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
+	memset(ctx->gcm_H, 0, sizeof (ctx->gcm_H));
 	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
 	    (uint8_t *)ctx->gcm_H);
@@ -549,8 +549,8 @@ gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
 	gops = gcm_impl_get_ops();
 	authp = (uint8_t *)ctx->gcm_tmp;
 	ghash = (uint8_t *)ctx->gcm_ghash;
-	bzero(authp, block_size);
-	bzero(ghash, block_size);
+	memset(authp, 0, block_size);
+	memset(ghash, 0, block_size);
 
 	processed = 0;
 	remainder = auth_data_len;
@@ -562,9 +562,9 @@ gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
 			 */
 			if (auth_data != NULL) {
-				bzero(authp, block_size);
-				bcopy(&(auth_data[processed]),
-				    authp, remainder);
+				memset(authp, 0, block_size);
+				memcpy(authp, &(auth_data[processed]),
+				    remainder);
 			} else {
 				ASSERT0(remainder);
 			}
@@ -1139,10 +1139,10 @@ gcm_simd_get_htab_size(boolean_t simd_mode)
 static inline void
 gcm_clear_ctx(gcm_ctx_t *ctx)
 {
-	bzero(ctx->gcm_remainder, sizeof (ctx->gcm_remainder));
-	bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
-	bzero(ctx->gcm_J0, sizeof (ctx->gcm_J0));
-	bzero(ctx->gcm_tmp, sizeof (ctx->gcm_tmp));
+	memset(ctx->gcm_remainder, 0, sizeof (ctx->gcm_remainder));
+	memset(ctx->gcm_H, 0, sizeof (ctx->gcm_H));
+	memset(ctx->gcm_J0, 0, sizeof (ctx->gcm_J0));
+	memset(ctx->gcm_tmp, 0, sizeof (ctx->gcm_tmp));
 }
 
 /* Increment the GCM counter block by n. */
@@ -1187,8 +1187,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
 	need = block_size - ctx->gcm_remainder_len;
 
 	if (length < need) {
 		/* Accumulate bytes here and return. */
-		bcopy(datap, (uint8_t *)ctx->gcm_remainder +
-		    ctx->gcm_remainder_len, length);
+		memcpy((uint8_t *)ctx->gcm_remainder +
+		    ctx->gcm_remainder_len, datap, length);
 
 		ctx->gcm_remainder_len += length;
 		if (ctx->gcm_copy_to == NULL) {
@@ -1197,8 +1197,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
 		return (CRYPTO_SUCCESS);
 	} else {
 		/* Complete incomplete block. */
-		bcopy(datap, (uint8_t *)ctx->gcm_remainder +
-		    ctx->gcm_remainder_len, need);
+		memcpy((uint8_t *)ctx->gcm_remainder +
+		    ctx->gcm_remainder_len, datap, need);
 
 		ctx->gcm_copy_to = NULL;
 	}
@@ -1276,7 +1276,7 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
 	/* Less than GCM_AVX_MIN_ENCRYPT_BYTES remain, operate on blocks. */
 	while (bleft > 0) {
 		if (bleft < block_size) {
-			bcopy(datap, ctx->gcm_remainder, bleft);
+			memcpy(ctx->gcm_remainder, datap, bleft);
 			ctx->gcm_remainder_len = bleft;
 			ctx->gcm_copy_to = datap;
 			goto out;
@@ -1335,7 +1335,7 @@ gcm_encrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
 		const uint32_t *cb = (uint32_t *)ctx->gcm_cb;
 
 		aes_encrypt_intel(keysched, aes_rounds, cb, (uint32_t *)tmp);
-		bzero(remainder + rem_len, block_size - rem_len);
+		memset(remainder + rem_len, 0, block_size - rem_len);
 		for (int i = 0; i < rem_len; i++) {
 			remainder[i] ^= tmp[i];
 		}
@@ -1431,8 +1431,8 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
 		if (bleft < block_size) {
 			uint8_t *lastb = (uint8_t *)ctx->gcm_remainder;
 
-			bzero(lastb, block_size);
-			bcopy(datap, lastb, bleft);
+			memset(lastb, 0, block_size);
+			memcpy(lastb, datap, bleft);
 			/* The GCM processing. */
 			GHASH_AVX(ctx, lastb, block_size);
 			aes_encrypt_intel(key->encr_ks.ks32, key->nr, cb, tmp);
@@ -1468,7 +1468,7 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
 	kfpu_end();
 
 	/* Compare the input authentication tag with what we calculated. */
-	if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
+	if (memcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
 		/* They don't match. */
 		return (CRYPTO_INVALID_MAC);
 	}
@@ -1500,8 +1500,8 @@ gcm_init_avx(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
 	ASSERT(block_size == GCM_BLOCK_LEN);
 
 	/* Init H (encrypt zero block) and create the initial counter block. */
-	bzero(ctx->gcm_ghash, sizeof (ctx->gcm_ghash));
-	bzero(H, sizeof (ctx->gcm_H));
+	memset(ctx->gcm_ghash, 0, sizeof (ctx->gcm_ghash));
+	memset(H, 0, sizeof (ctx->gcm_H));
 	kfpu_begin();
 	aes_encrypt_intel(keysched, aes_rounds,
 	    (const uint32_t *)H, (uint32_t *)H);
@@ -1509,13 +1509,13 @@ gcm_init_avx(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
 	gcm_init_htab_avx(ctx->gcm_Htable, H);
 
 	if (iv_len == 12) {
-		bcopy(iv, cb, 12);
+		memcpy(cb, iv, 12);
 		cb[12] = 0;
 		cb[13] = 0;
 		cb[14] = 0;
 		cb[15] = 1;
 		/* We need the ICB later. */
-		bcopy(cb, ctx->gcm_J0, sizeof (ctx->gcm_J0));
+		memcpy(ctx->gcm_J0, cb, sizeof (ctx->gcm_J0));
 	} else {
 		/*
 		 * Most consumers use 12 byte IVs, so it's OK to use the
@@ -1553,8 +1553,8 @@ gcm_init_avx(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
 			/* Zero pad and hash incomplete last block. */
 			uint8_t *authp = (uint8_t *)ctx->gcm_tmp;
 
-			bzero(authp, block_size);
-			bcopy(datap, authp, incomp);
+			memset(authp, 0, block_size);
+			memcpy(authp, datap, incomp);
 			GHASH_AVX(ctx, authp, block_size);
 		}
 	}
diff --git a/module/icp/algs/modes/modes.c b/module/icp/algs/modes/modes.c
index 59743c7d6829..d505de40ee2a 100644
--- a/module/icp/algs/modes/modes.c
+++ b/module/icp/algs/modes/modes.c
@@ -155,7 +155,7 @@ crypto_free_mode_ctx(void *ctx)
 #ifdef CAN_USE_GCM_ASM
 		if (((gcm_ctx_t *)ctx)->gcm_Htable != NULL) {
 			gcm_ctx_t *gcm_ctx = (gcm_ctx_t *)ctx;
-			bzero(gcm_ctx->gcm_Htable, gcm_ctx->gcm_htab_len);
+			memset(gcm_ctx->gcm_Htable, 0, gcm_ctx->gcm_htab_len);
 			kmem_free(gcm_ctx->gcm_Htable, gcm_ctx->gcm_htab_len);
 		}
 #endif
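
Editorial note, not part of the commit: both the generic gcm_format_initial_blocks() and the gcm_init_avx() hunks above special-case 12-byte IVs — the IV becomes the first 12 bytes of the counter block and the trailing 32-bit counter starts at 1 (this block is J0 in NIST SP 800-38D terms). A minimal sketch of that construction:

#include <assert.h>
#include <string.h>

int
main(void)
{
	unsigned char iv[12] = { 0 };	/* stand-in for a real IV */
	unsigned char cb[16];		/* counter block / J0 */

	memcpy(cb, iv, 12);		/* was: bcopy(iv, cb, 12) */
	cb[12] = 0;
	cb[13] = 0;
	cb[14] = 0;
	cb[15] = 1;			/* counter field starts at 1 */
	assert(cb[15] == 1);
	return (0);
}
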
diff --git a/module/icp/algs/sha2/sha2.c b/module/icp/algs/sha2/sha2.c
index 6f1e9b7193d4..151432f1a5df 100644
--- a/module/icp/algs/sha2/sha2.c
+++ b/module/icp/algs/sha2/sha2.c
@@ -190,7 +190,7 @@ SHA256Transform(SHA2_CTX *ctx, const uint8_t *blk)
 #endif	/* __sparc */
 
 	if ((uintptr_t)blk & 0x3) {	/* not 4-byte aligned? */
-		bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
+		memcpy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32));
 		blk = (uint8_t *)ctx->buf_un.buf32;
 	}
 
@@ -406,7 +406,7 @@ SHA512Transform(SHA2_CTX *ctx, const uint8_t *blk)
 
 	if ((uintptr_t)blk & 0x7) {	/* not 8-byte aligned? */
-		bcopy(blk, ctx->buf_un.buf64, sizeof (ctx->buf_un.buf64));
+		memcpy(ctx->buf_un.buf64, blk, sizeof (ctx->buf_un.buf64));
 		blk = (uint8_t *)ctx->buf_un.buf64;
 	}
 
@@ -823,14 +823,14 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
 		/*
 		 * general optimization:
 		 *
-		 * only do initial bcopy() and SHA2Transform() if
+		 * only do initial memcpy() and SHA2Transform() if
 		 * buf_index != 0. if buf_index == 0, we're just
-		 * wasting our time doing the bcopy() since there
+		 * wasting our time doing the memcpy() since there
 		 * wasn't any data left over from a previous call to
 		 * SHA2Update().
 		 */
 		if (buf_index) {
-			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
+			memcpy(&ctx->buf_un.buf8[buf_index], input, buf_len);
 			if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE)
 				SHA256Transform(ctx, ctx->buf_un.buf8);
 			else
@@ -873,7 +873,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
 	 * general optimization:
 	 *
 	 * if i and input_len are the same, return now instead
-	 * of calling bcopy(), since the bcopy() in this case
+	 * of calling memcpy(), since the memcpy() in this case
 	 * will be an expensive noop.
 	 */
 
@@ -884,7 +884,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
 	}
 
 	/* buffer remaining input */
-	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
+	memcpy(&ctx->buf_un.buf8[buf_index], &input[i], input_len - i);
 }
 
@@ -936,7 +936,7 @@ SHA2Final(void *digest, SHA2_CTX *ctx)
 		 */
 		Encode64(digest, ctx->state.s64, sizeof (uint64_t) * 3);
 		Encode64(last, &ctx->state.s64[3], sizeof (uint64_t));
-		bcopy(last, (uint8_t *)digest + 24, 4);
+		memcpy((uint8_t *)digest + 24, last, 4);
 	} else if (algotype == SHA512_256_MECH_INFO_TYPE) {
 		Encode64(digest, ctx->state.s64, sizeof (uint64_t) * 4);
 	} else {
@@ -946,7 +946,7 @@ SHA2Final(void *digest, SHA2_CTX *ctx)
 	}
 
 	/* zeroize sensitive information */
-	bzero(ctx, sizeof (*ctx));
+	memset(ctx, 0, sizeof (*ctx));
 }
 
 #ifdef _KERNEL
diff --git a/module/icp/algs/skein/skein.c b/module/icp/algs/skein/skein.c
index 83fe84260307..41ed2dd44e9e 100644
--- a/module/icp/algs/skein/skein.c
+++ b/module/icp/algs/skein/skein.c
@@ -26,16 +26,16 @@ Skein_256_Init(Skein_256_Ctxt_t *ctx, size_t hashBitLen)
 	switch (hashBitLen) {	/* use pre-computed values, where available */
 #ifndef	SKEIN_NO_PRECOMP
 	case 256:
-		bcopy(SKEIN_256_IV_256, ctx->X, sizeof (ctx->X));
+		memcpy(ctx->X, SKEIN_256_IV_256, sizeof (ctx->X));
 		break;
 	case 224:
-		bcopy(SKEIN_256_IV_224, ctx->X, sizeof (ctx->X));
+		memcpy(ctx->X, SKEIN_256_IV_224, sizeof (ctx->X));
 		break;
 	case 160:
-		bcopy(SKEIN_256_IV_160, ctx->X, sizeof (ctx->X));
+		memcpy(ctx->X, SKEIN_256_IV_160, sizeof (ctx->X));
 		break;
 	case 128:
-		bcopy(SKEIN_256_IV_128, ctx->X, sizeof (ctx->X));
+		memcpy(ctx->X, SKEIN_256_IV_128, sizeof (ctx->X));
 		break;
 #endif
 	default:
@@ -53,11 +53,11 @@ Skein_256_Init(Skein_256_Ctxt_t *ctx, size_t hashBitLen)
 		cfg.w[1] = Skein_Swap64(hashBitLen);
 		cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
 		/* zero pad config block */
-		bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0]));
+		memset(&cfg.w[3], 0, sizeof (cfg) - 3 * sizeof (cfg.w[0]));
 
 		/* compute the initial chaining values from config block */
 		/* zero the chaining variables */
-		bzero(ctx->X, sizeof (ctx->X));
+		memset(ctx->X, 0, sizeof (ctx->X));
 		Skein_256_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
 		break;
 	}
@@ -91,7 +91,7 @@ Skein_256_InitExt(Skein_256_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
 	/* compute the initial chaining values ctx->X[], based on key */
 	if (keyBytes == 0) {	/* is there a key? */
 		/* no key: use all zeroes as key for config block */
-		bzero(ctx->X, sizeof (ctx->X));
+		memset(ctx->X, 0, sizeof (ctx->X));
 	} else {	/* here to pre-process a key */
 		Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X));
 
@@ -101,13 +101,13 @@ Skein_256_InitExt(Skein_256_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
 		/* set tweaks: T0 = 0; T1 = KEY type */
 		Skein_Start_New_Type(ctx, KEY);
 		/* zero the initial chaining variables */
-		bzero(ctx->X, sizeof (ctx->X));
+		memset(ctx->X, 0, sizeof (ctx->X));
 		/* hash the key */
 		(void) Skein_256_Update(ctx, key, keyBytes);
 		/* put result into cfg.b[] */
 		(void) Skein_256_Final_Pad(ctx, cfg.b);
 		/* copy over into ctx->X[] */
-		bcopy(cfg.b, ctx->X, sizeof (cfg.b));
+		memcpy(ctx->X, cfg.b, sizeof (cfg.b));
 #if	SKEIN_NEED_SWAP
 		{
 			uint_t i;
@@ -124,7 +124,7 @@ Skein_256_InitExt(Skein_256_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
 	ctx->h.hashBitLen = hashBitLen;	/* output hash bit count */
 	Skein_Start_New_Type(ctx, CFG_FINAL);
 
-	bzero(&cfg.w, sizeof (cfg.w));	/* pre-pad cfg.w[] with zeroes */
+	memset(&cfg.w, 0, sizeof (cfg.w));	/* pre-pad cfg.w[] with zeroes */
 	cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER);
 	cfg.w[1] = Skein_Swap64(hashBitLen);	/* hash result length in bits */
 	/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
@@ -161,7 +161,7 @@ Skein_256_Update(Skein_256_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
 		if (n) {
 			/* check on our logic here */
 			Skein_assert(n < msgByteCnt);
-			bcopy(msg, &ctx->b[ctx->h.bCnt], n);
+			memcpy(&ctx->b[ctx->h.bCnt], msg, n);
 			msgByteCnt -= n;
 			msg += n;
 			ctx->h.bCnt += n;
@@ -189,7 +189,7 @@ Skein_256_Update(Skein_256_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
 	/* copy any remaining source message data bytes into b[] */
 	if (msgByteCnt) {
 		Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN_256_BLOCK_BYTES);
-		bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt);
+		memcpy(&ctx->b[ctx->h.bCnt], msg, msgByteCnt);
 		ctx->h.bCnt += msgByteCnt;
 	}
 
@@ -209,7 +209,7 @@ Skein_256_Final(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
 	ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL;	/* tag as the final block */
 	/* zero pad b[] if necessary */
 	if (ctx->h.bCnt < SKEIN_256_BLOCK_BYTES)
-		bzero(&ctx->b[ctx->h.bCnt],
+		memset(&ctx->b[ctx->h.bCnt], 0,
 		    SKEIN_256_BLOCK_BYTES - ctx->h.bCnt);
 
 	/* process the final block */
@@ -221,13 +220,12 @@ Skein_256_Final(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
 	/* run Threefish in "counter mode" to generate output */
 	/* zero out b[], so it can hold the counter */
-	bzero(ctx->b, sizeof (ctx->b));
+	memset(ctx->b, 0, sizeof (ctx->b));
 	/* keep a local copy of counter mode "key" */
-	bcopy(ctx->X, X, sizeof (X));
+	memcpy(X, ctx->X, sizeof (X));
 	for (i = 0; i * SKEIN_256_BLOCK_BYTES < byteCnt; i++) {
 		/* build the counter block */
-		uint64_t tmp = Skein_Swap64((uint64_t)i);
-		bcopy(&tmp, ctx->b, sizeof (tmp));
+		*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
 		Skein_Start_New_Type(ctx, OUT_FINAL);
 		/* run "counter mode" */
 		Skein_256_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -240,7 +239,7 @@ Skein_256_Final(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
 		Skein_Show_Final(256, &ctx->h, n,
 		    hashVal + i * SKEIN_256_BLOCK_BYTES);
 		/* restore the counter mode key for next time */
-		bcopy(X, ctx->X, sizeof (X));
+		memcpy(ctx->X, X, sizeof (X));
 	}
 	return (SKEIN_SUCCESS);
 }
@@ -262,16 +261,16 @@ Skein_512_Init(Skein_512_Ctxt_t *ctx, size_t hashBitLen)
 	switch (hashBitLen) {	/* use pre-computed values, where available */
 #ifndef	SKEIN_NO_PRECOMP
 	case 512:
-		bcopy(SKEIN_512_IV_512, ctx->X, sizeof (ctx->X));
+		memcpy(ctx->X, SKEIN_512_IV_512, sizeof (ctx->X));
 		break;
 	case 384:
-		bcopy(SKEIN_512_IV_384, ctx->X, sizeof (ctx->X));
+		memcpy(ctx->X, SKEIN_512_IV_384, sizeof (ctx->X));
 		break;
 	case 256:
-		bcopy(SKEIN_512_IV_256, ctx->X, sizeof (ctx->X));
+		memcpy(ctx->X, SKEIN_512_IV_256, sizeof (ctx->X));
 		break;
 	case 224:
-		bcopy(SKEIN_512_IV_224, ctx->X, sizeof (ctx->X));
+		memcpy(ctx->X, SKEIN_512_IV_224, sizeof (ctx->X));
 		break;
 #endif
 	default:
@@ -289,11 +288,11 @@ Skein_512_Init(Skein_512_Ctxt_t *ctx, size_t hashBitLen)
 		cfg.w[1] = Skein_Swap64(hashBitLen);
 		cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
 		/* zero pad config block */
-		bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0]));
+		memset(&cfg.w[3], 0, sizeof (cfg) - 3 * sizeof (cfg.w[0]));
 
 		/* compute the initial chaining values from config block */
 		/* zero the chaining variables */
-		bzero(ctx->X, sizeof (ctx->X));
+		memset(ctx->X, 0, sizeof (ctx->X));
 		Skein_512_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
 		break;
 	}
@@ -328,7 +327,7 @@ Skein_512_InitExt(Skein_512_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
 	/* compute the initial chaining values ctx->X[], based on key */
 	if (keyBytes == 0) {	/* is there a key? */
 		/* no key: use all zeroes as key for config block */
-		bzero(ctx->X, sizeof (ctx->X));
+		memset(ctx->X, 0, sizeof (ctx->X));
 	} else {	/* here to pre-process a key */
 		Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X));
 
@@ -338,12 +337,12 @@ Skein_512_InitExt(Skein_512_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
 		/* set tweaks: T0 = 0; T1 = KEY type */
 		Skein_Start_New_Type(ctx, KEY);
 		/* zero the initial chaining variables */
-		bzero(ctx->X, sizeof (ctx->X));
+		memset(ctx->X, 0, sizeof (ctx->X));
 		(void) Skein_512_Update(ctx, key, keyBytes); /* hash the key */
 		/* put result into cfg.b[] */
 		(void) Skein_512_Final_Pad(ctx, cfg.b);
 		/* copy over into ctx->X[] */
-		bcopy(cfg.b, ctx->X, sizeof (cfg.b));
+		memcpy(ctx->X, cfg.b, sizeof (cfg.b));
 #if	SKEIN_NEED_SWAP
 		{
 			uint_t i;
@@ -360,7 +359,7 @@ Skein_512_InitExt(Skein_512_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
 	ctx->h.hashBitLen = hashBitLen;	/* output hash bit count */
 	Skein_Start_New_Type(ctx, CFG_FINAL);
 
-	bzero(&cfg.w, sizeof (cfg.w));	/* pre-pad cfg.w[] with zeroes */
+	memset(&cfg.w, 0, sizeof (cfg.w));	/* pre-pad cfg.w[] with zeroes */
 	cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER);
 	cfg.w[1] = Skein_Swap64(hashBitLen);	/* hash result length in bits */
 	/* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */
@@ -397,7 +396,7 @@ Skein_512_Update(Skein_512_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
 		if (n) {
 			/* check on our logic here */
 			Skein_assert(n < msgByteCnt);
-			bcopy(msg, &ctx->b[ctx->h.bCnt], n);
+			memcpy(&ctx->b[ctx->h.bCnt], msg, n);
 			msgByteCnt -= n;
 			msg += n;
 			ctx->h.bCnt += n;
@@ -425,7 +424,7 @@ Skein_512_Update(Skein_512_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
 	/* copy any remaining source message data bytes into b[] */
 	if (msgByteCnt) {
 		Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN_512_BLOCK_BYTES);
-		bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt);
+		memcpy(&ctx->b[ctx->h.bCnt], msg, msgByteCnt);
 		ctx->h.bCnt += msgByteCnt;
 	}
 
@@ -445,7 +444,7 @@ Skein_512_Final(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
 	ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL;	/* tag as the final block */
 	/* zero pad b[] if necessary */
 	if (ctx->h.bCnt < SKEIN_512_BLOCK_BYTES)
-		bzero(&ctx->b[ctx->h.bCnt],
+		memset(&ctx->b[ctx->h.bCnt], 0,
 		    SKEIN_512_BLOCK_BYTES - ctx->h.bCnt);
 
 	/* process the final block */
@@ -457,13 +456,12 @@ Skein_512_Final(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
 	/* run Threefish in "counter mode" to generate output */
 	/* zero out b[], so it can hold the counter */
-	bzero(ctx->b, sizeof (ctx->b));
+	memset(ctx->b, 0, sizeof (ctx->b));
 	/* keep a local copy of counter mode "key" */
-	bcopy(ctx->X, X, sizeof (X));
+	memcpy(X, ctx->X, sizeof (X));
 	for (i = 0; i * SKEIN_512_BLOCK_BYTES < byteCnt; i++) {
 		/* build the counter block */
-		uint64_t tmp = Skein_Swap64((uint64_t)i);
-		bcopy(&tmp, ctx->b, sizeof (tmp));
+		*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
 		Skein_Start_New_Type(ctx, OUT_FINAL);
 		/* run "counter mode" */
 		Skein_512_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -476,7 +474,7 @@ Skein_512_Final(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
 		Skein_Show_Final(512, &ctx->h, n,
 		    hashVal + i * SKEIN_512_BLOCK_BYTES);
 		/* restore the counter mode key for next time */
-		bcopy(X, ctx->X, sizeof (X));
+		memcpy(ctx->X, X, sizeof (X));
 	}
 	return (SKEIN_SUCCESS);
 }
@@ -498,13 +496,13 @@ Skein1024_Init(Skein1024_Ctxt_t *ctx, size_t hashBitLen)
 	switch (hashBitLen) {	/* use pre-computed values, where available */
 #ifndef	SKEIN_NO_PRECOMP
 	case 512:
-		bcopy(SKEIN1024_IV_512, ctx->X, sizeof (ctx->X));
+		memcpy(ctx->X, SKEIN1024_IV_512, sizeof (ctx->X));
 		break;
 	case 384:
-		bcopy(SKEIN1024_IV_384, ctx->X, sizeof (ctx->X));
+		memcpy(ctx->X, SKEIN1024_IV_384, sizeof (ctx->X));
 		break;
 	case 1024:
-		bcopy(SKEIN1024_IV_1024, ctx->X, sizeof (ctx->X));
+		memcpy(ctx->X, SKEIN1024_IV_1024, sizeof (ctx->X));
 		break;
 #endif
 	default:
@@ -522,11 +520,11 @@ Skein1024_Init(Skein1024_Ctxt_t *ctx, size_t hashBitLen)
 		cfg.w[1] = Skein_Swap64(hashBitLen);
 		cfg.w[2] = Skein_Swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
 		/* zero pad config block */
-		bzero(&cfg.w[3], sizeof (cfg) - 3 * sizeof (cfg.w[0]));
+		memset(&cfg.w[3], 0, sizeof (cfg) - 3 * sizeof (cfg.w[0]));
 
 		/* compute the initial chaining values from config block */
 		/* zero the chaining variables */
-		bzero(ctx->X, sizeof (ctx->X));
+		memset(ctx->X, 0, sizeof (ctx->X));
 		Skein1024_Process_Block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN);
 		break;
 	}
@@ -561,7 +559,7 @@ Skein1024_InitExt(Skein1024_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
 	/* compute the initial chaining values ctx->X[], based on key */
 	if (keyBytes == 0) {	/* is there a key? */
 		/* no key: use all zeroes as key for config block */
-		bzero(ctx->X, sizeof (ctx->X));
+		memset(ctx->X, 0, sizeof (ctx->X));
 	} else {	/* here to pre-process a key */
 		Skein_assert(sizeof (cfg.b) >= sizeof (ctx->X));
 		/* do a mini-Init right here */
@@ -570,12 +568,12 @@ Skein1024_InitExt(Skein1024_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
 		/* set tweaks: T0 = 0; T1 = KEY type */
 		Skein_Start_New_Type(ctx, KEY);
 		/* zero the initial chaining variables */
-		bzero(ctx->X, sizeof (ctx->X));
+		memset(ctx->X, 0, sizeof (ctx->X));
 		(void) Skein1024_Update(ctx, key, keyBytes); /* hash the key */
 		/* put result into cfg.b[] */
 		(void) Skein1024_Final_Pad(ctx, cfg.b);
 		/* copy over into ctx->X[] */
-		bcopy(cfg.b, ctx->X, sizeof (cfg.b));
+		memcpy(ctx->X, cfg.b, sizeof (cfg.b));
 #if	SKEIN_NEED_SWAP
 		{
 			uint_t i;
@@ -592,7 +590,7 @@ Skein1024_InitExt(Skein1024_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo,
 	ctx->h.hashBitLen = hashBitLen;	/* output hash bit count */
 	Skein_Start_New_Type(ctx, CFG_FINAL);
 
-	bzero(&cfg.w, sizeof (cfg.w));	/* pre-pad cfg.w[] with zeroes */
+	memset(&cfg.w, 0, sizeof (cfg.w));	/* pre-pad cfg.w[] with zeroes */
 	cfg.w[0] = Skein_Swap64(SKEIN_SCHEMA_VER);
 	/* hash result length in bits */
 	cfg.w[1] = Skein_Swap64(hashBitLen);
@@ -630,7 +628,7 @@ Skein1024_Update(Skein1024_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
 		if (n) {
 			/* check on our logic here */
 			Skein_assert(n < msgByteCnt);
-			bcopy(msg, &ctx->b[ctx->h.bCnt], n);
+			memcpy(&ctx->b[ctx->h.bCnt], msg, n);
 			msgByteCnt -= n;
 			msg += n;
 			ctx->h.bCnt += n;
@@ -658,7 +656,7 @@ Skein1024_Update(Skein1024_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt)
 	/* copy any remaining source message data bytes into b[] */
 	if (msgByteCnt) {
 		Skein_assert(msgByteCnt + ctx->h.bCnt <= SKEIN1024_BLOCK_BYTES);
-		bcopy(msg, &ctx->b[ctx->h.bCnt], msgByteCnt);
+		memcpy(&ctx->b[ctx->h.bCnt], msg, msgByteCnt);
 		ctx->h.bCnt += msgByteCnt;
 	}
 
@@ -678,7 +676,7 @@ Skein1024_Final(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
 	ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL;	/* tag as the final block */
 	/* zero pad b[] if necessary */
 	if (ctx->h.bCnt < SKEIN1024_BLOCK_BYTES)
-		bzero(&ctx->b[ctx->h.bCnt],
+		memset(&ctx->b[ctx->h.bCnt], 0,
 		    SKEIN1024_BLOCK_BYTES - ctx->h.bCnt);
 
 	/* process the final block */
@@ -690,13 +688,12 @@ Skein1024_Final(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
 	/* run Threefish in "counter mode" to generate output */
 	/* zero out b[], so it can hold the counter */
-	bzero(ctx->b, sizeof (ctx->b));
+	memset(ctx->b, 0, sizeof (ctx->b));
 	/* keep a local copy of counter mode "key" */
-	bcopy(ctx->X, X, sizeof (X));
+	memcpy(X, ctx->X, sizeof (X));
 	for (i = 0; i * SKEIN1024_BLOCK_BYTES < byteCnt; i++) {
 		/* build the counter block */
-		uint64_t tmp = Skein_Swap64((uint64_t)i);
-		bcopy(&tmp, ctx->b, sizeof (tmp));
+		*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
 		Skein_Start_New_Type(ctx, OUT_FINAL);
 		/* run "counter mode" */
 		Skein1024_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -709,7 +706,7 @@ Skein1024_Final(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
 		Skein_Show_Final(1024, &ctx->h, n,
 		    hashVal + i * SKEIN1024_BLOCK_BYTES);
 		/* restore the counter mode key for next time */
-		bcopy(X, ctx->X, sizeof (X));
+		memcpy(ctx->X, X, sizeof (X));
 	}
 	return (SKEIN_SUCCESS);
 }
@@ -727,7 +724,7 @@ Skein_256_Final_Pad(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
 	ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL;	/* tag as the final block */
 	/* zero pad b[] if necessary */
 	if (ctx->h.bCnt < SKEIN_256_BLOCK_BYTES)
-		bzero(&ctx->b[ctx->h.bCnt],
+		memset(&ctx->b[ctx->h.bCnt], 0,
 		    SKEIN_256_BLOCK_BYTES - ctx->h.bCnt);
 	/* process the final block */
 	Skein_256_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt);
@@ -748,7 +745,7 @@ Skein_512_Final_Pad(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
 	ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL;	/* tag as the final block */
 	/* zero pad b[] if necessary */
 	if (ctx->h.bCnt < SKEIN_512_BLOCK_BYTES)
-		bzero(&ctx->b[ctx->h.bCnt],
+		memset(&ctx->b[ctx->h.bCnt], 0,
 		    SKEIN_512_BLOCK_BYTES - ctx->h.bCnt);
 	/* process the final block */
 	Skein_512_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt);
@@ -770,7 +767,7 @@ Skein1024_Final_Pad(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
 	ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL;
 	/* zero pad b[] if necessary */
 	if (ctx->h.bCnt < SKEIN1024_BLOCK_BYTES)
-		bzero(&ctx->b[ctx->h.bCnt],
+		memset(&ctx->b[ctx->h.bCnt], 0,
 		    SKEIN1024_BLOCK_BYTES - ctx->h.bCnt);
 	/* process the final block */
 	Skein1024_Process_Block(ctx, ctx->b, 1, ctx->h.bCnt);
@@ -798,13 +795,12 @@ Skein_256_Output(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
 	/* run Threefish in "counter mode" to generate output */
 	/* zero out b[], so it can hold the counter */
-	bzero(ctx->b, sizeof (ctx->b));
+	memset(ctx->b, 0, sizeof (ctx->b));
 	/* keep a local copy of counter mode "key" */
-	bcopy(ctx->X, X, sizeof (X));
+	memcpy(X, ctx->X, sizeof (X));
 	for (i = 0; i * SKEIN_256_BLOCK_BYTES < byteCnt; i++) {
 		/* build the counter block */
-		uint64_t tmp = Skein_Swap64((uint64_t)i);
-		bcopy(&tmp, ctx->b, sizeof (tmp));
+		*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
 		Skein_Start_New_Type(ctx, OUT_FINAL);
 		/* run "counter mode" */
 		Skein_256_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -817,7 +813,7 @@ Skein_256_Output(Skein_256_Ctxt_t *ctx, uint8_t *hashVal)
 		Skein_Show_Final(256, &ctx->h, n,
 		    hashVal + i * SKEIN_256_BLOCK_BYTES);
 		/* restore the counter mode key for next time */
-		bcopy(X, ctx->X, sizeof (X));
+		memcpy(ctx->X, X, sizeof (X));
 	}
 	return (SKEIN_SUCCESS);
 }
@@ -838,13 +834,12 @@ Skein_512_Output(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
 	/* run Threefish in "counter mode" to generate output */
 	/* zero out b[], so it can hold the counter */
-	bzero(ctx->b, sizeof (ctx->b));
+	memset(ctx->b, 0, sizeof (ctx->b));
 	/* keep a local copy of counter mode "key" */
-	bcopy(ctx->X, X, sizeof (X));
+	memcpy(X, ctx->X, sizeof (X));
 	for (i = 0; i * SKEIN_512_BLOCK_BYTES < byteCnt; i++) {
 		/* build the counter block */
-		uint64_t tmp = Skein_Swap64((uint64_t)i);
-		bcopy(&tmp, ctx->b, sizeof (tmp));
+		*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
 		Skein_Start_New_Type(ctx, OUT_FINAL);
 		/* run "counter mode" */
 		Skein_512_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -857,7 +852,7 @@ Skein_512_Output(Skein_512_Ctxt_t *ctx, uint8_t *hashVal)
 		Skein_Show_Final(256, &ctx->h, n,
 		    hashVal + i * SKEIN_512_BLOCK_BYTES);
 		/* restore the counter mode key for next time */
-		bcopy(X, ctx->X, sizeof (X));
+		memcpy(ctx->X, X, sizeof (X));
 	}
 	return (SKEIN_SUCCESS);
 }
@@ -878,13 +873,12 @@ Skein1024_Output(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
 	/* run Threefish in "counter mode" to generate output */
 	/* zero out b[], so it can hold the counter */
-	bzero(ctx->b, sizeof (ctx->b));
+	memset(ctx->b, 0, sizeof (ctx->b));
 	/* keep a local copy of counter mode "key" */
-	bcopy(ctx->X, X, sizeof (X));
+	memcpy(X, ctx->X, sizeof (X));
 	for (i = 0; i * SKEIN1024_BLOCK_BYTES < byteCnt; i++) {
 		/* build the counter block */
-		uint64_t tmp = Skein_Swap64((uint64_t)i);
-		bcopy(&tmp, ctx->b, sizeof (tmp));
+		*(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i);
 		Skein_Start_New_Type(ctx, OUT_FINAL);
 		/* run "counter mode" */
 		Skein1024_Process_Block(ctx, ctx->b, 1, sizeof (uint64_t));
@@ -897,7 +891,7 @@ Skein1024_Output(Skein1024_Ctxt_t *ctx, uint8_t *hashVal)
 		Skein_Show_Final(256, &ctx->h, n,
 		    hashVal + i * SKEIN1024_BLOCK_BYTES);
 		/* restore the counter mode key for next time */
-		bcopy(X, ctx->X, sizeof (X));
+		memcpy(ctx->X, X, sizeof (X));
 	}
 	return (SKEIN_SUCCESS);
 }
diff --git a/module/icp/algs/skein/skein_port.h b/module/icp/algs/skein/skein_port.h
index ce4353082552..96d1266d019e 100644
--- a/module/icp/algs/skein/skein_port.h
+++ b/module/icp/algs/skein/skein_port.h
@@ -50,9 +50,9 @@
 #else
 /* here for x86 and x86-64 CPUs (and other detected little-endian CPUs) */
 #define	SKEIN_NEED_SWAP   (0)
-#define	Skein_Put64_LSB_First(dst08, src64, bCnt) bcopy(src64, dst08, bCnt)
+#define	Skein_Put64_LSB_First(dst08, src64, bCnt) memcpy(dst08, src64, bCnt)
 #define	Skein_Get64_LSB_First(dst64, src08, wCnt) \
-	bcopy(src08, dst64, 8 * (wCnt))
+	memcpy(dst64, src08, 8 * (wCnt))
 #endif
 
 #endif /* ifndef SKEIN_NEED_SWAP */
diff --git a/module/icp/api/kcf_ctxops.c b/module/icp/api/kcf_ctxops.c
index 67bf76a8f1fc..25ed94fe8bdf 100644
--- a/module/icp/api/kcf_ctxops.c
+++ b/module/icp/api/kcf_ctxops.c
@@ -138,7 +138,7 @@ crypto_destroy_ctx_template(crypto_ctx_template_t tmpl)
 
 		ASSERT(ctx_tmpl->ct_prov_tmpl != NULL);
 
-		bzero(ctx_tmpl->ct_prov_tmpl, ctx_tmpl->ct_size);
+		memset(ctx_tmpl->ct_prov_tmpl, 0, ctx_tmpl->ct_size);
 		kmem_free(ctx_tmpl->ct_prov_tmpl, ctx_tmpl->ct_size);
 		kmem_free(ctx_tmpl, sizeof (kcf_ctx_template_t));
 	}
diff --git a/module/icp/core/kcf_mech_tabs.c b/module/icp/core/kcf_mech_tabs.c
index 347190aa7f8b..ec43d53dc3ff 100644
--- a/module/icp/core/kcf_mech_tabs.c
+++ b/module/icp/core/kcf_mech_tabs.c
@@ -250,7 +250,8 @@ kcf_add_mech_provider(short mech_indx,
 
 	/* allocate and initialize new kcf_prov_mech_desc */
 	prov_mech = kmem_zalloc(sizeof (kcf_prov_mech_desc_t), KM_SLEEP);
-	bcopy(mech_info, &prov_mech->pm_mech_info, sizeof (crypto_mech_info_t));
+	memcpy(&prov_mech->pm_mech_info, mech_info,
+	    sizeof (crypto_mech_info_t));
 	prov_mech->pm_prov_desc = prov_desc;
 	prov_desc->pd_mech_indx[KCF_MECH2CLASS(kcf_mech_type)]
 	    [KCF_MECH2INDEX(kcf_mech_type)] = mech_indx;
diff --git a/module/icp/core/kcf_prov_lib.c b/module/icp/core/kcf_prov_lib.c
index 505dbec313de..4bc99a8eb84c 100644
--- a/module/icp/core/kcf_prov_lib.c
+++ b/module/icp/core/kcf_prov_lib.c
@@ -70,7 +70,7 @@ crypto_uio_copy_to_data(crypto_data_t *data, uchar_t *buf, int len)
 		    offset, length);
 
 		datap = (uchar_t *)(zfs_uio_iovbase(uiop, vec_idx) + offset);
-		bcopy(buf, datap, cur_len);
+		memcpy(datap, buf, cur_len);
 		buf += cur_len;
 
 		length -= cur_len;
@@ -99,8 +99,8 @@ crypto_put_output_data(uchar_t *buf, crypto_data_t *output, int len)
 			output->cd_length = len;
 			return (CRYPTO_BUFFER_TOO_SMALL);
 		}
-		bcopy(buf, (uchar_t *)(output->cd_raw.iov_base +
-		    output->cd_offset), len);
+		memcpy((uchar_t *)(output->cd_raw.iov_base +
+		    output->cd_offset), buf, len);
 		break;
 
 	case CRYPTO_DATA_UIO:
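
Editorial note, not part of the commit: the skein.c counter-mode hunks above replace a local tmp plus bcopy() with a direct 64-bit store, *(uint64_t *)ctx->b = Skein_Swap64((uint64_t)i). That is only safe if b[] sits at an 8-byte-aligned offset inside the context; the demo_ctx_t below is a hypothetical layout, not the real Skein_256_Ctxt_t:

#include <stdint.h>
#include <string.h>

typedef struct {
	uint64_t X[4];	/* 8-byte members keep b[] 8-byte aligned */
	uint8_t b[32];	/* block buffer, as in the Skein contexts */
} demo_ctx_t;

int
main(void)
{
	demo_ctx_t ctx;

	memset(ctx.b, 0, sizeof (ctx.b));	/* was: bzero(ctx->b, ...) */
	*(uint64_t *)ctx.b = 42;	/* was: tmp + bcopy(&tmp, ctx->b, 8) */
	/* 0 regardless of endianness: 42 lands in b[0] or b[7] */
	return ((ctx.b[0] == 42 || ctx.b[7] == 42) ? 0 : 1);
}
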
diff --git a/module/icp/io/aes.c b/module/icp/io/aes.c
index b0f51262dd07..945d560ebe57 100644
--- a/module/icp/io/aes.c
+++ b/module/icp/io/aes.c
@@ -832,7 +832,7 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
     crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
     crypto_spi_ctx_template_t template)
 {
-	aes_ctx_t aes_ctx;	/* on the stack */
+	aes_ctx_t aes_ctx = {{{{0}}}};
 	off_t saved_offset;
 	size_t saved_length;
 	size_t length_needed;
@@ -858,8 +858,6 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
 	    CRYPTO_SUCCESS)
 		return (ret);
 
-	bzero(&aes_ctx, sizeof (aes_ctx_t));
-
 	ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
 	    KM_SLEEP, B_TRUE);
 	if (ret != CRYPTO_SUCCESS)
@@ -944,7 +942,7 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
 
 out:
 	if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
-		bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+		memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len);
 		kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
 	}
 #ifdef CAN_USE_GCM_ASM
@@ -953,7 +951,7 @@ out:
 
 		gcm_ctx_t *ctx = (gcm_ctx_t *)&aes_ctx;
 
-		bzero(ctx->gcm_Htable, ctx->gcm_htab_len);
+		memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len);
 		kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
 	}
 #endif
@@ -966,7 +964,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
     crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
     crypto_spi_ctx_template_t template)
 {
-	aes_ctx_t aes_ctx;	/* on the stack */
+	aes_ctx_t aes_ctx = {{{{0}}}};
 	off_t saved_offset;
 	size_t saved_length;
 	size_t length_needed;
@@ -992,8 +990,6 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
 	    CRYPTO_SUCCESS)
 		return (ret);
 
-	bzero(&aes_ctx, sizeof (aes_ctx_t));
-
 	ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
 	    KM_SLEEP, B_FALSE);
 	if (ret != CRYPTO_SUCCESS)
@@ -1096,7 +1092,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
 
 out:
 	if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
-		bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+		memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len);
 		kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
 	}
@@ -1113,7 +1109,7 @@ out:
 	if (((gcm_ctx_t *)&aes_ctx)->gcm_Htable != NULL) {
 		gcm_ctx_t *ctx = (gcm_ctx_t *)&aes_ctx;
 
-		bzero(ctx->gcm_Htable, ctx->gcm_htab_len);
+		memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len);
 		kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
 	}
 #endif
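aes_encrypt_atomic() and aes_decrypt_atomic() above drop the explicit bzero() of the stack context in favor of zero-initialization at the definition. The quadruple braces follow the nesting of aes_ctx_t's first member; a minimal sketch with a hypothetical struct of the same shape:

	/* hypothetical nesting that calls for four brace levels */
	typedef struct { uint64_t w[4]; } inner_t;
	typedef struct { inner_t x; } mid_t;
	typedef struct { mid_t m; int flags; } outer_t;

	outer_t o = {{{{0}}}};	/* every member zeroed, like the old bzero() */

In C, a plain = {0} has the same effect; the extra braces just match the first member's structure, which keeps -Wmissing-braces style warnings quiet.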
@@ -1150,7 +1146,7 @@ aes_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
 	 * in the key.
 	 */
 	if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
-		bzero(keysched, size);
+		memset(keysched, 0, size);
 		kmem_free(keysched, size);
 		return (rv);
 	}
@@ -1170,7 +1166,8 @@ aes_free_context(crypto_ctx_t *ctx)
 	if (aes_ctx != NULL) {
 		if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
 			ASSERT(aes_ctx->ac_keysched_len != 0);
-			bzero(aes_ctx->ac_keysched, aes_ctx->ac_keysched_len);
+			memset(aes_ctx->ac_keysched, 0,
+			    aes_ctx->ac_keysched_len);
 			kmem_free(aes_ctx->ac_keysched,
 			    aes_ctx->ac_keysched_len);
 		}
@@ -1260,7 +1257,7 @@ aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
 
 	if (rv != CRYPTO_SUCCESS) {
 		if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
-			bzero(keysched, size);
+			memset(keysched, 0, size);
 			kmem_free(keysched, size);
 		}
 	}
diff --git a/module/icp/io/sha2_mod.c b/module/icp/io/sha2_mod.c
index c586c3272647..4a218b500325 100644
--- a/module/icp/io/sha2_mod.c
+++ b/module/icp/io/sha2_mod.c
@@ -46,7 +46,7 @@
 		(len) = (uint32_t)*((ulong_t *)(m)->cm_param);	\
 	else {							\
 		ulong_t tmp_ulong;				\
-		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
+		memcpy(&tmp_ulong, (m)->cm_param, sizeof (ulong_t));	\
 		(len) = (uint32_t)tmp_ulong;			\
 	}							\
 }
@@ -309,9 +309,9 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
 			 */
 			SHA2Final(digest_scratch, sha2_ctx);
 
-			bcopy(digest_scratch, (uchar_t *)
+			memcpy((uchar_t *)
 			    zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
-			    digest_len);
+			    digest_scratch, digest_len);
 		} else {
 			SHA2Final((uchar_t *)zfs_uio_iovbase(digest->
 			    cd_uio, vec_idx) + offset,
@@ -336,8 +336,9 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
 			cur_len = MIN(zfs_uio_iovlen(digest->cd_uio, vec_idx) -
 			    offset, length);
 
-			bcopy(digest_tmp + scratch_offset,
+			memcpy(
 			    zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
+			    digest_tmp + scratch_offset,
 			    cur_len);
 
 			length -= cur_len;
@@ -630,8 +631,8 @@ sha2_digest_atomic(crypto_mechanism_t *mechanism, crypto_data_t *data,
 static void
 sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
 {
-	uint64_t ipad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)];
-	uint64_t opad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)];
+	uint64_t ipad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0};
+	uint64_t opad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0};
 	int i, block_size, blocks_per_int64;
 
 	/* Determine the block size */
@@ -643,12 +644,12 @@ sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
 		blocks_per_int64 = SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t);
 	}
 
-	(void) bzero(ipad, block_size);
-	(void) bzero(opad, block_size);
+	(void) memset(ipad, 0, block_size);
+	(void) memset(opad, 0, block_size);
 
 	if (keyval != NULL) {
-		(void) bcopy(keyval, ipad, length_in_bytes);
-		(void) bcopy(keyval, opad, length_in_bytes);
+		(void) memcpy(ipad, keyval, length_in_bytes);
+		(void) memcpy(opad, keyval, length_in_bytes);
 	} else {
 		ASSERT0(length_in_bytes);
 	}
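sha2_mac_init_ctx() above is standard HMAC key preprocessing: copy the (block-sized or shorter) key into two zeroed pads, then XOR in the inner and outer constants. A compact sketch of the construction per RFC 2104; the names are illustrative, and the real function XORs eight bytes at a time through its uint64_t pads:

	/* HMAC pad setup; key_len <= block_size is assumed at this point */
	unsigned char ipad[BLOCK_SIZE] = {0};
	unsigned char opad[BLOCK_SIZE] = {0};
	memcpy(ipad, key, key_len);
	memcpy(opad, key, key_len);
	for (int i = 0; i < BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;	/* inner pad constant */
		opad[i] ^= 0x5c;	/* outer pad constant */
	}
	/* HMAC(msg) = H(opad || H(ipad || msg)) */

Hashing ipad into hc_icontext and opad into hc_ocontext up front is what lets these contexts be copied and reused as templates later.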
@@ -666,7 +667,6 @@ sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
 	/* perform SHA2 on opad */
 	SHA2Init(ctx->hc_mech_type, &ctx->hc_ocontext);
 	SHA2Update(&ctx->hc_ocontext, (uint8_t *)opad, block_size);
-
 }
 
 /*
@@ -708,7 +708,7 @@ sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
 	PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
 	if (ctx_template != NULL) {
 		/* reuse context template */
-		bcopy(ctx_template, PROV_SHA2_HMAC_CTX(ctx),
+		memcpy(PROV_SHA2_HMAC_CTX(ctx), ctx_template,
 		    sizeof (sha2_hmac_ctx_t));
 	} else {
 		/* no context template, compute context */
@@ -746,7 +746,7 @@ sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
 	}
 
 	if (ret != CRYPTO_SUCCESS) {
-		bzero(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
+		memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t));
 		kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
 		ctx->cc_provider_private = NULL;
 	}
@@ -850,8 +850,8 @@ sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac)
 			 */
 			SHA2Final(digest,
 			    &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext);
-			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
-			    mac->cd_offset, digest_len);
+			memcpy((unsigned char *)mac->cd_raw.iov_base +
+			    mac->cd_offset, digest, digest_len);
 		} else {
 			SHA2Final((unsigned char *)mac->cd_raw.iov_base +
 			    mac->cd_offset,
@@ -872,7 +872,7 @@ sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac)
 	else
 		mac->cd_length = 0;
 
-	bzero(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
+	memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t));
 	kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
 	ctx->cc_provider_private = NULL;
 
@@ -928,7 +928,7 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
 
 	if (ctx_template != NULL) {
 		/* reuse context template */
-		bcopy(ctx_template, &sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+		memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t));
 	} else {
 		sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
 		/* no context template, initialize context */
@@ -1001,8 +1001,8 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
 		 * the user only what was requested.
 		 */
 		SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext);
-		bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
-		    mac->cd_offset, digest_len);
+		memcpy((unsigned char *)mac->cd_raw.iov_base +
+		    mac->cd_offset, digest, digest_len);
 	} else {
 		SHA2Final((unsigned char *)mac->cd_raw.iov_base +
 		    mac->cd_offset, &sha2_hmac_ctx.hc_ocontext);
@@ -1021,7 +1021,7 @@ sha2_mac_atomic(crypto_mechanism_t *mechanism,
 	return (CRYPTO_SUCCESS);
 }
 bail:
-	bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+	memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t));
 	mac->cd_length = 0;
 	return (ret);
 }
@@ -1060,7 +1060,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
 
 	if (ctx_template != NULL) {
 		/* reuse context template */
-		bcopy(ctx_template, &sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+		memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t));
 	} else {
 		sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
 		/* no context template, initialize context */
@@ -1137,7 +1137,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
 
 	switch (mac->cd_format) {
 	case CRYPTO_DATA_RAW:
-		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
+		if (memcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
 		    mac->cd_offset, digest_len) != 0)
 			ret = CRYPTO_INVALID_MAC;
 		break;
@@ -1170,7 +1170,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
 			cur_len = MIN(zfs_uio_iovlen(mac->cd_uio, vec_idx) -
 			    offset, length);
 
-			if (bcmp(digest + scratch_offset,
+			if (memcmp(digest + scratch_offset,
 			    zfs_uio_iovbase(mac->cd_uio, vec_idx) + offset,
 			    cur_len) != 0) {
 				ret = CRYPTO_INVALID_MAC;
@@ -1191,7 +1191,7 @@ sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
 	return (ret);
 
 bail:
-	bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t));
+	memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t));
 	mac->cd_length = 0;
 	return (ret);
 }
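The bail paths above keep the module's zeroize-before-free discipline: key material is scrubbed before its memory is returned to the allocator. The idiom, sketched with hypothetical keybuf/keybuf_len names:

	/* scrub secrets before the allocator gets the memory back */
	memset(keybuf, 0, keybuf_len);	/* keybuf, keybuf_len: illustrative */
	kmem_free(keybuf, keybuf_len);
	keybuf = NULL;			/* avoid a dangling pointer */

A general C note, not something this diff addresses: in userland, a memset() immediately before free() can be elided as a dead store, which is why explicit_bzero() and memset_s() exist; here the memset() survives because the compiler cannot see into the external kmem_free() call.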
@@ -1282,7 +1282,7 @@ sha2_free_context(crypto_ctx_t *ctx)
 	else
 		ctx_len = sizeof (sha2_hmac_ctx_t);
 
-	bzero(ctx->cc_provider_private, ctx_len);
+	memset(ctx->cc_provider_private, 0, ctx_len);
 	kmem_free(ctx->cc_provider_private, ctx_len);
 	ctx->cc_provider_private = NULL;
diff --git a/module/icp/io/skein_mod.c b/module/icp/io/skein_mod.c
index 1d6969e68862..a2ed6cedd8c6 100644
--- a/module/icp/io/skein_mod.c
+++ b/module/icp/io/skein_mod.c
@@ -292,8 +292,8 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest)
 		while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) {
 			cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset,
 			    length);
-			bcopy(digest_tmp + scratch_offset,
-			    zfs_uio_iovbase(uio, vec_idx) + offset, cur_len);
+			memcpy(zfs_uio_iovbase(uio, vec_idx) + offset,
+			    digest_tmp + scratch_offset, cur_len);
 
 			length -= cur_len;
 			vec_idx++;
@@ -349,7 +349,7 @@ skein_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism)
 	return (CRYPTO_SUCCESS);
 errout:
-	bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
+	memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
 	kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
 	SKEIN_CTX_LVALUE(ctx) = NULL;
 	return (error);
@@ -376,7 +376,7 @@ skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest)
 
 	error = skein_update(ctx, data);
 	if (error != CRYPTO_SUCCESS) {
-		bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
+		memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
 		kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
 		SKEIN_CTX_LVALUE(ctx) = NULL;
 		digest->cd_length = 0;
@@ -452,7 +452,7 @@ skein_final(crypto_ctx_t *ctx, crypto_data_t *digest)
 	else
 		digest->cd_length = 0;
 
-	bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
+	memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
 	kmem_free(SKEIN_CTX(ctx), sizeof (*(SKEIN_CTX(ctx))));
 	SKEIN_CTX_LVALUE(ctx) = NULL;
 
@@ -494,7 +494,7 @@ out:
 		    CRYPTO_BITS2BYTES(skein_ctx.sc_digest_bitlen);
 	else
 		digest->cd_length = 0;
-	bzero(&skein_ctx, sizeof (skein_ctx));
+	memset(&skein_ctx, 0, sizeof (skein_ctx));
 	return (error);
 }
 
@@ -543,7 +543,7 @@ skein_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
 		return (CRYPTO_HOST_MEMORY);
 
 	if (ctx_template != NULL) {
-		bcopy(ctx_template, SKEIN_CTX(ctx),
+		memcpy(SKEIN_CTX(ctx), ctx_template,
 		    sizeof (*SKEIN_CTX(ctx)));
 	} else {
 		error = skein_mac_ctx_build(SKEIN_CTX(ctx), mechanism, key);
@@ -553,7 +553,7 @@ skein_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
 	return (CRYPTO_SUCCESS);
 errout:
-	bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
+	memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
 	kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
 	return (error);
 }
@@ -573,13 +573,13 @@ skein_mac_atomic(crypto_mechanism_t *mechanism,
     crypto_spi_ctx_template_t ctx_template)
 {
 	/* faux crypto context just for skein_digest_{update,final} */
-	int		error;
+	int error;
 	crypto_ctx_t ctx;
 	skein_ctx_t skein_ctx;
 	SKEIN_CTX_LVALUE(&ctx) = &skein_ctx;
 
 	if (ctx_template != NULL) {
-		bcopy(ctx_template, &skein_ctx, sizeof (skein_ctx));
+		memcpy(&skein_ctx, ctx_template, sizeof (skein_ctx));
 	} else {
 		error = skein_mac_ctx_build(&skein_ctx, mechanism, key);
 		if (error != CRYPTO_SUCCESS)
@@ -593,7 +593,7 @@ skein_mac_atomic(crypto_mechanism_t *mechanism,
 	return (CRYPTO_SUCCESS);
 errout:
-	bzero(&skein_ctx, sizeof (skein_ctx));
+	memset(&skein_ctx, 0, sizeof (skein_ctx));
 	return (error);
 }
 
@@ -624,7 +624,7 @@ skein_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
 	return (CRYPTO_SUCCESS);
 errout:
-	bzero(ctx_tmpl, sizeof (*ctx_tmpl));
+	memset(ctx_tmpl, 0, sizeof (*ctx_tmpl));
 	kmem_free(ctx_tmpl, sizeof (*ctx_tmpl));
 	return (error);
 }
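skein_mac_init() and skein_mac_atomic() above seed a fresh context from a precomputed template with a single memcpy(). That is valid because skein_ctx_t is a flat struct with no pointers into itself (a mechanism type plus the Skein state union), in which case plain structure assignment is an equivalent spelling:

	/* copying a flat, self-contained context: both forms are equivalent */
	skein_ctx_t fresh;
	memcpy(&fresh, ctx_template, sizeof (fresh));
	fresh = *(const skein_ctx_t *)ctx_template;	/* same effect */

The payoff is that the per-key setup in skein_mac_ctx_build() runs once, when the template is created, rather than on every MAC operation.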
@@ -636,7 +636,7 @@ static int
 skein_free_context(crypto_ctx_t *ctx)
 {
 	if (SKEIN_CTX(ctx) != NULL) {
-		bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
+		memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx)));
 		kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx)));
 		SKEIN_CTX_LVALUE(ctx) = NULL;
 	}