Diffstat (limited to 'lib/libmd')
32 files changed, 9502 insertions, 0 deletions
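For orientation before the file-by-file diff: the public interface this library ships is the usual libmd pattern of ..._Init/_Update/_Final routines plus the ..._End/_Data/_File/_FileChunk/_Fd convenience wrappers generated from mdXhl.c (see the Symbol.map hunk below). A minimal usage sketch in C follows; it is not part of the commit and assumes the conventional libmd behaviour that the _Data()/_File() helpers return an ASCII hex digest and allocate the output buffer when passed NULL, and that the program is linked with -lmd.

/*
 * Hypothetical example program, not part of this commit: hash a buffer
 * with SHA-256 and a file with MD5 using the high-level libmd wrappers.
 */
#include <md5.h>
#include <sha256.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
	const char msg[] = "hello, libmd";
	char buf[65];		/* 64 hex digits + NUL for SHA-256 */
	char *p;

	/* One-shot digest of an in-memory buffer. */
	printf("SHA256: %s\n", SHA256_Data(msg, sizeof(msg) - 1, buf));

	/* Digest a file; a NULL output buffer is malloc()ed by the library. */
	if (argc > 1 && (p = MD5File(argv[1], NULL)) != NULL) {
		printf("MD5(%s): %s\n", argv[1], p);
		free(p);
	}
	return (0);
}

Something like cc -o digest digest.c -lmd would be the expected build line once the headers listed in INCS are installed.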
diff --git a/lib/libmd/Makefile b/lib/libmd/Makefile new file mode 100644 index 000000000000..547a134fc440 --- /dev/null +++ b/lib/libmd/Makefile @@ -0,0 +1,246 @@ +SHLIBDIR?= /lib + +.include <src.opts.mk> +.include <bsd.compiler.mk> + +PACKAGE= runtime +LIB= md +SHLIB_MAJOR= 7 +SRCS= md4c.c md5c.c md4hl.c md5hl.c \ + rmd160c.c rmd160hl.c \ + sha0c.c sha0hl.c sha1c.c sha1hl.c \ + sha224hl.c sha256c.c sha256hl.c \ + sha384hl.c \ + sha512c.c sha512hl.c sha512t224hl.c sha512t256hl.c \ + skein.c skein_block.c \ + skein256hl.c skein512hl.c skein1024hl.c +INCS= md4.h md5.h ripemd.h sha.h sha224.h sha256.h sha384.h sha512.h \ + sha512t.h skein.h skein_port.h skein_freebsd.h skein_iv.h + +WARNS?= 0 + +VERSION_DEF= ${SRCTOP}/lib/libc/Versions.def +SYMBOL_MAPS= ${.CURDIR}/Symbol.map + +MAN+= md4.3 md5.3 ripemd.3 sha.3 sha256.3 sha512.3 skein.3 +MLINKS+=md4.3 MD4Init.3 md4.3 MD4Update.3 md4.3 MD4Final.3 +MLINKS+=md4.3 MD4End.3 md4.3 MD4File.3 md4.3 MD4FileChunk.3 +MLINKS+=md4.3 MD4Data.3 +MLINKS+=md5.3 MD5Init.3 md5.3 MD5Update.3 md5.3 MD5Final.3 +MLINKS+=md5.3 MD5End.3 md5.3 MD5File.3 md5.3 MD5FileChunk.3 +MLINKS+=md5.3 MD5Data.3 +MLINKS+=ripemd.3 RIPEMD160_Init.3 ripemd.3 RIPEMD160_Update.3 +MLINKS+=ripemd.3 RIPEMD160_Final.3 ripemd.3 RIPEMD160_Data.3 +MLINKS+=ripemd.3 RIPEMD160_End.3 ripemd.3 RIPEMD160_File.3 +MLINKS+=ripemd.3 RIPEMD160_FileChunk.3 +MLINKS+=sha.3 SHA_Init.3 sha.3 SHA_Update.3 sha.3 SHA_Final.3 +MLINKS+=sha.3 SHA_End.3 sha.3 SHA_File.3 sha.3 SHA_FileChunk.3 +MLINKS+=sha.3 SHA_Data.3 +MLINKS+=sha.3 SHA1_Init.3 sha.3 SHA1_Update.3 sha.3 SHA1_Final.3 +MLINKS+=sha.3 SHA1_End.3 sha.3 SHA1_File.3 sha.3 SHA1_FileChunk.3 +MLINKS+=sha.3 SHA1_Data.3 +MLINKS+=sha256.3 SHA224_Init.3 sha256.3 SHA224_Update.3 +MLINKS+=sha256.3 SHA224_Final.3 sha256.3 SHA224_End.3 +MLINKS+=sha256.3 SHA224_File.3 sha256.3 SHA224_FileChunk.3 +MLINKS+=sha256.3 SHA224_Data.3 +MLINKS+=sha256.3 SHA256_Init.3 sha256.3 SHA256_Update.3 +MLINKS+=sha256.3 SHA256_Final.3 sha256.3 SHA256_End.3 +MLINKS+=sha256.3 SHA256_File.3 sha256.3 SHA256_FileChunk.3 +MLINKS+=sha256.3 SHA256_Data.3 +MLINKS+=sha512.3 SHA384_Init.3 sha512.3 SHA384_Update.3 +MLINKS+=sha512.3 SHA384_Final.3 sha512.3 SHA384_End.3 +MLINKS+=sha512.3 SHA384_File.3 sha512.3 SHA384_FileChunk.3 +MLINKS+=sha512.3 SHA384_Data.3 sha512.3 sha384.3 +MLINKS+=sha512.3 SHA512_Init.3 sha512.3 SHA512_Update.3 +MLINKS+=sha512.3 SHA512_Final.3 sha512.3 SHA512_End.3 +MLINKS+=sha512.3 SHA512_File.3 sha512.3 SHA512_FileChunk.3 +MLINKS+=sha512.3 SHA512_Data.3 +MLINKS+=sha512.3 SHA512_224_Init.3 sha512.3 SHA512_224_Update.3 +MLINKS+=sha512.3 SHA512_224_Final.3 sha512.3 SHA512_224_End.3 +MLINKS+=sha512.3 SHA512_224_File.3 sha512.3 SHA512_224_FileChunk.3 +MLINKS+=sha512.3 SHA512_224_Data.3 +MLINKS+=sha512.3 SHA512_256_Init.3 sha512.3 SHA512_256_Update.3 +MLINKS+=sha512.3 SHA512_256_Final.3 sha512.3 SHA512_256_End.3 +MLINKS+=sha512.3 SHA512_256_File.3 sha512.3 SHA512_256_FileChunk.3 +MLINKS+=sha512.3 SHA512_256_Data.3 +MLINKS+=skein.3 SKEIN256_Init.3 skein.3 SKEIN256_Update.3 +MLINKS+=skein.3 SKEIN256_Final.3 skein.3 SKEIN256_End.3 +MLINKS+=skein.3 SKEIN256_File.3 skein.3 SKEIN256_FileChunk.3 +MLINKS+=skein.3 SKEIN256_Data.3 skein.3 skein256.3 +MLINKS+=skein.3 SKEIN512_Init.3 skein.3 SKEIN512_Update.3 +MLINKS+=skein.3 SKEIN512_Final.3 skein.3 SKEIN512_End.3 +MLINKS+=skein.3 SKEIN512_File.3 skein.3 SKEIN512_FileChunk.3 +MLINKS+=skein.3 SKEIN512_Data.3 skein.3 skein512.3 +MLINKS+=skein.3 SKEIN1024_Init.3 skein.3 SKEIN1024_Update.3 +MLINKS+=skein.3 SKEIN1024_Final.3 skein.3 SKEIN1024_End.3 
+MLINKS+=skein.3 SKEIN1024_File.3 skein.3 SKEIN1024_FileChunk.3 +MLINKS+=skein.3 SKEIN1024_Data.3 skein.3 skein1024.3 + +CLEANFILES+= md[245]hl.c md[245].ref md[245].3 mddriver \ + rmd160.ref rmd160hl.c rmddriver \ + sha0.ref sha0hl.c sha1.ref sha1hl.c shadriver \ + sha224.ref sha256.ref sha224hl.c sha256hl.c \ + sha384hl.c sha384.ref \ + sha512.ref sha512hl.c \ + sha512t224.ref sha512t224hl.c \ + sha512t256.ref sha512t256hl.c \ + skein256hl.c skein512hl.c skein1024hl.c \ + skein256.ref skein512.ref skein1024.ref \ + skeindriver + +# Need src tree sys/md5.h for MD5FileChunk prototype on older systems. +SRCS+= sys/md5.h +CLEANDIRS= sys +CFLAGS+= -I. +sys/md5.h: ${SRCTOP}/sys/${.TARGET} .NOMETA + ln -sf ${.ALLSRC} ${.TARGET} + +# Define WEAK_REFS to provide weak aliases for libmd symbols +# +# Note that the same sources are also used internally by libcrypt, +# in which case: +# * macros are used to rename symbols to libcrypt internal names +# * no weak aliases are generated +CFLAGS+= -I${.CURDIR} -I${SRCTOP}/sys/crypto/sha2 +CFLAGS+= -I${SRCTOP}/sys/crypto/skein +CFLAGS+= -DWEAK_REFS +# unroll the 256 and 512 loops, half unroll the 1024 +CFLAGS.skein_block.c+= -DSKEIN_LOOP=995 +.PATH: ${.CURDIR}/${MACHINE_ARCH} ${SRCTOP}/sys/crypto/sha2 +.PATH: ${SRCTOP}/sys/crypto/skein ${SRCTOP}/sys/crypto/skein/${MACHINE_ARCH} +.PATH: ${SRCTOP}/sys/kern + +USE_ASM_SOURCES?=1 +.if defined(BOOTSTRAPPING) || ${MK_MACHDEP_OPTIMIZATIONS} == no +# Don't build ASM sources when bootstrapping to avoid toolchain dependencies +USE_ASM_SOURCES:=0 +.endif + +.if ${USE_ASM_SOURCES} != 0 +.if exists(${MACHINE_ARCH}/sha1block.S) +SRCS+= sha1block.S +CFLAGS+= -DSHA1_ASM +.if exists(${MACHINE_ARCH}/sha1dispatch.c) +SRCS+= sha1dispatch.c +.endif +.endif +.if exists(${MACHINE_ARCH}/rmd160.S) +SRCS+= rmd160.S +CFLAGS+= -DRMD160_ASM +.endif +.if exists(${MACHINE_ARCH}/skein_block_asm.S) +# Fully unroll all loops in the assembly optimized version +ACFLAGS+= -DSKEIN_LOOP=0 +SRCS+= skein_block_asm.S +CFLAGS+= -DSKEIN_ASM -DSKEIN_USE_ASM=1792 # list of block functions to replace with assembly: 256+512+1024 = 1792 +# 20201002 Add explict Makefile dependency for r366344 (reenabled assembly +# optimized version). SKEIN_USE_ASM determines which routines should come from +# the assembly vs C versions, and skein_block needs to be rebuilt if it changes. 
+skein_block.o skein_block.pico: Makefile +.endif +.if exists(${MACHINE_ARCH}/rmd160.S) || exists(${MACHINE_ARCH}/skein_block_asm.S) +ACFLAGS+= -DELF -Wa,--noexecstack +.endif +.if ${MACHINE_CPUARCH} == "aarch64" +SRCS+= sha256c_arm64.c +CFLAGS+= -DARM64_SHA2 +CFLAGS.sha256c_arm64.c+= -march=armv8-a+crypto +.if ${COMPILER_FEATURES:Maarch64-sha512} +SRCS+= sha512c_arm64.c +CFLAGS+= -DARM64_SHA512 +CFLAGS.sha512c_arm64.c+= -march=armv8.2-a+sha3 +.endif # aarch64-sha512 +.endif +.endif # ${USE_ASM_SOURCES} != 0 + +md4hl.c: mdXhl.c + (echo '#define LENGTH 16'; \ + sed -e 's/mdX/md4/g' -e 's/MDX/MD4/g' ${.ALLSRC}) > ${.TARGET} + +md5hl.c: mdXhl.c + (echo '#define LENGTH 16'; \ + sed -e 's/mdX/md5/g' -e 's/MDX/MD5/g' ${.ALLSRC}) > ${.TARGET} + +sha0hl.c: mdXhl.c + (echo '#define LENGTH 20'; \ + sed -e 's/mdX/sha/g' -e 's/MDX/SHA_/g' -e 's/SHA__/SHA_/g' \ + ${.ALLSRC}) > ${.TARGET} + +sha1hl.c: mdXhl.c + (echo '#define LENGTH 20'; \ + sed -e 's/mdX/sha/g' -e 's/MDX/SHA1_/g' -e 's/SHA1__/SHA1_/g' \ + ${.ALLSRC}) > ${.TARGET} + +sha224hl.c: mdXhl.c + (echo '#define LENGTH 28'; \ + sed -e 's/mdX/sha224/g' -e 's/MDX/SHA224_/g' \ + -e 's/SHA224__/SHA224_/g' \ + ${.ALLSRC}) > ${.TARGET} + +sha256hl.c: mdXhl.c + (echo '#define LENGTH 32'; \ + sed -e 's/mdX/sha256/g' -e 's/MDX/SHA256_/g' \ + -e 's/SHA256__/SHA256_/g' \ + ${.ALLSRC}) > ${.TARGET} + +sha384hl.c: mdXhl.c + (echo '#define LENGTH 48'; \ + sed -e 's/mdX/sha384/g' -e 's/MDX/SHA384_/g' \ + -e 's/SHA384__/SHA384_/g' \ + ${.ALLSRC}) > ${.TARGET} + +sha512hl.c: mdXhl.c + (echo '#define LENGTH 64'; \ + sed -e 's/mdX/sha512/g' -e 's/MDX/SHA512_/g' \ + -e 's/SHA512__/SHA512_/g' \ + ${.ALLSRC}) > ${.TARGET} + +sha512t224hl.c: mdXhl.c + (echo '#define LENGTH 28'; \ + sed -e 's/mdX/sha512t/g' -e 's/MDX/SHA512_224_/g' \ + -e 's/SHA512_224__/SHA512_224_/g' \ + -e 's/SHA512_224_CTX/SHA512_CTX/g' \ + ${.ALLSRC}) > ${.TARGET} + +sha512t256hl.c: mdXhl.c + (echo '#define LENGTH 32'; \ + sed -e 's/mdX/sha512t/g' -e 's/MDX/SHA512_256_/g' \ + -e 's/SHA512_256__/SHA512_256_/g' \ + -e 's/SHA512_256_CTX/SHA512_CTX/g' \ + ${.ALLSRC}) > ${.TARGET} + +rmd160hl.c: mdXhl.c + (echo '#define LENGTH 20'; \ + sed -e 's/mdX/ripemd/g' -e 's/MDX/RIPEMD160_/g' \ + -e 's/RIPEMD160__/RIPEMD160_/g' \ + ${.ALLSRC}) > ${.TARGET} + +skein256hl.c: mdXhl.c + (echo '#define LENGTH 32'; \ + sed -e 's/mdX/skein/g' -e 's/MDX/SKEIN256_/g' \ + -e 's/SKEIN256__/SKEIN256_/g' \ + ${.ALLSRC}) > ${.TARGET} + +skein512hl.c: mdXhl.c + (echo '#define LENGTH 64'; \ + sed -e 's/mdX/skein/g' -e 's/MDX/SKEIN512_/g' \ + -e 's/SKEIN512__/SKEIN512_/g' \ + ${.ALLSRC}) > ${.TARGET} + +skein1024hl.c: mdXhl.c + (echo '#define LENGTH 128'; \ + sed -e 's/mdX/skein/g' -e 's/MDX/SKEIN1024_/g' \ + -e 's/SKEIN1024__/SKEIN1024_/g' \ + ${.ALLSRC}) > ${.TARGET} + + +.for i in 2 4 5 +md${i}.3: ${.CURDIR}/mdX.3 + sed -e "s/mdX/md${i}/g" -e "s/MDX/MD${i}/g" ${.ALLSRC} > ${.TARGET} + cat ${.CURDIR}/md${i}.copyright >> ${.TARGET} +.endfor + +HAS_TESTS= +SUBDIR.${MK_TESTS}+= tests + +.include <bsd.lib.mk> diff --git a/lib/libmd/Makefile.depend b/lib/libmd/Makefile.depend new file mode 100644 index 000000000000..6ef78fac5cbf --- /dev/null +++ b/lib/libmd/Makefile.depend @@ -0,0 +1,15 @@ +# Autogenerated - do NOT edit! 
+ +DIRDEPS = \ + include \ + include/xlocale \ + lib/${CSU_DIR} \ + lib/libc \ + lib/libcompiler_rt \ + + +.include <dirdeps.mk> + +.if ${DEP_RELDIR} == ${_DEP_RELDIR} +# local dependencies - needed for -jN in clean tree +.endif diff --git a/lib/libmd/Symbol.map b/lib/libmd/Symbol.map new file mode 100644 index 000000000000..c3ea3a43968f --- /dev/null +++ b/lib/libmd/Symbol.map @@ -0,0 +1,256 @@ +FBSD_1.8 { + MD4Data; + MD4End; + MD4Fd; + MD4FdChunk; + MD4File; + MD4FileChunk; + MD4Final; + MD4Init; + MD4Pad; + MD4Update; + MD5Data; + MD5End; + MD5Fd; + MD5FdChunk; + MD5File; + MD5FileChunk; + MD5Final; + MD5Init; + MD5Update; + RIPEMD160_Data; + RIPEMD160_End; + RIPEMD160_Fd; + RIPEMD160_FdChunk; + RIPEMD160_File; + RIPEMD160_FileChunk; + RIPEMD160_Final; + RIPEMD160_Init; + RIPEMD160_Update; + SHA1_Data; + SHA1_End; + SHA1_Fd; + SHA1_FdChunk; + SHA1_File; + SHA1_FileChunk; + SHA1_Final; + SHA1_Init; + SHA1_Update; + SHA224_Data; + SHA224_End; + SHA224_Fd; + SHA224_FdChunk; + SHA224_File; + SHA224_FileChunk; + SHA224_Final; + SHA224_Init; + SHA224_Update; + SHA256_Data; + SHA256_End; + SHA256_Fd; + SHA256_FdChunk; + SHA256_File; + SHA256_FileChunk; + SHA256_Final; + SHA256_Init; + SHA256_Update; + SHA384_Data; + SHA384_End; + SHA384_Fd; + SHA384_FdChunk; + SHA384_File; + SHA384_FileChunk; + SHA384_Final; + SHA384_Init; + SHA384_Update; + SHA512_224_Data; + SHA512_224_End; + SHA512_224_Fd; + SHA512_224_FdChunk; + SHA512_224_File; + SHA512_224_FileChunk; + SHA512_224_Final; + SHA512_224_Init; + SHA512_224_Update; + SHA512_256_Data; + SHA512_256_End; + SHA512_256_Fd; + SHA512_256_FdChunk; + SHA512_256_File; + SHA512_256_FileChunk; + SHA512_256_Final; + SHA512_256_Init; + SHA512_256_Update; + SHA512_Data; + SHA512_End; + SHA512_Fd; + SHA512_FdChunk; + SHA512_File; + SHA512_FileChunk; + SHA512_Final; + SHA512_Init; + SHA512_Update; + SHA_Data; + SHA_End; + SHA_Fd; + SHA_FdChunk; + SHA_File; + SHA_FileChunk; + SKEIN1024_Data; + SKEIN1024_End; + SKEIN1024_Fd; + SKEIN1024_FdChunk; + SKEIN1024_File; + SKEIN1024_FileChunk; + SKEIN1024_Final; + SKEIN1024_Init; + SKEIN1024_Update; + SKEIN256_Data; + SKEIN256_End; + SKEIN256_Fd; + SKEIN256_FdChunk; + SKEIN256_File; + SKEIN256_FileChunk; + SKEIN256_Final; + SKEIN256_Init; + SKEIN256_Update; + SKEIN512_Data; + SKEIN512_End; + SKEIN512_Fd; + SKEIN512_FdChunk; + SKEIN512_File; + SKEIN512_FileChunk; + SKEIN512_Final; + SKEIN512_Init; + SKEIN512_Update; +}; + +FBSDprivate_1.0 { + _libmd_MD4Data; + _libmd_MD4End; + _libmd_MD4Fd; + _libmd_MD4FdChunk; + _libmd_MD4File; + _libmd_MD4FileChunk; + _libmd_MD4Final; + _libmd_MD4Init; + _libmd_MD4Pad; + _libmd_MD4Update; + _libmd_MD5Data; + _libmd_MD5End; + _libmd_MD5Fd; + _libmd_MD5FdChunk; + _libmd_MD5File; + _libmd_MD5FileChunk; + _libmd_MD5Final; + _libmd_MD5Init; + _libmd_MD5Update; + _libmd_RIPEMD160_Data; + _libmd_RIPEMD160_End; + _libmd_RIPEMD160_Fd; + _libmd_RIPEMD160_FdChunk; + _libmd_RIPEMD160_File; + _libmd_RIPEMD160_FileChunk; + _libmd_RIPEMD160_Final; + _libmd_RIPEMD160_Init; + _libmd_RIPEMD160_Update; + _libmd_SHA1_Data; + _libmd_SHA1_End; + _libmd_SHA1_Fd; + _libmd_SHA1_FdChunk; + _libmd_SHA1_File; + _libmd_SHA1_FileChunk; + _libmd_SHA1_Final; + _libmd_SHA1_Init; + _libmd_SHA1_Update; + _libmd_SHA224_Data; + _libmd_SHA224_End; + _libmd_SHA224_Fd; + _libmd_SHA224_FdChunk; + _libmd_SHA224_File; + _libmd_SHA224_FileChunk; + _libmd_SHA224_Final; + _libmd_SHA224_Init; + _libmd_SHA224_Update; + _libmd_SHA256_Data; + _libmd_SHA256_End; + _libmd_SHA256_Fd; + _libmd_SHA256_FdChunk; + _libmd_SHA256_File; + 
_libmd_SHA256_FileChunk; + _libmd_SHA256_Final; + _libmd_SHA256_Init; + _libmd_SHA256_Update; + _libmd_SHA384_Data; + _libmd_SHA384_End; + _libmd_SHA384_Fd; + _libmd_SHA384_FdChunk; + _libmd_SHA384_File; + _libmd_SHA384_FileChunk; + _libmd_SHA384_Final; + _libmd_SHA384_Init; + _libmd_SHA384_Update; + _libmd_SHA512_224_Data; + _libmd_SHA512_224_End; + _libmd_SHA512_224_Fd; + _libmd_SHA512_224_FdChunk; + _libmd_SHA512_224_File; + _libmd_SHA512_224_FileChunk; + _libmd_SHA512_224_Final; + _libmd_SHA512_224_Init; + _libmd_SHA512_224_Update; + _libmd_SHA512_256_Data; + _libmd_SHA512_256_End; + _libmd_SHA512_256_Fd; + _libmd_SHA512_256_FdChunk; + _libmd_SHA512_256_File; + _libmd_SHA512_256_FileChunk; + _libmd_SHA512_256_Final; + _libmd_SHA512_256_Init; + _libmd_SHA512_256_Update; + _libmd_SHA512_Data; + _libmd_SHA512_End; + _libmd_SHA512_Fd; + _libmd_SHA512_FdChunk; + _libmd_SHA512_File; + _libmd_SHA512_FileChunk; + _libmd_SHA512_Final; + _libmd_SHA512_Init; + _libmd_SHA512_Update; + _libmd_SHA_Data; + _libmd_SHA_End; + _libmd_SHA_Fd; + _libmd_SHA_FdChunk; + _libmd_SHA_File; + _libmd_SHA_FileChunk; + _libmd_SHA_Final; + _libmd_SHA_Init; + _libmd_SHA_Update; + _libmd_SKEIN1024_Data; + _libmd_SKEIN1024_End; + _libmd_SKEIN1024_Fd; + _libmd_SKEIN1024_FdChunk; + _libmd_SKEIN1024_File; + _libmd_SKEIN1024_FileChunk; + _libmd_SKEIN1024_Final; + _libmd_SKEIN1024_Init; + _libmd_SKEIN1024_Update; + _libmd_SKEIN256_Data; + _libmd_SKEIN256_End; + _libmd_SKEIN256_Fd; + _libmd_SKEIN256_FdChunk; + _libmd_SKEIN256_File; + _libmd_SKEIN256_FileChunk; + _libmd_SKEIN256_Final; + _libmd_SKEIN256_Init; + _libmd_SKEIN256_Update; + _libmd_SKEIN512_Data; + _libmd_SKEIN512_End; + _libmd_SKEIN512_Fd; + _libmd_SKEIN512_FdChunk; + _libmd_SKEIN512_File; + _libmd_SKEIN512_FileChunk; + _libmd_SKEIN512_Final; + _libmd_SKEIN512_Init; + _libmd_SKEIN512_Update; +}; diff --git a/lib/libmd/aarch64/sha1block.S b/lib/libmd/aarch64/sha1block.S new file mode 100644 index 000000000000..56a0297efadd --- /dev/null +++ b/lib/libmd/aarch64/sha1block.S @@ -0,0 +1,490 @@ +/*- + * Copyright (c) 2024 Robert Clausecker <fuz@freebsd.org> + * + * SPDX-License-Identifier: BSD-2-Clause + * + * sha1block_sha1 implementation based on sha1-arm.c, + * written and placed in public domain by Jeffrey Walton + * based on code from ARM, and by Johannes Schneiders, Skip + * Hovsmith and Barry O'Rourke for the mbedTLS project. + */ + +#include <machine/asm.h> + +/* + * Scalar SHA1 implementation. + * + * Due to the ample register file available on AArch64, the w array is + * kept entirely in registers. The saved a-e variables are instead kept + * in memory as we don't have that much memory. + */ + + // sha1block(SHA1_CTX, buf, len) +ENTRY(_libmd_sha1block_scalar) +ctx .req x0 +buf .req x1 +len .req x2 +w .req sp +a .req w3 +b .req w4 +c .req w5 +d .req w6 +e .req w7 +k .req w8 +f .req w9 +tmp .req w10 +w_0 .req w11 +w_1 .req w12 +w_2 .req w13 +w_3 .req w14 +w_4 .req w15 +w_5 .req w16 +w_6 .req w17 +// w18 is the platform register +w_7 .req w19 +w_8 .req w20 +w_9 .req w21 +w_10 .req w22 +w_11 .req w23 +w_12 .req w24 +w_13 .req w25 +w_14 .req w26 +w_15 .req w27 + +.macro shuffle w_i, w_i3, w_i8, w_i14 + eor \w_i, \w_i, \w_i3 + eor tmp, \w_i8, \w_i14 + eor \w_i, \w_i, tmp // w[i-16] ^ w[i-14] ^ w[i-8] ^ w[i-3] + ror \w_i, \w_i, #31 // w[i] = ... 
ror #31 +.endm + +.macro func1 a, b, c, d, e + and f, \c, \b + bic tmp, \d, \b + orr f, f, tmp +.endm + +.macro func2 a, b, c, d, e + eor f, \b, \c + eor f, f, \d +.endm + +.macro func3 a, b, c, d, e + eor tmp, \b, \c + and f, \b, \c + and tmp, tmp, \d + orr f, f, tmp +.endm + +.macro func4 a, b, c, d, e + func2 \a, \b, \c, \d, \e +.endm + +.macro mix a, b, c, d, e, w_i + ror \b, \b, #2 + ror tmp, \a, #27 + add \e, \e, \w_i + add tmp, tmp, k + add \e, \e, f + add \e, \e, tmp // (a ror 27) + e + f + k + w[i] +.endm + +.macro round1 a, b, c, d, e, w_i + func1 \a, \b, \c, \d, \e + rev \w_i, \w_i + mix \a, \b, \c, \d, \e, \w_i +.endm + +.macro round func, a, b, c, d, e, w_i, w_i3, w_i8, w_i14 + shuffle \w_i, \w_i3, \w_i8, \w_i14 + \func \a, \b, \c, \d, \e + mix \a, \b, \c, \d, \e, \w_i +.endm + +.macro round1x a, b, c, d, e, w_i, w_i3, w_i8, w_i14 + round func1, \a, \b, \c, \d, \e, \w_i, \w_i3, \w_i8, \w_i14 +.endm + +.macro round2 a, b, c, d, e, w_i, w_i3, w_i8, w_i14 + round func2, \a, \b, \c, \d, \e, \w_i, \w_i3, \w_i8, \w_i14 +.endm + +.macro round3 a, b, c, d, e, w_i, w_i3, w_i8, w_i14 + round func3, \a, \b, \c, \d, \e, \w_i, \w_i3, \w_i8, \w_i14 +.endm + +.macro round4 a, b, c, d, e, w_i, w_i3, w_i8, w_i14 + round func4, \a, \b, \c, \d, \e, \w_i, \w_i3, \w_i8, \w_i14 +.endm + + ands len, len, #~63 // take length in multiples of block length + beq 1f // bail out if input empty + + sub sp, sp, #24+9*8 // allocate stack space + str x19, [sp, #24+0*8] + stp x20, x21, [sp, #24+1*8] + stp x22, x23, [sp, #24+3*8] + stp x24, x25, [sp, #24+5*8] + stp x26, x27, [sp, #24+7*8] + + ldp a, b, [ctx, #0] // load SHA1 state from context + ldp c, d, [ctx, #8] + ldr e, [ctx, #16] + +0: stp a, b, [sp, #0] // save old SHA1 state + stp c, d, [sp, #8] + str e, [sp, #16] + + movz k, #0x7999 // round constant 1 + movk k, #0x5a82, lsl #16 + + ldp w_0, w_1, [buf, #0*4] + round1 a, b, c, d, e, w_0 + round1 e, a, b, c, d, w_1 + + ldp w_2, w_3, [buf, #2*4] + round1 d, e, a, b, c, w_2 + round1 c, d, e, a, b, w_3 + + ldp w_4, w_5, [buf, #4*4] + round1 b, c, d, e, a, w_4 + round1 a, b, c, d, e, w_5 + + ldp w_6, w_7, [buf, #6*4] + round1 e, a, b, c, d, w_6 + round1 d, e, a, b, c, w_7 + + ldp w_8, w_9, [buf, #8*4] + round1 c, d, e, a, b, w_8 + round1 b, c, d, e, a, w_9 + + ldp w_10, w_11, [buf, #10*4] + round1 a, b, c, d, e, w_10 + round1 e, a, b, c, d, w_11 + + ldp w_12, w_13, [buf, #12*4] + round1 d, e, a, b, c, w_12 + round1 c, d, e, a, b, w_13 + + ldp w_14, w_15, [buf, #14*4] + round1 b, c, d, e, a, w_14 + round1 a, b, c, d, e, w_15 + + round1x e, a, b, c, d, w_0, w_13, w_8, w_2 + round1x d, e, a, b, c, w_1, w_14, w_9, w_3 + round1x c, d, e, a, b, w_2, w_15, w_10, w_4 + round1x b, c, d, e, a, w_3, w_0, w_11, w_5 + + movz k, #0xeba1 // round constant 2 + movk k, #0x6ed9, lsl #16 + + round2 a, b, c, d, e, w_4, w_1, w_12, w_6 + round2 e, a, b, c, d, w_5, w_2, w_13, w_7 + round2 d, e, a, b, c, w_6, w_3, w_14, w_8 + round2 c, d, e, a, b, w_7, w_4, w_15, w_9 + round2 b, c, d, e, a, w_8, w_5, w_0, w_10 + + round2 a, b, c, d, e, w_9, w_6, w_1, w_11 + round2 e, a, b, c, d, w_10, w_7, w_2, w_12 + round2 d, e, a, b, c, w_11, w_8, w_3, w_13 + round2 c, d, e, a, b, w_12, w_9, w_4, w_14 + round2 b, c, d, e, a, w_13, w_10, w_5, w_15 + + round2 a, b, c, d, e, w_14, w_11, w_6, w_0 + round2 e, a, b, c, d, w_15, w_12, w_7, w_1 + round2 d, e, a, b, c, w_0, w_13, w_8, w_2 + round2 c, d, e, a, b, w_1, w_14, w_9, w_3 + round2 b, c, d, e, a, w_2, w_15, w_10, w_4 + + round2 a, b, c, d, e, w_3, w_0, w_11, w_5 + round2 e, a, b, c, d, w_4, w_1, 
w_12, w_6 + round2 d, e, a, b, c, w_5, w_2, w_13, w_7 + round2 c, d, e, a, b, w_6, w_3, w_14, w_8 + round2 b, c, d, e, a, w_7, w_4, w_15, w_9 + + movz k, #0xbcdc // round constant 3 + movk k, #0x8f1b, lsl #16 + + round3 a, b, c, d, e, w_8, w_5, w_0, w_10 + round3 e, a, b, c, d, w_9, w_6, w_1, w_11 + round3 d, e, a, b, c, w_10, w_7, w_2, w_12 + round3 c, d, e, a, b, w_11, w_8, w_3, w_13 + round3 b, c, d, e, a, w_12, w_9, w_4, w_14 + + round3 a, b, c, d, e, w_13, w_10, w_5, w_15 + round3 e, a, b, c, d, w_14, w_11, w_6, w_0 + round3 d, e, a, b, c, w_15, w_12, w_7, w_1 + round3 c, d, e, a, b, w_0, w_13, w_8, w_2 + round3 b, c, d, e, a, w_1, w_14, w_9, w_3 + + round3 a, b, c, d, e, w_2, w_15, w_10, w_4 + round3 e, a, b, c, d, w_3, w_0, w_11, w_5 + round3 d, e, a, b, c, w_4, w_1, w_12, w_6 + round3 c, d, e, a, b, w_5, w_2, w_13, w_7 + round3 b, c, d, e, a, w_6, w_3, w_14, w_8 + + round3 a, b, c, d, e, w_7, w_4, w_15, w_9 + round3 e, a, b, c, d, w_8, w_5, w_0, w_10 + round3 d, e, a, b, c, w_9, w_6, w_1, w_11 + round3 c, d, e, a, b, w_10, w_7, w_2, w_12 + round3 b, c, d, e, a, w_11, w_8, w_3, w_13 + + movz k, #0xc1d6 // round constant 4 + movk k, #0xca62, lsl #16 + + round4 a, b, c, d, e, w_12, w_9, w_4, w_14 + round4 e, a, b, c, d, w_13, w_10, w_5, w_15 + round4 d, e, a, b, c, w_14, w_11, w_6, w_0 + round4 c, d, e, a, b, w_15, w_12, w_7, w_1 + round4 b, c, d, e, a, w_0, w_13, w_8, w_2 + + round4 a, b, c, d, e, w_1, w_14, w_9, w_3 + round4 e, a, b, c, d, w_2, w_15, w_10, w_4 + round4 d, e, a, b, c, w_3, w_0, w_11, w_5 + round4 c, d, e, a, b, w_4, w_1, w_12, w_6 + round4 b, c, d, e, a, w_5, w_2, w_13, w_7 + + round4 a, b, c, d, e, w_6, w_3, w_14, w_8 + round4 e, a, b, c, d, w_7, w_4, w_15, w_9 + round4 d, e, a, b, c, w_8, w_5, w_0, w_10 + round4 c, d, e, a, b, w_9, w_6, w_1, w_11 + round4 b, c, d, e, a, w_10, w_7, w_2, w_12 + + round4 a, b, c, d, e, w_11, w_8, w_3, w_13 + round4 e, a, b, c, d, w_12, w_9, w_4, w_14 + round4 d, e, a, b, c, w_13, w_10, w_5, w_15 + round4 c, d, e, a, b, w_14, w_11, w_6, w_0 + round4 b, c, d, e, a, w_15, w_12, w_7, w_1 + + ldp w_0, w_1, [sp, #0] // reload saved SHA1 state + ldp w_2, w_3, [sp, #8] + ldr w_4, [sp, #16] + + add a, a, w_0 + add b, b, w_1 + add c, c, w_2 + add d, d, w_3 + add e, e, w_4 + + add buf, buf, #64 + subs len, len, #64 + bhi 0b + + stp a, b, [ctx, #0] // write updated SHA1 state + stp c, d, [ctx, #8] + str e, [ctx, #16] + + ldr x19, [sp, #24+0*8] + ldp x20, x21, [sp, #24+1*8] + ldp x22, x23, [sp, #24+3*8] + ldp x24, x25, [sp, #24+5*8] + ldp x26, x27, [sp, #24+7*8] + add sp, sp, #24+9*8 + +1: ret +END(_libmd_sha1block_scalar) + +/* + * SHA1 implementation using the SHA1 instruction set extension. 
+ */ + + .arch_extension sha2 + + // sha1block(SHA1_CTX, buf, len) +ENTRY(_libmd_sha1block_sha1) + /* ctx, buf, len: same as for sha1block_scalar */ +kaddr .req x3 +abcd .req v0 +abcd_q .req q0 // alias for use with scalar instructions +abcd_s .req s0 +e0 .req s1 +e0_v .req v1 +e1 .req s2 +abcd_saved .req v3 +e0_saved .req v4 +tmp0 .req v5 +tmp1 .req v6 +msg0 .req v16 +msg1 .req v17 +msg2 .req v18 +msg3 .req v19 +k0 .req v20 +k1 .req v21 +k2 .req v22 +k3 .req v23 + + ands len, len, #~63 // take length in multiples of block length + beq 1f // bail out if input empty + + ldr abcd_q, [ctx, #0] + ldr e0, [ctx, #16] + + adrp kaddr, k1234 + add kaddr, kaddr, #:lo12:k1234 + ld4r {k0.4s, k1.4s, k2.4s, k3.4s}, [kaddr] + +0: mov abcd_saved.16b, abcd.16b + mov e0_saved.16b, e0_v.16b + + ld1 {msg0.4s, msg1.4s, msg2.4s, msg3.4s}, [buf], #64 + rev32 msg0.16b, msg0.16b + rev32 msg1.16b, msg1.16b + rev32 msg2.16b, msg2.16b + rev32 msg3.16b, msg3.16b + + add tmp0.4s, msg0.4s, k0.4s + add tmp1.4s, msg1.4s, k0.4s + + /* rounds 0--3 */ + sha1h e1, abcd_s + sha1c abcd_q, e0, tmp0.4s + add tmp0.4s, msg2.4s, k0.4s + sha1su0 msg0.4s, msg1.4s, msg2.4s + + /* rounds 4--7 */ + sha1h e0, abcd_s + sha1c abcd_q, e1, tmp1.4s + add tmp1.4s, msg3.4s, k0.4s + sha1su1 msg0.4s, msg3.4s + sha1su0 msg1.4s, msg2.4s, msg3.4s + + /* rounds 8--11 */ + sha1h e1, abcd_s + sha1c abcd_q, e0, tmp0.4s + add tmp0.4s, msg0.4s, k0.4s + sha1su1 msg1.4s, msg0.4s + sha1su0 msg2.4s, msg3.4s, msg0.4s + + /* rounds 12--15 */ + sha1h e0, abcd_s + sha1c abcd_q, e1, tmp1.4s + add tmp1.4s, msg1.4s, k1.4s + sha1su1 msg2.4s, msg1.4s + sha1su0 msg3.4s, msg0.4s, msg1.4s + + /* rounds 16--19 */ + sha1h e1, abcd_s + sha1c abcd_q, e0, tmp0.4s + add tmp0.4s, msg2.4s, k1.4s + sha1su1 msg3.4s, msg2.4s + sha1su0 msg0.4s, msg1.4s, msg2.4s + + /* rounds 20--23 */ + sha1h e0, abcd_s + sha1p abcd_q, e1, tmp1.4s + add tmp1.4s, msg3.4s, k1.4s + sha1su1 msg0.4s, msg3.4s + sha1su0 msg1.4s, msg2.4s, msg3.4s + + /* rounds 24--27 */ + sha1h e1, abcd_s + sha1p abcd_q, e0, tmp0.4s + add tmp0.4s, msg0.4s, k1.4s + sha1su1 msg1.4s, msg0.4s + sha1su0 msg2.4s, msg3.4s, msg0.4s + + /* rounds 28--31 */ + sha1h e0, abcd_s + sha1p abcd_q, e1, tmp1.4s + add tmp1.4s, msg1.4s, k1.4s + sha1su1 msg2.4s, msg1.4s + sha1su0 msg3.4s, msg0.4s, msg1.4s + + /* rounds 32--35 */ + sha1h e1, abcd_s + sha1p abcd_q, e0, tmp0.4s + add tmp0.4s, msg2.4s, k2.4s + sha1su1 msg3.4s, msg2.4s + sha1su0 msg0.4s, msg1.4s, msg2.4s + + /* rounds 36--39 */ + sha1h e0, abcd_s + sha1p abcd_q, e1, tmp1.4s + add tmp1.4s, msg3.4s, k2.4s + sha1su1 msg0.4s, msg3.4s + sha1su0 msg1.4s, msg2.4s, msg3.4s + + /* rounds 40--43 */ + sha1h e1, abcd_s + sha1m abcd_q, e0, tmp0.4s + add tmp0.4s, msg0.4s, k2.4s + sha1su1 msg1.4s, msg0.4s + sha1su0 msg2.4s, msg3.4s, msg0.4s + + /* rounds 44--47 */ + sha1h e0, abcd_s + sha1m abcd_q, e1, tmp1.4s + add tmp1.4s, msg1.4s, k2.4s + sha1su1 msg2.4s, msg1.4s + sha1su0 msg3.4s, msg0.4s, msg1.4s + + /* rounds 48--51 */ + sha1h e1, abcd_s + sha1m abcd_q, e0, tmp0.4s + add tmp0.4s, msg2.4s, k2.4s + sha1su1 msg3.4s, msg2.4s + sha1su0 msg0.4s, msg1.4s, msg2.4s + + /* rounds 52--55 */ + sha1h e0, abcd_s + sha1m abcd_q, e1, tmp1.4s + add tmp1.4s, msg3.4s, k3.4s + sha1su1 msg0.4s, msg3.4s + sha1su0 msg1.4s, msg2.4s, msg3.4s + + /* rounds 56--59 */ + sha1h e1, abcd_s + sha1m abcd_q, e0, tmp0.4s + add tmp0.4s, msg0.4s, k3.4s + sha1su1 msg1.4s, msg0.4s + sha1su0 msg2.4s, msg3.4s, msg0.4s + + /* rounds 60--63 */ + sha1h e0, abcd_s + sha1p abcd_q, e1, tmp1.4s + add tmp1.4s, msg1.4s, k3.4s + sha1su1 
msg2.4s, msg1.4s + sha1su0 msg3.4s, msg0.4s, msg1.4s + + /* rounds 64--67 */ + sha1h e1, abcd_s + sha1p abcd_q, e0, tmp0.4s + add tmp0.4s, msg2.4s, k3.4s + sha1su1 msg3.4s, msg2.4s + sha1su0 msg0.4s, msg1.4s, msg2.4s + + /* rounds 68--71 */ + sha1h e0, abcd_s + sha1p abcd_q, e1, tmp1.4s + add tmp1.4s, msg3.4s, k3.4s + sha1su1 msg0.4s, msg3.4s + + /* rounds 72--75 */ + sha1h e1, abcd_s + sha1p abcd_q, e0, tmp0.4s + + /* rounds 76--79 */ + sha1h e0, abcd_s + sha1p abcd_q, e1, tmp1.4s + + add e0_v.4s, e0_v.4s, e0_saved.4s + add abcd.4s, abcd.4s, abcd_saved.4s + + subs len, len, #64 + bhi 0b + + str abcd_q, [ctx, #0] + str e0, [ctx, #16] + +1: ret +END(_libmd_sha1block_sha1) + + .section .rodata + .balign 16 +k1234: .4byte 0x5a827999 + .4byte 0x6ed9eba1 + .4byte 0x8f1bbcdc + .4byte 0xca62c1d6 + .size k1234, .-k1234 + + .section .note.GNU-stack,"",%progbits diff --git a/lib/libmd/aarch64/sha1dispatch.c b/lib/libmd/aarch64/sha1dispatch.c new file mode 100644 index 000000000000..e34bf0a1a344 --- /dev/null +++ b/lib/libmd/aarch64/sha1dispatch.c @@ -0,0 +1,24 @@ +/*- + * Copyright (c) 2024 Robert Clausecker <fuz@freebsd.org> + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include <machine/ifunc.h> +#include <sha.h> +#include <sys/auxv.h> + +extern void _libmd_sha1block_scalar(SHA1_CTX *, const void *, size_t); +extern void _libmd_sha1block_sha1(SHA1_CTX *, const void *, size_t); + +DEFINE_IFUNC(, void, sha1_block, (SHA1_CTX *, const void *, size_t)) +{ + unsigned long hwcap = 0; + + elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)); + + if (hwcap & HWCAP_SHA1) + return (_libmd_sha1block_sha1); + else + return (_libmd_sha1block_scalar); +} diff --git a/lib/libmd/amd64/sha1block.S b/lib/libmd/amd64/sha1block.S new file mode 100644 index 000000000000..f1291ef2647a --- /dev/null +++ b/lib/libmd/amd64/sha1block.S @@ -0,0 +1,1851 @@ +/*- + * Copyright (c) 2013 The Go Authors. All rights reserved. + * Copyright (c) 2024 Robert Clausecker <fuz@freebsd.org> + * + * Adapted from Go's crypto/sha1/sha1block_amd64.s. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <machine/asm.h> + +/* + * SHA-1 block routine. See sha1c.c for C equivalent. + * + * There are 80 rounds of 4 types: + * - rounds 0-15 are type 1 and load data (round1 macro). + * - rounds 16-19 are type 1 and do not load data (round1x macro). + * - rounds 20-39 are type 2 and do not load data (round2 macro). + * - rounds 40-59 are type 3 and do not load data (round3 macro). + * - rounds 60-79 are type 4 and do not load data (round4 macro). + * + * Each round loads or shuffles the data, then computes a per-round + * function of b, c, d, and then mixes the result into and rotates the + * five registers a, b, c, d, e holding the intermediate results. + * + * The register rotation is implemented by rotating the arguments to + * the round macros instead of by explicit move instructions. + */ +.macro load index + mov (\index)*4(%rsi), %r10d + bswap %r10d + mov %r10d, (\index)*4(%rsp) +.endm + +.macro shuffle index + mov ((\index )&0xf)*4(%rsp), %r10d + xor ((\index- 3)&0xf)*4(%rsp), %r10d + xor ((\index- 8)&0xf)*4(%rsp), %r10d + xor ((\index-14)&0xf)*4(%rsp), %r10d + rol $1, %r10d + mov %r10d, ((\index)&0xf)*4(%rsp) +.endm + +.macro func1 a, b, c, d, e + mov \d, %r9d + xor \c, %r9d + and \b, %r9d + xor \d, %r9d +.endm + +.macro func2 a, b, c, d, e + mov \b, %r9d + xor \c, %r9d + xor \d, %r9d +.endm + +.macro func3 a, b, c, d, e + mov \b, %r8d + or \c, %r8d + and \d, %r8d + mov \b, %r9d + and \c, %r9d + or %r8d, %r9d +.endm + +.macro func4 a, b, c, d, e + func2 \a, \b, \c, \d, \e +.endm + +.macro mix a, b, c, d, e, const + rol $30, \b + add %r9d, \e + mov \a, %r8d + rol $5, %r8d + lea \const(\e, %r10d, 1), \e + add %r8d, \e +.endm + +.macro round1 a, b, c, d, e, index + load \index + func1 \a, \b, \c, \d, \e + mix \a, \b, \c, \d, \e, 0x5a827999 +.endm + +.macro round1x a, b, c, d, e, index + shuffle \index + func1 \a, \b, \c, \d, \e + mix \a, \b, \c, \d, \e, 0x5a827999 +.endm + +.macro round2 a, b, c, d, e, index + shuffle \index + func2 \a, \b, \c, \d, \e + mix \a, \b, \c, \d, \e, 0x6ed9eba1 +.endm + +.macro round3 a, b, c, d, e, index + shuffle \index + func3 \a, \b, \c, \d, \e + mix \a, \b, \c, \d, \e, 0x8f1bbcdc +.endm + +.macro round4 a, b, c, d, e, index + shuffle \index + func4 \a, \b, \c, \d, \e + mix \a, \b, \c, \d, \e, 0xca62c1d6 +.endm + + // sha1block(SHA1_CTX, buf, len) +ENTRY(_libmd_sha1block_scalar) + push %rbp + push %rbx + push %r12 + push %r13 + push %r14 + push %r15 + push %rdi // rdi: SHA1_CTX + sub $64+8, %rsp // 64 bytes for round keys + // plus alignment + + mov %rdi, %rbp + // rsi: buf + and $~63, %rdx // rdx: length in blocks + lea (%rsi, %rdx, 1), %rdi // rdi: end pointer + mov (%rbp), %eax // c->h0 + mov 4(%rbp), %ebx // c->h1 + mov 8(%rbp), %ecx // c->h2 + mov 12(%rbp), %edx // c->h3 + mov 16(%rbp), %ebp // c->h4 + + cmp %rsi, %rdi // any data to process? 
+ je .Lend + +.Lloop: mov %eax, %r11d + mov %ebx, %r12d + mov %ecx, %r13d + mov %edx, %r14d + mov %ebp, %r15d + + round1 %eax, %ebx, %ecx, %edx, %ebp, 0 + round1 %ebp, %eax, %ebx, %ecx, %edx, 1 + round1 %edx, %ebp, %eax, %ebx, %ecx, 2 + round1 %ecx, %edx, %ebp, %eax, %ebx, 3 + round1 %ebx, %ecx, %edx, %ebp, %eax, 4 + + round1 %eax, %ebx, %ecx, %edx, %ebp, 5 + round1 %ebp, %eax, %ebx, %ecx, %edx, 6 + round1 %edx, %ebp, %eax, %ebx, %ecx, 7 + round1 %ecx, %edx, %ebp, %eax, %ebx, 8 + round1 %ebx, %ecx, %edx, %ebp, %eax, 9 + + round1 %eax, %ebx, %ecx, %edx, %ebp, 10 + round1 %ebp, %eax, %ebx, %ecx, %edx, 11 + round1 %edx, %ebp, %eax, %ebx, %ecx, 12 + round1 %ecx, %edx, %ebp, %eax, %ebx, 13 + round1 %ebx, %ecx, %edx, %ebp, %eax, 14 + + round1 %eax, %ebx, %ecx, %edx, %ebp, 15 + round1x %ebp, %eax, %ebx, %ecx, %edx, 16 + round1x %edx, %ebp, %eax, %ebx, %ecx, 17 + round1x %ecx, %edx, %ebp, %eax, %ebx, 18 + round1x %ebx, %ecx, %edx, %ebp, %eax, 19 + + round2 %eax, %ebx, %ecx, %edx, %ebp, 20 + round2 %ebp, %eax, %ebx, %ecx, %edx, 21 + round2 %edx, %ebp, %eax, %ebx, %ecx, 22 + round2 %ecx, %edx, %ebp, %eax, %ebx, 23 + round2 %ebx, %ecx, %edx, %ebp, %eax, 24 + + round2 %eax, %ebx, %ecx, %edx, %ebp, 25 + round2 %ebp, %eax, %ebx, %ecx, %edx, 26 + round2 %edx, %ebp, %eax, %ebx, %ecx, 27 + round2 %ecx, %edx, %ebp, %eax, %ebx, 28 + round2 %ebx, %ecx, %edx, %ebp, %eax, 29 + + round2 %eax, %ebx, %ecx, %edx, %ebp, 30 + round2 %ebp, %eax, %ebx, %ecx, %edx, 31 + round2 %edx, %ebp, %eax, %ebx, %ecx, 32 + round2 %ecx, %edx, %ebp, %eax, %ebx, 33 + round2 %ebx, %ecx, %edx, %ebp, %eax, 34 + + round2 %eax, %ebx, %ecx, %edx, %ebp, 35 + round2 %ebp, %eax, %ebx, %ecx, %edx, 36 + round2 %edx, %ebp, %eax, %ebx, %ecx, 37 + round2 %ecx, %edx, %ebp, %eax, %ebx, 38 + round2 %ebx, %ecx, %edx, %ebp, %eax, 39 + + round3 %eax, %ebx, %ecx, %edx, %ebp, 40 + round3 %ebp, %eax, %ebx, %ecx, %edx, 41 + round3 %edx, %ebp, %eax, %ebx, %ecx, 42 + round3 %ecx, %edx, %ebp, %eax, %ebx, 43 + round3 %ebx, %ecx, %edx, %ebp, %eax, 44 + + round3 %eax, %ebx, %ecx, %edx, %ebp, 45 + round3 %ebp, %eax, %ebx, %ecx, %edx, 46 + round3 %edx, %ebp, %eax, %ebx, %ecx, 47 + round3 %ecx, %edx, %ebp, %eax, %ebx, 48 + round3 %ebx, %ecx, %edx, %ebp, %eax, 49 + + round3 %eax, %ebx, %ecx, %edx, %ebp, 50 + round3 %ebp, %eax, %ebx, %ecx, %edx, 51 + round3 %edx, %ebp, %eax, %ebx, %ecx, 52 + round3 %ecx, %edx, %ebp, %eax, %ebx, 53 + round3 %ebx, %ecx, %edx, %ebp, %eax, 54 + + round3 %eax, %ebx, %ecx, %edx, %ebp, 55 + round3 %ebp, %eax, %ebx, %ecx, %edx, 56 + round3 %edx, %ebp, %eax, %ebx, %ecx, 57 + round3 %ecx, %edx, %ebp, %eax, %ebx, 58 + round3 %ebx, %ecx, %edx, %ebp, %eax, 59 + + round4 %eax, %ebx, %ecx, %edx, %ebp, 60 + round4 %ebp, %eax, %ebx, %ecx, %edx, 61 + round4 %edx, %ebp, %eax, %ebx, %ecx, 62 + round4 %ecx, %edx, %ebp, %eax, %ebx, 63 + round4 %ebx, %ecx, %edx, %ebp, %eax, 64 + + round4 %eax, %ebx, %ecx, %edx, %ebp, 65 + round4 %ebp, %eax, %ebx, %ecx, %edx, 66 + round4 %edx, %ebp, %eax, %ebx, %ecx, 67 + round4 %ecx, %edx, %ebp, %eax, %ebx, 68 + round4 %ebx, %ecx, %edx, %ebp, %eax, 69 + + round4 %eax, %ebx, %ecx, %edx, %ebp, 70 + round4 %ebp, %eax, %ebx, %ecx, %edx, 71 + round4 %edx, %ebp, %eax, %ebx, %ecx, 72 + round4 %ecx, %edx, %ebp, %eax, %ebx, 73 + round4 %ebx, %ecx, %edx, %ebp, %eax, 74 + + round4 %eax, %ebx, %ecx, %edx, %ebp, 75 + round4 %ebp, %eax, %ebx, %ecx, %edx, 76 + round4 %edx, %ebp, %eax, %ebx, %ecx, 77 + round4 %ecx, %edx, %ebp, %eax, %ebx, 78 + round4 %ebx, %ecx, %edx, %ebp, %eax, 79 + + add %r11d, %eax + add %r12d, %ebx + add %r13d, %ecx + 
add %r14d, %edx + add %r15d, %ebp + + add $64, %rsi + cmp %rdi, %rsi + jb .Lloop + +.Lend: add $64+8, %rsp + pop %rdi // SHA1_CTX + mov %eax, (%rdi) + mov %ebx, 4(%rdi) + mov %ecx, 8(%rdi) + mov %edx, 12(%rdi) + mov %ebp, 16(%rdi) + + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbx + pop %rbp + ret +END(_libmd_sha1block_scalar) + +/* + * This is the implementation using AVX2, BMI1 and BMI2. It is based on: + * "SHA-1 implementation with Intel(R) AVX2 instruction set extensions" + * From http://software.intel.com/en-us/articles + * (look for improving-the-performance-of-the-secure-hash-algorithm-1) + * This implementation is 2x unrolled, and interleaves vector instructions, + * used to precompute W, with scalar computation of current round + * for optimal scheduling. + */ + + /* trivial helper macros */ +.macro update_hash a, tb, c, d, e + add (%r9), \a + mov \a, (%r9) + add 4(%r9), \tb + mov \tb, 4(%r9) + add 8(%r9), \c + mov \c, 8(%r9) + add 12(%r9), \d + mov \d, 12(%r9) + add 16(%r9), \e + mov \e, 16(%r9) +.endm + + /* help macros for recalc, which does precomputations */ +.macro precalc0 offset + vmovdqu \offset(%r10), %xmm0 +.endm + +.macro precalc1 offset + vinserti128 $1, \offset(%r13), %ymm0, %ymm0 +.endm + +.macro precalc2 yreg + vpshufb %ymm10, %ymm0, \yreg +.endm + +.macro precalc4 yreg, k_offset + vpaddd \k_offset(%r8), \yreg, %ymm0 +.endm + +.macro precalc7 offset + vmovdqu %ymm0, (\offset)*2(%r14) +.endm + +/* + * Message scheduling pre-compute for rounds 0-15 + * r13 is a pointer to the even 64-byte block + * r10 is a pointer to the odd 64-byte block + * r14 is a pointer to the temp buffer + * xmm0 is used as a temp register + * yreg is clobbered as part of the computation + * offset chooses a 16 byte chunk within a block + * r8 is a pointer to the constants block + * k_offset chooses K constants relevant to this round + * xmm10 holds the swap mask + */ +.macro precalc00_15 offset, yreg + precalc0 \offset + precalc1 \offset + precalc2 \yreg + precalc4 \yreg, 0 + precalc7 \offset +.endm + + /* helper macros for precalc16_31 */ +.macro precalc16 reg_sub16, reg_sub12, reg_sub4, reg + vpalignr $8, \reg_sub16, \reg_sub12, \reg // w[i - 14] + vpsrldq $4, \reg_sub4, %ymm0 // w[i - 3] +.endm + +.macro precalc17 reg_sub16, reg_sub8, reg + vpxor \reg_sub8, \reg, \reg + vpxor \reg_sub16, %ymm0, %ymm0 +.endm + +.macro precalc18 reg + vpxor %ymm0, \reg, \reg + vpslldq $12, \reg, %ymm9 +.endm + +.macro precalc19 reg + vpslld $1, \reg, %ymm0 + vpsrld $31, \reg, \reg + .endm + +.macro precalc20 reg + vpor \reg, %ymm0, %ymm0 + vpslld $2, %ymm9, \reg +.endm + +.macro precalc21 reg + vpsrld $30, %ymm9, %ymm9 + vpxor \reg, %ymm0, %ymm0 +.endm + +.macro precalc23 reg, k_offset, offset + vpxor %ymm9, %ymm0, \reg + vpaddd \k_offset(%r8), \reg, %ymm0 + vmovdqu %ymm0, (\offset)(%r14) +.endm + +/* + * Message scheduling pre-compute for rounds 16-31 + * calculating last 32 w[i] values in 8 XMM registers + * pre-calculate K+w[i] values and store to mem + * for later load by ALU add instruction. + * "brute force" vectorization for rounds 16-31 only + * due to w[i]->w[i-3] dependency. 
+ + clobbers 5 input ymm registers REG_SUB* + * uses xmm0 and xmm9 as temp registers + * As always, r8 is a pointer to constants block + * and r14 is a pointer to temp buffer + */ +.macro precalc16_31 reg, reg_sub4, reg_sub8, reg_sub12, reg_sub16, k_offset, offset + precalc16 \reg_sub16, \reg_sub12, \reg_sub4, \reg + precalc17 \reg_sub16, \reg_sub8, \reg + precalc18 \reg + precalc19 \reg + precalc20 \reg + precalc21 \reg + precalc23 \reg, \k_offset, \offset +.endm + + /* helper macros for precalc_32_79 */ +.macro precalc32 reg_sub8, reg_sub4 + vpalignr $8, \reg_sub8, \reg_sub4, %ymm0 +.endm + +.macro precalc33 reg_sub28, reg + vpxor \reg_sub28, \reg, \reg +.endm + +.macro precalc34 reg_sub16 + vpxor \reg_sub16, %ymm0, %ymm0 +.endm + +.macro precalc35 reg + vpxor %ymm0, \reg, \reg +.endm + +.macro precalc36 reg + vpslld $2, \reg, %ymm0 +.endm + +.macro precalc37 reg + vpsrld $30, \reg, \reg + vpor \reg, %ymm0, \reg +.endm + +.macro precalc39 reg, k_offset, offset + vpaddd \k_offset(%r8), \reg, %ymm0 + vmovdqu %ymm0, \offset(%r14) +.endm + +.macro precalc32_79 reg, reg_sub4, reg_sub8, reg_sub16, reg_sub28, k_offset, offset + precalc32 \reg_sub8, \reg_sub4 + precalc33 \reg_sub28, \reg + precalc34 \reg_sub16 + precalc35 \reg + precalc36 \reg + precalc37 \reg + precalc39 \reg, \k_offset, \offset +.endm + +.macro precalc + precalc00_15 0x00, %ymm15 + precalc00_15 0x10, %ymm14 + precalc00_15 0x20, %ymm13 + precalc00_15 0x30, %ymm12 + precalc16_31 %ymm8, %ymm12, %ymm13, %ymm14, %ymm15, 0x00, 0x080 + precalc16_31 %ymm7, %ymm8, %ymm12, %ymm13, %ymm14, 0x20, 0x0a0 + precalc16_31 %ymm5, %ymm7, %ymm8, %ymm12, %ymm13, 0x20, 0x0c0 + precalc16_31 %ymm3, %ymm5, %ymm7, %ymm8, %ymm12, 0x20, 0x0e0 + precalc32_79 %ymm15, %ymm3, %ymm5, %ymm8, %ymm14, 0x20, 0x100 + precalc32_79 %ymm14, %ymm15, %ymm3, %ymm7, %ymm13, 0x20, 0x120 + precalc32_79 %ymm13, %ymm14, %ymm15, %ymm5, %ymm12, 0x40, 0x140 + precalc32_79 %ymm12, %ymm13, %ymm14, %ymm3, %ymm8, 0x40, 0x160 + precalc32_79 %ymm8, %ymm12, %ymm13, %ymm15, %ymm7, 0x40, 0x180 + precalc32_79 %ymm7, %ymm8, %ymm12, %ymm14, %ymm5, 0x40, 0x1a0 + precalc32_79 %ymm5, %ymm7, %ymm8, %ymm13, %ymm3, 0x40, 0x1c0 + precalc32_79 %ymm3, %ymm5, %ymm7, %ymm12, %ymm15, 0x60, 0x1e0 + precalc32_79 %ymm15, %ymm3, %ymm5, %ymm8, %ymm14, 0x60, 0x200 + precalc32_79 %ymm14, %ymm15, %ymm3, %ymm7, %ymm13, 0x60, 0x220 + precalc32_79 %ymm13, %ymm14, %ymm15, %ymm5, %ymm12, 0x60, 0x240 + precalc32_79 %ymm12, %ymm13, %ymm14, %ymm3, %ymm8, 0x60, 0x260 +.endm + +/* + * Macros calculating individual rounds have general form + * calc_round_pre + precalc_round + calc_round_post + * calc_round_{pre,post} macros follow + */ +.macro calc_f1_pre offset, reg_a, reg_b, reg_c, reg_e + add \offset(%r15), \reg_e + andn \reg_c, \reg_a, %ebp + add \reg_b, \reg_e // add F from the previous round + rorx $0x1b, \reg_a, %r12d + rorx $2, \reg_a, \reg_b // for the next round +.endm + +/* + * Calculate F for the next round + */ +.macro calc_f1_post reg_a, reg_b, reg_e + and \reg_b, \reg_a // b & c + xor %ebp, \reg_a // F1 = (b&c) ^ (~b&d) + add %r12d, \reg_e +.endm + +/* + * Registers are cyclically rotated: + * edx -> eax -> edi -> esi -> ebx -> ecx + */ +.macro calc0 + mov %esi, %ebx // precalculate first round + rorx $2, %esi, %esi + andn %eax, %ebx, %ebp + and %edi, %ebx + xor %ebp, %ebx + calc_f1_pre 0x0, %ecx, %ebx, %edi, %edx + precalc0 0x80 + calc_f1_post %ecx, %esi, %edx +.endm + +.macro calc1 + calc_f1_pre 0x4, %edx, %ecx, %esi, %eax + precalc1 0x80 + calc_f1_post %edx, %ebx, %eax +.endm + +.macro calc2 + 
calc_f1_pre 0x8, %eax, %edx, %ebx, %edi + precalc2 %ymm15 + calc_f1_post %eax, %ecx, %edi +.endm + +.macro calc3 + calc_f1_pre 0xc, %edi, %eax, %ecx, %esi + calc_f1_post %edi, %edx, %esi +.endm + +.macro calc4 + calc_f1_pre 0x20, %esi, %edi, %edx, %ebx + precalc4 %ymm15, 0x0 + calc_f1_post %esi, %eax, %ebx +.endm + +.macro calc5 + calc_f1_pre 0x24, %ebx, %esi, %eax, %ecx + calc_f1_post %ebx, %edi, %ecx +.endm + +.macro calc6 + calc_f1_pre 0x28, %ecx, %ebx, %edi, %edx + calc_f1_post %ecx, %esi, %edx +.endm + +.macro calc7 + calc_f1_pre 0x2c, %edx, %ecx, %esi, %eax + precalc7 0x0 + calc_f1_post %edx, %ebx, %eax +.endm + +.macro calc8 + calc_f1_pre 0x40, %eax, %edx, %ebx, %edi + precalc0 0x90 + calc_f1_post %eax, %ecx, %edi +.endm + +.macro calc9 + calc_f1_pre 0x44, %edi, %eax, %ecx, %esi + precalc1 0x90 + calc_f1_post %edi, %edx, %esi +.endm + +.macro calc10 + calc_f1_pre 0x48, %esi, %edi, %edx, %ebx + precalc2 %ymm14 + calc_f1_post %esi, %eax, %ebx +.endm + +.macro calc11 + calc_f1_pre 0x4c, %ebx, %esi, %eax, %ecx + calc_f1_post %ebx, %edi, %ecx +.endm + +.macro calc12 + calc_f1_pre 0x60, %ecx, %ebx, %edi, %edx + precalc4 %ymm14, 0 + calc_f1_post %ecx, %esi, %edx +.endm + +.macro calc13 + calc_f1_pre 0x64, %edx, %ecx, %esi, %eax + calc_f1_post %edx, %ebx, %eax +.endm + +.macro calc14 + calc_f1_pre 0x68, %eax, %edx, %ebx, %edi + calc_f1_post %eax, %ecx, %edi +.endm + +.macro calc15 + calc_f1_pre 0x6c, %edi, %eax, %ecx, %esi + precalc7 0x10 + calc_f1_post %edi, %edx, %esi +.endm + +.macro calc16 + calc_f1_pre 0x80, %esi, %edi, %edx, %ebx + precalc0 0xa0 + calc_f1_post %esi, %eax, %ebx +.endm + +.macro calc17 + calc_f1_pre 0x84, %ebx, %esi, %eax, %ecx + precalc1 0xa0 + calc_f1_post %ebx, %edi, %ecx +.endm + +.macro calc18 + calc_f1_pre 0x88, %ecx, %ebx, %edi, %edx + precalc2 %ymm13 + calc_f1_post %ecx, %esi, %edx +.endm + +.macro calc_f2_pre offset, reg_a, reg_b, reg_e + add \offset(%r15), \reg_e + add \reg_b, \reg_e // add F from the previous round + rorx $0x1b, \reg_a, %r12d + rorx $2, \reg_a, \reg_b // for next round +.endm + +.macro calc_f2_post reg_a, reg_b, reg_c, reg_e + xor \reg_b, \reg_a + add %r12d, \reg_e + xor \reg_c, \reg_a +.endm + +.macro calc19 + calc_f2_pre 0x8c, %edx, %ecx, %eax + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc20 + calc_f2_pre 0xa0, %eax, %edx, %edi + precalc4 %ymm13, 0x0 + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc21 + calc_f2_pre 0xa4, %edi, %eax, %esi + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc22 + calc_f2_pre 0xa8, %esi, %edi, %ebx + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc23 + calc_f2_pre 0xac, %ebx, %esi, %ecx + precalc7 0x20 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc24 + calc_f2_pre 0xc0, %ecx, %ebx, %edx + precalc0 0xb0 + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc25 + calc_f2_pre 0xc4, %edx, %ecx, %eax + precalc1 0xb0 + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc26 + calc_f2_pre 0xc8, %eax, %edx, %edi + precalc2 %ymm12 + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc27 + calc_f2_pre 0xcc, %edi, %eax, %esi + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc28 + calc_f2_pre 0xe0, %esi, %edi, %ebx + precalc4 %ymm12, 0x0 + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc29 + calc_f2_pre 0xe4, %ebx, %esi, %ecx + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc30 + calc_f2_pre 0xe8, %ecx, %ebx, %edx + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc31 + calc_f2_pre 0xec, %edx, %ecx, %eax + precalc7 0x30 + 
calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc32 + calc_f2_pre 0x100, %eax, %edx, %edi + precalc16 %ymm15, %ymm14, %ymm12, %ymm8 + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc33 + calc_f2_pre 0x104, %edi, %eax, %esi + precalc17 %ymm15, %ymm13, %ymm8 + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc34 + calc_f2_pre 0x108, %esi, %edi, %ebx + precalc18 %ymm8 + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc35 + calc_f2_pre 0x10c, %ebx, %esi, %ecx + precalc19 %ymm8 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc36 + calc_f2_pre 0x120, %ecx, %ebx, %edx + precalc20 %ymm8 + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc37 + calc_f2_pre 0x124, %edx, %ecx, %eax + precalc21 %ymm8 + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc38 + calc_f2_pre 0x128, %eax, %edx, %edi + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc_f3_pre offset, reg_e + add \offset(%r15), \reg_e +.endm + +.macro calc_f3_post reg_a, reg_b, reg_c, reg_e, reg_tb + add \reg_tb, \reg_e // add F from the previous round + mov \reg_b, %ebp + or \reg_a, %ebp + rorx $0x1b, \reg_a, %r12d + rorx $2, \reg_a, \reg_tb + and \reg_c, %ebp // calculate F for the next round + and \reg_b, \reg_a + or %ebp, \reg_a + add %r12d, \reg_e +.endm + +.macro calc39 + calc_f3_pre 0x12c, %esi + precalc23 %ymm8, 0x0, 0x80 + calc_f3_post %edi, %edx, %ecx, %esi, %eax +.endm + +.macro calc40 + calc_f3_pre 0x140, %ebx + precalc16 %ymm14, %ymm13, %ymm8, %ymm7 + calc_f3_post %esi, %eax, %edx, %ebx, %edi +.endm + +.macro calc41 + calc_f3_pre 0x144, %ecx + precalc17 %ymm14, %ymm12, %ymm7 + calc_f3_post %ebx, %edi, %eax, %ecx, %esi +.endm + +.macro calc42 + calc_f3_pre 0x148, %edx + precalc18 %ymm7 + calc_f3_post %ecx, %esi, %edi, %edx, %ebx +.endm + +.macro calc43 + calc_f3_pre 0x14c, %eax + precalc19 %ymm7 + calc_f3_post %edx, %ebx, %esi, %eax, %ecx +.endm + +.macro calc44 + calc_f3_pre 0x160, %edi + precalc20 %ymm7 + calc_f3_post %eax, %ecx, %ebx, %edi, %edx +.endm + +.macro calc45 + calc_f3_pre 0x164, %esi + precalc21 %ymm7 + calc_f3_post %edi, %edx, %ecx, %esi, %eax +.endm + +.macro calc46 + calc_f3_pre 0x168, %ebx + calc_f3_post %esi, %eax, %edx, %ebx, %edi +.endm + +.macro calc47 + calc_f3_pre 0x16c, %ecx + vpxor %ymm9, %ymm0, %ymm7 + vpaddd 0x20(%r8), %ymm7, %ymm0 + vmovdqu %ymm0, 0xa0(%r14) + calc_f3_post %ebx, %edi, %eax, %ecx, %esi +.endm + +.macro calc48 + calc_f3_pre 0x180, %edx + precalc16 %ymm13, %ymm12, %ymm7, %ymm5 + calc_f3_post %ecx, %esi, %edi, %edx, %ebx +.endm + +.macro calc49 + calc_f3_pre 0x184, %eax + precalc17 %ymm13, %ymm8, %ymm5 + calc_f3_post %edx, %ebx, %esi, %eax, %ecx +.endm + +.macro calc50 + calc_f3_pre 0x188, %edi + precalc18 %ymm5 + calc_f3_post %eax, %ecx, %ebx, %edi, %edx +.endm + +.macro calc51 + calc_f3_pre 0x18c, %esi + precalc19 %ymm5 + calc_f3_post %edi, %edx, %ecx, %esi, %eax +.endm + +.macro calc52 + calc_f3_pre 0x1a0, %ebx + precalc20 %ymm5 + calc_f3_post %esi, %eax, %edx, %ebx, %edi +.endm + +.macro calc53 + calc_f3_pre 0x1a4, %ecx + precalc21 %ymm5 + calc_f3_post %ebx, %edi, %eax, %ecx, %esi +.endm + +.macro calc54 + calc_f3_pre 0x1a8, %edx + calc_f3_post %ecx, %esi, %edi, %edx, %ebx +.endm + +.macro calc55 + calc_f3_pre 0x1ac, %eax + precalc23 %ymm5, 0x20, 0xc0 + calc_f3_post %edx, %ebx, %esi, %eax, %ecx +.endm + +.macro calc56 + calc_f3_pre 0x1c0, %edi + precalc16 %ymm12, %ymm8, %ymm5, %ymm3 + calc_f3_post %eax, %ecx, %ebx, %edi, %edx +.endm + +.macro calc57 + calc_f3_pre 0x1c4, %esi + precalc17 %ymm12, %ymm7, %ymm3 + calc_f3_post 
%edi, %edx, %ecx, %esi, %eax +.endm + +.macro calc58 + calc_f3_pre 0x1c8, %ebx + precalc18 %ymm3 + calc_f3_post %esi, %eax, %edx, %ebx, %edi +.endm + +.macro calc59 + calc_f2_pre 0x1cc, %ebx, %esi, %ecx + precalc19 %ymm3 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc60 + calc_f2_pre 0x1e0, %ecx, %ebx, %edx + precalc20 %ymm3 + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc61 + calc_f2_pre 0x1e4, %edx, %ecx, %eax + precalc21 %ymm3 + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc62 + calc_f2_pre 0x1e8, %eax, %edx, %edi + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc63 + calc_f2_pre 0x1ec, %edi, %eax, %esi + precalc23 %ymm3, 0x20, 0xe0 + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc64 + calc_f2_pre 0x200, %esi, %edi, %ebx + precalc32 %ymm5, %ymm3 + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc65 + calc_f2_pre 0x204, %ebx, %esi, %ecx + precalc33 %ymm14, %ymm15 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc66 + calc_f2_pre 0x208, %ecx, %ebx, %edx + precalc34 %ymm8 + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc67 + calc_f2_pre 0x20c, %edx, %ecx, %eax + precalc35 %ymm15 + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc68 + calc_f2_pre 0x220, %eax, %edx, %edi + precalc36 %ymm15 + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc69 + calc_f2_pre 0x224, %edi, %eax, %esi + precalc37 %ymm15 + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc70 + calc_f2_pre 0x228, %esi, %edi, %ebx + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc71 + calc_f2_pre 0x22c, %ebx, %esi, %ecx + precalc39 %ymm15, 0x20, 0x100 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc72 + calc_f2_pre 0x240, %ecx, %ebx, %edx + precalc32 %ymm3, %ymm15 + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc73 + calc_f2_pre 0x244, %edx, %ecx, %eax + precalc33 %ymm13, %ymm14 + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc74 + calc_f2_pre 0x248, %eax, %edx, %edi + precalc34 %ymm7 + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc75 + calc_f2_pre 0x24c, %edi, %eax, %esi + precalc35 %ymm14 + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc76 + calc_f2_pre 0x260, %esi, %edi, %ebx + precalc36 %ymm14 + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc77 + calc_f2_pre 0x264, %ebx, %esi, %ecx + precalc37 %ymm14 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc78 + calc_f2_pre 0x268, %ecx, %ebx, %edx + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc79 + add 0x26c(%r15), %eax + add %ecx, %eax + rorx $0x1b, %edx, %r12d + precalc39 %ymm14, 0x20, 0x120 + add %r12d, %eax +.endm + +/* + * Similar to calc0 + */ +.macro calc80 + mov %ecx, %edx // precalculate first round + rorx $2, %ecx, %ecx + andn %esi, %edx, %ebp + and %ebx, %edx + xor %ebp, %edx + calc_f1_pre 0x10, %eax, %edx, %ebx, %edi + precalc32 %ymm15, %ymm14 + calc_f1_post %eax, %ecx, %edi +.endm + +.macro calc81 + calc_f1_pre 0x14, %edi, %eax, %ecx, %esi + precalc33 %ymm12, %ymm13 + calc_f1_post %edi, %edx, %esi +.endm + +.macro calc82 + calc_f1_pre 0x18, %esi, %edi, %edx, %ebx + precalc34 %ymm5 + calc_f1_post %esi, %eax, %ebx +.endm + +.macro calc83 + calc_f1_pre 0x1c, %ebx, %esi, %eax, %ecx + precalc35 %ymm13 + calc_f1_post %ebx, %edi, %ecx +.endm + +.macro calc84 + calc_f1_pre 0x30, %ecx, %ebx, %edi, %edx + precalc36 %ymm13 + calc_f1_post %ecx, %esi, %edx +.endm + +.macro calc85 + calc_f1_pre 0x34, %edx, %ecx, %esi, %eax + precalc37 %ymm13 + calc_f1_post %edx, %ebx, %eax +.endm + +.macro calc86 
+ calc_f1_pre 0x38, %eax, %edx, %ebx, %edi + calc_f1_post %eax, %ecx, %edi +.endm + +.macro calc87 + calc_f1_pre 0x3c, %edi, %eax, %ecx, %esi + precalc39 %ymm13, 0x40, 0x140 + calc_f1_post %edi, %edx, %esi +.endm + +.macro calc88 + calc_f1_pre 0x50, %esi, %edi, %edx, %ebx + precalc32 %ymm14, %ymm13 + calc_f1_post %esi, %eax, %ebx +.endm + +.macro calc89 + calc_f1_pre 0x54, %ebx, %esi, %eax, %ecx + precalc33 %ymm8, %ymm12 + calc_f1_post %ebx, %edi, %ecx +.endm + +.macro calc90 + calc_f1_pre 0x58, %ecx, %ebx, %edi, %edx + precalc34 %ymm3 + calc_f1_post %ecx, %esi, %edx +.endm + +.macro calc91 + calc_f1_pre 0x5c, %edx, %ecx, %esi, %eax + precalc35 %ymm12 + calc_f1_post %edx, %ebx, %eax +.endm + +.macro calc92 + calc_f1_pre 0x70, %eax, %edx, %ebx, %edi + precalc36 %ymm12 + calc_f1_post %eax, %ecx, %edi +.endm + +.macro calc93 + calc_f1_pre 0x74, %edi, %eax, %ecx, %esi + precalc37 %ymm12 + calc_f1_post %edi, %edx, %esi +.endm + +.macro calc94 + calc_f1_pre 0x78, %esi, %edi, %edx, %ebx + calc_f1_post %esi, %eax, %ebx +.endm + +.macro calc95 + calc_f1_pre 0x7c, %ebx, %esi, %eax, %ecx + precalc39 %ymm12, 0x40, 0x160 + calc_f1_post %ebx, %edi, %ecx +.endm + +.macro calc96 + calc_f1_pre 0x90, %ecx, %ebx, %edi, %edx + precalc32 %ymm13, %ymm12 + calc_f1_post %ecx, %esi, %edx +.endm + +.macro calc97 + calc_f1_pre 0x94, %edx, %ecx, %esi, %eax + precalc33 %ymm7, %ymm8 + calc_f1_post %edx, %ebx, %eax +.endm + +.macro calc98 + calc_f1_pre 0x98, %eax, %edx, %ebx, %edi + precalc34 %ymm15 + calc_f1_post %eax, %ecx, %edi +.endm + +.macro calc99 + calc_f2_pre 0x9c, %edi, %eax, %esi + precalc35 %ymm8 + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc100 + calc_f2_pre 0xb0, %esi, %edi, %ebx + precalc36 %ymm8 + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc101 + calc_f2_pre 0xb4, %ebx, %esi, %ecx + precalc37 %ymm8 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc102 + calc_f2_pre 0xb8, %ecx, %ebx, %edx + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc103 + calc_f2_pre 0xbc, %edx, %ecx, %eax + precalc39 %ymm8, 0x40, 0x180 + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc104 + calc_f2_pre 0xd0, %eax, %edx, %edi + precalc32 %ymm12, %ymm8 + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc105 + calc_f2_pre 0xd4, %edi, %eax, %esi + precalc33 %ymm5, %ymm7 + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc106 + calc_f2_pre 0xd8, %esi, %edi, %ebx + precalc34 %ymm14 + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc107 + calc_f2_pre 0xdc, %ebx, %esi, %ecx + precalc35 %ymm7 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc108 + calc_f2_pre 0xf0, %ecx, %ebx, %edx + precalc36 %ymm7 + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc109 + calc_f2_pre 0xf4, %edx, %ecx, %eax + precalc37 %ymm7 + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc110 + calc_f2_pre 0xf8, %eax, %edx, %edi + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc111 + calc_f2_pre 0xfc, %edi, %eax, %esi + precalc39 %ymm7, 0x40, 0x1a0 + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc112 + calc_f2_pre 0x110, %esi, %edi, %ebx + precalc32 %ymm8, %ymm7 + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc113 + calc_f2_pre 0x114, %ebx, %esi, %ecx + precalc33 %ymm3, %ymm5 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc114 + calc_f2_pre 0x118, %ecx, %ebx, %edx + precalc34 %ymm13 + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc115 + calc_f2_pre 0x11c, %edx, %ecx, %eax + precalc35 %ymm5 + calc_f2_post %edx, %ebx, %esi, 
%eax +.endm + +.macro calc116 + calc_f2_pre 0x130, %eax, %edx, %edi + precalc36 %ymm5 + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc117 + calc_f2_pre 0x134, %edi, %eax, %esi + precalc37 %ymm5 + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc118 + calc_f2_pre 0x138, %esi, %edi, %ebx + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc119 + calc_f3_pre 0x13c, %ecx + precalc39 %ymm5, 0x40, 0x1c0 + calc_f3_post %ebx, %edi, %eax, %ecx, %esi +.endm + +.macro calc120 + calc_f3_pre 0x150, %edx + precalc32 %ymm7, %ymm5 + calc_f3_post %ecx, %esi, %edi, %edx, %ebx +.endm + +.macro calc121 + calc_f3_pre 0x154, %eax + precalc33 %ymm15, %ymm3 + calc_f3_post %edx, %ebx, %esi, %eax, %ecx +.endm + +.macro calc122 + calc_f3_pre 0x158, %edi + precalc34 %ymm12 + calc_f3_post %eax, %ecx, %ebx, %edi, %edx +.endm + +.macro calc123 + calc_f3_pre 0x15c, %esi + precalc35 %ymm3 + calc_f3_post %edi, %edx, %ecx, %esi, %eax +.endm + +.macro calc124 + calc_f3_pre 0x170, %ebx + precalc36 %ymm3 + calc_f3_post %esi, %eax, %edx, %ebx, %edi +.endm + +.macro calc125 + calc_f3_pre 0x174, %ecx + precalc37 %ymm3 + calc_f3_post %ebx, %edi, %eax, %ecx, %esi +.endm + +.macro calc126 + calc_f3_pre 0x178, %edx + calc_f3_post %ecx, %esi, %edi, %edx, %ebx +.endm + +.macro calc127 + calc_f3_pre 0x17c, %eax + precalc39 %ymm3, 0x60, 0x1e0 + calc_f3_post %edx, %ebx, %esi, %eax, %ecx +.endm + +.macro calc128 + calc_f3_pre 0x190, %edi + precalc32 %ymm5, %ymm3 + calc_f3_post %eax, %ecx, %ebx, %edi, %edx +.endm + +.macro calc129 + calc_f3_pre 0x194, %esi + precalc33 %ymm14, %ymm15 + calc_f3_post %edi, %edx, %ecx, %esi, %eax +.endm + +.macro calc130 + calc_f3_pre 0x198, %ebx + precalc34 %ymm8 + calc_f3_post %esi, %eax, %edx, %ebx, %edi +.endm + +.macro calc131 + calc_f3_pre 0x19c, %ecx + precalc35 %ymm15 + calc_f3_post %ebx, %edi, %eax, %ecx, %esi +.endm + +.macro calc132 + calc_f3_pre 0x1b0, %edx + precalc36 %ymm15 + calc_f3_post %ecx, %esi, %edi, %edx, %ebx +.endm + +.macro calc133 + calc_f3_pre 0x1b4, %eax + precalc37 %ymm15 + calc_f3_post %edx, %ebx, %esi, %eax, %ecx +.endm + +.macro calc134 + calc_f3_pre 0x1b8, %edi + calc_f3_post %eax, %ecx, %ebx, %edi, %edx +.endm + +.macro calc135 + calc_f3_pre 0x1bc, %esi + precalc39 %ymm15, 0x60, 0x200 + calc_f3_post %edi, %edx, %ecx, %esi, %eax +.endm + +.macro calc136 + calc_f3_pre 0x1d0, %ebx + precalc32 %ymm3, %ymm15 + calc_f3_post %esi, %eax, %edx, %ebx, %edi +.endm + +.macro calc137 + calc_f3_pre 0x1d4, %ecx + precalc33 %ymm13, %ymm14 + calc_f3_post %ebx, %edi, %eax, %ecx, %esi +.endm + +.macro calc138 + calc_f3_pre 0x1d8, %edx + precalc34 %ymm7 + calc_f3_post %ecx, %esi, %edi, %edx, %ebx +.endm + +.macro calc139 + calc_f2_pre 0x1dc, %edx, %ecx, %eax + precalc35 %ymm14 + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc140 + calc_f2_pre 0x1f0, %eax, %edx, %edi + precalc36 %ymm14 + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc141 + calc_f2_pre 0x1f4, %edi, %eax, %esi + precalc37 %ymm14 + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc142 + calc_f2_pre 0x1f8, %esi, %edi, %ebx + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc143 + calc_f2_pre 0x1fc, %ebx, %esi, %ecx + precalc39 %ymm14, 0x60, 0x220 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc144 + calc_f2_pre 0x210, %ecx, %ebx, %edx + precalc32 %ymm15, %ymm14 + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc145 + calc_f2_pre 0x214, %edx, %ecx, %eax + precalc33 %ymm12, %ymm13 + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc146 + calc_f2_pre 
0x218, %eax, %edx, %edi + precalc34 %ymm5 + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc147 + calc_f2_pre 0x21c, %edi, %eax, %esi + precalc35 %ymm13 + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc148 + calc_f2_pre 0x230, %esi, %edi, %ebx + precalc36 %ymm13 + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc149 + calc_f2_pre 0x234, %ebx, %esi, %ecx + precalc37 %ymm13 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc150 + calc_f2_pre 0x238, %ecx, %ebx, %edx + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc151 + calc_f2_pre 0x23c, %edx, %ecx, %eax + precalc39 %ymm13, 0x60, 0x240 + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc152 + calc_f2_pre 0x250, %eax, %edx, %edi + precalc32 %ymm14, %ymm13 + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc153 + calc_f2_pre 0x254, %edi, %eax, %esi + precalc33 %ymm8, %ymm12 + calc_f2_post %edi, %edx, %ecx, %esi +.endm + +.macro calc154 + calc_f2_pre 0x258, %esi, %edi, %ebx + precalc34 %ymm3 + calc_f2_post %esi, %eax, %edx, %ebx +.endm + +.macro calc155 + calc_f2_pre 0x25c, %ebx, %esi, %ecx + precalc35 %ymm12 + calc_f2_post %ebx, %edi, %eax, %ecx +.endm + +.macro calc156 + calc_f2_pre 0x270, %ecx, %ebx, %edx + precalc36 %ymm12 + calc_f2_post %ecx, %esi, %edi, %edx +.endm + +.macro calc157 + calc_f2_pre 0x274, %edx, %ecx, %eax + precalc37 %ymm12 + calc_f2_post %edx, %ebx, %esi, %eax +.endm + +.macro calc158 + calc_f2_pre 0x278, %eax, %edx, %edi + calc_f2_post %eax, %ecx, %ebx, %edi +.endm + +.macro calc159 + add 0x27c(%r15), %esi + add %eax, %esi + rorx $0x1b, %edi, %r12d + precalc39 %ymm12, 0x60, 0x260 + add %r12d, %esi +.endm + + // sha1block(SHA1_CTX, buf, len) +ENTRY(_libmd_sha1block_avx2) + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + sub $1408+8, %rsp + + and $~63, %rdx + lea k_xmm_ar(%rip), %r8 + mov %rdi, %r9 + mov %rsi, %r10 + lea 64(%rsi), %r13 + lea 64(%rsi, %rdx), %r11 + cmp %r11, %r13 + cmovae %r8, %r13 + vmovdqu bswap_shufb_ctl(%rip), %ymm10 + + mov (%r9), %ecx + mov 4(%r9), %esi + mov 8(%r9), %edi + mov 12(%r9), %eax + mov 16(%r9), %edx + mov %rsp, %r14 + lea 2*4*80+32(%rsp), %r15 + precalc // precalc WK for first 2 blocks + xchg %r14, %r15 + + // this is unrolled +.Loop: cmp %r8, %r10 // we use the value of R8 (set below) + // as a signal of the last block + jne .Lbegin + add $1408+8, %rsp + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + vzeroupper + ret + +.Lbegin: + calc0 + calc1 + calc2 + calc3 + calc4 + calc5 + calc6 + calc7 + calc8 + calc9 + calc10 + calc11 + calc12 + calc13 + calc14 + calc15 + calc16 + calc17 + calc18 + calc19 + calc20 + calc21 + calc22 + calc23 + calc24 + calc25 + calc26 + calc27 + calc28 + calc29 + calc30 + calc31 + calc32 + calc33 + calc34 + calc35 + calc36 + calc37 + calc38 + calc39 + calc40 + calc41 + calc42 + calc43 + calc44 + calc45 + calc46 + calc47 + calc48 + calc49 + calc50 + calc51 + calc52 + calc53 + calc54 + calc55 + calc56 + calc57 + calc58 + calc59 + + add $128, %r10 // move to the next even-64-byte block + cmp %r11, %r10 // is the current block the last one? + cmovae %r8, %r10 // signal the last iteration smartly + + calc60 + calc61 + calc62 + calc63 + calc64 + calc65 + calc66 + calc67 + calc68 + calc69 + calc70 + calc71 + calc72 + calc73 + calc74 + calc75 + calc76 + calc77 + calc78 + calc79 + + update_hash %eax, %edx, %ebx, %esi, %edi + cmp %r8, %r10 // is the current block the last one? 
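+	// %r8 holds the address of k_xmm_ar and doubles as an end-of-input
+	// sentinel: once a block pointer moves past the end pointer in %r11,
+	// the cmovae above replaces it with %r8, so this compare (and the
+	// one at .Loop) recognizes the final iteration without a counter.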
+ je .Loop + mov %edx, %ecx + + calc80 + calc81 + calc82 + calc83 + calc84 + calc85 + calc86 + calc87 + calc88 + calc89 + calc90 + calc91 + calc92 + calc93 + calc94 + calc95 + calc96 + calc97 + calc98 + calc99 + calc100 + calc101 + calc102 + calc103 + calc104 + calc105 + calc106 + calc107 + calc108 + calc109 + calc110 + calc111 + calc112 + calc113 + calc114 + calc115 + calc116 + calc117 + calc118 + calc119 + calc120 + calc121 + calc122 + calc123 + calc124 + calc125 + calc126 + calc127 + calc128 + calc129 + calc130 + calc131 + calc132 + calc133 + calc134 + calc135 + calc136 + calc137 + calc138 + calc139 + + add $128, %r13 // move to the next even-64-byte block + cmp %r11, %r13 // is the current block the last one? + cmovae %r8, %r10 + + calc140 + calc141 + calc142 + calc143 + calc144 + calc145 + calc146 + calc147 + calc148 + calc149 + calc150 + calc151 + calc152 + calc153 + calc154 + calc155 + calc156 + calc157 + calc158 + calc159 + + update_hash %esi, %edi, %edx, %ecx, %ebx + mov %esi, %r12d // reset state for AVX2 reg permutation + mov %edi, %esi + mov %edx, %edi + mov %ebx, %edx + mov %ecx, %eax + mov %r12d, %ecx + xchg %r14, %r15 + jmp .Loop +END(_libmd_sha1block_avx2) + + .section .rodata + .balign 32 +k_xmm_ar: + .fill 8, 4, 0x5a827999 + .fill 8, 4, 0x6ed9eba1 + .fill 8, 4, 0x8f1bbcdc + .fill 8, 4, 0xca62c1d6 + .size k_xmm_ar, .-k_xmm_ar + +bswap_shufb_ctl: + .4byte 0x00010203 + .4byte 0x04050607 + .4byte 0x08090a0b + .4byte 0x0c0d0e0f + .4byte 0x00010203 + .4byte 0x04050607 + .4byte 0x08090a0b + .4byte 0x0c0d0e0f + .size bswap_shufb_ctl, .-bswap_shufb_ctl + + /* + * SHA1 implementation using the Intel SHA extensions (SHANI). + * + * Imlemented according to the Intel white paper + * + * S. Gulley, V. Gopal, K. Yap, W. Feghali, J. Guilford, + * G. Wolrich: "Intel SHA Extensions: new instruction supporting + * the Secure Hash Algorithm on Intel® architecture processors", + * July 2013. + */ + // sha1block(SHA1_CTX, buf, len) +ENTRY(_libmd_sha1block_shani) + and $~63, %rdx // round length to block-size multiple + lea (%rsi, %rdx, 1), %rcx // end pointer + test %rdx, %rdx // nothing to do? 
+ je 1f // if so, terminate immediately + + movdqu (%rdi), %xmm6 // h0, h1, h2, h3 + pxor %xmm7, %xmm7 + pshufd $0x1b, %xmm6, %xmm6 // h3, h2, h1, h0 + pinsrd $3, 16(%rdi), %xmm7 // h4 in the highest word of xmm7 + movdqu shuf_mask(%rip), %xmm4 + + // main loop +0: movdqa %xmm6, %xmm8 // stash ABCD + movdqa %xmm7, %xmm9 // stash E + + // rounds 0--3 + movdqu 0*16(%rsi), %xmm0 // load first message block + pshufb %xmm4, %xmm0 // and byte-swap + paddd %xmm0, %xmm7 // E += w[0] + movdqa %xmm6, %xmm5 // E' = A + sha1rnds4 $0, %xmm7, %xmm6 // perform rounds 0--3 + + // rounds 4--7 + movdqu 1*16(%rsi), %xmm1 + pshufb %xmm4, %xmm1 + sha1nexte %xmm1, %xmm5 + movdqa %xmm6, %xmm7 + sha1rnds4 $0, %xmm5, %xmm6 + sha1msg1 %xmm1, %xmm0 + + // rounds 8--11 + movdqu 2*16(%rsi), %xmm2 + pshufb %xmm4, %xmm2 + sha1nexte %xmm2, %xmm7 + movdqa %xmm6, %xmm5 + sha1rnds4 $0, %xmm7, %xmm6 + sha1msg1 %xmm2, %xmm1 + pxor %xmm2, %xmm0 + +.macro midround msg3, msg0, msg1, msg2, e1, e0, k + sha1nexte \msg3, \e1 + movdqa %xmm6, \e0 + sha1msg2 \msg3, \msg0 + sha1rnds4 $\k, \e1, %xmm6 + sha1msg1 \msg3, \msg2 + pxor \msg3, \msg1 +.endm + + movdqu 3*16(%rsi), %xmm3 // load third message block + pshufb %xmm4, %xmm3 + + add $4*16, %rsi + + midround %xmm3, %xmm0, %xmm1, %xmm2, %xmm5, %xmm7, 0 // 12--15 + midround %xmm0, %xmm1, %xmm2, %xmm3, %xmm7, %xmm5, 0 // 16--19 + midround %xmm1, %xmm2, %xmm3, %xmm0, %xmm5, %xmm7, 1 // 20--23 + midround %xmm2, %xmm3, %xmm0, %xmm1, %xmm7, %xmm5, 1 // 24--27 + midround %xmm3, %xmm0, %xmm1, %xmm2, %xmm5, %xmm7, 1 // 28--31 + midround %xmm0, %xmm1, %xmm2, %xmm3, %xmm7, %xmm5, 1 // 32--35 + midround %xmm1, %xmm2, %xmm3, %xmm0, %xmm5, %xmm7, 1 // 36--39 + midround %xmm2, %xmm3, %xmm0, %xmm1, %xmm7, %xmm5, 2 // 40--43 + midround %xmm3, %xmm0, %xmm1, %xmm2, %xmm5, %xmm7, 2 // 44--47 + midround %xmm0, %xmm1, %xmm2, %xmm3, %xmm7, %xmm5, 2 // 48--51 + midround %xmm1, %xmm2, %xmm3, %xmm0, %xmm5, %xmm7, 2 // 52--55 + midround %xmm2, %xmm3, %xmm0, %xmm1, %xmm7, %xmm5, 2 // 56--59 + midround %xmm3, %xmm0, %xmm1, %xmm2, %xmm5, %xmm7, 3 // 60--63 + midround %xmm0, %xmm1, %xmm2, %xmm3, %xmm7, %xmm5, 3 // 64--67 + + // rounds 68--71 + sha1nexte %xmm1, %xmm5 + movdqa %xmm6, %xmm7 + sha1msg2 %xmm1, %xmm2 + sha1rnds4 $3, %xmm5, %xmm6 + pxor %xmm1, %xmm3 + + // rounds 72--75 + sha1nexte %xmm2, %xmm7 + movdqa %xmm6, %xmm5 + sha1msg2 %xmm2, %xmm3 + sha1rnds4 $3, %xmm7, %xmm6 + + // rounds 76--79 + sha1nexte %xmm3, %xmm5 + movdqa %xmm6, %xmm7 + sha1rnds4 $3, %xmm5, %xmm6 + + sha1nexte %xmm9, %xmm7 // add saved E + paddd %xmm8, %xmm6 // add saved ABCD + + cmp %rsi, %rcx // end reached? + jne 0b + + pshufd $0x1b, %xmm6, %xmm6 // restore order of h0--h3 + movdqu %xmm6, (%rdi) // write h0--h3 + pextrd $3, %xmm7, 16(%rdi) // write h4 +1: ret +END(_libmd_sha1block_shani) + + .section .rodata + .balign 16 +shuf_mask: + .8byte 0x08090a0b0c0d0e0f + .8byte 0x0001020304050607 + .size shuf_mask, .-shuf_mask + + .section .note.GNU-stack,"",%progbits diff --git a/lib/libmd/amd64/sha1dispatch.c b/lib/libmd/amd64/sha1dispatch.c new file mode 100644 index 000000000000..86509195d56e --- /dev/null +++ b/lib/libmd/amd64/sha1dispatch.c @@ -0,0 +1,77 @@ +/*- + * Copyright (c) 2016 The Go Authors. All rights reserved. + * Copyright (c) 2024 Robert Clausecker <fuz@freebsd.org> + * + * Adapted from Go's crypto/sha1/sha1block_amd64.go. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <machine/specialreg.h> +#include <sha.h> +#include <x86/ifunc.h> + +extern void _libmd_sha1block_scalar(SHA1_CTX *, const void *, size_t); +extern void _libmd_sha1block_avx2(SHA1_CTX *, const void *, size_t); +extern void _libmd_sha1block_shani(SHA1_CTX *, const void *, size_t); +static void sha1block_avx2_wrapper(SHA1_CTX *, const void *, size_t); + +#define AVX2_STDEXT_NEEDED \ + (CPUID_STDEXT_BMI1 | CPUID_STDEXT_AVX2 | CPUID_STDEXT_BMI2) + +DEFINE_UIFUNC(, void, sha1_block, (SHA1_CTX *, const void *, size_t)) +{ + if (cpu_stdext_feature & CPUID_STDEXT_SHA) + return (_libmd_sha1block_shani); + if ((cpu_stdext_feature & AVX2_STDEXT_NEEDED) == AVX2_STDEXT_NEEDED) + return (sha1block_avx2_wrapper); + else + return (_libmd_sha1block_scalar); +} + +static void +sha1block_avx2_wrapper(SHA1_CTX *c, const void *data, size_t len) +{ + if (len >= 256) { + /* + * sha1block_avx2 calculates sha1 for 2 block per iteration. + * It also interleaves the precalculation for next the block. + * So it may read up-to 192 bytes past the end of p. + * We may add checks inside sha1block_avx2, but this will + * just turn it into a copy of sha1block_scalar, + * so call it directly, instead. 
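+		 * For example (illustrative values, not from the original
+		 * comment): with len = 320, safe_len starts at 192 and, not
+		 * being a multiple of 128, is reduced to 128, so
+		 * _libmd_sha1block_avx2 consumes two 64-byte blocks and
+		 * _libmd_sha1block_scalar the remaining 192 bytes.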
+ */ + size_t safe_len = len - 128; + + if (safe_len % 128 != 0) + safe_len -= 64; + + _libmd_sha1block_avx2(c, data, safe_len); + _libmd_sha1block_scalar(c, data + safe_len, len - safe_len); + } else + _libmd_sha1block_scalar(c, data, len); +} diff --git a/lib/libmd/i386/rmd160.S b/lib/libmd/i386/rmd160.S new file mode 100644 index 000000000000..57da14f83a7c --- /dev/null +++ b/lib/libmd/i386/rmd160.S @@ -0,0 +1,2018 @@ +/* Run the C pre-processor over this file with one of the following defined + * ELF - elf object files, + * OUT - a.out object files, + * BSDI - BSDI style a.out object files + * SOL - Solaris style elf + */ + +#ifndef PIC +#define TYPE(a,b) .type a,b +#define SIZE(a,b) .size a,b + +#if defined(OUT) || defined(BSDI) +#define ripemd160_block_x86 _ripemd160_block_x86 + +#endif + +#ifdef OUT +#define OK 1 +#define ALIGN 4 +#endif + +#ifdef BSDI +#define OK 1 +#define ALIGN 4 +#undef SIZE +#undef TYPE +#define SIZE(a,b) +#define TYPE(a,b) +#endif + +#if defined(ELF) || defined(SOL) +#define OK 1 +#define ALIGN 4 +#endif + +#ifndef OK +You need to define one of +ELF - elf systems - linux-elf, NetBSD and DG-UX +OUT - a.out systems - linux-a.out and FreeBSD +SOL - solaris systems, which are elf with strange comment lines +BSDI - a.out with a very primative version of as. +#endif + +/* Let the Assembler begin :-) */ + /* Don't even think of reading this code */ + /* It was automatically generated by rmd-586.pl */ + /* Which is a perl program used to generate the x86 assember for */ + /* any of elf, a.out, BSDI,Win32, or Solaris */ + /* eric <eay@cryptsoft.com> */ + + .file "rmd-586.s" + .version "01.01" +gcc2_compiled.: +.text + .p2align ALIGN +.globl ripemd160_block_x86 + TYPE(ripemd160_block_x86,@function) +ripemd160_block_x86: + pushl %esi + movl 16(%esp), %ecx + pushl %edi + movl 16(%esp), %esi + pushl %ebp + addl %esi, %ecx + pushl %ebx + subl $64, %ecx + subl $88, %esp + movl %ecx, (%esp) + movl 108(%esp), %edi +.L000start: + + movl (%esi), %eax + movl 4(%esi), %ebx + movl %eax, 4(%esp) + movl %ebx, 8(%esp) + movl 8(%esi), %eax + movl 12(%esi), %ebx + movl %eax, 12(%esp) + movl %ebx, 16(%esp) + movl 16(%esi), %eax + movl 20(%esi), %ebx + movl %eax, 20(%esp) + movl %ebx, 24(%esp) + movl 24(%esi), %eax + movl 28(%esi), %ebx + movl %eax, 28(%esp) + movl %ebx, 32(%esp) + movl 32(%esi), %eax + movl 36(%esi), %ebx + movl %eax, 36(%esp) + movl %ebx, 40(%esp) + movl 40(%esi), %eax + movl 44(%esi), %ebx + movl %eax, 44(%esp) + movl %ebx, 48(%esp) + movl 48(%esi), %eax + movl 52(%esi), %ebx + movl %eax, 52(%esp) + movl %ebx, 56(%esp) + movl 56(%esi), %eax + movl 60(%esi), %ebx + movl %eax, 60(%esp) + movl %ebx, 64(%esp) + addl $64, %esi + movl (%edi), %eax + movl %esi, 112(%esp) + movl 4(%edi), %ebx + movl 8(%edi), %ecx + movl 12(%edi), %edx + movl 16(%edi), %ebp + /* 0 */ + movl %ecx, %esi + xorl %edx, %esi + movl 4(%esp), %edi + xorl %ebx, %esi + addl %edi, %eax + roll $10, %ecx + addl %esi, %eax + movl %ebx, %esi + roll $11, %eax + addl %ebp, %eax + /* 1 */ + xorl %ecx, %esi + movl 8(%esp), %edi + xorl %eax, %esi + addl %esi, %ebp + movl %eax, %esi + roll $10, %ebx + addl %edi, %ebp + xorl %ebx, %esi + roll $14, %ebp + addl %edx, %ebp + /* 2 */ + movl 12(%esp), %edi + xorl %ebp, %esi + addl %edi, %edx + roll $10, %eax + addl %esi, %edx + movl %ebp, %esi + roll $15, %edx + addl %ecx, %edx + /* 3 */ + xorl %eax, %esi + movl 16(%esp), %edi + xorl %edx, %esi + addl %esi, %ecx + movl %edx, %esi + roll $10, %ebp + addl %edi, %ecx + xorl %ebp, %esi + roll $12, %ecx + addl %ebx, %ecx + /* 
4 */ + movl 20(%esp), %edi + xorl %ecx, %esi + addl %edi, %ebx + roll $10, %edx + addl %esi, %ebx + movl %ecx, %esi + roll $5, %ebx + addl %eax, %ebx + /* 5 */ + xorl %edx, %esi + movl 24(%esp), %edi + xorl %ebx, %esi + addl %esi, %eax + movl %ebx, %esi + roll $10, %ecx + addl %edi, %eax + xorl %ecx, %esi + roll $8, %eax + addl %ebp, %eax + /* 6 */ + movl 28(%esp), %edi + xorl %eax, %esi + addl %edi, %ebp + roll $10, %ebx + addl %esi, %ebp + movl %eax, %esi + roll $7, %ebp + addl %edx, %ebp + /* 7 */ + xorl %ebx, %esi + movl 32(%esp), %edi + xorl %ebp, %esi + addl %esi, %edx + movl %ebp, %esi + roll $10, %eax + addl %edi, %edx + xorl %eax, %esi + roll $9, %edx + addl %ecx, %edx + /* 8 */ + movl 36(%esp), %edi + xorl %edx, %esi + addl %edi, %ecx + roll $10, %ebp + addl %esi, %ecx + movl %edx, %esi + roll $11, %ecx + addl %ebx, %ecx + /* 9 */ + xorl %ebp, %esi + movl 40(%esp), %edi + xorl %ecx, %esi + addl %esi, %ebx + movl %ecx, %esi + roll $10, %edx + addl %edi, %ebx + xorl %edx, %esi + roll $13, %ebx + addl %eax, %ebx + /* 10 */ + movl 44(%esp), %edi + xorl %ebx, %esi + addl %edi, %eax + roll $10, %ecx + addl %esi, %eax + movl %ebx, %esi + roll $14, %eax + addl %ebp, %eax + /* 11 */ + xorl %ecx, %esi + movl 48(%esp), %edi + xorl %eax, %esi + addl %esi, %ebp + movl %eax, %esi + roll $10, %ebx + addl %edi, %ebp + xorl %ebx, %esi + roll $15, %ebp + addl %edx, %ebp + /* 12 */ + movl 52(%esp), %edi + xorl %ebp, %esi + addl %edi, %edx + roll $10, %eax + addl %esi, %edx + movl %ebp, %esi + roll $6, %edx + addl %ecx, %edx + /* 13 */ + xorl %eax, %esi + movl 56(%esp), %edi + xorl %edx, %esi + addl %esi, %ecx + movl %edx, %esi + roll $10, %ebp + addl %edi, %ecx + xorl %ebp, %esi + roll $7, %ecx + addl %ebx, %ecx + /* 14 */ + movl 60(%esp), %edi + xorl %ecx, %esi + addl %edi, %ebx + roll $10, %edx + addl %esi, %ebx + movl %ecx, %esi + roll $9, %ebx + addl %eax, %ebx + /* 15 */ + xorl %edx, %esi + movl 64(%esp), %edi + xorl %ebx, %esi + addl %esi, %eax + movl $-1, %esi + roll $10, %ecx + addl %edi, %eax + movl 32(%esp), %edi + roll $8, %eax + addl %ebp, %eax + /* 16 */ + addl %edi, %ebp + movl %ebx, %edi + subl %eax, %esi + andl %eax, %edi + andl %ecx, %esi + orl %esi, %edi + movl 20(%esp), %esi + roll $10, %ebx + leal 1518500249(%ebp,%edi,1),%ebp + movl $-1, %edi + roll $7, %ebp + addl %edx, %ebp + /* 17 */ + addl %esi, %edx + movl %eax, %esi + subl %ebp, %edi + andl %ebp, %esi + andl %ebx, %edi + orl %edi, %esi + movl 56(%esp), %edi + roll $10, %eax + leal 1518500249(%edx,%esi,1),%edx + movl $-1, %esi + roll $6, %edx + addl %ecx, %edx + /* 18 */ + addl %edi, %ecx + movl %ebp, %edi + subl %edx, %esi + andl %edx, %edi + andl %eax, %esi + orl %esi, %edi + movl 8(%esp), %esi + roll $10, %ebp + leal 1518500249(%ecx,%edi,1),%ecx + movl $-1, %edi + roll $8, %ecx + addl %ebx, %ecx + /* 19 */ + addl %esi, %ebx + movl %edx, %esi + subl %ecx, %edi + andl %ecx, %esi + andl %ebp, %edi + orl %edi, %esi + movl 44(%esp), %edi + roll $10, %edx + leal 1518500249(%ebx,%esi,1),%ebx + movl $-1, %esi + roll $13, %ebx + addl %eax, %ebx + /* 20 */ + addl %edi, %eax + movl %ecx, %edi + subl %ebx, %esi + andl %ebx, %edi + andl %edx, %esi + orl %esi, %edi + movl 28(%esp), %esi + roll $10, %ecx + leal 1518500249(%eax,%edi,1),%eax + movl $-1, %edi + roll $11, %eax + addl %ebp, %eax + /* 21 */ + addl %esi, %ebp + movl %ebx, %esi + subl %eax, %edi + andl %eax, %esi + andl %ecx, %edi + orl %edi, %esi + movl 64(%esp), %edi + roll $10, %ebx + leal 1518500249(%ebp,%esi,1),%ebp + movl $-1, %esi + roll $9, %ebp + addl %edx, %ebp + /* 
22 */ + addl %edi, %edx + movl %eax, %edi + subl %ebp, %esi + andl %ebp, %edi + andl %ebx, %esi + orl %esi, %edi + movl 16(%esp), %esi + roll $10, %eax + leal 1518500249(%edx,%edi,1),%edx + movl $-1, %edi + roll $7, %edx + addl %ecx, %edx + /* 23 */ + addl %esi, %ecx + movl %ebp, %esi + subl %edx, %edi + andl %edx, %esi + andl %eax, %edi + orl %edi, %esi + movl 52(%esp), %edi + roll $10, %ebp + leal 1518500249(%ecx,%esi,1),%ecx + movl $-1, %esi + roll $15, %ecx + addl %ebx, %ecx + /* 24 */ + addl %edi, %ebx + movl %edx, %edi + subl %ecx, %esi + andl %ecx, %edi + andl %ebp, %esi + orl %esi, %edi + movl 4(%esp), %esi + roll $10, %edx + leal 1518500249(%ebx,%edi,1),%ebx + movl $-1, %edi + roll $7, %ebx + addl %eax, %ebx + /* 25 */ + addl %esi, %eax + movl %ecx, %esi + subl %ebx, %edi + andl %ebx, %esi + andl %edx, %edi + orl %edi, %esi + movl 40(%esp), %edi + roll $10, %ecx + leal 1518500249(%eax,%esi,1),%eax + movl $-1, %esi + roll $12, %eax + addl %ebp, %eax + /* 26 */ + addl %edi, %ebp + movl %ebx, %edi + subl %eax, %esi + andl %eax, %edi + andl %ecx, %esi + orl %esi, %edi + movl 24(%esp), %esi + roll $10, %ebx + leal 1518500249(%ebp,%edi,1),%ebp + movl $-1, %edi + roll $15, %ebp + addl %edx, %ebp + /* 27 */ + addl %esi, %edx + movl %eax, %esi + subl %ebp, %edi + andl %ebp, %esi + andl %ebx, %edi + orl %edi, %esi + movl 12(%esp), %edi + roll $10, %eax + leal 1518500249(%edx,%esi,1),%edx + movl $-1, %esi + roll $9, %edx + addl %ecx, %edx + /* 28 */ + addl %edi, %ecx + movl %ebp, %edi + subl %edx, %esi + andl %edx, %edi + andl %eax, %esi + orl %esi, %edi + movl 60(%esp), %esi + roll $10, %ebp + leal 1518500249(%ecx,%edi,1),%ecx + movl $-1, %edi + roll $11, %ecx + addl %ebx, %ecx + /* 29 */ + addl %esi, %ebx + movl %edx, %esi + subl %ecx, %edi + andl %ecx, %esi + andl %ebp, %edi + orl %edi, %esi + movl 48(%esp), %edi + roll $10, %edx + leal 1518500249(%ebx,%esi,1),%ebx + movl $-1, %esi + roll $7, %ebx + addl %eax, %ebx + /* 30 */ + addl %edi, %eax + movl %ecx, %edi + subl %ebx, %esi + andl %ebx, %edi + andl %edx, %esi + orl %esi, %edi + movl 36(%esp), %esi + roll $10, %ecx + leal 1518500249(%eax,%edi,1),%eax + movl $-1, %edi + roll $13, %eax + addl %ebp, %eax + /* 31 */ + addl %esi, %ebp + movl %ebx, %esi + subl %eax, %edi + andl %eax, %esi + andl %ecx, %edi + orl %edi, %esi + movl $-1, %edi + roll $10, %ebx + leal 1518500249(%ebp,%esi,1),%ebp + subl %eax, %edi + roll $12, %ebp + addl %edx, %ebp + /* 32 */ + movl 16(%esp), %esi + orl %ebp, %edi + addl %esi, %edx + xorl %ebx, %edi + movl $-1, %esi + roll $10, %eax + leal 1859775393(%edx,%edi,1),%edx + subl %ebp, %esi + roll $11, %edx + addl %ecx, %edx + /* 33 */ + movl 44(%esp), %edi + orl %edx, %esi + addl %edi, %ecx + xorl %eax, %esi + movl $-1, %edi + roll $10, %ebp + leal 1859775393(%ecx,%esi,1),%ecx + subl %edx, %edi + roll $13, %ecx + addl %ebx, %ecx + /* 34 */ + movl 60(%esp), %esi + orl %ecx, %edi + addl %esi, %ebx + xorl %ebp, %edi + movl $-1, %esi + roll $10, %edx + leal 1859775393(%ebx,%edi,1),%ebx + subl %ecx, %esi + roll $6, %ebx + addl %eax, %ebx + /* 35 */ + movl 20(%esp), %edi + orl %ebx, %esi + addl %edi, %eax + xorl %edx, %esi + movl $-1, %edi + roll $10, %ecx + leal 1859775393(%eax,%esi,1),%eax + subl %ebx, %edi + roll $7, %eax + addl %ebp, %eax + /* 36 */ + movl 40(%esp), %esi + orl %eax, %edi + addl %esi, %ebp + xorl %ecx, %edi + movl $-1, %esi + roll $10, %ebx + leal 1859775393(%ebp,%edi,1),%ebp + subl %eax, %esi + roll $14, %ebp + addl %edx, %ebp + /* 37 */ + movl 64(%esp), %edi + orl %ebp, %esi + addl %edi, %edx + xorl 
%ebx, %esi + movl $-1, %edi + roll $10, %eax + leal 1859775393(%edx,%esi,1),%edx + subl %ebp, %edi + roll $9, %edx + addl %ecx, %edx + /* 38 */ + movl 36(%esp), %esi + orl %edx, %edi + addl %esi, %ecx + xorl %eax, %edi + movl $-1, %esi + roll $10, %ebp + leal 1859775393(%ecx,%edi,1),%ecx + subl %edx, %esi + roll $13, %ecx + addl %ebx, %ecx + /* 39 */ + movl 8(%esp), %edi + orl %ecx, %esi + addl %edi, %ebx + xorl %ebp, %esi + movl $-1, %edi + roll $10, %edx + leal 1859775393(%ebx,%esi,1),%ebx + subl %ecx, %edi + roll $15, %ebx + addl %eax, %ebx + /* 40 */ + movl 12(%esp), %esi + orl %ebx, %edi + addl %esi, %eax + xorl %edx, %edi + movl $-1, %esi + roll $10, %ecx + leal 1859775393(%eax,%edi,1),%eax + subl %ebx, %esi + roll $14, %eax + addl %ebp, %eax + /* 41 */ + movl 32(%esp), %edi + orl %eax, %esi + addl %edi, %ebp + xorl %ecx, %esi + movl $-1, %edi + roll $10, %ebx + leal 1859775393(%ebp,%esi,1),%ebp + subl %eax, %edi + roll $8, %ebp + addl %edx, %ebp + /* 42 */ + movl 4(%esp), %esi + orl %ebp, %edi + addl %esi, %edx + xorl %ebx, %edi + movl $-1, %esi + roll $10, %eax + leal 1859775393(%edx,%edi,1),%edx + subl %ebp, %esi + roll $13, %edx + addl %ecx, %edx + /* 43 */ + movl 28(%esp), %edi + orl %edx, %esi + addl %edi, %ecx + xorl %eax, %esi + movl $-1, %edi + roll $10, %ebp + leal 1859775393(%ecx,%esi,1),%ecx + subl %edx, %edi + roll $6, %ecx + addl %ebx, %ecx + /* 44 */ + movl 56(%esp), %esi + orl %ecx, %edi + addl %esi, %ebx + xorl %ebp, %edi + movl $-1, %esi + roll $10, %edx + leal 1859775393(%ebx,%edi,1),%ebx + subl %ecx, %esi + roll $5, %ebx + addl %eax, %ebx + /* 45 */ + movl 48(%esp), %edi + orl %ebx, %esi + addl %edi, %eax + xorl %edx, %esi + movl $-1, %edi + roll $10, %ecx + leal 1859775393(%eax,%esi,1),%eax + subl %ebx, %edi + roll $12, %eax + addl %ebp, %eax + /* 46 */ + movl 24(%esp), %esi + orl %eax, %edi + addl %esi, %ebp + xorl %ecx, %edi + movl $-1, %esi + roll $10, %ebx + leal 1859775393(%ebp,%edi,1),%ebp + subl %eax, %esi + roll $7, %ebp + addl %edx, %ebp + /* 47 */ + movl 52(%esp), %edi + orl %ebp, %esi + addl %edi, %edx + xorl %ebx, %esi + movl $-1, %edi + roll $10, %eax + leal 1859775393(%edx,%esi,1),%edx + movl %eax, %esi + roll $5, %edx + addl %ecx, %edx + /* 48 */ + subl %eax, %edi + andl %edx, %esi + andl %ebp, %edi + orl %esi, %edi + movl 8(%esp), %esi + roll $10, %ebp + leal 2400959708(%ecx,%edi,),%ecx + movl $-1, %edi + addl %esi, %ecx + movl %ebp, %esi + roll $11, %ecx + addl %ebx, %ecx + /* 49 */ + subl %ebp, %edi + andl %ecx, %esi + andl %edx, %edi + orl %esi, %edi + movl 40(%esp), %esi + roll $10, %edx + leal 2400959708(%ebx,%edi,),%ebx + movl $-1, %edi + addl %esi, %ebx + movl %edx, %esi + roll $12, %ebx + addl %eax, %ebx + /* 50 */ + subl %edx, %edi + andl %ebx, %esi + andl %ecx, %edi + orl %esi, %edi + movl 48(%esp), %esi + roll $10, %ecx + leal 2400959708(%eax,%edi,),%eax + movl $-1, %edi + addl %esi, %eax + movl %ecx, %esi + roll $14, %eax + addl %ebp, %eax + /* 51 */ + subl %ecx, %edi + andl %eax, %esi + andl %ebx, %edi + orl %esi, %edi + movl 44(%esp), %esi + roll $10, %ebx + leal 2400959708(%ebp,%edi,),%ebp + movl $-1, %edi + addl %esi, %ebp + movl %ebx, %esi + roll $15, %ebp + addl %edx, %ebp + /* 52 */ + subl %ebx, %edi + andl %ebp, %esi + andl %eax, %edi + orl %esi, %edi + movl 4(%esp), %esi + roll $10, %eax + leal 2400959708(%edx,%edi,),%edx + movl $-1, %edi + addl %esi, %edx + movl %eax, %esi + roll $14, %edx + addl %ecx, %edx + /* 53 */ + subl %eax, %edi + andl %edx, %esi + andl %ebp, %edi + orl %esi, %edi + movl 36(%esp), %esi + roll $10, %ebp 
+ leal 2400959708(%ecx,%edi,),%ecx + movl $-1, %edi + addl %esi, %ecx + movl %ebp, %esi + roll $15, %ecx + addl %ebx, %ecx + /* 54 */ + subl %ebp, %edi + andl %ecx, %esi + andl %edx, %edi + orl %esi, %edi + movl 52(%esp), %esi + roll $10, %edx + leal 2400959708(%ebx,%edi,),%ebx + movl $-1, %edi + addl %esi, %ebx + movl %edx, %esi + roll $9, %ebx + addl %eax, %ebx + /* 55 */ + subl %edx, %edi + andl %ebx, %esi + andl %ecx, %edi + orl %esi, %edi + movl 20(%esp), %esi + roll $10, %ecx + leal 2400959708(%eax,%edi,),%eax + movl $-1, %edi + addl %esi, %eax + movl %ecx, %esi + roll $8, %eax + addl %ebp, %eax + /* 56 */ + subl %ecx, %edi + andl %eax, %esi + andl %ebx, %edi + orl %esi, %edi + movl 56(%esp), %esi + roll $10, %ebx + leal 2400959708(%ebp,%edi,),%ebp + movl $-1, %edi + addl %esi, %ebp + movl %ebx, %esi + roll $9, %ebp + addl %edx, %ebp + /* 57 */ + subl %ebx, %edi + andl %ebp, %esi + andl %eax, %edi + orl %esi, %edi + movl 16(%esp), %esi + roll $10, %eax + leal 2400959708(%edx,%edi,),%edx + movl $-1, %edi + addl %esi, %edx + movl %eax, %esi + roll $14, %edx + addl %ecx, %edx + /* 58 */ + subl %eax, %edi + andl %edx, %esi + andl %ebp, %edi + orl %esi, %edi + movl 32(%esp), %esi + roll $10, %ebp + leal 2400959708(%ecx,%edi,),%ecx + movl $-1, %edi + addl %esi, %ecx + movl %ebp, %esi + roll $5, %ecx + addl %ebx, %ecx + /* 59 */ + subl %ebp, %edi + andl %ecx, %esi + andl %edx, %edi + orl %esi, %edi + movl 64(%esp), %esi + roll $10, %edx + leal 2400959708(%ebx,%edi,),%ebx + movl $-1, %edi + addl %esi, %ebx + movl %edx, %esi + roll $6, %ebx + addl %eax, %ebx + /* 60 */ + subl %edx, %edi + andl %ebx, %esi + andl %ecx, %edi + orl %esi, %edi + movl 60(%esp), %esi + roll $10, %ecx + leal 2400959708(%eax,%edi,),%eax + movl $-1, %edi + addl %esi, %eax + movl %ecx, %esi + roll $8, %eax + addl %ebp, %eax + /* 61 */ + subl %ecx, %edi + andl %eax, %esi + andl %ebx, %edi + orl %esi, %edi + movl 24(%esp), %esi + roll $10, %ebx + leal 2400959708(%ebp,%edi,),%ebp + movl $-1, %edi + addl %esi, %ebp + movl %ebx, %esi + roll $6, %ebp + addl %edx, %ebp + /* 62 */ + subl %ebx, %edi + andl %ebp, %esi + andl %eax, %edi + orl %esi, %edi + movl 28(%esp), %esi + roll $10, %eax + leal 2400959708(%edx,%edi,),%edx + movl $-1, %edi + addl %esi, %edx + movl %eax, %esi + roll $5, %edx + addl %ecx, %edx + /* 63 */ + subl %eax, %edi + andl %edx, %esi + andl %ebp, %edi + orl %esi, %edi + movl 12(%esp), %esi + roll $10, %ebp + leal 2400959708(%ecx,%edi,),%ecx + movl $-1, %edi + addl %esi, %ecx + subl %ebp, %edi + roll $12, %ecx + addl %ebx, %ecx + /* 64 */ + movl 20(%esp), %esi + orl %edx, %edi + addl %esi, %ebx + xorl %ecx, %edi + movl $-1, %esi + roll $10, %edx + leal 2840853838(%ebx,%edi,1),%ebx + subl %edx, %esi + roll $9, %ebx + addl %eax, %ebx + /* 65 */ + movl 4(%esp), %edi + orl %ecx, %esi + addl %edi, %eax + xorl %ebx, %esi + movl $-1, %edi + roll $10, %ecx + leal 2840853838(%eax,%esi,1),%eax + subl %ecx, %edi + roll $15, %eax + addl %ebp, %eax + /* 66 */ + movl 24(%esp), %esi + orl %ebx, %edi + addl %esi, %ebp + xorl %eax, %edi + movl $-1, %esi + roll $10, %ebx + leal 2840853838(%ebp,%edi,1),%ebp + subl %ebx, %esi + roll $5, %ebp + addl %edx, %ebp + /* 67 */ + movl 40(%esp), %edi + orl %eax, %esi + addl %edi, %edx + xorl %ebp, %esi + movl $-1, %edi + roll $10, %eax + leal 2840853838(%edx,%esi,1),%edx + subl %eax, %edi + roll $11, %edx + addl %ecx, %edx + /* 68 */ + movl 32(%esp), %esi + orl %ebp, %edi + addl %esi, %ecx + xorl %edx, %edi + movl $-1, %esi + roll $10, %ebp + leal 2840853838(%ecx,%edi,1),%ecx + subl 
%ebp, %esi + roll $6, %ecx + addl %ebx, %ecx + /* 69 */ + movl 52(%esp), %edi + orl %edx, %esi + addl %edi, %ebx + xorl %ecx, %esi + movl $-1, %edi + roll $10, %edx + leal 2840853838(%ebx,%esi,1),%ebx + subl %edx, %edi + roll $8, %ebx + addl %eax, %ebx + /* 70 */ + movl 12(%esp), %esi + orl %ecx, %edi + addl %esi, %eax + xorl %ebx, %edi + movl $-1, %esi + roll $10, %ecx + leal 2840853838(%eax,%edi,1),%eax + subl %ecx, %esi + roll $13, %eax + addl %ebp, %eax + /* 71 */ + movl 44(%esp), %edi + orl %ebx, %esi + addl %edi, %ebp + xorl %eax, %esi + movl $-1, %edi + roll $10, %ebx + leal 2840853838(%ebp,%esi,1),%ebp + subl %ebx, %edi + roll $12, %ebp + addl %edx, %ebp + /* 72 */ + movl 60(%esp), %esi + orl %eax, %edi + addl %esi, %edx + xorl %ebp, %edi + movl $-1, %esi + roll $10, %eax + leal 2840853838(%edx,%edi,1),%edx + subl %eax, %esi + roll $5, %edx + addl %ecx, %edx + /* 73 */ + movl 8(%esp), %edi + orl %ebp, %esi + addl %edi, %ecx + xorl %edx, %esi + movl $-1, %edi + roll $10, %ebp + leal 2840853838(%ecx,%esi,1),%ecx + subl %ebp, %edi + roll $12, %ecx + addl %ebx, %ecx + /* 74 */ + movl 16(%esp), %esi + orl %edx, %edi + addl %esi, %ebx + xorl %ecx, %edi + movl $-1, %esi + roll $10, %edx + leal 2840853838(%ebx,%edi,1),%ebx + subl %edx, %esi + roll $13, %ebx + addl %eax, %ebx + /* 75 */ + movl 36(%esp), %edi + orl %ecx, %esi + addl %edi, %eax + xorl %ebx, %esi + movl $-1, %edi + roll $10, %ecx + leal 2840853838(%eax,%esi,1),%eax + subl %ecx, %edi + roll $14, %eax + addl %ebp, %eax + /* 76 */ + movl 48(%esp), %esi + orl %ebx, %edi + addl %esi, %ebp + xorl %eax, %edi + movl $-1, %esi + roll $10, %ebx + leal 2840853838(%ebp,%edi,1),%ebp + subl %ebx, %esi + roll $11, %ebp + addl %edx, %ebp + /* 77 */ + movl 28(%esp), %edi + orl %eax, %esi + addl %edi, %edx + xorl %ebp, %esi + movl $-1, %edi + roll $10, %eax + leal 2840853838(%edx,%esi,1),%edx + subl %eax, %edi + roll $8, %edx + addl %ecx, %edx + /* 78 */ + movl 64(%esp), %esi + orl %ebp, %edi + addl %esi, %ecx + xorl %edx, %edi + movl $-1, %esi + roll $10, %ebp + leal 2840853838(%ecx,%edi,1),%ecx + subl %ebp, %esi + roll $5, %ecx + addl %ebx, %ecx + /* 79 */ + movl 56(%esp), %edi + orl %edx, %esi + addl %edi, %ebx + xorl %ecx, %esi + movl 108(%esp), %edi + roll $10, %edx + leal 2840853838(%ebx,%esi,1),%ebx + movl %eax, 68(%esp) + roll $6, %ebx + addl %eax, %ebx + movl (%edi), %eax + movl %ebx, 72(%esp) + movl %ecx, 76(%esp) + movl 4(%edi), %ebx + movl %edx, 80(%esp) + movl 8(%edi), %ecx + movl %ebp, 84(%esp) + movl 12(%edi), %edx + movl 16(%edi), %ebp + /* 80 */ + movl $-1, %edi + subl %edx, %edi + movl 24(%esp), %esi + orl %ecx, %edi + addl %esi, %eax + xorl %ebx, %edi + movl $-1, %esi + roll $10, %ecx + leal 1352829926(%eax,%edi,1),%eax + subl %ecx, %esi + roll $8, %eax + addl %ebp, %eax + /* 81 */ + movl 60(%esp), %edi + orl %ebx, %esi + addl %edi, %ebp + xorl %eax, %esi + movl $-1, %edi + roll $10, %ebx + leal 1352829926(%ebp,%esi,1),%ebp + subl %ebx, %edi + roll $9, %ebp + addl %edx, %ebp + /* 82 */ + movl 32(%esp), %esi + orl %eax, %edi + addl %esi, %edx + xorl %ebp, %edi + movl $-1, %esi + roll $10, %eax + leal 1352829926(%edx,%edi,1),%edx + subl %eax, %esi + roll $9, %edx + addl %ecx, %edx + /* 83 */ + movl 4(%esp), %edi + orl %ebp, %esi + addl %edi, %ecx + xorl %edx, %esi + movl $-1, %edi + roll $10, %ebp + leal 1352829926(%ecx,%esi,1),%ecx + subl %ebp, %edi + roll $11, %ecx + addl %ebx, %ecx + /* 84 */ + movl 40(%esp), %esi + orl %edx, %edi + addl %esi, %ebx + xorl %ecx, %edi + movl $-1, %esi + roll $10, %edx + leal 
1352829926(%ebx,%edi,1),%ebx + subl %edx, %esi + roll $13, %ebx + addl %eax, %ebx + /* 85 */ + movl 12(%esp), %edi + orl %ecx, %esi + addl %edi, %eax + xorl %ebx, %esi + movl $-1, %edi + roll $10, %ecx + leal 1352829926(%eax,%esi,1),%eax + subl %ecx, %edi + roll $15, %eax + addl %ebp, %eax + /* 86 */ + movl 48(%esp), %esi + orl %ebx, %edi + addl %esi, %ebp + xorl %eax, %edi + movl $-1, %esi + roll $10, %ebx + leal 1352829926(%ebp,%edi,1),%ebp + subl %ebx, %esi + roll $15, %ebp + addl %edx, %ebp + /* 87 */ + movl 20(%esp), %edi + orl %eax, %esi + addl %edi, %edx + xorl %ebp, %esi + movl $-1, %edi + roll $10, %eax + leal 1352829926(%edx,%esi,1),%edx + subl %eax, %edi + roll $5, %edx + addl %ecx, %edx + /* 88 */ + movl 56(%esp), %esi + orl %ebp, %edi + addl %esi, %ecx + xorl %edx, %edi + movl $-1, %esi + roll $10, %ebp + leal 1352829926(%ecx,%edi,1),%ecx + subl %ebp, %esi + roll $7, %ecx + addl %ebx, %ecx + /* 89 */ + movl 28(%esp), %edi + orl %edx, %esi + addl %edi, %ebx + xorl %ecx, %esi + movl $-1, %edi + roll $10, %edx + leal 1352829926(%ebx,%esi,1),%ebx + subl %edx, %edi + roll $7, %ebx + addl %eax, %ebx + /* 90 */ + movl 64(%esp), %esi + orl %ecx, %edi + addl %esi, %eax + xorl %ebx, %edi + movl $-1, %esi + roll $10, %ecx + leal 1352829926(%eax,%edi,1),%eax + subl %ecx, %esi + roll $8, %eax + addl %ebp, %eax + /* 91 */ + movl 36(%esp), %edi + orl %ebx, %esi + addl %edi, %ebp + xorl %eax, %esi + movl $-1, %edi + roll $10, %ebx + leal 1352829926(%ebp,%esi,1),%ebp + subl %ebx, %edi + roll $11, %ebp + addl %edx, %ebp + /* 92 */ + movl 8(%esp), %esi + orl %eax, %edi + addl %esi, %edx + xorl %ebp, %edi + movl $-1, %esi + roll $10, %eax + leal 1352829926(%edx,%edi,1),%edx + subl %eax, %esi + roll $14, %edx + addl %ecx, %edx + /* 93 */ + movl 44(%esp), %edi + orl %ebp, %esi + addl %edi, %ecx + xorl %edx, %esi + movl $-1, %edi + roll $10, %ebp + leal 1352829926(%ecx,%esi,1),%ecx + subl %ebp, %edi + roll $14, %ecx + addl %ebx, %ecx + /* 94 */ + movl 16(%esp), %esi + orl %edx, %edi + addl %esi, %ebx + xorl %ecx, %edi + movl $-1, %esi + roll $10, %edx + leal 1352829926(%ebx,%edi,1),%ebx + subl %edx, %esi + roll $12, %ebx + addl %eax, %ebx + /* 95 */ + movl 52(%esp), %edi + orl %ecx, %esi + addl %edi, %eax + xorl %ebx, %esi + movl $-1, %edi + roll $10, %ecx + leal 1352829926(%eax,%esi,1),%eax + movl %ecx, %esi + roll $6, %eax + addl %ebp, %eax + /* 96 */ + subl %ecx, %edi + andl %eax, %esi + andl %ebx, %edi + orl %esi, %edi + movl 28(%esp), %esi + roll $10, %ebx + leal 1548603684(%ebp,%edi,),%ebp + movl $-1, %edi + addl %esi, %ebp + movl %ebx, %esi + roll $9, %ebp + addl %edx, %ebp + /* 97 */ + subl %ebx, %edi + andl %ebp, %esi + andl %eax, %edi + orl %esi, %edi + movl 48(%esp), %esi + roll $10, %eax + leal 1548603684(%edx,%edi,),%edx + movl $-1, %edi + addl %esi, %edx + movl %eax, %esi + roll $13, %edx + addl %ecx, %edx + /* 98 */ + subl %eax, %edi + andl %edx, %esi + andl %ebp, %edi + orl %esi, %edi + movl 16(%esp), %esi + roll $10, %ebp + leal 1548603684(%ecx,%edi,),%ecx + movl $-1, %edi + addl %esi, %ecx + movl %ebp, %esi + roll $15, %ecx + addl %ebx, %ecx + /* 99 */ + subl %ebp, %edi + andl %ecx, %esi + andl %edx, %edi + orl %esi, %edi + movl 32(%esp), %esi + roll $10, %edx + leal 1548603684(%ebx,%edi,),%ebx + movl $-1, %edi + addl %esi, %ebx + movl %edx, %esi + roll $7, %ebx + addl %eax, %ebx + /* 100 */ + subl %edx, %edi + andl %ebx, %esi + andl %ecx, %edi + orl %esi, %edi + movl 4(%esp), %esi + roll $10, %ecx + leal 1548603684(%eax,%edi,),%eax + movl $-1, %edi + addl %esi, %eax + movl %ecx, 
%esi + roll $12, %eax + addl %ebp, %eax + /* 101 */ + subl %ecx, %edi + andl %eax, %esi + andl %ebx, %edi + orl %esi, %edi + movl 56(%esp), %esi + roll $10, %ebx + leal 1548603684(%ebp,%edi,),%ebp + movl $-1, %edi + addl %esi, %ebp + movl %ebx, %esi + roll $8, %ebp + addl %edx, %ebp + /* 102 */ + subl %ebx, %edi + andl %ebp, %esi + andl %eax, %edi + orl %esi, %edi + movl 24(%esp), %esi + roll $10, %eax + leal 1548603684(%edx,%edi,),%edx + movl $-1, %edi + addl %esi, %edx + movl %eax, %esi + roll $9, %edx + addl %ecx, %edx + /* 103 */ + subl %eax, %edi + andl %edx, %esi + andl %ebp, %edi + orl %esi, %edi + movl 44(%esp), %esi + roll $10, %ebp + leal 1548603684(%ecx,%edi,),%ecx + movl $-1, %edi + addl %esi, %ecx + movl %ebp, %esi + roll $11, %ecx + addl %ebx, %ecx + /* 104 */ + subl %ebp, %edi + andl %ecx, %esi + andl %edx, %edi + orl %esi, %edi + movl 60(%esp), %esi + roll $10, %edx + leal 1548603684(%ebx,%edi,),%ebx + movl $-1, %edi + addl %esi, %ebx + movl %edx, %esi + roll $7, %ebx + addl %eax, %ebx + /* 105 */ + subl %edx, %edi + andl %ebx, %esi + andl %ecx, %edi + orl %esi, %edi + movl 64(%esp), %esi + roll $10, %ecx + leal 1548603684(%eax,%edi,),%eax + movl $-1, %edi + addl %esi, %eax + movl %ecx, %esi + roll $7, %eax + addl %ebp, %eax + /* 106 */ + subl %ecx, %edi + andl %eax, %esi + andl %ebx, %edi + orl %esi, %edi + movl 36(%esp), %esi + roll $10, %ebx + leal 1548603684(%ebp,%edi,),%ebp + movl $-1, %edi + addl %esi, %ebp + movl %ebx, %esi + roll $12, %ebp + addl %edx, %ebp + /* 107 */ + subl %ebx, %edi + andl %ebp, %esi + andl %eax, %edi + orl %esi, %edi + movl 52(%esp), %esi + roll $10, %eax + leal 1548603684(%edx,%edi,),%edx + movl $-1, %edi + addl %esi, %edx + movl %eax, %esi + roll $7, %edx + addl %ecx, %edx + /* 108 */ + subl %eax, %edi + andl %edx, %esi + andl %ebp, %edi + orl %esi, %edi + movl 20(%esp), %esi + roll $10, %ebp + leal 1548603684(%ecx,%edi,),%ecx + movl $-1, %edi + addl %esi, %ecx + movl %ebp, %esi + roll $6, %ecx + addl %ebx, %ecx + /* 109 */ + subl %ebp, %edi + andl %ecx, %esi + andl %edx, %edi + orl %esi, %edi + movl 40(%esp), %esi + roll $10, %edx + leal 1548603684(%ebx,%edi,),%ebx + movl $-1, %edi + addl %esi, %ebx + movl %edx, %esi + roll $15, %ebx + addl %eax, %ebx + /* 110 */ + subl %edx, %edi + andl %ebx, %esi + andl %ecx, %edi + orl %esi, %edi + movl 8(%esp), %esi + roll $10, %ecx + leal 1548603684(%eax,%edi,),%eax + movl $-1, %edi + addl %esi, %eax + movl %ecx, %esi + roll $13, %eax + addl %ebp, %eax + /* 111 */ + subl %ecx, %edi + andl %eax, %esi + andl %ebx, %edi + orl %esi, %edi + movl 12(%esp), %esi + roll $10, %ebx + leal 1548603684(%ebp,%edi,),%ebp + movl $-1, %edi + addl %esi, %ebp + subl %eax, %edi + roll $11, %ebp + addl %edx, %ebp + /* 112 */ + movl 64(%esp), %esi + orl %ebp, %edi + addl %esi, %edx + xorl %ebx, %edi + movl $-1, %esi + roll $10, %eax + leal 1836072691(%edx,%edi,1),%edx + subl %ebp, %esi + roll $9, %edx + addl %ecx, %edx + /* 113 */ + movl 24(%esp), %edi + orl %edx, %esi + addl %edi, %ecx + xorl %eax, %esi + movl $-1, %edi + roll $10, %ebp + leal 1836072691(%ecx,%esi,1),%ecx + subl %edx, %edi + roll $7, %ecx + addl %ebx, %ecx + /* 114 */ + movl 8(%esp), %esi + orl %ecx, %edi + addl %esi, %ebx + xorl %ebp, %edi + movl $-1, %esi + roll $10, %edx + leal 1836072691(%ebx,%edi,1),%ebx + subl %ecx, %esi + roll $15, %ebx + addl %eax, %ebx + /* 115 */ + movl 16(%esp), %edi + orl %ebx, %esi + addl %edi, %eax + xorl %edx, %esi + movl $-1, %edi + roll $10, %ecx + leal 1836072691(%eax,%esi,1),%eax + subl %ebx, %edi + roll $11, %eax + addl 
%ebp, %eax + /* 116 */ + movl 32(%esp), %esi + orl %eax, %edi + addl %esi, %ebp + xorl %ecx, %edi + movl $-1, %esi + roll $10, %ebx + leal 1836072691(%ebp,%edi,1),%ebp + subl %eax, %esi + roll $8, %ebp + addl %edx, %ebp + /* 117 */ + movl 60(%esp), %edi + orl %ebp, %esi + addl %edi, %edx + xorl %ebx, %esi + movl $-1, %edi + roll $10, %eax + leal 1836072691(%edx,%esi,1),%edx + subl %ebp, %edi + roll $6, %edx + addl %ecx, %edx + /* 118 */ + movl 28(%esp), %esi + orl %edx, %edi + addl %esi, %ecx + xorl %eax, %edi + movl $-1, %esi + roll $10, %ebp + leal 1836072691(%ecx,%edi,1),%ecx + subl %edx, %esi + roll $6, %ecx + addl %ebx, %ecx + /* 119 */ + movl 40(%esp), %edi + orl %ecx, %esi + addl %edi, %ebx + xorl %ebp, %esi + movl $-1, %edi + roll $10, %edx + leal 1836072691(%ebx,%esi,1),%ebx + subl %ecx, %edi + roll $14, %ebx + addl %eax, %ebx + /* 120 */ + movl 48(%esp), %esi + orl %ebx, %edi + addl %esi, %eax + xorl %edx, %edi + movl $-1, %esi + roll $10, %ecx + leal 1836072691(%eax,%edi,1),%eax + subl %ebx, %esi + roll $12, %eax + addl %ebp, %eax + /* 121 */ + movl 36(%esp), %edi + orl %eax, %esi + addl %edi, %ebp + xorl %ecx, %esi + movl $-1, %edi + roll $10, %ebx + leal 1836072691(%ebp,%esi,1),%ebp + subl %eax, %edi + roll $13, %ebp + addl %edx, %ebp + /* 122 */ + movl 52(%esp), %esi + orl %ebp, %edi + addl %esi, %edx + xorl %ebx, %edi + movl $-1, %esi + roll $10, %eax + leal 1836072691(%edx,%edi,1),%edx + subl %ebp, %esi + roll $5, %edx + addl %ecx, %edx + /* 123 */ + movl 12(%esp), %edi + orl %edx, %esi + addl %edi, %ecx + xorl %eax, %esi + movl $-1, %edi + roll $10, %ebp + leal 1836072691(%ecx,%esi,1),%ecx + subl %edx, %edi + roll $14, %ecx + addl %ebx, %ecx + /* 124 */ + movl 44(%esp), %esi + orl %ecx, %edi + addl %esi, %ebx + xorl %ebp, %edi + movl $-1, %esi + roll $10, %edx + leal 1836072691(%ebx,%edi,1),%ebx + subl %ecx, %esi + roll $13, %ebx + addl %eax, %ebx + /* 125 */ + movl 4(%esp), %edi + orl %ebx, %esi + addl %edi, %eax + xorl %edx, %esi + movl $-1, %edi + roll $10, %ecx + leal 1836072691(%eax,%esi,1),%eax + subl %ebx, %edi + roll $13, %eax + addl %ebp, %eax + /* 126 */ + movl 20(%esp), %esi + orl %eax, %edi + addl %esi, %ebp + xorl %ecx, %edi + movl $-1, %esi + roll $10, %ebx + leal 1836072691(%ebp,%edi,1),%ebp + subl %eax, %esi + roll $7, %ebp + addl %edx, %ebp + /* 127 */ + movl 56(%esp), %edi + orl %ebp, %esi + addl %edi, %edx + xorl %ebx, %esi + movl 36(%esp), %edi + roll $10, %eax + leal 1836072691(%edx,%esi,1),%edx + movl $-1, %esi + roll $5, %edx + addl %ecx, %edx + /* 128 */ + addl %edi, %ecx + movl %ebp, %edi + subl %edx, %esi + andl %edx, %edi + andl %eax, %esi + orl %esi, %edi + movl 28(%esp), %esi + roll $10, %ebp + leal 2053994217(%ecx,%edi,1),%ecx + movl $-1, %edi + roll $15, %ecx + addl %ebx, %ecx + /* 129 */ + addl %esi, %ebx + movl %edx, %esi + subl %ecx, %edi + andl %ecx, %esi + andl %ebp, %edi + orl %edi, %esi + movl 20(%esp), %edi + roll $10, %edx + leal 2053994217(%ebx,%esi,1),%ebx + movl $-1, %esi + roll $5, %ebx + addl %eax, %ebx + /* 130 */ + addl %edi, %eax + movl %ecx, %edi + subl %ebx, %esi + andl %ebx, %edi + andl %edx, %esi + orl %esi, %edi + movl 8(%esp), %esi + roll $10, %ecx + leal 2053994217(%eax,%edi,1),%eax + movl $-1, %edi + roll $8, %eax + addl %ebp, %eax + /* 131 */ + addl %esi, %ebp + movl %ebx, %esi + subl %eax, %edi + andl %eax, %esi + andl %ecx, %edi + orl %edi, %esi + movl 16(%esp), %edi + roll $10, %ebx + leal 2053994217(%ebp,%esi,1),%ebp + movl $-1, %esi + roll $11, %ebp + addl %edx, %ebp + /* 132 */ + addl %edi, %edx + movl %eax, 
%edi + subl %ebp, %esi + andl %ebp, %edi + andl %ebx, %esi + orl %esi, %edi + movl 48(%esp), %esi + roll $10, %eax + leal 2053994217(%edx,%edi,1),%edx + movl $-1, %edi + roll $14, %edx + addl %ecx, %edx + /* 133 */ + addl %esi, %ecx + movl %ebp, %esi + subl %edx, %edi + andl %edx, %esi + andl %eax, %edi + orl %edi, %esi + movl 64(%esp), %edi + roll $10, %ebp + leal 2053994217(%ecx,%esi,1),%ecx + movl $-1, %esi + roll $14, %ecx + addl %ebx, %ecx + /* 134 */ + addl %edi, %ebx + movl %edx, %edi + subl %ecx, %esi + andl %ecx, %edi + andl %ebp, %esi + orl %esi, %edi + movl 4(%esp), %esi + roll $10, %edx + leal 2053994217(%ebx,%edi,1),%ebx + movl $-1, %edi + roll $6, %ebx + addl %eax, %ebx + /* 135 */ + addl %esi, %eax + movl %ecx, %esi + subl %ebx, %edi + andl %ebx, %esi + andl %edx, %edi + orl %edi, %esi + movl 24(%esp), %edi + roll $10, %ecx + leal 2053994217(%eax,%esi,1),%eax + movl $-1, %esi + roll $14, %eax + addl %ebp, %eax + /* 136 */ + addl %edi, %ebp + movl %ebx, %edi + subl %eax, %esi + andl %eax, %edi + andl %ecx, %esi + orl %esi, %edi + movl 52(%esp), %esi + roll $10, %ebx + leal 2053994217(%ebp,%edi,1),%ebp + movl $-1, %edi + roll $6, %ebp + addl %edx, %ebp + /* 137 */ + addl %esi, %edx + movl %eax, %esi + subl %ebp, %edi + andl %ebp, %esi + andl %ebx, %edi + orl %edi, %esi + movl 12(%esp), %edi + roll $10, %eax + leal 2053994217(%edx,%esi,1),%edx + movl $-1, %esi + roll $9, %edx + addl %ecx, %edx + /* 138 */ + addl %edi, %ecx + movl %ebp, %edi + subl %edx, %esi + andl %edx, %edi + andl %eax, %esi + orl %esi, %edi + movl 56(%esp), %esi + roll $10, %ebp + leal 2053994217(%ecx,%edi,1),%ecx + movl $-1, %edi + roll $12, %ecx + addl %ebx, %ecx + /* 139 */ + addl %esi, %ebx + movl %edx, %esi + subl %ecx, %edi + andl %ecx, %esi + andl %ebp, %edi + orl %edi, %esi + movl 40(%esp), %edi + roll $10, %edx + leal 2053994217(%ebx,%esi,1),%ebx + movl $-1, %esi + roll $9, %ebx + addl %eax, %ebx + /* 140 */ + addl %edi, %eax + movl %ecx, %edi + subl %ebx, %esi + andl %ebx, %edi + andl %edx, %esi + orl %esi, %edi + movl 32(%esp), %esi + roll $10, %ecx + leal 2053994217(%eax,%edi,1),%eax + movl $-1, %edi + roll $12, %eax + addl %ebp, %eax + /* 141 */ + addl %esi, %ebp + movl %ebx, %esi + subl %eax, %edi + andl %eax, %esi + andl %ecx, %edi + orl %edi, %esi + movl 44(%esp), %edi + roll $10, %ebx + leal 2053994217(%ebp,%esi,1),%ebp + movl $-1, %esi + roll $5, %ebp + addl %edx, %ebp + /* 142 */ + addl %edi, %edx + movl %eax, %edi + subl %ebp, %esi + andl %ebp, %edi + andl %ebx, %esi + orl %esi, %edi + movl 60(%esp), %esi + roll $10, %eax + leal 2053994217(%edx,%edi,1),%edx + movl $-1, %edi + roll $15, %edx + addl %ecx, %edx + /* 143 */ + addl %esi, %ecx + movl %ebp, %esi + subl %edx, %edi + andl %edx, %esi + andl %eax, %edi + orl %esi, %edi + movl %edx, %esi + roll $10, %ebp + leal 2053994217(%ecx,%edi,1),%ecx + xorl %ebp, %esi + roll $8, %ecx + addl %ebx, %ecx + /* 144 */ + movl 52(%esp), %edi + xorl %ecx, %esi + addl %edi, %ebx + roll $10, %edx + addl %esi, %ebx + movl %ecx, %esi + roll $8, %ebx + addl %eax, %ebx + /* 145 */ + xorl %edx, %esi + movl 64(%esp), %edi + xorl %ebx, %esi + addl %esi, %eax + movl %ebx, %esi + roll $10, %ecx + addl %edi, %eax + xorl %ecx, %esi + roll $5, %eax + addl %ebp, %eax + /* 146 */ + movl 44(%esp), %edi + xorl %eax, %esi + addl %edi, %ebp + roll $10, %ebx + addl %esi, %ebp + movl %eax, %esi + roll $12, %ebp + addl %edx, %ebp + /* 147 */ + xorl %ebx, %esi + movl 20(%esp), %edi + xorl %ebp, %esi + addl %esi, %edx + movl %ebp, %esi + roll $10, %eax + addl %edi, %edx + xorl 
%eax, %esi + roll $9, %edx + addl %ecx, %edx + /* 148 */ + movl 8(%esp), %edi + xorl %edx, %esi + addl %edi, %ecx + roll $10, %ebp + addl %esi, %ecx + movl %edx, %esi + roll $12, %ecx + addl %ebx, %ecx + /* 149 */ + xorl %ebp, %esi + movl 24(%esp), %edi + xorl %ecx, %esi + addl %esi, %ebx + movl %ecx, %esi + roll $10, %edx + addl %edi, %ebx + xorl %edx, %esi + roll $5, %ebx + addl %eax, %ebx + /* 150 */ + movl 36(%esp), %edi + xorl %ebx, %esi + addl %edi, %eax + roll $10, %ecx + addl %esi, %eax + movl %ebx, %esi + roll $14, %eax + addl %ebp, %eax + /* 151 */ + xorl %ecx, %esi + movl 32(%esp), %edi + xorl %eax, %esi + addl %esi, %ebp + movl %eax, %esi + roll $10, %ebx + addl %edi, %ebp + xorl %ebx, %esi + roll $6, %ebp + addl %edx, %ebp + /* 152 */ + movl 28(%esp), %edi + xorl %ebp, %esi + addl %edi, %edx + roll $10, %eax + addl %esi, %edx + movl %ebp, %esi + roll $8, %edx + addl %ecx, %edx + /* 153 */ + xorl %eax, %esi + movl 12(%esp), %edi + xorl %edx, %esi + addl %esi, %ecx + movl %edx, %esi + roll $10, %ebp + addl %edi, %ecx + xorl %ebp, %esi + roll $13, %ecx + addl %ebx, %ecx + /* 154 */ + movl 56(%esp), %edi + xorl %ecx, %esi + addl %edi, %ebx + roll $10, %edx + addl %esi, %ebx + movl %ecx, %esi + roll $6, %ebx + addl %eax, %ebx + /* 155 */ + xorl %edx, %esi + movl 60(%esp), %edi + xorl %ebx, %esi + addl %esi, %eax + movl %ebx, %esi + roll $10, %ecx + addl %edi, %eax + xorl %ecx, %esi + roll $5, %eax + addl %ebp, %eax + /* 156 */ + movl 4(%esp), %edi + xorl %eax, %esi + addl %edi, %ebp + roll $10, %ebx + addl %esi, %ebp + movl %eax, %esi + roll $15, %ebp + addl %edx, %ebp + /* 157 */ + xorl %ebx, %esi + movl 16(%esp), %edi + xorl %ebp, %esi + addl %esi, %edx + movl %ebp, %esi + roll $10, %eax + addl %edi, %edx + xorl %eax, %esi + roll $13, %edx + addl %ecx, %edx + /* 158 */ + movl 40(%esp), %edi + xorl %edx, %esi + addl %edi, %ecx + roll $10, %ebp + addl %esi, %ecx + movl %edx, %esi + roll $11, %ecx + addl %ebx, %ecx + /* 159 */ + xorl %ebp, %esi + movl 48(%esp), %edi + xorl %ecx, %esi + addl %esi, %ebx + roll $10, %edx + addl %edi, %ebx + movl 108(%esp), %edi + roll $11, %ebx + addl %eax, %ebx + movl 4(%edi), %esi + addl %esi, %edx + movl 76(%esp), %esi + addl %esi, %edx + movl 8(%edi), %esi + addl %esi, %ebp + movl 80(%esp), %esi + addl %esi, %ebp + movl 12(%edi), %esi + addl %esi, %eax + movl 84(%esp), %esi + addl %esi, %eax + movl 16(%edi), %esi + addl %esi, %ebx + movl 68(%esp), %esi + addl %esi, %ebx + movl (%edi), %esi + addl %esi, %ecx + movl 72(%esp), %esi + addl %esi, %ecx + movl %edx, (%edi) + movl %ebp, 4(%edi) + movl %eax, 8(%edi) + movl %ebx, 12(%edi) + movl %ecx, 16(%edi) + movl (%esp), %edi + movl 112(%esp), %esi + cmpl %esi, %edi + movl 108(%esp), %edi + jae .L000start + addl $88, %esp + popl %ebx + popl %ebp + popl %edi + popl %esi + ret +.ripemd160_block_x86_end: + SIZE(ripemd160_block_x86,.ripemd160_block_x86_end-ripemd160_block_x86) +.ident "desasm.pl" +#endif /* not PIC */ diff --git a/lib/libmd/md4.copyright b/lib/libmd/md4.copyright new file mode 100644 index 000000000000..7c7af2efb887 --- /dev/null +++ b/lib/libmd/md4.copyright @@ -0,0 +1,19 @@ +Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All +rights reserved. +.Pp +License to copy and use this software is granted provided that it +is identified as the "RSA Data Security, Inc. MD4 Message-Digest +Algorithm" in all material mentioning or referencing this software +or this function. 
+License is also granted to make and use derivative works provided +that such works are identified as "derived from the RSA Data +Security, Inc. MD4 Message-Digest Algorithm" in all material +mentioning or referencing the derived work. +.Pp +RSA Data Security, Inc. makes no representations concerning either +the merchantability of this software or the suitability of this +software for any particular purpose. It is provided "as is" +without express or implied warranty of any kind. +.Pp +These notices must be retained in any copies of any part of this +documentation and/or software. diff --git a/lib/libmd/md4.h b/lib/libmd/md4.h new file mode 100644 index 000000000000..f251fa4821cf --- /dev/null +++ b/lib/libmd/md4.h @@ -0,0 +1,33 @@ +/* MD4.H - header file for MD4C.C + */ + +/*- + SPDX-License-Identifier: RSA-MD + + Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All + rights reserved. + + License to copy and use this software is granted provided that it + is identified as the "RSA Data Security, Inc. MD4 Message-Digest + Algorithm" in all material mentioning or referencing this software + or this function. + License is also granted to make and use derivative works provided + that such works are identified as "derived from the RSA Data + Security, Inc. MD4 Message-Digest Algorithm" in all material + mentioning or referencing the derived work. + + RSA Data Security, Inc. makes no representations concerning either + the merchantability of this software or the suitability of this + software for any particular purpose. It is provided "as is" + without express or implied warranty of any kind. + + These notices must be retained in any copies of any part of this + documentation and/or software. + */ + +#ifndef _MD4_H_ +#define _MD4_H_ + +#include <sys/md4.h> + +#endif /* _MD4_H_ */ diff --git a/lib/libmd/md5.copyright b/lib/libmd/md5.copyright new file mode 100644 index 000000000000..08bb01166590 --- /dev/null +++ b/lib/libmd/md5.copyright @@ -0,0 +1,20 @@ +Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All +rights reserved. +.Pp +License to copy and use this software is granted provided that it +is identified as the "RSA Data Security, Inc. MD5 Message-Digest +Algorithm" in all material mentioning or referencing this software +or this function. +.Pp +License is also granted to make and use derivative works provided +that such works are identified as "derived from the RSA Data +Security, Inc. MD5 Message-Digest Algorithm" in all material +mentioning or referencing the derived work. +.Pp +RSA Data Security, Inc. makes no representations concerning either +the merchantability of this software or the suitability of this +software for any particular purpose. It is provided "as is" +without express or implied warranty of any kind. +.Pp +These notices must be retained in any copies of any part of this +documentation and/or software. diff --git a/lib/libmd/md5.h b/lib/libmd/md5.h new file mode 100644 index 000000000000..fca9d5f6ee6e --- /dev/null +++ b/lib/libmd/md5.h @@ -0,0 +1,6 @@ + +#ifndef _MD5_H_ +#define _MD5_H_ + +#include <sys/md5.h> +#endif /* _MD5_H_ */ diff --git a/lib/libmd/mdX.3 b/lib/libmd/mdX.3 new file mode 100644 index 000000000000..7c86f28ad628 --- /dev/null +++ b/lib/libmd/mdX.3 @@ -0,0 +1,228 @@ +.\" +.\" ---------------------------------------------------------------------------- +.\" "THE BEER-WARE LICENSE" (Revision 42): +.\" <phk@FreeBSD.org> wrote this file. As long as you retain this notice you +.\" can do whatever you want with this stuff. 
If we meet some day, and you think +.\" this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp +.\" ---------------------------------------------------------------------------- +.\" +.Dd March 8, 2022 +.Dt MDX 3 +.Os +.Sh NAME +.Nm MDXInit , +.Nm MDXUpdate , +.Nm MDXPad , +.Nm MDXFinal , +.Nm MDXEnd , +.Nm MDXFd , +.Nm MDXFdChunk , +.Nm MDXFile , +.Nm MDXFileChunk , +.Nm MDXData +.Nd calculate the RSA Data Security, Inc., ``MDX'' message digest +.Sh LIBRARY +.Lb libmd +.Sh SYNOPSIS +.In sys/types.h +.In mdX.h +.Ft void +.Fn MDXInit "MDX_CTX *context" +.Ft void +.Fn MDXUpdate "MDX_CTX *context" "const void *data" "unsigned int len" +.Ft void +.Fn MDXPad "MDX_CTX *context" +.Ft void +.Fn MDXFinal "unsigned char digest[16]" "MDX_CTX *context" +.Ft "char *" +.Fn MDXEnd "MDX_CTX *context" "char *buf" +.Ft "char *" +.Fn MDXFd "int fd" "char *buf" +.Ft "char *" +.Fn MDXFdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn MDXFile "const char *filename" "char *buf" +.Ft "char *" +.Fn MDXFileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn MDXData "const void *data" "unsigned int len" "char *buf" +.Sh DESCRIPTION +The MDX functions calculate a 128-bit cryptographic checksum (digest) +for any number of input bytes. +A cryptographic checksum is a one-way +hash-function, that is, you cannot find (except by exhaustive search) +the input corresponding to a particular output. +This net result is a +.Dq fingerprint +of the input-data, which does not disclose the actual input. +.Pp +MD4 is the fastest and MD5 is somewhat slower. +MD4 has now been broken; it should only be used where necessary for +backward compatibility. +MD5 has not yet (1999-02-11) been broken, but sufficient attacks have been +made that its security is in some doubt. +The attacks on both MD4 and MD5 +are both in the nature of finding +.Dq collisions +\[en] +that is, multiple +inputs which hash to the same value; it is still unlikely for an attacker +to be able to determine the exact original input given a hash value. +.Pp +The +.Fn MDXInit , +.Fn MDXUpdate , +and +.Fn MDXFinal +functions are the core functions. +Allocate an +.Vt MDX_CTX , +initialize it with +.Fn MDXInit , +run over the data with +.Fn MDXUpdate , +and finally extract the result using +.Fn MDXFinal , +which will also erase the +.Vt MDX_CTX . +.Pp +The +.Fn MDXPad +function can be used to pad message data in same way +as done by +.Fn MDXFinal +without terminating calculation. +.Pp +The +.Fn MDXEnd +function is a wrapper for +.Fn MDXFinal +which converts the return value to a 33-character +(including the terminating '\e0') +ASCII string which represents the 128 bits in hexadecimal. +.Pp +The +.Fn MDXFile +function calculates the digest of a file, and uses +.Fn MDXEnd +to return the result. +If the file cannot be opened, a null pointer is returned. +The +.Fn MDXFileChunk +function is similar to +.Fn MDXFile , +but it only calculates the digest over a byte-range of the file specified, +starting at +.Fa offset +and spanning +.Fa length +bytes. +If the +.Fa length +parameter is specified as 0, or more than the length of the remaining part +of the file, +.Fn MDXFileChunk +calculates the digest from +.Fa offset +to the end of file. +The +.Fn MDXData +function calculates the digest of a chunk of data in memory, and uses +.Fn MDXEnd +to return the result. 
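+.Pp
+A minimal sketch of the core-function sequence follows;
+.Va data
+and
+.Va len
+stand for the caller's input buffer and its length.
+.Bd -literal -offset indent
+MDX_CTX ctx;
+unsigned char digest[16];
+
+MDXInit(&ctx);
+MDXUpdate(&ctx, data, len);
+MDXFinal(digest, &ctx);
+.Ed
+.Pp
+Calling
+.Fn MDXEnd
+instead of
+.Fn MDXFinal
+as the final step yields the digest as a hexadecimal string instead of
+raw bytes.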
+.Pp +The +.Fn MDXFd +and +.Fn MDXFdChunk +functions are identical to their +.Fn MDXFile +and +.Fn MDXFileChunk +counterparts, with the exception that the first argument is an +.Fa fd +instead of a +.Fa filename . +.Pp +When using +.Fn MDXEnd , +.Fn MDXFile , +or +.Fn MDXData , +the +.Fa buf +argument can be a null pointer, in which case the returned string +is allocated with +.Xr malloc 3 +and subsequently must be explicitly deallocated using +.Xr free 3 +after use. +If the +.Fa buf +argument is non-null it must point to at least 33 characters of buffer space. +.Sh ERRORS +The +.Fn MDXEnd +function called with a null buf argument may fail and return NULL if: +.Bl -tag -width Er +.It Bq Er ENOMEM +Insufficient storage space is available. +.El +.Pp +The +.Fn MDXFile +and +.Fn MDXFileChunk +may return NULL when underlying +.Xr open 2 , +.Xr fstat 2 , +.Xr lseek 2 , +or +.Xr MDXEnd 3 +fail. +.Sh SEE ALSO +.Xr md4 3 , +.Xr md5 3 , +.Xr ripemd 3 , +.Xr sha 3 , +.Xr sha256 3 , +.Xr sha512 3 , +.Xr skein 3 +.Rs +.%A R. Rivest +.%T The MD4 Message-Digest Algorithm +.%O RFC 1186 +.Re +.Rs +.%A R. Rivest +.%T The MD5 Message-Digest Algorithm +.%O RFC 1321 +.Re +.Rs +.%A H. Dobbertin +.%T Alf Swindles Ann +.%J CryptoBytes +.%N 1(3):5 +.%D 1995 +.Re +.Rs +.%A MJ. B. Robshaw +.%T On Recent Results for MD2, MD4 and MD5 +.%J RSA Laboratories Bulletin +.%N 4 +.%D November 12, 1996 +.Re +.Sh HISTORY +These functions appeared in +.Fx 2.0 . +.Sh AUTHORS +The original MDX routines were developed by +RSA Data Security, Inc., and published in the above references. +This code is derived directly from these implementations by +.An Poul-Henning Kamp Aq Mt phk@FreeBSD.org . +.Pp +Phk ristede runen. +.Sh BUGS +The MD5 algorithm has been proven to be vulnerable to practical collision +attacks and should not be relied upon to produce unique outputs, +.Em nor should they be used as part of a cryptographic signature scheme. diff --git a/lib/libmd/mdXhl.c b/lib/libmd/mdXhl.c new file mode 100644 index 000000000000..68598f192056 --- /dev/null +++ b/lib/libmd/mdXhl.c @@ -0,0 +1,139 @@ +/*- mdXhl.c + * SPDX-License-Identifier: Beerware + * + * ---------------------------------------------------------------------------- + * "THE BEER-WARE LICENSE" (Revision 42): + * <phk@FreeBSD.org> wrote this file. As long as you retain this notice you + * can do whatever you want with this stuff. If we meet some day, and you think + * this stuff is worth it, you can buy me a beer in return. 
Poul-Henning Kamp + * ---------------------------------------------------------------------------- + */ + +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <unistd.h> + +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> + +#include "mdX.h" + +char * +MDXEnd(MDX_CTX *ctx, char *buf) +{ + int i; + unsigned char digest[LENGTH]; + static const char hex[]="0123456789abcdef"; + + if (!buf) + buf = malloc(2*LENGTH + 1); + if (!buf) + return 0; + MDXFinal(digest, ctx); + for (i = 0; i < LENGTH; i++) { + buf[i+i] = hex[digest[i] >> 4]; + buf[i+i+1] = hex[digest[i] & 0x0f]; + } + buf[i+i] = '\0'; + return buf; +} + +char * +MDXFd(int fd, char *buf) +{ + return MDXFdChunk(fd, buf, 0, 0); +} + +char * +MDXFdChunk(int fd, char *buf, off_t ofs, off_t len) +{ + unsigned char buffer[16*1024]; + MDX_CTX ctx; + struct stat stbuf; + int readrv, e; + off_t remain; + + if (len < 0) { + errno = EINVAL; + return NULL; + } + + MDXInit(&ctx); + if (ofs != 0) { + errno = 0; + if (lseek(fd, ofs, SEEK_SET) != ofs || + (ofs == -1 && errno != 0)) { + readrv = -1; + goto error; + } + } + remain = len; + readrv = 0; + while (len == 0 || remain > 0) { + if (len == 0 || remain > sizeof(buffer)) + readrv = read(fd, buffer, sizeof(buffer)); + else + readrv = read(fd, buffer, remain); + if (readrv <= 0) + break; + MDXUpdate(&ctx, buffer, readrv); + remain -= readrv; + } +error: + if (readrv < 0) + return NULL; + return (MDXEnd(&ctx, buf)); +} + +char * +MDXFile(const char *filename, char *buf) +{ + return (MDXFileChunk(filename, buf, 0, 0)); +} + +char * +MDXFileChunk(const char *filename, char *buf, off_t ofs, off_t len) +{ + char *ret; + int e, fd; + + fd = open(filename, O_RDONLY); + if (fd < 0) + return NULL; + ret = MDXFdChunk(fd, buf, ofs, len); + e = errno; + close (fd); + errno = e; + return ret; +} + +char * +MDXData (const void *data, unsigned int len, char *buf) +{ + MDX_CTX ctx; + + MDXInit(&ctx); + MDXUpdate(&ctx,data,len); + return (MDXEnd(&ctx, buf)); +} + +#ifdef WEAK_REFS +/* When building libmd, provide weak references. Note: this is not + activated in the context of compiling these sources for internal + use in libcrypt. + */ +#undef MDXEnd +__weak_reference(_libmd_MDXEnd, MDXEnd); +#undef MDXFile +__weak_reference(_libmd_MDXFile, MDXFile); +#undef MDXFileChunk +__weak_reference(_libmd_MDXFileChunk, MDXFileChunk); +#undef MDXFd +__weak_reference(_libmd_MDXFd, MDXFd); +#undef MDXFdChunk +__weak_reference(_libmd_MDXFdChunk, MDXFdChunk); +#undef MDXData +__weak_reference(_libmd_MDXData, MDXData); +#endif diff --git a/lib/libmd/ripemd.3 b/lib/libmd/ripemd.3 new file mode 100644 index 000000000000..cad07aac8007 --- /dev/null +++ b/lib/libmd/ripemd.3 @@ -0,0 +1,180 @@ +.\" +.\" ---------------------------------------------------------------------------- +.\" "THE BEER-WARE LICENSE" (Revision 42): +.\" <phk@FreeBSD.org> wrote this file. As long as you retain this notice you +.\" can do whatever you want with this stuff. If we meet some day, and you think +.\" this stuff is worth it, you can buy me a beer in return. 
Poul-Henning Kamp +.\" ---------------------------------------------------------------------------- +.\" +.\" From: Id: mdX.3,v 1.14 1999/02/11 20:31:49 wollman Exp +.\" +.Dd March 8, 2022 +.Dt RIPEMD 3 +.Os +.Sh NAME +.Nm RIPEMD160_Init , +.Nm RIPEMD160_Update , +.Nm RIPEMD160_Final , +.Nm RIPEMD160_End , +.Nm RIPEMD160_Fd , +.Nm RIPEMD160_FdChunk , +.Nm RIPEMD160_File , +.Nm RIPEMD160_FileChunk , +.Nm RIPEMD160_Data +.Nd calculate the RIPEMD160 message digest +.Sh LIBRARY +.Lb libmd +.Sh SYNOPSIS +.In sys/types.h +.In ripemd.h +.Ft void +.Fn RIPEMD160_Init "RIPEMD160_CTX *context" +.Ft void +.Fn RIPEMD160_Update "RIPEMD160_CTX *context" "const unsigned char *data" "unsigned int len" +.Ft void +.Fn RIPEMD160_Final "unsigned char digest[20]" "RIPEMD160_CTX *context" +.Ft "char *" +.Fn RIPEMD160_End "RIPEMD160_CTX *context" "char *buf" +.Ft "char *" +.Fn RIPEMD160_Fd "int fd" "char *buf" +.Ft "char *" +.Fn RIPEMD160_FdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn RIPEMD160_File "const char *filename" "char *buf" +.Ft "char *" +.Fn RIPEMD160_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn RIPEMD160_Data "const unsigned char *data" "unsigned int len" "char *buf" +.Sh DESCRIPTION +The +.Li RIPEMD160_ +functions calculate a 160-bit cryptographic checksum (digest) +for any number of input bytes. +A cryptographic checksum is a one-way +hash function; that is, it is computationally impractical to find +the input corresponding to a particular output. +This net result is a +.Dq fingerprint +of the input-data, which does not disclose the actual input. +.Pp +The +.Fn RIPEMD160_Init , +.Fn RIPEMD160_Update , +and +.Fn RIPEMD160_Final +functions are the core functions. +Allocate an +.Vt RIPEMD160_CTX , +initialize it with +.Fn RIPEMD160_Init , +run over the data with +.Fn RIPEMD160_Update , +and finally extract the result using +.Fn RIPEMD160_Final , +which will also erase the +.Vt RIPEMD160_CTX . +.Pp +The +.Fn RIPEMD160_End +function is a wrapper for +.Fn RIPEMD160_Final +which converts the return value to a 41-character +(including the terminating '\e0') +ASCII string which represents the 160 bits in hexadecimal. +.Pp +The +.Fn RIPEMD160_File +function calculates the digest of a file, and uses +.Fn RIPEMD160_End +to return the result. +If the file cannot be opened, a null pointer is returned. +The +.Fn RIPEMD160_FileChunk +function is similar to +.Fn RIPEMD160_File , +but it only calculates the digest over a byte-range of the file specified, +starting at +.Fa offset +and spanning +.Fa length +bytes. +If the +.Fa length +parameter is specified as 0, or more than the length of the remaining part +of the file, +.Fn RIPEMD160_FileChunk +calculates the digest from +.Fa offset +to the end of file. +The +.Fn RIPEMD160_Data +function calculates the digest of a chunk of data in memory, and uses +.Fn RIPEMD160_End +to return the result. +.Pp +The +.Fn RIPEMD160_Fd +and +.Fn RIPEMD160_FdChunk +functions are identical to their +.Fn RIPEMD160_File +and +.Fn RIPEMD160_FileChunk +counterparts, with the exception that the first argument is an +.Fa fd +instead of a +.Fa filename . +.Pp +When using +.Fn RIPEMD160_End , +.Fn RIPEMD160_File , +or +.Fn RIPEMD160_Data , +the +.Fa buf +argument can be a null pointer, in which case the returned string +is allocated with +.Xr malloc 3 +and subsequently must be explicitly deallocated using +.Xr free 3 +after use. 
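[Editor's illustration, not part of the committed ripemd.3 or of this diff; the non-null buffer case is described immediately below.] This hedged sketch exercises both conventions just covered: RIPEMD160_Data() with a caller-supplied 41-character buffer, and RIPEMD160_File() with a null buf whose malloc(3)'ed result must be freed. The message and file path are placeholders. Build with cc example.c -lmd.

        #include <sys/types.h>
        #include <ripemd.h>

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        int
        main(void)
        {
                const char msg[] = "abc";
                char hex[41];           /* 40 hex digits plus terminating '\0' */
                char *p;

                /* Caller-supplied buffer of at least 41 characters. */
                RIPEMD160_Data(msg, strlen(msg), hex);
                printf("RIPEMD160(\"%s\") = %s\n", msg, hex);

                /* Null buf: the returned string must be released with free(3). */
                p = RIPEMD160_File("/etc/services", NULL);      /* placeholder path */
                if (p == NULL) {
                        perror("RIPEMD160_File");
                        return (1);
                }
                printf("RIPEMD160(/etc/services) = %s\n", p);
                free(p);
                return (0);
        }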
+If the +.Fa buf +argument is non-null it must point to at least 41 characters of buffer space. +.Sh ERRORS +The +.Fn RIPEMD160_End +function called with a null buf argument may fail and return NULL if: +.Bl -tag -width Er +.It Bq Er ENOMEM +Insufficient storage space is available. +.El +.Pp +The +.Fn RIPEMD160_File +and +.Fn RIPEMD160_FileChunk +may return NULL when underlying +.Xr open 2 , +.Xr fstat 2 , +.Xr lseek 2 , +or +.Xr RIPEMD160_End 3 +fail. +.Sh SEE ALSO +.Xr md4 3 , +.Xr md5 3 , +.Xr sha 3 , +.Xr sha256 3 , +.Xr sha512 3 , +.Xr skein 3 +.Sh HISTORY +These functions appeared in +.Fx 4.0 . +.Sh AUTHORS +The core hash routines were implemented by Eric Young based on the +published RIPEMD160 specification. +.Sh BUGS +No method is known to exist which finds two files having the same hash value, +nor to find a file with a specific hash value. +There is on the other hand no guarantee that such a method does not exist. diff --git a/lib/libmd/ripemd.h b/lib/libmd/ripemd.h new file mode 100644 index 000000000000..252483be0a17 --- /dev/null +++ b/lib/libmd/ripemd.h @@ -0,0 +1,132 @@ +/* crypto/ripemd/ripemd.h */ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the routines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +/* + */ + +#ifndef HEADER_RIPEMD_H +#define HEADER_RIPEMD_H + +#include <sys/types.h> /* XXX switch to machine/ansi.h and __ types */ + +#define RIPEMD160_CBLOCK 64 +#define RIPEMD160_LBLOCK 16 +#define RIPEMD160_BLOCK 16 +#define RIPEMD160_LAST_BLOCK 56 +#define RIPEMD160_LENGTH_BLOCK 8 +#define RIPEMD160_DIGEST_LENGTH 20 + +typedef struct RIPEMD160state_st { + u_int32_t A,B,C,D,E; + u_int32_t Nl,Nh; + u_int32_t data[RIPEMD160_LBLOCK]; + int num; +} RIPEMD160_CTX; + +__BEGIN_DECLS + +/* Ensure libmd symbols do not clash with libcrypto */ + +#ifndef RIPEMD160_Init +#define RIPEMD160_Init _libmd_RIPEMD160_Init +#endif +#ifndef RIPEMD160_Update +#define RIPEMD160_Update _libmd_RIPEMD160_Update +#endif +#ifndef RIPEMD160_Final +#define RIPEMD160_Final _libmd_RIPEMD160_Final +#endif +#ifndef RIPEMD160_End +#define RIPEMD160_End _libmd_RIPEMD160_End +#endif +#ifndef RIPEMD160_Fd +#define RIPEMD160_Fd _libmd_RIPEMD160_Fd +#endif +#ifndef RIPEMD160_FdChunk +#define RIPEMD160_FdChunk _libmd_RIPEMD160_FdChunk +#endif +#ifndef RIPEMD160_File +#define RIPEMD160_File _libmd_RIPEMD160_File +#endif +#ifndef RIPEMD160_FileChunk +#define RIPEMD160_FileChunk _libmd_RIPEMD160_FileChunk +#endif +#ifndef RIPEMD160_Data +#define RIPEMD160_Data _libmd_RIPEMD160_Data +#endif +#ifndef RIPEMD160_Transform +#define RIPEMD160_Transform _libmd_RIPEMD160_Transform +#endif + +#ifndef ripemd160_block +#define ripemd160_block _libmd_ripemd160_block +#endif + +void RIPEMD160_Init(RIPEMD160_CTX *c); +void RIPEMD160_Update(RIPEMD160_CTX *c, const void *data, + size_t len); +void RIPEMD160_Final(unsigned char *md, RIPEMD160_CTX *c); +char *RIPEMD160_End(RIPEMD160_CTX *, char *); +char *RIPEMD160_Fd(int, char *); +char *RIPEMD160_FdChunk(int, char *, off_t, off_t); +char *RIPEMD160_File(const char *, char *); +char *RIPEMD160_FileChunk(const char *, char *, off_t, off_t); +char *RIPEMD160_Data(const void *, unsigned int, char *); +__END_DECLS + +#endif diff --git a/lib/libmd/rmd160c.c b/lib/libmd/rmd160c.c new file mode 100644 index 000000000000..92d6524b25b4 --- /dev/null +++ b/lib/libmd/rmd160c.c @@ -0,0 +1,543 @@ +/* crypto/ripemd/rmd_dgst.c */ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. 
The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the routines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#include <sys/types.h> + +#include <stdio.h> +#include <string.h> + +#if 0 +#include <machine/ansi.h> /* we use the __ variants of bit-sized types */ +#endif +#include <machine/endian.h> + +#include "rmd_locl.h" + +/* + * The assembly-language code is not position-independent, so don't + * try to use it in a shared library. 
+ */ +#ifdef PIC +#undef RMD160_ASM +#endif + +static char *RMD160_version="RIPEMD160 part of SSLeay 0.9.0b 11-Oct-1998"; + +#ifdef RMD160_ASM +void ripemd160_block_x86(RIPEMD160_CTX *c, const u_int32_t *p,int num); +#define ripemd160_block ripemd160_block_x86 +#else +void ripemd160_block(RIPEMD160_CTX *c, const u_int32_t *p,int num); +#endif + +void RIPEMD160_Init(RIPEMD160_CTX *c) + { + c->A=RIPEMD160_A; + c->B=RIPEMD160_B; + c->C=RIPEMD160_C; + c->D=RIPEMD160_D; + c->E=RIPEMD160_E; + c->Nl=0; + c->Nh=0; + c->num=0; + } + +void RIPEMD160_Update(RIPEMD160_CTX *c, const void *in, size_t len) + { + u_int32_t *p; + int sw,sc; + u_int32_t l; + const unsigned char *data = in; + + if (len == 0) return; + + l=(c->Nl+(len<<3))&0xffffffffL; + if (l < c->Nl) /* overflow */ + c->Nh++; + c->Nh+=(len>>29); + c->Nl=l; + + if (c->num != 0) + { + p=c->data; + sw=c->num>>2; + sc=c->num&0x03; + + if ((c->num+len) >= RIPEMD160_CBLOCK) + { + l= p[sw]; + p_c2l(data,l,sc); + p[sw++]=l; + for (; sw<RIPEMD160_LBLOCK; sw++) + { + c2l(data,l); + p[sw]=l; + } + len-=(RIPEMD160_CBLOCK-c->num); + + ripemd160_block(c,p,64); + c->num=0; + /* drop through and do the rest */ + } + else + { + int ew,ec; + + c->num+=(int)len; + if ((sc+len) < 4) /* ugly, add char's to a word */ + { + l= p[sw]; + p_c2l_p(data,l,sc,len); + p[sw]=l; + } + else + { + ew=(c->num>>2); + ec=(c->num&0x03); + l= p[sw]; + p_c2l(data,l,sc); + p[sw++]=l; + for (; sw < ew; sw++) + { c2l(data,l); p[sw]=l; } + if (ec) + { + c2l_p(data,l,ec); + p[sw]=l; + } + } + return; + } + } + /* we now can process the input data in blocks of RIPEMD160_CBLOCK + * chars and save the leftovers to c->data. */ +#if BYTE_ORDER == LITTLE_ENDIAN + if ((((unsigned long)data)%sizeof(u_int32_t)) == 0) + { + sw=(int)len/RIPEMD160_CBLOCK; + if (sw > 0) + { + sw*=RIPEMD160_CBLOCK; + ripemd160_block(c,(u_int32_t *)data,sw); + data+=sw; + len-=sw; + } + } +#endif + p=c->data; + while (len >= RIPEMD160_CBLOCK) + { +#if BYTE_ORDER == LITTLE_ENDIAN || BYTE_ORDER == BIG_ENDIAN + if (p != (u_int32_t *)data) + memcpy(p,data,RIPEMD160_CBLOCK); + data+=RIPEMD160_CBLOCK; +#if BYTE_ORDER == BIG_ENDIAN + for (sw=(RIPEMD160_LBLOCK/4); sw; sw--) + { + Endian_Reverse32(p[0]); + Endian_Reverse32(p[1]); + Endian_Reverse32(p[2]); + Endian_Reverse32(p[3]); + p+=4; + } +#endif +#else + for (sw=(RIPEMD160_LBLOCK/4); sw; sw--) + { + c2l(data,l); *(p++)=l; + c2l(data,l); *(p++)=l; + c2l(data,l); *(p++)=l; + c2l(data,l); *(p++)=l; + } +#endif + p=c->data; + ripemd160_block(c,p,64); + len-=RIPEMD160_CBLOCK; + } + sc=(int)len; + c->num=sc; + if (sc) + { + sw=sc>>2; /* words to copy */ +#if BYTE_ORDER == LITTLE_ENDIAN + p[sw]=0; + memcpy(p,data,sc); +#else + sc&=0x03; + for ( ; sw; sw--) + { c2l(data,l); *(p++)=l; } + c2l_p(data,l,sc); + *p=l; +#endif + } + } + +static void RIPEMD160_Transform(RIPEMD160_CTX *c, unsigned char *b) + { + u_int32_t p[16]; +#if BYTE_ORDER != LITTLE_ENDIAN + u_int32_t *q; + int i; +#endif + +#if BYTE_ORDER == BIG_ENDIAN || BYTE_ORDER == LITTLE_ENDIAN + memcpy(p,b,64); +#if BYTE_ORDER == BIG_ENDIAN + q=p; + for (i=(RIPEMD160_LBLOCK/4); i; i--) + { + Endian_Reverse32(q[0]); + Endian_Reverse32(q[1]); + Endian_Reverse32(q[2]); + Endian_Reverse32(q[3]); + q+=4; + } +#endif +#else + q=p; + for (i=(RIPEMD160_LBLOCK/4); i; i--) + { + u_int32_t l; + c2l(b,l); *(q++)=l; + c2l(b,l); *(q++)=l; + c2l(b,l); *(q++)=l; + c2l(b,l); *(q++)=l; + } +#endif + ripemd160_block(c,p,64); + } + +#ifndef RMD160_ASM + +void ripemd160_block(RIPEMD160_CTX *ctx, const u_int32_t *X, int num) + { + u_int32_t 
A,B,C,D,E; + u_int32_t a,b,c,d,e; + + for (;;) + { + A=ctx->A; B=ctx->B; C=ctx->C; D=ctx->D; E=ctx->E; + + RIP1(A,B,C,D,E,WL00,SL00); + RIP1(E,A,B,C,D,WL01,SL01); + RIP1(D,E,A,B,C,WL02,SL02); + RIP1(C,D,E,A,B,WL03,SL03); + RIP1(B,C,D,E,A,WL04,SL04); + RIP1(A,B,C,D,E,WL05,SL05); + RIP1(E,A,B,C,D,WL06,SL06); + RIP1(D,E,A,B,C,WL07,SL07); + RIP1(C,D,E,A,B,WL08,SL08); + RIP1(B,C,D,E,A,WL09,SL09); + RIP1(A,B,C,D,E,WL10,SL10); + RIP1(E,A,B,C,D,WL11,SL11); + RIP1(D,E,A,B,C,WL12,SL12); + RIP1(C,D,E,A,B,WL13,SL13); + RIP1(B,C,D,E,A,WL14,SL14); + RIP1(A,B,C,D,E,WL15,SL15); + + RIP2(E,A,B,C,D,WL16,SL16,KL1); + RIP2(D,E,A,B,C,WL17,SL17,KL1); + RIP2(C,D,E,A,B,WL18,SL18,KL1); + RIP2(B,C,D,E,A,WL19,SL19,KL1); + RIP2(A,B,C,D,E,WL20,SL20,KL1); + RIP2(E,A,B,C,D,WL21,SL21,KL1); + RIP2(D,E,A,B,C,WL22,SL22,KL1); + RIP2(C,D,E,A,B,WL23,SL23,KL1); + RIP2(B,C,D,E,A,WL24,SL24,KL1); + RIP2(A,B,C,D,E,WL25,SL25,KL1); + RIP2(E,A,B,C,D,WL26,SL26,KL1); + RIP2(D,E,A,B,C,WL27,SL27,KL1); + RIP2(C,D,E,A,B,WL28,SL28,KL1); + RIP2(B,C,D,E,A,WL29,SL29,KL1); + RIP2(A,B,C,D,E,WL30,SL30,KL1); + RIP2(E,A,B,C,D,WL31,SL31,KL1); + + RIP3(D,E,A,B,C,WL32,SL32,KL2); + RIP3(C,D,E,A,B,WL33,SL33,KL2); + RIP3(B,C,D,E,A,WL34,SL34,KL2); + RIP3(A,B,C,D,E,WL35,SL35,KL2); + RIP3(E,A,B,C,D,WL36,SL36,KL2); + RIP3(D,E,A,B,C,WL37,SL37,KL2); + RIP3(C,D,E,A,B,WL38,SL38,KL2); + RIP3(B,C,D,E,A,WL39,SL39,KL2); + RIP3(A,B,C,D,E,WL40,SL40,KL2); + RIP3(E,A,B,C,D,WL41,SL41,KL2); + RIP3(D,E,A,B,C,WL42,SL42,KL2); + RIP3(C,D,E,A,B,WL43,SL43,KL2); + RIP3(B,C,D,E,A,WL44,SL44,KL2); + RIP3(A,B,C,D,E,WL45,SL45,KL2); + RIP3(E,A,B,C,D,WL46,SL46,KL2); + RIP3(D,E,A,B,C,WL47,SL47,KL2); + + RIP4(C,D,E,A,B,WL48,SL48,KL3); + RIP4(B,C,D,E,A,WL49,SL49,KL3); + RIP4(A,B,C,D,E,WL50,SL50,KL3); + RIP4(E,A,B,C,D,WL51,SL51,KL3); + RIP4(D,E,A,B,C,WL52,SL52,KL3); + RIP4(C,D,E,A,B,WL53,SL53,KL3); + RIP4(B,C,D,E,A,WL54,SL54,KL3); + RIP4(A,B,C,D,E,WL55,SL55,KL3); + RIP4(E,A,B,C,D,WL56,SL56,KL3); + RIP4(D,E,A,B,C,WL57,SL57,KL3); + RIP4(C,D,E,A,B,WL58,SL58,KL3); + RIP4(B,C,D,E,A,WL59,SL59,KL3); + RIP4(A,B,C,D,E,WL60,SL60,KL3); + RIP4(E,A,B,C,D,WL61,SL61,KL3); + RIP4(D,E,A,B,C,WL62,SL62,KL3); + RIP4(C,D,E,A,B,WL63,SL63,KL3); + + RIP5(B,C,D,E,A,WL64,SL64,KL4); + RIP5(A,B,C,D,E,WL65,SL65,KL4); + RIP5(E,A,B,C,D,WL66,SL66,KL4); + RIP5(D,E,A,B,C,WL67,SL67,KL4); + RIP5(C,D,E,A,B,WL68,SL68,KL4); + RIP5(B,C,D,E,A,WL69,SL69,KL4); + RIP5(A,B,C,D,E,WL70,SL70,KL4); + RIP5(E,A,B,C,D,WL71,SL71,KL4); + RIP5(D,E,A,B,C,WL72,SL72,KL4); + RIP5(C,D,E,A,B,WL73,SL73,KL4); + RIP5(B,C,D,E,A,WL74,SL74,KL4); + RIP5(A,B,C,D,E,WL75,SL75,KL4); + RIP5(E,A,B,C,D,WL76,SL76,KL4); + RIP5(D,E,A,B,C,WL77,SL77,KL4); + RIP5(C,D,E,A,B,WL78,SL78,KL4); + RIP5(B,C,D,E,A,WL79,SL79,KL4); + + a=A; b=B; c=C; d=D; e=E; + /* Do other half */ + A=ctx->A; B=ctx->B; C=ctx->C; D=ctx->D; E=ctx->E; + + RIP5(A,B,C,D,E,WR00,SR00,KR0); + RIP5(E,A,B,C,D,WR01,SR01,KR0); + RIP5(D,E,A,B,C,WR02,SR02,KR0); + RIP5(C,D,E,A,B,WR03,SR03,KR0); + RIP5(B,C,D,E,A,WR04,SR04,KR0); + RIP5(A,B,C,D,E,WR05,SR05,KR0); + RIP5(E,A,B,C,D,WR06,SR06,KR0); + RIP5(D,E,A,B,C,WR07,SR07,KR0); + RIP5(C,D,E,A,B,WR08,SR08,KR0); + RIP5(B,C,D,E,A,WR09,SR09,KR0); + RIP5(A,B,C,D,E,WR10,SR10,KR0); + RIP5(E,A,B,C,D,WR11,SR11,KR0); + RIP5(D,E,A,B,C,WR12,SR12,KR0); + RIP5(C,D,E,A,B,WR13,SR13,KR0); + RIP5(B,C,D,E,A,WR14,SR14,KR0); + RIP5(A,B,C,D,E,WR15,SR15,KR0); + + RIP4(E,A,B,C,D,WR16,SR16,KR1); + RIP4(D,E,A,B,C,WR17,SR17,KR1); + RIP4(C,D,E,A,B,WR18,SR18,KR1); + RIP4(B,C,D,E,A,WR19,SR19,KR1); + RIP4(A,B,C,D,E,WR20,SR20,KR1); + RIP4(E,A,B,C,D,WR21,SR21,KR1); + RIP4(D,E,A,B,C,WR22,SR22,KR1); 
+ RIP4(C,D,E,A,B,WR23,SR23,KR1); + RIP4(B,C,D,E,A,WR24,SR24,KR1); + RIP4(A,B,C,D,E,WR25,SR25,KR1); + RIP4(E,A,B,C,D,WR26,SR26,KR1); + RIP4(D,E,A,B,C,WR27,SR27,KR1); + RIP4(C,D,E,A,B,WR28,SR28,KR1); + RIP4(B,C,D,E,A,WR29,SR29,KR1); + RIP4(A,B,C,D,E,WR30,SR30,KR1); + RIP4(E,A,B,C,D,WR31,SR31,KR1); + + RIP3(D,E,A,B,C,WR32,SR32,KR2); + RIP3(C,D,E,A,B,WR33,SR33,KR2); + RIP3(B,C,D,E,A,WR34,SR34,KR2); + RIP3(A,B,C,D,E,WR35,SR35,KR2); + RIP3(E,A,B,C,D,WR36,SR36,KR2); + RIP3(D,E,A,B,C,WR37,SR37,KR2); + RIP3(C,D,E,A,B,WR38,SR38,KR2); + RIP3(B,C,D,E,A,WR39,SR39,KR2); + RIP3(A,B,C,D,E,WR40,SR40,KR2); + RIP3(E,A,B,C,D,WR41,SR41,KR2); + RIP3(D,E,A,B,C,WR42,SR42,KR2); + RIP3(C,D,E,A,B,WR43,SR43,KR2); + RIP3(B,C,D,E,A,WR44,SR44,KR2); + RIP3(A,B,C,D,E,WR45,SR45,KR2); + RIP3(E,A,B,C,D,WR46,SR46,KR2); + RIP3(D,E,A,B,C,WR47,SR47,KR2); + + RIP2(C,D,E,A,B,WR48,SR48,KR3); + RIP2(B,C,D,E,A,WR49,SR49,KR3); + RIP2(A,B,C,D,E,WR50,SR50,KR3); + RIP2(E,A,B,C,D,WR51,SR51,KR3); + RIP2(D,E,A,B,C,WR52,SR52,KR3); + RIP2(C,D,E,A,B,WR53,SR53,KR3); + RIP2(B,C,D,E,A,WR54,SR54,KR3); + RIP2(A,B,C,D,E,WR55,SR55,KR3); + RIP2(E,A,B,C,D,WR56,SR56,KR3); + RIP2(D,E,A,B,C,WR57,SR57,KR3); + RIP2(C,D,E,A,B,WR58,SR58,KR3); + RIP2(B,C,D,E,A,WR59,SR59,KR3); + RIP2(A,B,C,D,E,WR60,SR60,KR3); + RIP2(E,A,B,C,D,WR61,SR61,KR3); + RIP2(D,E,A,B,C,WR62,SR62,KR3); + RIP2(C,D,E,A,B,WR63,SR63,KR3); + + RIP1(B,C,D,E,A,WR64,SR64); + RIP1(A,B,C,D,E,WR65,SR65); + RIP1(E,A,B,C,D,WR66,SR66); + RIP1(D,E,A,B,C,WR67,SR67); + RIP1(C,D,E,A,B,WR68,SR68); + RIP1(B,C,D,E,A,WR69,SR69); + RIP1(A,B,C,D,E,WR70,SR70); + RIP1(E,A,B,C,D,WR71,SR71); + RIP1(D,E,A,B,C,WR72,SR72); + RIP1(C,D,E,A,B,WR73,SR73); + RIP1(B,C,D,E,A,WR74,SR74); + RIP1(A,B,C,D,E,WR75,SR75); + RIP1(E,A,B,C,D,WR76,SR76); + RIP1(D,E,A,B,C,WR77,SR77); + RIP1(C,D,E,A,B,WR78,SR78); + RIP1(B,C,D,E,A,WR79,SR79); + + D =ctx->B+c+D; + ctx->B=ctx->C+d+E; + ctx->C=ctx->D+e+A; + ctx->D=ctx->E+a+B; + ctx->E=ctx->A+b+C; + ctx->A=D; + + X+=16; + num-=64; + if (num <= 0) break; + } + } +#endif + +void RIPEMD160_Final(unsigned char *md, RIPEMD160_CTX *c) + { + int i,j; + u_int32_t l; + u_int32_t *p; + static unsigned char end[4]={0x80,0x00,0x00,0x00}; + unsigned char *cp=end; + + /* c->num should definitly have room for at least one more byte. */ + p=c->data; + j=c->num; + i=j>>2; + + /* purify often complains about the following line as an + * Uninitialized Memory Read. While this can be true, the + * following p_c2l macro will reset l when that case is true. + * This is because j&0x03 contains the number of 'valid' bytes + * already in p[i]. 
If and only if j&0x03 == 0, the UMR will + * occur but this is also the only time p_c2l will do + * l= *(cp++) instead of l|= *(cp++) + * Many thanks to Alex Tang <altitude@cic.net> for pickup this + * 'potential bug' */ +#ifdef PURIFY + if ((j&0x03) == 0) p[i]=0; +#endif + l=p[i]; + p_c2l(cp,l,j&0x03); + p[i]=l; + i++; + /* i is the next 'undefined word' */ + if (c->num >= RIPEMD160_LAST_BLOCK) + { + for (; i<RIPEMD160_LBLOCK; i++) + p[i]=0; + ripemd160_block(c,p,64); + i=0; + } + for (; i<(RIPEMD160_LBLOCK-2); i++) + p[i]=0; + p[RIPEMD160_LBLOCK-2]=c->Nl; + p[RIPEMD160_LBLOCK-1]=c->Nh; + ripemd160_block(c,p,64); + cp=md; + l=c->A; l2c(l,cp); + l=c->B; l2c(l,cp); + l=c->C; l2c(l,cp); + l=c->D; l2c(l,cp); + l=c->E; l2c(l,cp); + + /* Clear the context state */ + explicit_bzero(&c, sizeof(c)); + } + +#ifdef undef +int printit(unsigned long *l) + { + int i,ii; + + for (i=0; i<2; i++) + { + for (ii=0; ii<8; ii++) + { + fprintf(stderr,"%08lx ",l[i*8+ii]); + } + fprintf(stderr,"\n"); + } + } +#endif + +#ifdef WEAK_REFS +/* When building libmd, provide weak references. Note: this is not + activated in the context of compiling these sources for internal + use in libcrypt. + */ +#undef RIPEMD160_Init +__weak_reference(_libmd_RIPEMD160_Init, RIPEMD160_Init); +#undef RIPEMD160_Update +__weak_reference(_libmd_RIPEMD160_Update, RIPEMD160_Update); +#undef RIPEMD160_Final +__weak_reference(_libmd_RIPEMD160_Final, RIPEMD160_Final); +#endif diff --git a/lib/libmd/rmd_locl.h b/lib/libmd/rmd_locl.h new file mode 100644 index 000000000000..58d6f2d1e880 --- /dev/null +++ b/lib/libmd/rmd_locl.h @@ -0,0 +1,216 @@ +/* crypto/ripemd/rmd_locl.h */ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the routines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#include "ripemd.h" + +#undef c2nl +#define c2nl(c,l) (l =(((u_int32_t)(*((c)++)))<<24), \ + l|=(((u_int32_t)(*((c)++)))<<16), \ + l|=(((u_int32_t)(*((c)++)))<< 8), \ + l|=(((u_int32_t)(*((c)++))) )) + +#undef p_c2nl +#define p_c2nl(c,l,n) { \ + switch (n) { \ + case 0: l =((u_int32_t)(*((c)++)))<<24; \ + case 1: l|=((u_int32_t)(*((c)++)))<<16; \ + case 2: l|=((u_int32_t)(*((c)++)))<< 8; \ + case 3: l|=((u_int32_t)(*((c)++))); \ + } \ + } + +#undef c2nl_p +/* NOTE the pointer is not incremented at the end of this */ +#define c2nl_p(c,l,n) { \ + l=0; \ + (c)+=n; \ + switch (n) { \ + case 3: l =((u_int32_t)(*(--(c))))<< 8; \ + case 2: l|=((u_int32_t)(*(--(c))))<<16; \ + case 1: l|=((u_int32_t)(*(--(c))))<<24; \ + } \ + } + +#undef p_c2nl_p +#define p_c2nl_p(c,l,sc,len) { \ + switch (sc) \ + { \ + case 0: l =((u_int32_t)(*((c)++)))<<24; \ + if (--len == 0) break; \ + case 1: l|=((u_int32_t)(*((c)++)))<<16; \ + if (--len == 0) break; \ + case 2: l|=((u_int32_t)(*((c)++)))<< 8; \ + } \ + } + +#undef nl2c +#define nl2c(l,c) (*((c)++)=(unsigned char)(((l)>>24)&0xff), \ + *((c)++)=(unsigned char)(((l)>>16)&0xff), \ + *((c)++)=(unsigned char)(((l)>> 8)&0xff), \ + *((c)++)=(unsigned char)(((l) )&0xff)) + +#undef c2l +#define c2l(c,l) (l =(((u_int32_t)(*((c)++))) ), \ + l|=(((u_int32_t)(*((c)++)))<< 8), \ + l|=(((u_int32_t)(*((c)++)))<<16), \ + l|=(((u_int32_t)(*((c)++)))<<24)) + +#undef p_c2l +#define p_c2l(c,l,n) { \ + switch (n) { \ + case 0: l =((u_int32_t)(*((c)++))); \ + case 1: l|=((u_int32_t)(*((c)++)))<< 8; \ + case 2: l|=((u_int32_t)(*((c)++)))<<16; \ + case 3: l|=((u_int32_t)(*((c)++)))<<24; \ + } \ + } + +#undef c2l_p +/* NOTE the pointer is not incremented at the end of this */ +#define c2l_p(c,l,n) { \ + l=0; \ + (c)+=n; \ + switch (n) { \ + case 3: l =((u_int32_t)(*(--(c))))<<16; \ + case 2: l|=((u_int32_t)(*(--(c))))<< 8; \ + case 1: l|=((u_int32_t)(*(--(c)))); \ + } \ + } + +#undef p_c2l_p +#define 
p_c2l_p(c,l,sc,len) { \ + switch (sc) \ + { \ + case 0: l =((u_int32_t)(*((c)++))); \ + if (--len == 0) break; \ + case 1: l|=((u_int32_t)(*((c)++)))<< 8; \ + if (--len == 0) break; \ + case 2: l|=((u_int32_t)(*((c)++)))<<16; \ + } \ + } + +#undef l2c +#define l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \ + *((c)++)=(unsigned char)(((l)>> 8)&0xff), \ + *((c)++)=(unsigned char)(((l)>>16)&0xff), \ + *((c)++)=(unsigned char)(((l)>>24)&0xff)) + +#undef ROTATE +#if defined(WIN32) +#define ROTATE(a,n) _lrotl(a,n) +#else +#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n)))) +#endif + +/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */ +#if defined(WIN32) +/* 5 instructions with rotate instruction, else 9 */ +#define Endian_Reverse32(a) \ + { \ + u_int32_t l=(a); \ + (a)=((ROTATE(l,8)&0x00FF00FF)|(ROTATE(l,24)&0xFF00FF00)); \ + } +#else +/* 6 instructions with rotate instruction, else 8 */ +#define Endian_Reverse32(a) \ + { \ + u_int32_t l=(a); \ + l=(((l&0xFF00FF00)>>8L)|((l&0x00FF00FF)<<8L)); \ + (a)=ROTATE(l,16L); \ + } +#endif + +#define F1(x,y,z) ((x)^(y)^(z)) +#define F2(x,y,z) (((x)&(y))|((~x)&z)) +#define F3(x,y,z) (((x)|(~y))^(z)) +#define F4(x,y,z) (((x)&(z))|((y)&(~(z)))) +#define F5(x,y,z) ((x)^((y)|(~(z)))) + +#define RIPEMD160_A 0x67452301L +#define RIPEMD160_B 0xEFCDAB89L +#define RIPEMD160_C 0x98BADCFEL +#define RIPEMD160_D 0x10325476L +#define RIPEMD160_E 0xC3D2E1F0L + +#include "rmdconst.h" + +#define RIP1(a,b,c,d,e,w,s) { \ + a+=F1(b,c,d)+X[w]; \ + a=ROTATE(a,s)+e; \ + c=ROTATE(c,10); } + +#define RIP2(a,b,c,d,e,w,s,K) { \ + a+=F2(b,c,d)+X[w]+K; \ + a=ROTATE(a,s)+e; \ + c=ROTATE(c,10); } + +#define RIP3(a,b,c,d,e,w,s,K) { \ + a+=F3(b,c,d)+X[w]+K; \ + a=ROTATE(a,s)+e; \ + c=ROTATE(c,10); } + +#define RIP4(a,b,c,d,e,w,s,K) { \ + a+=F4(b,c,d)+X[w]+K; \ + a=ROTATE(a,s)+e; \ + c=ROTATE(c,10); } + +#define RIP5(a,b,c,d,e,w,s,K) { \ + a+=F5(b,c,d)+X[w]+K; \ + a=ROTATE(a,s)+e; \ + c=ROTATE(c,10); } + diff --git a/lib/libmd/rmdconst.h b/lib/libmd/rmdconst.h new file mode 100644 index 000000000000..f7aa9939fabe --- /dev/null +++ b/lib/libmd/rmdconst.h @@ -0,0 +1,399 @@ +/* crypto/ripemd/rmdconst.h */ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the routines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
+ */ +#define KL0 0x00000000L +#define KL1 0x5A827999L +#define KL2 0x6ED9EBA1L +#define KL3 0x8F1BBCDCL +#define KL4 0xA953FD4EL + +#define KR0 0x50A28BE6L +#define KR1 0x5C4DD124L +#define KR2 0x6D703EF3L +#define KR3 0x7A6D76E9L +#define KR4 0x00000000L + +#define WL00 0 +#define SL00 11 +#define WL01 1 +#define SL01 14 +#define WL02 2 +#define SL02 15 +#define WL03 3 +#define SL03 12 +#define WL04 4 +#define SL04 5 +#define WL05 5 +#define SL05 8 +#define WL06 6 +#define SL06 7 +#define WL07 7 +#define SL07 9 +#define WL08 8 +#define SL08 11 +#define WL09 9 +#define SL09 13 +#define WL10 10 +#define SL10 14 +#define WL11 11 +#define SL11 15 +#define WL12 12 +#define SL12 6 +#define WL13 13 +#define SL13 7 +#define WL14 14 +#define SL14 9 +#define WL15 15 +#define SL15 8 + +#define WL16 7 +#define SL16 7 +#define WL17 4 +#define SL17 6 +#define WL18 13 +#define SL18 8 +#define WL19 1 +#define SL19 13 +#define WL20 10 +#define SL20 11 +#define WL21 6 +#define SL21 9 +#define WL22 15 +#define SL22 7 +#define WL23 3 +#define SL23 15 +#define WL24 12 +#define SL24 7 +#define WL25 0 +#define SL25 12 +#define WL26 9 +#define SL26 15 +#define WL27 5 +#define SL27 9 +#define WL28 2 +#define SL28 11 +#define WL29 14 +#define SL29 7 +#define WL30 11 +#define SL30 13 +#define WL31 8 +#define SL31 12 + +#define WL32 3 +#define SL32 11 +#define WL33 10 +#define SL33 13 +#define WL34 14 +#define SL34 6 +#define WL35 4 +#define SL35 7 +#define WL36 9 +#define SL36 14 +#define WL37 15 +#define SL37 9 +#define WL38 8 +#define SL38 13 +#define WL39 1 +#define SL39 15 +#define WL40 2 +#define SL40 14 +#define WL41 7 +#define SL41 8 +#define WL42 0 +#define SL42 13 +#define WL43 6 +#define SL43 6 +#define WL44 13 +#define SL44 5 +#define WL45 11 +#define SL45 12 +#define WL46 5 +#define SL46 7 +#define WL47 12 +#define SL47 5 + +#define WL48 1 +#define SL48 11 +#define WL49 9 +#define SL49 12 +#define WL50 11 +#define SL50 14 +#define WL51 10 +#define SL51 15 +#define WL52 0 +#define SL52 14 +#define WL53 8 +#define SL53 15 +#define WL54 12 +#define SL54 9 +#define WL55 4 +#define SL55 8 +#define WL56 13 +#define SL56 9 +#define WL57 3 +#define SL57 14 +#define WL58 7 +#define SL58 5 +#define WL59 15 +#define SL59 6 +#define WL60 14 +#define SL60 8 +#define WL61 5 +#define SL61 6 +#define WL62 6 +#define SL62 5 +#define WL63 2 +#define SL63 12 + +#define WL64 4 +#define SL64 9 +#define WL65 0 +#define SL65 15 +#define WL66 5 +#define SL66 5 +#define WL67 9 +#define SL67 11 +#define WL68 7 +#define SL68 6 +#define WL69 12 +#define SL69 8 +#define WL70 2 +#define SL70 13 +#define WL71 10 +#define SL71 12 +#define WL72 14 +#define SL72 5 +#define WL73 1 +#define SL73 12 +#define WL74 3 +#define SL74 13 +#define WL75 8 +#define SL75 14 +#define WL76 11 +#define SL76 11 +#define WL77 6 +#define SL77 8 +#define WL78 15 +#define SL78 5 +#define WL79 13 +#define SL79 6 + +#define WR00 5 +#define SR00 8 +#define WR01 14 +#define SR01 9 +#define WR02 7 +#define SR02 9 +#define WR03 0 +#define SR03 11 +#define WR04 9 +#define SR04 13 +#define WR05 2 +#define SR05 15 +#define WR06 11 +#define SR06 15 +#define WR07 4 +#define SR07 5 +#define WR08 13 +#define SR08 7 +#define WR09 6 +#define SR09 7 +#define WR10 15 +#define SR10 8 +#define WR11 8 +#define SR11 11 +#define WR12 1 +#define SR12 14 +#define WR13 10 +#define SR13 14 +#define WR14 3 +#define SR14 12 +#define WR15 12 +#define SR15 6 + +#define WR16 6 +#define SR16 9 +#define WR17 11 +#define SR17 13 +#define WR18 3 +#define SR18 15 +#define WR19 7 +#define 
SR19 7 +#define WR20 0 +#define SR20 12 +#define WR21 13 +#define SR21 8 +#define WR22 5 +#define SR22 9 +#define WR23 10 +#define SR23 11 +#define WR24 14 +#define SR24 7 +#define WR25 15 +#define SR25 7 +#define WR26 8 +#define SR26 12 +#define WR27 12 +#define SR27 7 +#define WR28 4 +#define SR28 6 +#define WR29 9 +#define SR29 15 +#define WR30 1 +#define SR30 13 +#define WR31 2 +#define SR31 11 + +#define WR32 15 +#define SR32 9 +#define WR33 5 +#define SR33 7 +#define WR34 1 +#define SR34 15 +#define WR35 3 +#define SR35 11 +#define WR36 7 +#define SR36 8 +#define WR37 14 +#define SR37 6 +#define WR38 6 +#define SR38 6 +#define WR39 9 +#define SR39 14 +#define WR40 11 +#define SR40 12 +#define WR41 8 +#define SR41 13 +#define WR42 12 +#define SR42 5 +#define WR43 2 +#define SR43 14 +#define WR44 10 +#define SR44 13 +#define WR45 0 +#define SR45 13 +#define WR46 4 +#define SR46 7 +#define WR47 13 +#define SR47 5 + +#define WR48 8 +#define SR48 15 +#define WR49 6 +#define SR49 5 +#define WR50 4 +#define SR50 8 +#define WR51 1 +#define SR51 11 +#define WR52 3 +#define SR52 14 +#define WR53 11 +#define SR53 14 +#define WR54 15 +#define SR54 6 +#define WR55 0 +#define SR55 14 +#define WR56 5 +#define SR56 6 +#define WR57 12 +#define SR57 9 +#define WR58 2 +#define SR58 12 +#define WR59 13 +#define SR59 9 +#define WR60 9 +#define SR60 12 +#define WR61 7 +#define SR61 5 +#define WR62 10 +#define SR62 15 +#define WR63 14 +#define SR63 8 + +#define WR64 12 +#define SR64 8 +#define WR65 15 +#define SR65 5 +#define WR66 10 +#define SR66 12 +#define WR67 4 +#define SR67 9 +#define WR68 1 +#define SR68 12 +#define WR69 5 +#define SR69 5 +#define WR70 8 +#define SR70 14 +#define WR71 7 +#define SR71 6 +#define WR72 6 +#define SR72 8 +#define WR73 2 +#define SR73 13 +#define WR74 13 +#define SR74 6 +#define WR75 14 +#define SR75 5 +#define WR76 0 +#define SR76 15 +#define WR77 3 +#define SR77 13 +#define WR78 9 +#define SR78 11 +#define WR79 11 +#define SR79 11 + diff --git a/lib/libmd/sha.3 b/lib/libmd/sha.3 new file mode 100644 index 000000000000..e2475f981738 --- /dev/null +++ b/lib/libmd/sha.3 @@ -0,0 +1,211 @@ +.\" +.\" ---------------------------------------------------------------------------- +.\" "THE BEER-WARE LICENSE" (Revision 42): +.\" <phk@FreeBSD.org> wrote this file. As long as you retain this notice you +.\" can do whatever you want with this stuff. If we meet some day, and you think +.\" this stuff is worth it, you can buy me a beer in return. 
Poul-Henning Kamp +.\" ---------------------------------------------------------------------------- +.\" +.\" From: Id: mdX.3,v 1.14 1999/02/11 20:31:49 wollman Exp +.\" +.Dd February 12, 2023 +.Dt SHA 3 +.Os +.Sh NAME +.Nm SHA_Init , +.Nm SHA_Update , +.Nm SHA_Final , +.Nm SHA_End , +.Nm SHA_Fd , +.Nm SHA_FdChunk , +.Nm SHA_File , +.Nm SHA_FileChunk , +.Nm SHA_Data , +.Nm SHA1_Init , +.Nm SHA1_Update , +.Nm SHA1_Final , +.Nm SHA1_End , +.Nm SHA1_Fd , +.Nm SHA1_FdChunk , +.Nm SHA1_File , +.Nm SHA1_FileChunk , +.Nm SHA1_Data +.Nd calculate the FIPS 160 and 160-1 ``SHA'' message digests +.Sh LIBRARY +.Lb libmd +.Sh SYNOPSIS +.In sys/types.h +.In sha.h +.Ft void +.Fn SHA_Init "SHA_CTX *context" +.Ft void +.Fn SHA_Update "SHA_CTX *context" "const unsigned char *data" "size_t len" +.Ft void +.Fn SHA_Final "unsigned char digest[20]" "SHA_CTX *context" +.Ft "char *" +.Fn SHA_End "SHA_CTX *context" "char *buf" +.Ft "char *" +.Fn SHA_Fd "int fd" "char *buf" +.Ft "char *" +.Fn SHA_FdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA_File "const char *filename" "char *buf" +.Ft "char *" +.Fn SHA_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA_Data "const unsigned char *data" "unsigned int len" "char *buf" +.Ft void +.Fn SHA1_Init "SHA_CTX *context" +.Ft void +.Fn SHA1_Update "SHA_CTX *context" "const unsigned char *data" "size_t len" +.Ft void +.Fn SHA1_Final "unsigned char digest[20]" "SHA_CTX *context" +.Ft "char *" +.Fn SHA1_End "SHA_CTX *context" "char *buf" +.Ft "char *" +.Fn SHA1_Fd "int fd" "char *buf" +.Ft "char *" +.Fn SHA1_FdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA1_File "const char *filename" "char *buf" +.Ft "char *" +.Fn SHA1_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA1_Data "const unsigned char *data" "unsigned int len" "char *buf" +.Sh DESCRIPTION +The +.Li SHA_ +and +.Li SHA1_ +functions calculate a 160-bit cryptographic checksum (digest) +for any number of input bytes. +A cryptographic checksum is a one-way +hash function; that is, it is computationally impractical to find +the input corresponding to a particular output. +This net result is +a +.Dq fingerprint +of the input-data, which does not disclose the actual input. +.Pp +SHA (or SHA-0) is the original Secure Hash Algorithm specified in FIPS 160. +It was quickly proven insecure, and has been superseded by SHA-1. +SHA-0 is included for compatibility purposes only. +.Pp +The +.Fn SHA1_Init , +.Fn SHA1_Update , +and +.Fn SHA1_Final +functions are the core functions. +Allocate an +.Vt SHA_CTX , +initialize it with +.Fn SHA1_Init , +run over the data with +.Fn SHA1_Update , +and finally extract the result using +.Fn SHA1_Final , +which will also erase the +.Vt SHA_CTX . +.Pp +.Fn SHA1_End +is a wrapper for +.Fn SHA1_Final +which converts the return value to a 41-character +(including the terminating '\e0') +ASCII string which represents the 160 bits in hexadecimal. +.Pp +.Fn SHA1_File +calculates the digest of a file, and uses +.Fn SHA1_End +to return the result. +If the file cannot be opened, a null pointer is returned. +.Fn SHA1_FileChunk +is similar to +.Fn SHA1_File , +but it only calculates the digest over a byte-range of the file specified, +starting at +.Fa offset +and spanning +.Fa length +bytes. 
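[Editor's illustration, not part of the committed sha.3 or of this diff; the description of the length parameter continues below.] As a hedged sketch of the core streaming interface documented above, the fragment hashes two buffers incrementally with SHA1_Init(), SHA1_Update() and SHA1_End(), passing a null buf so the 41-character hex string is allocated with malloc(3). The input strings are arbitrary. Build with cc example.c -lmd.

        #include <sys/types.h>
        #include <sha.h>

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        int
        main(void)
        {
                SHA_CTX ctx;
                char *hex;

                SHA1_Init(&ctx);
                SHA1_Update(&ctx, "hello, ", strlen("hello, "));
                SHA1_Update(&ctx, "world", strlen("world"));

                /* SHA1_End() finalizes the context and returns the hex digest. */
                hex = SHA1_End(&ctx, NULL);
                if (hex == NULL)
                        return (1);
                printf("SHA1(\"hello, world\") = %s\n", hex);
                free(hex);
                return (0);
        }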
+If the +.Fa length +parameter is specified as 0, or more than the length of the remaining part +of the file, +.Fn SHA1_FileChunk +calculates the digest from +.Fa offset +to the end of file. +.Fn SHA1_Data +calculates the digest of a chunk of data in memory, and uses +.Fn SHA1_End +to return the result. +.Pp +The +.Fn SHA1_Fd +and +.Fn SHA1_FdChunk +functions are identical to their +.Fn SHA1_File +and +.Fn SHA1_FileChunk +counterparts, with the exception that the first argument is an +.Fa fd +instead of a +.Fa filename . +.Pp +When using +.Fn SHA1_End , +.Fn SHA1_File , +or +.Fn SHA1_Data , +the +.Fa buf +argument can be a null pointer, in which case the returned string +is allocated with +.Xr malloc 3 +and subsequently must be explicitly deallocated using +.Xr free 3 +after use. +If the +.Fa buf +argument is non-null it must point to at least 41 characters of buffer space. +.Sh ERRORS +The +.Fn SHA1_End +function called with a null buf argument may fail and return NULL if: +.Bl -tag -width Er +.It Bq Er ENOMEM +Insufficient storage space is available. +.El +.Pp +The +.Fn SHA1_File +and +.Fn SHA1_FileChunk +may return NULL when underlying +.Xr open 2 , +.Xr fstat 2 , +.Xr lseek 2 , +or +.Xr SHA1_End 3 +fail. +.Sh SEE ALSO +.Xr md4 3 , +.Xr md5 3 , +.Xr ripemd 3 , +.Xr sha256 3 , +.Xr sha512 3 , +.Xr skein 3 +.Sh HISTORY +These functions appeared in +.Fx 4.0 . +.Sh AUTHORS +The core hash routines were implemented by Eric Young based on the +published +FIPS standards. +.Sh BUGS +The SHA1 algorithm has been proven to be vulnerable to practical collision +attacks and should not be relied upon to produce unique outputs, +.Em nor should it be used as part of a new cryptographic signature scheme. diff --git a/lib/libmd/sha.h b/lib/libmd/sha.h new file mode 100644 index 000000000000..3d1cd8da3ae2 --- /dev/null +++ b/lib/libmd/sha.h @@ -0,0 +1,170 @@ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the routines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#ifndef _SHA_H_ +#define _SHA_H_ 1 + +#include <sys/types.h> /* XXX switch to machine/ansi.h and __ types */ + +#define SHA_CBLOCK 64 +#define SHA_LBLOCK 16 +#define SHA_BLOCK 16 +#define SHA_LAST_BLOCK 56 +#define SHA_LENGTH_BLOCK 8 +#define SHA_DIGEST_LENGTH 20 + +typedef struct SHAstate_st { + u_int32_t h0, h1, h2, h3, h4; + u_int32_t Nl, Nh; + u_int32_t data[SHA_LBLOCK]; + int num; +} SHA_CTX; +#define SHA1_CTX SHA_CTX + +__BEGIN_DECLS + +/* Ensure libmd symbols do not clash with libcrypto */ + +#ifndef SHA_Init +#define SHA_Init _libmd_SHA_Init +#endif +#ifndef SHA_Update +#define SHA_Update _libmd_SHA_Update +#endif +#ifndef SHA_Final +#define SHA_Final _libmd_SHA_Final +#endif +#ifndef SHA_End +#define SHA_End _libmd_SHA_End +#endif +#ifndef SHA_Fd +#define SHA_Fd _libmd_SHA_Fd +#endif +#ifndef SHA_FdChunk +#define SHA_FdChunk _libmd_SHA_FdChunk +#endif +#ifndef SHA_File +#define SHA_File _libmd_SHA_File +#endif +#ifndef SHA_FileChunk +#define SHA_FileChunk _libmd_SHA_FileChunk +#endif +#ifndef SHA_Data +#define SHA_Data _libmd_SHA_Data +#endif + +#ifndef sha_block +#define sha_block _libmd_sha_block +#endif + +#ifndef SHA1_Init +#define SHA1_Init _libmd_SHA1_Init +#endif +#ifndef SHA1_Update +#define SHA1_Update _libmd_SHA1_Update +#endif +#ifndef SHA1_Final +#define SHA1_Final _libmd_SHA1_Final +#endif +#ifndef SHA1_End +#define SHA1_End _libmd_SHA1_End +#endif +#ifndef SHA1_Fd +#define SHA1_Fd _libmd_SHA1_Fd +#endif +#ifndef SHA1_FdChunk +#define SHA1_FdChunk _libmd_SHA1_FdChunk +#endif +#ifndef SHA1_File +#define SHA1_File _libmd_SHA1_File +#endif +#ifndef SHA1_FileChunk +#define SHA1_FileChunk _libmd_SHA1_FileChunk +#endif +#ifndef SHA1_Data +#define SHA1_Data _libmd_SHA1_Data +#endif +#ifndef SHA1_Transform +#define SHA1_Transform _libmd_SHA1_Transform +#endif + +#ifndef sha1_block +#define sha1_block _libmd_sha1_block +#endif + +void SHA_Init(SHA_CTX *c); +void SHA_Update(SHA_CTX *c, const void 
*data, size_t len); +void SHA_Final(unsigned char *md, SHA_CTX *c); +char *SHA_End(SHA_CTX *, char *); +char *SHA_Fd(int, char *); +char *SHA_FdChunk(int, char *, off_t, off_t); +char *SHA_File(const char *, char *); +char *SHA_FileChunk(const char *, char *, off_t, off_t); +char *SHA_Data(const void *, unsigned int, char *); + +void SHA1_Init(SHA_CTX *c); +void SHA1_Update(SHA_CTX *c, const void *data, size_t len); +void SHA1_Final(unsigned char *md, SHA_CTX *c); +char *SHA1_End(SHA_CTX *, char *); +char *SHA1_Fd(int, char *); +char *SHA1_FdChunk(int, char *, off_t, off_t); +char *SHA1_File(const char *, char *); +char *SHA1_FileChunk(const char *, char *, off_t, off_t); +char *SHA1_Data(const void *, unsigned int, char *); +__END_DECLS + +#endif /* !_SHA_H_ */ diff --git a/lib/libmd/sha0c.c b/lib/libmd/sha0c.c new file mode 100644 index 000000000000..a36617b6ca60 --- /dev/null +++ b/lib/libmd/sha0c.c @@ -0,0 +1,438 @@ +/* crypto/sha/sha_dgst.c */ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the routines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#include <sys/types.h> + +#include <stdio.h> +#include <string.h> + +#if 0 +#include <machine/ansi.h> /* we use the __ variants of bit-sized types */ +#endif +#include <machine/endian.h> + +#define SHA_0 +#undef SHA_1 +#include "sha.h" +#include "sha_locl.h" + +char *SHA_version="SHA part of SSLeay 0.9.0b 11-Oct-1998"; + +/* Implemented from SHA-0 document - The Secure Hash Algorithm + */ + +#define INIT_DATA_h0 (unsigned long)0x67452301L +#define INIT_DATA_h1 (unsigned long)0xefcdab89L +#define INIT_DATA_h2 (unsigned long)0x98badcfeL +#define INIT_DATA_h3 (unsigned long)0x10325476L +#define INIT_DATA_h4 (unsigned long)0xc3d2e1f0L + +#define K_00_19 0x5a827999L +#define K_20_39 0x6ed9eba1L +#define K_40_59 0x8f1bbcdcL +#define K_60_79 0xca62c1d6L + +#ifndef NOPROTO + void sha_block(SHA_CTX *c, const u_int32_t *p, int num); +#else + void sha_block(); +#endif + +#define M_c2nl c2nl +#define M_p_c2nl p_c2nl +#define M_c2nl_p c2nl_p +#define M_p_c2nl_p p_c2nl_p +#define M_nl2c nl2c + +void SHA_Init(SHA_CTX *c) + { + c->h0=INIT_DATA_h0; + c->h1=INIT_DATA_h1; + c->h2=INIT_DATA_h2; + c->h3=INIT_DATA_h3; + c->h4=INIT_DATA_h4; + c->Nl=0; + c->Nh=0; + c->num=0; + } + +void SHA_Update(SHA_CTX *c, const void *in, size_t len) + { + u_int32_t *p; + int ew,ec,sw,sc; + u_int32_t l; + const unsigned char *data = in; + + if (len == 0) return; + + l=(c->Nl+(len<<3))&0xffffffffL; + if (l < c->Nl) /* overflow */ + c->Nh++; + c->Nh+=(len>>29); + c->Nl=l; + + if (c->num != 0) + { + p=c->data; + sw=c->num>>2; + sc=c->num&0x03; + + if ((c->num+len) >= SHA_CBLOCK) + { + l= p[sw]; + M_p_c2nl(data,l,sc); + p[sw++]=l; + for (; sw<SHA_LBLOCK; sw++) + { + M_c2nl(data,l); + p[sw]=l; + } + len-=(SHA_CBLOCK-c->num); + + sha_block(c,p,64); + c->num=0; + /* drop through and do the rest */ + } + else + { + c->num+=(int)len; + if ((sc+len) < 4) /* ugly, add char's to a word */ + { + l= p[sw]; + M_p_c2nl_p(data,l,sc,len); + p[sw]=l; + } + else + { + ew=(c->num>>2); + ec=(c->num&0x03); + l= p[sw]; + M_p_c2nl(data,l,sc); + p[sw++]=l; + for (; sw < ew; sw++) + { M_c2nl(data,l); p[sw]=l; } + if (ec) + { + M_c2nl_p(data,l,ec); + p[sw]=l; + } + } + return; + } + } + /* We can only do the following code for assember, the reason + * being that the sha_block 'C' version changes the values + * in the 'data' array. The assember code avoids this and + * copies it to a local array. I should be able to do this for + * the C version as well.... 
+ */ +#if 1 +#if BYTE_ORDER == BIG_ENDIAN || defined(SHA_ASM) + if ((((unsigned int)data)%sizeof(u_int32_t)) == 0) + { + sw=len/SHA_CBLOCK; + if (sw) + { + sw*=SHA_CBLOCK; + sha_block(c,(u_int32_t *)data,sw); + data+=sw; + len-=sw; + } + } +#endif +#endif + /* we now can process the input data in blocks of SHA_CBLOCK + * chars and save the leftovers to c->data. */ + p=c->data; + while (len >= SHA_CBLOCK) + { +#if BYTE_ORDER == BIG_ENDIAN || BYTE_ORDER == LITTLE_ENDIAN + if (p != (u_int32_t *)data) + memcpy(p,data,SHA_CBLOCK); + data+=SHA_CBLOCK; +# if BYTE_ORDER == LITTLE_ENDIAN +# ifndef SHA_ASM /* Will not happen */ + for (sw=(SHA_LBLOCK/4); sw; sw--) + { + Endian_Reverse32(p[0]); + Endian_Reverse32(p[1]); + Endian_Reverse32(p[2]); + Endian_Reverse32(p[3]); + p+=4; + } + p=c->data; +# endif +# endif +#else + for (sw=(SHA_BLOCK/4); sw; sw--) + { + M_c2nl(data,l); *(p++)=l; + M_c2nl(data,l); *(p++)=l; + M_c2nl(data,l); *(p++)=l; + M_c2nl(data,l); *(p++)=l; + } + p=c->data; +#endif + sha_block(c,p,64); + len-=SHA_CBLOCK; + } + ec=(int)len; + c->num=ec; + ew=(ec>>2); + ec&=0x03; + + for (sw=0; sw < ew; sw++) + { M_c2nl(data,l); p[sw]=l; } + M_c2nl_p(data,l,ec); + p[sw]=l; + } + +static void SHA_Transform(SHA_CTX *c, unsigned char *b) + { + u_int32_t p[16]; +#if BYTE_ORDER == LITTLE_ENDIAN + u_int32_t *q; + int i; +#endif + +#if BYTE_ORDER == BIG_ENDIAN || BYTE_ORDER == LITTLE_ENDIAN + memcpy(p,b,64); +#if BYTE_ORDER == LITTLE_ENDIAN + q=p; + for (i=(SHA_LBLOCK/4); i; i--) + { + Endian_Reverse32(q[0]); + Endian_Reverse32(q[1]); + Endian_Reverse32(q[2]); + Endian_Reverse32(q[3]); + q+=4; + } +#endif +#else + q=p; + for (i=(SHA_LBLOCK/4); i; i--) + { + u_int32_t l; + c2nl(b,l); *(q++)=l; + c2nl(b,l); *(q++)=l; + c2nl(b,l); *(q++)=l; + c2nl(b,l); *(q++)=l; + } +#endif + sha_block(c,p,64); + } + +void sha_block(SHA_CTX *c, const u_int32_t *W, int num) + { + u_int32_t A,B,C,D,E,T; + u_int32_t X[16]; + + A=c->h0; + B=c->h1; + C=c->h2; + D=c->h3; + E=c->h4; + + for (;;) + { + BODY_00_15( 0,A,B,C,D,E,T,W); + BODY_00_15( 1,T,A,B,C,D,E,W); + BODY_00_15( 2,E,T,A,B,C,D,W); + BODY_00_15( 3,D,E,T,A,B,C,W); + BODY_00_15( 4,C,D,E,T,A,B,W); + BODY_00_15( 5,B,C,D,E,T,A,W); + BODY_00_15( 6,A,B,C,D,E,T,W); + BODY_00_15( 7,T,A,B,C,D,E,W); + BODY_00_15( 8,E,T,A,B,C,D,W); + BODY_00_15( 9,D,E,T,A,B,C,W); + BODY_00_15(10,C,D,E,T,A,B,W); + BODY_00_15(11,B,C,D,E,T,A,W); + BODY_00_15(12,A,B,C,D,E,T,W); + BODY_00_15(13,T,A,B,C,D,E,W); + BODY_00_15(14,E,T,A,B,C,D,W); + BODY_00_15(15,D,E,T,A,B,C,W); + BODY_16_19(16,C,D,E,T,A,B,W,W,W,W); + BODY_16_19(17,B,C,D,E,T,A,W,W,W,W); + BODY_16_19(18,A,B,C,D,E,T,W,W,W,W); + BODY_16_19(19,T,A,B,C,D,E,W,W,W,X); + + BODY_20_31(20,E,T,A,B,C,D,W,W,W,X); + BODY_20_31(21,D,E,T,A,B,C,W,W,W,X); + BODY_20_31(22,C,D,E,T,A,B,W,W,W,X); + BODY_20_31(23,B,C,D,E,T,A,W,W,W,X); + BODY_20_31(24,A,B,C,D,E,T,W,W,X,X); + BODY_20_31(25,T,A,B,C,D,E,W,W,X,X); + BODY_20_31(26,E,T,A,B,C,D,W,W,X,X); + BODY_20_31(27,D,E,T,A,B,C,W,W,X,X); + BODY_20_31(28,C,D,E,T,A,B,W,W,X,X); + BODY_20_31(29,B,C,D,E,T,A,W,W,X,X); + BODY_20_31(30,A,B,C,D,E,T,W,X,X,X); + BODY_20_31(31,T,A,B,C,D,E,W,X,X,X); + BODY_32_39(32,E,T,A,B,C,D,X); + BODY_32_39(33,D,E,T,A,B,C,X); + BODY_32_39(34,C,D,E,T,A,B,X); + BODY_32_39(35,B,C,D,E,T,A,X); + BODY_32_39(36,A,B,C,D,E,T,X); + BODY_32_39(37,T,A,B,C,D,E,X); + BODY_32_39(38,E,T,A,B,C,D,X); + BODY_32_39(39,D,E,T,A,B,C,X); + + BODY_40_59(40,C,D,E,T,A,B,X); + BODY_40_59(41,B,C,D,E,T,A,X); + BODY_40_59(42,A,B,C,D,E,T,X); + BODY_40_59(43,T,A,B,C,D,E,X); + BODY_40_59(44,E,T,A,B,C,D,X); + 
BODY_40_59(45,D,E,T,A,B,C,X); + BODY_40_59(46,C,D,E,T,A,B,X); + BODY_40_59(47,B,C,D,E,T,A,X); + BODY_40_59(48,A,B,C,D,E,T,X); + BODY_40_59(49,T,A,B,C,D,E,X); + BODY_40_59(50,E,T,A,B,C,D,X); + BODY_40_59(51,D,E,T,A,B,C,X); + BODY_40_59(52,C,D,E,T,A,B,X); + BODY_40_59(53,B,C,D,E,T,A,X); + BODY_40_59(54,A,B,C,D,E,T,X); + BODY_40_59(55,T,A,B,C,D,E,X); + BODY_40_59(56,E,T,A,B,C,D,X); + BODY_40_59(57,D,E,T,A,B,C,X); + BODY_40_59(58,C,D,E,T,A,B,X); + BODY_40_59(59,B,C,D,E,T,A,X); + + BODY_60_79(60,A,B,C,D,E,T,X); + BODY_60_79(61,T,A,B,C,D,E,X); + BODY_60_79(62,E,T,A,B,C,D,X); + BODY_60_79(63,D,E,T,A,B,C,X); + BODY_60_79(64,C,D,E,T,A,B,X); + BODY_60_79(65,B,C,D,E,T,A,X); + BODY_60_79(66,A,B,C,D,E,T,X); + BODY_60_79(67,T,A,B,C,D,E,X); + BODY_60_79(68,E,T,A,B,C,D,X); + BODY_60_79(69,D,E,T,A,B,C,X); + BODY_60_79(70,C,D,E,T,A,B,X); + BODY_60_79(71,B,C,D,E,T,A,X); + BODY_60_79(72,A,B,C,D,E,T,X); + BODY_60_79(73,T,A,B,C,D,E,X); + BODY_60_79(74,E,T,A,B,C,D,X); + BODY_60_79(75,D,E,T,A,B,C,X); + BODY_60_79(76,C,D,E,T,A,B,X); + BODY_60_79(77,B,C,D,E,T,A,X); + BODY_60_79(78,A,B,C,D,E,T,X); + BODY_60_79(79,T,A,B,C,D,E,X); + + c->h0=(c->h0+E)&0xffffffffL; + c->h1=(c->h1+T)&0xffffffffL; + c->h2=(c->h2+A)&0xffffffffL; + c->h3=(c->h3+B)&0xffffffffL; + c->h4=(c->h4+C)&0xffffffffL; + + num-=64; + if (num <= 0) break; + + A=c->h0; + B=c->h1; + C=c->h2; + D=c->h3; + E=c->h4; + + W+=16; + } + } + +void SHA_Final(unsigned char *md, SHA_CTX *c) + { + int i,j; + u_int32_t l; + u_int32_t *p; + static unsigned char end[4]={0x80,0x00,0x00,0x00}; + unsigned char *cp=end; + + /* c->num should definitely have room for at least one more byte. */ + p=c->data; + j=c->num; + i=j>>2; +#ifdef PURIFY + if ((j&0x03) == 0) p[i]=0; +#endif + l=p[i]; + M_p_c2nl(cp,l,j&0x03); + p[i]=l; + i++; + /* i is the next 'undefined word' */ + if (c->num >= SHA_LAST_BLOCK) + { + for (; i<SHA_LBLOCK; i++) + p[i]=0; + sha_block(c,p,64); + i=0; + } + for (; i<(SHA_LBLOCK-2); i++) + p[i]=0; + p[SHA_LBLOCK-2]=c->Nh; + p[SHA_LBLOCK-1]=c->Nl; + sha_block(c,p,64); + cp=md; + l=c->h0; nl2c(l,cp); + l=c->h1; nl2c(l,cp); + l=c->h2; nl2c(l,cp); + l=c->h3; nl2c(l,cp); + l=c->h4; nl2c(l,cp); + + /* Clear the context state, not just the local pointer to it */ + explicit_bzero(c, sizeof(*c)); + } + diff --git a/lib/libmd/sha1c.c b/lib/libmd/sha1c.c new file mode 100644 index 000000000000..128e0b991742 --- /dev/null +++ b/lib/libmd/sha1c.c @@ -0,0 +1,260 @@ +/*- + * Copyright (c) 2009 The Go Authors. All rights reserved. + * Copyright (c) 2024 Robert Clausecker <fuz@freebsd.org> + * + * Adapted from Go's crypto/sha1/sha1.go. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <assert.h> +#include <sha.h> +#include <stdint.h> +#include <string.h> +#include <strings.h> +#include <sys/endian.h> + +#ifdef SHA1_ASM +extern void sha1_block(SHA1_CTX *, const void *, size_t); +#else +static void sha1_block(SHA1_CTX *, const void *, size_t); +#endif + +#define INIT0 0x67452301 +#define INIT1 0xEFCDAB89 +#define INIT2 0x98BADCFE +#define INIT3 0x10325476 +#define INIT4 0xC3D2E1F0 + +#define K0 0x5A827999 +#define K1 0x6ED9EBA1 +#define K2 0x8F1BBCDC +#define K3 0xCA62C1D6 + +void +SHA1_Init(SHA1_CTX *c) +{ + c->h0 = INIT0; + c->h1 = INIT1; + c->h2 = INIT2; + c->h3 = INIT3; + c->h4 = INIT4; + c->Nl = 0; + c->Nh = 0; + c->num = 0; +} + +void +SHA1_Update(SHA1_CTX *c, const void *data, size_t len) +{ + uint64_t nn; + const char *p = data; + + nn = (uint64_t)c->Nl | (uint64_t)c->Nh << 32; + nn += len; + c->Nl = (uint32_t)nn; + c->Nh = (uint32_t)(nn >> 32); + + if (c->num > 0) { + size_t n = SHA_CBLOCK - c->num; + + if (n > len) + n = len; + + memcpy((char *)c->data + c->num, p, n); + c->num += n; + if (c->num == SHA_CBLOCK) { + sha1_block(c, (void *)c->data, SHA_CBLOCK); + c->num = 0; + } + + p += n; + len -= n; + } + + if (len >= SHA_CBLOCK) { + size_t n = len & ~(size_t)(SHA_CBLOCK - 1); + + sha1_block(c, p, n); + p += n; + len -= n; + } + + if (len > 0) { + memcpy(c->data, p, len); + c->num = len; + } +} + +void +SHA1_Final(unsigned char *md, SHA1_CTX *c) +{ + uint64_t len; + size_t t; + unsigned char tmp[SHA_CBLOCK + sizeof(uint64_t)] = {0x80, 0}; + + len = (uint64_t)c->Nl | (uint64_t)c->Nh << 32; + t = 64 + 56 - c->Nl % 64; + if (t > 64) + t -= 64; + + /* length in bits */ + len <<= 3; + be64enc(tmp + t, len); + SHA1_Update(c, tmp, t + 8); + assert(c->num == 0); + + be32enc(md + 0, c->h0); + be32enc(md + 4, c->h1); + be32enc(md + 8, c->h2); + be32enc(md + 12, c->h3); + be32enc(md + 16, c->h4); + + explicit_bzero(c, sizeof(*c)); +} + +#ifndef SHA1_ASM +static void +/* invariant: len is a multiple of SHA_CBLOCK */ +sha1_block(SHA1_CTX *c, const void *data, size_t len) +{ + uint32_t w[16]; + uint32_t h0 = c->h0, h1 = c->h1, h2 = c->h2, h3 = c->h3, h4 = c->h4; + const char *p = data; + + while (len >= SHA_CBLOCK) { + size_t i; + uint32_t a = h0, b = h1, c = h2, d = h3, e = h4; + uint32_t f, t, tmp; + +# pragma unroll + for (i = 0; i < 16; i++) + w[i] = be32dec(p + 4*i); + +# pragma unroll + for (i = 0; i < 16; i++) { + f = b & c | ~b & d; + t = (a << 5 | a >> 32 - 5) + f + e + w[i & 0xf] + K0; + e = d; + d = c; + c = b << 30 | b >> 32 - 30; + b = a; + a = t; + } + +# pragma unroll + for (; i < 20; i++) { + tmp = w[i - 3 & 0xf] ^ w[i - 8 & 0xf] ^ w[i - 14 & 0xf] ^ w[i & 0xf]; + w[i & 0xf] = tmp << 1 | tmp >> 32 - 1; + + f = b & c | ~b & d; + t = (a << 5 | a >> 32 - 5) + f + e + w[i & 0xf] + K0; + e = d; + d = c; + c = b << 30 | b >> 32 - 30; + b = a; + a = t; + } + +# pragma unroll + for (; i < 40; i++) { + tmp = w[i - 3 & 0xf] ^ w[i - 8 & 0xf] ^ w[i - 14 & 0xf] ^ w[i & 0xf]; + w[i & 0xf] = tmp << 1 | tmp >> 32 - 1; + + f = b 
^ c ^ d; + t = (a << 5 | a >> 32 - 5) + f + e + w[i & 0xf] + K1; + e = d; + d = c; + c = b << 30 | b >> 32 - 30; + b = a; + a = t; + } + +# pragma unroll + for (; i < 60; i++) { + tmp = w[i - 3 & 0xf] ^ w[i - 8 & 0xf] ^ w[i - 14 & 0xf] ^ w[i & 0xf]; + w[i & 0xf] = tmp << 1 | tmp >> 32 - 1; + + f = (b | c) & d | b & c; + t = (a << 5 | a >> 32 - 5) + f + e + w[i & 0xf] + K2; + e = d; + d = c; + c = b << 30 | b >> 32 - 30; + b = a; + a = t; + } + +# pragma unroll + for (; i < 80; i++) { + tmp = w[i - 3 & 0xf] ^ w[i - 8 & 0xf] ^ w[i - 14 & 0xf] ^ w[i & 0xf]; + w[i & 0xf] = tmp << 1 | tmp >> 32 - 1; + + f = b ^ c ^ d; + t = (a << 5 | a >> 32 - 5) + f + e + w[i & 0xf] + K3; + e = d; + d = c; + c = b << 30 | b >> 32 - 30; + b = a; + a = t; + } + + h0 += a; + h1 += b; + h2 += c; + h3 += d; + h4 += e; + + p += SHA_CBLOCK; + len -= SHA_CBLOCK; + } + + c->h0 = h0; + c->h1 = h1; + c->h2 = h2; + c->h3 = h3; + c->h4 = h4; +} +#endif + +#ifdef WEAK_REFS +/* When building libmd, provide weak references. Note: this is not + activated in the context of compiling these sources for internal + use in libcrypt. + */ +#undef SHA_Init +__weak_reference(_libmd_SHA_Init, SHA_Init); +#undef SHA_Update +__weak_reference(_libmd_SHA_Update, SHA_Update); +#undef SHA_Final +__weak_reference(_libmd_SHA_Final, SHA_Final); +#undef SHA1_Init +__weak_reference(_libmd_SHA1_Init, SHA1_Init); +#undef SHA1_Update +__weak_reference(_libmd_SHA1_Update, SHA1_Update); +#undef SHA1_Final +__weak_reference(_libmd_SHA1_Final, SHA1_Final); +#endif diff --git a/lib/libmd/sha256.3 b/lib/libmd/sha256.3 new file mode 100644 index 000000000000..406dfd1b357d --- /dev/null +++ b/lib/libmd/sha256.3 @@ -0,0 +1,208 @@ +.\" +.\" ---------------------------------------------------------------------------- +.\" "THE BEER-WARE LICENSE" (Revision 42): +.\" <phk@FreeBSD.org> wrote this file. As long as you retain this notice you +.\" can do whatever you want with this stuff. If we meet some day, and you think +.\" this stuff is worth it, you can buy me a beer in return. 
Poul-Henning Kamp +.\" ---------------------------------------------------------------------------- +.\" +.\" From: Id: mdX.3,v 1.14 1999/02/11 20:31:49 wollman Exp +.\" +.Dd March 8, 2022 +.Dt SHA256 3 +.Os +.Sh NAME +.Nm SHA224_Init , +.Nm SHA224_Update , +.Nm SHA224_Final , +.Nm SHA224_End , +.Nm SHA224_Fd , +.Nm SHA224_FdChunk , +.Nm SHA224_File , +.Nm SHA224_FileChunk , +.Nm SHA224_Data , +.Nm SHA256_Init , +.Nm SHA256_Update , +.Nm SHA256_Final , +.Nm SHA256_End , +.Nm SHA256_Fd , +.Nm SHA256_FdChunk , +.Nm SHA256_File , +.Nm SHA256_FileChunk , +.Nm SHA256_Data +.Nd calculate the FIPS 180-2 ``SHA-256'' (or SHA-224) message digest +.Sh LIBRARY +.Lb libmd +.Sh SYNOPSIS +.In sys/types.h +.In sha224.h +.Ft void +.Fn SHA224_Init "SHA224_CTX *context" +.Ft void +.Fn SHA224_Update "SHA224_CTX *context" "const unsigned char *data" "size_t len" +.Ft void +.Fn SHA224_Final "unsigned char digest[32]" "SHA224_CTX *context" +.Ft "char *" +.Fn SHA224_End "SHA224_CTX *context" "char *buf" +.Ft "char *" +.Fn SHA224_Fd "int fd" "char *buf" +.Ft "char *" +.Fn SHA224_FdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA224_File "const char *filename" "char *buf" +.Ft "char *" +.Fn SHA224_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA224_Data "const unsigned char *data" "unsigned int len" "char *buf" +.In sha256.h +.Ft void +.Fn SHA256_Init "SHA256_CTX *context" +.Ft void +.Fn SHA256_Update "SHA256_CTX *context" "const unsigned char *data" "size_t len" +.Ft void +.Fn SHA256_Final "unsigned char digest[32]" "SHA256_CTX *context" +.Ft "char *" +.Fn SHA256_End "SHA256_CTX *context" "char *buf" +.Ft "char *" +.Fn SHA256_Fd "int fd" "char *buf" +.Ft "char *" +.Fn SHA256_FdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA256_File "const char *filename" "char *buf" +.Ft "char *" +.Fn SHA256_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA256_Data "const unsigned char *data" "unsigned int len" "char *buf" +.Sh DESCRIPTION +The +.Li SHA256_ +functions calculate a 256-bit cryptographic checksum (digest) +for any number of input bytes. +A cryptographic checksum is a one-way +hash function; that is, it is computationally impractical to find +the input corresponding to a particular output. +This net result is +a +.Dq fingerprint +of the input-data, which does not disclose the actual input. +.Pp +The +.Fn SHA256_Init , +.Fn SHA256_Update , +and +.Fn SHA256_Final +functions are the core functions. +Allocate an +.Vt SHA256_CTX , +initialize it with +.Fn SHA256_Init , +run over the data with +.Fn SHA256_Update , +and finally extract the result using +.Fn SHA256_Final , +which will also erase the +.Vt SHA256_CTX . +.Pp +.Fn SHA256_End +is a wrapper for +.Fn SHA256_Final +which converts the return value to a 65-character +(including the terminating '\e0') +ASCII string which represents the 256 bits in hexadecimal. +.Pp +.Fn SHA256_File +calculates the digest of a file, and uses +.Fn SHA256_End +to return the result. +If the file cannot be opened, a null pointer is returned. +.Fn SHA256_FileChunk +is similar to +.Fn SHA256_File , +but it only calculates the digest over a byte-range of the file specified, +starting at +.Fa offset +and spanning +.Fa length +bytes. +If the +.Fa length +parameter is specified as 0, or more than the length of the remaining part +of the file, +.Fn SHA256_FileChunk +calculates the digest from +.Fa offset +to the end of file. 
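The Init/Update/Final flow and the File() helpers described above map onto only a few lines of C. The following sketch is an editorial illustration rather than part of the manual page; it relies solely on the interfaces listed in the SYNOPSIS (sha256.h from libmd), and the file path used is made up for the example.

#include <sys/types.h>
#include <sha256.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	SHA256_CTX ctx;
	char hex[65];		/* 64 hex digits plus the terminating NUL */
	const char *msg = "message digest";
	char *p;

	/* Core flow: initialize, feed data, extract the digest as hex. */
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, (const unsigned char *)msg, strlen(msg));
	printf("SHA-256(\"%s\") = %s\n", msg, SHA256_End(&ctx, hex));

	/* File helper; a NULL buf asks the library to malloc() the result. */
	p = SHA256_File("/etc/motd", NULL);	/* example path only */
	if (p != NULL) {
		printf("SHA-256(/etc/motd) = %s\n", p);
		free(p);
	}
	return (0);
}

Such a program would typically be linked against libmd, for example with cc sha256_example.c -lmd, though the exact build line depends on the system.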
+.Fn SHA256_Data +calculates the digest of a chunk of data in memory, and uses +.Fn SHA256_End +to return the result. +.Pp +The +.Fn SHA256_Fd +and +.Fn SHA256_FdChunk +functions are identical to their +.Fn SHA256_File +and +.Fn SHA256_FileChunk +counterparts, with the exception that the first argument is an +.Fa fd +instead of a +.Fa filename . +.Pp +When using +.Fn SHA256_End , +.Fn SHA256_File , +or +.Fn SHA256_Data , +the +.Fa buf +argument can be a null pointer, in which case the returned string +is allocated with +.Xr malloc 3 +and subsequently must be explicitly deallocated using +.Xr free 3 +after use. +If the +.Fa buf +argument is non-null it must point to at least 65 characters of buffer space. +.Pp +SHA224 is identical SHA256, except it has slightly different initialization +vectors, and is truncated to a shorter digest. +.Sh ERRORS +The +.Fn SHA256_End +function called with a null buf argument may fail and return NULL if: +.Bl -tag -width Er +.It Bq Er ENOMEM +Insufficient storage space is available. +.El +.Pp +The +.Fn SHA256_File +and +.Fn SHA256_FileChunk +may return NULL when underlying +.Xr open 2 , +.Xr fstat 2 , +.Xr lseek 2 , +or +.Xr SHA256_End 3 +fail. +.Sh SEE ALSO +.Xr md4 3 , +.Xr md5 3 , +.Xr ripemd 3 , +.Xr sha 3 , +.Xr sha512 3 , +.Xr skein 3 +.Sh HISTORY +These functions appeared in +.Fx 6.0 . +.Sh AUTHORS +The core hash routines were implemented by Colin Percival based on +the published FIPS 180-2 standard. +.Sh BUGS +No method is known to exist which finds two files having the same hash value, +nor to find a file with a specific hash value. +There is on the other hand no guarantee that such a method does not exist. diff --git a/lib/libmd/sha512.3 b/lib/libmd/sha512.3 new file mode 100644 index 000000000000..17d0d0988350 --- /dev/null +++ b/lib/libmd/sha512.3 @@ -0,0 +1,294 @@ +.\" +.\" ---------------------------------------------------------------------------- +.\" "THE BEER-WARE LICENSE" (Revision 42): +.\" <phk@FreeBSD.org> wrote this file. As long as you retain this notice you +.\" can do whatever you want with this stuff. If we meet some day, and you think +.\" this stuff is worth it, you can buy me a beer in return. 
Poul-Henning Kamp +.\" ---------------------------------------------------------------------------- +.\" +.\" From: Id: mdX.3,v 1.14 1999/02/11 20:31:49 wollman Exp +.\" +.Dd February 12, 2023 +.Dt SHA512 3 +.Os +.Sh NAME +.Nm SHA512_Init , +.Nm SHA512_Update , +.Nm SHA512_Final , +.Nm SHA512_End , +.Nm SHA512_Fd , +.Nm SHA512_FdChunk , +.Nm SHA512_File , +.Nm SHA512_FileChunk , +.Nm SHA512_Data , +.Nm SHA384_Init , +.Nm SHA384_Update , +.Nm SHA384_Final , +.Nm SHA384_End , +.Nm SHA384_Fd , +.Nm SHA384_FdChunk , +.Nm SHA384_File , +.Nm SHA384_FileChunk , +.Nm SHA384_Data , +.Nm SHA512_224_Init , +.Nm SHA512_224_Update , +.Nm SHA512_224_Final , +.Nm SHA512_224_End , +.Nm SHA512_224_File , +.Nm SHA512_224_FileChunk , +.Nm SHA512_224_Data +.Nm SHA512_256_Init , +.Nm SHA512_256_Update , +.Nm SHA512_256_Final , +.Nm SHA512_256_End , +.Nm SHA512_256_Fd , +.Nm SHA512_256_FdChunk , +.Nm SHA512_256_File , +.Nm SHA512_256_FileChunk , +.Nm SHA512_256_Data +.Nd calculate the FIPS 180-4 ``SHA-512'' family of message digests +.Sh LIBRARY +.Lb libmd +.Sh SYNOPSIS +.In sys/types.h +.In sha512.h +.Ft void +.Fn SHA512_Init "SHA512_CTX *context" +.Ft void +.Fn SHA512_Update "SHA512_CTX *context" "const unsigned char *data" "size_t len" +.Ft void +.Fn SHA512_Final "unsigned char digest[64]" "SHA512_CTX *context" +.Ft "char *" +.Fn SHA512_End "SHA512_CTX *context" "char *buf" +.Ft "char *" +.Fn SHA512_Fd "int fd" "char *buf" +.Ft "char *" +.Fn SHA512_FdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA512_File "const char *filename" "char *buf" +.Ft "char *" +.Fn SHA512_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA512_Data "const unsigned char *data" "unsigned int len" "char *buf" +.In sha384.h +.Ft void +.Fn SHA384_Init "SHA384_CTX *context" +.Ft void +.Fn SHA384_Update "SHA384_CTX *context" "const unsigned char *data" "size_t len" +.Ft void +.Fn SHA384_Final "unsigned char digest[48]" "SHA384_CTX *context" +.Ft "char *" +.Fn SHA384_End "SHA384_CTX *context" "char *buf" +.Ft "char *" +.Fn SHA384_Fd "int fd" "char *buf" +.Ft "char *" +.Fn SHA384_FdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA384_File "const char *filename" "char *buf" +.Ft "char *" +.Fn SHA384_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA384_Data "const unsigned char *data" "unsigned int len" "char *buf" +.In sha512t.h +.Ft void +.Fn SHA512_224_Init "SHA512_CTX *context" +.Ft void +.Fn SHA512_224_Update "SHA512_CTX *context" "const unsigned char *data" "size_t len" +.Ft void +.Fn SHA512_224_Final "unsigned char digest[32]" "SHA512_CTX *context" +.Ft "char *" +.Fn SHA512_224_End "SHA512_CTX *context" "char *buf" +.Ft "char *" +.Fn SHA512_224_File "const char *filename" "char *buf" +.Ft "char *" +.Fn SHA512_224_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA512_224_Data "const unsigned char *data" "unsigned int len" "char *buf" +.Ft void +.Fn SHA512_256_Init "SHA512_CTX *context" +.Ft void +.Fn SHA512_256_Update "SHA512_CTX *context" "const unsigned char *data" "size_t len" +.Ft void +.Fn SHA512_256_Final "unsigned char digest[32]" "SHA512_CTX *context" +.Ft "char *" +.Fn SHA512_256_End "SHA512_CTX *context" "char *buf" +.Ft "char *" +.Fn SHA512_256_Fd "int fd" "char *buf" +.Ft "char *" +.Fn SHA512_256_FdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA512_256_File "const char *filename" "char *buf" 
+.Ft "char *" +.Fn SHA512_256_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SHA512_256_Data "const unsigned char *data" "unsigned int len" "char *buf" +.Sh DESCRIPTION +The +.Li SHA512_ +functions calculate a 512-bit cryptographic checksum (digest) +for any number of input bytes. +A cryptographic checksum is a one-way +hash function; that is, it is computationally impractical to find +the input corresponding to a particular output. +This net result is +a +.Dq fingerprint +of the input-data, which does not disclose the actual input. +.Pp +The +.Fn SHA512_Init , +.Fn SHA512_Update , +and +.Fn SHA512_Final +functions are the core functions. +Allocate an +.Vt SHA512_CTX , +initialize it with +.Fn SHA512_Init , +run over the data with +.Fn SHA512_Update , +and finally extract the result using +.Fn SHA512_Final , +which will also erase the +.Vt SHA512_CTX . +.Pp +.Fn SHA512_End +is a wrapper for +.Fn SHA512_Final +which converts the return value to a 129-character +(including the terminating '\e0') +ASCII string which represents the 512 bits in hexadecimal. +.Pp +.Fn SHA512_File +calculates the digest of a file, and uses +.Fn SHA512_End +to return the result. +If the file cannot be opened, a null pointer is returned. +.Fn SHA512_FileChunk +is similar to +.Fn SHA512_File , +but it only calculates the digest over a byte-range of the file specified, +starting at +.Fa offset +and spanning +.Fa length +bytes. +If the +.Fa length +parameter is specified as 0, or more than the length of the remaining part +of the file, +.Fn SHA512_FileChunk +calculates the digest from +.Fa offset +to the end of file. +.Fn SHA512_Data +calculates the digest of a chunk of data in memory, and uses +.Fn SHA512_End +to return the result. +.Pp +The +.Fn SHA512_Fd +and +.Fn SHA512_FdChunk +functions are identical to their +.Fn SHA512_File +and +.Fn SHA512_FileChunk +counterparts, with the exception that the first argument is an +.Fa fd +instead of a +.Fa filename . +.Pp +When using +.Fn SHA512_End , +.Fn SHA512_File , +or +.Fn SHA512_Data , +the +.Fa buf +argument can be a null pointer, in which case the returned string +is allocated with +.Xr malloc 3 +and subsequently must be explicitly deallocated using +.Xr free 3 +after use. +If the +.Fa buf +argument is non-null it must point to at least 129 characters of buffer space. +.Pp +The +.Li SHA384_ , +.Li SHA512_224 , +and +.Li SHA512_256_ +functions are identical to the +.Li SHA512_ +functions except they use a different initial hash value and the output is +truncated to 384, 224, and 256 bits respectively. +.Pp +.Fn SHA384_End +is a wrapper for +.Fn SHA384_Final +which converts the return value to a 97-character +(including the terminating '\e0') +ASCII string which represents the 384 bits in hexadecimal. +.Pp +.Fn SHA512_224_End +is a wrapper for +.Fn SHA512_Final +which converts the return value to a 57-character +(including the terminating '\e0') +ASCII string which represents the 224 bits in hexadecimal. +.Pp +.Fn SHA512_224_End +is a wrapper for +.Fn SHA512_Final +which converts the return value to a 57-character +(including the terminating '\e0') +.Tn ASCII +string which represents the 224 bits in hexadecimal. +.Pp +.Fn SHA512_256_End +is a wrapper for +.Fn SHA512_Final +which converts the return value to a 65-character +(including the terminating '\e0') +ASCII string which represents the 256 bits in hexadecimal. 
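To make the relationship between the variants concrete, the following sketch (an editorial illustration, not part of the manual page) computes SHA-384 and SHA-512/256 of the same input. It uses only the interfaces from the SYNOPSIS above; note that only the header, the context type, the function prefix, and the required buffer size differ, while the call pattern stays the same.

#include <sys/types.h>
#include <sha384.h>
#include <sha512t.h>

#include <stdio.h>
#include <string.h>

int
main(void)
{
	SHA384_CTX c384;
	SHA512_CTX c512_256;	/* the SHA-512/256 variant uses SHA512_CTX */
	char hex384[97];	/* 96 hex digits plus NUL, as described above */
	char hex256[65];	/* 64 hex digits plus NUL */
	const char *msg = "abc";

	SHA384_Init(&c384);
	SHA384_Update(&c384, (const unsigned char *)msg, strlen(msg));
	printf("SHA-384     = %s\n", SHA384_End(&c384, hex384));

	SHA512_256_Init(&c512_256);
	SHA512_256_Update(&c512_256, (const unsigned char *)msg, strlen(msg));
	printf("SHA-512/256 = %s\n", SHA512_256_End(&c512_256, hex256));
	return (0);
}

As with the other digests, the program is built against libmd (typically with -lmd).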
+.Sh ERRORS +The +.Fn SHA512_End +function called with a null buf argument may fail and return NULL if: +.Bl -tag -width Er +.It Bq Er ENOMEM +Insufficient storage space is available. +.El +.Pp +The +.Fn SHA512_File +and +.Fn SHA512_FileChunk +may return NULL when underlying +.Xr open 2 , +.Xr fstat 2 , +.Xr lseek 2 , +or +.Xr SHA512_End 3 +fail. +.Sh SEE ALSO +.Xr md4 3 , +.Xr md5 3 , +.Xr ripemd 3 , +.Xr sha 3 , +.Xr sha256 3 , +.Xr sha512 3 , +.Xr skein 3 +.Sh HISTORY +These functions appeared in +.Fx 9.0 . +.Sh AUTHORS +The core hash routines were implemented by Colin Percival based on +the published FIPS 180-2 standard. +.Sh BUGS +No method is known to exist which finds two files having the same hash value, +nor to find a file with a specific hash value. +There is on the other hand no guarantee that such a method does not exist. diff --git a/lib/libmd/sha_locl.h b/lib/libmd/sha_locl.h new file mode 100644 index 000000000000..4f661af22875 --- /dev/null +++ b/lib/libmd/sha_locl.h @@ -0,0 +1,243 @@ +/* crypto/sha/sha_locl.h */ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the routines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#ifdef undef +/* one or the other needs to be defined */ +#ifndef SHA_1 /* FIPE 180-1 */ +#define SHA_0 /* FIPS 180 */ +#endif +#endif + +#define ULONG unsigned long +#define UCHAR unsigned char +#define UINT unsigned int + +#ifdef NOCONST +#define const +#endif + +#undef c2nl +#define c2nl(c,l) (l =(((unsigned long)(*((c)++)))<<24), \ + l|=(((unsigned long)(*((c)++)))<<16), \ + l|=(((unsigned long)(*((c)++)))<< 8), \ + l|=(((unsigned long)(*((c)++))) )) + +#undef p_c2nl +#define p_c2nl(c,l,n) { \ + switch (n) { \ + case 0: l =((unsigned long)(*((c)++)))<<24; \ + case 1: l|=((unsigned long)(*((c)++)))<<16; \ + case 2: l|=((unsigned long)(*((c)++)))<< 8; \ + case 3: l|=((unsigned long)(*((c)++))); \ + } \ + } + +#undef c2nl_p +/* NOTE the pointer is not incremented at the end of this */ +#define c2nl_p(c,l,n) { \ + l=0; \ + (c)+=n; \ + switch (n) { \ + case 3: l =((unsigned long)(*(--(c))))<< 8; \ + case 2: l|=((unsigned long)(*(--(c))))<<16; \ + case 1: l|=((unsigned long)(*(--(c))))<<24; \ + } \ + } + +#undef p_c2nl_p +#define p_c2nl_p(c,l,sc,len) { \ + switch (sc) \ + { \ + case 0: l =((unsigned long)(*((c)++)))<<24; \ + if (--len == 0) break; \ + case 1: l|=((unsigned long)(*((c)++)))<<16; \ + if (--len == 0) break; \ + case 2: l|=((unsigned long)(*((c)++)))<< 8; \ + } \ + } + +#undef nl2c +#define nl2c(l,c) (*((c)++)=(unsigned char)(((l)>>24)&0xff), \ + *((c)++)=(unsigned char)(((l)>>16)&0xff), \ + *((c)++)=(unsigned char)(((l)>> 8)&0xff), \ + *((c)++)=(unsigned char)(((l) )&0xff)) + +#undef c2l +#define c2l(c,l) (l =(((unsigned long)(*((c)++))) ), \ + l|=(((unsigned long)(*((c)++)))<< 8), \ + l|=(((unsigned long)(*((c)++)))<<16), \ + l|=(((unsigned long)(*((c)++)))<<24)) + +#undef p_c2l +#define p_c2l(c,l,n) { \ + switch (n) { \ + case 0: l =((unsigned long)(*((c)++))); \ + case 1: l|=((unsigned long)(*((c)++)))<< 8; \ + case 2: l|=((unsigned long)(*((c)++)))<<16; \ + case 3: l|=((unsigned long)(*((c)++)))<<24; \ + } \ + } + +#undef c2l_p +/* NOTE the pointer is not incremented at the end of this */ +#define c2l_p(c,l,n) { \ + l=0; \ + (c)+=n; \ + switch (n) { \ + case 3: l =((unsigned long)(*(--(c))))<<16; \ + case 2: l|=((unsigned long)(*(--(c))))<< 8; \ + case 1: l|=((unsigned long)(*(--(c)))); \ + } \ + } + +#undef p_c2l_p +#define p_c2l_p(c,l,sc,len) { \ + switch (sc) \ + { \ + case 0: l =((unsigned long)(*((c)++))); \ + if (--len == 0) break; \ + case 1: l|=((unsigned long)(*((c)++)))<< 8; \ + if (--len == 0) break; \ + case 2: l|=((unsigned long)(*((c)++)))<<16; \ + } \ + } + +#undef l2c +#define l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \ + *((c)++)=(unsigned char)(((l)>> 8)&0xff), \ + *((c)++)=(unsigned char)(((l)>>16)&0xff), \ + *((c)++)=(unsigned char)(((l)>>24)&0xff)) + +#undef ROTATE 
+#if defined(WIN32) +#define ROTATE(a,n) _lrotl(a,n) +#else +#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n)))) +#endif + +/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */ +#if defined(WIN32) +/* 5 instructions with rotate instruction, else 9 */ +#define Endian_Reverse32(a) \ + { \ + unsigned long l=(a); \ + (a)=((ROTATE(l,8)&0x00FF00FF)|(ROTATE(l,24)&0xFF00FF00)); \ + } +#else +/* 6 instructions with rotate instruction, else 8 */ +#define Endian_Reverse32(a) \ + { \ + unsigned long l=(a); \ + l=(((l&0xFF00FF00)>>8L)|((l&0x00FF00FF)<<8L)); \ + (a)=ROTATE(l,16L); \ + } +#endif + +/* As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be + * simplified to the code in F_00_19. Wei attributes these optimisations + * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. + * #define F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) + * I've just become aware of another tweak to be made, again from Wei Dai, + * in F_40_59, (x&a)|(y&a) -> (x|y)&a + */ +#define F_00_19(b,c,d) ((((c) ^ (d)) & (b)) ^ (d)) +#define F_20_39(b,c,d) ((b) ^ (c) ^ (d)) +#define F_40_59(b,c,d) (((b) & (c)) | (((b)|(c)) & (d))) +#define F_60_79(b,c,d) F_20_39(b,c,d) + +#ifdef SHA_0 +#undef Xupdate +#define Xupdate(a,i,ia,ib,ic,id) X[(i)&0x0f]=(a)=\ + (ia[(i)&0x0f]^ib[((i)+2)&0x0f]^ic[((i)+8)&0x0f]^id[((i)+13)&0x0f]); +#endif +#ifdef SHA_1 +#undef Xupdate +#define Xupdate(a,i,ia,ib,ic,id) (a)=\ + (ia[(i)&0x0f]^ib[((i)+2)&0x0f]^ic[((i)+8)&0x0f]^id[((i)+13)&0x0f]);\ + X[(i)&0x0f]=(a)=ROTATE((a),1); +#endif + +#define BODY_00_15(i,a,b,c,d,e,f,xa) \ + (f)=xa[i]+(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \ + (b)=ROTATE((b),30); + +#define BODY_16_19(i,a,b,c,d,e,f,xa,xb,xc,xd) \ + Xupdate(f,i,xa,xb,xc,xd); \ + (f)+=(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \ + (b)=ROTATE((b),30); + +#define BODY_20_31(i,a,b,c,d,e,f,xa,xb,xc,xd) \ + Xupdate(f,i,xa,xb,xc,xd); \ + (f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \ + (b)=ROTATE((b),30); + +#define BODY_32_39(i,a,b,c,d,e,f,xa) \ + Xupdate(f,i,xa,xa,xa,xa); \ + (f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \ + (b)=ROTATE((b),30); + +#define BODY_40_59(i,a,b,c,d,e,f,xa) \ + Xupdate(f,i,xa,xa,xa,xa); \ + (f)+=(e)+K_40_59+ROTATE((a),5)+F_40_59((b),(c),(d)); \ + (b)=ROTATE((b),30); + +#define BODY_60_79(i,a,b,c,d,e,f,xa) \ + Xupdate(f,i,xa,xa,xa,xa); \ + (f)=X[(i)&0x0f]+(e)+K_60_79+ROTATE((a),5)+F_60_79((b),(c),(d)); \ + (b)=ROTATE((b),30); + diff --git a/lib/libmd/skein.3 b/lib/libmd/skein.3 new file mode 100644 index 000000000000..8fe79a4ad0bf --- /dev/null +++ b/lib/libmd/skein.3 @@ -0,0 +1,264 @@ +.\"- +.\" Copyright (c) 2016 Allan Jude +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.Dd March 8, 2022 +.Dt SKEIN 3 +.Os +.Sh NAME +.Nm SKEIN256_Init , +.Nm SKEIN256_Update , +.Nm SKEIN256_Final , +.Nm SKEIN256_End , +.Nm SKEIN256_Fd , +.Nm SKEIN256_FdChunk , +.Nm SKEIN256_File , +.Nm SKEIN256_FileChunk , +.Nm SKEIN256_Data , +.Nm SKEIN512_Init , +.Nm SKEIN512_Update , +.Nm SKEIN512_Final , +.Nm SKEIN512_End , +.Nm SKEIN512_Fd , +.Nm SKEIN512_FdChunk , +.Nm SKEIN512_File , +.Nm SKEIN512_FileChunk , +.Nm SKEIN512_Data , +.Nm SKEIN1024_Init , +.Nm SKEIN1024_Update , +.Nm SKEIN1024_Final , +.Nm SKEIN1024_End , +.Nm SKEIN1024_Fd , +.Nm SKEIN1024_FdChunk , +.Nm SKEIN1024_File , +.Nm SKEIN1024_FileChunk , +.Nm SKEIN1024_Data +.Nd calculate the ``SKEIN'' family of message digests +.Sh LIBRARY +.Lb libmd +.Sh SYNOPSIS +.In sys/types.h +.In skein.h +.Ft void +.Fn SKEIN256_Init "SKEIN256_CTX *context" +.Ft void +.Fn SKEIN256_Update "SKEIN256_CTX *context" "const unsigned char *data" "size_t len" +.Ft void +.Fn SKEIN256_Final "unsigned char digest[32]" "SKEIN256_CTX *context" +.Ft "char *" +.Fn SKEIN256_End "SKEIN256_CTX *context" "char *buf" +.Ft "char *" +.Fn SKEIN256_Fd "int fd" "char *buf" +.Ft "char *" +.Fn SKEIN256_FdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SKEIN256_File "const char *filename" "char *buf" +.Ft "char *" +.Fn SKEIN256_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SKEIN256_Data "const unsigned char *data" "unsigned int len" "char *buf" +.Ft void +.Fn SKEIN512_Init "SKEIN512_CTX *context" +.Ft void +.Fn SKEIN512_Update "SKEIN512_CTX *context" "const unsigned char *data" "size_t len" +.Ft void +.Fn SKEIN512_Final "unsigned char digest[64]" "SKEIN512_CTX *context" +.Ft "char *" +.Fn SKEIN512_End "SKEIN512_CTX *context" "char *buf" +.Ft "char *" +.Fn SKEIN512_Fd "int fd" "char *buf" +.Ft "char *" +.Fn SKEIN512_FdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SKEIN512_File "const char *filename" "char *buf" +.Ft "char *" +.Fn SKEIN512_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SKEIN512_Data "const unsigned char *data" "unsigned int len" "char *buf" +.Ft void +.Fn SKEIN1024_Init "SKEIN1024_CTX *context" +.Ft void +.Fn SKEIN1024_Update "SKEIN1024_CTX *context" "const unsigned char *data" "size_t len" +.Ft void +.Fn SKEIN1024_Final "unsigned char digest[128]" "SKEIN1024_CTX *context" +.Ft "char *" +.Fn SKEIN1024_End "SKEIN1024_CTX *context" "char *buf" +.Ft "char *" +.Fn SKEIN1024_Fd "int fd" "char *buf" +.Ft "char *" +.Fn SKEIN1024_FdChunk "int fd" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SKEIN1024_File "const char *filename" "char *buf" +.Ft "char *" +.Fn SKEIN1024_FileChunk "const char *filename" "char *buf" "off_t offset" "off_t length" +.Ft "char *" +.Fn SKEIN1024_Data "const unsigned char *data" "unsigned int len" "char *buf" +.Sh DESCRIPTION +.Li Skein +is a new family of cryptographic hash functions based on the +.Li Threefish +large-block cipher. 
+Its design combines speed, security, simplicity, and a great deal of +flexibility in a modular package that is easy to analyze. +.Li Skein +is defined for three different internal state sizes\(em256 bits, 512 bits, and +1024 bits\(emand any output size. +This allows Skein to be a drop-in replacement for the entire SHA family +of hash functions. +.Pp +The +.Fn SKEIN256_Init , +.Fn SKEIN256_Update , +and +.Fn SKEIN256_Final +functions are the core functions. +Allocate an +.Vt SKEIN256_CTX , +initialize it with +.Fn SKEIN256_Init , +run over the data with +.Fn SKEIN256_Update , +and finally extract the result using +.Fn SKEIN256_Final , +which will also erase the +.Vt SKEIN256_CTX . +.Pp +.Fn SKEIN256_End +is a wrapper for +.Fn SKEIN256_Final +which converts the return value to a 65-character +(including the terminating '\e0') +ASCII string which represents the 256 bits in hexadecimal. +.Pp +.Fn SKEIN256_File +calculates the digest of a file, and uses +.Fn SKEIN256_End +to return the result. +If the file cannot be opened, a null pointer is returned. +.Fn SKEIN256_FileChunk +is similar to +.Fn SKEIN256_File , +but it only calculates the digest over a byte-range of the file specified, +starting at +.Fa offset +and spanning +.Fa length +bytes. +If the +.Fa length +parameter is specified as 0, or more than the length of the remaining part +of the file, +.Fn SKEIN256_FileChunk +calculates the digest from +.Fa offset +to the end of file. +.Fn SKEIN256_Data +calculates the digest of a chunk of data in memory, and uses +.Fn SKEIN256_End +to return the result. +.Pp +The +.Fn SKEIN256_Fd +and +.Fn SKEIN256_FdChunk +functions are identical to their +.Fn SKEIN256_File +and +.Fn SKEIN256_FileChunk +counterparts, with the exception that the first argument is an +.Fa fd +instead of a +.Fa filename . +.Pp +When using +.Fn SKEIN256_End , +.Fn SKEIN256_File , +or +.Fn SKEIN256_Data , +the +.Fa buf +argument can be a null pointer, in which case the returned string +is allocated with +.Xr malloc 3 +and subsequently must be explicitly deallocated using +.Xr free 3 +after use. +If the +.Fa buf +argument is non-null it must point to at least 65 characters of buffer space. +.Pp +The +.Li SKEIN512_ +and +.Li SKEIN1024_ +functions are similar to the +.Li SKEIN256_ +functions except they produce a 512-bit, 129 character, +or 1024-bit, 257 character, output. +.Sh ERRORS +The +.Fn SKEIN256_End +function called with a null buf argument may fail and return NULL if: +.Bl -tag -width Er +.It Bq Er ENOMEM +Insufficient storage space is available. +.El +.Pp +The +.Fn SKEIN256_File +and +.Fn SKEIN256_FileChunk +may return NULL when underlying +.Xr open 2 , +.Xr fstat 2 , +.Xr lseek 2 , +or +.Xr SKEIN256_End 3 +fail. +.Sh SEE ALSO +.Xr md4 3 , +.Xr md5 3 , +.Xr ripemd 3 , +.Xr sha 3 , +.Xr sha256 3 , +.Xr sha512 3 +.Sh HISTORY +These functions appeared in +.Fx 11.0 . +.Sh AUTHORS +.An -nosplit +The core hash routines were imported from version 1.3 of the optimized +Skein reference implementation written by +.An Doug Whiting +as submitted to the NIST SHA-3 contest. +The algorithms were developed by +.An Niels Ferguson , +.An Stefan Lucks , +.An Bruce Schneier , +.An Doug Whiting , +.An Mihir Bellare , +.An Tadayoshi Kohno , +.An Jon Callas , +and +.An Jesse Walker .
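The tests added below drive every digest against fixed reference vectors. As an illustrative bridge (not part of the committed sources), this sketch checks one of those vectors by hand with the one-shot SKEIN256_Data() interface documented above; the expected string is the SKEIN256("abc") value from the skein256.ref target in the Makefile that follows, and the program is assumed to be linked against libmd.

#include <sys/types.h>
#include <skein.h>

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char hex[65];		/* 256-bit digest: 64 hex digits plus NUL */
	/* Expected value copied from the skein256.ref test vector below. */
	const char *want =
	    "258bdec343b9fde1639221a5ae0144a96e552e5288753c5fec76c05fc2fc1870";

	/* One-shot digest of an in-memory buffer. */
	SKEIN256_Data((const unsigned char *)"abc", 3, hex);
	printf("SKEIN256(\"abc\") = %s (%s)\n", hex,
	    strcmp(hex, want) == 0 ? "matches reference" : "MISMATCH");
	return (0);
}

If the library is working correctly, the program prints the digest and reports a match, which is essentially what the generated ATF test cases below automate for every digest in the family.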
diff --git a/lib/libmd/tests/Makefile b/lib/libmd/tests/Makefile new file mode 100644 index 000000000000..e11e8e1bf154 --- /dev/null +++ b/lib/libmd/tests/Makefile @@ -0,0 +1,255 @@ +# $FreeBSD$ + +PACKAGE= tests + +ATF_TESTS_SH= libmd_test + +MDTESTS= md rmd sha skein + +MDTESTS_md= md4 md5 +MDTESTS_rmd= rmd160 +MDTESTS_sha= sha0 sha1 sha224 sha256 sha384 sha512 sha512t224 sha512t256 +MDTESTS_skein= skein256 skein512 skein1024 + +LIBADD= md + +.for mdt in ${MDTESTS} +REFS_${mdt}= ${MDTESTS_${mdt}:C/$/.ref/} +${PACKAGE}FILES+= ${REFS_${mdt}} +CLEANFILES+= ${REFS_${mdt}} + +# This target is simply convenient for auditing, letting you build all of one +# digest suite with `make <family>driver`. +.PHONY: ${mdt}driver + +. for mdtc in ${MDTESTS_${mdt}} +PROGS+= ${mdtc}driver +DIGESTS+= ${mdtc} + +# Avoid obj collision, we'll build the same driver.c multiple times. +${mdtc}driver.c: ${mdt}driver.c + ${CP} ${.CURDIR}/${mdt}driver.c ${.TARGET} + +SRCS_${mdtc}driver= ${mdtc}driver.c +BINDIR_${mdtc}driver= ${TESTSDIR} +.if ${mdt} == "sha" +CFLAGS_${mdtc}driver= -DSHA=${mdtc:C/sha//:C/t//} +.else +CFLAGS_${mdtc}driver= -D${mdt:tu}=${mdtc:C/${mdt}//} +.endif + +${mdt}driver: ${mdtc}driver + +. endfor +.endfor + +CLEANFILES+= libmd_test.sh +libmd_test.sh: Makefile + :> ${.TARGET} +.for _digest in ${DIGESTS} + @echo "atf_test_case ${_digest}" >> ${.TARGET} + @echo "${_digest}_body() {" >> ${.TARGET} + @echo " atf_check -o file:\$$(atf_get_srcdir)/${_digest}.ref \\" >> \ + ${.TARGET} + @echo " \$$(atf_get_srcdir)/${_digest}driver" >> ${.TARGET} + @echo "}" >> ${.TARGET} +.endfor + @echo "atf_init_test_cases() {" >> ${.TARGET} +.for _digest in ${DIGESTS} + @echo " atf_add_test_case ${_digest}" >> ${.TARGET} +.endfor + @echo "}" >> ${.TARGET} + +md4.ref: + echo 'MD4 test suite:' > ${.TARGET} + @echo 'MD4 ("") = 31d6cfe0d16ae931b73c59d7e0c089c0' >> ${.TARGET} + @echo 'MD4 ("a") = bde52cb31de33e46245e05fbdbd6fb24' >> ${.TARGET} + @echo 'MD4 ("abc") = a448017aaf21d8525fc10ae87aa6729d' >> ${.TARGET} + @echo 'MD4 ("message digest") = d9130a8164549fe818874806e1c7014b' >> ${.TARGET} + @echo 'MD4 ("abcdefghijklmnopqrstuvwxyz") = d79e1c308aa5bbcdeea8ed63df412da9' >> ${.TARGET} + @echo 'MD4 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + '043f8582f241db351ce627e153e7f0e4' >> ${.TARGET} + @echo 'MD4 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + 'e33b4ddc9c38f2199c3e7b164fcc0536' >> ${.TARGET} + +md5.ref: + echo 'MD5 test suite:' > ${.TARGET} + @echo 'MD5 ("") = d41d8cd98f00b204e9800998ecf8427e' >> ${.TARGET} + @echo 'MD5 ("a") = 0cc175b9c0f1b6a831c399e269772661' >> ${.TARGET} + @echo 'MD5 ("abc") = 900150983cd24fb0d6963f7d28e17f72' >> ${.TARGET} + @echo 'MD5 ("message digest") = f96b697d7cb7938d525a2f31aaf161d0' >> ${.TARGET} + @echo 'MD5 ("abcdefghijklmnopqrstuvwxyz") = c3fcd3d76192e4007dfb496cca67e13b' >> ${.TARGET} + @echo 'MD5 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") = d174ab98d277d9f5a5611c2c9f419d9f' >> ${.TARGET} + @echo 'MD5 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") = 57edf4a22be3c955ac49da2e2107b67a' >> ${.TARGET} + +sha0.ref: + echo 'SHA-0 test suite:' > ${.TARGET} + @echo 'SHA-0 ("") = f96cea198ad1dd5617ac084a3d92c6107708c0ef' >> ${.TARGET} + @echo 'SHA-0 ("abc") = 0164b8a914cd2a5e74c4f7ff082c4d97f1edf880' >> ${.TARGET} + @echo 'SHA-0 ("message digest") =' \ + 'c1b0f222d150ebb9aa36a40cafdc8bcbed830b14' >> ${.TARGET} + @echo 'SHA-0 ("abcdefghijklmnopqrstuvwxyz") =' \ 
+ 'b40ce07a430cfd3c033039b9fe9afec95dc1bdcd' >> ${.TARGET} + @echo 'SHA-0 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + '79e966f7a3a990df33e40e3d7f8f18d2caebadfa' >> ${.TARGET} + @echo 'SHA-0 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + '4aa29d14d171522ece47bee8957e35a41f3e9cff' >> ${.TARGET} + +sha1.ref: + echo 'SHA-1 test suite:' > ${.TARGET} + @echo 'SHA-1 ("") = da39a3ee5e6b4b0d3255bfef95601890afd80709' >> ${.TARGET} + @echo 'SHA-1 ("abc") = a9993e364706816aba3e25717850c26c9cd0d89d' >> ${.TARGET} + @echo 'SHA-1 ("message digest") =' \ + 'c12252ceda8be8994d5fa0290a47231c1d16aae3' >> ${.TARGET} + @echo 'SHA-1 ("abcdefghijklmnopqrstuvwxyz") =' \ + '32d10c7b8cf96570ca04ce37f2a19d84240d3a89' >> ${.TARGET} + @echo 'SHA-1 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + '761c457bf73b14d27e9e9265c46f4b4dda11f940' >> ${.TARGET} + @echo 'SHA-1 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + '50abf5706a150990a08b2c5ea40fa0e585554732' >> ${.TARGET} + +sha224.ref: + echo 'SHA-224 test suite:' > ${.TARGET} + @echo 'SHA-224 ("") = d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f' >> ${.TARGET} + @echo 'SHA-224 ("abc") =' \ + '23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7' >> ${.TARGET} + @echo 'SHA-224 ("message digest") =' \ + '2cb21c83ae2f004de7e81c3c7019cbcb65b71ab656b22d6d0c39b8eb' >> ${.TARGET} + @echo 'SHA-224 ("abcdefghijklmnopqrstuvwxyz") =' \ + '45a5f72c39c5cff2522eb3429799e49e5f44b356ef926bcf390dccc2' >> ${.TARGET} + @echo 'SHA-224 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + 'bff72b4fcb7d75e5632900ac5f90d219e05e97a7bde72e740db393d9' >> ${.TARGET} + @echo 'SHA-224 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + 'b50aecbe4e9bb0b57bc5f3ae760a8e01db24f203fb3cdcd13148046e' >> ${.TARGET} + +sha256.ref: + echo 'SHA-256 test suite:' > ${.TARGET} + @echo 'SHA-256 ("") = e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' >> ${.TARGET} + @echo 'SHA-256 ("abc") =' \ + 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad' >> ${.TARGET} + @echo 'SHA-256 ("message digest") =' \ + 'f7846f55cf23e14eebeab5b4e1550cad5b509e3348fbc4efa3a1413d393cb650' >> ${.TARGET} + @echo 'SHA-256 ("abcdefghijklmnopqrstuvwxyz") =' \ + '71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73' >> ${.TARGET} + @echo 'SHA-256 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + 'db4bfcbd4da0cd85a60c3c37d3fbd8805c77f15fc6b1fdfe614ee0a7c8fdb4c0' >> ${.TARGET} + @echo 'SHA-256 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + 'f371bc4a311f2b009eef952dd83ca80e2b60026c8e935592d0f9c308453c813e' >> ${.TARGET} + +sha384.ref: + echo 'SHA-384 test suite:' > ${.TARGET} + @echo 'SHA-384 ("") =' \ + '38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b' >> ${.TARGET} + @echo 'SHA-384 ("abc") =' \ + 'cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7' >> ${.TARGET} + @echo 'SHA-384 ("message digest") =' \ + '473ed35167ec1f5d8e550368a3db39be54639f828868e9454c239fc8b52e3c61dbd0d8b4de1390c256dcbb5d5fd99cd5' >> ${.TARGET} + @echo 'SHA-384 ("abcdefghijklmnopqrstuvwxyz") =' \ + 'feb67349df3db6f5924815d6c3dc133f091809213731fe5c7b5f4999e463479ff2877f5f2936fa63bb43784b12f3ebb4' >> ${.TARGET} + @echo 'SHA-384 
("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + '1761336e3f7cbfe51deb137f026f89e01a448e3b1fafa64039c1464ee8732f11a5341a6f41e0c202294736ed64db1a84' >> ${.TARGET} + @echo 'SHA-384 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + 'b12932b0627d1c060942f5447764155655bd4da0c9afa6dd9b9ef53129af1b8fb0195996d2de9ca0df9d821ffee67026' >> ${.TARGET} + +sha512.ref: + echo 'SHA-512 test suite:' > ${.TARGET} + @echo 'SHA-512 ("") =' \ + 'cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e' >> ${.TARGET} + @echo 'SHA-512 ("abc") =' \ + 'ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f' >> ${.TARGET} + @echo 'SHA-512 ("message digest") =' \ + '107dbf389d9e9f71a3a95f6c055b9251bc5268c2be16d6c13492ea45b0199f3309e16455ab1e96118e8a905d5597b72038ddb372a89826046de66687bb420e7c' >> ${.TARGET} + @echo 'SHA-512 ("abcdefghijklmnopqrstuvwxyz") =' \ + '4dbff86cc2ca1bae1e16468a05cb9881c97f1753bce3619034898faa1aabe429955a1bf8ec483d7421fe3c1646613a59ed5441fb0f321389f77f48a879c7b1f1' >> ${.TARGET} + @echo 'SHA-512 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + '1e07be23c26a86ea37ea810c8ec7809352515a970e9253c26f536cfc7a9996c45c8370583e0a78fa4a90041d71a4ceab7423f19c71b9d5a3e01249f0bebd5894' >> ${.TARGET} + @echo 'SHA-512 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + '72ec1ef1124a45b047e8b7c75a932195135bb61de24ec0d1914042246e0aec3a2354e093d76f3048b456764346900cb130d2a4fd5dd16abb5e30bcb850dee843' >> ${.TARGET} + +sha512t224.ref: + echo 'SHA-512224 test suite:' > ${.TARGET} + @echo 'SHA-512224 ("") =' \ + '6ed0dd02806fa89e25de060c19d3ac86cabb87d6a0ddd05c333b84f4' >> ${.TARGET} + @echo 'SHA-512224 ("abc") =' \ + '4634270f707b6a54daae7530460842e20e37ed265ceee9a43e8924aa' >> ${.TARGET} + @echo 'SHA-512224 ("message digest") =' \ + 'ad1a4db188fe57064f4f24609d2a83cd0afb9b398eb2fcaeaae2c564' >> ${.TARGET} + @echo 'SHA-512224 ("abcdefghijklmnopqrstuvwxyz") =' \ + 'ff83148aa07ec30655c1b40aff86141c0215fe2a54f767d3f38743d8' >> ${.TARGET} + @echo 'SHA-512224 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + 'a8b4b9174b99ffc67d6f49be9981587b96441051e16e6dd036b140d3' >> ${.TARGET} + @echo 'SHA-512224 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + 'ae988faaa47e401a45f704d1272d99702458fea2ddc6582827556dd2' >> ${.TARGET} + +sha512t256.ref: + echo 'SHA-512256 test suite:' > ${.TARGET} + @echo 'SHA-512256 ("") =' \ + 'c672b8d1ef56ed28ab87c3622c5114069bdd3ad7b8f9737498d0c01ecef0967a' >> ${.TARGET} + @echo 'SHA-512256 ("abc") =' \ + '53048e2681941ef99b2e29b76b4c7dabe4c2d0c634fc6d46e0e2f13107e7af23' >> ${.TARGET} + @echo 'SHA-512256 ("message digest") =' \ + '0cf471fd17ed69d990daf3433c89b16d63dec1bb9cb42a6094604ee5d7b4e9fb' >> ${.TARGET} + @echo 'SHA-512256 ("abcdefghijklmnopqrstuvwxyz") =' \ + 'fc3189443f9c268f626aea08a756abe7b726b05f701cb08222312ccfd6710a26' >> ${.TARGET} + @echo 'SHA-512256 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + 'cdf1cc0effe26ecc0c13758f7b4a48e000615df241284185c39eb05d355bb9c8' >> ${.TARGET} + @echo 'SHA-512256 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + '2c9fdbc0c90bdd87612ee8455474f9044850241dc105b1e8b94b8ddf5fac9148' >> ${.TARGET} + +rmd160.ref: + echo 'RIPEMD160 test suite:' > 
${.TARGET} + @echo 'RIPEMD160 ("") = 9c1185a5c5e9fc54612808977ee8f548b2258d31' >> ${.TARGET} + @echo 'RIPEMD160 ("abc") = 8eb208f7e05d987a9b044a8e98c6b087f15a0bfc' >> ${.TARGET} + @echo 'RIPEMD160 ("message digest") =' \ + '5d0689ef49d2fae572b881b123a85ffa21595f36' >> ${.TARGET} + @echo 'RIPEMD160 ("abcdefghijklmnopqrstuvwxyz") =' \ + 'f71c27109c692c1b56bbdceb5b9d2865b3708dbc' >> ${.TARGET} + @echo 'RIPEMD160 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + 'b0e20b6e3116640286ed3a87a5713079b21f5189' >> ${.TARGET} + @echo 'RIPEMD160 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + '9b752e45573d4b39f4dbd3323cab82bf63326bfb' >> ${.TARGET} + +skein256.ref: + echo 'SKEIN256 test suite:' > ${.TARGET} + @echo 'SKEIN256 ("") = c8877087da56e072870daa843f176e9453115929094c3a40c463a196c29bf7ba' >> ${.TARGET} + @echo 'SKEIN256 ("abc") = 258bdec343b9fde1639221a5ae0144a96e552e5288753c5fec76c05fc2fc1870' >> ${.TARGET} + @echo 'SKEIN256 ("message digest") =' \ + '4d2ce0062b5eb3a4db95bc1117dd8aa014f6cd50fdc8e64f31f7d41f9231e488' >> ${.TARGET} + @echo 'SKEIN256 ("abcdefghijklmnopqrstuvwxyz") =' \ + '46d8440685461b00e3ddb891b2ecc6855287d2bd8834a95fb1c1708b00ea5e82' >> ${.TARGET} + @echo 'SKEIN256 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + '7c5eb606389556b33d34eb2536459528dc0af97adbcd0ce273aeb650f598d4b2' >> ${.TARGET} + @echo 'SKEIN256 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + '4def7a7e5464a140ae9c3a80279fbebce4bd00f9faad819ab7e001512f67a10d' >> ${.TARGET} + +skein512.ref: + echo 'SKEIN512 test suite:' > ${.TARGET} + @echo 'SKEIN512 ("") =' \ + 'bc5b4c50925519c290cc634277ae3d6257212395cba733bbad37a4af0fa06af41fca7903d06564fea7a2d3730dbdb80c1f85562dfcc070334ea4d1d9e72cba7a' >> ${.TARGET} + @echo 'SKEIN512 ("abc") =' \ + '8f5dd9ec798152668e35129496b029a960c9a9b88662f7f9482f110b31f9f93893ecfb25c009baad9e46737197d5630379816a886aa05526d3a70df272d96e75' >> ${.TARGET} + @echo 'SKEIN512 ("message digest") =' \ + '15b73c158ffb875fed4d72801ded0794c720b121c0c78edf45f900937e6933d9e21a3a984206933d504b5dbb2368000411477ee1b204c986068df77886542fcc' >> ${.TARGET} + @echo 'SKEIN512 ("abcdefghijklmnopqrstuvwxyz") =' \ + '23793ad900ef12f9165c8080da6fdfd2c8354a2929b8aadf83aa82a3c6470342f57cf8c035ec0d97429b626c4d94f28632c8f5134fd367dca5cf293d2ec13f8c' >> ${.TARGET} + @echo 'SKEIN512 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + '0c6bed927e022f5ddcf81877d42e5f75798a9f8fd3ede3d83baac0a2f364b082e036c11af35fe478745459dd8f5c0b73efe3c56ba5bb2009208d5a29cc6e469c' >> ${.TARGET} + @echo 'SKEIN512 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + '2ca9fcffb3456f297d1b5f407014ecb856f0baac8eb540f534b1f187196f21e88f31103128c2f03fcc9857d7a58eb66f9525e2302d88833ee069295537a434ce' >> ${.TARGET} + +skein1024.ref: + echo 'SKEIN1024 test suite:' > ${.TARGET} + @echo 'SKEIN1024 ("") =' \ + '0fff9563bb3279289227ac77d319b6fff8d7e9f09da1247b72a0a265cd6d2a62645ad547ed8193db48cff847c06494a03f55666d3b47eb4c20456c9373c86297d630d5578ebd34cb40991578f9f52b18003efa35d3da6553ff35db91b81ab890bec1b189b7f52cb2a783ebb7d823d725b0b4a71f6824e88f68f982eefc6d19c6' >> ${.TARGET} + @echo 'SKEIN1024 ("abc") =' \ + 
'35a599a0f91abcdb4cb73c19b8cb8d947742d82c309137a7caed29e8e0a2ca7a9ff9a90c34c1908cc7e7fd99bb15032fb86e76df21b72628399b5f7c3cc209d7bb31c99cd4e19465622a049afbb87c03b5ce3888d17e6e667279ec0aa9b3e2712624c01b5f5bbe1a564220bdcf6990af0c2539019f313fdd7406cca3892a1f1f' >> ${.TARGET} + @echo 'SKEIN1024 ("message digest") =' \ + 'ea891f5268acd0fac97467fc1aa89d1ce8681a9992a42540e53babee861483110c2d16f49e73bac27653ff173003e40cfb08516cd34262e6af95a5d8645c9c1abb3e813604d508b8511b30f9a5c1b352aa0791c7d2f27b2706dccea54bc7de6555b5202351751c3299f97c09cf89c40f67187e2521c0fad82b30edbb224f0458' >> ${.TARGET} + @echo 'SKEIN1024 ("abcdefghijklmnopqrstuvwxyz") =' \ + 'f23d95c2a25fbcd0e797cd058fec39d3c52d2b5afd7a9af1df934e63257d1d3dcf3246e7329c0f1104c1e51e3d22e300507b0c3b9f985bb1f645ef49835080536becf83788e17fed09c9982ba65c3cb7ffe6a5f745b911c506962adf226e435c42f6f6bc08d288f9c810e807e3216ef444f3db22744441deefa4900982a1371f' >> ${.TARGET} + @echo 'SKEIN1024 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \ + 'cf3889e8a8d11bfd3938055d7d061437962bc5eac8ae83b1b71c94be201b8cf657fdbfc38674997a008c0c903f56a23feb3ae30e012377f1cfa080a9ca7fe8b96138662653fb3335c7d06595bf8baf65e215307532094cfdfa056bd8052ab792a3944a2adaa47b30335b8badb8fe9eb94fe329cdca04e58bbc530f0af709f469' >> ${.TARGET} + @echo 'SKEIN1024 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \ + 'cf21a613620e6c119eca31fdfaad449a8e02f95ca256c21d2a105f8e4157048f9fe1e897893ea18b64e0e37cb07d5ac947f27ba544caf7cbc1ad094e675aed77a366270f7eb7f46543bccfa61c526fd628408058ed00ed566ac35a9761d002e629c4fb0d430b2f4ad016fcc49c44d2981c4002da0eecc42144160e2eaea4855a' >> ${.TARGET} + +.include <bsd.test.mk> diff --git a/lib/libmd/tests/mddriver.c b/lib/libmd/tests/mddriver.c new file mode 100644 index 000000000000..1229a4e2929f --- /dev/null +++ b/lib/libmd/tests/mddriver.c @@ -0,0 +1,66 @@ +/* MDDRIVER.C - test driver for MD2, MD4 and MD5 */ + +/* Copyright (C) 1990-2, RSA Data Security, Inc. Created 1990. All rights + * reserved. + * + * RSA Data Security, Inc. makes no representations concerning either the + * merchantability of this software or the suitability of this software for + * any particular purpose. It is provided "as is" without express or implied + * warranty of any kind. + * + * These notices must be retained in any copies of any part of this + * documentation and/or software. */ + +#include <sys/types.h> + +#include <stdio.h> +#include <time.h> +#include <string.h> + +/* The following makes MD default to MD5 if it has not already been defined + * with C compiler flags. */ +#ifndef MD +#define MD 5 +#endif + +#if MD == 2 +#include "md2.h" +#define MDData MD2Data +#endif +#if MD == 4 +#include "md4.h" +#define MDData MD4Data +#endif +#if MD == 5 +#include "md5.h" +#define MDData MD5Data +#endif + +/* Digests a string and prints the result. */ +static void +MDString(const char *string) +{ + char buf[33]; + + printf("MD%d (\"%s\") = %s\n", + MD, string, MDData(string, strlen(string), buf)); +} + +/* Digests a reference suite of strings and prints the results. 
*/ +int +main(void) +{ + printf("MD%d test suite:\n", MD); + + MDString(""); + MDString("a"); + MDString("abc"); + MDString("message digest"); + MDString("abcdefghijklmnopqrstuvwxyz"); + MDString("ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz0123456789"); + MDString("1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890"); + + return 0; +} diff --git a/lib/libmd/tests/rmddriver.c b/lib/libmd/tests/rmddriver.c new file mode 100644 index 000000000000..50227dcffb5d --- /dev/null +++ b/lib/libmd/tests/rmddriver.c @@ -0,0 +1,48 @@ +/* RIPEMD160DRIVER.C - test driver for RIPEMD160 */ + +/* Copyright (C) 1990-2, RSA Data Security, Inc. Created 1990. All rights + * reserved. + * + * RSA Data Security, Inc. makes no representations concerning either the + * merchantability of this software or the suitability of this software for + * any particular purpose. It is provided "as is" without express or implied + * warranty of any kind. + * + * These notices must be retained in any copies of any part of this + * documentation and/or software. */ + +#include <sys/types.h> + +#include <stdio.h> +#include <time.h> +#include <string.h> + +#include "ripemd.h" + +/* Digests a string and prints the result. */ +static void +RIPEMD160String(const char *string) +{ + char buf[2*20 + 1]; + + printf("RIPEMD160 (\"%s\") = %s\n", + string, RIPEMD160_Data(string, strlen(string), buf)); +} + +/* Digests a reference suite of strings and prints the results. */ +int +main(void) +{ + printf("RIPEMD160 test suite:\n"); + + RIPEMD160String(""); + RIPEMD160String("abc"); + RIPEMD160String("message digest"); + RIPEMD160String("abcdefghijklmnopqrstuvwxyz"); + RIPEMD160String("ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz0123456789"); + RIPEMD160String("1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890"); + + return 0; +} diff --git a/lib/libmd/tests/shadriver.c b/lib/libmd/tests/shadriver.c new file mode 100644 index 000000000000..85cb21185bc8 --- /dev/null +++ b/lib/libmd/tests/shadriver.c @@ -0,0 +1,85 @@ +/* SHADRIVER.C - test driver for SHA-1 (and SHA-2) */ + +/*- + * SPDX-License-Identifier: RSA-MD + * + * Copyright (C) 1990-2, RSA Data Security, Inc. Created 1990. All rights + * reserved. + * + * RSA Data Security, Inc. makes no representations concerning either the + * merchantability of this software or the suitability of this software for + * any particular purpose. It is provided "as is" without express or implied + * warranty of any kind. + * + * These notices must be retained in any copies of any part of this + * documentation and/or software. */ + +#include <sys/types.h> + +#include <stdio.h> +#include <time.h> +#include <string.h> + +#include "sha.h" +#include "sha224.h" +#include "sha256.h" +#include "sha384.h" +#include "sha512.h" +#include "sha512t.h" + +/* The following makes SHA default to SHA-1 if it has not already been + * defined with C compiler flags. */ +#ifndef SHA +#define SHA 1 +#endif + +#if SHA == 1 +#undef SHA_Data +#define SHA_Data SHA1_Data +#elif SHA == 224 +#undef SHA_Data +#define SHA_Data SHA224_Data +#elif SHA == 256 +#undef SHA_Data +#define SHA_Data SHA256_Data +#elif SHA == 384 +#undef SHA_Data +#define SHA_Data SHA384_Data +#elif SHA == 512 +#undef SHA_Data +#define SHA_Data SHA512_Data +#elif SHA == 512224 +#undef SHA_Data +#define SHA_Data SHA512_224_Data +#elif SHA == 512256 +#undef SHA_Data +#define SHA_Data SHA512_256_Data +#endif + +/* Digests a string and prints the result. 
*/ +static void +SHAString(const char *string) +{ + char buf[2*64 + 1]; + + printf("SHA-%d (\"%s\") = %s\n", + SHA, string, SHA_Data(string, strlen(string), buf)); +} + +/* Digests a reference suite of strings and prints the results. */ +int +main(void) +{ + printf("SHA-%d test suite:\n", SHA); + + SHAString(""); + SHAString("abc"); + SHAString("message digest"); + SHAString("abcdefghijklmnopqrstuvwxyz"); + SHAString("ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz0123456789"); + SHAString("1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890"); + + return 0; +} diff --git a/lib/libmd/tests/skeindriver.c b/lib/libmd/tests/skeindriver.c new file mode 100644 index 000000000000..a28ac8fc2c2a --- /dev/null +++ b/lib/libmd/tests/skeindriver.c @@ -0,0 +1,68 @@ +/* SKEINDRIVER.C - test driver for SKEIN */ + +/*- + * SPDX-License-Identifier: RSA-MD + * + * Copyright (C) 1990-2, RSA Data Security, Inc. Created 1990. All rights + * reserved. + * + * RSA Data Security, Inc. makes no representations concerning either the + * merchantability of this software or the suitability of this software for + * any particular purpose. It is provided "as is" without express or implied + * warranty of any kind. + * + * These notices must be retained in any copies of any part of this + * documentation and/or software. */ + +#include <sys/types.h> + +#include <stdio.h> +#include <time.h> +#include <string.h> + +#include "skein.h" + +/* The following makes SKEIN default to SKEIN512 if it has not already been + * defined with C compiler flags. */ +#ifndef SKEIN +#define SKEIN 512 +#endif + +#if SKEIN == 256 +#undef SKEIN_Data +#define SKEIN_Data SKEIN256_Data +#elif SKEIN == 512 +#undef SKEIN_Data +#define SKEIN_Data SKEIN512_Data +#elif SKEIN == 1024 +#undef SKEIN_Data +#define SKEIN_Data SKEIN1024_Data +#endif + +/* Digests a string and prints the result. */ +static void +SKEINString(const char *string) +{ + char buf[2*128 + 1]; + + printf("SKEIN%d (\"%s\") = %s\n", + SKEIN, string, SKEIN_Data(string, strlen(string), buf)); +} + +/* Digests a reference suite of strings and prints the results. */ +int +main(void) +{ + printf("SKEIN%d test suite:\n", SKEIN); + + SKEINString(""); + SKEINString("abc"); + SKEINString("message digest"); + SKEINString("abcdefghijklmnopqrstuvwxyz"); + SKEINString("ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz0123456789"); + SKEINString("1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890"); + + return 0; +} |
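Editor's note, not part of the diff above: the reference vectors in the *.ref targets can be reproduced outside the test harness with libmd's one-shot *_Data() convenience calls, the same call family the test drivers use. Below is a minimal sketch under two assumptions that are mine, not the commit's: the installed <sha256.h> header (in-tree the drivers use "sha256.h") and linking with -lmd. The expected string is copied from the sha256.ref target above.

/*
 * Sketch: reproduce the sha256.ref "abc" vector with the one-shot
 * SHA256_Data() interface from libmd.  Build (assumed): cc t.c -lmd
 */
#include <sha256.h>

#include <stdio.h>
#include <string.h>

int
main(void)
{
        const char *msg = "abc";
        /* Expected digest, copied from the sha256.ref target above. */
        const char *want =
            "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad";
        char buf[2*32 + 1];             /* 64 hex digits plus NUL */

        SHA256_Data(msg, strlen(msg), buf);
        printf("SHA-256 (\"%s\") = %s (%s)\n", msg, buf,
            strcmp(buf, want) == 0 ? "ok" : "MISMATCH");
        return 0;
}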
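The drivers above exercise only the one-shot *_Data() entry points. libmd also exports a streaming Init()/Update()/End() interface for each algorithm; the sketch below, again an illustration rather than part of the commit and assuming <sha256.h> plus -lmd, shows that feeding the "message digest" vector in two Update() calls yields the same hex string that sha256.ref lists.

/*
 * Sketch: the streaming SHA-256 interface.  Two partial updates must
 * produce the same digest as a single SHA256_Data() call.
 */
#include <sha256.h>

#include <stdio.h>
#include <string.h>

int
main(void)
{
        SHA256_CTX ctx;
        char buf[2*32 + 1];     /* 64 hex digits plus NUL, as in the drivers */

        SHA256_Init(&ctx);
        SHA256_Update(&ctx, "message ", strlen("message "));
        SHA256_Update(&ctx, "digest", strlen("digest"));
        SHA256_End(&ctx, buf);  /* finalizes and formats the digest as hex */

        /*
         * sha256.ref above lists
         * f7846f55cf23e14eebeab5b4e1550cad5b509e3348fbc4efa3a1413d393cb650
         * for "message digest".
         */
        printf("SHA-256 (\"message digest\") = %s\n", buf);
        return 0;
}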
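The drivers hash only in-memory strings; libmd additionally exports File() and FileChunk() entry points for each algorithm. A last sketch, with the same assumed header and -lmd link flag and a hypothetical program name, hashes a file with SHA256_File(), which by the md5(3)-family convention returns NULL when the file cannot be read.

/*
 * Sketch (hypothetical utility "sha256file"): hash a file with
 * SHA256_File() and print the digest in driver-style notation.
 */
#include <sha256.h>

#include <stdio.h>

int
main(int argc, char **argv)
{
        char buf[2*32 + 1];     /* 64 hex digits plus NUL */

        if (argc != 2) {
                fprintf(stderr, "usage: sha256file filename\n");
                return 1;
        }
        /* SHA256_File() returns NULL when the file cannot be read. */
        if (SHA256_File(argv[1], buf) == NULL) {
                perror(argv[1]);
                return 1;
        }
        printf("SHA-256 (%s) = %s\n", argv[1], buf);
        return 0;
}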