diff options
Diffstat (limited to 'crypto/bn')
 -rw-r--r--  crypto/bn/asm/armv4-gf2m.pl        |   4
 -rw-r--r--  crypto/bn/asm/rsaz-2k-avxifma.pl   | 105
 -rw-r--r--  crypto/bn/asm/rsaz-3k-avxifma.pl   |  41
 -rw-r--r--  crypto/bn/asm/rsaz-4k-avxifma.pl   |  41
 -rw-r--r--  crypto/bn/asm/sparcv9-mont.pl      |   4
 -rw-r--r--  crypto/bn/bn_dh.c                  |  64
 -rw-r--r--  crypto/bn/bn_exp.c                 |  34
 -rw-r--r--  crypto/bn/bn_local.h               |   4
 -rw-r--r--  crypto/bn/bn_mont.c                |   9
 -rw-r--r--  crypto/bn/bn_ppc.c                 |  10
 -rw-r--r--  crypto/bn/bn_prime.c               |   4
 -rw-r--r--  crypto/bn/bn_rsa_fips186_4.c       |   4
 -rw-r--r--  crypto/bn/bn_sparc.c               |  16
13 files changed, 254 insertions(+), 86 deletions(-)
diff --git a/crypto/bn/asm/armv4-gf2m.pl b/crypto/bn/asm/armv4-gf2m.pl index 5733a0174660..923e6d4464d2 100644 --- a/crypto/bn/asm/armv4-gf2m.pl +++ b/crypto/bn/asm/armv4-gf2m.pl @@ -1,5 +1,5 @@ #! /usr/bin/env perl -# Copyright 2011-2023 The OpenSSL Project Authors. All Rights Reserved. +# Copyright 2011-2026 The OpenSSL Project Authors. All Rights Reserved. # # Licensed under the Apache License 2.0 (the "License"). You may not use # this file except in compliance with the License. You can obtain a copy @@ -37,7 +37,7 @@ # Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software # Polynomial Multiplication on ARM Processors using the NEON Engine. # -# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf +# https://conradoplg.modp.net/files/2010/12/mocrysen13.pdf # $output is the last argument if it looks like a file (it has an extension) # $flavour is the first argument if it doesn't look like a file diff --git a/crypto/bn/asm/rsaz-2k-avxifma.pl b/crypto/bn/asm/rsaz-2k-avxifma.pl index ea45d2051a70..b84a3e4f1954 100644 --- a/crypto/bn/asm/rsaz-2k-avxifma.pl +++ b/crypto/bn/asm/rsaz-2k-avxifma.pl @@ -1,4 +1,4 @@ -# Copyright 2024-2025 The OpenSSL Project Authors. All Rights Reserved. +# Copyright 2024-2026 The OpenSSL Project Authors. All Rights Reserved. # Copyright (c) 2024, Intel Corporation. All Rights Reserved. # # Licensed under the Apache License 2.0 (the "License"). 
You may not use @@ -362,6 +362,23 @@ ossl_rsaz_amm52x20_x1_avxifma256: .cfi_push %r14 push %r15 .cfi_push %r15 +___ +$code.=<<___ if ($win64); + push %rsi # save non-volatile registers + push %rdi + lea -168(%rsp), %rsp # 16*10 + (8 bytes to get correct 16-byte SIMD alignment) + vmovapd %xmm6, `16*0`(%rsp) + vmovapd %xmm7, `16*1`(%rsp) + vmovapd %xmm8, `16*2`(%rsp) + vmovapd %xmm9, `16*3`(%rsp) + vmovapd %xmm10, `16*4`(%rsp) + vmovapd %xmm11, `16*5`(%rsp) + vmovapd %xmm12, `16*6`(%rsp) + vmovapd %xmm13, `16*7`(%rsp) + vmovapd %xmm14, `16*8`(%rsp) + vmovapd %xmm15, `16*9`(%rsp) +___ +$code.=<<___; .Lossl_rsaz_amm52x20_x1_avxifma256_body: # Zeroing accumulators @@ -401,6 +418,23 @@ $code.=<<___; vmovdqu $R2_0, `4*32`($res) vzeroupper +___ +$code.=<<___ if ($win64); + vmovapd `16*0`(%rsp), %xmm6 + vmovapd `16*1`(%rsp), %xmm7 + vmovapd `16*2`(%rsp), %xmm8 + vmovapd `16*3`(%rsp), %xmm9 + vmovapd `16*4`(%rsp), %xmm10 + vmovapd `16*5`(%rsp), %xmm11 + vmovapd `16*6`(%rsp), %xmm12 + vmovapd `16*7`(%rsp), %xmm13 + vmovapd `16*8`(%rsp), %xmm14 + vmovapd `16*9`(%rsp), %xmm15 + lea 168(%rsp), %rsp + pop %rdi + pop %rsi +___ +$code.=<<___; mov 0(%rsp),%r15 .cfi_restore %r15 mov 8(%rsp),%r14 @@ -553,6 +587,23 @@ ossl_rsaz_amm52x20_x2_avxifma256: .cfi_push %r14 push %r15 .cfi_push %r15 +___ +$code.=<<___ if ($win64); + push %rsi # save non-volatile registers + push %rdi + lea -168(%rsp), %rsp # 16*10 + (8 bytes to get correct 16-byte SIMD alignment) + vmovapd %xmm6, `16*0`(%rsp) + vmovapd %xmm7, `16*1`(%rsp) + vmovapd %xmm8, `16*2`(%rsp) + vmovapd %xmm9, `16*3`(%rsp) + vmovapd %xmm10, `16*4`(%rsp) + vmovapd %xmm11, `16*5`(%rsp) + vmovapd %xmm12, `16*6`(%rsp) + vmovapd %xmm13, `16*7`(%rsp) + vmovapd %xmm14, `16*8`(%rsp) + vmovapd %xmm15, `16*9`(%rsp) +___ +$code.=<<___; .Lossl_rsaz_amm52x20_x2_avxifma256_body: # Zeroing accumulators @@ -604,6 +655,23 @@ $code.=<<___; vmovdqu $R2_1, `9*32`($res) vzeroupper +___ +$code.=<<___ if ($win64); + vmovapd `16*0`(%rsp), %xmm6 + vmovapd 
`16*1`(%rsp), %xmm7 + vmovapd `16*2`(%rsp), %xmm8 + vmovapd `16*3`(%rsp), %xmm9 + vmovapd `16*4`(%rsp), %xmm10 + vmovapd `16*5`(%rsp), %xmm11 + vmovapd `16*6`(%rsp), %xmm12 + vmovapd `16*7`(%rsp), %xmm13 + vmovapd `16*8`(%rsp), %xmm14 + vmovapd `16*9`(%rsp), %xmm15 + lea 168(%rsp), %rsp + pop %rdi + pop %rsi +___ +$code.=<<___; mov 0(%rsp),%r15 .cfi_restore %r15 mov 8(%rsp),%r14 @@ -663,6 +731,23 @@ $code.=<<___; ossl_extract_multiplier_2x20_win5_avx: .cfi_startproc endbranch +___ +$code.=<<___ if ($win64); + push %rsi # save non-volatile registers + push %rdi + lea -168(%rsp), %rsp # 16*10 + (8 bytes to get correct 16-byte SIMD alignment) + vmovapd %xmm6, `16*0`(%rsp) + vmovapd %xmm7, `16*1`(%rsp) + vmovapd %xmm8, `16*2`(%rsp) + vmovapd %xmm9, `16*3`(%rsp) + vmovapd %xmm10, `16*4`(%rsp) + vmovapd %xmm11, `16*5`(%rsp) + vmovapd %xmm12, `16*6`(%rsp) + vmovapd %xmm13, `16*7`(%rsp) + vmovapd %xmm14, `16*8`(%rsp) + vmovapd %xmm15, `16*9`(%rsp) +___ +$code.=<<___; vmovapd .Lones(%rip), $ones # broadcast ones vmovq $red_tbl_idx1, $tmp_xmm vpbroadcastq $tmp_xmm, $idx1 @@ -709,6 +794,24 @@ foreach (0..9) { $code.="vmovdqu $t[$_], `${_}*32`($out) \n"; } $code.=<<___; + vzeroupper +___ +$code.=<<___ if ($win64); + vmovapd `16*0`(%rsp), %xmm6 + vmovapd `16*1`(%rsp), %xmm7 + vmovapd `16*2`(%rsp), %xmm8 + vmovapd `16*3`(%rsp), %xmm9 + vmovapd `16*4`(%rsp), %xmm10 + vmovapd `16*5`(%rsp), %xmm11 + vmovapd `16*6`(%rsp), %xmm12 + vmovapd `16*7`(%rsp), %xmm13 + vmovapd `16*8`(%rsp), %xmm14 + vmovapd `16*9`(%rsp), %xmm15 + lea 168(%rsp), %rsp + pop %rdi + pop %rsi +___ +$code.=<<___; ret .cfi_endproc .size ossl_extract_multiplier_2x20_win5_avx, .-ossl_extract_multiplier_2x20_win5_avx diff --git a/crypto/bn/asm/rsaz-3k-avxifma.pl b/crypto/bn/asm/rsaz-3k-avxifma.pl index a19cb5aaa309..1948d726b38b 100644 --- a/crypto/bn/asm/rsaz-3k-avxifma.pl +++ b/crypto/bn/asm/rsaz-3k-avxifma.pl @@ -1,4 +1,4 @@ -# Copyright 2024-2025 The OpenSSL Project Authors. All Rights Reserved. 
+# Copyright 2024-2026 The OpenSSL Project Authors. All Rights Reserved. # Copyright (c) 2024, Intel Corporation. All Rights Reserved. # # Licensed under the Apache License 2.0 (the "License"). You may not use @@ -87,8 +87,6 @@ my ($res,$a,$b,$m,$k0) = @_6_args_universal_ABI; my $mask52 = "%rax"; my $acc0_0 = "%r9"; my $acc0_0_low = "%r9d"; -my $acc0_1 = "%r15"; -my $acc0_1_low = "%r15d"; my $b_ptr = "%r11"; my $iter = "%ebx"; @@ -741,7 +739,7 @@ $code.=<<___; vmovdqu $R3_0, `6*32`($res) vmovdqu $R3_0h, `7*32`($res) - xorl $acc0_1_low, $acc0_1_low + xorl $acc0_0_low, $acc0_0_low lea 16($b_ptr), $b_ptr movq \$0xfffffffffffff, $mask52 # 52-bit mask @@ -857,6 +855,23 @@ $code.=<<___; ossl_extract_multiplier_2x30_win5_avx: .cfi_startproc endbranch +___ +$code.=<<___ if ($win64); + push %rsi # save non-volatile registers + push %rdi + lea -168(%rsp), %rsp # 16*10 + (8 bytes to get correct 16-byte SIMD alignment) + vmovapd %xmm6, `16*0`(%rsp) + vmovapd %xmm7, `16*1`(%rsp) + vmovapd %xmm8, `16*2`(%rsp) + vmovapd %xmm9, `16*3`(%rsp) + vmovapd %xmm10, `16*4`(%rsp) + vmovapd %xmm11, `16*5`(%rsp) + vmovapd %xmm12, `16*6`(%rsp) + vmovapd %xmm13, `16*7`(%rsp) + vmovapd %xmm14, `16*8`(%rsp) + vmovapd %xmm15, `16*9`(%rsp) +___ +$code.=<<___; vmovapd .Lones(%rip), $ones # broadcast ones vmovq $red_tbl_idx1, $tmp_xmm vpbroadcastq $tmp_xmm, $idx1 @@ -930,6 +945,24 @@ foreach (8..15) { $code.="vmovdqu $t[$_], `${_}*32`($out) \n"; } +$code.=<<___; + vzeroupper +___ +$code.=<<___ if ($win64); + vmovapd `16*0`(%rsp), %xmm6 + vmovapd `16*1`(%rsp), %xmm7 + vmovapd `16*2`(%rsp), %xmm8 + vmovapd `16*3`(%rsp), %xmm9 + vmovapd `16*4`(%rsp), %xmm10 + vmovapd `16*5`(%rsp), %xmm11 + vmovapd `16*6`(%rsp), %xmm12 + vmovapd `16*7`(%rsp), %xmm13 + vmovapd `16*8`(%rsp), %xmm14 + vmovapd `16*9`(%rsp), %xmm15 + lea 168(%rsp), %rsp + pop %rdi + pop %rsi +___ $code.=<<___; diff --git a/crypto/bn/asm/rsaz-4k-avxifma.pl b/crypto/bn/asm/rsaz-4k-avxifma.pl index f15e2d74118c..9f299430cefc 100644 --- 
a/crypto/bn/asm/rsaz-4k-avxifma.pl +++ b/crypto/bn/asm/rsaz-4k-avxifma.pl @@ -1,4 +1,4 @@ -# Copyright 2024-2025 The OpenSSL Project Authors. All Rights Reserved. +# Copyright 2024-2026 The OpenSSL Project Authors. All Rights Reserved. # Copyright (c) 2024, Intel Corporation. All Rights Reserved. # # Licensed under the Apache License 2.0 (the "License"). You may not use @@ -84,8 +84,6 @@ my ($res,$a,$b,$m,$k0) = @_6_args_universal_ABI; my $mask52 = "%rax"; my $acc0_0 = "%r9"; my $acc0_0_low = "%r9d"; -my $acc0_1 = "%r15"; -my $acc0_1_low = "%r15d"; my $b_ptr = "%r11"; my $iter = "%ebx"; @@ -834,7 +832,7 @@ $code.=<<___; vmovdqu $R4_0, `8*32`($res) vmovdqu $R4_0h, `9*32`($res) - xorl $acc0_1_low, $acc0_1_low + xorl $acc0_0_low, $acc0_0_low movq \$0xfffffffffffff, $mask52 @@ -975,6 +973,23 @@ $code.=<<___; ossl_extract_multiplier_2x40_win5_avx: .cfi_startproc endbranch +___ +$code.=<<___ if ($win64); + push %rsi # save non-volatile registers + push %rdi + lea -168(%rsp), %rsp # 16*10 + (8 bytes to get correct 16-byte SIMD alignment) + vmovapd %xmm6, `16*0`(%rsp) + vmovapd %xmm7, `16*1`(%rsp) + vmovapd %xmm8, `16*2`(%rsp) + vmovapd %xmm9, `16*3`(%rsp) + vmovapd %xmm10, `16*4`(%rsp) + vmovapd %xmm11, `16*5`(%rsp) + vmovapd %xmm12, `16*6`(%rsp) + vmovapd %xmm13, `16*7`(%rsp) + vmovapd %xmm14, `16*8`(%rsp) + vmovapd %xmm15, `16*9`(%rsp) +___ +$code.=<<___; vmovapd .Lones(%rip), $ones # broadcast ones vmovq $red_tbl_idx1, $tmp_xmm vpbroadcastq $tmp_xmm, $idx1 @@ -1002,6 +1017,24 @@ foreach (0..9) { $code.="vmovdqu $t[$_], `(10+$_)*32`($out) \n"; } $code.=<<___; + vzeroupper +___ +$code.=<<___ if ($win64); + vmovapd `16*0`(%rsp), %xmm6 + vmovapd `16*1`(%rsp), %xmm7 + vmovapd `16*2`(%rsp), %xmm8 + vmovapd `16*3`(%rsp), %xmm9 + vmovapd `16*4`(%rsp), %xmm10 + vmovapd `16*5`(%rsp), %xmm11 + vmovapd `16*6`(%rsp), %xmm12 + vmovapd `16*7`(%rsp), %xmm13 + vmovapd `16*8`(%rsp), %xmm14 + vmovapd `16*9`(%rsp), %xmm15 + lea 168(%rsp), %rsp + pop %rdi + pop %rsi +___ +$code.=<<___; ret 
.cfi_endproc diff --git a/crypto/bn/asm/sparcv9-mont.pl b/crypto/bn/asm/sparcv9-mont.pl index fe51fcaf81c7..d438af562648 100644 --- a/crypto/bn/asm/sparcv9-mont.pl +++ b/crypto/bn/asm/sparcv9-mont.pl @@ -1,5 +1,5 @@ #! /usr/bin/env perl -# Copyright 2005-2021 The OpenSSL Project Authors. All Rights Reserved. +# Copyright 2005-2026 The OpenSSL Project Authors. All Rights Reserved. # # Licensed under the Apache License 2.0 (the "License"). You may not use # this file except in compliance with the License. You can obtain a copy @@ -394,11 +394,11 @@ $code.=<<___; mulx $car1,$mul1,$car1 mulx $npj,$mul1,$acc1 + add $tmp1,$car0,$car0 add $tmp0,$car1,$car1 and $car0,$mask,$acc0 ld [$np+8],$npj ! np[2] srlx $car1,32,$car1 - add $tmp1,$car1,$car1 srlx $car0,32,$car0 add $acc0,$car1,$car1 and $car0,1,$sbit diff --git a/crypto/bn/bn_dh.c b/crypto/bn/bn_dh.c index 542c33d6a8d7..d069481e981b 100644 --- a/crypto/bn/bn_dh.c +++ b/crypto/bn/bn_dh.c @@ -1,5 +1,5 @@ /* - * Copyright 2014-2021 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2014-2026 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy @@ -14,7 +14,7 @@ #include "crypto/bn_dh.h" #if BN_BITS2 == 64 -#define BN_DEF(lo, hi) (BN_ULONG) hi << 32 | lo +#define BN_DEF(lo, hi) (BN_ULONG)hi << 32 | lo #else #define BN_DEF(lo, hi) lo, hi #endif @@ -1387,37 +1387,37 @@ const BIGNUM ossl_bignum_const_2 = { }; make_dh_bn(dh1024_160_p) - make_dh_bn(dh1024_160_q) - make_dh_bn(dh1024_160_g) - make_dh_bn(dh2048_224_p) - make_dh_bn(dh2048_224_q) - make_dh_bn(dh2048_224_g) - make_dh_bn(dh2048_256_p) - make_dh_bn(dh2048_256_q) - make_dh_bn(dh2048_256_g) +make_dh_bn(dh1024_160_q) +make_dh_bn(dh1024_160_g) +make_dh_bn(dh2048_224_p) +make_dh_bn(dh2048_224_q) +make_dh_bn(dh2048_224_g) +make_dh_bn(dh2048_256_p) +make_dh_bn(dh2048_256_q) +make_dh_bn(dh2048_256_g) - make_dh_bn(ffdhe2048_p) - make_dh_bn(ffdhe2048_q) - make_dh_bn(ffdhe3072_p) - make_dh_bn(ffdhe3072_q) - make_dh_bn(ffdhe4096_p) - make_dh_bn(ffdhe4096_q) - make_dh_bn(ffdhe6144_p) - make_dh_bn(ffdhe6144_q) - make_dh_bn(ffdhe8192_p) - make_dh_bn(ffdhe8192_q) +make_dh_bn(ffdhe2048_p) +make_dh_bn(ffdhe2048_q) +make_dh_bn(ffdhe3072_p) +make_dh_bn(ffdhe3072_q) +make_dh_bn(ffdhe4096_p) +make_dh_bn(ffdhe4096_q) +make_dh_bn(ffdhe6144_p) +make_dh_bn(ffdhe6144_q) +make_dh_bn(ffdhe8192_p) +make_dh_bn(ffdhe8192_q) #ifndef FIPS_MODULE - make_dh_bn(modp_1536_p) - make_dh_bn(modp_1536_q) +make_dh_bn(modp_1536_p) +make_dh_bn(modp_1536_q) #endif - make_dh_bn(modp_2048_p) - make_dh_bn(modp_2048_q) - make_dh_bn(modp_3072_p) - make_dh_bn(modp_3072_q) - make_dh_bn(modp_4096_p) - make_dh_bn(modp_4096_q) - make_dh_bn(modp_6144_p) - make_dh_bn(modp_6144_q) - make_dh_bn(modp_8192_p) - make_dh_bn(modp_8192_q) +make_dh_bn(modp_2048_p) +make_dh_bn(modp_2048_q) +make_dh_bn(modp_3072_p) +make_dh_bn(modp_3072_q) +make_dh_bn(modp_4096_p) +make_dh_bn(modp_4096_q) +make_dh_bn(modp_6144_p) +make_dh_bn(modp_6144_q) +make_dh_bn(modp_8192_p) +make_dh_bn(modp_8192_q) diff --git a/crypto/bn/bn_exp.c b/crypto/bn/bn_exp.c index 58d38b9ebd2a..44931f803802 100644 --- 
a/crypto/bn/bn_exp.c +++ b/crypto/bn/bn_exp.c @@ -1,5 +1,5 @@ /* - * Copyright 1995-2025 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2026 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -771,16 +771,16 @@ int bn_mod_exp_mont_fixed_top(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, typedef int (*bn_pwr5_mont_f)(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); - int bn_pwr5_mont_t4_8(BN_ULONG * tp, const BN_ULONG *np, + int bn_pwr5_mont_t4_8(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); - int bn_pwr5_mont_t4_16(BN_ULONG * tp, const BN_ULONG *np, + int bn_pwr5_mont_t4_16(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); - int bn_pwr5_mont_t4_24(BN_ULONG * tp, const BN_ULONG *np, + int bn_pwr5_mont_t4_24(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); - int bn_pwr5_mont_t4_32(BN_ULONG * tp, const BN_ULONG *np, + int bn_pwr5_mont_t4_32(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); static const bn_pwr5_mont_f pwr5_funcs[4] = { @@ -792,15 +792,15 @@ int bn_mod_exp_mont_fixed_top(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, typedef int (*bn_mul_mont_f)(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); - int bn_mul_mont_t4_8(BN_ULONG * rp, const BN_ULONG *ap, const void *bp, + int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); - int bn_mul_mont_t4_16(BN_ULONG * rp, const BN_ULONG *ap, + int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); - int bn_mul_mont_t4_24(BN_ULONG * rp, const BN_ULONG *ap, + int 
bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); - int bn_mul_mont_t4_32(BN_ULONG * rp, const BN_ULONG *ap, + int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); static const bn_mul_mont_f mul_funcs[4] = { @@ -809,20 +809,20 @@ int bn_mod_exp_mont_fixed_top(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, }; bn_mul_mont_f mul_worker = mul_funcs[top / 16 - 1]; - void bn_mul_mont_vis3(BN_ULONG * rp, const BN_ULONG *ap, + void bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); - void bn_mul_mont_t4(BN_ULONG * rp, const BN_ULONG *ap, + void bn_mul_mont_t4(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); - void bn_mul_mont_gather5_t4(BN_ULONG * rp, const BN_ULONG *ap, + void bn_mul_mont_gather5_t4(BN_ULONG *rp, const BN_ULONG *ap, const void *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); void bn_flip_n_scatter5_t4(const BN_ULONG *inp, size_t num, void *table, size_t power); - void bn_gather5_t4(BN_ULONG * out, size_t num, + void bn_gather5_t4(BN_ULONG *out, size_t num, void *table, size_t power); - void bn_flip_t4(BN_ULONG * dst, BN_ULONG * src, size_t num); + void bn_flip_t4(BN_ULONG *dst, BN_ULONG *src, size_t num); BN_ULONG *np = mont->N.d, *n0 = mont->n0; int stride = 5 * (6 - (top / 16 - 1)); /* multiple of 5, but less @@ -922,13 +922,13 @@ int bn_mod_exp_mont_fixed_top(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, * Given those inputs, |bn_mul_mont| may not give reduced * output, but it will still produce "almost" reduced output. 
*/ - void bn_mul_mont_gather5(BN_ULONG * rp, const BN_ULONG *ap, + void bn_mul_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap, const void *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); void bn_scatter5(const BN_ULONG *inp, size_t num, void *table, size_t power); - void bn_gather5(BN_ULONG * out, size_t num, void *table, size_t power); - void bn_power5(BN_ULONG * rp, const BN_ULONG *ap, + void bn_gather5(BN_ULONG *out, size_t num, void *table, size_t power); + void bn_power5(BN_ULONG *rp, const BN_ULONG *ap, const void *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); int bn_get_bits5(const BN_ULONG *ap, int off); diff --git a/crypto/bn/bn_local.h b/crypto/bn/bn_local.h index 45b545bce3dd..9ebadfd76e79 100644 --- a/crypto/bn/bn_local.h +++ b/crypto/bn/bn_local.h @@ -1,5 +1,5 @@ /* - * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2026 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -387,7 +387,7 @@ struct bn_gencb_st { #elif defined(__alpha) && (defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT)) #if defined(__DECC) #include <c_asm.h> -#define BN_UMULT_HIGH(a, b) (BN_ULONG) asm("umulh %a0,%a1,%v0", (a), (b)) +#define BN_UMULT_HIGH(a, b) (BN_ULONG)asm("umulh %a0,%a1,%v0", (a), (b)) #elif defined(__GNUC__) && __GNUC__ >= 2 #define BN_UMULT_HIGH(a, b) ({ \ register BN_ULONG ret; \ diff --git a/crypto/bn/bn_mont.c b/crypto/bn/bn_mont.c index 1527ac10fb1b..72e935c52fb9 100644 --- a/crypto/bn/bn_mont.c +++ b/crypto/bn/bn_mont.c @@ -1,5 +1,5 @@ /* - * Copyright 1995-2024 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2026 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy @@ -8,10 +8,9 @@ */ /* - * Details about Montgomery multiplication algorithms can be found at - * http://security.ece.orst.edu/publications.html, e.g. - * http://security.ece.orst.edu/koc/papers/j37acmon.pdf and - * sections 3.8 and 4.2 in http://security.ece.orst.edu/koc/papers/r01rsasw.pdf + * Details about Montgomery multiplication algorithms can be found in + * https://www.microsoft.com/en-us/research/wp-content/uploads/1996/01/j37acmon.pdf + * and https://cetinkayakoc.net/docs/r01.pdf */ #include "internal/cryptlib.h" diff --git a/crypto/bn/bn_ppc.c b/crypto/bn/bn_ppc.c index 049ffa50da98..8ef52a30c1a4 100644 --- a/crypto/bn/bn_ppc.c +++ b/crypto/bn/bn_ppc.c @@ -1,5 +1,5 @@ /* - * Copyright 2009-2025 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2009-2026 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -15,14 +15,14 @@ int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, int num) { - int bn_mul_mont_int(BN_ULONG * rp, const BN_ULONG *ap, const BN_ULONG *bp, + int bn_mul_mont_int(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); - int bn_mul4x_mont_int(BN_ULONG * rp, const BN_ULONG *ap, const BN_ULONG *bp, + int bn_mul4x_mont_int(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); - int bn_mul_mont_fixed_n6(BN_ULONG * rp, const BN_ULONG *ap, + int bn_mul_mont_fixed_n6(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); - int bn_mul_mont_300_fixed_n6(BN_ULONG * rp, const BN_ULONG *ap, + int bn_mul_mont_300_fixed_n6(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); diff --git a/crypto/bn/bn_prime.c 
b/crypto/bn/bn_prime.c index 0c38e2ccd207..08f889e59799 100644 --- a/crypto/bn/bn_prime.c +++ b/crypto/bn/bn_prime.c @@ -1,5 +1,5 @@ /* - * Copyright 1995-2021 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2026 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -30,7 +30,7 @@ static int bn_is_prime_int(const BIGNUM *w, int checks, BN_CTX *ctx, #define square(x) ((BN_ULONG)(x) * (BN_ULONG)(x)) #if BN_BITS2 == 64 -#define BN_DEF(lo, hi) (BN_ULONG) hi << 32 | lo +#define BN_DEF(lo, hi) (BN_ULONG)hi << 32 | lo #else #define BN_DEF(lo, hi) lo, hi #endif diff --git a/crypto/bn/bn_rsa_fips186_4.c b/crypto/bn/bn_rsa_fips186_4.c index c2574167d06d..08def7f13c98 100644 --- a/crypto/bn/bn_rsa_fips186_4.c +++ b/crypto/bn/bn_rsa_fips186_4.c @@ -1,5 +1,5 @@ /* - * Copyright 2018-2023 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2018-2026 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2018-2019, Oracle and/or its affiliates. All rights reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use @@ -29,7 +29,7 @@ #include "internal/nelem.h" #if BN_BITS2 == 64 -#define BN_DEF(lo, hi) (BN_ULONG) hi << 32 | lo +#define BN_DEF(lo, hi) (BN_ULONG)hi << 32 | lo #else #define BN_DEF(lo, hi) lo, hi #endif diff --git a/crypto/bn/bn_sparc.c b/crypto/bn/bn_sparc.c index a236e42dfae9..7c902191d773 100644 --- a/crypto/bn/bn_sparc.c +++ b/crypto/bn/bn_sparc.c @@ -1,5 +1,5 @@ /* - * Copyright 2005-2021 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2005-2026 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy @@ -16,11 +16,11 @@ int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, int num) { - int bn_mul_mont_vis3(BN_ULONG * rp, const BN_ULONG *ap, const BN_ULONG *bp, + int bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); - int bn_mul_mont_fpu(BN_ULONG * rp, const BN_ULONG *ap, const BN_ULONG *bp, + int bn_mul_mont_fpu(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); - int bn_mul_mont_int(BN_ULONG * rp, const BN_ULONG *ap, const BN_ULONG *bp, + int bn_mul_mont_int(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); if (!(num & 1) && num >= 6) { @@ -29,16 +29,16 @@ int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0); - int bn_mul_mont_t4_8(BN_ULONG * rp, const BN_ULONG *ap, + int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0); - int bn_mul_mont_t4_16(BN_ULONG * rp, const BN_ULONG *ap, + int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0); - int bn_mul_mont_t4_24(BN_ULONG * rp, const BN_ULONG *ap, + int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0); - int bn_mul_mont_t4_32(BN_ULONG * rp, const BN_ULONG *ap, + int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0); static const bn_mul_mont_f funcs[4] = { |
