author     Jung-uk Kim <jkim@FreeBSD.org>  2020-12-09 02:05:14 +0000
committer  Jung-uk Kim <jkim@FreeBSD.org>  2020-12-09 02:05:14 +0000
commit     c3c73b4f0a91d2806e1a632b75f769fb4fa89576 (patch)
tree       7c868b042745dce2a0f7c34a517d3721441b24b1 /sys
parent     5ee33a90767ef975146d7c441c0517ff789617b1 (diff)
parent     970a464089066970886f0bce6d1c9dcfbcb2e8ea (diff)
Merge OpenSSL 1.1.1i.
Notes:
svn path=/head/; revision=368472
Diffstat (limited to 'sys')
-rw-r--r--  sys/crypto/openssl/aarch64/aesv8-armx.S      38
-rw-r--r--  sys/crypto/openssl/aarch64/chacha-armv8.S     1
-rw-r--r--  sys/crypto/openssl/aarch64/poly1305-armv8.S   8
-rw-r--r--  sys/crypto/openssl/aarch64/sha1-armv8.S       2
-rw-r--r--  sys/crypto/openssl/aarch64/sha256-armv8.S     4
-rw-r--r--  sys/crypto/openssl/aarch64/sha512-armv8.S     4
-rw-r--r--  sys/crypto/openssl/arm/aesv8-armx.S          38
7 files changed, 56 insertions(+), 39 deletions(-)
diff --git a/sys/crypto/openssl/aarch64/aesv8-armx.S b/sys/crypto/openssl/aarch64/aesv8-armx.S
index d8eb85a9840e..cd33ebf8e258 100644
--- a/sys/crypto/openssl/aarch64/aesv8-armx.S
+++ b/sys/crypto/openssl/aarch64/aesv8-armx.S
@@ -104,7 +104,12 @@ aes_v8_set_encrypt_key:
.Loop192:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
+#ifdef __ARMEB__
+ st1 {v4.4s},[x2],#16
+ sub x2,x2,#8
+#else
st1 {v4.8b},[x2],#8
+#endif
aese v6.16b,v0.16b
subs w1,w1,#1
@@ -575,8 +580,11 @@ aes_v8_ctr32_encrypt_blocks:
ldr w5,[x3,#240]
ldr w8, [x4, #12]
+#ifdef __ARMEB__
+ ld1 {v0.16b},[x4]
+#else
ld1 {v0.4s},[x4]
-
+#endif
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#4
mov x12,#16
@@ -592,17 +600,17 @@ aes_v8_ctr32_encrypt_blocks:
#ifndef __ARMEB__
rev w8, w8
#endif
- orr v1.16b,v0.16b,v0.16b
add w10, w8, #1
- orr v18.16b,v0.16b,v0.16b
- add w8, w8, #2
orr v6.16b,v0.16b,v0.16b
rev w10, w10
- mov v1.s[3],w10
+ mov v6.s[3],w10
+ add w8, w8, #2
+ orr v1.16b,v6.16b,v6.16b
b.ls .Lctr32_tail
rev w12, w8
+ mov v6.s[3],w12
sub x2,x2,#3 // bias
- mov v18.s[3],w12
+ orr v18.16b,v6.16b,v6.16b
b .Loop3x_ctr32
.align 4
@@ -629,11 +637,11 @@ aes_v8_ctr32_encrypt_blocks:
aese v1.16b,v16.16b
aesmc v5.16b,v1.16b
ld1 {v2.16b},[x0],#16
- orr v0.16b,v6.16b,v6.16b
+ add w9,w8,#1
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
- orr v1.16b,v6.16b,v6.16b
+ rev w9,w9
aese v4.16b,v17.16b
aesmc v4.16b,v4.16b
aese v5.16b,v17.16b
@@ -642,8 +650,6 @@ aes_v8_ctr32_encrypt_blocks:
mov x7,x3
aese v18.16b,v17.16b
aesmc v17.16b,v18.16b
- orr v18.16b,v6.16b,v6.16b
- add w9,w8,#1
aese v4.16b,v20.16b
aesmc v4.16b,v4.16b
aese v5.16b,v20.16b
@@ -659,20 +665,22 @@ aes_v8_ctr32_encrypt_blocks:
aese v5.16b,v21.16b
aesmc v5.16b,v5.16b
eor v19.16b,v19.16b,v7.16b
- rev w9,w9
+ mov v6.s[3], w9
aese v17.16b,v21.16b
aesmc v17.16b,v17.16b
- mov v0.s[3], w9
+ orr v0.16b,v6.16b,v6.16b
rev w10,w10
aese v4.16b,v22.16b
aesmc v4.16b,v4.16b
+ mov v6.s[3], w10
+ rev w12,w8
aese v5.16b,v22.16b
aesmc v5.16b,v5.16b
- mov v1.s[3], w10
- rev w12,w8
+ orr v1.16b,v6.16b,v6.16b
+ mov v6.s[3], w12
aese v17.16b,v22.16b
aesmc v17.16b,v17.16b
- mov v18.s[3], w12
+ orr v18.16b,v6.16b,v6.16b
subs x2,x2,#3
aese v4.16b,v23.16b
aese v5.16b,v23.16b
diff --git a/sys/crypto/openssl/aarch64/chacha-armv8.S b/sys/crypto/openssl/aarch64/chacha-armv8.S
index 0208c2030fae..123c78b18387 100644
--- a/sys/crypto/openssl/aarch64/chacha-armv8.S
+++ b/sys/crypto/openssl/aarch64/chacha-armv8.S
@@ -5,6 +5,7 @@
.text
+.hidden OPENSSL_armcap_P
.align 5
.Lsigma:
diff --git a/sys/crypto/openssl/aarch64/poly1305-armv8.S b/sys/crypto/openssl/aarch64/poly1305-armv8.S
index 5e145838fe34..4fc458d568f8 100644
--- a/sys/crypto/openssl/aarch64/poly1305-armv8.S
+++ b/sys/crypto/openssl/aarch64/poly1305-armv8.S
@@ -6,10 +6,14 @@
// forward "declarations" are required for Apple
+.hidden OPENSSL_armcap_P
+.globl poly1305_init
+.hidden poly1305_init
.globl poly1305_blocks
+.hidden poly1305_blocks
.globl poly1305_emit
+.hidden poly1305_emit
-.globl poly1305_init
.type poly1305_init,%function
.align 5
poly1305_init:
@@ -797,8 +801,8 @@ poly1305_blocks_neon:
st1 {v23.s}[0],[x0]
.Lno_data_neon:
-.inst 0xd50323bf // autiasp
ldr x29,[sp],#80
+.inst 0xd50323bf // autiasp
ret
.size poly1305_blocks_neon,.-poly1305_blocks_neon
diff --git a/sys/crypto/openssl/aarch64/sha1-armv8.S b/sys/crypto/openssl/aarch64/sha1-armv8.S
index 033ed0b134a3..f72c9c8b18bf 100644
--- a/sys/crypto/openssl/aarch64/sha1-armv8.S
+++ b/sys/crypto/openssl/aarch64/sha1-armv8.S
@@ -5,6 +5,7 @@
.text
+.hidden OPENSSL_armcap_P
.globl sha1_block_data_order
.type sha1_block_data_order,%function
.align 6
@@ -1220,4 +1221,3 @@ sha1_block_armv8:
.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
-.comm OPENSSL_armcap_P,4,4
diff --git a/sys/crypto/openssl/aarch64/sha256-armv8.S b/sys/crypto/openssl/aarch64/sha256-armv8.S
index 35bf48ba5178..e1a08d98912f 100644
--- a/sys/crypto/openssl/aarch64/sha256-armv8.S
+++ b/sys/crypto/openssl/aarch64/sha256-armv8.S
@@ -61,6 +61,7 @@
.text
+.hidden OPENSSL_armcap_P
.globl sha256_block_data_order
.type sha256_block_data_order,%function
.align 6
@@ -2062,6 +2063,3 @@ sha256_block_neon:
add sp,sp,#16*4+16
ret
.size sha256_block_neon,.-sha256_block_neon
-#ifndef __KERNEL__
-.comm OPENSSL_armcap_P,4,4
-#endif
diff --git a/sys/crypto/openssl/aarch64/sha512-armv8.S b/sys/crypto/openssl/aarch64/sha512-armv8.S
index 06cf5a239d89..8fdd14f254cb 100644
--- a/sys/crypto/openssl/aarch64/sha512-armv8.S
+++ b/sys/crypto/openssl/aarch64/sha512-armv8.S
@@ -61,6 +61,7 @@
.text
+.hidden OPENSSL_armcap_P
.globl sha512_block_data_order
.type sha512_block_data_order,%function
.align 6
@@ -1617,6 +1618,3 @@ sha512_block_armv8:
ret
.size sha512_block_armv8,.-sha512_block_armv8
#endif
-#ifndef __KERNEL__
-.comm OPENSSL_armcap_P,4,4
-#endif
diff --git a/sys/crypto/openssl/arm/aesv8-armx.S b/sys/crypto/openssl/arm/aesv8-armx.S
index 277483389856..f46d50c57337 100644
--- a/sys/crypto/openssl/arm/aesv8-armx.S
+++ b/sys/crypto/openssl/arm/aesv8-armx.S
@@ -110,7 +110,12 @@ aes_v8_set_encrypt_key:
vtbl.8 d20,{q8},d4
vtbl.8 d21,{q8},d5
vext.8 q9,q0,q3,#12
+#ifdef __ARMEB__
+ vst1.32 {q8},[r2]!
+ sub r2,r2,#8
+#else
vst1.32 {d16},[r2]!
+#endif
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
@@ -582,8 +587,11 @@ aes_v8_ctr32_encrypt_blocks:
ldr r5,[r3,#240]
ldr r8, [r4, #12]
+#ifdef __ARMEB__
+ vld1.8 {q0},[r4]
+#else
vld1.32 {q0},[r4]
-
+#endif
vld1.32 {q8,q9},[r3] @ load key schedule...
sub r5,r5,#4
mov r12,#16
@@ -599,17 +607,17 @@ aes_v8_ctr32_encrypt_blocks:
#ifndef __ARMEB__
rev r8, r8
#endif
- vorr q1,q0,q0
add r10, r8, #1
- vorr q10,q0,q0
- add r8, r8, #2
vorr q6,q0,q0
rev r10, r10
- vmov.32 d3[1],r10
+ vmov.32 d13[1],r10
+ add r8, r8, #2
+ vorr q1,q6,q6
bls .Lctr32_tail
rev r12, r8
+ vmov.32 d13[1],r12
sub r2,r2,#3 @ bias
- vmov.32 d21[1],r12
+ vorr q10,q6,q6
b .Loop3x_ctr32
.align 4
@@ -636,11 +644,11 @@ aes_v8_ctr32_encrypt_blocks:
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
vld1.8 {q2},[r0]!
- vorr q0,q6,q6
+ add r9,r8,#1
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.8 {q3},[r0]!
- vorr q1,q6,q6
+ rev r9,r9
.byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
@@ -649,8 +657,6 @@ aes_v8_ctr32_encrypt_blocks:
mov r7,r3
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
.byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10
- vorr q10,q6,q6
- add r9,r8,#1
.byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
@@ -666,20 +672,22 @@ aes_v8_ctr32_encrypt_blocks:
.byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
veor q11,q11,q7
- rev r9,r9
+ vmov.32 d13[1], r9
.byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
- vmov.32 d1[1], r9
+ vorr q0,q6,q6
rev r10,r10
.byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
+ vmov.32 d13[1], r10
+ rev r12,r8
.byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
- vmov.32 d3[1], r10
- rev r12,r8
+ vorr q1,q6,q6
+ vmov.32 d13[1], r12
.byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
- vmov.32 d21[1], r12
+ vorr q10,q6,q6
subs r2,r2,#3
.byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
.byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15