[openssl-commits] [openssl] master update

Andy Polyakov <appro@openssl.org>
Fri Dec 22 11:39:21 UTC 2017


The branch master has been updated
       via  24d06e8ca07f705635a072dcb6ad08c2555c9025 (commit)
      from  17b602802114d53017ff7894319498934a580b17 (commit)


- Log -----------------------------------------------------------------
commit 24d06e8ca07f705635a072dcb6ad08c2555c9025
Author: Andy Polyakov <appro@openssl.org>
Date:   Sun Dec 17 21:32:38 2017 +0100

    Add sha/asm/keccak1600-avx512vl.pl.
    
    Reviewed-by: Rich Salz <rsalz@openssl.org>
    (Merged from https://github.com/openssl/openssl/pull/4948)

-----------------------------------------------------------------------

Summary of changes:
 .../{keccak1600-avx2.pl => keccak1600-avx512vl.pl} | 254 +++++++--------------
 1 file changed, 82 insertions(+), 172 deletions(-)
 copy crypto/sha/asm/{keccak1600-avx2.pl => keccak1600-avx512vl.pl} (63%)
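
The heart of the change: pairs of bitwise instructions (vpxor+vpxor in
Theta and Iota, vpandn+vpxor in Chi) are folded into single vpternlogq
instructions, whose 8-bit immediate encodes an arbitrary three-input
boolean function as a truth table; bit i of the immediate is
f(dst,src1,src2) for i = dst<<2 | src1<<1 | src2. A minimal Perl sketch
(illustrative only, not part of the commit) that derives the two
immediates used in the diff below:

    #!/usr/bin/env perl
    # Sketch only: compute a vpternlogq immediate from a boolean
    # function of the (dst,src1,src2) bits.
    sub ternlog_imm {
        my $f = shift;                  # code ref: ($dst,$src1,$src2) -> 0/1
        my $imm = 0;
        for my $i (0..7) {
            $imm |= 1<<$i if $f->(($i>>2)&1, ($i>>1)&1, $i&1);
        }
        return $imm;
    }

    printf "0x%02X\n", ternlog_imm(sub { $_[0]^$_[1]^$_[2] });     # 0x96
    printf "0x%02X\n", ternlog_imm(sub { $_[1]^(~$_[0]&$_[2]) });  # 0xC6

0x96 is three-way XOR, which is why a single vpternlogq can replace two
vpxor in Theta and Iota. 0xC6 computes dst = src1 ^ (~dst & src2); in
AT&T operand order "vpternlogq \$0xC6,@T[8],@T[3],$A31" sets
$A31 = @T[3] ^ (~$A31 & @T[8]), i.e. the former vpandn/vpxor pair of
the Chi step in one instruction.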

diff --git a/crypto/sha/asm/keccak1600-avx2.pl b/crypto/sha/asm/keccak1600-avx512vl.pl
similarity index 63%
copy from crypto/sha/asm/keccak1600-avx2.pl
copy to crypto/sha/asm/keccak1600-avx512vl.pl
index 82ca672..53f1e84 100755
--- a/crypto/sha/asm/keccak1600-avx2.pl
+++ b/crypto/sha/asm/keccak1600-avx512vl.pl
@@ -13,46 +13,24 @@
 # details see http://www.openssl.org/~appro/cryptogams/.
 # ====================================================================
 #
-# Keccak-1600 for AVX2.
+# Keccak-1600 for AVX512VL.
 #
-# July 2017.
+# December 2017.
 #
-# To paraphrase Gilles Van Assche, if you contemplate Fig. 2.3 on page
-# 20 of The Keccak reference [or Fig. 5 of FIPS PUB 202], and load data
-# other than A[0][0] in magic order into 6 [256-bit] registers, *each
-# dedicated to one axis*, Pi permutation is reduced to intra-register
-# shuffles...
+# This is an adaptation of the AVX2 module that reuses the register
+# data layout but utilizes new 256-bit AVX512VL instructions. See the
+# AVX2 module for further information on the layout.
 #
-# It makes other steps more intricate, but overall, is it a win? To be
-# more specific index permutations organized by quadruples are:
-#
-#       [4][4] [3][3] [2][2] [1][1]<-+
-#       [0][4] [0][3] [0][2] [0][1]<-+
-#       [3][0] [1][0] [4][0] [2][0]  |
-#       [4][3] [3][1] [2][4] [1][2]  |
-#       [3][4] [1][3] [4][2] [2][1]  |
-#       [2][3] [4][1] [1][4] [3][2]  |
-#       [2][2] [4][4] [1][1] [3][3] -+
+########################################################################
+# Numbers are cycles per processed byte out of large message.
 #
-# This however is highly impractical for Theta and Chi. What would help
-# Theta is if x indices were aligned column-wise, or in other words:
+#			r=1088(*)
 #
-#       [0][4] [0][3] [0][2] [0][1]
-#       [3][0] [1][0] [4][0] [2][0]
-#vpermq([4][3] [3][1] [2][4] [1][2], 0b01110010)
-#       [2][4] [4][3] [1][2] [3][1]
-#vpermq([4][2] [3][4] [2][1] [1][3], 0b10001101)
-#       [3][4] [1][3] [4][2] [2][1]
-#vpermq([2][3] [4][1] [1][4] [3][2], 0b01110010)
-#       [1][4] [2][3] [3][2] [4][1]
-#vpermq([1][1] [2][2] [3][3] [4][4], 0b00011011)
-#       [4][4] [3][3] [2][2] [1][1]
+# Skylake-X		6.4/+47%
 #
-# So here we have it, lines not marked with vpermq() represent the magic
-# order in which data is to be loaded and maintained. [And lines marked
-# with vpermq() represent Pi circular permutation in chosen layout. Note
-# that first step is permutation-free.] A[0][0] is loaded to register of
-# its own, to all lanes. [A[0][0] is not part of Pi permutation or Rho.]
+# (*)	Corresponds to SHA3-256. Percentage after slash is improvement
+#	coefficient in comparison to scalar keccak1600-x86_64.pl.
+
 # Digits in variables' names denote right-most coordinates:
 
 my ($A00,	# [0][0] [0][0] [0][0] [0][0]		# %ymm0
@@ -73,57 +51,9 @@ my @A_jagged = ([0,0], [1,0], [1,1], [1,2], [1,3],	# [0][0..4]
 		[2,1], [5,0], [4,1], [3,2], [6,3]);	# [4][0..4]
    @A_jagged = map(8*($$_[0]*4+$$_[1]), @A_jagged);	# ... and now linear
 
-# But on the other hand Chi is much better off if y indices were aligned
-# column-wise, not x. For this reason we have to shuffle data prior
-# Chi and revert it afterwards. Prior shuffle is naturally merged with
-# Pi itself:
-#
-#       [0][4] [0][3] [0][2] [0][1]
-#       [3][0] [1][0] [4][0] [2][0]
-#vpermq([4][3] [3][1] [2][4] [1][2], 0b01110010)
-#vpermq([2][4] [4][3] [1][2] [3][1], 0b00011011) = 0b10001101
-#       [3][1] [1][2] [4][3] [2][4]
-#vpermq([4][2] [3][4] [2][1] [1][3], 0b10001101)
-#vpermq([3][4] [1][3] [4][2] [2][1], 0b11100100) = 0b10001101
-#       [3][4] [1][3] [4][2] [2][1]
-#vpermq([2][3] [4][1] [1][4] [3][2], 0b01110010)
-#vpermq([1][4] [2][3] [3][2] [4][1], 0b01110010) = 0b00011011
-#       [3][2] [1][4] [4][1] [2][3]
-#vpermq([1][1] [2][2] [3][3] [4][4], 0b00011011)
-#vpermq([4][4] [3][3] [2][2] [1][1], 0b10001101) = 0b01110010
-#       [3][3] [1][1] [4][4] [2][2]
-#
-# And reverse post-Chi permutation:
-#
-#       [0][4] [0][3] [0][2] [0][1]
-#       [3][0] [1][0] [4][0] [2][0]
-#vpermq([3][1] [1][2] [4][3] [2][4], 0b00011011)
-#       [2][4] [4][3] [1][2] [3][1]
-#vpermq([3][4] [1][3] [4][2] [2][1], 0b11100100) = nop :-)
-#       [3][4] [1][3] [4][2] [2][1]
-#vpermq([3][2] [1][4] [4][1] [2][3], 0b10001101)
-#       [1][4] [2][3] [3][2] [4][1]
-#vpermq([3][3] [1][1] [4][4] [2][2], 0b01110010)
-#       [4][4] [3][3] [2][2] [1][1]
-#
-########################################################################
-# Numbers are cycles per processed byte out of large message.
-#
-#			r=1088(*)
-#
-# Haswell		8.7/+10%
-# Skylake		7.8/+20%
-# Ryzen			17(**)
-#
-# (*)	Corresponds to SHA3-256. Percentage after slash is improvement
-#	coefficient in comparison to scalar keccak1600-x86_64.pl.
-# (**)	It's expected that Ryzen performs poorly, because instruction
-#	issue rate is limited to two AVX2 instructions per cycle and
-#	in addition vpblendd is reportedly bound to specific port.
-#	Obviously this code path should not be executed on Ryzen.
-
 my @T = map("%ymm$_",(7..15));
 my ($C14,$C00,$D00,$D14) = @T[5..8];
+my ($R20,$R01,$R31,$R21,$R41,$R11) = map("%ymm$_",(16..21));
 
 $code.=<<___;
 .text
@@ -131,85 +61,60 @@ $code.=<<___;
 .type	__KeccakF1600,\@function
 .align	32
 __KeccakF1600:
-	lea		rhotates_left+96(%rip),%r8
-	lea		rhotates_right+96(%rip),%r9
 	lea		iotas(%rip),%r10
 	mov		\$24,%eax
-	jmp		.Loop_avx2
+	jmp		.Loop_avx512vl
 
 .align	32
-.Loop_avx2:
+.Loop_avx512vl:
 	######################################### Theta
 	vpshufd		\$0b01001110,$A20,$C00
 	vpxor		$A31,$A41,$C14
 	vpxor		$A11,$A21,@T[2]
-	vpxor		$A01,$C14,$C14
-	vpxor		@T[2],$C14,$C14		# C[1..4]
+	vpternlogq	\$0x96,$A01,$T[2],$C14	# C[1..4]
 
-	vpermq		\$0b10010011,$C14,@T[4]
 	vpxor		$A20,$C00,$C00
 	vpermq		\$0b01001110,$C00,@T[0]
 
-	vpsrlq		\$63,$C14,@T[1]
-	vpaddq		$C14,$C14,@T[2]
-	vpor		@T[2],@T[1],@T[1]	# ROL64(C[1..4],1)
+	vpermq		\$0b10010011,$C14,@T[4]
+	vprolq		\$1,$C14,@T[1]		# ROL64(C[1..4],1)
 
 	vpermq		\$0b00111001,@T[1],$D14
 	vpxor		@T[4],@T[1],$D00
 	vpermq		\$0b00000000,$D00,$D00	# D[0..0] = ROL64(C[1],1) ^ C[4]
 
-	vpxor		$A00,$C00,$C00
-	vpxor		@T[0],$C00,$C00		# C[0..0]
+	vpternlogq	\$0x96,@T[0],$A00,$C00	# C[0..0]
+	vprolq		\$1,$C00,@T[1]		# ROL64(C[0..0],1)
 
-	vpsrlq		\$63,$C00,@T[0]
-	vpaddq		$C00,$C00,@T[1]
-	vpor		@T[0],@T[1],@T[1]	# ROL64(C[0..0],1)
-
-	vpxor		$D00,$A20,$A20		# ^= D[0..0]
 	vpxor		$D00,$A00,$A00		# ^= D[0..0]
 
 	vpblendd	\$0b11000000,@T[1],$D14,$D14
-	vpblendd	\$0b00000011,$C00,@T[4],@T[4]
-	vpxor		@T[4],$D14,$D14		# D[1..4] = ROL64(C[2..4,0),1) ^ C[0..3]
+	vpblendd	\$0b00000011,$C00,@T[4],@T[0]
 
 	######################################### Rho + Pi + pre-Chi shuffle
-	vpsllvq		0*32-96(%r8),$A20,@T[3]
-	vpsrlvq		0*32-96(%r9),$A20,$A20
-	vpor		@T[3],$A20,$A20
-
-	 vpxor		$D14,$A31,$A31		# ^= D[1..4] from Theta
-	vpsllvq		2*32-96(%r8),$A31,@T[4]
-	vpsrlvq		2*32-96(%r9),$A31,$A31
-	vpor		@T[4],$A31,$A31
-
-	 vpxor		$D14,$A21,$A21		# ^= D[1..4] from Theta
-	vpsllvq		3*32-96(%r8),$A21,@T[5]
-	vpsrlvq		3*32-96(%r9),$A21,$A21
-	vpor		@T[5],$A21,$A21
-
-	 vpxor		$D14,$A41,$A41		# ^= D[1..4] from Theta
-	vpsllvq		4*32-96(%r8),$A41,@T[6]
-	vpsrlvq		4*32-96(%r9),$A41,$A41
-	vpor		@T[6],$A41,$A41
-
-	 vpxor		$D14,$A11,$A11		# ^= D[1..4] from Theta
+	 vpxor		$D00,$A20,$A20		# ^= D[0..0] from Theta
+	vprolvq		$R20,$A20,$A20
+
+	 vpternlogq	\$0x96,@T[0],$D14,$A31	# ^= D[1..4] from Theta
+	vprolvq		$R31,$A31,$A31
+
+	 vpternlogq	\$0x96,@T[0],$D14,$A21	# ^= D[1..4] from Theta
+	vprolvq		$R21,$A21,$A21
+
+	 vpternlogq	\$0x96,@T[0],$D14,$A41	# ^= D[1..4] from Theta
+	vprolvq		$R41,$A41,$A41
+
 	 vpermq		\$0b10001101,$A20,@T[3]	# $A20 -> future $A31
 	 vpermq		\$0b10001101,$A31,@T[4]	# $A31 -> future $A21
-	vpsllvq		5*32-96(%r8),$A11,@T[7]
-	vpsrlvq		5*32-96(%r9),$A11,@T[1]
-	vpor		@T[7],@T[1],@T[1]	# $A11 -> future $A01
+	 vpternlogq	\$0x96,@T[0],$D14,$A11	# ^= D[1..4] from Theta
+	vprolvq		$R11,$A11,@T[1]		# $A11 -> future $A01
 
-	 vpxor		$D14,$A01,$A01		# ^= D[1..4] from Theta
 	 vpermq		\$0b00011011,$A21,@T[5]	# $A21 -> future $A41
 	 vpermq		\$0b01110010,$A41,@T[6]	# $A41 -> future $A11
-	vpsllvq		1*32-96(%r8),$A01,@T[8]
-	vpsrlvq		1*32-96(%r9),$A01,@T[2]
-	vpor		@T[8],@T[2],@T[2]	# $A01 -> future $A20
+	 vpternlogq	\$0x96,@T[0],$D14,$A01	# ^= D[1..4] from Theta
+	vprolvq		$R01,$A01,@T[2]		# $A01 -> future $A20
 
 	######################################### Chi
-	vpsrldq		\$8,@T[1],@T[7]
-	vpandn		@T[7],@T[1],@T[0]	# tgting  [0][0] [0][0] [0][0] [0][0]
-
 	vpblendd	\$0b00001100,@T[6],@T[2],$A31	#               [4][4] [2][0]
 	vpblendd	\$0b00001100,@T[2],@T[4],@T[8]	#               [4][0] [2][1]
 	 vpblendd	\$0b00001100,@T[4],@T[3],$A41	#               [4][2] [2][4]
@@ -222,25 +127,24 @@ __KeccakF1600:
 	vpblendd	\$0b11000000,@T[6],@T[8],@T[8]	# [3][3] [1][4] [4][0] [2][1]
 	 vpblendd	\$0b11000000,@T[6],$A41,$A41	# [3][3] [1][0] [4][2] [2][4]
 	 vpblendd	\$0b11000000,@T[4],@T[7],@T[7]	# [3][4] [1][1] [4][3] [2][0]
-	vpandn		@T[8],$A31,$A31		# tgting  [3][1] [1][2] [4][3] [2][4]
-	 vpandn		@T[7],$A41,$A41		# tgting  [3][2] [1][4] [4][1] [2][3]
+	vpternlogq	\$0xC6,@T[8],@T[3],$A31		# [3][1] [1][2] [4][3] [2][4]
+	 vpternlogq	\$0xC6,@T[7],@T[5],$A41		# [3][2] [1][4] [4][1] [2][3]
+
+	vpsrldq		\$8,@T[1],@T[0]
+	vpandn		@T[0],@T[1],@T[0]	# tgting  [0][0] [0][0] [0][0] [0][0]
 
 	vpblendd	\$0b00001100,@T[2],@T[5],$A11	#               [4][0] [2][3]
 	vpblendd	\$0b00001100,@T[5],@T[3],@T[8]	#               [4][1] [2][4]
-	 vpxor		@T[3],$A31,$A31
 	vpblendd	\$0b00110000,@T[3],$A11,$A11	#        [1][2] [4][0] [2][3]
 	vpblendd	\$0b00110000,@T[4],@T[8],@T[8]	#        [1][3] [4][1] [2][4]
-	 vpxor		@T[5],$A41,$A41
 	vpblendd	\$0b11000000,@T[4],$A11,$A11	# [3][4] [1][2] [4][0] [2][3]
 	vpblendd	\$0b11000000,@T[2],@T[8],@T[8]	# [3][0] [1][3] [4][1] [2][4]
-	vpandn		@T[8],$A11,$A11		# tgting  [3][3] [1][1] [4][4] [2][2]
-	vpxor		@T[6],$A11,$A11
+	vpternlogq	\$0xC6,@T[8],@T[6],$A11		# [3][3] [1][1] [4][4] [2][2]
 
 	  vpermq	\$0b00011110,@T[1],$A21		# [0][1] [0][2] [0][4] [0][3]
 	  vpblendd	\$0b00110000,$A00,$A21,@T[8]	# [0][1] [0][0] [0][4] [0][3]
 	  vpermq	\$0b00111001,@T[1],$A01		# [0][1] [0][4] [0][3] [0][2]
 	  vpblendd	\$0b11000000,$A00,$A01,$A01	# [0][0] [0][4] [0][3] [0][2]
-	  vpandn	@T[8],$A01,$A01		# tgting  [0][4] [0][3] [0][2] [0][1]
 
 	vpblendd	\$0b00001100,@T[5],@T[4],$A20	#               [4][1] [2][1]
 	vpblendd	\$0b00001100,@T[4],@T[6],@T[7]	#               [4][2] [2][2]
@@ -248,11 +152,10 @@ __KeccakF1600:
 	vpblendd	\$0b00110000,@T[3],@T[7],@T[7]	#        [1][2] [4][2] [2][2]
 	vpblendd	\$0b11000000,@T[3],$A20,$A20	# [3][1] [1][1] [4][1] [2][1]
 	vpblendd	\$0b11000000,@T[5],@T[7],@T[7]	# [3][2] [1][2] [4][2] [2][2]
-	vpandn		@T[7],$A20,$A20		# tgting  [3][0] [1][0] [4][0] [2][0]
-	vpxor		@T[2],$A20,$A20
+	vpternlogq	\$0xC6,@T[7],@T[2],$A20		# [3][0] [1][0] [4][0] [2][0]
 
 	 vpermq		\$0b00000000,@T[0],@T[0]	# [0][0] [0][0] [0][0] [0][0]
-	 vpermq		\$0b00011011,$A31,$A31	# post-Chi shuffle
+	 vpermq		\$0b00011011,$A31,$A31		# post-Chi shuffle
 	 vpermq		\$0b10001101,$A41,$A41
 	 vpermq		\$0b01110010,$A11,$A11
 
@@ -262,18 +165,16 @@ __KeccakF1600:
 	vpblendd	\$0b00110000,@T[2],@T[7],@T[7]	#        [1][0] [4][4] [2][3]
 	vpblendd	\$0b11000000,@T[2],$A21,$A21	# [3][0] [1][4] [4][3] [2][2]
 	vpblendd	\$0b11000000,@T[3],@T[7],@T[7]	# [3][1] [1][0] [4][4] [2][3]
-	vpandn		@T[7],$A21,$A21		# tgting  [3][4] [1][3] [4][2] [2][1]
 
-	vpxor		@T[0],$A00,$A00
-	vpxor		@T[1],$A01,$A01
-	vpxor		@T[4],$A21,$A21
+	vpternlogq	\$0xC6,@T[8],@T[1],$A01		# [0][4] [0][3] [0][2] [0][1]
+	vpternlogq	\$0xC6,@T[7],@T[4],$A21		# [3][4] [1][3] [4][2] [2][1]
 
 	######################################### Iota
-	vpxor		(%r10),$A00,$A00
+	vpternlogq	\$0x96,(%r10),@T[0],$A00
 	lea		32(%r10),%r10
 
 	dec		%eax
-	jnz		.Loop_avx2
+	jnz		.Loop_avx512vl
 
 	ret
 .size	__KeccakF1600,.-__KeccakF1600
@@ -294,6 +195,7 @@ SHA3_absorb:
 	lea	96($A_flat),$A_flat
 	lea	96($inp),$inp
 	lea	96(%rsp),%r10
+	lea	rhotates_left(%rip),%r8
 
 	vzeroupper
 
@@ -305,6 +207,13 @@ SHA3_absorb:
 	vmovdqu		8+32*4-96($A_flat),$A41
 	vmovdqu		8+32*5-96($A_flat),$A11
 
+	vmovdqa64	0*32(%r8),$R20		# load "rhotate" indices
+	vmovdqa64	1*32(%r8),$R01
+	vmovdqa64	2*32(%r8),$R31
+	vmovdqa64	3*32(%r8),$R21
+	vmovdqa64	4*32(%r8),$R41
+	vmovdqa64	5*32(%r8),$R11
+
 	vpxor		@T[0],@T[0],@T[0]
 	vmovdqa		@T[0],32*2-96(%r10)	# zero transfer area on stack
 	vmovdqa		@T[0],32*3-96(%r10)
@@ -312,10 +221,10 @@ SHA3_absorb:
 	vmovdqa		@T[0],32*5-96(%r10)
 	vmovdqa		@T[0],32*6-96(%r10)
 
-.Loop_absorb_avx2:
+.Loop_absorb_avx512vl:
 	mov		$bsz,%rax
 	sub		$bsz,$len
-	jc		.Ldone_absorb_avx2
+	jc		.Ldone_absorb_avx512vl
 
 	shr		\$3,%eax
 	vpbroadcastq	0-96($inp),@T[0]
@@ -325,13 +234,13 @@ ___
 for(my $i=5; $i<25; $i++) {
 $code.=<<___
 	dec	%eax
-	jz	.Labsorved_avx2
+	jz	.Labsorved_avx512vl
 	mov	8*$i-96($inp),%r8
 	mov	%r8,$A_jagged[$i]-96(%r10)
 ___
 }
 $code.=<<___;
-.Labsorved_avx2:
+.Labsorved_avx512vl:
 	lea	($inp,$bsz),$inp
 
 	vpxor	@T[0],$A00,$A00
@@ -345,9 +254,9 @@ $code.=<<___;
 	call	__KeccakF1600
 
 	lea	96(%rsp),%r10
-	jmp	.Loop_absorb_avx2
+	jmp	.Loop_absorb_avx512vl
 
-.Ldone_absorb_avx2:
+.Ldone_absorb_avx512vl:
 	vmovq	%xmm0,-96($A_flat)
 	vmovdqu	$A01,8+32*0-96($A_flat)
 	vmovdqu	$A20,8+32*1-96($A_flat)
@@ -370,6 +279,7 @@ SHA3_squeeze:
 	mov	%rsp,%r11
 
 	lea	96($A_flat),$A_flat
+	lea	rhotates_left(%rip),%r8
 	shr	\$3,$bsz
 
 	vzeroupper
@@ -383,25 +293,32 @@ SHA3_squeeze:
 	vmovdqu		8+32*4-96($A_flat),$A41
 	vmovdqu		8+32*5-96($A_flat),$A11
 
+	vmovdqa64	0*32(%r8),$R20		# load "rhotate" indices
+	vmovdqa64	1*32(%r8),$R01
+	vmovdqa64	2*32(%r8),$R31
+	vmovdqa64	3*32(%r8),$R21
+	vmovdqa64	4*32(%r8),$R41
+	vmovdqa64	5*32(%r8),$R11
+
 	mov	$bsz,%rax
 
-.Loop_squeeze_avx2:
+.Loop_squeeze_avx512vl:
 	mov	@A_jagged[$i]-96($A_flat),%r8
 ___
 for (my $i=0; $i<25; $i++) {
 $code.=<<___;
 	sub	\$8,$len
-	jc	.Ltail_squeeze_avx2
+	jc	.Ltail_squeeze_avx512vl
 	mov	%r8,($out)
 	lea	8($out),$out
-	je	.Ldone_squeeze_avx2
+	je	.Ldone_squeeze_avx512vl
 	dec	%eax
-	je	.Lextend_output_avx2
+	je	.Lextend_output_avx512vl
 	mov	@A_jagged[$i+1]-120($A_flat),%r8
 ___
 }
 $code.=<<___;
-.Lextend_output_avx2:
+.Lextend_output_avx512vl:
 	call	__KeccakF1600
 
 	vmovq	%xmm0,-96($A_flat)
@@ -413,19 +330,19 @@ $code.=<<___;
 	vmovdqu	$A11,8+32*5-96($A_flat)
 
 	mov	$bsz,%rax
-	jmp	.Loop_squeeze_avx2
+	jmp	.Loop_squeeze_avx512vl
 
 
-.Ltail_squeeze_avx2:
+.Ltail_squeeze_avx512vl:
 	add	\$8,$len
-.Loop_tail_avx2:
+.Loop_tail_avx512vl:
 	mov	%r8b,($out)
 	lea	1($out),$out
 	shr	\$8,%r8
 	dec	$len
-	jnz	.Loop_tail_avx2
+	jnz	.Loop_tail_avx512vl
 
-.Ldone_squeeze_avx2:
+.Ldone_squeeze_avx512vl:
 	vzeroupper
 
 	lea	(%r11),%rsp
@@ -440,13 +357,6 @@ rhotates_left:
 	.quad	10,	61,	55,	8	# [2][1] [4][2] [1][3] [3][4]
 	.quad	2,	15,	25,	20	# [4][1] [3][2] [2][3] [1][4]
 	.quad	44,	43,	21,	14	# [1][1] [2][2] [3][3] [4][4]
-rhotates_right:
-	.quad	64-3,	64-18,	64-36,	64-41
-	.quad	64-1,	64-62,	64-28,	64-27
-	.quad	64-45,	64-6,	64-56,	64-39
-	.quad	64-10,	64-61,	64-55,	64-8
-	.quad	64-2,	64-15,	64-25,	64-20
-	.quad	64-44,	64-43,	64-21,	64-14
 iotas:
 	.quad	0x0000000000000001, 0x0000000000000001, 0x0000000000000001, 0x0000000000000001
 	.quad	0x0000000000008082, 0x0000000000008082, 0x0000000000008082, 0x0000000000008082
@@ -473,7 +383,7 @@ iotas:
 	.quad	0x0000000080000001, 0x0000000080000001, 0x0000000080000001, 0x0000000080000001
 	.quad	0x8000000080008008, 0x8000000080008008, 0x8000000080008008, 0x8000000080008008
 
-.asciz	"Keccak-1600 absorb and squeeze for AVX2, CRYPTOGAMS by <appro\@openssl.org>"
+.asciz	"Keccak-1600 absorb and squeeze for AVX512VL, CRYPTOGAMS by <appro\@openssl.org>"
 ___
 
 print $code;
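
The other notable simplification: AVX2 has no 64-bit rotate, so each
Rho rotation was emulated as (x << n) | (x >> (64-n)) with
vpsllvq/vpsrlvq/vpor and two constant tables, rhotates_left and
rhotates_right. AVX512VL's vprolvq rotates each lane directly by a
per-lane count, so rhotates_right is deleted and the six rhotates_left
vectors are loaded once into $R20..$R11 (%ymm16..%ymm21, registers
available only with the EVEX encoding) by SHA3_absorb and SHA3_squeeze,
removing the per-round table accesses from __KeccakF1600. A scalar Perl
sketch of the equivalence (illustrative only, not part of the commit;
assumes a 64-bit Perl):

    #!/usr/bin/env perl
    my $M64 = 0xFFFFFFFFFFFFFFFF;

    sub rol64 {                         # one lane of vprolvq
        my ($x, $n) = @_;
        $n &= 63;
        return $x unless $n;
        return (($x << $n) | ($x >> (64 - $n))) & $M64;
    }

    sub rol64_emulated {                # one lane of vpsllvq/vpsrlvq/vpor
        my ($x, $left, $right) = @_;    # caller passes $right == 64 - $left
        return (($x << $left) | ($x >> $right)) & $M64;
    }

    # [2][1] rotates left by 10, the first rhotates_left entry kept
    # above; both formulations print the same value:
    printf "%016x\n", rol64(0x0123456789abcdef, 10);
    printf "%016x\n", rol64_emulated(0x0123456789abcdef, 10, 64-10);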

