[openssl-commits] [openssl] master update
Andy Polyakov
appro at openssl.org
Thu Jun 29 19:16:33 UTC 2017
The branch master has been updated
via 7807267bedb3b5ad18bce7ef2b984028fd0b7510 (commit)
via d6f0c94a6551f22650dc1cabe25e1141061831de (commit)
via a1613840ddae01334999f7a92265eac0d4f50da4 (commit)
via a078d9dfa92d34458529de19818faa94a75ae908 (commit)
via 64aef3f53d79d5af85f5fe3bb3c57d2c529a0790 (commit)
from 4c17819c41b32f6652662d4a2942e35c67d7d650 (commit)
- Log -----------------------------------------------------------------
commit 7807267bedb3b5ad18bce7ef2b984028fd0b7510
Author: Andy Polyakov <appro at openssl.org>
Date: Tue Jun 27 21:45:18 2017 +0200
Add sha/asm/keccak1600-s390x.pl.
Reviewed-by: Richard Levitte <levitte at openssl.org>
commit d6f0c94a6551f22650dc1cabe25e1141061831de
Author: Andy Polyakov <appro at openssl.org>
Date: Mon Jun 26 17:39:43 2017 +0200
sha/asm/keccak1600-x86_64.pl: add CFI directives.
Reviewed-by: Richard Levitte <levitte at openssl.org>
commit a1613840ddae01334999f7a92265eac0d4f50da4
Author: Andy Polyakov <appro at openssl.org>
Date: Mon Jun 26 17:29:24 2017 +0200
sha/asm/keccak1600-x86_64.pl: optimize by re-ordering instructions.
Reviewed-by: Richard Levitte <levitte at openssl.org>
commit a078d9dfa92d34458529de19818faa94a75ae908
Author: Andy Polyakov <appro at openssl.org>
Date: Mon Jun 26 17:28:13 2017 +0200
sha/asm/keccak1600-x86_64.pl: remove redundant moves.
Reviewed-by: Richard Levitte <levitte at openssl.org>
commit 64aef3f53d79d5af85f5fe3bb3c57d2c529a0790
Author: Andy Polyakov <appro at openssl.org>
Date: Mon Jun 26 17:27:09 2017 +0200
Add sha/asm/keccak1600-x86_64.pl.
Reviewed-by: Richard Levitte <levitte at openssl.org>
-----------------------------------------------------------------------
Summary of changes:
crypto/sha/asm/keccak1600-s390x.pl | 568 +++++++++++++++++++++++++++++++++
crypto/sha/asm/keccak1600-x86_64.pl | 609 ++++++++++++++++++++++++++++++++++++
2 files changed, 1177 insertions(+)
create mode 100755 crypto/sha/asm/keccak1600-s390x.pl
create mode 100755 crypto/sha/asm/keccak1600-x86_64.pl
diff --git a/crypto/sha/asm/keccak1600-s390x.pl b/crypto/sha/asm/keccak1600-s390x.pl
new file mode 100755
index 0000000..b150abe
--- /dev/null
+++ b/crypto/sha/asm/keccak1600-s390x.pl
@@ -0,0 +1,568 @@
+#!/usr/bin/env perl
+# Copyright 2017 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+#
+# ====================================================================
+# Written by Andy Polyakov <appro at openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# Keccak-1600 for s390x.
+#
+# June 2017.
+#
+# The code below is a [lane-complementing] KECCAK_2X implementation
+# (see sha/keccak1600.c) with C[5] and D[5] held in the register bank.
+# However, instead of actually unrolling the loop pair-wise, I simply
+# flip the pointers to T[][] and A[][] at the end of each round. Since
+# the number of rounds is even, the last round writes to A[][] and
+# everything works out. In a nutshell it's a transliteration of the
+# x86_64 module, because both architectures have similar
+# capabilities/limitations. Performance measurement is problematic, as
+# I don't have access to an idle system. It looks like z13 processes
+# one byte [out of a long message] in ~14 cycles; at least the result
+# is consistent with an estimate based on instruction count and an
+# assumed instruction issue rate. That is ~2.5x faster than
+# compiler-generated code.
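+#
+# As a hedged illustration (not part of the module): the pointer-flip
+# trick amounts to the C sketch below, where Round() stands for a
+# hypothetical single-round routine in the spirit of sha/keccak1600.c:
+#
+#	uint64_t A[5][5], T[5][5];		/* A[][] is the state  */
+#	uint64_t (*src)[5] = A, (*dst)[5] = T, (*tmp)[5];
+#	for (int i = 0; i < 24; i++) {
+#		Round(dst, src, iotas[i]);	/* read src, write dst */
+#		tmp = src; src = dst; dst = tmp;/* flip the pointers  */
+#	}
+#	/* 24 is even, so the last round has written back to A[][]  */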
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+ $SIZE_T=4;
+ $g="";
+} else {
+ $SIZE_T=8;
+ $g="g";
+}
+
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+my @A = map([ 8*$_, 8*($_+1), 8*($_+2), 8*($_+3), 8*($_+4) ], (0,5,10,15,20));
+
+my @C = map("%r$_",(0,1,5..7));
+my @D = map("%r$_",(8..12));
+my @T = map("%r$_",(13..14));
+my ($src,$dst,$iotas) = map("%r$_",(2..4));
+my $sp = "%r15";
+
+$stdframe=16*$SIZE_T+4*8;
+$frame=$stdframe+25*8;
+
+my @rhotates = ([ 0, 1, 62, 28, 27 ],
+ [ 36, 44, 6, 55, 20 ],
+ [ 3, 10, 43, 25, 39 ],
+ [ 41, 45, 15, 21, 8 ],
+ [ 18, 2, 61, 56, 14 ]);
+
+{ my @C = @C;	# copy, because we mess them up...
+ my @D = @D;
+
+$code.=<<___;
+.text
+
+.type __KeccakF1600,\@function
+.align 32
+__KeccakF1600:
+ st${g} %r14,$SIZE_T*14($sp)
+ lg @C[0],$A[4][0]($src)
+ lg @C[1],$A[4][1]($src)
+ lg @C[2],$A[4][2]($src)
+ lg @C[3],$A[4][3]($src)
+ lg @C[4],$A[4][4]($src)
+ j .Loop
+
+.align 16
+.Loop:
+ lg @D[0],$A[0][0]($src)
+ lg @D[1],$A[1][1]($src)
+ lg @D[2],$A[2][2]($src)
+ lg @D[3],$A[3][3]($src)
+
+ xgr @C[0], at D[0]
+ xg @C[1],$A[0][1]($src)
+ xg @C[2],$A[0][2]($src)
+ xg @C[3],$A[0][3]($src)
+ lgr @D[4], at C[4]
+ xg @C[4],$A[0][4]($src)
+
+ xg @C[0],$A[1][0]($src)
+ xgr @C[1], at D[1]
+ xg @C[2],$A[1][2]($src)
+ xg @C[3],$A[1][3]($src)
+ xg @C[4],$A[1][4]($src)
+
+ xg @C[0],$A[2][0]($src)
+ xg @C[1],$A[2][1]($src)
+ xgr @C[2], at D[2]
+ xg @C[3],$A[2][3]($src)
+ xg @C[4],$A[2][4]($src)
+
+ xg @C[0],$A[3][0]($src)
+ xg @C[1],$A[3][1]($src)
+ xg @C[2],$A[3][2]($src)
+ xgr @C[3], at D[3]
+ xg @C[4],$A[3][4]($src)
+
+ lgr @T[0], at C[2]
+ rllg @C[2], at C[2],1
+ xgr @C[2], at C[0] # D[1] = ROL64(C[2], 1) ^ C[0]
+
+ rllg @C[0], at C[0],1
+ xgr @C[0], at C[3] # D[4] = ROL64(C[0], 1) ^ C[3]
+
+ rllg @C[3], at C[3],1
+ xgr @C[3], at C[1] # D[2] = ROL64(C[3], 1) ^ C[1]
+
+ rllg @C[1], at C[1],1
+ xgr @C[1], at C[4] # D[0] = ROL64(C[1], 1) ^ C[4]
+
+ rllg @C[4], at C[4],1
+ xgr @C[4], at T[0] # D[3] = ROL64(C[4], 1) ^ C[2]
+___
+ my @E = @D;
+ @D = (@C[1], at C[2], at C[3], at C[4], at C[0]);
+ @C = @E;
+$code.=<<___;
+ xgr @C[1], at D[1]
+ xgr @C[2], at D[2]
+ xgr @C[3], at D[3]
+ rllg @C[1], at C[1],$rhotates[1][1]
+ xgr @C[4], at D[4]
+ rllg @C[2], at C[2],$rhotates[2][2]
+ xgr @C[0], at D[0]
+
+ lgr @T[0], at C[1]
+ ogr @C[1], at C[2]
+ rllg @C[3], at C[3],$rhotates[3][3]
+ xgr @C[1], at C[0] # C[0] ^ ( C[1] | C[2])
+ rllg @C[4], at C[4],$rhotates[4][4]
+ xg @C[1],0($iotas)
+ la $iotas,8($iotas)
+ stg @C[1],$A[0][0]($dst) # R[0][0] = C[0] ^ ( C[1] | C[2]) ^ iotas[i]
+
+ lgr @T[1], at C[4]
+ ngr @C[4], at C[3]
+ lghi @C[1],-1 # no 'not' instruction :-(
+ xgr @C[4], at C[2] # C[2] ^ ( C[4] & C[3])
+ xgr @C[2], at C[1] # not @C[2]
+ stg @C[4],$A[0][2]($dst) # R[0][2] = C[2] ^ ( C[4] & C[3])
+ ogr @C[2], at C[3]
+ xgr @C[2], at T[0] # C[1] ^ (~C[2] | C[3])
+
+ ngr @T[0], at C[0]
+ stg @C[2],$A[0][1]($dst) # R[0][1] = C[1] ^ (~C[2] | C[3])
+ xgr @T[0], at T[1] # C[4] ^ ( C[1] & C[0])
+ ogr @T[1], at C[0]
+ stg @T[0],$A[0][4]($dst) # R[0][4] = C[4] ^ ( C[1] & C[0])
+ xgr @T[1], at C[3] # C[3] ^ ( C[4] | C[0])
+ stg @T[1],$A[0][3]($dst) # R[0][3] = C[3] ^ ( C[4] | C[0])
+
+
+ lg @C[0],$A[0][3]($src)
+ lg @C[4],$A[4][2]($src)
+ lg @C[3],$A[3][1]($src)
+ lg @C[1],$A[1][4]($src)
+ lg @C[2],$A[2][0]($src)
+
+ xgr @C[0], at D[3]
+ xgr @C[4], at D[2]
+ rllg @C[0], at C[0],$rhotates[0][3]
+ xgr @C[3], at D[1]
+ rllg @C[4], at C[4],$rhotates[4][2]
+ xgr @C[1], at D[4]
+ rllg @C[3], at C[3],$rhotates[3][1]
+ xgr @C[2], at D[0]
+
+ lgr @T[0], at C[0]
+ ogr @C[0], at C[4]
+ rllg @C[1], at C[1],$rhotates[1][4]
+ xgr @C[0], at C[3] # C[3] ^ (C[0] | C[4])
+ rllg @C[2], at C[2],$rhotates[2][0]
+ stg @C[0],$A[1][3]($dst) # R[1][3] = C[3] ^ (C[0] | C[4])
+
+ lgr @T[1], at C[1]
+ ngr @C[1], at T[0]
+ lghi @C[0],-1 # no 'not' instruction :-(
+ xgr @C[1], at C[4] # C[4] ^ (C[1] & C[0])
+ xgr @C[4], at C[0] # not @C[4]
+ stg @C[1],$A[1][4]($dst) # R[1][4] = C[4] ^ (C[1] & C[0])
+
+ ogr @C[4], at C[3]
+ xgr @C[4], at C[2] # C[2] ^ (~C[4] | C[3])
+
+ ngr @C[3], at C[2]
+ stg @C[4],$A[1][2]($dst) # R[1][2] = C[2] ^ (~C[4] | C[3])
+ xgr @C[3], at T[1] # C[1] ^ (C[3] & C[2])
+ ogr @T[1], at C[2]
+ stg @C[3],$A[1][1]($dst) # R[1][1] = C[1] ^ (C[3] & C[2])
+ xgr @T[1], at T[0] # C[0] ^ (C[1] | C[2])
+ stg @T[1],$A[1][0]($dst) # R[1][0] = C[0] ^ (C[1] | C[2])
+
+
+ lg @C[2],$A[2][3]($src)
+ lg @C[3],$A[3][4]($src)
+ lg @C[1],$A[1][2]($src)
+ lg @C[4],$A[4][0]($src)
+ lg @C[0],$A[0][1]($src)
+
+ xgr @C[2], at D[3]
+ xgr @C[3], at D[4]
+ rllg @C[2], at C[2],$rhotates[2][3]
+ xgr @C[1], at D[2]
+ rllg @C[3], at C[3],$rhotates[3][4]
+ xgr @C[4], at D[0]
+ rllg @C[1], at C[1],$rhotates[1][2]
+ xgr @C[0], at D[1]
+
+ lgr @T[0], at C[2]
+ ngr @C[2], at C[3]
+ rllg @C[4], at C[4],$rhotates[4][0]
+ xgr @C[2], at C[1] # C[1] ^ ( C[2] & C[3])
+ lghi @T[1],-1 # no 'not' instruction :-(
+ stg @C[2],$A[2][1]($dst) # R[2][1] = C[1] ^ ( C[2] & C[3])
+
+ xgr @C[3], at T[1] # not @C[3]
+ lgr @T[1], at C[4]
+ ngr @C[4], at C[3]
+ rllg @C[0], at C[0],$rhotates[0][1]
+ xgr @C[4], at T[0] # C[2] ^ ( C[4] & ~C[3])
+ ogr @T[0], at C[1]
+ stg @C[4],$A[2][2]($dst) # R[2][2] = C[2] ^ ( C[4] & ~C[3])
+ xgr @T[0], at C[0] # C[0] ^ ( C[2] | C[1])
+
+ ngr @C[1], at C[0]
+ stg @T[0],$A[2][0]($dst) # R[2][0] = C[0] ^ ( C[2] | C[1])
+ xgr @C[1], at T[1] # C[4] ^ ( C[1] & C[0])
+ ogr @C[0], at T[1]
+ stg @C[1],$A[2][4]($dst) # R[2][4] = C[4] ^ ( C[1] & C[0])
+ xgr @C[0], at C[3] # ~C[3] ^ ( C[0] | C[4])
+ stg @C[0],$A[2][3]($dst) # R[2][3] = ~C[3] ^ ( C[0] | C[4])
+
+
+ lg @C[2],$A[2][1]($src)
+ lg @C[3],$A[3][2]($src)
+ lg @C[1],$A[1][0]($src)
+ lg @C[4],$A[4][3]($src)
+ lg @C[0],$A[0][4]($src)
+
+ xgr @C[2], at D[1]
+ xgr @C[3], at D[2]
+ rllg @C[2], at C[2],$rhotates[2][1]
+ xgr @C[1], at D[0]
+ rllg @C[3], at C[3],$rhotates[3][2]
+ xgr @C[4], at D[3]
+ rllg @C[1], at C[1],$rhotates[1][0]
+ xgr @C[0], at D[4]
+ rllg @C[4], at C[4],$rhotates[4][3]
+
+ lgr @T[0], at C[2]
+ ogr @C[2], at C[3]
+ lghi @T[1],-1 # no 'not' instruction :-(
+ xgr @C[2], at C[1] # C[1] ^ ( C[2] | C[3])
+ xgr @C[3], at T[1] # not @C[3]
+ stg @C[2],$A[3][1]($dst) # R[3][1] = C[1] ^ ( C[2] | C[3])
+
+ lgr @T[1], at C[4]
+ ogr @C[4], at C[3]
+ rllg @C[0], at C[0],$rhotates[0][4]
+ xgr @C[4], at T[0] # C[2] ^ ( C[4] | ~C[3])
+ ngr @T[0], at C[1]
+ stg @C[4],$A[3][2]($dst) # R[3][2] = C[2] ^ ( C[4] | ~C[3])
+ xgr @T[0], at C[0] # C[0] ^ ( C[2] & C[1])
+
+ ogr @C[1], at C[0]
+ stg @T[0],$A[3][0]($dst) # R[3][0] = C[0] ^ ( C[2] & C[1])
+ xgr @C[1], at T[1] # C[4] ^ ( C[1] | C[0])
+ ngr @C[0], at T[1]
+ stg @C[1],$A[3][4]($dst) # R[3][4] = C[4] ^ ( C[1] | C[0])
+ xgr @C[0], at C[3] # ~C[3] ^ ( C[0] & C[4])
+ stg @C[0],$A[3][3]($dst) # R[3][3] = ~C[3] ^ ( C[0] & C[4])
+
+
+ xg @D[2],$A[0][2]($src)
+ xg @D[3],$A[1][3]($src)
+ xg @D[1],$A[4][1]($src)
+ xg @D[4],$A[2][4]($src)
+ xgr $dst,$src # xchg $dst,$src
+ rllg @D[2], at D[2],$rhotates[0][2]
+ xg @D[0],$A[3][0]($src)
+ rllg @D[3], at D[3],$rhotates[1][3]
+ xgr $src,$dst
+ rllg @D[1], at D[1],$rhotates[4][1]
+ xgr $dst,$src
+ rllg @D[4], at D[4],$rhotates[2][4]
+___
+ @C = (@D[2], at D[3], at D[4], at D[0], at D[1]);
+$code.=<<___;
+ lgr @T[0], at C[0]
+ ngr @C[0], at C[1]
+ lghi @T[1],-1 # no 'not' instruction :-(
+ xgr @C[0], at C[4] # C[4] ^ ( C[0] & C[1])
+ xgr @C[1], at T[1] # not @C[1]
+ stg @C[0],$A[4][4]($src) # R[4][4] = C[4] ^ ( C[0] & C[1])
+
+ lgr @T[1], at C[2]
+ ngr @C[2], at C[1]
+ rllg @D[0], at D[0],$rhotates[3][0]
+ xgr @C[2], at T[0] # C[0] ^ ( C[2] & ~C[1])
+ ogr @T[0], at C[4]
+ stg @C[2],$A[4][0]($src) # R[4][0] = C[0] ^ ( C[2] & ~C[1])
+ xgr @T[0], at C[3] # C[3] ^ ( C[0] | C[4])
+
+ ngr @C[4], at C[3]
+ stg @T[0],$A[4][3]($src) # R[4][3] = C[3] ^ ( C[0] | C[4])
+ xgr @C[4], at T[1] # C[2] ^ ( C[4] & C[3])
+ ogr @C[3], at T[1]
+ stg @C[4],$A[4][2]($src) # R[4][2] = C[2] ^ ( C[4] & C[3])
+ xgr @C[3], at C[1] # ~C[1] ^ ( C[2] | C[3])
+
+ lgr @C[1], at C[0] # harmonize with the loop top
+ lgr @C[0], at T[0]
+ stg @C[3],$A[4][1]($src) # R[4][1] = ~C[1] ^ ( C[2] | C[3])
+
+ tmll $iotas,255
+ jnz .Loop
+
+ l${g} %r14,$SIZE_T*14($sp)
+ br %r14
+.size __KeccakF1600,.-__KeccakF1600
+___
+}
+{
+$code.=<<___;
+.globl KeccakF1600
+.type KeccakF1600,\@function
+.align 32
+KeccakF1600:
+.LKeccakF1600:
+ lghi %r1,-$frame
+ stm${g} %r6,%r15,$SIZE_T*6($sp)
+ lgr %r0,$sp
+ la $sp,0(%r1,$sp)
+ st${g} %r0,0($sp)
+
+ lghi @D[0],-1 # no 'not' instruction :-(
+ lghi @D[1],-1
+ lghi @D[2],-1
+ lghi @D[3],-1
+ lghi @D[4],-1
+ lghi @T[0],-1
+ xg @D[0],$A[0][1]($src)
+ xg @D[1],$A[0][2]($src)
+ xg @D[2],$A[1][3]($src)
+ xg @D[3],$A[2][2]($src)
+ xg @D[4],$A[3][2]($src)
+ xg @T[0],$A[4][0]($src)
+ stg @D[0],$A[0][1]($src)
+ stg @D[1],$A[0][2]($src)
+ stg @D[2],$A[1][3]($src)
+ stg @D[3],$A[2][2]($src)
+ stg @D[4],$A[3][2]($src)
+ stg @T[0],$A[4][0]($src)
+
+ la $dst,$stdframe($sp)
+ larl $iotas,iotas
+
+ bras %r14,__KeccakF1600
+
+ lghi @D[0],-1 # no 'not' instruction :-(
+ lghi @D[1],-1
+ lghi @D[2],-1
+ lghi @D[3],-1
+ lghi @D[4],-1
+ lghi @T[0],-1
+ xg @D[0],$A[0][1]($src)
+ xg @D[1],$A[0][2]($src)
+ xg @D[2],$A[1][3]($src)
+ xg @D[3],$A[2][2]($src)
+ xg @D[4],$A[3][2]($src)
+ xg @T[0],$A[4][0]($src)
+ stg @D[0],$A[0][1]($src)
+ stg @D[1],$A[0][2]($src)
+ stg @D[2],$A[1][3]($src)
+ stg @D[3],$A[2][2]($src)
+ stg @D[4],$A[3][2]($src)
+ stg @T[0],$A[4][0]($src)
+
+ lm${g} %r6,%r15,$frame+6*$SIZE_T($sp)
+ br %r14
+.size KeccakF1600,.-KeccakF1600
+___
+}
+{ my ($A_flat,$inp,$len,$bsz) = map("%r$_",(2..5));
+
+$code.=<<___;
+.globl SHA3_absorb
+.type SHA3_absorb,\@function
+.align 32
+SHA3_absorb:
+ lghi %r1,-$frame
+ stm${g} %r5,%r15,$SIZE_T*5($sp)
+ lgr %r0,$sp
+ la $sp,0(%r1,$sp)
+ st${g} %r0,0($sp)
+
+ lghi @D[0],-1 # no 'not' instruction :-(
+ lghi @D[1],-1
+ lghi @D[2],-1
+ lghi @D[3],-1
+ lghi @D[4],-1
+ lghi @T[0],-1
+ xg @D[0],$A[0][1]($src)
+ xg @D[1],$A[0][2]($src)
+ xg @D[2],$A[1][3]($src)
+ xg @D[3],$A[2][2]($src)
+ xg @D[4],$A[3][2]($src)
+ xg @T[0],$A[4][0]($src)
+ stg @D[0],$A[0][1]($src)
+ stg @D[1],$A[0][2]($src)
+ stg @D[2],$A[1][3]($src)
+ stg @D[3],$A[2][2]($src)
+ stg @D[4],$A[3][2]($src)
+ stg @T[0],$A[4][0]($src)
+
+.Loop_absorb:
+ cl${g}r $len,$bsz
+ jl .Ldone_absorb
+
+ srl${g} $bsz,3
+ la %r1,0($A_flat)
+
+.Lblock_absorb:
+ lrvg %r0,0($inp)
+ la $inp,8($inp)
+ xg %r0,0(%r1)
+ la %r1,8(%r1)
+ a${g}hi $len,-8
+ stg %r0,-8(%r1)
+ brct $bsz,.Lblock_absorb
+
+ stm${g} $inp,$len,$frame+3*$SIZE_T($sp)
+ la $dst,$stdframe($sp)
+ larl $iotas,iotas
+ bras %r14,__KeccakF1600
+ lm${g} $inp,$bsz,$frame+3*$SIZE_T($sp)
+ j .Loop_absorb
+
+.align 16
+.Ldone_absorb:
+ lghi @D[0],-1 # no 'not' instruction :-(
+ lghi @D[1],-1
+ lghi @D[2],-1
+ lghi @D[3],-1
+ lghi @D[4],-1
+ lghi @T[0],-1
+ xg @D[0],$A[0][1]($src)
+ xg @D[1],$A[0][2]($src)
+ xg @D[2],$A[1][3]($src)
+ xg @D[3],$A[2][2]($src)
+ xg @D[4],$A[3][2]($src)
+ xg @T[0],$A[4][0]($src)
+ stg @D[0],$A[0][1]($src)
+ stg @D[1],$A[0][2]($src)
+ stg @D[2],$A[1][3]($src)
+ stg @D[3],$A[2][2]($src)
+ stg @D[4],$A[3][2]($src)
+ stg @T[0],$A[4][0]($src)
+
+ lgr %r2,$len # return value
+
+ lm${g} %r6,%r15,$frame+6*$SIZE_T($sp)
+ br %r14
+.size SHA3_absorb,.-SHA3_absorb
+___
+}
+{ my ($A_flat,$out,$len,$bsz) = map("%r$_",(2..5));
+
+$code.=<<___;
+.globl SHA3_squeeze
+.type SHA3_squeeze,\@function
+.align 32
+SHA3_squeeze:
+ srl${g} $bsz,3
+ st${g} %r14,2*$SIZE_T($sp)
+ lghi %r14,8
+ st${g} $bsz,5*$SIZE_T($sp)
+ la %r1,0($A_flat)
+
+ j .Loop_squeeze
+
+.align 16
+.Loop_squeeze:
+ cl${g}r $len,%r14
+ jl .Ltail_squeeze
+
+ lrvg %r0,0(%r1)
+ la %r1,8(%r1)
+ stg %r0,0($out)
+ la $out,8($out)
+ a${g}hi $len,-8 # len -= 8
+ jz .Ldone_squeeze
+
+ brct $bsz,.Loop_squeeze # bsz--
+
+ stm${g} $out,$len,3*$SIZE_T($sp)
+ bras %r14,.LKeccakF1600
+ lm${g} $out,$bsz,3*$SIZE_T($sp)
+ lghi %r14,8
+ la %r1,0($A_flat)
+ j .Loop_squeeze
+
+.Ltail_squeeze:
+ lg %r0,0(%r1)
+.Loop_tail_squeeze:
+ stc %r0,0($out)
+ la $out,1($out)
+ srlg %r0,8
+ brct $len,.Loop_tail_squeeze
+
+.Ldone_squeeze:
+ l${g} %r14,2*$SIZE_T($sp)
+ br %r14
+.size SHA3_squeeze,.-SHA3_squeeze
+___
+}
+$code.=<<___;
+.align 256
+ .quad 0,0,0,0,0,0,0,0
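+	# the 64 zero bytes above place iotas at offset 64 within the
+	# 256-byte-aligned block, so that after 24 increments by 8 the
+	# low byte of the iotas pointer wraps to zero and the
+	# "tmll ...,255; jnz .Loop" test in __KeccakF1600 exits the loop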
+.type iotas,\@object
+iotas:
+ .quad 0x0000000000000001
+ .quad 0x0000000000008082
+ .quad 0x800000000000808a
+ .quad 0x8000000080008000
+ .quad 0x000000000000808b
+ .quad 0x0000000080000001
+ .quad 0x8000000080008081
+ .quad 0x8000000000008009
+ .quad 0x000000000000008a
+ .quad 0x0000000000000088
+ .quad 0x0000000080008009
+ .quad 0x000000008000000a
+ .quad 0x000000008000808b
+ .quad 0x800000000000008b
+ .quad 0x8000000000008089
+ .quad 0x8000000000008003
+ .quad 0x8000000000008002
+ .quad 0x8000000000000080
+ .quad 0x000000000000800a
+ .quad 0x800000008000000a
+ .quad 0x8000000080008081
+ .quad 0x8000000000008080
+ .quad 0x0000000080000001
+ .quad 0x8000000080008008
+.size iotas,.-iotas
+.asciz "Keccak-1600 absorb and squeeze for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+# unlike the 32-bit shift, the 64-bit one takes three arguments
+$code =~ s/(srlg\s+)(%r[0-9]+),/$1$2,$2,/gm;
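+# e.g. (hedged illustration): the two-argument "srlg	%r0,8" in
+# SHA3_squeeze above is emitted as "srlg	%r0,%r0,8", i.e. with the
+# same register as both source and destination.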
+
+print $code;
+close STDOUT;
diff --git a/crypto/sha/asm/keccak1600-x86_64.pl b/crypto/sha/asm/keccak1600-x86_64.pl
new file mode 100755
index 0000000..eb12c99
--- /dev/null
+++ b/crypto/sha/asm/keccak1600-x86_64.pl
@@ -0,0 +1,609 @@
+#!/usr/bin/env perl
+# Copyright 2017 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+#
+# ====================================================================
+# Written by Andy Polyakov <appro at openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# Keccak-1600 for x86_64.
+#
+# June 2017.
+#
+# The code below is a [lane-complementing] KECCAK_2X implementation
+# (see sha/keccak1600.c) with C[5] and D[5] held in the register bank.
+# However, instead of actually unrolling the loop pair-wise, I simply
+# flip the pointers to T[][] and A[][] at the end of each round. Since
+# the number of rounds is even, the last round writes to A[][] and
+# everything works out. How does it compare to the assembly module in
+# the Keccak Code Package? KCP is faster on a couple of processors,
+# VIA Nano and Goldmont, by 4-6%; otherwise this module is either as
+# fast or faster by up to 15%...
+#
+########################################################################
+# Numbers are cycles per processed byte out of a large message.
+#
+# r=1088(*)
+#
+# P4 25.8
+# Core 2 13.0
+# Westmere 13.7
+# Sandy Bridge 12.9(**)
+# Haswell 9.7
+# Skylake 9.4
+# Silvermont 22.8
+# Goldmont 16.4
+# VIA Nano 18.0
+# Sledgehammer 13.3
+# Bulldozer 16.5
+#
+# (*)	Corresponds to SHA3-256. The improvement over compiler-generated
+#	code varies a lot; the most common coefficient is 15% in
+#	comparison to gcc-5.x, 50% for gcc-4.x, 90% for gcc-3.x.
+# (**)	Sandy Bridge has a broken rotate instruction. Performance can be
+#	improved by 14% by replacing rotates with double-precision
+#	shifts with the same register as source and destination.
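+#
+# As a hedged illustration (not part of the module): the
+# lane-complementing storage convention keeps six lanes
+# bitwise-complemented in memory, which cancels most of the NOTs in
+# the chi step. In C it amounts to:
+#
+#	/* on entry to, and again on exit from, the permutation */
+#	A[0][1] = ~A[0][1]; A[0][2] = ~A[0][2]; A[1][3] = ~A[1][3];
+#	A[2][2] = ~A[2][2]; A[3][2] = ~A[3][2]; A[4][0] = ~A[4][0];
+#
+# which is exactly what the notq blocks in KeccakF1600 and
+# SHA3_absorb below do.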
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+my @A = map([ 8*$_-100, 8*($_+1)-100, 8*($_+2)-100,
+ 8*($_+3)-100, 8*($_+4)-100 ], (0,5,10,15,20));
+
+my @C = ("%rax","%rbx","%rcx","%rdx","%rbp");
+my @D = map("%r$_",(8..12));
+my @T = map("%r$_",(13..14));
+my $iotas = "%r15";
+
+my @rhotates = ([ 0, 1, 62, 28, 27 ],
+ [ 36, 44, 6, 55, 20 ],
+ [ 3, 10, 43, 25, 39 ],
+ [ 41, 45, 15, 21, 8 ],
+ [ 18, 2, 61, 56, 14 ]);
+
+$code.=<<___;
+.text
+
+.type __KeccakF1600,\@function
+.align 32
+__KeccakF1600:
+ mov $A[4][0](%rdi), at C[0]
+ mov $A[4][1](%rdi), at C[1]
+ mov $A[4][2](%rdi), at C[2]
+ mov $A[4][3](%rdi), at C[3]
+ mov $A[4][4](%rdi), at C[4]
+ jmp .Loop
+
+.align 32
+.Loop:
+ mov $A[0][0](%rdi), at D[0]
+ mov $A[1][1](%rdi), at D[1]
+ mov $A[2][2](%rdi), at D[2]
+ mov $A[3][3](%rdi), at D[3]
+
+ xor $A[0][2](%rdi), at C[2]
+ xor $A[0][3](%rdi), at C[3]
+ xor @D[0], @C[0]
+ xor $A[0][1](%rdi), at C[1]
+ xor $A[1][2](%rdi), at C[2]
+ xor $A[1][0](%rdi), at C[0]
+ mov @C[4], at D[4]
+ xor $A[0][4](%rdi), at C[4]
+
+ xor @D[2], @C[2]
+ xor $A[2][0](%rdi), at C[0]
+ xor $A[1][3](%rdi), at C[3]
+ xor @D[1], @C[1]
+ xor $A[1][4](%rdi), at C[4]
+
+ xor $A[3][2](%rdi), at C[2]
+ xor $A[3][0](%rdi), at C[0]
+ xor $A[2][3](%rdi), at C[3]
+ xor $A[2][1](%rdi), at C[1]
+ xor $A[2][4](%rdi), at C[4]
+
+ mov @C[2], at T[0]
+ rol \$1, at C[2]
+ xor @C[0], at C[2] # D[1] = ROL64(C[2], 1) ^ C[0]
+ xor @D[3], @C[3]
+
+ rol \$1, at C[0]
+ xor @C[3], at C[0] # D[4] = ROL64(C[0], 1) ^ C[3]
+ xor $A[3][1](%rdi), at C[1]
+
+ rol \$1, at C[3]
+ xor @C[1], at C[3] # D[2] = ROL64(C[3], 1) ^ C[1]
+ xor $A[3][4](%rdi), at C[4]
+
+ rol \$1, at C[1]
+ xor @C[4], at C[1] # D[0] = ROL64(C[1], 1) ^ C[4]
+
+ rol \$1, at C[4]
+ xor @T[0], at C[4] # D[3] = ROL64(C[4], 1) ^ C[2]
+___
+ my @E = @D;
+ @D = (@C[1], at C[2], at C[3], at C[4], at C[0]);
+ @C = @E;
+$code.=<<___;
+ xor @D[1], at C[1]
+ xor @D[2], at C[2]
+ rol \$$rhotates[1][1], at C[1]
+ xor @D[3], at C[3]
+ xor @D[4], at C[4]
+ rol \$$rhotates[2][2], at C[2]
+ xor @D[0], at C[0]
+ mov @C[1], at T[0]
+ rol \$$rhotates[3][3], at C[3]
+ or @C[2], at C[1]
+ xor @C[0], at C[1] # C[0] ^ ( C[1] | C[2])
+ rol \$$rhotates[4][4], at C[4]
+
+ xor ($iotas), at C[1]
+ lea 8($iotas),$iotas
+
+ mov @C[4], at T[1]
+ and @C[3], at C[4]
+ mov @C[1],$A[0][0](%rsi) # R[0][0] = C[0] ^ ( C[1] | C[2]) ^ iotas[i]
+ xor @C[2], at C[4] # C[2] ^ ( C[4] & C[3])
+ not @C[2]
+ mov @C[4],$A[0][2](%rsi) # R[0][2] = C[2] ^ ( C[4] & C[3])
+
+ or @C[3], at C[2]
+ xor @T[0], at C[2] # C[1] ^ (~C[2] | C[3])
+ mov @C[2],$A[0][1](%rsi) # R[0][1] = C[1] ^ (~C[2] | C[3])
+
+ and @C[0], at T[0]
+ xor @T[1], at T[0] # C[4] ^ ( C[1] & C[0])
+ mov @T[0],$A[0][4](%rsi) # R[0][4] = C[4] ^ ( C[1] & C[0])
+
+ or @C[0], at T[1]
+ xor @C[3], at T[1] # C[3] ^ ( C[4] | C[0])
+ mov @T[1],$A[0][3](%rsi) # R[0][3] = C[3] ^ ( C[4] | C[0])
+
+
+ mov $A[0][3](%rdi), at C[0]
+ mov $A[4][2](%rdi), at C[4]
+ mov $A[3][1](%rdi), at C[3]
+ mov $A[1][4](%rdi), at C[1]
+ mov $A[2][0](%rdi), at C[2]
+
+ xor @D[3], at C[0]
+ xor @D[2], at C[4]
+ rol \$$rhotates[0][3], at C[0]
+ xor @D[1], at C[3]
+ xor @D[4], at C[1]
+ rol \$$rhotates[4][2], at C[4]
+ rol \$$rhotates[3][1], at C[3]
+ xor @D[0], at C[2]
+ rol \$$rhotates[1][4], at C[1]
+ mov @C[0], at T[0]
+ or @C[4], at C[0]
+ rol \$$rhotates[2][0], at C[2]
+
+ xor @C[3], at C[0] # C[3] ^ (C[0] | C[4])
+ mov @C[0],$A[1][3](%rsi) # R[1][3] = C[3] ^ (C[0] | C[4])
+
+ mov @C[1], at T[1]
+ and @T[0], at C[1]
+ xor @C[4], at C[1] # C[4] ^ (C[1] & C[0])
+ not @C[4]
+ mov @C[1],$A[1][4](%rsi) # R[1][4] = C[4] ^ (C[1] & C[0])
+
+ or @C[3], at C[4]
+ xor @C[2], at C[4] # C[2] ^ (~C[4] | C[3])
+ mov @C[4],$A[1][2](%rsi) # R[1][2] = C[2] ^ (~C[4] | C[3])
+
+ and @C[2], at C[3]
+ xor @T[1], at C[3] # C[1] ^ (C[3] & C[2])
+ mov @C[3],$A[1][1](%rsi) # R[1][1] = C[1] ^ (C[3] & C[2])
+
+ or @C[2], at T[1]
+ xor @T[0], at T[1] # C[0] ^ (C[1] | C[2])
+ mov @T[1],$A[1][0](%rsi) # R[1][0] = C[0] ^ (C[1] | C[2])
+
+
+ mov $A[2][3](%rdi), at C[2]
+ mov $A[3][4](%rdi), at C[3]
+ mov $A[1][2](%rdi), at C[1]
+ mov $A[4][0](%rdi), at C[4]
+ mov $A[0][1](%rdi), at C[0]
+
+ xor @D[3], at C[2]
+ xor @D[4], at C[3]
+ rol \$$rhotates[2][3], at C[2]
+ xor @D[2], at C[1]
+ rol \$$rhotates[3][4], at C[3]
+ xor @D[0], at C[4]
+ rol \$$rhotates[1][2], at C[1]
+ xor @D[1], at C[0]
+ rol \$$rhotates[4][0], at C[4]
+ mov @C[2], at T[0]
+ and @C[3], at C[2]
+ rol \$$rhotates[0][1], at C[0]
+
+ not @C[3]
+ xor @C[1], at C[2] # C[1] ^ ( C[2] & C[3])
+ mov @C[2],$A[2][1](%rsi) # R[2][1] = C[1] ^ ( C[2] & C[3])
+
+ mov @C[4], at T[1]
+ and @C[3], at C[4]
+ xor @T[0], at C[4] # C[2] ^ ( C[4] & ~C[3])
+ mov @C[4],$A[2][2](%rsi) # R[2][2] = C[2] ^ ( C[4] & ~C[3])
+
+ or @C[1], at T[0]
+ xor @C[0], at T[0] # C[0] ^ ( C[2] | C[1])
+ mov @T[0],$A[2][0](%rsi) # R[2][0] = C[0] ^ ( C[2] | C[1])
+
+ and @C[0], at C[1]
+ xor @T[1], at C[1] # C[4] ^ ( C[1] & C[0])
+ mov @C[1],$A[2][4](%rsi) # R[2][4] = C[4] ^ ( C[1] & C[0])
+
+ or @T[1], at C[0]
+ xor @C[3], at C[0] # ~C[3] ^ ( C[0] | C[4])
+ mov @C[0],$A[2][3](%rsi) # R[2][3] = ~C[3] ^ ( C[0] | C[4])
+
+
+ mov $A[2][1](%rdi), at C[2]
+ mov $A[3][2](%rdi), at C[3]
+ mov $A[1][0](%rdi), at C[1]
+ mov $A[4][3](%rdi), at C[4]
+ mov $A[0][4](%rdi), at C[0]
+
+ xor @D[1], at C[2]
+ xor @D[2], at C[3]
+ rol \$$rhotates[2][1], at C[2]
+ xor @D[0], at C[1]
+ rol \$$rhotates[3][2], at C[3]
+ xor @D[3], at C[4]
+ rol \$$rhotates[1][0], at C[1]
+ xor @D[4], at C[0]
+ rol \$$rhotates[4][3], at C[4]
+ mov @C[2], at T[0]
+ or @C[3], at C[2]
+ rol \$$rhotates[0][4], at C[0]
+
+ not @C[3]
+ xor @C[1], at C[2] # C[1] ^ ( C[2] | C[3])
+ mov @C[2],$A[3][1](%rsi) # R[3][1] = C[1] ^ ( C[2] | C[3])
+
+ mov @C[4], at T[1]
+ or @C[3], at C[4]
+ xor @T[0], at C[4] # C[2] ^ ( C[4] | ~C[3])
+ mov @C[4],$A[3][2](%rsi) # R[3][2] = C[2] ^ ( C[4] | ~C[3])
+
+ and @C[1], at T[0]
+ xor @C[0], at T[0] # C[0] ^ ( C[2] & C[1])
+ mov @T[0],$A[3][0](%rsi) # R[3][0] = C[0] ^ ( C[2] & C[1])
+
+ or @C[0], at C[1]
+ xor @T[1], at C[1] # C[4] ^ ( C[1] | C[0])
+ mov @C[1],$A[3][4](%rsi) # R[3][4] = C[4] ^ ( C[1] | C[0])
+
+ and @T[1], at C[0]
+ xor @C[3], at C[0] # ~C[3] ^ ( C[0] & C[4])
+ mov @C[0],$A[3][3](%rsi) # R[3][3] = ~C[3] ^ ( C[0] & C[4])
+
+
+ xor $A[0][2](%rdi), at D[2]
+ xor $A[1][3](%rdi), at D[3]
+ rol \$$rhotates[0][2], at D[2]
+ xor $A[4][1](%rdi), at D[1]
+ rol \$$rhotates[1][3], at D[3]
+ xor $A[2][4](%rdi), at D[4]
+ rol \$$rhotates[4][1], at D[1]
+ xor $A[3][0](%rdi), at D[0]
+ xchg %rsi,%rdi
+ rol \$$rhotates[2][4], at D[4]
+ rol \$$rhotates[3][0], at D[0]
+___
+ @C = (@D[2], at D[3], at D[4], at D[0], at D[1]);
+$code.=<<___;
+ mov @C[0], at T[0]
+ and @C[1], at C[0]
+ not @C[1]
+ xor @C[4], at C[0] # C[4] ^ ( C[0] & C[1])
+ mov @C[0],$A[4][4](%rdi) # R[4][4] = C[4] ^ ( C[0] & C[1])
+
+ mov @C[2], at T[1]
+ and @C[1], at C[2]
+ xor @T[0], at C[2] # C[0] ^ ( C[2] & ~C[1])
+ mov @C[2],$A[4][0](%rdi) # R[4][0] = C[0] ^ ( C[2] & ~C[1])
+
+ or @C[4], at T[0]
+ xor @C[3], at T[0] # C[3] ^ ( C[0] | C[4])
+ mov @T[0],$A[4][3](%rdi) # R[4][3] = C[3] ^ ( C[0] | C[4])
+
+ and @C[3], at C[4]
+ xor @T[1], at C[4] # C[2] ^ ( C[4] & C[3])
+ mov @C[4],$A[4][2](%rdi) # R[4][2] = C[2] ^ ( C[4] & C[3])
+
+ or @T[1], at C[3]
+ xor @C[1], at C[3] # ~C[1] ^ ( C[2] | C[3])
+ mov @C[3],$A[4][1](%rdi) # R[4][1] = ~C[1] ^ ( C[2] | C[3])
+
+ mov @C[0], at C[1] # harmonize with the loop top
+ mov @T[0], at C[0]
+
+ test \$255,$iotas
+ jnz .Loop
+
+ lea -192($iotas),$iotas # rewind iotas
+ ret
+.size __KeccakF1600,.-__KeccakF1600
+
+.globl KeccakF1600
+.type KeccakF1600,\@function
+.align 32
+KeccakF1600:
+.cfi_startproc
+ push %rbx
+.cfi_push %rbx
+ push %rbp
+.cfi_push %rbp
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+
+ lea 100(%rdi),%rdi # size optimization
+ sub \$200,%rsp
+.cfi_adjust_cfa_offset 200
+
+ notq $A[0][1](%rdi)
+ notq $A[0][2](%rdi)
+ notq $A[1][3](%rdi)
+ notq $A[2][2](%rdi)
+ notq $A[3][2](%rdi)
+ notq $A[4][0](%rdi)
+
+ lea iotas(%rip),$iotas
+ lea 100(%rsp),%rsi # size optimization
+
+ call __KeccakF1600
+
+ notq $A[0][1](%rdi)
+ notq $A[0][2](%rdi)
+ notq $A[1][3](%rdi)
+ notq $A[2][2](%rdi)
+ notq $A[3][2](%rdi)
+ notq $A[4][0](%rdi)
+ lea -100(%rdi),%rdi # preserve A[][]
+
+ add \$200,%rsp
+.cfi_adjust_cfa_offset -200
+
+ pop %r15
+.cfi_pop %r15
+ pop %r14
+.cfi_pop %r14
+ pop %r13
+.cfi_pop %r13
+ pop %r12
+.cfi_pop %r12
+ pop %rbp
+.cfi_pop %rbp
+ pop %rbx
+.cfi_pop %rbx
+ ret
+.cfi_endproc
+.size KeccakF1600,.-KeccakF1600
+___
+
+{ my ($A_flat,$inp,$len,$bsz) = ("%rdi","%rsi","%rdx","%rcx");
+ ($A_flat,$inp) = ("%r8","%r9");
+$code.=<<___;
+.globl SHA3_absorb
+.type SHA3_absorb,\@function
+.align 32
+SHA3_absorb:
+.cfi_startproc
+ push %rbx
+.cfi_push %rbx
+ push %rbp
+.cfi_push %rbp
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+
+ lea 100(%rdi),%rdi # size optimization
+ sub \$232,%rsp
+.cfi_adjust_cfa_offset 232
+
+ mov %rsi,$inp
+ lea 100(%rsp),%rsi # size optimization
+
+ notq $A[0][1](%rdi)
+ notq $A[0][2](%rdi)
+ notq $A[1][3](%rdi)
+ notq $A[2][2](%rdi)
+ notq $A[3][2](%rdi)
+ notq $A[4][0](%rdi)
+ lea iotas(%rip),$iotas
+
+ mov $bsz,216-100(%rsi) # save bsz
+
+.Loop_absorb:
+ cmp $bsz,$len
+ jc .Ldone_absorb
+
+ shr \$3,$bsz
+ lea -100(%rdi),$A_flat
+
+.Lblock_absorb:
+ mov ($inp),%rax
+ lea 8($inp),$inp
+ xor ($A_flat),%rax
+ lea 8($A_flat),$A_flat
+ sub \$8,$len
+ mov %rax,-8($A_flat)
+ sub \$1,$bsz
+ jnz .Lblock_absorb
+
+ mov $inp,200-100(%rsi) # save inp
+ mov $len,208-100(%rsi) # save len
+ call __KeccakF1600
+ mov 200-100(%rsi),$inp # pull inp
+ mov 208-100(%rsi),$len # pull len
+ mov 216-100(%rsi),$bsz # pull bsz
+ jmp .Loop_absorb
+
+.align 32
+.Ldone_absorb:
+ mov $len,%rax # return value
+
+ notq $A[0][1](%rdi)
+ notq $A[0][2](%rdi)
+ notq $A[1][3](%rdi)
+ notq $A[2][2](%rdi)
+ notq $A[3][2](%rdi)
+ notq $A[4][0](%rdi)
+
+ add \$232,%rsp
+.cfi_adjust_cfa_offset -232
+
+ pop %r15
+.cfi_pop %r15
+ pop %r14
+.cfi_pop %r14
+ pop %r13
+.cfi_pop %r13
+ pop %r12
+.cfi_pop %r12
+ pop %rbp
+.cfi_pop %rbp
+ pop %rbx
+.cfi_pop %rbx
+ ret
+.cfi_endproc
+.size SHA3_absorb,.-SHA3_absorb
+___
+}
+{ my ($A_flat,$out,$len,$bsz) = ("%rdi","%rsi","%rdx","%rcx");
+ ($out,$len,$bsz) = ("%r12","%r13","%r14");
+
+$code.=<<___;
+.globl SHA3_squeeze
+.type SHA3_squeeze,\@function
+.align 32
+SHA3_squeeze:
+.cfi_startproc
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+
+ shr \$3,%rcx
+ mov $A_flat,%r8
+ mov %rsi,$out
+ mov %rdx,$len
+ mov %rcx,$bsz
+ jmp .Loop_squeeze
+
+.align 32
+.Loop_squeeze:
+ cmp \$8,$len
+ jb .Ltail_squeeze
+
+ mov (%r8),%rax
+ lea 8(%r8),%r8
+ mov %rax,($out)
+ lea 8($out),$out
+ sub \$8,$len # len -= 8
+ jz .Ldone_squeeze
+
+ sub \$1,%rcx # bsz--
+ jnz .Loop_squeeze
+
+ call KeccakF1600
+ mov $A_flat,%r8
+ mov $bsz,%rcx
+ jmp .Loop_squeeze
+
+.Ltail_squeeze:
+ mov %r8, %rsi
+ mov $out,%rdi
+ mov $len,%rcx
+ .byte 0xf3,0xa4 # rep movsb
+
+.Ldone_squeeze:
+ pop %r14
+.cfi_pop %r14
+ pop %r13
+.cfi_pop %r13
+ pop %r12
+.cfi_pop	%r12
+ ret
+.cfi_endproc
+.size SHA3_squeeze,.-SHA3_squeeze
+___
+}
+$code.=<<___;
+.align 256
+ .quad 0,0,0,0,0,0,0,0
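+	# the 64 zero bytes above place iotas at offset 64 within the
+	# 256-byte-aligned block, so that after 24 increments by 8 the
+	# low byte of the iotas pointer wraps to zero and the
+	# "test	\$255,..." check in __KeccakF1600 exits the loop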
+.type iotas,\@object
+iotas:
+ .quad 0x0000000000000001
+ .quad 0x0000000000008082
+ .quad 0x800000000000808a
+ .quad 0x8000000080008000
+ .quad 0x000000000000808b
+ .quad 0x0000000080000001
+ .quad 0x8000000080008081
+ .quad 0x8000000000008009
+ .quad 0x000000000000008a
+ .quad 0x0000000000000088
+ .quad 0x0000000080008009
+ .quad 0x000000008000000a
+ .quad 0x000000008000808b
+ .quad 0x800000000000008b
+ .quad 0x8000000000008089
+ .quad 0x8000000000008003
+ .quad 0x8000000000008002
+ .quad 0x8000000000000080
+ .quad 0x000000000000800a
+ .quad 0x800000008000000a
+ .quad 0x8000000080008081
+ .quad 0x8000000000008080
+ .quad 0x0000000080000001
+ .quad 0x8000000080008008
+.size iotas,.-iotas
+.asciz "Keccak-1600 absorb and squeeze for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+foreach (split("\n",$code)) {
+	# The replacement below results in 11.3 on Sandy Bridge and 9.4
+	# on Haswell, but it hurts other processors by up to 2-4x...
+ #s/rol\s+(\$[0-9]+),(%[a-z][a-z0-9]+)/shld\t$1,$2,$2/;
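+	# e.g. it would rewrite "rol	$1,%rax" as "shld	$1,%rax,%rax"
+	# (a hedged illustration of the substitution above).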
+
+ print $_, "\n";
+}
+
+close STDOUT;