[openssl-commits] [openssl] master update

Andy Polyakov appro at openssl.org
Wed Jun 21 14:27:35 UTC 2017


The branch master has been updated
       via  b5cdec2feac6049418543216ac5da70395697839 (commit)
       via  53ddf7dd05db4efc3080a6f52ee8d0857a957358 (commit)
      from  9924087573cfbc8d2bc97088f36d1a81ca00cda3 (commit)


- Log -----------------------------------------------------------------
commit b5cdec2feac6049418543216ac5da70395697839
Author: Andy Polyakov <appro at openssl.org>
Date:   Sun Jun 18 14:58:52 2017 +0200

    sha/asm/sha512p8-ppc.pl: add POWER8 performance data.
    
    [skip ci]
    
    Reviewed-by: Bernd Edlinger <bernd.edlinger at hotmail.de>
    Reviewed-by: Rich Salz <rsalz at openssl.org>
    (Merged from https://github.com/openssl/openssl/pull/3705)

commit 53ddf7dd05db4efc3080a6f52ee8d0857a957358
Author: Andy Polyakov <appro at openssl.org>
Date:   Sat Jun 17 13:46:29 2017 +0200

    Add Keccak-1600 modules for PPC64 and POWER8.
    
    [skip ci]
    
    Reviewed-by: Bernd Edlinger <bernd.edlinger at hotmail.de>
    Reviewed-by: Rich Salz <rsalz at openssl.org>
    (Merged from https://github.com/openssl/openssl/pull/3705)

-----------------------------------------------------------------------

Summary of changes:
 crypto/sha/asm/keccak1600-ppc64.pl | 757 +++++++++++++++++++++++++++++++++
 crypto/sha/asm/keccak1600p8-ppc.pl | 850 +++++++++++++++++++++++++++++++++++++
 crypto/sha/asm/sha512p8-ppc.pl     |   9 +
 3 files changed, 1616 insertions(+)
 create mode 100755 crypto/sha/asm/keccak1600-ppc64.pl
 create mode 100755 crypto/sha/asm/keccak1600p8-ppc.pl

diff --git a/crypto/sha/asm/keccak1600-ppc64.pl b/crypto/sha/asm/keccak1600-ppc64.pl
new file mode 100755
index 0000000..f89f71c
--- /dev/null
+++ b/crypto/sha/asm/keccak1600-ppc64.pl
@@ -0,0 +1,757 @@
+#!/usr/bin/env perl
+# Copyright 2017 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+#
+# ====================================================================
+# Written by Andy Polyakov <appro at openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# Keccak-1600 for PPC64.
+#
+# June 2017.
+#
+# This is a straightforward KECCAK_1X_ALT implementation that works on
+# *any* PPC64. PowerISA 2.07 adds a 2x64-bit vector rotate, with which
+# it's possible to achieve better performance than below, but that is
+# naturally an option only for POWER8 and successors...
+#
+######################################################################
+# Numbers are cycles per processed byte.
+#
+#		r=1088(*)
+#
+# PPC970/G5	14.6/+120%
+# POWER7	10.3/+100%
+# POWER8	11.5/+85%
+#
+# (*)	Corresponds to SHA3-256. Percentage after slash is improvement
+#	over gcc-4.x-generated KECCAK_1X_ALT code. Newer compilers do
+#	much better (but watch out for them generating code specific
+#	to the processor they execute on).
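+#
+# For orientation, one round of the KECCAK_1X_ALT scheme this module
+# unrolls begins roughly like the following C sketch (illustrative
+# only, using the same A[y][x] indexing as the code below, not the
+# exact keccak1600.c source):
+#
+#	for (x = 0; x < 5; x++)			/* Theta */
+#		C[x] = A[0][x]^A[1][x]^A[2][x]^A[3][x]^A[4][x];
+#	for (x = 0; x < 5; x++) {
+#		uint64_t D = C[(x+4)%5] ^ ROL64(C[(x+1)%5], 1);
+#		for (y = 0; y < 5; y++)
+#			A[y][x] ^= D;
+#	}
+#	/* ...followed by Rho+Pi, Chi and Iota, annotated below */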
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+	$SIZE_T	=8;
+	$LRSAVE	=2*$SIZE_T;
+	$UCMP	="cmpld";
+	$STU	="stdu";
+	$POP	="ld";
+	$PUSH	="std";
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$FRAME=24*$SIZE_T+6*$SIZE_T+32;
+$LOCALS=6*$SIZE_T;
+$TEMP=$LOCALS+6*$SIZE_T;
+
+my $sp ="r1";
+
+my @A = map([ "r$_", "r".($_+1), "r".($_+2), "r".($_+3), "r".($_+4) ],
+            (7, 12, 17, 22, 27));
+   $A[1][1] = "r6"; # r13 is reserved
+
+my @C = map("r$_", (0,3,4,5));
+
+my @rhotates = ([  0,  1, 62, 28, 27 ],
+                [ 36, 44,  6, 55, 20 ],
+                [  3, 10, 43, 25, 39 ],
+                [ 41, 45, 15, 21,  8 ],
+                [ 18,  2, 61, 56, 14 ]);
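+
+# Sanity check of the table above against the Keccak reference rho
+# schedule, an addition for illustration: offset t is (t+1)(t+2)/2
+# mod 64 along the (x,y) -> (y,2x+3y) walk.
+{
+	my @r = ([0]);		# r[y][x]; lane (0,0) is not rotated
+	my ($x,$y) = (1,0);
+	for my $t (0..23) {
+		$r[$y][$x] = (($t+1)*($t+2)/2)%64;
+		($x,$y) = ($y,(2*$x+3*$y)%5);
+	}
+	for my $i (0..4) { for my $j (0..4) {
+		die "bad rhotates[$i][$j]" if $r[$i][$j] != $rhotates[$i][$j];
+	} }
+}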
+
+$code.=<<___;
+.text
+
+.type	KeccakF1600_int,\@function
+.align	5
+KeccakF1600_int:
+	li	r0,24
+	mtctr	r0
+	b	.Loop
+.align	4
+.Loop:
+	xor	$C[0],$A[0][0],$A[1][0]		; Theta
+	std	$A[0][4],`$TEMP+0`($sp)
+	xor	$C[1],$A[0][1],$A[1][1]
+	std	$A[1][4],`$TEMP+8`($sp)
+	xor	$C[2],$A[0][2],$A[1][2]
+	std	$A[2][4],`$TEMP+16`($sp)
+	xor	$C[3],$A[0][3],$A[1][3]
+	std	$A[3][4],`$TEMP+24`($sp)
+___
+	$C[4]=$A[0][4];
+	$C[5]=$A[1][4];
+	$C[6]=$A[2][4];
+	$C[7]=$A[3][4];
+$code.=<<___;
+	xor	$C[4],$A[0][4],$A[1][4]
+	xor	$C[0],$C[0],$A[2][0]
+	xor	$C[1],$C[1],$A[2][1]
+	xor	$C[2],$C[2],$A[2][2]
+	xor	$C[3],$C[3],$A[2][3]
+	xor	$C[4],$C[4],$A[2][4]
+	xor	$C[0],$C[0],$A[3][0]
+	xor	$C[1],$C[1],$A[3][1]
+	xor	$C[2],$C[2],$A[3][2]
+	xor	$C[3],$C[3],$A[3][3]
+	xor	$C[4],$C[4],$A[3][4]
+	xor	$C[0],$C[0],$A[4][0]
+	xor	$C[2],$C[2],$A[4][2]
+	xor	$C[1],$C[1],$A[4][1]
+	xor	$C[3],$C[3],$A[4][3]
+	rotldi	$C[5],$C[2],1
+	xor	$C[4],$C[4],$A[4][4]
+	rotldi	$C[6],$C[3],1
+	xor	$C[5],$C[5],$C[0]
+	rotldi	$C[7],$C[4],1
+
+	xor	$A[0][1],$A[0][1],$C[5]
+	xor	$A[1][1],$A[1][1],$C[5]
+	xor	$A[2][1],$A[2][1],$C[5]
+	xor	$A[3][1],$A[3][1],$C[5]
+	xor	$A[4][1],$A[4][1],$C[5]
+
+	rotldi	$C[5],$C[0],1
+	xor	$C[6],$C[6],$C[1]
+	xor	$C[2],$C[2],$C[7]
+	rotldi	$C[7],$C[1],1
+	xor	$C[3],$C[3],$C[5]
+	xor	$C[4],$C[4],$C[7]
+
+	xor	$C[1],   $A[0][2],$C[6]			;mr	$C[1],$A[0][2]
+	xor	$A[1][2],$A[1][2],$C[6]
+	xor	$A[2][2],$A[2][2],$C[6]
+	xor	$A[3][2],$A[3][2],$C[6]
+	xor	$A[4][2],$A[4][2],$C[6]
+
+	xor	$A[0][0],$A[0][0],$C[4]
+	xor	$A[1][0],$A[1][0],$C[4]
+	xor	$A[2][0],$A[2][0],$C[4]
+	xor	$A[3][0],$A[3][0],$C[4]
+	xor	$A[4][0],$A[4][0],$C[4]
+___
+	$C[4]=undef;
+	$C[5]=undef;
+	$C[6]=undef;
+	$C[7]=undef;
+$code.=<<___;
+	ld	$A[0][4],`$TEMP+0`($sp)
+	xor	$C[0],   $A[0][3],$C[2]			;mr	$C[0],$A[0][3]
+	ld	$A[1][4],`$TEMP+8`($sp)
+	xor	$A[1][3],$A[1][3],$C[2]
+	ld	$A[2][4],`$TEMP+16`($sp)
+	xor	$A[2][3],$A[2][3],$C[2]
+	ld	$A[3][4],`$TEMP+24`($sp)
+	xor	$A[3][3],$A[3][3],$C[2]
+	xor	$A[4][3],$A[4][3],$C[2]
+
+	xor	$C[2],   $A[0][4],$C[3]			;mr	$C[2],$A[0][4]
+	xor	$A[1][4],$A[1][4],$C[3]
+	xor	$A[2][4],$A[2][4],$C[3]
+	xor	$A[3][4],$A[3][4],$C[3]
+	xor	$A[4][4],$A[4][4],$C[3]
+
+	mr	$C[3],$A[0][1]				; Rho+Pi
+	rotldi	$A[0][1],$A[1][1],$rhotates[1][1]
+	;mr	$C[1],$A[0][2]
+	rotldi	$A[0][2],$A[2][2],$rhotates[2][2]
+	;mr	$C[0],$A[0][3]
+	rotldi	$A[0][3],$A[3][3],$rhotates[3][3]
+	;mr	$C[2],$A[0][4]
+	rotldi	$A[0][4],$A[4][4],$rhotates[4][4]
+
+	rotldi	$A[1][1],$A[1][4],$rhotates[1][4]
+	rotldi	$A[2][2],$A[2][3],$rhotates[2][3]
+	rotldi	$A[3][3],$A[3][2],$rhotates[3][2]
+	rotldi	$A[4][4],$A[4][1],$rhotates[4][1]
+
+	rotldi	$A[1][4],$A[4][2],$rhotates[4][2]
+	rotldi	$A[2][3],$A[3][4],$rhotates[3][4]
+	rotldi	$A[3][2],$A[2][1],$rhotates[2][1]
+	rotldi	$A[4][1],$A[1][3],$rhotates[1][3]
+
+	rotldi	$A[4][2],$A[2][4],$rhotates[2][4]
+	rotldi	$A[3][4],$A[4][3],$rhotates[4][3]
+	rotldi	$A[2][1],$A[1][2],$rhotates[1][2]
+	rotldi	$A[1][3],$A[3][1],$rhotates[3][1]
+
+	rotldi	$A[2][4],$A[4][0],$rhotates[4][0]
+	rotldi	$A[4][3],$A[3][0],$rhotates[3][0]
+	rotldi	$A[1][2],$A[2][0],$rhotates[2][0]
+	rotldi	$A[3][1],$A[1][0],$rhotates[1][0]
+
+	rotldi	$A[1][0],$C[0],$rhotates[0][3]
+	rotldi	$A[2][0],$C[3],$rhotates[0][1]
+	rotldi	$A[3][0],$C[2],$rhotates[0][4]
+	rotldi	$A[4][0],$C[1],$rhotates[0][2]
+
+	andc	$C[0],$A[0][2],$A[0][1]			; Chi+Iota
+	andc	$C[1],$A[0][3],$A[0][2]
+	andc	$C[2],$A[0][0],$A[0][4]
+	andc	$C[3],$A[0][1],$A[0][0]
+	xor	$A[0][0],$A[0][0],$C[0]
+	andc	$C[0],$A[0][4],$A[0][3]
+	xor	$A[0][1],$A[0][1],$C[1]
+	 ld	$C[1],`$LOCALS+4*$SIZE_T`($sp)
+	xor	$A[0][3],$A[0][3],$C[2]
+	xor	$A[0][4],$A[0][4],$C[3]
+	xor	$A[0][2],$A[0][2],$C[0]
+	 ldu	$C[3],8($C[1])				; Iota[i++]
+
+	andc	$C[0],$A[1][2],$A[1][1]
+	 std	$C[1],`$LOCALS+4*$SIZE_T`($sp)
+	andc	$C[1],$A[1][3],$A[1][2]
+	andc	$C[2],$A[1][0],$A[1][4]
+	 xor	$A[0][0],$A[0][0],$C[3]			; A[0][0] ^= Iota
+	andc	$C[3],$A[1][1],$A[1][0]
+	xor	$A[1][0],$A[1][0],$C[0]
+	andc	$C[0],$A[1][4],$A[1][3]
+	xor	$A[1][1],$A[1][1],$C[1]
+	xor	$A[1][3],$A[1][3],$C[2]
+	xor	$A[1][4],$A[1][4],$C[3]
+	xor	$A[1][2],$A[1][2],$C[0]
+
+	andc	$C[0],$A[2][2],$A[2][1]
+	andc	$C[1],$A[2][3],$A[2][2]
+	andc	$C[2],$A[2][0],$A[2][4]
+	andc	$C[3],$A[2][1],$A[2][0]
+	xor	$A[2][0],$A[2][0],$C[0]
+	andc	$C[0],$A[2][4],$A[2][3]
+	xor	$A[2][1],$A[2][1],$C[1]
+	xor	$A[2][3],$A[2][3],$C[2]
+	xor	$A[2][4],$A[2][4],$C[3]
+	xor	$A[2][2],$A[2][2],$C[0]
+
+	andc	$C[0],$A[3][2],$A[3][1]
+	andc	$C[1],$A[3][3],$A[3][2]
+	andc	$C[2],$A[3][0],$A[3][4]
+	andc	$C[3],$A[3][1],$A[3][0]
+	xor	$A[3][0],$A[3][0],$C[0]
+	andc	$C[0],$A[3][4],$A[3][3]
+	xor	$A[3][1],$A[3][1],$C[1]
+	xor	$A[3][3],$A[3][3],$C[2]
+	xor	$A[3][4],$A[3][4],$C[3]
+	xor	$A[3][2],$A[3][2],$C[0]
+
+	andc	$C[0],$A[4][2],$A[4][1]
+	andc	$C[1],$A[4][3],$A[4][2]
+	andc	$C[2],$A[4][0],$A[4][4]
+	andc	$C[3],$A[4][1],$A[4][0]
+	xor	$A[4][0],$A[4][0],$C[0]
+	andc	$C[0],$A[4][4],$A[4][3]
+	xor	$A[4][1],$A[4][1],$C[1]
+	xor	$A[4][3],$A[4][3],$C[2]
+	xor	$A[4][4],$A[4][4],$C[3]
+	xor	$A[4][2],$A[4][2],$C[0]
+
+	bdnz	.Loop
+
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+.size	KeccakF1600_int,.-KeccakF1600_int
+
+.type	KeccakF1600,\@function
+.align	5
+KeccakF1600:
+	$STU	$sp,-$FRAME($sp)
+	mflr	r0
+	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
+	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
+	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
+	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
+	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
+	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
+	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
+	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
+	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
+	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
+	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
+	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
+	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
+	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
+	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
+	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
+	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
+	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
+	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
+
+	bl	PICmeup
+	subi	r12,r12,8			; prepare for ldu
+
+	$PUSH	r3,`$LOCALS+0*$SIZE_T`($sp)
+	;$PUSH	r4,`$LOCALS+1*$SIZE_T`($sp)
+	;$PUSH	r5,`$LOCALS+2*$SIZE_T`($sp)
+	;$PUSH	r6,`$LOCALS+3*$SIZE_T`($sp)
+	$PUSH	r12,`$LOCALS+4*$SIZE_T`($sp)
+
+	ld	$A[0][0],`8*0`(r3)		; load A[5][5]
+	ld	$A[0][1],`8*1`(r3)
+	ld	$A[0][2],`8*2`(r3)
+	ld	$A[0][3],`8*3`(r3)
+	ld	$A[0][4],`8*4`(r3)
+	ld	$A[1][0],`8*5`(r3)
+	ld	$A[1][1],`8*6`(r3)
+	ld	$A[1][2],`8*7`(r3)
+	ld	$A[1][3],`8*8`(r3)
+	ld	$A[1][4],`8*9`(r3)
+	ld	$A[2][0],`8*10`(r3)
+	ld	$A[2][1],`8*11`(r3)
+	ld	$A[2][2],`8*12`(r3)
+	ld	$A[2][3],`8*13`(r3)
+	ld	$A[2][4],`8*14`(r3)
+	ld	$A[3][0],`8*15`(r3)
+	ld	$A[3][1],`8*16`(r3)
+	ld	$A[3][2],`8*17`(r3)
+	ld	$A[3][3],`8*18`(r3)
+	ld	$A[3][4],`8*19`(r3)
+	ld	$A[4][0],`8*20`(r3)
+	ld	$A[4][1],`8*21`(r3)
+	ld	$A[4][2],`8*22`(r3)
+	ld	$A[4][3],`8*23`(r3)
+	ld	$A[4][4],`8*24`(r3)
+
+	bl	KeccakF1600_int
+
+	$POP	r3,`$LOCALS+0*$SIZE_T`($sp)
+	std	$A[0][0],`8*0`(r3)		; return A[5][5]
+	std	$A[0][1],`8*1`(r3)
+	std	$A[0][2],`8*2`(r3)
+	std	$A[0][3],`8*3`(r3)
+	std	$A[0][4],`8*4`(r3)
+	std	$A[1][0],`8*5`(r3)
+	std	$A[1][1],`8*6`(r3)
+	std	$A[1][2],`8*7`(r3)
+	std	$A[1][3],`8*8`(r3)
+	std	$A[1][4],`8*9`(r3)
+	std	$A[2][0],`8*10`(r3)
+	std	$A[2][1],`8*11`(r3)
+	std	$A[2][2],`8*12`(r3)
+	std	$A[2][3],`8*13`(r3)
+	std	$A[2][4],`8*14`(r3)
+	std	$A[3][0],`8*15`(r3)
+	std	$A[3][1],`8*16`(r3)
+	std	$A[3][2],`8*17`(r3)
+	std	$A[3][3],`8*18`(r3)
+	std	$A[3][4],`8*19`(r3)
+	std	$A[4][0],`8*20`(r3)
+	std	$A[4][1],`8*21`(r3)
+	std	$A[4][2],`8*22`(r3)
+	std	$A[4][3],`8*23`(r3)
+	std	$A[4][4],`8*24`(r3)
+
+	$POP	r0,`$FRAME+$LRSAVE`($sp)
+	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
+	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
+	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
+	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
+	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
+	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
+	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
+	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
+	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
+	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
+	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
+	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
+	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
+	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
+	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
+	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
+	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
+	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
+	mtlr	r0
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,4,1,0x80,18,1,0
+	.long	0
+.size	KeccakF1600,.-KeccakF1600
+
+.type	dword_le_load,\@function
+.align	5
+dword_le_load:
+	lbzu	r0,1(r3)
+	lbzu	r4,1(r3)
+	lbzu	r5,1(r3)
+	insrdi	r0,r4,8,48
+	lbzu	r4,1(r3)
+	insrdi	r0,r5,8,40
+	lbzu	r5,1(r3)
+	insrdi	r0,r4,8,32
+	lbzu	r4,1(r3)
+	insrdi	r0,r5,8,24
+	lbzu	r5,1(r3)
+	insrdi	r0,r4,8,16
+	lbzu	r4,1(r3)
+	insrdi	r0,r5,8,8
+	insrdi	r0,r4,8,0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,1,0
+	.long	0
+.size	dword_le_load,.-dword_le_load
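+
+; A C model of the routine above (an illustrative assumption): on
+; entry r3 points one byte *before* the input, on return it points
+; at the last byte consumed, i.e.
+;
+;	uint64_t dword_le_load(unsigned char **p)
+;	{
+;	    uint64_t ret = 0;
+;	    for (int i = 0; i < 8; i++)
+;	        ret |= (uint64_t)*++(*p) << (8*i);
+;	    return ret;
+;	}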
+
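+; The function below implements the C prototype used by OpenSSL's
+; keccak1600.c (parameter names follow the register comments below):
+;
+;	size_t SHA3_absorb(uint64_t A[5][5], const unsigned char *inp,
+;	                   size_t len, size_t bsz);
+;
+; absorbing full bsz-byte blocks and returning the count of trailing
+; bytes (less than bsz) left unprocessed.
+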
+.globl	SHA3_absorb
+.type	SHA3_absorb,\@function
+.align	5
+SHA3_absorb:
+	$STU	$sp,-$FRAME($sp)
+	mflr	r0
+	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
+	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
+	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
+	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
+	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
+	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
+	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
+	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
+	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
+	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
+	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
+	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
+	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
+	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
+	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
+	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
+	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
+	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
+	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
+
+	bl	PICmeup
+	subi	r4,r4,1				; prepare for lbzu
+	subi	r12,r12,8			; prepare for ldu
+
+	$PUSH	r3,`$LOCALS+0*$SIZE_T`($sp)	; save A[][]
+	$PUSH	r4,`$LOCALS+1*$SIZE_T`($sp)	; save inp
+	$PUSH	r5,`$LOCALS+2*$SIZE_T`($sp)	; save len
+	$PUSH	r6,`$LOCALS+3*$SIZE_T`($sp)	; save bsz
+	mr	r0,r6
+	$PUSH	r12,`$LOCALS+4*$SIZE_T`($sp)
+
+	ld	$A[0][0],`8*0`(r3)		; load A[5][5]
+	ld	$A[0][1],`8*1`(r3)
+	ld	$A[0][2],`8*2`(r3)
+	ld	$A[0][3],`8*3`(r3)
+	ld	$A[0][4],`8*4`(r3)
+	ld	$A[1][0],`8*5`(r3)
+	ld	$A[1][1],`8*6`(r3)
+	ld	$A[1][2],`8*7`(r3)
+	ld	$A[1][3],`8*8`(r3)
+	ld	$A[1][4],`8*9`(r3)
+	ld	$A[2][0],`8*10`(r3)
+	ld	$A[2][1],`8*11`(r3)
+	ld	$A[2][2],`8*12`(r3)
+	ld	$A[2][3],`8*13`(r3)
+	ld	$A[2][4],`8*14`(r3)
+	ld	$A[3][0],`8*15`(r3)
+	ld	$A[3][1],`8*16`(r3)
+	ld	$A[3][2],`8*17`(r3)
+	ld	$A[3][3],`8*18`(r3)
+	ld	$A[3][4],`8*19`(r3)
+	ld	$A[4][0],`8*20`(r3)
+	ld	$A[4][1],`8*21`(r3)
+	ld	$A[4][2],`8*22`(r3)
+	ld	$A[4][3],`8*23`(r3)
+	ld	$A[4][4],`8*24`(r3)
+
+	mr	r3,r4
+	mr	r4,r5
+	mr	r5,r0
+
+	b	.Loop_absorb
+
+.align	4
+.Loop_absorb:
+	$UCMP	r4,r5				; len < bsz?
+	blt	.Labsorbed
+
+	sub	r4,r4,r5			; len -= bsz
+	srwi	r5,r5,3
+	$PUSH	r4,`$LOCALS+2*$SIZE_T`($sp)	; save len
+	mtctr	r5
+	bl	dword_le_load			; *inp++
+	xor	$A[0][0],$A[0][0],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[0][1],$A[0][1],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[0][2],$A[0][2],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[0][3],$A[0][3],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[0][4],$A[0][4],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[1][0],$A[1][0],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[1][1],$A[1][1],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[1][2],$A[1][2],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[1][3],$A[1][3],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[1][4],$A[1][4],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[2][0],$A[2][0],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[2][1],$A[2][1],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[2][2],$A[2][2],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[2][3],$A[2][3],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[2][4],$A[2][4],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[3][0],$A[3][0],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[3][1],$A[3][1],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[3][2],$A[3][2],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[3][3],$A[3][3],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[3][4],$A[3][4],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[4][0],$A[4][0],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[4][1],$A[4][1],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[4][2],$A[4][2],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[4][3],$A[4][3],r0
+	bdz	.Lprocess_block
+	bl	dword_le_load			; *inp++
+	xor	$A[4][4],$A[4][4],r0
+
+.Lprocess_block:
+	$PUSH	r3,`$LOCALS+1*$SIZE_T`($sp)	; save inp
+
+	bl	KeccakF1600_int
+
+	$POP	r0,`$LOCALS+4*$SIZE_T`($sp)	; pull iotas[24]
+	$POP	r5,`$LOCALS+3*$SIZE_T`($sp)	; restore bsz
+	$POP	r4,`$LOCALS+2*$SIZE_T`($sp)	; restore len
+	$POP	r3,`$LOCALS+1*$SIZE_T`($sp)	; restore inp
+	addic	r0,r0,`-8*24`			; rewind iotas
+	$PUSH	r0,`$LOCALS+4*$SIZE_T`($sp)
+
+	b	.Loop_absorb
+
+.align	4
+.Labsorbed:
+	$POP	r3,`$LOCALS+0*$SIZE_T`($sp)
+	std	$A[0][0],`8*0`(r3)		; return A[5][5]
+	std	$A[0][1],`8*1`(r3)
+	std	$A[0][2],`8*2`(r3)
+	std	$A[0][3],`8*3`(r3)
+	std	$A[0][4],`8*4`(r3)
+	std	$A[1][0],`8*5`(r3)
+	std	$A[1][1],`8*6`(r3)
+	std	$A[1][2],`8*7`(r3)
+	std	$A[1][3],`8*8`(r3)
+	std	$A[1][4],`8*9`(r3)
+	std	$A[2][0],`8*10`(r3)
+	std	$A[2][1],`8*11`(r3)
+	std	$A[2][2],`8*12`(r3)
+	std	$A[2][3],`8*13`(r3)
+	std	$A[2][4],`8*14`(r3)
+	std	$A[3][0],`8*15`(r3)
+	std	$A[3][1],`8*16`(r3)
+	std	$A[3][2],`8*17`(r3)
+	std	$A[3][3],`8*18`(r3)
+	std	$A[3][4],`8*19`(r3)
+	std	$A[4][0],`8*20`(r3)
+	std	$A[4][1],`8*21`(r3)
+	std	$A[4][2],`8*22`(r3)
+	std	$A[4][3],`8*23`(r3)
+	std	$A[4][4],`8*24`(r3)
+
+	mr	r3,r4				; return value
+	$POP	r0,`$FRAME+$LRSAVE`($sp)
+	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
+	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
+	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
+	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
+	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
+	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
+	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
+	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
+	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
+	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
+	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
+	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
+	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
+	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
+	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
+	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
+	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
+	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
+	mtlr	r0
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,4,1,0x80,18,4,0
+	.long	0
+.size	SHA3_absorb,.-SHA3_absorb
+___
+{
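+# This implements the companion keccak1600.c prototype
+#
+#	void SHA3_squeeze(uint64_t A[5][5], unsigned char *out,
+#	                  size_t len, size_t bsz);
+#
+# emitting len output bytes and re-running KeccakF1600 whenever a
+# full bsz-byte block has been squeezed out.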
+my ($A_flat,$out,$len,$bsz) = map("r$_",(28..31));
+$code.=<<___;
+.globl	SHA3_squeeze
+.type	SHA3_squeeze,\@function
+.align	5
+SHA3_squeeze:
+	$STU	$sp,`-10*$SIZE_T`($sp)
+	mflr	r0
+	$PUSH	r28,`6*$SIZE_T`($sp)
+	$PUSH	r29,`7*$SIZE_T`($sp)
+	$PUSH	r30,`8*$SIZE_T`($sp)
+	$PUSH	r31,`9*$SIZE_T`($sp)
+	$PUSH	r0,`10*$SIZE_T+$LRSAVE`($sp)
+
+	mr	$A_flat,r3
+	subi	r3,r3,8			; prepare for ldu
+	subi	$out,r4,1		; prepare for stbu
+	mr	$len,r5
+	mr	$bsz,r6
+	b	.Loop_squeeze
+
+.align	4
+.Loop_squeeze:
+	ldu	r0,8(r3)
+	${UCMP}i $len,8
+	blt	.Lsqueeze_tail
+
+	stbu	r0,1($out)
+	srdi	r0,r0,8
+	stbu	r0,1($out)
+	srdi	r0,r0,8
+	stbu	r0,1($out)
+	srdi	r0,r0,8
+	stbu	r0,1($out)
+	srdi	r0,r0,8
+	stbu	r0,1($out)
+	srdi	r0,r0,8
+	stbu	r0,1($out)
+	srdi	r0,r0,8
+	stbu	r0,1($out)
+	srdi	r0,r0,8
+	stbu	r0,1($out)
+
+	subic.	$len,$len,8
+	beq	.Lsqueeze_done
+
+	subic.	r6,r6,8
+	bgt	.Loop_squeeze
+
+	mr	r3,$A_flat
+	bl	KeccakF1600
+	subi	r3,$A_flat,8		; prepare for ldu
+	mr	r6,$bsz
+	b	.Loop_squeeze
+
+.align	4
+.Lsqueeze_tail:
+	mtctr	$len
+.Loop_tail:
+	stbu	r0,1($out)
+	srdi	r0,r0,8
+	bdnz	.Loop_tail
+
+.Lsqueeze_done:
+	$POP	r0,`10*$SIZE_T+$LRSAVE`($sp)
+	$POP	r28,`6*$SIZE_T`($sp)
+	$POP	r29,`7*$SIZE_T`($sp)
+	$POP	r30,`8*$SIZE_T`($sp)
+	$POP	r31,`9*$SIZE_T`($sp)
+	mtlr	r0
+	addi	$sp,$sp,`10*$SIZE_T`
+	blr
+	.long	0
+	.byte	0,12,4,1,0x80,4,4,0
+	.long	0
+.size	SHA3_squeeze,.-SHA3_squeeze
+___
+}
+
+# Ugly hack here, because PPC assembler syntax seems to vary too
+# much from platform to platform...
+$code.=<<___;
+.align	6
+PICmeup:
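+	; The bcl 20,31 below is a branch to the immediately following
+	; instruction, taken only for its side effect of loading LR
+	; with that instruction's address; the addi then adds the
+	; distance to the iotas table, which starts at the next
+	; 64-byte boundary thanks to .align 6 above and the .space
+	; pad below.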
+	mflr	r0
+	bcl	20,31,\$+4
+	mflr	r12   ; vvvvvv "distance" between . and 1st data entry
+	addi	r12,r12,`64-8`
+	mtlr	r0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+	.space	`64-9*4`
+.type	iotas,\@object
+iotas:
+	.quad	0x0000000000000001
+	.quad	0x0000000000008082
+	.quad	0x800000000000808a
+	.quad	0x8000000080008000
+	.quad	0x000000000000808b
+	.quad	0x0000000080000001
+	.quad	0x8000000080008081
+	.quad	0x8000000000008009
+	.quad	0x000000000000008a
+	.quad	0x0000000000000088
+	.quad	0x0000000080008009
+	.quad	0x000000008000000a
+	.quad	0x000000008000808b
+	.quad	0x800000000000008b
+	.quad	0x8000000000008089
+	.quad	0x8000000000008003
+	.quad	0x8000000000008002
+	.quad	0x8000000000000080
+	.quad	0x000000000000800a
+	.quad	0x800000008000000a
+	.quad	0x8000000080008081
+	.quad	0x8000000000008080
+	.quad	0x0000000080000001
+	.quad	0x8000000080008008
+.size	iotas,.-iotas
+.asciz	"Keccak-1600 absorb and squeeze for PPC64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
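+# Backtick-quoted arithmetic embedded in the code text above is
+# constant-folded by the substitution below, so e.g. with $SIZE_T=8
+# ($FRAME=272) the line "$PUSH r14,`$FRAME-$SIZE_T*18`($sp)" is
+# emitted as "std r14,128(r1)".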
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/crypto/sha/asm/keccak1600p8-ppc.pl b/crypto/sha/asm/keccak1600p8-ppc.pl
new file mode 100755
index 0000000..feec688
--- /dev/null
+++ b/crypto/sha/asm/keccak1600p8-ppc.pl
@@ -0,0 +1,850 @@
+#!/usr/bin/env perl
+# Copyright 2017 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+#
+# ====================================================================
+# Written by Andy Polyakov <appro at openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# Keccak-1600 for PowerISA 2.07.
+#
+# June 2017.
+#
+# This is a straightforward KECCAK_1X_ALT SIMD implementation, but
+# with disjoint Rho and Pi. The module is ABI-bitness- and endian-
+# neutral. A POWER8 processor spends 9.8 cycles per byte processed
+# out of a large buffer for r=1088, which corresponds to SHA3-256.
+# This is 17% better than the scalar PPC64 code. It should probably
+# be noted that if POWER8's successor can achieve a higher scalar
+# instruction issue rate, then this module will lose...
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+	$SIZE_T	=8;
+	$LRSAVE	=2*$SIZE_T;
+	$UCMP	="cmpld";
+	$STU	="stdu";
+	$POP	="ld";
+	$PUSH	="std";
+} elsif ($flavour =~ /32/) {
+	$SIZE_T	=4;
+	$LRSAVE	=$SIZE_T;
+	$STU	="stwu";
+	$POP	="lwz";
+	$PUSH	="stw";
+	$UCMP	="cmplw";
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$FRAME=6*$SIZE_T+13*16;	# 13*16 is for v20-v31 offload
+
+my $sp ="r1";
+
+my $iotas = "r12";
+
+########################################################################
+# Register layout:
+#
+# v0		A[0][0] A[1][0]
+# v1		A[0][1] A[1][1]
+# v2		A[0][2] A[1][2]
+# v3		A[0][3] A[1][3]
+# v4		A[0][4] A[1][4]
+#
+# v5		A[2][0] A[3][0]
+# v6		A[2][1] A[3][1]
+# v7		A[2][2] A[3][2]
+# v8		A[2][3] A[3][3]
+# v9		A[2][4] A[3][4]
+#
+# v10		A[4][0] A[4][1]
+# v11		A[4][2] A[4][3]
+# v12		A[4][4] A[4][4]
+#
+# v13..25	rhotates[][]
+# v26..31	volatile
+#
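+# Lanes move between these pairings with vpermdi, which builds its
+# result from one 64-bit half of each source operand: selector bit 0
+# picks the first-listed lane, 1 the second. E.g. in Theta below,
+#
+#	vxor	v26,v0,v5	 ; { A[0][0]^A[2][0], A[1][0]^A[3][0] }
+#	vxor	v27,v1,v6	 ; { A[0][1]^A[2][1], A[1][1]^A[3][1] }
+#	vpermdi	v31,v26,v27,0b00 ; { A[0][0]^A[2][0], A[0][1]^A[2][1] }
+#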
+$code.=<<___;
+.machine	"any"
+.text
+
+.type	KeccakF1600_int,\@function
+.align	5
+KeccakF1600_int:
+	li	r0,24
+	mtctr	r0
+	li	r0,0
+	b	.Loop
+
+.align	4
+.Loop:
+	;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Theta
+	vxor	v26,v0, v5		; A[0..1][0]^A[2..3][0]
+	vxor	v27,v1, v6		; A[0..1][1]^A[2..3][1]
+	vxor	v28,v2, v7		; A[0..1][2]^A[2..3][2]
+	vxor	v29,v3, v8		; A[0..1][3]^A[2..3][3]
+	vxor	v30,v4, v9		; A[0..1][4]^A[2..3][4]
+	vpermdi	v31,v26,v27,0b00	; A[0][0..1]^A[2][0..1]
+	vpermdi	v26,v26,v27,0b11	; A[1][0..1]^A[3][0..1]
+	vpermdi	v27,v28,v29,0b00	; A[0][2..3]^A[2][2..3]
+	vpermdi	v28,v28,v29,0b11	; A[1][2..3]^A[3][2..3]
+	vpermdi	v29,v30,v30,0b10	; A[1..0][4]^A[3..2][4]
+	vxor	v26,v26,v31		; C[0..1]
+	vxor	v27,v27,v28		; C[2..3]
+	vxor	v28,v29,v30		; C[4..4]
+	vspltisb v31,1
+	vxor	v26,v26,v10		; C[0..1] ^= A[4][0..1]
+	vxor	v27,v27,v11		; C[2..3] ^= A[4][2..3]
+	vxor	v28,v28,v12		; C[4..4] ^= A[4][4..4], low!
+
+	vrld	v29,v26,v31		; ROL64(C[0..1],1)
+	vrld	v30,v27,v31		; ROL64(C[2..3],1)
+	vrld	v31,v28,v31		; ROL64(C[4..4],1)
+	vpermdi	v31,v31,v29,0b10
+	vxor	v26,v26,v30		; C[0..1] ^= ROL64(C[2..3],1)
+	vxor	v27,v27,v31		; C[2..3] ^= ROL64(C[4..0],1)
+	vxor	v28,v28,v29		; C[4..4] ^= ROL64(C[0..1],1), low!
+
+	vpermdi	v29,v26,v26,0b00	; C[0..0]
+	vpermdi	v30,v28,v26,0b10	; C[4..0]
+	vpermdi	v31,v28,v28,0b11	; C[4..4]
+	vxor	v1, v1, v29		; A[0..1][1] ^= C[0..0]
+	vxor	v6, v6, v29		; A[2..3][1] ^= C[0..0]
+	vxor	v10,v10,v30		; A[4][0..1] ^= C[4..0]
+	vxor	v0, v0, v31		; A[0..1][0] ^= C[4..4]
+	vxor	v5, v5, v31		; A[2..3][0] ^= C[4..4]
+
+	vpermdi	v29,v27,v27,0b00	; C[2..2]
+	vpermdi	v30,v26,v26,0b11	; C[1..1]
+	vpermdi	v31,v26,v27,0b10	; C[1..2]
+	vxor	v3, v3, v29		; A[0..1][3] ^= C[2..2]
+	vxor	v8, v8, v29		; A[2..3][3] ^= C[2..2]
+	vxor	v2, v2, v30		; A[0..1][2] ^= C[1..1]
+	vxor	v7, v7, v30		; A[2..3][2] ^= C[1..1]
+	vxor	v11,v11,v31		; A[4][2..3] ^= C[1..2]
+
+	vpermdi	v29,v27,v27,0b11	; C[3..3]
+	vxor	v4, v4, v29		; A[0..1][4] ^= C[3..3]
+	vxor	v9, v9, v29		; A[2..3][4] ^= C[3..3]
+	vxor	v12,v12,v29		; A[4..4][4] ^= C[3..3]
+
+	;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Rho
+	vrld	v26,v0, v13		; v0
+	vrld	v1, v1, v14
+	vrld	v27,v2, v15		; v2
+	vrld	v28,v3, v16		; v3
+	vrld	v4, v4, v17
+	vrld	v5, v5, v18
+	vrld	v6, v6, v19
+	vrld	v29,v7, v20		; v7
+	vrld	v8, v8, v21
+	vrld	v9, v9, v22
+	vrld	v10,v10,v23
+	vrld	v30,v11,v24		; v11
+	vrld	v12,v12,v25
+
+	;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Pi
+	vpermdi	v0, v26,v28,0b00	; [0][0] [1][0] < [0][0] [0][3]
+	vpermdi	v2, v29,v5, 0b00	; [0][2] [1][2] < [2][2] [2][0]
+	vpermdi	v11,v9, v5, 0b01	; [4][2] [4][3] < [2][4] [3][0]
+	vpermdi	v5, v1, v4, 0b00	; [2][0] [3][0] < [0][1] [0][4]
+	vpermdi	v1, v1, v4, 0b11	; [0][1] [1][1] < [1][1] [1][4]
+	vpermdi	v3, v8, v6, 0b11	; [0][3] [1][3] < [3][3] [3][1]
+	vpermdi	v4, v12,v30,0b10	; [0][4] [1][4] < [4][4] [4][2]
+	vpermdi	v7, v8, v6, 0b00	; [2][2] [3][2] < [2][3] [2][1]
+	vpermdi	v6, v27,v26,0b11	; [2][1] [3][1] < [1][2] [1][0]
+	vpermdi	v8, v9, v29,0b11	; [2][3] [3][3] < [3][4] [3][2]
+	vpermdi	v12,v10,v10,0b11	; [4][4] [4][4] < [4][1] [4][1]
+	vpermdi	v9, v10,v30,0b01	; [2][4] [3][4] < [4][0] [4][3]
+	vpermdi	v10,v27,v28,0b01	; [4][0] [4][1] < [0][2] [1][3]
+
+	;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Chi + Iota
+	lvx_u	v31,$iotas,r0		; iotas[index]
+	addic	r0,r0,16		; index++
+
+	vandc	v26,v2, v1		; (~A[0..1][1] & A[0..1][2])
+	vandc	v27,v3, v2		; (~A[0..1][2] & A[0..1][3])
+	vandc	v28,v4, v3		; (~A[0..1][3] & A[0..1][4])
+	vandc	v29,v0, v4		; (~A[0..1][4] & A[0..1][0])
+	vandc	v30,v1, v0		; (~A[0..1][0] & A[0..1][1])
+	vxor	v0, v0, v26		; A[0..1][0] ^= (~A[0..1][1] & A[0..1][2])
+	vxor	v1, v1, v27		; A[0..1][1] ^= (~A[0..1][2] & A[0..1][3])
+	vxor	v2, v2, v28		; A[0..1][2] ^= (~A[0..1][3] & A[0..1][4])
+	vxor	v3, v3, v29		; A[0..1][3] ^= (~A[0..1][4] & A[0..1][0])
+	vxor	v4, v4, v30		; A[0..1][4] ^= (~A[0..1][0] & A[0..1][1])
+
+	vandc	v26,v7, v6		; (~A[2..3][1] & A[2..3][2])
+	vandc	v27,v8, v7		; (~A[2..3][2] & A[2..3][3])
+	vandc	v28,v9, v8		; (~A[2..3][3] & A[2..3][4])
+	vandc	v29,v5, v9		; (~A[2..3][4] & A[2..3][0])
+	vandc	v30,v6, v5		; (~A[2..3][0] & A[2..3][1])
+	vxor	v5, v5, v26		; A[2..3][0] ^= (~A[2..3][1] & A[2..3][2])
+	vxor	v6, v6, v27		; A[2..3][1] ^= (~A[2..3][2] & A[2..3][3])
+	vxor	v7, v7, v28		; A[2..3][2] ^= (~A[2..3][3] & A[2..3][4])
+	vxor	v8, v8, v29		; A[2..3][3] ^= (~A[2..3][4] & A[2..3][0])
+	vxor	v9, v9, v30		; A[2..3][4] ^= (~A[2..3][0] & A[2..3][1])
+
+	vxor	v0, v0, v31		; A[0][0] ^= iotas[index++]
+
+	vpermdi	v26,v10,v11,0b10	; A[4][1..2]
+	vpermdi	v27,v12,v10,0b00	; A[4][4..0]
+	vpermdi	v28,v11,v12,0b10	; A[4][3..4]
+	vpermdi	v29,v10,v10,0b10	; A[4][1..0]
+	vandc	v26,v11,v26		; (~A[4][1..2] & A[4][2..3])
+	vandc	v27,v27,v28		; (~A[4][3..4] & A[4][4..0])
+	vandc	v28,v10,v29		; (~A[4][1..0] & A[4][0..1])
+	vxor	v10,v10,v26		; A[4][0..1] ^= (~A[4][1..2] & A[4][2..3])
+	vxor	v11,v11,v27		; A[4][2..3] ^= (~A[4][3..4] & A[4][4..0])
+	vxor	v12,v12,v28		; A[4][4..4] ^= (~A[4][0..1] & A[4][1..0])
+
+	bdnz	.Loop
+
+	vpermdi	v12,v12,v12,0b11	; broadcast A[4][4]
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+.size	KeccakF1600_int,.-KeccakF1600_int
+
+.type	KeccakF1600,\@function
+.align	5
+KeccakF1600:
+	$STU	$sp,-$FRAME($sp)
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mflr	r8
+	mfspr	r7, 256			; save vrsave
+	stvx	v20,r10,$sp
+	addi	r10,r10,32
+	stvx	v21,r11,$sp
+	addi	r11,r11,32
+	stvx	v22,r10,$sp
+	addi	r10,r10,32
+	stvx	v23,r11,$sp
+	addi	r11,r11,32
+	stvx	v24,r10,$sp
+	addi	r10,r10,32
+	stvx	v25,r11,$sp
+	addi	r11,r11,32
+	stvx	v26,r10,$sp
+	addi	r10,r10,32
+	stvx	v27,r11,$sp
+	addi	r11,r11,32
+	stvx	v28,r10,$sp
+	addi	r10,r10,32
+	stvx	v29,r11,$sp
+	addi	r11,r11,32
+	stvx	v30,r10,$sp
+	stvx	v31,r11,$sp
+	stw	r7,`$FRAME-4`($sp)	; save vrsave
+	li	r0, -1
+	$PUSH	r8,`$FRAME+$LRSAVE`($sp)
+	mtspr	256, r0			; preserve all AltiVec registers
+
+	li	r11,16
+	lvx_4w	v0,0,r3			; load A[5][5]
+	li	r10,32
+	lvx_4w	v1,r11,r3
+	addi	r11,r11,32
+	lvx_4w	v2,r10,r3
+	addi	r10,r10,32
+	lvx_4w	v3,r11,r3
+	addi	r11,r11,32
+	lvx_4w	v4,r10,r3
+	addi	r10,r10,32
+	lvx_4w	v5,r11,r3
+	addi	r11,r11,32
+	lvx_4w	v6,r10,r3
+	addi	r10,r10,32
+	lvx_4w	v7,r11,r3
+	addi	r11,r11,32
+	lvx_4w	v8,r10,r3
+	addi	r10,r10,32
+	lvx_4w	v9,r11,r3
+	addi	r11,r11,32
+	lvx_4w	v10,r10,r3
+	addi	r10,r10,32
+	lvx_4w	v11,r11,r3
+	lvx_splt v12,r10,r3
+
+	bl	PICmeup
+
+	li	r11,16
+	lvx_u	v13,0,r12		; load rhotates
+	li	r10,32
+	lvx_u	v14,r11,r12
+	addi	r11,r11,32
+	lvx_u	v15,r10,r12
+	addi	r10,r10,32
+	lvx_u	v16,r11,r12
+	addi	r11,r11,32
+	lvx_u	v17,r10,r12
+	addi	r10,r10,32
+	lvx_u	v18,r11,r12
+	addi	r11,r11,32
+	lvx_u	v19,r10,r12
+	addi	r10,r10,32
+	lvx_u	v20,r11,r12
+	addi	r11,r11,32
+	lvx_u	v21,r10,r12
+	addi	r10,r10,32
+	lvx_u	v22,r11,r12
+	addi	r11,r11,32
+	lvx_u	v23,r10,r12
+	addi	r10,r10,32
+	lvx_u	v24,r11,r12
+	lvx_u	v25,r10,r12
+	addi	r12,r12,`16*16`		; points at iotas
+
+	bl	KeccakF1600_int
+
+	li	r11,16
+	stvx_4w	v0,0,r3			; return A[5][5]
+	li	r10,32
+	stvx_4w	v1,r11,r3
+	addi	r11,r11,32
+	stvx_4w	v2,r10,r3
+	addi	r10,r10,32
+	stvx_4w	v3,r11,r3
+	addi	r11,r11,32
+	stvx_4w	v4,r10,r3
+	addi	r10,r10,32
+	stvx_4w	v5,r11,r3
+	addi	r11,r11,32
+	stvx_4w	v6,r10,r3
+	addi	r10,r10,32
+	stvx_4w	v7,r11,r3
+	addi	r11,r11,32
+	stvx_4w	v8,r10,r3
+	addi	r10,r10,32
+	stvx_4w	v9,r11,r3
+	addi	r11,r11,32
+	stvx_4w	v10,r10,r3
+	addi	r10,r10,32
+	stvx_4w	v11,r11,r3
+	stvdx_u v12,r10,r3
+
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mtlr	r8
+	mtspr	256, r7			; restore vrsave
+	lvx	v20,r10,$sp
+	addi	r10,r10,32
+	lvx	v21,r11,$sp
+	addi	r11,r11,32
+	lvx	v22,r10,$sp
+	addi	r10,r10,32
+	lvx	v23,r11,$sp
+	addi	r11,r11,32
+	lvx	v24,r10,$sp
+	addi	r10,r10,32
+	lvx	v25,r11,$sp
+	addi	r11,r11,32
+	lvx	v26,r10,$sp
+	addi	r10,r10,32
+	lvx	v27,r11,$sp
+	addi	r11,r11,32
+	lvx	v28,r10,$sp
+	addi	r10,r10,32
+	lvx	v29,r11,$sp
+	addi	r11,r11,32
+	lvx	v30,r10,$sp
+	lvx	v31,r11,$sp
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,0x04,1,0x80,0,1,0
+	.long	0
+.size	KeccakF1600,.-KeccakF1600
+___
+{
+my ($A_jagged,$inp,$len,$bsz) = map("r$_",(3..6));
+
+$code.=<<___;
+.globl	SHA3_absorb
+.type	SHA3_absorb,\@function
+.align	5
+SHA3_absorb:
+	$STU	$sp,-$FRAME($sp)
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mflr	r8
+	mfspr	r7, 256			; save vrsave
+	stvx	v20,r10,$sp
+	addi	r10,r10,32
+	stvx	v21,r11,$sp
+	addi	r11,r11,32
+	stvx	v22,r10,$sp
+	addi	r10,r10,32
+	stvx	v23,r11,$sp
+	addi	r11,r11,32
+	stvx	v24,r10,$sp
+	addi	r10,r10,32
+	stvx	v25,r11,$sp
+	addi	r11,r11,32
+	stvx	v26,r10,$sp
+	addi	r10,r10,32
+	stvx	v27,r11,$sp
+	addi	r11,r11,32
+	stvx	v28,r10,$sp
+	addi	r10,r10,32
+	stvx	v29,r11,$sp
+	addi	r11,r11,32
+	stvx	v30,r10,$sp
+	stvx	v31,r11,$sp
+	stw	r7,`$FRAME-4`($sp)	; save vrsave
+	li	r0, -1
+	$PUSH	r8,`$FRAME+$LRSAVE`($sp)
+	mtspr	256, r0			; preserve all AltiVec registers
+
+	li	r11,16
+	lvx_4w	v0,0,$A_jagged		; load A[5][5]
+	li	r10,32
+	lvx_4w	v1,r11,$A_jagged
+	addi	r11,r11,32
+	lvx_4w	v2,r10,$A_jagged
+	addi	r10,r10,32
+	lvx_4w	v3,r11,$A_jagged
+	addi	r11,r11,32
+	lvx_4w	v4,r10,$A_jagged
+	addi	r10,r10,32
+	lvx_4w	v5,r11,$A_jagged
+	addi	r11,r11,32
+	lvx_4w	v6,r10,$A_jagged
+	addi	r10,r10,32
+	lvx_4w	v7,r11,$A_jagged
+	addi	r11,r11,32
+	lvx_4w	v8,r10,$A_jagged
+	addi	r10,r10,32
+	lvx_4w	v9,r11,$A_jagged
+	addi	r11,r11,32
+	lvx_4w	v10,r10,$A_jagged
+	addi	r10,r10,32
+	lvx_4w	v11,r11,$A_jagged
+	lvx_splt v12,r10,$A_jagged
+
+	bl	PICmeup
+
+	li	r11,16
+	lvx_u	v13,0,r12		; load rhotates
+	li	r10,32
+	lvx_u	v14,r11,r12
+	addi	r11,r11,32
+	lvx_u	v15,r10,r12
+	addi	r10,r10,32
+	lvx_u	v16,r11,r12
+	addi	r11,r11,32
+	lvx_u	v17,r10,r12
+	addi	r10,r10,32
+	lvx_u	v18,r11,r12
+	addi	r11,r11,32
+	lvx_u	v19,r10,r12
+	addi	r10,r10,32
+	lvx_u	v20,r11,r12
+	addi	r11,r11,32
+	lvx_u	v21,r10,r12
+	addi	r10,r10,32
+	lvx_u	v22,r11,r12
+	addi	r11,r11,32
+	lvx_u	v23,r10,r12
+	addi	r10,r10,32
+	lvx_u	v24,r11,r12
+	lvx_u	v25,r10,r12
+	li	r10,-32
+	li	r11,-16
+	addi	r12,r12,`16*16`		; points at iotas
+	b	.Loop_absorb
+
+.align	4
+.Loop_absorb:
+	$UCMP	$len,$bsz		; len < bsz?
+	blt	.Labsorbed
+
+	sub	$len,$len,$bsz		; len -= bsz
+	srwi	r0,$bsz,3
+	mtctr	r0
+
+	lvx_u	v30,r10,r12		; permutation masks
+	lvx_u	v31,r11,r12
+	?vspltisb v27,7			; prepare masks for byte swap
+	?vxor	v30,v30,v27		; on big-endian
+	?vxor	v31,v31,v27
+
+	vxor	v27,v27,v27		; zero
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v0, v0, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v1, v1, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v2, v2, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v3, v3, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v4, v4, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v0, v0, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v1, v1, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v2, v2, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v3, v3, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v4, v4, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v5, v5, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v6, v6, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v7, v7, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v8, v8, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v9, v9, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v5, v5, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v6, v6, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v7, v7, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v8, v8, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v9, v9, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v10, v10, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v10, v10, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v30
+	vxor	v11, v11, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v11, v11, v26
+	bdz	.Lprocess_block
+	lvdx_u	v26,0,$inp
+	addi	$inp,$inp,8
+	vperm	v26,v26,v27,v31
+	vxor	v12, v12, v26
+
+.Lprocess_block:
+	bl	KeccakF1600_int
+
+	b	.Loop_absorb
+
+.align	4
+.Labsorbed:
+	li	r11,16
+	stvx_4w	v0,0,$A_jagged		; return A[5][5]
+	li	r10,32
+	stvx_4w	v1,r11,$A_jagged
+	addi	r11,r11,32
+	stvx_4w	v2,r10,$A_jagged
+	addi	r10,r10,32
+	stvx_4w	v3,r11,$A_jagged
+	addi	r11,r11,32
+	stvx_4w	v4,r10,$A_jagged
+	addi	r10,r10,32
+	stvx_4w	v5,r11,$A_jagged
+	addi	r11,r11,32
+	stvx_4w	v6,r10,$A_jagged
+	addi	r10,r10,32
+	stvx_4w	v7,r11,$A_jagged
+	addi	r11,r11,32
+	stvx_4w	v8,r10,$A_jagged
+	addi	r10,r10,32
+	stvx_4w	v9,r11,$A_jagged
+	addi	r11,r11,32
+	stvx_4w	v10,r10,$A_jagged
+	addi	r10,r10,32
+	stvx_4w	v11,r11,$A_jagged
+	stvdx_u v12,r10,$A_jagged
+
+	mr	r3,$len			; return value
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mtlr	r8
+	mtspr	256, r7			; restore vrsave
+	lvx	v20,r10,$sp
+	addi	r10,r10,32
+	lvx	v21,r11,$sp
+	addi	r11,r11,32
+	lvx	v22,r10,$sp
+	addi	r10,r10,32
+	lvx	v23,r11,$sp
+	addi	r11,r11,32
+	lvx	v24,r10,$sp
+	addi	r10,r10,32
+	lvx	v25,r11,$sp
+	addi	r11,r11,32
+	lvx	v26,r10,$sp
+	addi	r10,r10,32
+	lvx	v27,r11,$sp
+	addi	r11,r11,32
+	lvx	v28,r10,$sp
+	addi	r10,r10,32
+	lvx	v29,r11,$sp
+	addi	r11,r11,32
+	lvx	v30,r10,$sp
+	lvx	v31,r11,$sp
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,0x04,1,0x80,0,4,0
+	.long	0
+.size	SHA3_absorb,.-SHA3_absorb
+___
+}
+{
+my ($A_jagged,$out,$len,$bsz) = map("r$_",(3..6));
+
+$code.=<<___;
+.globl	SHA3_squeeze
+.type	SHA3_squeeze,\@function
+.align	5
+SHA3_squeeze:
+	mflr	r9			; r9 is not touched by KeccakF1600
+	subi	$out,$out,1		; prepare for stbu
+	addi	r8,$A_jagged,4		; prepare volatiles
+	mr	r10,$bsz
+	li	r11,0
+	b	.Loop_squeeze
+.align	4
+.Loop_squeeze:
+	lwzx	r7,r11,r8		; lo
+	lwzx	r0,r11,$A_jagged	; hi
+	${UCMP}i $len,8
+	blt	.Lsqueeze_tail
+
+	stbu	r7,1($out)		; write lo
+	srwi	r7,r7,8
+	stbu	r7,1($out)
+	srwi	r7,r7,8
+	stbu	r7,1($out)
+	srwi	r7,r7,8
+	stbu	r7,1($out)
+	stbu	r0,1($out)		; write hi
+	srwi	r0,r0,8
+	stbu	r0,1($out)
+	srwi	r0,r0,8
+	stbu	r0,1($out)
+	srwi	r0,r0,8
+	stbu	r0,1($out)
+
+	subic.	$len,$len,8
+	beqlr				; return if done
+
+	subic.	r10,r10,8
+	ble	.Loutput_expand
+
+	addi	r11,r11,16		; calculate jagged index
+	cmplwi	r11,`16*5`
+	blt	.Loop_squeeze
+	subi	r11,r11,72
+	beq	.Loop_squeeze
+	addi	r11,r11,72
+	cmplwi	r11,`16*5+8`
+	subi	r11,r11,8
+	beq	.Loop_squeeze
+	addi	r11,r11,8
+	cmplwi	r11,`16*10`
+	subi	r11,r11,72
+	beq	.Loop_squeeze
+	addi	r11,r11,72
+	blt	.Loop_squeeze
+	subi	r11,r11,8
+	b	.Loop_squeeze
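+
+	; The A[5][5] argument is "jagged": rows 0-3 are stored as
+	; 16-byte pairs { A[2i][j], A[2i+1][j] } and row 4 as plain
+	; 8-byte lanes, so A[0][j] sits at byte offset 16*j, A[1][j]
+	; at 16*j+8, A[2][j] at 80+16*j, A[3][j] at 88+16*j and
+	; A[4][j] at 160+8*j. The branch ladder above steps r11
+	; through the offsets in exactly that order.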
+
+.align	4
+.Loutput_expand:
+	bl	KeccakF1600
+	mtlr	r9
+
+	addi	r8,$A_jagged,4		; restore volatiles
+	mr	r10,$bsz
+	li	r11,0
+	b	.Loop_squeeze
+
+.align	4
+.Lsqueeze_tail:
+	mtctr	$len
+	subic.	$len,$len,4
+	ble	.Loop_tail_lo
+	li	r8,4
+	mtctr	r8
+.Loop_tail_lo:
+	stbu	r7,1($out)
+	srdi	r7,r7,8
+	bdnz	.Loop_tail_lo
+	ble	.Lsqueeze_done
+	mtctr	$len
+.Loop_tail_hi:
+	stbu	r0,1($out)
+	srdi	r0,r0,8
+	bdnz	.Loop_tail_hi
+
+.Lsqueeze_done:
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,4,0
+	.long	0
+.size	SHA3_squeeze,.-SHA3_squeeze
+___
+}
+$code.=<<___;
+.align	6
+PICmeup:
+	mflr	r0
+	bcl	20,31,\$+4
+	mflr	r12   ; vvvvvv "distance" between . and 1st data entry
+	addi	r12,r12,`64-8`
+	mtlr	r0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+	.space	`64-9*4`
+.type	rhotates,\@object
+.align	6
+rhotates:
+	.quad	0,  36
+	.quad	1,  44
+	.quad	62,  6
+	.quad	28, 55
+	.quad	27, 20
+	.quad	3,  41
+	.quad	10, 45
+	.quad	43, 15
+	.quad	25, 21
+	.quad	39,  8
+	.quad	18,  2
+	.quad	61, 56
+	.quad	14, 14
+.size	rhotates,.-rhotates
+	.quad	0,0
+	.quad	0x0001020304050607,0x1011121314151617
+	.quad	0x1011121314151617,0x0001020304050607
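+# The two non-zero quads above are vperm masks used by SHA3_absorb to
+# steer an unaligned 8-byte load into one 64-bit lane of a vector
+# register while zeroing the other lane; on big-endian runs they are
+# first XORed with 0x07 bytes so that input is gathered in the
+# little-endian byte order Keccak expects.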
+.type	iotas,\@object
+iotas:
+	.quad	0x0000000000000001,0
+	.quad	0x0000000000008082,0
+	.quad	0x800000000000808a,0
+	.quad	0x8000000080008000,0
+	.quad	0x000000000000808b,0
+	.quad	0x0000000080000001,0
+	.quad	0x8000000080008081,0
+	.quad	0x8000000000008009,0
+	.quad	0x000000000000008a,0
+	.quad	0x0000000000000088,0
+	.quad	0x0000000080008009,0
+	.quad	0x000000008000000a,0
+	.quad	0x000000008000808b,0
+	.quad	0x800000000000008b,0
+	.quad	0x8000000000008089,0
+	.quad	0x8000000000008003,0
+	.quad	0x8000000000008002,0
+	.quad	0x8000000000000080,0
+	.quad	0x000000000000800a,0
+	.quad	0x800000008000000a,0
+	.quad	0x8000000080008081,0
+	.quad	0x8000000000008080,0
+	.quad	0x0000000080000001,0
+	.quad	0x8000000080008008,0
+.size	iotas,.-iotas
+.asciz	"Keccak-1600 absorb and squeeze for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+foreach  (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/ge;
+
+	if ($flavour =~ /le$/) {	# little-endian: comment out
+	    s/\?([a-z]+)/;$1/;		# '?'-prefixed instructions
+	} else {			# big-endian: emit them as-is
+	    s/\?([a-z]+)/$1/;
+	}
+
+	print $_,"\n";
+}
+
+close STDOUT;
diff --git a/crypto/sha/asm/sha512p8-ppc.pl b/crypto/sha/asm/sha512p8-ppc.pl
index 4d3d3b2..5457c4a 100755
--- a/crypto/sha/asm/sha512p8-ppc.pl
+++ b/crypto/sha/asm/sha512p8-ppc.pl
@@ -30,6 +30,15 @@
 # for sha1-ppc.pl - 73%. 100% means that multi-process result equals
 # to single-process one, given that all threads end up on the same
 # physical core.
+#
+######################################################################
+# Believed-to-be-accurate results in cycles per processed byte [on a
+# little-endian system]. Numbers in square brackets are for a 64-bit
+# build of sha512-ppc.pl, presented for reference.
+#
+#		POWER8
+# SHA256	9.9 [15.8]
+# SHA512	6.3 [10.3]
 
 $flavour=shift;
 $output =shift;

