[openssl-commits] [openssl] OpenSSL_1_0_2-stable update

Andy Polyakov appro at openssl.org
Mon Mar 7 14:03:38 UTC 2016


The branch OpenSSL_1_0_2-stable has been updated
       via  bd34ecbae008f23f9d64375ef766148e23084ccf (commit)
       via  ba26fa14556ba49466d51e4d9e6be32afee9c465 (commit)
      from  df14e5023743bde0ed2860817729cee61fab3662 (commit)


- Log -----------------------------------------------------------------
commit bd34ecbae008f23f9d64375ef766148e23084ccf
Author: Andy Polyakov <appro at openssl.org>
Date:   Fri Mar 4 11:39:11 2016 +0100

    bn/asm/x86[_64]-mont*.pl: complement alloca with page-walking.
    
    Some OSes, *cough*-dows, insist on the stack being "wired" to
    physical memory in a strictly sequential manner, i.e. if a stack
    allocation spans two pages, then a reference to the farther one
    can be punished with SEGV. But page walking can help even on
    other OSes, because it guarantees that a villain thread hits
    the guard page before it can do damage to an innocent one...
    
    Reviewed-by: Rich Salz <rsalz at openssl.org>
    (cherry picked from commit adc4f1fc25b2cac90076f1e1695b05b7aeeae501)
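
A minimal standalone sketch of the pattern each affected function now
emits after its alloca (the register and label names here are
representative; the real functions use whatever scratch registers are
free at that point):

#!/usr/bin/env perl
# Sketch of the page-walk sequence; assumes 4096-byte pages and that
# %rax still holds the stack pointer as it was before the allocation.
my $code = "";
$code .= <<___;
	mov	%rax,%r11		# %rax = pre-alloca stack pointer
	sub	%rsp,%r11		# %r11 = size of the allocation
	and	\$-4096,%r11		# round down to a page boundary
.Lpage_walk:
	mov	(%rsp,%r11),%r10	# touch one word in this page
	sub	\$4096,%r11		# step one page toward the new %rsp
	jnc	.Lpage_walk		# stop once the offset goes negative
___
print $code;

The walk begins in the page adjacent to the already-committed part of
the stack and steps down one page at a time, so every guard page is
faulted in sequentially before the far end of the allocation is used.
The ".byte 0x2e" (or "0x66,0x2e") emitted ahead of "jnc" in the actual
patch encodes a prefix that some Intel processors treat as a static
"branch not taken" hint (hence the "# predict non-taken" comments); it
does not change what the jump does.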

commit ba26fa14556ba49466d51e4d9e6be32afee9c465
Author: Andy Polyakov <appro at openssl.org>
Date:   Fri Mar 4 11:32:26 2016 +0100

    perlasm/x86_64-xlate.pl: handle binary constants early.
    
    Not all assemblers of the "gas" flavour handle binary constants;
    e.g. the one in seasoned MacOS Xcode doesn't, so give them a hand.
    
    Reviewed-by: Rich Salz <rsalz at openssl.org>
    Reviewed-by: Viktor Dukhovni <viktor at openssl.org>
    (cherry picked from commit 6e42e3ff9cde43830555549fdafa2a8b37b9485b)
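
What the relocated one-liner does, as a standalone sketch (the sample
operand string below is made up for illustration):

#!/usr/bin/env perl
# Rewrite 0b... binary literals to decimal before the assembler sees
# them; this is the same substitution the patch hoists in front of the
# gas branch of the constant formatter in x86_64-xlate.pl.
my $value = "(0b1101<<4)|0b0011";
$value =~ s/\b(0b[0-1]+)/oct($1)/eig;	# Perl's oct() decodes 0b/0x/0 forms
print "$value\n";			# prints "(13<<4)|3"

With the conversion done before the gas/masm split, assemblers that
choke on 0b literals (such as the one in seasoned MacOS Xcode) only
ever see the decimal form.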

-----------------------------------------------------------------------

Summary of changes:
 crypto/bn/asm/x86-mont.pl      | 15 +++++++++++
 crypto/bn/asm/x86_64-mont.pl   | 42 ++++++++++++++++++++++++++++-
 crypto/bn/asm/x86_64-mont5.pl  | 61 +++++++++++++++++++++++++++++++++++++++++-
 crypto/perlasm/x86_64-xlate.pl |  2 +-
 4 files changed, 117 insertions(+), 3 deletions(-)

diff --git a/crypto/bn/asm/x86-mont.pl b/crypto/bn/asm/x86-mont.pl
index e8f6b05..89f4de6 100755
--- a/crypto/bn/asm/x86-mont.pl
+++ b/crypto/bn/asm/x86-mont.pl
@@ -85,6 +85,21 @@ $frame=32;				# size of above frame rounded up to 16n
 
 	&and	("esp",-64);		# align to cache line
 
+	# Some OSes, *cough*-dows, insist on the stack being "wired" to
+	# physical memory in a strictly sequential manner, i.e. if a stack
+	# allocation spans two pages, then a reference to the farther one
+	# can be punished with SEGV. But page walking can help even on
+	# other OSes, because it guarantees that a villain thread hits
+	# the guard page before it can do damage to an innocent one...
+	&mov	("eax","ebp");
+	&sub	("eax","esp");
+	&and	("eax",-4096);
+&set_label("page_walk");
+	&mov	("edx",&DWP(0,"esp","eax"));
+	&sub	("eax",4096);
+	&data_byte(0x2e);
+	&jnc	(&label("page_walk"));
+
 	################################# load argument block...
 	&mov	("eax",&DWP(0*4,"esi"));# BN_ULONG *rp
 	&mov	("ebx",&DWP(1*4,"esi"));# const BN_ULONG *ap
diff --git a/crypto/bn/asm/x86_64-mont.pl b/crypto/bn/asm/x86_64-mont.pl
index 29ba122..8fb6c99 100755
--- a/crypto/bn/asm/x86_64-mont.pl
+++ b/crypto/bn/asm/x86_64-mont.pl
@@ -130,6 +130,20 @@ $code.=<<___;
 
 	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
 .Lmul_body:
+	# Some OSes, *cough*-dows, insist on the stack being "wired" to
+	# physical memory in a strictly sequential manner, i.e. if a stack
+	# allocation spans two pages, then a reference to the farther one
+	# can be punished with SEGV. But page walking can help even on
+	# other OSes, because it guarantees that a villain thread hits
+	# the guard page before it can do damage to an innocent one...
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lmul_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x66,0x2e		# predict non-taken
+	jnc	.Lmul_page_walk
+
 	mov	$bp,%r12		# reassign $bp
 ___
 		$bp="%r12";
@@ -342,6 +356,14 @@ $code.=<<___;
 
 	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
 .Lmul4x_body:
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lmul4x_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lmul4x_page_walk
+
 	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
 	mov	%rdx,%r12		# reassign $bp
 ___
@@ -795,6 +817,15 @@ bn_sqr8x_mont:
 	sub	%r11,%rsp
 .Lsqr8x_sp_done:
 	and	\$-64,%rsp
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lsqr8x_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lsqr8x_page_walk
+
 	mov	$num,%r10
 	neg	$num
 
@@ -932,8 +963,17 @@ bn_mulx4x_mont:
 	sub	$num,%r10		# -$num
 	mov	($n0),$n0		# *n0
 	lea	-72(%rsp,%r10),%rsp	# alloca(frame+$num+8)
-	lea	($bp,$num),%r10
 	and	\$-128,%rsp
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lmulx4x_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x66,0x2e		# predict non-taken
+	jnc	.Lmulx4x_page_walk
+
+	lea	($bp,$num),%r10
 	##############################################################
 	# Stack layout
 	# +0	num
diff --git a/crypto/bn/asm/x86_64-mont5.pl b/crypto/bn/asm/x86_64-mont5.pl
index 2e8c9db..938e170 100755
--- a/crypto/bn/asm/x86_64-mont5.pl
+++ b/crypto/bn/asm/x86_64-mont5.pl
@@ -115,6 +115,20 @@ $code.=<<___;
 
 	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
 .Lmul_body:
+	# Some OSes, *cough*-dows, insist on the stack being "wired" to
+	# physical memory in a strictly sequential manner, i.e. if a stack
+	# allocation spans two pages, then a reference to the farther one
+	# can be punished with SEGV. But page walking can help even on
+	# other OSes, because it guarantees that a villain thread hits
+	# the guard page before it can do damage to an innocent one...
+	sub	%rsp,%rax
+	and	\$-4096,%rax
+.Lmul_page_walk:
+	mov	(%rsp,%rax),%r11
+	sub	\$4096,%rax
+	.byte	0x2e			# predict non-taken
+	jnc	.Lmul_page_walk
+
 	lea	128($bp),%r12		# reassign $bp (+size optimization)
 ___
 		$bp="%r12";
@@ -469,6 +483,15 @@ $code.=<<___;
 	sub	%r11,%rsp
 .Lmul4xsp_done:
 	and	\$-64,%rsp
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lmul4x_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lmul4x_page_walk
+
 	neg	$num
 
 	mov	%rax,40(%rsp)
@@ -1058,6 +1081,15 @@ $code.=<<___;
 	sub	%r11,%rsp
 .Lpwr_sp_done:
 	and	\$-64,%rsp
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lpwr_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lpwr_page_walk
+
 	mov	$num,%r10	
 	neg	$num
 
@@ -2028,7 +2060,16 @@ bn_from_mont8x:
 	sub	%r11,%rsp
 .Lfrom_sp_done:
 	and	\$-64,%rsp
-	mov	$num,%r10	
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lfrom_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lfrom_page_walk
+
+	mov	$num,%r10
 	neg	$num
 
 	##############################################################
@@ -2173,6 +2214,15 @@ bn_mulx4x_mont_gather5:
 	sub	%r11,%rsp
 .Lmulx4xsp_done:	
 	and	\$-64,%rsp		# ensure alignment
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lmulx4x_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lmulx4x_page_walk
+
 	##############################################################
 	# Stack layout
 	# +0	-num
@@ -2619,6 +2669,15 @@ bn_powerx5:
 	sub	%r11,%rsp
 .Lpwrx_sp_done:
 	and	\$-64,%rsp
+	mov	%rax,%r11
+	sub	%rsp,%r11
+	and	\$-4096,%r11
+.Lpwrx_page_walk:
+	mov	(%rsp,%r11),%r10
+	sub	\$4096,%r11
+	.byte	0x2e			# predict non-taken
+	jnc	.Lpwrx_page_walk
+
 	mov	$num,%r10	
 	neg	$num
 
diff --git a/crypto/perlasm/x86_64-xlate.pl b/crypto/perlasm/x86_64-xlate.pl
index ee04221..7a3dd04 100755
--- a/crypto/perlasm/x86_64-xlate.pl
+++ b/crypto/perlasm/x86_64-xlate.pl
@@ -195,6 +195,7 @@ my %globals;
     sub out {
     	my $self = shift;
 
+	$self->{value} =~ s/\b(0b[0-1]+)/oct($1)/eig;
 	if ($gas) {
 	    # Solaris /usr/ccs/bin/as can't handle multiplications
 	    # in $self->{value}
@@ -205,7 +206,6 @@ my %globals;
 	    }
 	    sprintf "\$%s",$self->{value};
 	} else {
-	    $self->{value} =~ s/(0b[0-1]+)/oct($1)/eig;
 	    $self->{value} =~ s/0x([0-9a-f]+)/0$1h/ig if ($masm);
 	    sprintf "%s",$self->{value};
 	}

