[openssl-commits] [openssl] master update

Andy Polyakov <appro at openssl.org>
Mon Feb 13 20:12:19 UTC 2017


The branch master has been updated
       via  86e112788e2ab9740c0cabf3ae4b1eb67b386bab (commit)
       via  79ca382d4762c58c4b92fceb4e202e90c71292ae (commit)
      from  219aa86cb04e1bfc9c156fab18da2f767502afb2 (commit)


- Log -----------------------------------------------------------------
commit 86e112788e2ab9740c0cabf3ae4b1eb67b386bab
Author: Andy Polyakov <appro at openssl.org>
Date:   Fri Feb 10 12:20:18 2017 +0100

    ec/asm/ecp_nistz256-x86_64.pl: add CFI directives.
    
    Reviewed-by: Rich Salz <rsalz at openssl.org>
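
The .cfi_* annotations this commit adds describe, instruction by instruction,
where the canonical frame address (CFA) and the saved callee-saved registers
live, so that debuggers and asynchronous unwinders can walk the stack through
this hand-written assembly. Note that .cfi_push, as it appears in the diff
below, is a perlasm shorthand (expanded by crypto/perlasm/x86_64-xlate.pl)
rather than a raw assembler directive; the other directives are standard
GNU as. A minimal sketch of the expanded form, for a hypothetical function
that saves a single register:

	my_func:			# hypothetical, for illustration only
	.cfi_startproc			# open an unwind-table entry (FDE)
		push	%r12
	.cfi_adjust_cfa_offset	8	# the push grew the stack: CFA is now %rsp+16
	.cfi_offset	%r12,-16	# caller's %r12 is saved at CFA-16
		# ... function body ...
		pop	%r12
	.cfi_adjust_cfa_offset	-8	# stack shrank back: CFA is %rsp+8 again
	.cfi_restore	%r12		# %r12 holds the caller's value once more
		ret
	.cfi_endproc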

commit 79ca382d4762c58c4b92fceb4e202e90c71292ae
Author: Andy Polyakov <appro at openssl.org>
Date:   Fri Feb 10 11:43:42 2017 +0100

    ec/asm/ecp_nistz256-x86_64.pl: fix typo-bug in Win64 SE handler.
    
    Thanks to Jun Sun for spotting this.
    
    Reviewed-by: Rich Salz <rsalz at openssl.org>
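
The typo-bug itself is the two-line swap in the full_handler hunk at the very
end of the diff. The prologues push %rbp first and %rbx second, so %rbp sits
in the slot 8 bytes below the frame top and %rbx 16 bytes below it, exactly as
the epilogues read them back; the Win64 SE handler had those two loads
crossed, so an exception unwinding through these functions would restore each
register from the other's slot. The layout, read off the prologue/epilogue
pairs in this patch (the %r15 row follows from the epilogue code, past the end
of the quoted hunk):

	push order	slot in frame	handler restore (as fixed)
	push %rbp	-8(%rax)	mov -8(%rax),%rbp
	push %rbx	-16(%rax)	mov -16(%rax),%rbx
	push %r12	-24(%rax)	mov -24(%rax),%r12
	push %r13	-32(%rax)	mov -32(%rax),%r13
	push %r14	-40(%rax)	mov -40(%rax),%r14
	push %r15	-48(%rax)	mov -48(%rax),%r15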

-----------------------------------------------------------------------

Summary of changes:
 crypto/ec/asm/ecp_nistz256-x86_64.pl | 137 ++++++++++++++++++++++++++++++++++-
 1 file changed, 135 insertions(+), 2 deletions(-)
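
Two unwind shapes appear in the diff. The small field routines (mul_by_2,
div_by_2, add, sub, ...) restore registers with explicit loads from %rsp and a
final lea, annotated with .cfi_restore plus .cfi_adjust_cfa_offset. The large
point routines (point_double, point_add, point_add_affine) instead restore
through %rsi: lea 32*N+56(%rsp),%rsi points %rsi at the frame top,
.cfi_def_cfa %rsi,8 re-bases the CFA on %rsi so the frame description stays
valid while %rsp still points below the saved registers, and once
lea (%rsi),%rsp has reset the stack pointer, .cfi_def_cfa_register %rsp moves
the CFA base back to %rsp where the caller expects it.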

diff --git a/crypto/ec/asm/ecp_nistz256-x86_64.pl b/crypto/ec/asm/ecp_nistz256-x86_64.pl
index 1028c09..99bbb0b 100755
--- a/crypto/ec/asm/ecp_nistz256-x86_64.pl
+++ b/crypto/ec/asm/ecp_nistz256-x86_64.pl
@@ -131,8 +131,11 @@ $code.=<<___;
 .type	ecp_nistz256_mul_by_2,\@function,2
 .align	64
 ecp_nistz256_mul_by_2:
+.cfi_startproc
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 .Lmul_by_2_body:
 
 	mov	8*0($a_ptr), $a0
@@ -167,10 +170,14 @@ ecp_nistz256_mul_by_2:
 	mov	$a3, 8*3($r_ptr)
 
 	mov	0(%rsp),%r13
+.cfi_restore	%r13
 	mov	8(%rsp),%r12
+.cfi_restore	%r12
 	lea	16(%rsp),%rsp
+.cfi_adjust_cfa_offset	-16
 .Lmul_by_2_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
 
 ################################################################################
@@ -179,8 +186,11 @@ ecp_nistz256_mul_by_2:
 .type	ecp_nistz256_div_by_2,\@function,2
 .align	32
 ecp_nistz256_div_by_2:
+.cfi_startproc
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 .Ldiv_by_2_body:
 
 	mov	8*0($a_ptr), $a0
@@ -230,10 +240,14 @@ ecp_nistz256_div_by_2:
 	mov	$a3, 8*3($r_ptr)
 
 	mov	0(%rsp),%r13
+.cfi_restore	%r13
 	mov	8(%rsp),%r12
+.cfi_restore	%r12
 	lea	16(%rsp),%rsp
+.cfi_adjust_cfa_offset	-16
 .Ldiv_by_2_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
 
 ################################################################################
@@ -242,8 +256,11 @@ ecp_nistz256_div_by_2:
 .type	ecp_nistz256_mul_by_3,\@function,2
 .align	32
 ecp_nistz256_mul_by_3:
+.cfi_startproc
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 .Lmul_by_3_body:
 
 	mov	8*0($a_ptr), $a0
@@ -299,10 +316,14 @@ ecp_nistz256_mul_by_3:
 	mov	$a3, 8*3($r_ptr)
 
 	mov	0(%rsp),%r13
+.cfi_restore	%r13
 	mov	8(%rsp),%r12
+.cfi_restore	%r12
 	lea	16(%rsp),%rsp
+.cfi_adjust_cfa_offset	-16
 .Lmul_by_3_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
 
 ################################################################################
@@ -311,8 +332,11 @@ ecp_nistz256_mul_by_3:
 .type	ecp_nistz256_add,\@function,3
 .align	32
 ecp_nistz256_add:
+.cfi_startproc
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 .Ladd_body:
 
 	mov	8*0($a_ptr), $a0
@@ -348,10 +372,14 @@ ecp_nistz256_add:
 	mov	$a3, 8*3($r_ptr)
 
 	mov	0(%rsp),%r13
+.cfi_restore	%r13
 	mov	8(%rsp),%r12
+.cfi_restore	%r12
 	lea	16(%rsp),%rsp
+.cfi_adjust_cfa_offset	-16
 .Ladd_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_add,.-ecp_nistz256_add
 
 ################################################################################
@@ -360,8 +388,11 @@ ecp_nistz256_add:
 .type	ecp_nistz256_sub,\@function,3
 .align	32
 ecp_nistz256_sub:
+.cfi_startproc
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 .Lsub_body:
 
 	mov	8*0($a_ptr), $a0
@@ -397,10 +428,14 @@ ecp_nistz256_sub:
 	mov	$a3, 8*3($r_ptr)
 
 	mov	0(%rsp),%r13
+.cfi_restore	%r13
 	mov	8(%rsp),%r12
+.cfi_restore	%r12
 	lea	16(%rsp),%rsp
+.cfi_adjust_cfa_offset	-16
 .Lsub_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_sub,.-ecp_nistz256_sub
 
 ################################################################################
@@ -409,8 +444,11 @@ ecp_nistz256_sub:
 .type	ecp_nistz256_neg,\@function,2
 .align	32
 ecp_nistz256_neg:
+.cfi_startproc
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 .Lneg_body:
 
 	xor	$a0, $a0
@@ -446,10 +484,14 @@ ecp_nistz256_neg:
 	mov	$a3, 8*3($r_ptr)
 
 	mov	0(%rsp),%r13
+.cfi_restore	%r13
 	mov	8(%rsp),%r12
+.cfi_restore	%r12
 	lea	16(%rsp),%rsp
+.cfi_adjust_cfa_offset	-16
 .Lneg_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_neg,.-ecp_nistz256_neg
 ___
 }
@@ -488,6 +530,7 @@ $code.=<<___;
 .type	ecp_nistz256_mul_mont,\@function,3
 .align	32
 ecp_nistz256_mul_mont:
+.cfi_startproc
 ___
 $code.=<<___	if ($addx);
 	mov	\$0x80100, %ecx
@@ -496,11 +539,17 @@ ___
 $code.=<<___;
 .Lmul_mont:
 	push	%rbp
+.cfi_push	%rbp
 	push	%rbx
+.cfi_push	%rbx
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 	push	%r14
+.cfi_push	%r14
 	push	%r15
+.cfi_push	%r15
 .Lmul_body:
 ___
 $code.=<<___	if ($addx);
@@ -535,14 +584,22 @@ ___
 $code.=<<___;
 .Lmul_mont_done:
 	mov	0(%rsp),%r15
+.cfi_restore	%r15
 	mov	8(%rsp),%r14
+.cfi_restore	%r14
 	mov	16(%rsp),%r13
+.cfi_restore	%r13
 	mov	24(%rsp),%r12
+.cfi_restore	%r12
 	mov	32(%rsp),%rbx
+.cfi_restore	%rbx
 	mov	40(%rsp),%rbp
+.cfi_restore	%rbp
 	lea	48(%rsp),%rsp
+.cfi_adjust_cfa_offset	-48
 .Lmul_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
 
 .type	__ecp_nistz256_mul_montq,\@abi-omnipotent
@@ -772,6 +829,7 @@ __ecp_nistz256_mul_montq:
 .type	ecp_nistz256_sqr_mont,\@function,2
 .align	32
 ecp_nistz256_sqr_mont:
+.cfi_startproc
 ___
 $code.=<<___	if ($addx);
 	mov	\$0x80100, %ecx
@@ -779,11 +837,17 @@ $code.=<<___	if ($addx);
 ___
 $code.=<<___;
 	push	%rbp
+.cfi_push	%rbp
 	push	%rbx
+.cfi_push	%rbx
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 	push	%r14
+.cfi_push	%r14
 	push	%r15
+.cfi_push	%r15
 .Lsqr_body:
 ___
 $code.=<<___	if ($addx);
@@ -814,14 +878,22 @@ ___
 $code.=<<___;
 .Lsqr_mont_done:
 	mov	0(%rsp),%r15
+.cfi_restore	%r15
 	mov	8(%rsp),%r14
+.cfi_restore	%r14
 	mov	16(%rsp),%r13
+.cfi_restore	%r13
 	mov	24(%rsp),%r12
+.cfi_restore	%r12
 	mov	32(%rsp),%rbx
+.cfi_restore	%rbx
 	mov	40(%rsp),%rbp
+.cfi_restore	%rbp
 	lea	48(%rsp),%rsp
+.cfi_adjust_cfa_offset	-48
 .Lsqr_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
 
 .type	__ecp_nistz256_sqr_montq,\@abi-omnipotent
@@ -1306,8 +1378,11 @@ $code.=<<___;
 .type	ecp_nistz256_from_mont,\@function,2
 .align	32
 ecp_nistz256_from_mont:
+.cfi_startproc
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 .Lfrom_body:
 
 	mov	8*0($in_ptr), %rax
@@ -1390,10 +1465,14 @@ ecp_nistz256_from_mont:
 	mov	$acc3, 8*3($r_ptr)
 
 	mov	0(%rsp),%r13
+.cfi_restore	%r13
 	mov	8(%rsp),%r12
+.cfi_restore	%r12
 	lea	16(%rsp),%rsp
+.cfi_adjust_cfa_offset	-16
 .Lfrom_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
 ___
 }
@@ -2055,6 +2134,7 @@ $code.=<<___;
 .type	ecp_nistz256_point_double,\@function,2
 .align	32
 ecp_nistz256_point_double:
+.cfi_startproc
 ___
 $code.=<<___	if ($addx);
 	mov	\$0x80100, %ecx
@@ -2071,17 +2151,25 @@ $code.=<<___;
 .type	ecp_nistz256_point_doublex,\@function,2
 .align	32
 ecp_nistz256_point_doublex:
+.cfi_startproc
 .Lpoint_doublex:
 ___
     }
 $code.=<<___;
 	push	%rbp
+.cfi_push	%rbp
 	push	%rbx
+.cfi_push	%rbx
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 	push	%r14
+.cfi_push	%r14
 	push	%r15
+.cfi_push	%r15
 	sub	\$32*5+8, %rsp
+.cfi_adjust_cfa_offset	32*5+8
 .Lpoint_double${x}_body:
 
 .Lpoint_double_shortcut$x:
@@ -2254,15 +2342,24 @@ $code.=<<___;
 	call	__ecp_nistz256_sub_from$x	# p256_sub(res_y, S, res_y);
 
 	lea	32*5+56(%rsp), %rsi
+.cfi_def_cfa	%rsi,8
 	mov	-48(%rsi),%r15
+.cfi_restore	%r15
 	mov	-40(%rsi),%r14
+.cfi_restore	%r14
 	mov	-32(%rsi),%r13
+.cfi_restore	%r13
 	mov	-24(%rsi),%r12
+.cfi_restore	%r12
 	mov	-16(%rsi),%rbx
+.cfi_restore	%rbx
 	mov	-8(%rsi),%rbp
+.cfi_restore	%rbp
 	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
 .Lpoint_double${x}_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_point_double$sfx,.-ecp_nistz256_point_double$sfx
 ___
 }
@@ -2288,6 +2385,7 @@ $code.=<<___;
 .type	ecp_nistz256_point_add,\@function,3
 .align	32
 ecp_nistz256_point_add:
+.cfi_startproc
 ___
 $code.=<<___	if ($addx);
 	mov	\$0x80100, %ecx
@@ -2304,17 +2402,25 @@ $code.=<<___;
 .type	ecp_nistz256_point_addx,\@function,3
 .align	32
 ecp_nistz256_point_addx:
+.cfi_startproc
 .Lpoint_addx:
 ___
     }
 $code.=<<___;
 	push	%rbp
+.cfi_push	%rbp
 	push	%rbx
+.cfi_push	%rbx
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 	push	%r14
+.cfi_push	%r14
 	push	%r15
+.cfi_push	%r15
 	sub	\$32*18+8, %rsp
+.cfi_adjust_cfa_offset	32*18+8
 .Lpoint_add${x}_body:
 
 	movdqu	0x00($a_ptr), %xmm0		# copy	*(P256_POINT *)$a_ptr
@@ -2625,15 +2731,24 @@ $code.=<<___;
 
 .Ladd_done$x:
 	lea	32*18+56(%rsp), %rsi
+.cfi_def_cfa	%rsi,8
 	mov	-48(%rsi),%r15
+.cfi_restore	%r15
 	mov	-40(%rsi),%r14
+.cfi_restore	%r14
 	mov	-32(%rsi),%r13
+.cfi_restore	%r13
 	mov	-24(%rsi),%r12
+.cfi_restore	%r12
 	mov	-16(%rsi),%rbx
+.cfi_restore	%rbx
 	mov	-8(%rsi),%rbp
+.cfi_restore	%rbp
 	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
 .Lpoint_add${x}_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_point_add$sfx,.-ecp_nistz256_point_add$sfx
 ___
 }
@@ -2658,6 +2773,7 @@ $code.=<<___;
 .type	ecp_nistz256_point_add_affine,\@function,3
 .align	32
 ecp_nistz256_point_add_affine:
+.cfi_startproc
 ___
 $code.=<<___	if ($addx);
 	mov	\$0x80100, %ecx
@@ -2674,17 +2790,25 @@ $code.=<<___;
 .type	ecp_nistz256_point_add_affinex,\@function,3
 .align	32
 ecp_nistz256_point_add_affinex:
+.cfi_startproc
 .Lpoint_add_affinex:
 ___
     }
 $code.=<<___;
 	push	%rbp
+.cfi_push	%rbp
 	push	%rbx
+.cfi_push	%rbx
 	push	%r12
+.cfi_push	%r12
 	push	%r13
+.cfi_push	%r13
 	push	%r14
+.cfi_push	%r14
 	push	%r15
+.cfi_push	%r15
 	sub	\$32*15+8, %rsp
+.cfi_adjust_cfa_offset	32*15+8
 .Ladd_affine${x}_body:
 
 	movdqu	0x00($a_ptr), %xmm0	# copy	*(P256_POINT *)$a_ptr
@@ -2931,15 +3055,24 @@ $code.=<<___;
 	movdqu	%xmm3, 0x30($r_ptr)
 
 	lea	32*15+56(%rsp), %rsi
+.cfi_def_cfa	%rsi,8
 	mov	-48(%rsi),%r15
+.cfi_restore	%r15
 	mov	-40(%rsi),%r14
+.cfi_restore	%r14
 	mov	-32(%rsi),%r13
+.cfi_restore	%r13
 	mov	-24(%rsi),%r12
+.cfi_restore	%r12
 	mov	-16(%rsi),%rbx
+.cfi_restore	%rbx
 	mov	-8(%rsi),%rbp
+.cfi_restore	%rbp
 	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
 .Ladd_affine${x}_epilogue:
 	ret
+.cfi_endproc
 .size	ecp_nistz256_point_add_affine$sfx,.-ecp_nistz256_point_add_affine$sfx
 ___
 }
@@ -3178,8 +3311,8 @@ full_handler:
 	mov	8(%r11),%r10d		# HandlerData[2]
 	lea	(%rax,%r10),%rax
 
-	mov	-8(%rax),%rbx
-	mov	-16(%rax),%rbp
+	mov	-8(%rax),%rbp
+	mov	-16(%rax),%rbx
 	mov	-24(%rax),%r12
 	mov	-32(%rax),%r13
 	mov	-40(%rax),%r14

