in src/crypto/fipsmodule/bn/asm/rsaz-avx2.pl [149:817]
sub \$$FrameSize, %rsp
mov $np, $tmp
sub \$-128, $rp # size optimization
sub \$-128, $ap
sub \$-128, $np
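# check whether the ten 256-bit words at $np stay clear of a 4KB page
# boundary: (address mod 4096) + 320 carries into bit 12 once the window
# reaches the end of the page, in which case the copy below is taken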
and \$4095, $tmp # see if $np crosses page
add \$32*10, $tmp
shr \$12, $tmp
vpxor $ACC9,$ACC9,$ACC9
jz .Lsqr_1024_no_n_copy
# unaligned 256-bit load that crosses page boundary can
# cause >2x performance degradation here, so if $np does
# cross page boundary, copy it to stack and make sure stack
# frame doesn't...
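# (the 2KB stack alignment below keeps the whole 320-byte copy within one
# 2KB-aligned region, so the copy itself cannot straddle a 4KB page)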
sub \$32*10,%rsp
vmovdqu 32*0-128($np), $ACC0
and \$-2048, %rsp
vmovdqu 32*1-128($np), $ACC1
vmovdqu 32*2-128($np), $ACC2
vmovdqu 32*3-128($np), $ACC3
vmovdqu 32*4-128($np), $ACC4
vmovdqu 32*5-128($np), $ACC5
vmovdqu 32*6-128($np), $ACC6
vmovdqu 32*7-128($np), $ACC7
vmovdqu 32*8-128($np), $ACC8
lea $FrameSize+128(%rsp),$np
vmovdqu $ACC0, 32*0-128($np)
vmovdqu $ACC1, 32*1-128($np)
vmovdqu $ACC2, 32*2-128($np)
vmovdqu $ACC3, 32*3-128($np)
vmovdqu $ACC4, 32*4-128($np)
vmovdqu $ACC5, 32*5-128($np)
vmovdqu $ACC6, 32*6-128($np)
vmovdqu $ACC7, 32*7-128($np)
vmovdqu $ACC8, 32*8-128($np)
vmovdqu $ACC9, 32*9-128($np) # $ACC9 is zero
.Lsqr_1024_no_n_copy:
and \$-1024, %rsp
vmovdqu 32*1-128($ap), $ACC1
vmovdqu 32*2-128($ap), $ACC2
vmovdqu 32*3-128($ap), $ACC3
vmovdqu 32*4-128($ap), $ACC4
vmovdqu 32*5-128($ap), $ACC5
vmovdqu 32*6-128($ap), $ACC6
vmovdqu 32*7-128($ap), $ACC7
vmovdqu 32*8-128($ap), $ACC8
lea 192(%rsp), $tp0 # 64+128=192
vmovdqu .Land_mask(%rip), $AND_MASK
jmp .LOOP_GRANDE_SQR_1024
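# outer loop: one pass per requested modular squaring ($rep of them); the
# reduced result written back to $rp is reused as $ap for the next pass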
.align 32
.LOOP_GRANDE_SQR_1024:
lea 32*18+128(%rsp), $aap # size optimization
lea 448(%rsp), $tp1 # 64+128+256=448
# the squaring is performed as described in Variant B of
# "Speeding up Big-Number Squaring", so start by calculating
# the A*2=A+A vector
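# (in A^2 every cross product a[i]*a[j], i<j, occurs twice, so multiplying
# against the doubled copy 2*A covers each such pair with a single vpmuludq:
#   A^2 = sum_i a[i]^2*b^(2i) + sum_{i<j} a[i]*(2*a[j])*b^(i+j) )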
vpaddq $ACC1, $ACC1, $ACC1
vpbroadcastq 32*0-128($ap), $B1
vpaddq $ACC2, $ACC2, $ACC2
vmovdqa $ACC1, 32*0-128($aap)
vpaddq $ACC3, $ACC3, $ACC3
vmovdqa $ACC2, 32*1-128($aap)
vpaddq $ACC4, $ACC4, $ACC4
vmovdqa $ACC3, 32*2-128($aap)
vpaddq $ACC5, $ACC5, $ACC5
vmovdqa $ACC4, 32*3-128($aap)
vpaddq $ACC6, $ACC6, $ACC6
vmovdqa $ACC5, 32*4-128($aap)
vpaddq $ACC7, $ACC7, $ACC7
vmovdqa $ACC6, 32*5-128($aap)
vpaddq $ACC8, $ACC8, $ACC8
vmovdqa $ACC7, 32*6-128($aap)
vpxor $ACC9, $ACC9, $ACC9
vmovdqa $ACC8, 32*7-128($aap)
vpmuludq 32*0-128($ap), $B1, $ACC0
vpbroadcastq 32*1-128($ap), $B2
vmovdqu $ACC9, 32*9-192($tp0) # zero upper half
vpmuludq $B1, $ACC1, $ACC1
vmovdqu $ACC9, 32*10-448($tp1)
vpmuludq $B1, $ACC2, $ACC2
vmovdqu $ACC9, 32*11-448($tp1)
vpmuludq $B1, $ACC3, $ACC3
vmovdqu $ACC9, 32*12-448($tp1)
vpmuludq $B1, $ACC4, $ACC4
vmovdqu $ACC9, 32*13-448($tp1)
vpmuludq $B1, $ACC5, $ACC5
vmovdqu $ACC9, 32*14-448($tp1)
vpmuludq $B1, $ACC6, $ACC6
vmovdqu $ACC9, 32*15-448($tp1)
vpmuludq $B1, $ACC7, $ACC7
vmovdqu $ACC9, 32*16-448($tp1)
vpmuludq $B1, $ACC8, $ACC8
vpbroadcastq 32*2-128($ap), $B1
vmovdqu $ACC9, 32*17-448($tp1)
mov $ap, $tpa
mov \$4, $i
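# the first pass enters mid-loop: the products for the first source word
# were already formed above, so only the remainder of the loop body runs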
jmp .Lsqr_entry_1024
___
$TEMP0=$Y1;
$TEMP2=$Y2;
$code.=<<___;
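# inner squaring loop: each pass broadcasts successive source words from
# $tpa, multiplies them against the original ($ap) and doubled ($aap)
# vectors, accumulates into $ACC0-$ACC8 and the spill rows at $tp0/$tp1,
# then slides $tpa/$tp0/$tp1 forward by 8 bytes; $i counts four such passes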
.align 32
.LOOP_SQR_1024:
vpbroadcastq 32*1-128($tpa), $B2
vpmuludq 32*0-128($ap), $B1, $ACC0
vpaddq 32*0-192($tp0), $ACC0, $ACC0
vpmuludq 32*0-128($aap), $B1, $ACC1
vpaddq 32*1-192($tp0), $ACC1, $ACC1
vpmuludq 32*1-128($aap), $B1, $ACC2
vpaddq 32*2-192($tp0), $ACC2, $ACC2
vpmuludq 32*2-128($aap), $B1, $ACC3
vpaddq 32*3-192($tp0), $ACC3, $ACC3
vpmuludq 32*3-128($aap), $B1, $ACC4
vpaddq 32*4-192($tp0), $ACC4, $ACC4
vpmuludq 32*4-128($aap), $B1, $ACC5
vpaddq 32*5-192($tp0), $ACC5, $ACC5
vpmuludq 32*5-128($aap), $B1, $ACC6
vpaddq 32*6-192($tp0), $ACC6, $ACC6
vpmuludq 32*6-128($aap), $B1, $ACC7
vpaddq 32*7-192($tp0), $ACC7, $ACC7
vpmuludq 32*7-128($aap), $B1, $ACC8
vpbroadcastq 32*2-128($tpa), $B1
vpaddq 32*8-192($tp0), $ACC8, $ACC8
.Lsqr_entry_1024:
vmovdqu $ACC0, 32*0-192($tp0)
vmovdqu $ACC1, 32*1-192($tp0)
vpmuludq 32*1-128($ap), $B2, $TEMP0
vpaddq $TEMP0, $ACC2, $ACC2
vpmuludq 32*1-128($aap), $B2, $TEMP1
vpaddq $TEMP1, $ACC3, $ACC3
vpmuludq 32*2-128($aap), $B2, $TEMP2
vpaddq $TEMP2, $ACC4, $ACC4
vpmuludq 32*3-128($aap), $B2, $TEMP0
vpaddq $TEMP0, $ACC5, $ACC5
vpmuludq 32*4-128($aap), $B2, $TEMP1
vpaddq $TEMP1, $ACC6, $ACC6
vpmuludq 32*5-128($aap), $B2, $TEMP2
vpaddq $TEMP2, $ACC7, $ACC7
vpmuludq 32*6-128($aap), $B2, $TEMP0
vpaddq $TEMP0, $ACC8, $ACC8
vpmuludq 32*7-128($aap), $B2, $ACC0
vpbroadcastq 32*3-128($tpa), $B2
vpaddq 32*9-192($tp0), $ACC0, $ACC0
vmovdqu $ACC2, 32*2-192($tp0)
vmovdqu $ACC3, 32*3-192($tp0)
vpmuludq 32*2-128($ap), $B1, $TEMP2
vpaddq $TEMP2, $ACC4, $ACC4
vpmuludq 32*2-128($aap), $B1, $TEMP0
vpaddq $TEMP0, $ACC5, $ACC5
vpmuludq 32*3-128($aap), $B1, $TEMP1
vpaddq $TEMP1, $ACC6, $ACC6
vpmuludq 32*4-128($aap), $B1, $TEMP2
vpaddq $TEMP2, $ACC7, $ACC7
vpmuludq 32*5-128($aap), $B1, $TEMP0
vpaddq $TEMP0, $ACC8, $ACC8
vpmuludq 32*6-128($aap), $B1, $TEMP1
vpaddq $TEMP1, $ACC0, $ACC0
vpmuludq 32*7-128($aap), $B1, $ACC1
vpbroadcastq 32*4-128($tpa), $B1
vpaddq 32*10-448($tp1), $ACC1, $ACC1
vmovdqu $ACC4, 32*4-192($tp0)
vmovdqu $ACC5, 32*5-192($tp0)
vpmuludq 32*3-128($ap), $B2, $TEMP0
vpaddq $TEMP0, $ACC6, $ACC6
vpmuludq 32*3-128($aap), $B2, $TEMP1
vpaddq $TEMP1, $ACC7, $ACC7
vpmuludq 32*4-128($aap), $B2, $TEMP2
vpaddq $TEMP2, $ACC8, $ACC8
vpmuludq 32*5-128($aap), $B2, $TEMP0
vpaddq $TEMP0, $ACC0, $ACC0
vpmuludq 32*6-128($aap), $B2, $TEMP1
vpaddq $TEMP1, $ACC1, $ACC1
vpmuludq 32*7-128($aap), $B2, $ACC2
vpbroadcastq 32*5-128($tpa), $B2
vpaddq 32*11-448($tp1), $ACC2, $ACC2
vmovdqu $ACC6, 32*6-192($tp0)
vmovdqu $ACC7, 32*7-192($tp0)
vpmuludq 32*4-128($ap), $B1, $TEMP0
vpaddq $TEMP0, $ACC8, $ACC8
vpmuludq 32*4-128($aap), $B1, $TEMP1
vpaddq $TEMP1, $ACC0, $ACC0
vpmuludq 32*5-128($aap), $B1, $TEMP2
vpaddq $TEMP2, $ACC1, $ACC1
vpmuludq 32*6-128($aap), $B1, $TEMP0
vpaddq $TEMP0, $ACC2, $ACC2
vpmuludq 32*7-128($aap), $B1, $ACC3
vpbroadcastq 32*6-128($tpa), $B1
vpaddq 32*12-448($tp1), $ACC3, $ACC3
vmovdqu $ACC8, 32*8-192($tp0)
vmovdqu $ACC0, 32*9-192($tp0)
lea 8($tp0), $tp0
vpmuludq 32*5-128($ap), $B2, $TEMP2
vpaddq $TEMP2, $ACC1, $ACC1
vpmuludq 32*5-128($aap), $B2, $TEMP0
vpaddq $TEMP0, $ACC2, $ACC2
vpmuludq 32*6-128($aap), $B2, $TEMP1
vpaddq $TEMP1, $ACC3, $ACC3
vpmuludq 32*7-128($aap), $B2, $ACC4
vpbroadcastq 32*7-128($tpa), $B2
vpaddq 32*13-448($tp1), $ACC4, $ACC4
vmovdqu $ACC1, 32*10-448($tp1)
vmovdqu $ACC2, 32*11-448($tp1)
vpmuludq 32*6-128($ap), $B1, $TEMP0
vpaddq $TEMP0, $ACC3, $ACC3
vpmuludq 32*6-128($aap), $B1, $TEMP1
vpbroadcastq 32*8-128($tpa), $ACC0 # borrow $ACC0 for $B1
vpaddq $TEMP1, $ACC4, $ACC4
vpmuludq 32*7-128($aap), $B1, $ACC5
vpbroadcastq 32*0+8-128($tpa), $B1 # for next iteration
vpaddq 32*14-448($tp1), $ACC5, $ACC5
vmovdqu $ACC3, 32*12-448($tp1)
vmovdqu $ACC4, 32*13-448($tp1)
lea 8($tpa), $tpa
vpmuludq 32*7-128($ap), $B2, $TEMP0
vpaddq $TEMP0, $ACC5, $ACC5
vpmuludq 32*7-128($aap), $B2, $ACC6
vpaddq 32*15-448($tp1), $ACC6, $ACC6
vpmuludq 32*8-128($ap), $ACC0, $ACC7
vmovdqu $ACC5, 32*14-448($tp1)
vpaddq 32*16-448($tp1), $ACC7, $ACC7
vmovdqu $ACC6, 32*15-448($tp1)
vmovdqu $ACC7, 32*16-448($tp1)
lea 8($tp1), $tp1
dec $i
jnz .LOOP_SQR_1024
___
$ZERO = $ACC9;
$TEMP0 = $B1;
$TEMP2 = $B2;
$TEMP3 = $Y1;
$TEMP4 = $Y2;
$code.=<<___;
# we need to fix indices 32-39 to avoid overflow
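# (split the bits above 29 out of these words and carry them one 64-bit
# lane up via the vpermq/vpblendd rotation, before the reduction below
# piles further partial products on top of them)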
vmovdqu 32*8(%rsp), $ACC8 # 32*8-192($tp0),
vmovdqu 32*9(%rsp), $ACC1 # 32*9-192($tp0)
vmovdqu 32*10(%rsp), $ACC2 # 32*10-192($tp0)
lea 192(%rsp), $tp0 # 64+128=192
vpsrlq \$29, $ACC8, $TEMP1
vpand $AND_MASK, $ACC8, $ACC8
vpsrlq \$29, $ACC1, $TEMP2
vpand $AND_MASK, $ACC1, $ACC1
vpermq \$0x93, $TEMP1, $TEMP1
vpxor $ZERO, $ZERO, $ZERO
vpermq \$0x93, $TEMP2, $TEMP2
vpblendd \$3, $ZERO, $TEMP1, $TEMP0
vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
vpaddq $TEMP0, $ACC8, $ACC8
vpblendd \$3, $TEMP2, $ZERO, $TEMP2
vpaddq $TEMP1, $ACC1, $ACC1
vpaddq $TEMP2, $ACC2, $ACC2
vmovdqu $ACC1, 32*9-192($tp0)
vmovdqu $ACC2, 32*10-192($tp0)
mov (%rsp), %rax
mov 8(%rsp), $r1
mov 16(%rsp), $r2
mov 24(%rsp), $r3
vmovdqu 32*1(%rsp), $ACC1
vmovdqu 32*2-192($tp0), $ACC2
vmovdqu 32*3-192($tp0), $ACC3
vmovdqu 32*4-192($tp0), $ACC4
vmovdqu 32*5-192($tp0), $ACC5
vmovdqu 32*6-192($tp0), $ACC6
vmovdqu 32*7-192($tp0), $ACC7
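# scalar head of the Montgomery reduction: the multiplier for this word is
# eax = ($r0*$n0) mod 2^29; its products with the low words of $np are
# folded into $r0-$r3 while the broadcast copy in $Y1 feeds the vector
# multiplies of the loop below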
mov %rax, $r0
imull $n0, %eax
and \$0x1fffffff, %eax
vmovd %eax, $Y1
mov %rax, %rdx
imulq -128($np), %rax
vpbroadcastq $Y1, $Y1
add %rax, $r0
mov %rdx, %rax
imulq 8-128($np), %rax
shr \$29, $r0
add %rax, $r1
mov %rdx, %rax
imulq 16-128($np), %rax
add $r0, $r1
add %rax, $r2
imulq 24-128($np), %rdx
add %rdx, $r3
mov $r1, %rax
imull $n0, %eax
and \$0x1fffffff, %eax
mov \$9, $i
jmp .LOOP_REDUCE_1024
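# main reduction loop (9 passes): each pass derives fresh 29-bit multipliers
# from $n0, scales the modulus vectors (read at successive 8-byte shifts of
# $np) by them into $ACC1-$ACC9, and keeps folding and carrying the low
# scalar words $r0-$r3 that are being cancelled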
.align 32
.LOOP_REDUCE_1024:
vmovd %eax, $Y2
vpbroadcastq $Y2, $Y2
vpmuludq 32*1-128($np), $Y1, $TEMP0
mov %rax, %rdx
imulq -128($np), %rax
vpaddq $TEMP0, $ACC1, $ACC1
add %rax, $r1
vpmuludq 32*2-128($np), $Y1, $TEMP1
mov %rdx, %rax
imulq 8-128($np), %rax
vpaddq $TEMP1, $ACC2, $ACC2
vpmuludq 32*3-128($np), $Y1, $TEMP2
.byte 0x67
add %rax, $r2
.byte 0x67
mov %rdx, %rax
imulq 16-128($np), %rax
shr \$29, $r1
vpaddq $TEMP2, $ACC3, $ACC3
vpmuludq 32*4-128($np), $Y1, $TEMP0
add %rax, $r3
add $r1, $r2
vpaddq $TEMP0, $ACC4, $ACC4
vpmuludq 32*5-128($np), $Y1, $TEMP1
mov $r2, %rax
imull $n0, %eax
vpaddq $TEMP1, $ACC5, $ACC5
vpmuludq 32*6-128($np), $Y1, $TEMP2
and \$0x1fffffff, %eax
vpaddq $TEMP2, $ACC6, $ACC6
vpmuludq 32*7-128($np), $Y1, $TEMP0
vpaddq $TEMP0, $ACC7, $ACC7
vpmuludq 32*8-128($np), $Y1, $TEMP1
vmovd %eax, $Y1
#vmovdqu 32*1-8-128($np), $TEMP2 # moved below
vpaddq $TEMP1, $ACC8, $ACC8
#vmovdqu 32*2-8-128($np), $TEMP0 # moved below
vpbroadcastq $Y1, $Y1
vpmuludq 32*1-8-128($np), $Y2, $TEMP2 # see above
vmovdqu 32*3-8-128($np), $TEMP1
mov %rax, %rdx
imulq -128($np), %rax
vpaddq $TEMP2, $ACC1, $ACC1
vpmuludq 32*2-8-128($np), $Y2, $TEMP0 # see above
vmovdqu 32*4-8-128($np), $TEMP2
add %rax, $r2
mov %rdx, %rax
imulq 8-128($np), %rax
vpaddq $TEMP0, $ACC2, $ACC2
add $r3, %rax
shr \$29, $r2
vpmuludq $Y2, $TEMP1, $TEMP1
vmovdqu 32*5-8-128($np), $TEMP0
add $r2, %rax
vpaddq $TEMP1, $ACC3, $ACC3
vpmuludq $Y2, $TEMP2, $TEMP2
vmovdqu 32*6-8-128($np), $TEMP1
.byte 0x67
mov %rax, $r3
imull $n0, %eax
vpaddq $TEMP2, $ACC4, $ACC4
vpmuludq $Y2, $TEMP0, $TEMP0
.byte 0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00 # vmovdqu 32*7-8-128($np), $TEMP2
and \$0x1fffffff, %eax
vpaddq $TEMP0, $ACC5, $ACC5
vpmuludq $Y2, $TEMP1, $TEMP1
vmovdqu 32*8-8-128($np), $TEMP0
vpaddq $TEMP1, $ACC6, $ACC6
vpmuludq $Y2, $TEMP2, $TEMP2
vmovdqu 32*9-8-128($np), $ACC9
vmovd %eax, $ACC0 # borrow ACC0 for Y2
imulq -128($np), %rax
vpaddq $TEMP2, $ACC7, $ACC7
vpmuludq $Y2, $TEMP0, $TEMP0
vmovdqu 32*1-16-128($np), $TEMP1
vpbroadcastq $ACC0, $ACC0
vpaddq $TEMP0, $ACC8, $ACC8
vpmuludq $Y2, $ACC9, $ACC9
vmovdqu 32*2-16-128($np), $TEMP2
add %rax, $r3
___
($ACC0,$Y2)=($Y2,$ACC0);
$code.=<<___;
vmovdqu 32*1-24-128($np), $ACC0
vpmuludq $Y1, $TEMP1, $TEMP1
vmovdqu 32*3-16-128($np), $TEMP0
vpaddq $TEMP1, $ACC1, $ACC1
vpmuludq $Y2, $ACC0, $ACC0
vpmuludq $Y1, $TEMP2, $TEMP2
.byte 0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff # vmovdqu 32*4-16-128($np), $TEMP1
vpaddq $ACC1, $ACC0, $ACC0
vpaddq $TEMP2, $ACC2, $ACC2
vpmuludq $Y1, $TEMP0, $TEMP0
vmovdqu 32*5-16-128($np), $TEMP2
.byte 0x67
vmovq $ACC0, %rax
vmovdqu $ACC0, (%rsp) # transfer $r0-$r3
vpaddq $TEMP0, $ACC3, $ACC3
vpmuludq $Y1, $TEMP1, $TEMP1
vmovdqu 32*6-16-128($np), $TEMP0
vpaddq $TEMP1, $ACC4, $ACC4
vpmuludq $Y1, $TEMP2, $TEMP2
vmovdqu 32*7-16-128($np), $TEMP1
vpaddq $TEMP2, $ACC5, $ACC5
vpmuludq $Y1, $TEMP0, $TEMP0
vmovdqu 32*8-16-128($np), $TEMP2
vpaddq $TEMP0, $ACC6, $ACC6
vpmuludq $Y1, $TEMP1, $TEMP1
shr \$29, $r3
vmovdqu 32*9-16-128($np), $TEMP0
add $r3, %rax
vpaddq $TEMP1, $ACC7, $ACC7
vpmuludq $Y1, $TEMP2, $TEMP2
#vmovdqu 32*2-24-128($np), $TEMP1 # moved below
mov %rax, $r0
imull $n0, %eax
vpaddq $TEMP2, $ACC8, $ACC8
vpmuludq $Y1, $TEMP0, $TEMP0
and \$0x1fffffff, %eax
vmovd %eax, $Y1
vmovdqu 32*3-24-128($np), $TEMP2
.byte 0x67
vpaddq $TEMP0, $ACC9, $ACC9
vpbroadcastq $Y1, $Y1
vpmuludq 32*2-24-128($np), $Y2, $TEMP1 # see above
vmovdqu 32*4-24-128($np), $TEMP0
mov %rax, %rdx
imulq -128($np), %rax
mov 8(%rsp), $r1
vpaddq $TEMP1, $ACC2, $ACC1
vpmuludq $Y2, $TEMP2, $TEMP2
vmovdqu 32*5-24-128($np), $TEMP1
add %rax, $r0
mov %rdx, %rax
imulq 8-128($np), %rax
.byte 0x67
shr \$29, $r0
mov 16(%rsp), $r2
vpaddq $TEMP2, $ACC3, $ACC2
vpmuludq $Y2, $TEMP0, $TEMP0
vmovdqu 32*6-24-128($np), $TEMP2
add %rax, $r1
mov %rdx, %rax
imulq 16-128($np), %rax
vpaddq $TEMP0, $ACC4, $ACC3
vpmuludq $Y2, $TEMP1, $TEMP1
vmovdqu 32*7-24-128($np), $TEMP0
imulq 24-128($np), %rdx # future $r3
add %rax, $r2
lea ($r0,$r1), %rax
vpaddq $TEMP1, $ACC5, $ACC4
vpmuludq $Y2, $TEMP2, $TEMP2
vmovdqu 32*8-24-128($np), $TEMP1
mov %rax, $r1
imull $n0, %eax
vpmuludq $Y2, $TEMP0, $TEMP0
vpaddq $TEMP2, $ACC6, $ACC5
vmovdqu 32*9-24-128($np), $TEMP2
and \$0x1fffffff, %eax
vpaddq $TEMP0, $ACC7, $ACC6
vpmuludq $Y2, $TEMP1, $TEMP1
add 24(%rsp), %rdx
vpaddq $TEMP1, $ACC8, $ACC7
vpmuludq $Y2, $TEMP2, $TEMP2
vpaddq $TEMP2, $ACC9, $ACC8
vmovq $r3, $ACC9
mov %rdx, $r3
dec $i
jnz .LOOP_REDUCE_1024
___
($ACC0,$Y2)=($Y2,$ACC0);
$code.=<<___;
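# reduction done: fold in the high partial products still parked at $tp0 and
# $tp1, then squeeze every word back to 29 bits with two carry-propagation
# passes (shift/mask, rotate the carries one lane up, add) and store the
# result to $rp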
lea 448(%rsp), $tp1 # size optimization
vpaddq $ACC9, $Y2, $ACC0
vpxor $ZERO, $ZERO, $ZERO
vpaddq 32*9-192($tp0), $ACC0, $ACC0
vpaddq 32*10-448($tp1), $ACC1, $ACC1
vpaddq 32*11-448($tp1), $ACC2, $ACC2
vpaddq 32*12-448($tp1), $ACC3, $ACC3
vpaddq 32*13-448($tp1), $ACC4, $ACC4
vpaddq 32*14-448($tp1), $ACC5, $ACC5
vpaddq 32*15-448($tp1), $ACC6, $ACC6
vpaddq 32*16-448($tp1), $ACC7, $ACC7
vpaddq 32*17-448($tp1), $ACC8, $ACC8
vpsrlq \$29, $ACC0, $TEMP1
vpand $AND_MASK, $ACC0, $ACC0
vpsrlq \$29, $ACC1, $TEMP2
vpand $AND_MASK, $ACC1, $ACC1
vpsrlq \$29, $ACC2, $TEMP3
vpermq \$0x93, $TEMP1, $TEMP1
vpand $AND_MASK, $ACC2, $ACC2
vpsrlq \$29, $ACC3, $TEMP4
vpermq \$0x93, $TEMP2, $TEMP2
vpand $AND_MASK, $ACC3, $ACC3
vpermq \$0x93, $TEMP3, $TEMP3
vpblendd \$3, $ZERO, $TEMP1, $TEMP0
vpermq \$0x93, $TEMP4, $TEMP4
vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
vpaddq $TEMP0, $ACC0, $ACC0
vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
vpaddq $TEMP1, $ACC1, $ACC1
vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
vpaddq $TEMP2, $ACC2, $ACC2
vpblendd \$3, $TEMP4, $ZERO, $TEMP4
vpaddq $TEMP3, $ACC3, $ACC3
vpaddq $TEMP4, $ACC4, $ACC4
vpsrlq \$29, $ACC0, $TEMP1
vpand $AND_MASK, $ACC0, $ACC0
vpsrlq \$29, $ACC1, $TEMP2
vpand $AND_MASK, $ACC1, $ACC1
vpsrlq \$29, $ACC2, $TEMP3
vpermq \$0x93, $TEMP1, $TEMP1
vpand $AND_MASK, $ACC2, $ACC2
vpsrlq \$29, $ACC3, $TEMP4
vpermq \$0x93, $TEMP2, $TEMP2
vpand $AND_MASK, $ACC3, $ACC3
vpermq \$0x93, $TEMP3, $TEMP3
vpblendd \$3, $ZERO, $TEMP1, $TEMP0
vpermq \$0x93, $TEMP4, $TEMP4
vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
vpaddq $TEMP0, $ACC0, $ACC0
vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
vpaddq $TEMP1, $ACC1, $ACC1
vmovdqu $ACC0, 32*0-128($rp)
vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
vpaddq $TEMP2, $ACC2, $ACC2
vmovdqu $ACC1, 32*1-128($rp)
vpblendd \$3, $TEMP4, $ZERO, $TEMP4
vpaddq $TEMP3, $ACC3, $ACC3
vmovdqu $ACC2, 32*2-128($rp)
vpaddq $TEMP4, $ACC4, $ACC4
vmovdqu $ACC3, 32*3-128($rp)
___
$TEMP5=$ACC0;
$code.=<<___;
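# same two-pass carry normalization for the upper words $ACC4-$ACC8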
vpsrlq \$29, $ACC4, $TEMP1
vpand $AND_MASK, $ACC4, $ACC4
vpsrlq \$29, $ACC5, $TEMP2
vpand $AND_MASK, $ACC5, $ACC5
vpsrlq \$29, $ACC6, $TEMP3
vpermq \$0x93, $TEMP1, $TEMP1
vpand $AND_MASK, $ACC6, $ACC6
vpsrlq \$29, $ACC7, $TEMP4
vpermq \$0x93, $TEMP2, $TEMP2
vpand $AND_MASK, $ACC7, $ACC7
vpsrlq \$29, $ACC8, $TEMP5
vpermq \$0x93, $TEMP3, $TEMP3
vpand $AND_MASK, $ACC8, $ACC8
vpermq \$0x93, $TEMP4, $TEMP4
vpblendd \$3, $ZERO, $TEMP1, $TEMP0
vpermq \$0x93, $TEMP5, $TEMP5
vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
vpaddq $TEMP0, $ACC4, $ACC4
vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
vpaddq $TEMP1, $ACC5, $ACC5
vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
vpaddq $TEMP2, $ACC6, $ACC6
vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
vpaddq $TEMP3, $ACC7, $ACC7
vpaddq $TEMP4, $ACC8, $ACC8
vpsrlq \$29, $ACC4, $TEMP1
vpand $AND_MASK, $ACC4, $ACC4
vpsrlq \$29, $ACC5, $TEMP2
vpand $AND_MASK, $ACC5, $ACC5
vpsrlq \$29, $ACC6, $TEMP3
vpermq \$0x93, $TEMP1, $TEMP1
vpand $AND_MASK, $ACC6, $ACC6
vpsrlq \$29, $ACC7, $TEMP4
vpermq \$0x93, $TEMP2, $TEMP2
vpand $AND_MASK, $ACC7, $ACC7
vpsrlq \$29, $ACC8, $TEMP5
vpermq \$0x93, $TEMP3, $TEMP3
vpand $AND_MASK, $ACC8, $ACC8
vpermq \$0x93, $TEMP4, $TEMP4
vpblendd \$3, $ZERO, $TEMP1, $TEMP0
vpermq \$0x93, $TEMP5, $TEMP5
vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
vpaddq $TEMP0, $ACC4, $ACC4
vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
vpaddq $TEMP1, $ACC5, $ACC5
vmovdqu $ACC4, 32*4-128($rp)
vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
vpaddq $TEMP2, $ACC6, $ACC6
vmovdqu $ACC5, 32*5-128($rp)
vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
vpaddq $TEMP3, $ACC7, $ACC7
vmovdqu $ACC6, 32*6-128($rp)
vpaddq $TEMP4, $ACC8, $ACC8
vmovdqu $ACC7, 32*7-128($rp)
vmovdqu $ACC8, 32*8-128($rp)
mov $rp, $ap
dec $rep
jne .LOOP_GRANDE_SQR_1024
vzeroall
mov %rbp, %rax
.cfi_def_cfa_register %rax
___
$code.=<<___ if ($win64);
.Lsqr_1024_in_tail:
movaps -0xd8(%rax),%xmm6
movaps -0xc8(%rax),%xmm7
movaps -0xb8(%rax),%xmm8
movaps -0xa8(%rax),%xmm9
movaps -0x98(%rax),%xmm10
movaps -0x88(%rax),%xmm11
movaps -0x78(%rax),%xmm12
movaps -0x68(%rax),%xmm13
movaps -0x58(%rax),%xmm14
movaps -0x48(%rax),%xmm15
___
$code.=<<___;
mov -48(%rax),%r15
.cfi_restore %r15
mov -40(%rax),%r14
.cfi_restore %r14
mov -32(%rax),%r13
.cfi_restore %r13
mov -24(%rax),%r12
.cfi_restore %r12
mov -16(%rax),%rbp
.cfi_restore %rbp
mov -8(%rax),%rbx
.cfi_restore %rbx
lea (%rax),%rsp # restore %rsp
.cfi_def_cfa_register %rsp
.Lsqr_1024_epilogue:
ret
.cfi_endproc
.size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
___
}
{ # void AMM_WW(