in ring/crypto/fipsmodule/bn/asm/x86_64-mont5.pl [1129:1752]


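	##############################################################
	# Frame placement (the setup of %r10, %r11 and %rbp sits just
	# above this excerpt): the masked difference between the
	# prospective frame and $rptr decides whether %rbp can simply be
	# pulled down, or whether .Lpwr_sp_alt has to cap the adjustment
	# first.  Either way the goal is a frame+2*num*8+256-byte scratch
	# area that does not alias the rp/am/n buffers modulo 4096, so
	# that the CPU's memory-disambiguation logic stays effective.
	#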
	sub	$rptr,%r11
	and	\$4095,%r11
	cmp	%r11,%r10
	jb	.Lpwr_sp_alt
	sub	%r11,%rbp		# align with $aptr
	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
	jmp	.Lpwr_sp_done

.align	32
.Lpwr_sp_alt:
	lea	4096-320(,$num,2),%r10
	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
	sub	%r10,%r11
	mov	\$0,%r10
	cmovc	%r10,%r11
	sub	%r11,%rbp
.Lpwr_sp_done:
	and	\$-64,%rbp
	mov	%rsp,%r11
	sub	%rbp,%r11
	and	\$-4096,%r11
	lea	(%rbp,%r11),%rsp
	mov	(%rsp),%r10
	cmp	%rbp,%rsp
	ja	.Lpwr_page_walk
	jmp	.Lpwr_page_walk_done

.Lpwr_page_walk:
	lea	-4096(%rsp),%rsp
	mov	(%rsp),%r10
	cmp	%rbp,%rsp
	ja	.Lpwr_page_walk
.Lpwr_page_walk_done:
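	# The walk above touches one word in each 4096-byte page between
	# the old and the new stack pointer, so every OS guard page is
	# hit in order and the stack can be grown safely (stack probing).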

	mov	$num,%r10
	neg	$num

	##############################################################
	# Stack layout
	#
	# +0	saved $num, used in reduction section
	# +8	&t[2*$num], used in reduction section
	# +32	saved *n0
	# +40	saved %rsp
	# +48	t[2*$num]
	#
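	# (+0 and +8 are filled in later by the callees' reduction code;
	# only +32 and +40 are initialized here.)
	#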
	mov	$n0,  32(%rsp)
	mov	%rax, 40(%rsp)		# save original %rsp
.cfi_cfa_expression	%rsp+40,deref,+8
.Lpower5_body:
	movq	$rptr,%xmm1		# save $rptr, used in sqr8x
	movq	$nptr,%xmm2		# save $nptr
	movq	%r10, %xmm3		# -$num, used in sqr8x
	movq	$bptr,%xmm4
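	# The internal helpers called below clobber the general-purpose
	# argument registers, so $rptr, $nptr, -$num and $bptr are parked
	# in %xmm1-%xmm4, which these routines leave untouched.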

	call	__bn_sqr8x_internal
	call	__bn_post4x_internal
	call	__bn_sqr8x_internal
	call	__bn_post4x_internal
	call	__bn_sqr8x_internal
	call	__bn_post4x_internal
	call	__bn_sqr8x_internal
	call	__bn_post4x_internal
	call	__bn_sqr8x_internal
	call	__bn_post4x_internal
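	# Five back-to-back Montgomery squarings raise the input to the
	# 2^5-th power; the mul4x_internal call below then multiplies in
	# $bptr, so the routine as a whole computes r = a^32 * b mod n
	# (all values in the Montgomery domain).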

	movq	%xmm2,$nptr
	movq	%xmm4,$bptr
	mov	$aptr,$rptr
	mov	40(%rsp),%rax
	lea	32(%rsp),$n0

	call	mul4x_internal

	mov	40(%rsp),%rsi		# restore %rsp
.cfi_def_cfa	%rsi,8
	mov	\$1,%rax
	mov	-48(%rsi),%r15
.cfi_restore	%r15
	mov	-40(%rsi),%r14
.cfi_restore	%r14
	mov	-32(%rsi),%r13
.cfi_restore	%r13
	mov	-24(%rsi),%r12
.cfi_restore	%r12
	mov	-16(%rsi),%rbp
.cfi_restore	%rbp
	mov	-8(%rsi),%rbx
.cfi_restore	%rbx
	lea	(%rsi),%rsp
.cfi_def_cfa_register	%rsp
.Lpower5_epilogue:
	ret
.cfi_endproc
.size	GFp_bn_power5,.-GFp_bn_power5

.globl	GFp_bn_sqr8x_internal
.hidden	GFp_bn_sqr8x_internal
.type	GFp_bn_sqr8x_internal,\@abi-omnipotent
.align	32
GFp_bn_sqr8x_internal:
__bn_sqr8x_internal:
.cfi_startproc
	##############################################################
	# Squaring part:
	#
	# a) multiply-n-add everything but a[i]*a[i];
	# b) shift result of a) by 1 to the left and accumulate
	#    a[i]*a[i] products;
	#
	##############################################################
	#                                                     a[1]a[0]
	#                                                 a[2]a[0]
	#                                             a[3]a[0]
	#                                             a[2]a[1]
	#                                         a[4]a[0]
	#                                         a[3]a[1]
	#                                     a[5]a[0]
	#                                     a[4]a[1]
	#                                     a[3]a[2]
	#                                 a[6]a[0]
	#                                 a[5]a[1]
	#                                 a[4]a[2]
	#                             a[7]a[0]
	#                             a[6]a[1]
	#                             a[5]a[2]
	#                             a[4]a[3]
	#                         a[7]a[1]
	#                         a[6]a[2]
	#                         a[5]a[3]
	#                     a[7]a[2]
	#                     a[6]a[3]
	#                     a[5]a[4]
	#                 a[7]a[3]
	#                 a[6]a[4]
	#             a[7]a[4]
	#             a[6]a[5]
	#         a[7]a[5]
	#     a[7]a[6]
	#                                                     a[1]a[0]
	#                                                 a[2]a[0]
	#                                             a[3]a[0]
	#                                         a[4]a[0]
	#                                     a[5]a[0]
	#                                 a[6]a[0]
	#                             a[7]a[0]
	#                                             a[2]a[1]
	#                                         a[3]a[1]
	#                                     a[4]a[1]
	#                                 a[5]a[1]
	#                             a[6]a[1]
	#                         a[7]a[1]
	#                                     a[3]a[2]
	#                                 a[4]a[2]
	#                             a[5]a[2]
	#                         a[6]a[2]
	#                     a[7]a[2]
	#                             a[4]a[3]
	#                         a[5]a[3]
	#                     a[6]a[3]
	#                 a[7]a[3]
	#                     a[5]a[4]
	#                 a[6]a[4]
	#             a[7]a[4]
	#             a[6]a[5]
	#         a[7]a[5]
	#     a[7]a[6]
	#                                                         a[0]a[0]
	#                                                 a[1]a[1]
	#                                         a[2]a[2]
	#                                 a[3]a[3]
	#                         a[4]a[4]
	#                 a[5]a[5]
	#         a[6]a[6]
	# a[7]a[7]
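	#
	# A minimal C model of the two phases above (a sketch for
	# illustration only: sqr_ref, ap and tp are names local to this
	# comment, not to the assembly; assumes <stdint.h>, <string.h>
	# and a compiler with unsigned __int128):
	#
	#	static void sqr_ref(uint64_t *tp, const uint64_t *ap, int num)
	#	{
	#		unsigned __int128 w;
	#		uint64_t c, t;
	#		int i, j;
	#
	#		memset(tp, 0, 2*num*sizeof(tp[0]));
	#		/* a) multiply-n-add everything but a[i]*a[i] */
	#		for (i = 0; i < num; i++) {
	#			c = 0;
	#			for (j = i+1; j < num; j++) {
	#				w = (unsigned __int128)ap[i]*ap[j] + tp[i+j] + c;
	#				tp[i+j] = (uint64_t)w;
	#				c = (uint64_t)(w >> 64);
	#			}
	#			tp[i+num] = c;
	#		}
	#		/* b) shift left by 1, add the a[i]*a[i] diagonal */
	#		for (c = 0, i = 0; i < 2*num; i++) {
	#			t = tp[i];
	#			tp[i] = (t << 1) | c;
	#			c = t >> 63;
	#		}
	#		for (c = 0, i = 0; i < num; i++) {
	#			w = (unsigned __int128)ap[i]*ap[i] + tp[2*i] + c;
	#			tp[2*i] = (uint64_t)w;
	#			w = (w >> 64) + tp[2*i+1];
	#			tp[2*i+1] = (uint64_t)w;
	#			c = (uint64_t)(w >> 64);
	#		}
	#	}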

	lea	32(%r10),$i		# $i=-($num-32)
	lea	($aptr,$num),$aptr	# end of a[] buffer, ($aptr,$i)=&ap[2]

	mov	$num,$j			# $j=$num

					# comments apply to $num==8 case
	mov	-32($aptr,$i),$a0	# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr,$i),%rax	# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr,$i),$ai	# a[2]
	mov	%rax,$a1

	mul	$a0			# a[1]*a[0]
	mov	%rax,$A0[0]		# a[1]*a[0]
	 mov	$ai,%rax		# a[2]
	mov	%rdx,$A0[1]
	mov	$A0[0],-24($tptr,$i)	# t[1]

	mul	$a0			# a[2]*a[0]
	add	%rax,$A0[1]
	 mov	$ai,%rax
	adc	\$0,%rdx
	mov	$A0[1],-16($tptr,$i)	# t[2]
	mov	%rdx,$A0[0]


	 mov	-8($aptr,$i),$ai	# a[3]
	mul	$a1			# a[2]*a[1]
	mov	%rax,$A1[0]		# a[2]*a[1]+t[3]
	 mov	$ai,%rax
	mov	%rdx,$A1[1]

	 lea	($i),$j
	mul	$a0			# a[3]*a[0]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	 mov	$ai,%rax
	mov	%rdx,$A0[1]
	adc	\$0,$A0[1]
	add	$A1[0],$A0[0]
	adc	\$0,$A0[1]
	mov	$A0[0],-8($tptr,$j)	# t[3]
	jmp	.Lsqr4x_1st
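	# The loop below handles four limbs of a[] per iteration: the
	# a[j]*a[0] and a[j]*a[1] rows are accumulated in the interleaved
	# A0/A1 register pairs and t[4]..t[7] are stored on the way out;
	# it terminates when $j reaches zero at the top of a[].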

.align	32
.Lsqr4x_1st:
	 mov	($aptr,$j),$ai		# a[4]
	mul	$a1			# a[3]*a[1]
	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
	 mov	$ai,%rax
	mov	%rdx,$A1[0]
	adc	\$0,$A1[0]

	mul	$a0			# a[4]*a[0]
	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
	 mov	$ai,%rax		# a[4]
	 mov	8($aptr,$j),$ai		# a[5]
	mov	%rdx,$A0[0]
	adc	\$0,$A0[0]
	add	$A1[1],$A0[1]
	adc	\$0,$A0[0]


	mul	$a1			# a[4]*a[1]
	add	%rax,$A1[0]		# a[4]*a[1]+t[5]
	 mov	$ai,%rax
	 mov	$A0[1],($tptr,$j)	# t[4]
	mov	%rdx,$A1[1]
	adc	\$0,$A1[1]

	mul	$a0			# a[5]*a[0]
	add	%rax,$A0[0]		# a[5]*a[0]+a[4]*a[1]+t[5]
	 mov	$ai,%rax
	 mov	16($aptr,$j),$ai	# a[6]
	mov	%rdx,$A0[1]
	adc	\$0,$A0[1]
	add	$A1[0],$A0[0]
	adc	\$0,$A0[1]

	mul	$a1			# a[5]*a[1]
	add	%rax,$A1[1]		# a[5]*a[1]+t[6]
	 mov	$ai,%rax
	 mov	$A0[0],8($tptr,$j)	# t[5]
	mov	%rdx,$A1[0]
	adc	\$0,$A1[0]

	mul	$a0			# a[6]*a[0]
	add	%rax,$A0[1]		# a[6]*a[0]+a[5]*a[1]+t[6]
	 mov	$ai,%rax		# a[6]
	 mov	24($aptr,$j),$ai	# a[7]
	mov	%rdx,$A0[0]
	adc	\$0,$A0[0]
	add	$A1[1],$A0[1]
	adc	\$0,$A0[0]


	mul	$a1			# a[6]*a[1]
	add	%rax,$A1[0]		# a[6]*a[1]+t[7]
	 mov	$ai,%rax
	 mov	$A0[1],16($tptr,$j)	# t[6]
	mov	%rdx,$A1[1]
	adc	\$0,$A1[1]
	 lea	32($j),$j

	mul	$a0			# a[7]*a[0]
	add	%rax,$A0[0]		# a[7]*a[0]+a[6]*a[1]+t[7]
	 mov	$ai,%rax
	mov	%rdx,$A0[1]
	adc	\$0,$A0[1]
	add	$A1[0],$A0[0]
	adc	\$0,$A0[1]
	mov	$A0[0],-8($tptr,$j)	# t[7]

	cmp	\$0,$j
	jne	.Lsqr4x_1st

	mul	$a1			# a[7]*a[1]
	add	%rax,$A1[1]
	lea	16($i),$i
	adc	\$0,%rdx
	add	$A0[1],$A1[1]
	adc	\$0,%rdx

	mov	$A1[1],($tptr)		# t[8]
	mov	%rdx,$A1[0]
	mov	%rdx,8($tptr)		# t[9]
	jmp	.Lsqr4x_outer
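	# Unlike the first pass, each .Lsqr4x_outer iteration reads back
	# the t[] words written by earlier passes (the -24($tptr,$i) and
	# -16($tptr,$i) loads below) and folds two more rows into them,
	# advancing the window by two limbs ($i grows by 16 bytes at the
	# bottom of the loop).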

.align	32
.Lsqr4x_outer:				# comments apply to $num==6 case
	mov	-32($aptr,$i),$a0	# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr,$i),%rax	# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr,$i),$ai	# a[2]
	mov	%rax,$a1

	mul	$a0			# a[1]*a[0]
	mov	-24($tptr,$i),$A0[0]	# t[1]
	add	%rax,$A0[0]		# a[1]*a[0]+t[1]
	 mov	$ai,%rax		# a[2]
	adc	\$0,%rdx
	mov	$A0[0],-24($tptr,$i)	# t[1]
	mov	%rdx,$A0[1]

	mul	$a0			# a[2]*a[0]
	add	%rax,$A0[1]
	 mov	$ai,%rax
	adc	\$0,%rdx
	add	-16($tptr,$i),$A0[1]	# a[2]*a[0]+t[2]
	mov	%rdx,$A0[0]
	adc	\$0,$A0[0]
	mov	$A0[1],-16($tptr,$i)	# t[2]

	xor	$A1[0],$A1[0]

	 mov	-8($aptr,$i),$ai	# a[3]
	mul	$a1			# a[2]*a[1]
	add	%rax,$A1[0]		# a[2]*a[1]+t[3]
	 mov	$ai,%rax
	adc	\$0,%rdx
	add	-8($tptr,$i),$A1[0]
	mov	%rdx,$A1[1]
	adc	\$0,$A1[1]

	mul	$a0			# a[3]*a[0]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	 mov	$ai,%rax
	adc	\$0,%rdx
	add	$A1[0],$A0[0]
	mov	%rdx,$A0[1]
	adc	\$0,$A0[1]
	mov	$A0[0],-8($tptr,$i)	# t[3]

	lea	($i),$j
	jmp	.Lsqr4x_inner

.align	32
.Lsqr4x_inner:
	 mov	($aptr,$j),$ai		# a[4]
	mul	$a1			# a[3]*a[1]
	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
	 mov	$ai,%rax
	mov	%rdx,$A1[0]
	adc	\$0,$A1[0]
	add	($tptr,$j),$A1[1]
	adc	\$0,$A1[0]

	.byte	0x67
	mul	$a0			# a[4]*a[0]
	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
	 mov	$ai,%rax		# a[4]
	 mov	8($aptr,$j),$ai		# a[5]
	mov	%rdx,$A0[0]
	adc	\$0,$A0[0]
	add	$A1[1],$A0[1]
	adc	\$0,$A0[0]

	mul	$a1			# a[4]*a[1]
	add	%rax,$A1[0]		# a[4]*a[1]+t[5]
	mov	$A0[1],($tptr,$j)	# t[4]
	 mov	$ai,%rax
	mov	%rdx,$A1[1]
	adc	\$0,$A1[1]
	add	8($tptr,$j),$A1[0]
	lea	16($j),$j		# j++
	adc	\$0,$A1[1]

	mul	$a0			# a[5]*a[0]
	add	%rax,$A0[0]		# a[5]*a[0]+a[4]*a[1]+t[5]
	 mov	$ai,%rax
	adc	\$0,%rdx
	add	$A1[0],$A0[0]
	mov	%rdx,$A0[1]
	adc	\$0,$A0[1]
	mov	$A0[0],-8($tptr,$j)	# t[5], "preloaded t[1]" below

	cmp	\$0,$j
	jne	.Lsqr4x_inner

	.byte	0x67
	mul	$a1			# a[5]*a[1]
	add	%rax,$A1[1]
	adc	\$0,%rdx
	add	$A0[1],$A1[1]
	adc	\$0,%rdx

	mov	$A1[1],($tptr)		# t[6], "preloaded t[2]" below
	mov	%rdx,$A1[0]
	mov	%rdx,8($tptr)		# t[7], "preloaded t[3]" below

	add	\$16,$i
	jnz	.Lsqr4x_outer
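	# With $i back at zero, fall through to the peeled final window:
	# t[1]..t[3] of this window are still live in registers from the
	# last inner pass (the "preloaded" values referenced below).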

					# comments apply to $num==4 case
	mov	-32($aptr),$a0		# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr),%rax		# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr),$ai		# a[2]
	mov	%rax,$a1

	mul	$a0			# a[1]*a[0]
	add	%rax,$A0[0]		# a[1]*a[0]+t[1], preloaded t[1]
	 mov	$ai,%rax		# a[2]
	mov	%rdx,$A0[1]
	adc	\$0,$A0[1]

	mul	$a0			# a[2]*a[0]
	add	%rax,$A0[1]
	 mov	$ai,%rax
	 mov	$A0[0],-24($tptr)	# t[1]
	mov	%rdx,$A0[0]
	adc	\$0,$A0[0]
	add	$A1[1],$A0[1]		# a[2]*a[0]+t[2], preloaded t[2]
	 mov	-8($aptr),$ai		# a[3]
	adc	\$0,$A0[0]

	mul	$a1			# a[2]*a[1]
	add	%rax,$A1[0]		# a[2]*a[1]+t[3], preloaded t[3]
	 mov	$ai,%rax
	 mov	$A0[1],-16($tptr)	# t[2]
	mov	%rdx,$A1[1]
	adc	\$0,$A1[1]

	mul	$a0			# a[3]*a[0]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	 mov	$ai,%rax
	mov	%rdx,$A0[1]
	adc	\$0,$A0[1]
	add	$A1[0],$A0[0]
	adc	\$0,$A0[1]
	mov	$A0[0],-8($tptr)	# t[3]

	mul	$a1			# a[3]*a[1]
	add	%rax,$A1[1]
	 mov	-16($aptr),%rax		# a[2]
	adc	\$0,%rdx
	add	$A0[1],$A1[1]
	adc	\$0,%rdx

	mov	$A1[1],($tptr)		# t[4]
	mov	%rdx,$A1[0]
	mov	%rdx,8($tptr)		# t[5]

	mul	$ai			# a[2]*a[3]
___
{
my ($shift,$carry)=($a0,$a1);
my @S=(@A1,$ai,$n0);
$code.=<<___;
	 add	\$16,$i
	 xor	$shift,$shift
	 sub	$num,$i			# $i=16-$num
	 xor	$carry,$carry

	add	$A1[0],%rax		# t[5]
	adc	\$0,%rdx
	mov	%rax,8($tptr)		# t[5]
	mov	%rdx,16($tptr)		# t[6]
	mov	$carry,24($tptr)	# t[7]

	 mov	-16($aptr,$i),%rax	# a[0]
	lea	48+8(%rsp),$tptr
	 xor	$A0[0],$A0[0]		# t[0]
	 mov	8($tptr),$A0[1]		# t[1]

	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	 mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	 mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],($tptr)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	 mov	$S[1],8($tptr)
	 sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	 mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	 mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],16($tptr)
	adc	%rdx,$S[3]
	lea	16($i),$i
	mov	$S[3],24($tptr)
	sbb	$carry,$carry		# mov cf,$carry
	lea	64($tptr),$tptr
	jmp	.Lsqr4x_shift_n_add
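	# Carry bookkeeping in this phase: each t[] word is doubled with
	# lea ($shift,...,2), its top bit carried to the next word via
	# shr \$63 and $shift, while the unrelated add-carry from the
	# a[i]*a[i] accumulation survives those flag-clobbering
	# instructions through the pair
	#
	#	sbb	$carry,$carry	# $carry = CF ? -1 : 0 (save CF)
	#	...
	#	neg	$carry		# CF = ($carry != 0)  (restore CF)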

.align	32
.Lsqr4x_shift_n_add:
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	 mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	 mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],-32($tptr)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	 mov	$S[1],-24($tptr)
	 sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	 mov	0($tptr),$A0[0]		# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	8($tptr),$A0[1]		# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	 mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],-16($tptr)
	adc	%rdx,$S[3]

	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	 mov	$S[3],-8($tptr)
	 sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	 mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	 mov	8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],0($tptr)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	 mov	$S[1],8($tptr)
	 sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	 mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	 mov	16($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],16($tptr)
	adc	%rdx,$S[3]
	mov	$S[3],24($tptr)
	sbb	$carry,$carry		# mov cf,$carry
	lea	64($tptr),$tptr
	add	\$32,$i
	jnz	.Lsqr4x_shift_n_add

	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	.byte	0x67
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	 mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	 mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	 mov	-8($aptr),%rax		# a[i+1]	# prefetch
	mov	$S[0],-32($tptr)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1|shift
	 mov	$S[1],-24($tptr)
	 sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	adc	%rax,$S[2]
	adc	%rdx,$S[3]
	mov	$S[2],-16($tptr)
	mov	$S[3],-8($tptr)
___
}