in arch/x86/crypto/poly1305-x86_64-cryptogams.pl [1533:2813]
sub poly1305_blocks_avxN {
my ($avx512) = @_;
my $suffix = $avx512 ? "_avx512" : "";
$code.=<<___;
.cfi_startproc
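# lengths below 128 bytes with the hash still in base 2^64 are
# handed to the scalar .Lblocks; otherwise the hash is converted to
# base 2^64 if needed, any blocks beyond a multiple of 64 bytes are
# consumed with the scalar __poly1305_block, the key powers are set
# up on first use, and the vector code below does the rest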
mov 20($ctx),%r8d # is_base2_26
cmp \$128,$len
jae .Lblocks_avx2$suffix
test %r8d,%r8d
jz .Lblocks
.Lblocks_avx2$suffix:
and \$-16,$len
jz .Lno_data_avx2$suffix
vzeroupper
test %r8d,%r8d
jz .Lbase2_64_avx2$suffix
test \$63,$len
jz .Leven_avx2$suffix
push %rbp
.cfi_push %rbp
mov %rsp,%rbp
push %rbx
.cfi_push %rbx
push %r12
.cfi_push %r12
push %r13
.cfi_push %r13
push %r14
.cfi_push %r14
push %r15
.cfi_push %r15
.Lblocks_avx2_body$suffix:
mov $len,%r15 # reassign $len
mov 0($ctx),$d1 # load hash value
mov 8($ctx),$d2
mov 16($ctx),$h2#d
mov 24($ctx),$r0 # load r
mov 32($ctx),$s1
################################# base 2^26 -> base 2^64
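# the five 26-bit limbs h[0..4] stored at 0..16($ctx) are repacked
# into h = h0 + h1*2^64 + h2*2^128:
#   h0 = h[0] + h[1]<<26 + h[2]<<52 (mod 2^64)
#   h1 = h[2]>>12 + h[3]<<14 + h[4]<<40 (mod 2^64)
#   h2 = h[4]>>24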
mov $d1#d,$h0#d
and \$`-1*(1<<31)`,$d1
mov $d2,$r1 # borrow $r1
mov $d2#d,$h1#d
and \$`-1*(1<<31)`,$d2
shr \$6,$d1
shl \$52,$r1
add $d1,$h0
shr \$12,$h1
shr \$18,$d2
add $r1,$h0
adc $d2,$h1
mov $h2,$d1
shl \$40,$d1
shr \$24,$h2
add $d1,$h1
adc \$0,$h2 # can be partially reduced...
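# h2 may exceed 3 here; its bits 2 and up weigh 2^130 = 5 mod 2^130-5,
# so they are stripped off below and folded back in multiplied by 5:
# (h2 & ~3) + (h2 >> 2) = 5*(h2 >> 2)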
mov \$-4,$d2 # ... so reduce
mov $h2,$d1
and $h2,$d2
shr \$2,$d1
and \$3,$h2
add $d2,$d1 # =*5
add $d1,$h0
adc \$0,$h1
adc \$0,$h2
mov $s1,$r1
mov $s1,%rax
shr \$2,$s1
add $r1,$s1 # s1 = r1 + (r1 >> 2)
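# r1 is clamped so that its low two bits are zero, hence
# s1 = r1 + (r1>>2) = 5*r1/4 is exact; it is the multiplier
# __poly1305_block uses for products weighing 2^128 and above,
# because r1*2^128 = (r1/4)*2^130 = 5*(r1/4) mod 2^130-5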
.Lbase2_26_pre_avx2$suffix:
add 0($inp),$h0 # accumulate input
adc 8($inp),$h1
lea 16($inp),$inp
adc $padbit,$h2
sub \$16,%r15
call __poly1305_block
mov $r1,%rax
test \$63,%r15
jnz .Lbase2_26_pre_avx2$suffix
test $padbit,$padbit # if $padbit is zero,
jz .Lstore_base2_64_avx2$suffix # store hash in base 2^64 format
################################# base 2^64 -> base 2^26
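# split h = h0 + h1*2^64 + h2*2^128 back into five 26-bit limbs:
# h[0] = h0 & 0x3ffffff          h[1] = (h0>>26) & 0x3ffffff
# h[2] = (h0>>52 | h1<<12) & 0x3ffffff
# h[3] = (h1>>14) & 0x3ffffff    h[4] = h1>>40 | h2<<24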
mov $h0,%rax
mov $h0,%rdx
shr \$52,$h0
mov $h1,$r0
mov $h1,$r1
shr \$26,%rdx
and \$0x3ffffff,%rax # h[0]
shl \$12,$r0
and \$0x3ffffff,%rdx # h[1]
shr \$14,$h1
or $r0,$h0
shl \$24,$h2
and \$0x3ffffff,$h0 # h[2]
shr \$40,$r1
and \$0x3ffffff,$h1 # h[3]
or $r1,$h2 # h[4]
test %r15,%r15
jz .Lstore_base2_26_avx2$suffix
vmovd %rax#d,%x#$H0
vmovd %rdx#d,%x#$H1
vmovd $h0#d,%x#$H2
vmovd $h1#d,%x#$H3
vmovd $h2#d,%x#$H4
jmp .Lproceed_avx2$suffix
.align 32
.Lstore_base2_64_avx2$suffix:
mov $h0,0($ctx)
mov $h1,8($ctx)
mov $h2,16($ctx) # note that is_base2_26 is zeroed
jmp .Ldone_avx2$suffix
.align 16
.Lstore_base2_26_avx2$suffix:
mov %rax#d,0($ctx) # store hash value base 2^26
mov %rdx#d,4($ctx)
mov $h0#d,8($ctx)
mov $h1#d,12($ctx)
mov $h2#d,16($ctx)
.align 16
.Ldone_avx2$suffix:
pop %r15
.cfi_restore %r15
pop %r14
.cfi_restore %r14
pop %r13
.cfi_restore %r13
pop %r12
.cfi_restore %r12
pop %rbx
.cfi_restore %rbx
pop %rbp
.cfi_restore %rbp
.Lno_data_avx2$suffix:
.Lblocks_avx2_epilogue$suffix:
ret
.cfi_endproc
.align 32
.Lbase2_64_avx2$suffix:
.cfi_startproc
push %rbp
.cfi_push %rbp
mov %rsp,%rbp
push %rbx
.cfi_push %rbx
push %r12
.cfi_push %r12
push %r13
.cfi_push %r13
push %r14
.cfi_push %r14
push %r15
.cfi_push %r15
.Lbase2_64_avx2_body$suffix:
mov $len,%r15 # reassign $len
mov 24($ctx),$r0 # load r
mov 32($ctx),$s1
mov 0($ctx),$h0 # load hash value
mov 8($ctx),$h1
mov 16($ctx),$h2#d
mov $s1,$r1
mov $s1,%rax
shr \$2,$s1
add $r1,$s1 # s1 = r1 + (r1 >> 2)
test \$63,$len
jz .Linit_avx2$suffix
.Lbase2_64_pre_avx2$suffix:
add 0($inp),$h0 # accumulate input
adc 8($inp),$h1
lea 16($inp),$inp
adc $padbit,$h2
sub \$16,%r15
call __poly1305_block
mov $r1,%rax
test \$63,%r15
jnz .Lbase2_64_pre_avx2$suffix
.Linit_avx2$suffix:
################################# base 2^64 -> base 2^26
mov $h0,%rax
mov $h0,%rdx
shr \$52,$h0
mov $h1,$d1
mov $h1,$d2
shr \$26,%rdx
and \$0x3ffffff,%rax # h[0]
shl \$12,$d1
and \$0x3ffffff,%rdx # h[1]
shr \$14,$h1
or $d1,$h0
shl \$24,$h2
and \$0x3ffffff,$h0 # h[2]
shr \$40,$d2
and \$0x3ffffff,$h1 # h[3]
or $d2,$h2 # h[4]
vmovd %rax#d,%x#$H0
vmovd %rdx#d,%x#$H1
vmovd $h0#d,%x#$H2
vmovd $h1#d,%x#$H3
vmovd $h2#d,%x#$H4
movl \$1,20($ctx) # set is_base2_26
call __poly1305_init_avx
.Lproceed_avx2$suffix:
mov %r15,$len # restore $len
___
$code.=<<___ if (!$kernel);
mov OPENSSL_ia32cap_P+8(%rip),%r9d
mov \$`(1<<31|1<<30|1<<16)`,%r11d
___
$code.=<<___;
pop %r15
.cfi_restore %r15
pop %r14
.cfi_restore %r14
pop %r13
.cfi_restore %r13
pop %r12
.cfi_restore %r12
pop %rbx
.cfi_restore %rbx
pop %rbp
.cfi_restore %rbp
.Lbase2_64_avx2_epilogue$suffix:
jmp .Ldo_avx2$suffix
.cfi_endproc
.align 32
.Leven_avx2$suffix:
.cfi_startproc
___
$code.=<<___ if (!$kernel);
mov OPENSSL_ia32cap_P+8(%rip),%r9d
___
$code.=<<___;
vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26
vmovd 4*1($ctx),%x#$H1
vmovd 4*2($ctx),%x#$H2
vmovd 4*3($ctx),%x#$H3
vmovd 4*4($ctx),%x#$H4
.Ldo_avx2$suffix:
___
$code.=<<___ if (!$kernel && $avx>2);
cmp \$512,$len
jb .Lskip_avx512
and %r11d,%r9d
test \$`1<<16`,%r9d # check for AVX512F
jnz .Lblocks_avx512
.Lskip_avx512$suffix:
___
$code.=<<___ if ($avx > 2 && $avx512 && $kernel);
cmp \$512,$len
jae .Lblocks_avx512
___
$code.=<<___ if (!$win64);
lea 8(%rsp),%r10
.cfi_def_cfa_register %r10
sub \$0x128,%rsp
___
$code.=<<___ if ($win64);
lea 8(%rsp),%r10
sub \$0x1c8,%rsp
vmovdqa %xmm6,-0xb0(%r10)
vmovdqa %xmm7,-0xa0(%r10)
vmovdqa %xmm8,-0x90(%r10)
vmovdqa %xmm9,-0x80(%r10)
vmovdqa %xmm10,-0x70(%r10)
vmovdqa %xmm11,-0x60(%r10)
vmovdqa %xmm12,-0x50(%r10)
vmovdqa %xmm13,-0x40(%r10)
vmovdqa %xmm14,-0x30(%r10)
vmovdqa %xmm15,-0x20(%r10)
.Ldo_avx2_body$suffix:
___
$code.=<<___;
lea .Lconst(%rip),%rcx
lea 48+64($ctx),$ctx # size optimization
vmovdqa 96(%rcx),$T0 # .Lpermd_avx2
# expand and copy pre-calculated table to stack
vmovdqu `16*0-64`($ctx),%x#$T2
and \$-512,%rsp
vmovdqu `16*1-64`($ctx),%x#$T3
vmovdqu `16*2-64`($ctx),%x#$T4
vmovdqu `16*3-64`($ctx),%x#$D0
vmovdqu `16*4-64`($ctx),%x#$D1
vmovdqu `16*5-64`($ctx),%x#$D2
lea 0x90(%rsp),%rax # size optimization
vmovdqu `16*6-64`($ctx),%x#$D3
vpermd $T2,$T0,$T2 # 00003412 -> 14243444
vmovdqu `16*7-64`($ctx),%x#$D4
vpermd $T3,$T0,$T3
vmovdqu `16*8-64`($ctx),%x#$MASK
vpermd $T4,$T0,$T4
vmovdqa $T2,0x00(%rsp)
vpermd $D0,$T0,$D0
vmovdqa $T3,0x20-0x90(%rax)
vpermd $D1,$T0,$D1
vmovdqa $T4,0x40-0x90(%rax)
vpermd $D2,$T0,$D2
vmovdqa $D0,0x60-0x90(%rax)
vpermd $D3,$T0,$D3
vmovdqa $D1,0x80-0x90(%rax)
vpermd $D4,$T0,$D4
vmovdqa $D2,0xa0-0x90(%rax)
vpermd $MASK,$T0,$MASK
vmovdqa $D3,0xc0-0x90(%rax)
vmovdqa $D4,0xe0-0x90(%rax)
vmovdqa $MASK,0x100-0x90(%rax)
vmovdqa 64(%rcx),$MASK # .Lmask26
################################################################
# load input
vmovdqu 16*0($inp),%x#$T0
vmovdqu 16*1($inp),%x#$T1
vinserti128 \$1,16*2($inp),$T0,$T0
vinserti128 \$1,16*3($inp),$T1,$T1
lea 16*4($inp),$inp
vpsrldq \$6,$T0,$T2 # splat input
vpsrldq \$6,$T1,$T3
vpunpckhqdq $T1,$T0,$T4 # 4
vpunpcklqdq $T3,$T2,$T2 # 2:3
vpunpcklqdq $T1,$T0,$T0 # 0:1
vpsrlq \$30,$T2,$T3
vpsrlq \$4,$T2,$T2
vpsrlq \$26,$T0,$T1
vpsrlq \$40,$T4,$T4 # 4
vpand $MASK,$T2,$T2 # 2
vpand $MASK,$T0,$T0 # 0
vpand $MASK,$T1,$T1 # 1
vpand $MASK,$T3,$T3 # 3
vpor 32(%rcx),$T4,$T4 # padbit, yes, always
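# $T0-$T4 now hold the five 26-bit limbs of the four blocks, one
# block per 64-bit lane: limb k of a block value b is
# (b >> 26*k) & 0x3ffffff, with the pad bit 2^128 = 2^(4*26+24)
# OR-ed into the top limb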
vpaddq $H2,$T2,$H2 # accumulate input
sub \$64,$len
jz .Ltail_avx2$suffix
jmp .Loop_avx2$suffix
.align 32
.Loop_avx2$suffix:
################################################################
# ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
# ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
# ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
# ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
# \________/\__________/
################################################################
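# i.e. four 130-bit accumulators are kept, one per 64-bit lane, each
# split into five 26-bit limbs (the H vectors); every iteration folds
# four new blocks into the lanes and multiplies all lanes by r^4,
# while the tail applies the per-lane powers r^4..r^1 and sums the
# lanes into a single hash value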
#vpaddq $H2,$T2,$H2 # accumulate input
vpaddq $H0,$T0,$H0
vmovdqa `32*0`(%rsp),$T0 # r0^4
vpaddq $H1,$T1,$H1
vmovdqa `32*1`(%rsp),$T1 # r1^4
vpaddq $H3,$T3,$H3
vmovdqa `32*3`(%rsp),$T2 # r2^4
vpaddq $H4,$T4,$H4
vmovdqa `32*6-0x90`(%rax),$T3 # s3^4
vmovdqa `32*8-0x90`(%rax),$S4 # s4^4
# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
#
# however, as h2 is "chronologically" the first limb available (its
# input word is accumulated ahead of the others), the corresponding
# operations are pulled up, so the schedule becomes
#
# d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
# d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
# d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
# d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
vpmuludq $H2,$T0,$D2 # d2 = h2*r0
vpmuludq $H2,$T1,$D3 # d3 = h2*r1
vpmuludq $H2,$T2,$D4 # d4 = h2*r2
vpmuludq $H2,$T3,$D0 # d0 = h2*s3
vpmuludq $H2,$S4,$D1 # d1 = h2*s4
vpmuludq $H0,$T1,$T4 # h0*r1
vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp
vpaddq $T4,$D1,$D1 # d1 += h0*r1
vpaddq $H2,$D2,$D2 # d2 += h1*r1
vpmuludq $H3,$T1,$T4 # h3*r1
vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1
vpaddq $T4,$D4,$D4 # d4 += h3*r1
vpaddq $H2,$D0,$D0 # d0 += h4*s1
vmovdqa `32*4-0x90`(%rax),$T1 # s2
vpmuludq $H0,$T0,$T4 # h0*r0
vpmuludq $H1,$T0,$H2 # h1*r0
vpaddq $T4,$D0,$D0 # d0 += h0*r0
vpaddq $H2,$D1,$D1 # d1 += h1*r0
vpmuludq $H3,$T0,$T4 # h3*r0
vpmuludq $H4,$T0,$H2 # h4*r0
vmovdqu 16*0($inp),%x#$T0 # load input
vpaddq $T4,$D3,$D3 # d3 += h3*r0
vpaddq $H2,$D4,$D4 # d4 += h4*r0
vinserti128 \$1,16*2($inp),$T0,$T0
vpmuludq $H3,$T1,$T4 # h3*s2
vpmuludq $H4,$T1,$H2 # h4*s2
vmovdqu 16*1($inp),%x#$T1
vpaddq $T4,$D0,$D0 # d0 += h3*s2
vpaddq $H2,$D1,$D1 # d1 += h4*s2
vmovdqa `32*5-0x90`(%rax),$H2 # r3
vpmuludq $H1,$T2,$T4 # h1*r2
vpmuludq $H0,$T2,$T2 # h0*r2
vpaddq $T4,$D3,$D3 # d3 += h1*r2
vpaddq $T2,$D2,$D2 # d2 += h0*r2
vinserti128 \$1,16*3($inp),$T1,$T1
lea 16*4($inp),$inp
vpmuludq $H1,$H2,$T4 # h1*r3
vpmuludq $H0,$H2,$H2 # h0*r3
vpsrldq \$6,$T0,$T2 # splat input
vpaddq $T4,$D4,$D4 # d4 += h1*r3
vpaddq $H2,$D3,$D3 # d3 += h0*r3
vpmuludq $H3,$T3,$T4 # h3*s3
vpmuludq $H4,$T3,$H2 # h4*s3
vpsrldq \$6,$T1,$T3
vpaddq $T4,$D1,$D1 # d1 += h3*s3
vpaddq $H2,$D2,$D2 # d2 += h4*s3
vpunpckhqdq $T1,$T0,$T4 # 4
vpmuludq $H3,$S4,$H3 # h3*s4
vpmuludq $H4,$S4,$H4 # h4*s4
vpunpcklqdq $T1,$T0,$T0 # 0:1
vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
vpunpcklqdq $T3,$T2,$T3 # 2:3
vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4
vpmuludq $H1,$S4,$H0 # h1*s4
vmovdqa 64(%rcx),$MASK # .Lmask26
vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
################################################################
# lazy reduction (interleaved with tail of input splat)
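# carries are propagated just one limb at a time, leaving each limb
# at 26 bits plus a small excess, which is enough headroom for the
# next round of 32x32->64-bit products; the carry out of h4 is folded
# into h0 multiplied by 5 (2^130 = 5 mod 2^130-5), hence the
# add-then-shift-by-2-and-add sequence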
vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpaddq $D3,$H4,$H4 # h3 -> h4
vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpaddq $D0,$D1,$H1 # h0 -> h1
vpsrlq \$26,$H4,$D4
vpand $MASK,$H4,$H4
vpsrlq \$4,$T3,$T2
vpsrlq \$26,$H1,$D1
vpand $MASK,$H1,$H1
vpaddq $D1,$H2,$H2 # h1 -> h2
vpaddq $D4,$H0,$H0
vpsllq \$2,$D4,$D4
vpaddq $D4,$H0,$H0 # h4 -> h0
vpand $MASK,$T2,$T2 # 2
vpsrlq \$26,$T0,$T1
vpsrlq \$26,$H2,$D2
vpand $MASK,$H2,$H2
vpaddq $D2,$H3,$H3 # h2 -> h3
vpaddq $T2,$H2,$H2 # modulo-scheduled
vpsrlq \$30,$T3,$T3
vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpaddq $D0,$H1,$H1 # h0 -> h1
vpsrlq \$40,$T4,$T4 # 4
vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpaddq $D3,$H4,$H4 # h3 -> h4
vpand $MASK,$T0,$T0 # 0
vpand $MASK,$T1,$T1 # 1
vpand $MASK,$T3,$T3 # 3
vpor 32(%rcx),$T4,$T4 # padbit, yes, always
sub \$64,$len
jnz .Loop_avx2$suffix
.byte 0x66,0x90
.Ltail_avx2$suffix:
################################################################
# while the multiplications above were by r^4 in all lanes, in the
# last iteration we multiply the least significant lane by r^4 and
# the most significant one by r, so this is a copy of the loop body
# above, except that references to the pre-computed table are
# displaced by 4 bytes...
#vpaddq $H2,$T2,$H2 # accumulate input
vpaddq $H0,$T0,$H0
vmovdqu `32*0+4`(%rsp),$T0 # r0^4
vpaddq $H1,$T1,$H1
vmovdqu `32*1+4`(%rsp),$T1 # r1^4
vpaddq $H3,$T3,$H3
vmovdqu `32*3+4`(%rsp),$T2 # r2^4
vpaddq $H4,$T4,$H4
vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4
vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4
vpmuludq $H2,$T0,$D2 # d2 = h2*r0
vpmuludq $H2,$T1,$D3 # d3 = h2*r1
vpmuludq $H2,$T2,$D4 # d4 = h2*r2
vpmuludq $H2,$T3,$D0 # d0 = h2*s3
vpmuludq $H2,$S4,$D1 # d1 = h2*s4
vpmuludq $H0,$T1,$T4 # h0*r1
vpmuludq $H1,$T1,$H2 # h1*r1
vpaddq $T4,$D1,$D1 # d1 += h0*r1
vpaddq $H2,$D2,$D2 # d2 += h1*r1
vpmuludq $H3,$T1,$T4 # h3*r1
vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1
vpaddq $T4,$D4,$D4 # d4 += h3*r1
vpaddq $H2,$D0,$D0 # d0 += h4*s1
vpmuludq $H0,$T0,$T4 # h0*r0
vpmuludq $H1,$T0,$H2 # h1*r0
vpaddq $T4,$D0,$D0 # d0 += h0*r0
vmovdqu `32*4+4-0x90`(%rax),$T1 # s2
vpaddq $H2,$D1,$D1 # d1 += h1*r0
vpmuludq $H3,$T0,$T4 # h3*r0
vpmuludq $H4,$T0,$H2 # h4*r0
vpaddq $T4,$D3,$D3 # d3 += h3*r0
vpaddq $H2,$D4,$D4 # d4 += h4*r0
vpmuludq $H3,$T1,$T4 # h3*s2
vpmuludq $H4,$T1,$H2 # h4*s2
vpaddq $T4,$D0,$D0 # d0 += h3*s2
vpaddq $H2,$D1,$D1 # d1 += h4*s2
vmovdqu `32*5+4-0x90`(%rax),$H2 # r3
vpmuludq $H1,$T2,$T4 # h1*r2
vpmuludq $H0,$T2,$T2 # h0*r2
vpaddq $T4,$D3,$D3 # d3 += h1*r2
vpaddq $T2,$D2,$D2 # d2 += h0*r2
vpmuludq $H1,$H2,$T4 # h1*r3
vpmuludq $H0,$H2,$H2 # h0*r3
vpaddq $T4,$D4,$D4 # d4 += h1*r3
vpaddq $H2,$D3,$D3 # d3 += h0*r3
vpmuludq $H3,$T3,$T4 # h3*s3
vpmuludq $H4,$T3,$H2 # h4*s3
vpaddq $T4,$D1,$D1 # d1 += h3*s3
vpaddq $H2,$D2,$D2 # d2 += h4*s3
vpmuludq $H3,$S4,$H3 # h3*s4
vpmuludq $H4,$S4,$H4 # h4*s4
vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
vpmuludq `32*7+4-0x90`(%rax),$H0,$H4 # h0*r4
vpmuludq $H1,$S4,$H0 # h1*s4
vmovdqa 64(%rcx),$MASK # .Lmask26
vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
################################################################
# horizontal addition
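# the four per-lane sums are folded into the low 64-bit lane: first
# add the upper qword of each 128-bit half (vpsrldq by 8), then add
# the low qword of the upper half (vpermq with immediate 0x2)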
vpsrldq \$8,$D1,$T1
vpsrldq \$8,$H2,$T2
vpsrldq \$8,$H3,$T3
vpsrldq \$8,$H4,$T4
vpsrldq \$8,$H0,$T0
vpaddq $T1,$D1,$D1
vpaddq $T2,$H2,$H2
vpaddq $T3,$H3,$H3
vpaddq $T4,$H4,$H4
vpaddq $T0,$H0,$H0
vpermq \$0x2,$H3,$T3
vpermq \$0x2,$H4,$T4
vpermq \$0x2,$H0,$T0
vpermq \$0x2,$D1,$T1
vpermq \$0x2,$H2,$T2
vpaddq $T3,$H3,$H3
vpaddq $T4,$H4,$H4
vpaddq $T0,$H0,$H0
vpaddq $T1,$D1,$D1
vpaddq $T2,$H2,$H2
################################################################
# lazy reduction
vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpaddq $D3,$H4,$H4 # h3 -> h4
vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpaddq $D0,$D1,$H1 # h0 -> h1
vpsrlq \$26,$H4,$D4
vpand $MASK,$H4,$H4
vpsrlq \$26,$H1,$D1
vpand $MASK,$H1,$H1
vpaddq $D1,$H2,$H2 # h1 -> h2
vpaddq $D4,$H0,$H0
vpsllq \$2,$D4,$D4
vpaddq $D4,$H0,$H0 # h4 -> h0
vpsrlq \$26,$H2,$D2
vpand $MASK,$H2,$H2
vpaddq $D2,$H3,$H3 # h2 -> h3
vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpaddq $D0,$H1,$H1 # h0 -> h1
vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpaddq $D3,$H4,$H4 # h3 -> h4
vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced
vmovd %x#$H1,`4*1-48-64`($ctx)
vmovd %x#$H2,`4*2-48-64`($ctx)
vmovd %x#$H3,`4*3-48-64`($ctx)
vmovd %x#$H4,`4*4-48-64`($ctx)
___
$code.=<<___ if ($win64);
vmovdqa -0xb0(%r10),%xmm6
vmovdqa -0xa0(%r10),%xmm7
vmovdqa -0x90(%r10),%xmm8
vmovdqa -0x80(%r10),%xmm9
vmovdqa -0x70(%r10),%xmm10
vmovdqa -0x60(%r10),%xmm11
vmovdqa -0x50(%r10),%xmm12
vmovdqa -0x40(%r10),%xmm13
vmovdqa -0x30(%r10),%xmm14
vmovdqa -0x20(%r10),%xmm15
lea -8(%r10),%rsp
.Ldo_avx2_epilogue$suffix:
___
$code.=<<___ if (!$win64);
lea -8(%r10),%rsp
.cfi_def_cfa_register %rsp
___
$code.=<<___;
vzeroupper
ret
.cfi_endproc
___
if($avx > 2 && $avx512) {
my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24));
my ($M0,$M1,$M2,$M3,$M4) = map("%zmm$_",(25..29));
my $PADBIT="%zmm30";
map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3)); # switch to %zmm domain
map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
map(s/%y/%z/,($MASK));
$code.=<<___;
.cfi_startproc
.Lblocks_avx512:
mov \$15,%eax
kmovw %eax,%k2
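# %k2 = 0b1111, so the masked 64-bit stores below write only the
# lower 256 bits (four qwords) of each table vector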
___
$code.=<<___ if (!$win64);
lea 8(%rsp),%r10
.cfi_def_cfa_register %r10
sub \$0x128,%rsp
___
$code.=<<___ if ($win64);
lea 8(%rsp),%r10
sub \$0x1c8,%rsp
vmovdqa %xmm6,-0xb0(%r10)
vmovdqa %xmm7,-0xa0(%r10)
vmovdqa %xmm8,-0x90(%r10)
vmovdqa %xmm9,-0x80(%r10)
vmovdqa %xmm10,-0x70(%r10)
vmovdqa %xmm11,-0x60(%r10)
vmovdqa %xmm12,-0x50(%r10)
vmovdqa %xmm13,-0x40(%r10)
vmovdqa %xmm14,-0x30(%r10)
vmovdqa %xmm15,-0x20(%r10)
.Ldo_avx512_body:
___
$code.=<<___;
lea .Lconst(%rip),%rcx
lea 48+64($ctx),$ctx # size optimization
vmovdqa 96(%rcx),%y#$T2 # .Lpermd_avx2
# expand pre-calculated table
vmovdqu `16*0-64`($ctx),%x#$D0 # will become expanded ${R0}
and \$-512,%rsp
vmovdqu `16*1-64`($ctx),%x#$D1 # will become ... ${R1}
mov \$0x20,%rax
vmovdqu `16*2-64`($ctx),%x#$T0 # ... ${S1}
vmovdqu `16*3-64`($ctx),%x#$D2 # ... ${R2}
vmovdqu `16*4-64`($ctx),%x#$T1 # ... ${S2}
vmovdqu `16*5-64`($ctx),%x#$D3 # ... ${R3}
vmovdqu `16*6-64`($ctx),%x#$T3 # ... ${S3}
vmovdqu `16*7-64`($ctx),%x#$D4 # ... ${R4}
vmovdqu `16*8-64`($ctx),%x#$T4 # ... ${S4}
vpermd $D0,$T2,$R0 # 00003412 -> 14243444
vpbroadcastq 64(%rcx),$MASK # .Lmask26
vpermd $D1,$T2,$R1
vpermd $T0,$T2,$S1
vpermd $D2,$T2,$R2
vmovdqa64 $R0,0x00(%rsp){%k2} # save in case $len%128 != 0
vpsrlq \$32,$R0,$T0 # 14243444 -> 01020304
vpermd $T1,$T2,$S2
vmovdqu64 $R1,0x00(%rsp,%rax){%k2}
vpsrlq \$32,$R1,$T1
vpermd $D3,$T2,$R3
vmovdqa64 $S1,0x40(%rsp){%k2}
vpermd $T3,$T2,$S3
vpermd $D4,$T2,$R4
vmovdqu64 $R2,0x40(%rsp,%rax){%k2}
vpermd $T4,$T2,$S4
vmovdqa64 $S2,0x80(%rsp){%k2}
vmovdqu64 $R3,0x80(%rsp,%rax){%k2}
vmovdqa64 $S3,0xc0(%rsp){%k2}
vmovdqu64 $R4,0xc0(%rsp,%rax){%k2}
vmovdqa64 $S4,0x100(%rsp){%k2}
################################################################
# calculate 5th through 8th powers of the key
#
# d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
# d1 = r0'*r1 + r1'*r0 + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
# d2 = r0'*r2 + r1'*r1 + r2'*r0 + r3'*5*r4 + r4'*5*r3
# d3 = r0'*r3 + r1'*r2 + r2'*r1 + r3'*r0 + r4'*5*r4
# d4 = r0'*r4 + r1'*r3 + r2'*r2 + r3'*r1 + r4'*r0
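# the even dwords of $R0-$S4 hold the limbs of r^4 (the same value
# in every lane), while the odd dwords, shifted down by 32 bits into
# $T0-$T4 here and below, hold the limbs of r^1..r^4, one power per
# 64-bit lane, so the products yield r^5..r^8 in the lower four
# 64-bit lanes of $D0-$D4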
vpmuludq $T0,$R0,$D0 # d0 = r0'*r0
vpmuludq $T0,$R1,$D1 # d1 = r0'*r1
vpmuludq $T0,$R2,$D2 # d2 = r0'*r2
vpmuludq $T0,$R3,$D3 # d3 = r0'*r3
vpmuludq $T0,$R4,$D4 # d4 = r0'*r4
vpsrlq \$32,$R2,$T2
vpmuludq $T1,$S4,$M0
vpmuludq $T1,$R0,$M1
vpmuludq $T1,$R1,$M2
vpmuludq $T1,$R2,$M3
vpmuludq $T1,$R3,$M4
vpsrlq \$32,$R3,$T3
vpaddq $M0,$D0,$D0 # d0 += r1'*5*r4
vpaddq $M1,$D1,$D1 # d1 += r1'*r0
vpaddq $M2,$D2,$D2 # d2 += r1'*r1
vpaddq $M3,$D3,$D3 # d3 += r1'*r2
vpaddq $M4,$D4,$D4 # d4 += r1'*r3
vpmuludq $T2,$S3,$M0
vpmuludq $T2,$S4,$M1
vpmuludq $T2,$R1,$M3
vpmuludq $T2,$R2,$M4
vpmuludq $T2,$R0,$M2
vpsrlq \$32,$R4,$T4
vpaddq $M0,$D0,$D0 # d0 += r2'*5*r3
vpaddq $M1,$D1,$D1 # d1 += r2'*5*r4
vpaddq $M3,$D3,$D3 # d3 += r2'*r1
vpaddq $M4,$D4,$D4 # d4 += r2'*r2
vpaddq $M2,$D2,$D2 # d2 += r2'*r0
vpmuludq $T3,$S2,$M0
vpmuludq $T3,$R0,$M3
vpmuludq $T3,$R1,$M4
vpmuludq $T3,$S3,$M1
vpmuludq $T3,$S4,$M2
vpaddq $M0,$D0,$D0 # d0 += r3'*5*r2
vpaddq $M3,$D3,$D3 # d3 += r3'*r0
vpaddq $M4,$D4,$D4 # d4 += r3'*r1
vpaddq $M1,$D1,$D1 # d1 += r3'*5*r3
vpaddq $M2,$D2,$D2 # d2 += r3'*5*r4
vpmuludq $T4,$S4,$M3
vpmuludq $T4,$R0,$M4
vpmuludq $T4,$S1,$M0
vpmuludq $T4,$S2,$M1
vpmuludq $T4,$S3,$M2
vpaddq $M3,$D3,$D3 # d3 += r4'*5*r4
vpaddq $M4,$D4,$D4 # d4 += r4'*r0
vpaddq $M0,$D0,$D0 # d0 += r4'*5*r1
vpaddq $M1,$D1,$D1 # d1 += r4'*5*r2
vpaddq $M2,$D2,$D2 # d2 += r4'*5*r3
################################################################
# load input
vmovdqu64 16*0($inp),%z#$T3
vmovdqu64 16*4($inp),%z#$T4
lea 16*8($inp),$inp
################################################################
# lazy reduction
vpsrlq \$26,$D3,$M3
vpandq $MASK,$D3,$D3
vpaddq $M3,$D4,$D4 # d3 -> d4
vpsrlq \$26,$D0,$M0
vpandq $MASK,$D0,$D0
vpaddq $M0,$D1,$D1 # d0 -> d1
vpsrlq \$26,$D4,$M4
vpandq $MASK,$D4,$D4
vpsrlq \$26,$D1,$M1
vpandq $MASK,$D1,$D1
vpaddq $M1,$D2,$D2 # d1 -> d2
vpaddq $M4,$D0,$D0
vpsllq \$2,$M4,$M4
vpaddq $M4,$D0,$D0 # d4 -> d0
vpsrlq \$26,$D2,$M2
vpandq $MASK,$D2,$D2
vpaddq $M2,$D3,$D3 # d2 -> d3
vpsrlq \$26,$D0,$M0
vpandq $MASK,$D0,$D0
vpaddq $M0,$D1,$D1 # d0 -> d1
vpsrlq \$26,$D3,$M3
vpandq $MASK,$D3,$D3
vpaddq $M3,$D4,$D4 # d3 -> d4
################################################################
# at this point we have 14243444 in $R0-$S4 and 05060708 in
# $D0-$D4, ...
vpunpcklqdq $T4,$T3,$T0 # transpose input
vpunpckhqdq $T4,$T3,$T4
# ... since input 64-bit lanes are ordered as 73625140, we could
# "vperm" it to 76543210 (here and in each loop iteration), *or*
# we can just flow along and instead lay the key powers out to
# match, hence the goal for $R0-$S4 is 1858286838784888 ...
vmovdqa32 128(%rcx),$M0 # .Lpermd_avx512:
mov \$0x7777,%eax
kmovw %eax,%k1
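# %k1 = 0x7777 masks off dword 3 of each 128-bit group, so the
# second, merging set of vpermd keeps the r^1..r^4 entries written
# by the first set and fills the remaining dwords from the r^5..r^8
# vectors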
vpermd $R0,$M0,$R0 # 14243444 -> 1---2---3---4---
vpermd $R1,$M0,$R1
vpermd $R2,$M0,$R2
vpermd $R3,$M0,$R3
vpermd $R4,$M0,$R4
vpermd $D0,$M0,${R0}{%k1} # 05060708 -> 1858286838784888
vpermd $D1,$M0,${R1}{%k1}
vpermd $D2,$M0,${R2}{%k1}
vpermd $D3,$M0,${R3}{%k1}
vpermd $D4,$M0,${R4}{%k1}
vpslld \$2,$R1,$S1 # *5
vpslld \$2,$R2,$S2
vpslld \$2,$R3,$S3
vpslld \$2,$R4,$S4
vpaddd $R1,$S1,$S1
vpaddd $R2,$S2,$S2
vpaddd $R3,$S3,$S3
vpaddd $R4,$S4,$S4
vpbroadcastq 32(%rcx),$PADBIT # .L129
vpsrlq \$52,$T0,$T2 # splat input
vpsllq \$12,$T4,$T3
vporq $T3,$T2,$T2
vpsrlq \$26,$T0,$T1
vpsrlq \$14,$T4,$T3
vpsrlq \$40,$T4,$T4 # 4
vpandq $MASK,$T2,$T2 # 2
vpandq $MASK,$T0,$T0 # 0
#vpandq $MASK,$T1,$T1 # 1
#vpandq $MASK,$T3,$T3 # 3
#vporq $PADBIT,$T4,$T4 # padbit, yes, always
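# after the transpose $T0 holds the low and $T4 the high qword of
# each block, one block per 64-bit lane; the block b = lo + hi*2^64
# is split into limbs b & mask, (b>>26) & mask,
# (lo>>52 | hi<<12) & mask, (hi>>14) & mask and (hi>>40) | 1<<24,
# the OR being the pad bit 2^128 = 2^(4*26+24); part of the masking
# is deferred into the loop, see the commented-out vpandq/vporq above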
vpaddq $H2,$T2,$H2 # accumulate input
sub \$192,$len
jbe .Ltail_avx512
jmp .Loop_avx512
.align 32
.Loop_avx512:
################################################################
# ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
# ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
# ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
# ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
# ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
# ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
# ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
# ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
# \________/\___________/
################################################################
#vpaddq $H2,$T2,$H2 # accumulate input
# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
#
# however, as h2 is "chronologically" the first limb available (its
# input word is accumulated ahead of the others), the corresponding
# operations are pulled up, so the schedule becomes
#
# d3 = h2*r1 + h0*r3 + h1*r2 + h3*r0 + h4*5*r4
# d4 = h2*r2 + h0*r4 + h1*r3 + h3*r1 + h4*r0
# d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1
# d1 = h2*5*r4 + h0*r1 + h1*r0 + h3*5*r3 + h4*5*r2
# d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3
vpmuludq $H2,$R1,$D3 # d3 = h2*r1
vpaddq $H0,$T0,$H0
vpmuludq $H2,$R2,$D4 # d4 = h2*r2
vpandq $MASK,$T1,$T1 # 1
vpmuludq $H2,$S3,$D0 # d0 = h2*s3
vpandq $MASK,$T3,$T3 # 3
vpmuludq $H2,$S4,$D1 # d1 = h2*s4
vporq $PADBIT,$T4,$T4 # padbit, yes, always
vpmuludq $H2,$R0,$D2 # d2 = h2*r0
vpaddq $H1,$T1,$H1 # accumulate input
vpaddq $H3,$T3,$H3
vpaddq $H4,$T4,$H4
vmovdqu64 16*0($inp),$T3 # load input
vmovdqu64 16*4($inp),$T4
lea 16*8($inp),$inp
vpmuludq $H0,$R3,$M3
vpmuludq $H0,$R4,$M4
vpmuludq $H0,$R0,$M0
vpmuludq $H0,$R1,$M1
vpaddq $M3,$D3,$D3 # d3 += h0*r3
vpaddq $M4,$D4,$D4 # d4 += h0*r4
vpaddq $M0,$D0,$D0 # d0 += h0*r0
vpaddq $M1,$D1,$D1 # d1 += h0*r1
vpmuludq $H1,$R2,$M3
vpmuludq $H1,$R3,$M4
vpmuludq $H1,$S4,$M0
vpmuludq $H0,$R2,$M2
vpaddq $M3,$D3,$D3 # d3 += h1*r2
vpaddq $M4,$D4,$D4 # d4 += h1*r3
vpaddq $M0,$D0,$D0 # d0 += h1*s4
vpaddq $M2,$D2,$D2 # d2 += h0*r2
vpunpcklqdq $T4,$T3,$T0 # transpose input
vpunpckhqdq $T4,$T3,$T4
vpmuludq $H3,$R0,$M3
vpmuludq $H3,$R1,$M4
vpmuludq $H1,$R0,$M1
vpmuludq $H1,$R1,$M2
vpaddq $M3,$D3,$D3 # d3 += h3*r0
vpaddq $M4,$D4,$D4 # d4 += h3*r1
vpaddq $M1,$D1,$D1 # d1 += h1*r0
vpaddq $M2,$D2,$D2 # d2 += h1*r1
vpmuludq $H4,$S4,$M3
vpmuludq $H4,$R0,$M4
vpmuludq $H3,$S2,$M0
vpmuludq $H3,$S3,$M1
vpaddq $M3,$D3,$D3 # d3 += h4*s4
vpmuludq $H3,$S4,$M2
vpaddq $M4,$D4,$D4 # d4 += h4*r0
vpaddq $M0,$D0,$D0 # d0 += h3*s2
vpaddq $M1,$D1,$D1 # d1 += h3*s3
vpaddq $M2,$D2,$D2 # d2 += h3*s4
vpmuludq $H4,$S1,$M0
vpmuludq $H4,$S2,$M1
vpmuludq $H4,$S3,$M2
vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
vpaddq $M1,$D1,$H1 # h1 = d1 + h4*s2
vpaddq $M2,$D2,$H2 # h2 = d2 + h4*s3
################################################################
# lazy reduction (interleaved with input splat)
vpsrlq \$52,$T0,$T2 # splat input
vpsllq \$12,$T4,$T3
vpsrlq \$26,$D3,$H3
vpandq $MASK,$D3,$D3
vpaddq $H3,$D4,$H4 # h3 -> h4
vporq $T3,$T2,$T2
vpsrlq \$26,$H0,$D0
vpandq $MASK,$H0,$H0
vpaddq $D0,$H1,$H1 # h0 -> h1
vpandq $MASK,$T2,$T2 # 2
vpsrlq \$26,$H4,$D4
vpandq $MASK,$H4,$H4
vpsrlq \$26,$H1,$D1
vpandq $MASK,$H1,$H1
vpaddq $D1,$H2,$H2 # h1 -> h2
vpaddq $D4,$H0,$H0
vpsllq \$2,$D4,$D4
vpaddq $D4,$H0,$H0 # h4 -> h0
vpaddq $T2,$H2,$H2 # modulo-scheduled
vpsrlq \$26,$T0,$T1
vpsrlq \$26,$H2,$D2
vpandq $MASK,$H2,$H2
vpaddq $D2,$D3,$H3 # h2 -> h3
vpsrlq \$14,$T4,$T3
vpsrlq \$26,$H0,$D0
vpandq $MASK,$H0,$H0
vpaddq $D0,$H1,$H1 # h0 -> h1
vpsrlq \$40,$T4,$T4 # 4
vpsrlq \$26,$H3,$D3
vpandq $MASK,$H3,$H3
vpaddq $D3,$H4,$H4 # h3 -> h4
vpandq $MASK,$T0,$T0 # 0
#vpandq $MASK,$T1,$T1 # 1
#vpandq $MASK,$T3,$T3 # 3
#vporq $PADBIT,$T4,$T4 # padbit, yes, always
sub \$128,$len
ja .Loop_avx512
.Ltail_avx512:
################################################################
# while the multiplications above were by r^8 in all lanes, in the
# last iteration we multiply the least significant lane by r^8 and
# the most significant one by r, which is why the table gets
# shifted...
vpsrlq \$32,$R0,$R0 # 0105020603070408
vpsrlq \$32,$R1,$R1
vpsrlq \$32,$R2,$R2
vpsrlq \$32,$S3,$S3
vpsrlq \$32,$S4,$S4
vpsrlq \$32,$R3,$R3
vpsrlq \$32,$R4,$R4
vpsrlq \$32,$S1,$S1
vpsrlq \$32,$S2,$S2
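# the low dword of every 64-bit lane now holds the power of r (r^1
# through r^8) that this lane's accumulator is to be multiplied by
# in the final iteration, whereas the loop above used r^8 in every
# lane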
################################################################
# load either next or last 64 byte of input
lea ($inp,$len),$inp
#vpaddq $H2,$T2,$H2 # accumulate input
vpaddq $H0,$T0,$H0
vpmuludq $H2,$R1,$D3 # d3 = h2*r1
vpmuludq $H2,$R2,$D4 # d4 = h2*r2
vpmuludq $H2,$S3,$D0 # d0 = h2*s3
vpandq $MASK,$T1,$T1 # 1
vpmuludq $H2,$S4,$D1 # d1 = h2*s4
vpandq $MASK,$T3,$T3 # 3
vpmuludq $H2,$R0,$D2 # d2 = h2*r0
vporq $PADBIT,$T4,$T4 # padbit, yes, always
vpaddq $H1,$T1,$H1 # accumulate input
vpaddq $H3,$T3,$H3
vpaddq $H4,$T4,$H4
vmovdqu 16*0($inp),%x#$T0
vpmuludq $H0,$R3,$M3
vpmuludq $H0,$R4,$M4
vpmuludq $H0,$R0,$M0
vpmuludq $H0,$R1,$M1
vpaddq $M3,$D3,$D3 # d3 += h0*r3
vpaddq $M4,$D4,$D4 # d4 += h0*r4
vpaddq $M0,$D0,$D0 # d0 += h0*r0
vpaddq $M1,$D1,$D1 # d1 += h0*r1
vmovdqu 16*1($inp),%x#$T1
vpmuludq $H1,$R2,$M3
vpmuludq $H1,$R3,$M4
vpmuludq $H1,$S4,$M0
vpmuludq $H0,$R2,$M2
vpaddq $M3,$D3,$D3 # d3 += h1*r2
vpaddq $M4,$D4,$D4 # d4 += h1*r3
vpaddq $M0,$D0,$D0 # d0 += h1*s4
vpaddq $M2,$D2,$D2 # d2 += h0*r2
vinserti128 \$1,16*2($inp),%y#$T0,%y#$T0
vpmuludq $H3,$R0,$M3
vpmuludq $H3,$R1,$M4
vpmuludq $H1,$R0,$M1
vpmuludq $H1,$R1,$M2
vpaddq $M3,$D3,$D3 # d3 += h3*r0
vpaddq $M4,$D4,$D4 # d4 += h3*r1
vpaddq $M1,$D1,$D1 # d1 += h1*r0
vpaddq $M2,$D2,$D2 # d2 += h1*r1
vinserti128 \$1,16*3($inp),%y#$T1,%y#$T1
vpmuludq $H4,$S4,$M3
vpmuludq $H4,$R0,$M4
vpmuludq $H3,$S2,$M0
vpmuludq $H3,$S3,$M1
vpmuludq $H3,$S4,$M2
vpaddq $M3,$D3,$H3 # h3 = d3 + h4*s4
vpaddq $M4,$D4,$D4 # d4 += h4*r0
vpaddq $M0,$D0,$D0 # d0 += h3*s2
vpaddq $M1,$D1,$D1 # d1 += h3*s3
vpaddq $M2,$D2,$D2 # d2 += h3*s4
vpmuludq $H4,$S1,$M0
vpmuludq $H4,$S2,$M1
vpmuludq $H4,$S3,$M2
vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
vpaddq $M1,$D1,$H1 # h1 = d1 + h4*s2
vpaddq $M2,$D2,$H2 # h2 = d2 + h4*s3
################################################################
# horizontal addition
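# the eight per-lane sums are folded into a single value: add the
# adjacent qword within each 128-bit pair (vpermq 0xb1), add qword 2
# of each 256-bit half into qword 0 (vpermq 0x2), then add the upper
# 256-bit half; %k3 = 0b0001 keeps only qword 0 and zeroes the rest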
mov \$1,%eax
vpermq \$0xb1,$H3,$D3
vpermq \$0xb1,$D4,$H4
vpermq \$0xb1,$H0,$D0
vpermq \$0xb1,$H1,$D1
vpermq \$0xb1,$H2,$D2
vpaddq $D3,$H3,$H3
vpaddq $D4,$H4,$H4
vpaddq $D0,$H0,$H0
vpaddq $D1,$H1,$H1
vpaddq $D2,$H2,$H2
kmovw %eax,%k3
vpermq \$0x2,$H3,$D3
vpermq \$0x2,$H4,$D4
vpermq \$0x2,$H0,$D0
vpermq \$0x2,$H1,$D1
vpermq \$0x2,$H2,$D2
vpaddq $D3,$H3,$H3
vpaddq $D4,$H4,$H4
vpaddq $D0,$H0,$H0
vpaddq $D1,$H1,$H1
vpaddq $D2,$H2,$H2
vextracti64x4 \$0x1,$H3,%y#$D3
vextracti64x4 \$0x1,$H4,%y#$D4
vextracti64x4 \$0x1,$H0,%y#$D0
vextracti64x4 \$0x1,$H1,%y#$D1
vextracti64x4 \$0x1,$H2,%y#$D2
vpaddq $D3,$H3,${H3}{%k3}{z} # keep single qword in case
vpaddq $D4,$H4,${H4}{%k3}{z} # it's passed to .Ltail_avx2
vpaddq $D0,$H0,${H0}{%k3}{z}
vpaddq $D1,$H1,${H1}{%k3}{z}
vpaddq $D2,$H2,${H2}{%k3}{z}
___
map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
$code.=<<___;
################################################################
# lazy reduction (interleaved with input splat)
vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpsrldq \$6,$T0,$T2 # splat input
vpsrldq \$6,$T1,$T3
vpunpckhqdq $T1,$T0,$T4 # 4
vpaddq $D3,$H4,$H4 # h3 -> h4
vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpunpcklqdq $T3,$T2,$T2 # 2:3
vpunpcklqdq $T1,$T0,$T0 # 0:1
vpaddq $D0,$H1,$H1 # h0 -> h1
vpsrlq \$26,$H4,$D4
vpand $MASK,$H4,$H4
vpsrlq \$26,$H1,$D1
vpand $MASK,$H1,$H1
vpsrlq \$30,$T2,$T3
vpsrlq \$4,$T2,$T2
vpaddq $D1,$H2,$H2 # h1 -> h2
vpaddq $D4,$H0,$H0
vpsllq \$2,$D4,$D4
vpsrlq \$26,$T0,$T1
vpsrlq \$40,$T4,$T4 # 4
vpaddq $D4,$H0,$H0 # h4 -> h0
vpsrlq \$26,$H2,$D2
vpand $MASK,$H2,$H2
vpand $MASK,$T2,$T2 # 2
vpand $MASK,$T0,$T0 # 0
vpaddq $D2,$H3,$H3 # h2 -> h3
vpsrlq \$26,$H0,$D0
vpand $MASK,$H0,$H0
vpaddq $H2,$T2,$H2 # accumulate input for .Ltail_avx2
vpand $MASK,$T1,$T1 # 1
vpaddq $D0,$H1,$H1 # h0 -> h1
vpsrlq \$26,$H3,$D3
vpand $MASK,$H3,$H3
vpand $MASK,$T3,$T3 # 3
vpor 32(%rcx),$T4,$T4 # padbit, yes, always
vpaddq $D3,$H4,$H4 # h3 -> h4
lea 0x90(%rsp),%rax # size optimization for .Ltail_avx2
add \$64,$len
jnz .Ltail_avx2$suffix
vpsubq $T2,$H2,$H2 # undo input accumulation
vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced
vmovd %x#$H1,`4*1-48-64`($ctx)
vmovd %x#$H2,`4*2-48-64`($ctx)
vmovd %x#$H3,`4*3-48-64`($ctx)
vmovd %x#$H4,`4*4-48-64`($ctx)
vzeroall
___
$code.=<<___ if ($win64);
movdqa -0xb0(%r10),%xmm6
movdqa -0xa0(%r10),%xmm7
movdqa -0x90(%r10),%xmm8
movdqa -0x80(%r10),%xmm9
movdqa -0x70(%r10),%xmm10
movdqa -0x60(%r10),%xmm11
movdqa -0x50(%r10),%xmm12
movdqa -0x40(%r10),%xmm13
movdqa -0x30(%r10),%xmm14
movdqa -0x20(%r10),%xmm15
lea -8(%r10),%rsp
.Ldo_avx512_epilogue:
___
$code.=<<___ if (!$win64);
lea -8(%r10),%rsp
.cfi_def_cfa_register %rsp
___
$code.=<<___;
ret
.cfi_endproc
___
}
}