sub \$6,$len

in crypto/fipsmodule/modes/asm/aesni-gcm-x86_64.pl [108:514]


	sub		\$6,$len
	vpxor		$Z0,$Z0,$Z0		# $Z0   = 0
	vmovdqu		0x00-0x80($key),$rndkey
	vpaddb		$T2,$T1,$inout1
	vpaddb		$T2,$inout1,$inout2
	vpaddb		$T2,$inout2,$inout3
	vpaddb		$T2,$inout3,$inout4
	vpaddb		$T2,$inout4,$inout5
	vpxor		$rndkey,$T1,$inout0
	vmovdqu		$Z0,16+8(%rsp)		# "$Z3" = 0
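	# GHASH is modulo-scheduled: each .Loop6x pass hashes the six blocks
	# staged on the stack (byte-reversed, via movbe) during the previous
	# pass while the AES rounds for the current six counter blocks run,
	# and the final vpxor of "$Z3" into $Xi is deferred through the
	# 16+8(%rsp) slot zeroed above.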
	jmp		.Loop6x

.align	32
.Loop6x:
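	# $counter caches the IV's final dword as a little-endian load, so the
	# big-endian counter's least-significant byte sits in bits 24-31.
	# Adding 6<<24 therefore sets CF exactly when that byte is about to
	# wrap, i.e. when the byte-wise vpaddb increments would drop a carry;
	# .Lhandle_ctr32 then redoes the increments with full 32-bit adds.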
	add		\$`6<<24`,$counter
	jc		.Lhandle_ctr32		# discard $inout[1-5]?
	vmovdqu		0x00-0x20($Htable),$Hkey	# $Hkey^1
	  vpaddb	$T2,$inout5,$T1		# next counter value
	  vpxor		$rndkey,$inout1,$inout1
	  vpxor		$rndkey,$inout2,$inout2

.Lresume_ctr32:
	vmovdqu		$T1,($ivp)		# save next counter value
	vpclmulqdq	\$0x10,$Hkey,$Z3,$Z1
	  vpxor		$rndkey,$inout3,$inout3
	  vmovups	0x10-0x80($key),$T2	# borrow $T2 for $rndkey
	vpclmulqdq	\$0x01,$Hkey,$Z3,$Z2

	# At this point, the current block of 96 (0x60) bytes has already been
	# loaded into registers. Concurrently with processing it, we want to
	# load the next 96 bytes of input for the next round. Obviously, we can
	# only do this if there are at least 96 more bytes of input beyond the
	# input we're currently processing, or else we'd read past the end of
	# the input buffer. Here, we set |%r12| to 96 if there are at least 96
	# bytes of input beyond the 96 bytes we're already processing, and we
	# set |%r12| to 0 otherwise. In the case where we set |%r12| to 96,
	# we'll read in the next block so that it is in registers for the next
	# loop iteration. In the case where we set |%r12| to 0, we'll re-read
	# the current block and then ignore what we re-read.
	#
	# At this point, |$in0| points to the current (already read into
	# registers) block, and |$end0| points to 2*96 bytes before the end of
	# the input. Thus, |$in0| > |$end0| means that we do not have the next
	# 96-byte block to read in, and |$in0| <= |$end0| means we do.
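	# Equivalently, in C-like pseudocode (illustrative only):
	#
	#   r12 = (in0 <= end0) ? 0x60 : 0;  // cmp/setnc/neg/and below
	#   in0 += r12;                      // advance only if a next block exists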
	xor		%r12,%r12
	cmp		$in0,$end0

	  vaesenc	$T2,$inout0,$inout0
	vmovdqu		0x30+8(%rsp),$Ii	# I[4]
	  vpxor		$rndkey,$inout4,$inout4
	vpclmulqdq	\$0x00,$Hkey,$Z3,$T1
	  vaesenc	$T2,$inout1,$inout1
	  vpxor		$rndkey,$inout5,$inout5
	setnc		%r12b
	vpclmulqdq	\$0x11,$Hkey,$Z3,$Z3
	  vaesenc	$T2,$inout2,$inout2
	vmovdqu		0x10-0x20($Htable),$Hkey	# $Hkey^2
	neg		%r12
	  vaesenc	$T2,$inout3,$inout3
	 vpxor		$Z1,$Z2,$Z2
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Z1
	 vpxor		$Z0,$Xi,$Xi		# modulo-scheduled
	  vaesenc	$T2,$inout4,$inout4
	 vpxor		$Z1,$T1,$Z0
	and		\$0x60,%r12
	  vmovups	0x20-0x80($key),$rndkey
	vpclmulqdq	\$0x10,$Hkey,$Ii,$T1
	  vaesenc	$T2,$inout5,$inout5

	vpclmulqdq	\$0x01,$Hkey,$Ii,$T2
	lea		($in0,%r12),$in0
	  vaesenc	$rndkey,$inout0,$inout0
	 vpxor		16+8(%rsp),$Xi,$Xi	# modulo-scheduled [vpxor $Z3,$Xi,$Xi]
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Hkey
	 vmovdqu	0x40+8(%rsp),$Ii	# I[3]
	  vaesenc	$rndkey,$inout1,$inout1
	movbe		0x58($in0),%r13
	  vaesenc	$rndkey,$inout2,$inout2
	movbe		0x50($in0),%r12
	  vaesenc	$rndkey,$inout3,$inout3
	mov		%r13,0x20+8(%rsp)
	  vaesenc	$rndkey,$inout4,$inout4
	mov		%r12,0x28+8(%rsp)
	vmovdqu		0x30-0x20($Htable),$Z1	# borrow $Z1 for $Hkey^3
	  vaesenc	$rndkey,$inout5,$inout5

	  vmovups	0x30-0x80($key),$rndkey
	 vpxor		$T1,$Z2,$Z2
	vpclmulqdq	\$0x00,$Z1,$Ii,$T1
	  vaesenc	$rndkey,$inout0,$inout0
	 vpxor		$T2,$Z2,$Z2
	vpclmulqdq	\$0x10,$Z1,$Ii,$T2
	  vaesenc	$rndkey,$inout1,$inout1
	 vpxor		$Hkey,$Z3,$Z3
	vpclmulqdq	\$0x01,$Z1,$Ii,$Hkey
	  vaesenc	$rndkey,$inout2,$inout2
	vpclmulqdq	\$0x11,$Z1,$Ii,$Z1
	 vmovdqu	0x50+8(%rsp),$Ii	# I[2]
	  vaesenc	$rndkey,$inout3,$inout3
	  vaesenc	$rndkey,$inout4,$inout4
	 vpxor		$T1,$Z0,$Z0
	vmovdqu		0x40-0x20($Htable),$T1	# borrow $T1 for $Hkey^4
	  vaesenc	$rndkey,$inout5,$inout5

	  vmovups	0x40-0x80($key),$rndkey
	 vpxor		$T2,$Z2,$Z2
	vpclmulqdq	\$0x00,$T1,$Ii,$T2
	  vaesenc	$rndkey,$inout0,$inout0
	 vpxor		$Hkey,$Z2,$Z2
	vpclmulqdq	\$0x10,$T1,$Ii,$Hkey
	  vaesenc	$rndkey,$inout1,$inout1
	movbe		0x48($in0),%r13
	 vpxor		$Z1,$Z3,$Z3
	vpclmulqdq	\$0x01,$T1,$Ii,$Z1
	  vaesenc	$rndkey,$inout2,$inout2
	movbe		0x40($in0),%r12
	vpclmulqdq	\$0x11,$T1,$Ii,$T1
	 vmovdqu	0x60+8(%rsp),$Ii	# I[1]
	  vaesenc	$rndkey,$inout3,$inout3
	mov		%r13,0x30+8(%rsp)
	  vaesenc	$rndkey,$inout4,$inout4
	mov		%r12,0x38+8(%rsp)
	 vpxor		$T2,$Z0,$Z0
	vmovdqu		0x60-0x20($Htable),$T2	# borrow $T2 for $Hkey^5
	  vaesenc	$rndkey,$inout5,$inout5

	  vmovups	0x50-0x80($key),$rndkey
	 vpxor		$Hkey,$Z2,$Z2
	vpclmulqdq	\$0x00,$T2,$Ii,$Hkey
	  vaesenc	$rndkey,$inout0,$inout0
	 vpxor		$Z1,$Z2,$Z2
	vpclmulqdq	\$0x10,$T2,$Ii,$Z1
	  vaesenc	$rndkey,$inout1,$inout1
	movbe		0x38($in0),%r13
	 vpxor		$T1,$Z3,$Z3
	vpclmulqdq	\$0x01,$T2,$Ii,$T1
	 vpxor		0x70+8(%rsp),$Xi,$Xi	# accumulate I[0]
	  vaesenc	$rndkey,$inout2,$inout2
	movbe		0x30($in0),%r12
	vpclmulqdq	\$0x11,$T2,$Ii,$T2
	  vaesenc	$rndkey,$inout3,$inout3
	mov		%r13,0x40+8(%rsp)
	  vaesenc	$rndkey,$inout4,$inout4
	mov		%r12,0x48+8(%rsp)
	 vpxor		$Hkey,$Z0,$Z0
	 vmovdqu	0x70-0x20($Htable),$Hkey	# $Hkey^6
	  vaesenc	$rndkey,$inout5,$inout5

	  vmovups	0x60-0x80($key),$rndkey
	 vpxor		$Z1,$Z2,$Z2
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Z1
	  vaesenc	$rndkey,$inout0,$inout0
	 vpxor		$T1,$Z2,$Z2
	vpclmulqdq	\$0x01,$Hkey,$Xi,$T1
	  vaesenc	$rndkey,$inout1,$inout1
	movbe		0x28($in0),%r13
	 vpxor		$T2,$Z3,$Z3
	vpclmulqdq	\$0x00,$Hkey,$Xi,$T2
	  vaesenc	$rndkey,$inout2,$inout2
	movbe		0x20($in0),%r12
	vpclmulqdq	\$0x11,$Hkey,$Xi,$Xi
	  vaesenc	$rndkey,$inout3,$inout3
	mov		%r13,0x50+8(%rsp)
	  vaesenc	$rndkey,$inout4,$inout4
	mov		%r12,0x58+8(%rsp)
	vpxor		$Z1,$Z2,$Z2
	  vaesenc	$rndkey,$inout5,$inout5
	vpxor		$T1,$Z2,$Z2

	  vmovups	0x70-0x80($key),$rndkey
	vpslldq		\$8,$Z2,$Z1
	vpxor		$T2,$Z0,$Z0
	vmovdqu		0x10($const),$Hkey	# .Lpoly
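	# .Lpoly is the GCM reduction constant.  The code below interleaves
	# the two-phase reduction of the GHASH product modulo
	# x^128 + x^7 + x^2 + x + 1 ("1st phase" here, "2nd phase" in
	# .Lenc_tail) with the remaining AES rounds.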

	  vaesenc	$rndkey,$inout0,$inout0
	vpxor		$Xi,$Z3,$Z3
	  vaesenc	$rndkey,$inout1,$inout1
	vpxor		$Z1,$Z0,$Z0
	movbe		0x18($in0),%r13
	  vaesenc	$rndkey,$inout2,$inout2
	movbe		0x10($in0),%r12
	vpalignr	\$8,$Z0,$Z0,$Ii		# 1st phase
	vpclmulqdq	\$0x10,$Hkey,$Z0,$Z0
	mov		%r13,0x60+8(%rsp)
	  vaesenc	$rndkey,$inout3,$inout3
	mov		%r12,0x68+8(%rsp)
	  vaesenc	$rndkey,$inout4,$inout4
	  vmovups	0x80-0x80($key),$T1	# borrow $T1 for $rndkey
	  vaesenc	$rndkey,$inout5,$inout5

	  vaesenc	$T1,$inout0,$inout0
	  vmovups	0x90-0x80($key),$rndkey
	  vaesenc	$T1,$inout1,$inout1
	vpsrldq		\$8,$Z2,$Z2
	  vaesenc	$T1,$inout2,$inout2
	vpxor		$Z2,$Z3,$Z3
	  vaesenc	$T1,$inout3,$inout3
	vpxor		$Ii,$Z0,$Z0
	movbe		0x08($in0),%r13
	  vaesenc	$T1,$inout4,$inout4
	movbe		0x00($in0),%r12
	  vaesenc	$T1,$inout5,$inout5
	  vmovups	0xa0-0x80($key),$T1
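	  # $rounds is 10, 12 or 14 for AES-128/192/256; branch to the tail as
	  # soon as the shorter key schedules are exhausted.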
	  cmp		\$11,$rounds
	  jb		.Lenc_tail		# 128-bit key

	  vaesenc	$rndkey,$inout0,$inout0
	  vaesenc	$rndkey,$inout1,$inout1
	  vaesenc	$rndkey,$inout2,$inout2
	  vaesenc	$rndkey,$inout3,$inout3
	  vaesenc	$rndkey,$inout4,$inout4
	  vaesenc	$rndkey,$inout5,$inout5

	  vaesenc	$T1,$inout0,$inout0
	  vaesenc	$T1,$inout1,$inout1
	  vaesenc	$T1,$inout2,$inout2
	  vaesenc	$T1,$inout3,$inout3
	  vaesenc	$T1,$inout4,$inout4
	  vmovups	0xb0-0x80($key),$rndkey
	  vaesenc	$T1,$inout5,$inout5
	  vmovups	0xc0-0x80($key),$T1
	  je		.Lenc_tail		# 192-bit key

	  vaesenc	$rndkey,$inout0,$inout0
	  vaesenc	$rndkey,$inout1,$inout1
	  vaesenc	$rndkey,$inout2,$inout2
	  vaesenc	$rndkey,$inout3,$inout3
	  vaesenc	$rndkey,$inout4,$inout4
	  vaesenc	$rndkey,$inout5,$inout5

	  vaesenc	$T1,$inout0,$inout0
	  vaesenc	$T1,$inout1,$inout1
	  vaesenc	$T1,$inout2,$inout2
	  vaesenc	$T1,$inout3,$inout3
	  vaesenc	$T1,$inout4,$inout4
	  vmovups	0xd0-0x80($key),$rndkey
	  vaesenc	$T1,$inout5,$inout5
	  vmovups	0xe0-0x80($key),$T1
	  jmp		.Lenc_tail		# 256-bit key

.align	32
.Lhandle_ctr32:
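	# The byte-wise vpaddb increments would drop a carry here, so discard
	# the $inout[1-5] computed with them, byte-swap the counter and redo
	# all six increments with full 32-bit vpaddd additions before swapping
	# back to big-endian.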
	vmovdqu		($const),$Ii		# borrow $Ii for .Lbswap_mask
	  vpshufb	$Ii,$T1,$Z2		# byte-swap counter
	  vmovdqu	0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	  vpaddd	0x40($const),$Z2,$inout1	# .Lone_lsb
	  vpaddd	$Z1,$Z2,$inout2
	vmovdqu		0x00-0x20($Htable),$Hkey	# $Hkey^1
	  vpaddd	$Z1,$inout1,$inout3
	  vpshufb	$Ii,$inout1,$inout1
	  vpaddd	$Z1,$inout2,$inout4
	  vpshufb	$Ii,$inout2,$inout2
	  vpxor		$rndkey,$inout1,$inout1
	  vpaddd	$Z1,$inout3,$inout5
	  vpshufb	$Ii,$inout3,$inout3
	  vpxor		$rndkey,$inout2,$inout2
	  vpaddd	$Z1,$inout4,$T1		# byte-swapped next counter value
	  vpshufb	$Ii,$inout4,$inout4
	  vpshufb	$Ii,$inout5,$inout5
	  vpshufb	$Ii,$T1,$T1		# next counter value
	jmp		.Lresume_ctr32

.align	32
.Lenc_tail:
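	# $T1 holds the last round key.  XORing it into each input block ahead
	# of time lets vaesenclast take (last round key ^ input) as its
	# round-key operand, fusing the final AddRoundKey with the CTR-mode
	# XOR so each vaesenclast result below is already an output block.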
	  vaesenc	$rndkey,$inout0,$inout0
	vmovdqu		$Z3,16+8(%rsp)		# postpone vpxor $Z3,$Xi,$Xi
	vpalignr	\$8,$Z0,$Z0,$Xi		# 2nd phase
	  vaesenc	$rndkey,$inout1,$inout1
	vpclmulqdq	\$0x10,$Hkey,$Z0,$Z0
	  vpxor		0x00($inp),$T1,$T2
	  vaesenc	$rndkey,$inout2,$inout2
	  vpxor		0x10($inp),$T1,$Ii
	  vaesenc	$rndkey,$inout3,$inout3
	  vpxor		0x20($inp),$T1,$Z1
	  vaesenc	$rndkey,$inout4,$inout4
	  vpxor		0x30($inp),$T1,$Z2
	  vaesenc	$rndkey,$inout5,$inout5
	  vpxor		0x40($inp),$T1,$Z3
	  vpxor		0x50($inp),$T1,$Hkey
	  vmovdqu	($ivp),$T1		# load next counter value

	  vaesenclast	$T2,$inout0,$inout0
	  vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	  vaesenclast	$Ii,$inout1,$inout1
	 vpaddb		$T2,$T1,$Ii
	mov		%r13,0x70+8(%rsp)
	lea		0x60($inp),$inp
	# These two prefetches were added in BoringSSL. See the change that
	# added them.
	 prefetcht0	512($inp)		# 96-byte blocks: prefetch two cache lines (128 bytes)
	 prefetcht0	576($inp)
	  vaesenclast	$Z1,$inout2,$inout2
	 vpaddb		$T2,$Ii,$Z1
	mov		%r12,0x78+8(%rsp)
	lea		0x60($out),$out
	  vmovdqu	0x00-0x80($key),$rndkey
	  vaesenclast	$Z2,$inout3,$inout3
	 vpaddb		$T2,$Z1,$Z2
	  vaesenclast	$Z3,$inout4,$inout4
	 vpaddb		$T2,$Z2,$Z3
	  vaesenclast	$Hkey,$inout5,$inout5
	 vpaddb		$T2,$Z3,$Hkey

	add		\$0x60,%rax
	sub		\$0x6,$len
	jc		.L6x_done
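	# More blocks remain: store the six output blocks while rotating the
	# counter blocks prepared in .Lenc_tail into $inout[0-5]; $inout0
	# already has the round-0 key folded in.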

	  vmovups	$inout0,-0x60($out)	# save output
	 vpxor		$rndkey,$T1,$inout0
	  vmovups	$inout1,-0x50($out)
	 vmovdqa	$Ii,$inout1		# 0 latency
	  vmovups	$inout2,-0x40($out)
	 vmovdqa	$Z1,$inout2		# 0 latency
	  vmovups	$inout3,-0x30($out)
	 vmovdqa	$Z2,$inout3		# 0 latency
	  vmovups	$inout4,-0x20($out)
	 vmovdqa	$Z3,$inout4		# 0 latency
	  vmovups	$inout5,-0x10($out)
	 vmovdqa	$Hkey,$inout5		# 0 latency
	vmovdqu		0x20+8(%rsp),$Z3	# I[5]
	jmp		.Loop6x

.L6x_done:
	vpxor		16+8(%rsp),$Xi,$Xi	# modulo-scheduled
	vpxor		$Z0,$Xi,$Xi		# modulo-scheduled

	ret
.cfi_endproc
.size	_aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
___
######################################################################
#
# size_t aesni_gcm_[en|de]crypt(const void *inp, void *out, size_t len,
#		const AES_KEY *key, unsigned char iv[16], const u128 *Htbl[9],
#		u128 *Xip);
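# As used here, %rax accumulates the number of bytes processed (in 0x60-byte
# steps inside _aesni_ctr32_ghash_6x) and becomes the size_t return value.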
$code.=<<___;
.globl	aesni_gcm_decrypt
.type	aesni_gcm_decrypt,\@abi-omnipotent
.align	32
aesni_gcm_decrypt:
.cfi_startproc
.seh_startproc
	_CET_ENDBR
	xor	%rax,%rax

	# We call |_aesni_ctr32_ghash_6x|, which requires at least 96 (0x60)
	# bytes of input.
	cmp	\$0x60,$len			# minimal accepted length
	jb	.Lgcm_dec_abort

	push	%rbp
.cfi_push	%rbp
.seh_pushreg	%rbp
	mov	%rsp, %rbp			# save stack pointer
.cfi_def_cfa_register	%rbp
	push	%rbx
.cfi_push	%rbx
.seh_pushreg	%rbx
	push	%r12
.cfi_push	%r12
.seh_pushreg	%r12
	push	%r13
.cfi_push	%r13
.seh_pushreg	%r13
	push	%r14
.cfi_push	%r14
.seh_pushreg	%r14
	push	%r15
.cfi_push	%r15
.seh_pushreg	%r15
___
if ($win64) {
$code.=<<___
	lea	-0xa8(%rsp),%rsp		# 8 extra bytes to align the stack
.seh_allocstack	0xa8
.seh_setframe	%rbp, 0xa8+5*8
	# Load the last two parameters. These go into %rdi and %rsi, which are
	# non-volatile on Windows, so stash them in the parameter stack area
	# first.
	mov	%rdi, 0x10(%rbp)
.seh_savereg	%rdi, 0xa8+5*8+0x10
	mov	%rsi, 0x18(%rbp)
.seh_savereg	%rsi, 0xa8+5*8+0x18
	mov	0x30(%rbp), $ivp
	mov	0x38(%rbp), $Htable
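	# Stack layout after the pushes above: 8(%rbp) is the return address,
	# 0x10-0x28(%rbp) is the caller-allocated home space (reused above to
	# stash %rdi and %rsi), and the fifth and sixth arguments arrive at
	# 0x30(%rbp) and 0x38(%rbp).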
	# Save non-volatile XMM registers.
	movaps	%xmm6,-0xd0(%rbp)
.seh_savexmm128	%xmm6, 0xa8+5*8-0xd0
	movaps	%xmm7,-0xc0(%rbp)
.seh_savexmm128	%xmm7, 0xa8+5*8-0xc0
	movaps	%xmm8,-0xb0(%rbp)
.seh_savexmm128	%xmm8, 0xa8+5*8-0xb0
	movaps	%xmm9,-0xa0(%rbp)
.seh_savexmm128	%xmm9, 0xa8+5*8-0xa0
	movaps	%xmm10,-0x90(%rbp)
.seh_savexmm128	%xmm10, 0xa8+5*8-0x90
	movaps	%xmm11,-0x80(%rbp)
.seh_savexmm128	%xmm11, 0xa8+5*8-0x80
	movaps	%xmm12,-0x70(%rbp)
.seh_savexmm128	%xmm12, 0xa8+5*8-0x70
	movaps	%xmm13,-0x60(%rbp)
.seh_savexmm128	%xmm13, 0xa8+5*8-0x60
	movaps	%xmm14,-0x50(%rbp)
.seh_savexmm128	%xmm14, 0xa8+5*8-0x50
	movaps	%xmm15,-0x40(%rbp)
.seh_savexmm128	%xmm15, 0xa8+5*8-0x40
___
}