func rewriteValueAMD64_OpAMD64ORL(v *Value) bool

in src/cmd/compile/internal/ssa/rewriteAMD64.go [15790:17154]
(rewriteAMD64.go is machine-generated by rulegen; it is not edited by hand)


func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
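	// Each "match:/cond:/result:" block below is compiled by rulegen from
	// one rule in the AMD64 rules file; this function tries the rules in
	// order and applies the first one that matches. A hedged sketch of the
	// first rule in that DSL (typed-rule syntax):
	//
	//	(ORL (SHLL (MOVLconst [1]) y) x) => (BTSL x y)
	//
	// The _i0/_i1 loops that follow are rulegen's way of trying both
	// argument orders of commutative ops such as ORL and ANDL.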
	// match: (ORL (SHLL (MOVLconst [1]) y) x)
	// result: (BTSL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTSL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
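	// The rule above recognizes "x | (1 << y)" and lowers it to a single
	// BTSL (bit test and set). A hedged sketch of Go source producing this
	// shape (names illustrative only; the &31 keeps the shift provably
	// bounded so it lowers to a bare SHLL):
	//
	//	func set(x uint32, y uint32) uint32 { return x | 1<<(y&31) }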
	// match: (ORL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
	// result: (BTSLconst [int8(log32(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTSLconst)
			v.AuxInt = int8ToAuxInt(int8(log32(c)))
			v.AddArg(x)
			return true
		}
		break
	}
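	// The constant analogue: "x | c" with c a single bit becomes
	// BTSLconst. The uint64(c) >= 128 bound keeps the rewrite to cases
	// where it shrinks the encoding: an OR immediate below 128 fits in a
	// sign-extended 8-bit field and is already as short as BTS.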
	// match: (ORL x (MOVLconst [c]))
	// result: (ORLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ORLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
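	// Any other constant operand simply folds into the immediate form:
	// "x | 5" becomes ORLconst [5] rather than materializing 5 in a
	// register first.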
	// match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRLconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 32-c) {
				continue
			}
			v.reset(OpAMD64ROLLconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
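	// (x << c) | (x >> (32-c)) is a rotate. A hedged sketch of matching
	// source:
	//
	//	func rol5(x uint32) uint32 { return x<<5 | x>>27 }
	//
	// matches with c == 5, d == 27 and lowers to ROLLconst [5].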
	// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRWconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
				continue
			}
			v.reset(OpAMD64ROLWconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRBconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
				continue
			}
			v.reset(OpAMD64ROLBconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
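	// The two rules above are the 16- and 8-bit analogues. The extra
	// guards (c < 16 with t.Size() == 2, c < 8 with t.Size() == 1) ensure
	// the shift pair really is a rotate of a narrow value, not a wider
	// shift pattern that merely resembles one.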
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// result: (ROLL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			x := v_0.Args[0]
			if v_1.Op != OpAMD64ANDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if v_1_0.Op != OpAMD64SHRL {
					continue
				}
				_ = v_1_0.Args[1]
				if x != v_1_0.Args[0] {
					continue
				}
				v_1_0_1 := v_1_0.Args[1]
				if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
					continue
				}
				v_1_1_0 := v_1_1.Args[0]
				if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
					continue
				}
				v_1_1_0_0 := v_1_1_0.Args[0]
				if v_1_1_0_0.Op != OpAMD64NEGQ {
					continue
				}
				v_1_1_0_0_0 := v_1_1_0_0.Args[0]
				if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
					continue
				}
				v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
				if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
					continue
				}
				v.reset(OpAMD64ROLL)
				v.AddArg2(x, y)
				return true
			}
		}
		break
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// result: (ROLL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			x := v_0.Args[0]
			if v_1.Op != OpAMD64ANDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if v_1_0.Op != OpAMD64SHRL {
					continue
				}
				_ = v_1_0.Args[1]
				if x != v_1_0.Args[0] {
					continue
				}
				v_1_0_1 := v_1_0.Args[1]
				if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
					continue
				}
				v_1_1_0 := v_1_1.Args[0]
				if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
					continue
				}
				v_1_1_0_0 := v_1_1_0.Args[0]
				if v_1_1_0_0.Op != OpAMD64NEGL {
					continue
				}
				v_1_1_0_0_0 := v_1_1_0_0.Args[0]
				if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
					continue
				}
				v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
				if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
					continue
				}
				v.reset(OpAMD64ROLL)
				v.AddArg2(x, y)
				return true
			}
		}
		break
	}
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// result: (RORL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRL {
				continue
			}
			y := v_0.Args[1]
			x := v_0.Args[0]
			if v_1.Op != OpAMD64ANDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if v_1_0.Op != OpAMD64SHLL {
					continue
				}
				_ = v_1_0.Args[1]
				if x != v_1_0.Args[0] {
					continue
				}
				v_1_0_1 := v_1_0.Args[1]
				if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
					continue
				}
				v_1_1_0 := v_1_1.Args[0]
				if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
					continue
				}
				v_1_1_0_0 := v_1_1_0.Args[0]
				if v_1_1_0_0.Op != OpAMD64NEGQ {
					continue
				}
				v_1_1_0_0_0 := v_1_1_0_0.Args[0]
				if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
					continue
				}
				v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
				if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
					continue
				}
				v.reset(OpAMD64RORL)
				v.AddArg2(x, y)
				return true
			}
		}
		break
	}
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// result: (RORL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRL {
				continue
			}
			y := v_0.Args[1]
			x := v_0.Args[0]
			if v_1.Op != OpAMD64ANDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if v_1_0.Op != OpAMD64SHLL {
					continue
				}
				_ = v_1_0.Args[1]
				if x != v_1_0.Args[0] {
					continue
				}
				v_1_0_1 := v_1_0.Args[1]
				if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
					continue
				}
				v_1_1_0 := v_1_1.Args[0]
				if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
					continue
				}
				v_1_1_0_0 := v_1_1_0.Args[0]
				if v_1_1_0_0.Op != OpAMD64NEGL {
					continue
				}
				v_1_1_0_0_0 := v_1_1_0_0.Args[0]
				if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
					continue
				}
				v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
				if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
					continue
				}
				v.reset(OpAMD64RORL)
				v.AddArg2(x, y)
				return true
			}
		}
		break
	}
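	// The four large patterns above undo the generic lowering of a
	// variable 32-bit rotate spelled with shifts, e.g. x<<s | x>>(32-s).
	// At the generic level the right-shift term is masked to zero (the
	// SBBLcarrymask subtree) when s&31 == 0, since a shift by 32 is not
	// expressible there. On amd64 shift counts are taken mod 32 anyway,
	// so the whole expression collapses to one ROLL (or, mirrored, RORL).
	// The Q and L variants differ only in whether the shift-amount
	// arithmetic was done in 64- or 32-bit registers.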
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			_ = v_0.Args[1]
			x := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
				continue
			}
			y := v_0_1.Args[0]
			if v_1.Op != OpAMD64ANDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if v_1_0.Op != OpAMD64SHRW {
					continue
				}
				_ = v_1_0.Args[1]
				if x != v_1_0.Args[0] {
					continue
				}
				v_1_0_1 := v_1_0.Args[1]
				if v_1_0_1.Op != OpAMD64NEGQ {
					continue
				}
				v_1_0_1_0 := v_1_0_1.Args[0]
				if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
					continue
				}
				v_1_0_1_0_0 := v_1_0_1_0.Args[0]
				if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
					continue
				}
				v_1_1_0 := v_1_1.Args[0]
				if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
					continue
				}
				v_1_1_0_0 := v_1_1_0.Args[0]
				if v_1_1_0_0.Op != OpAMD64NEGQ {
					continue
				}
				v_1_1_0_0_0 := v_1_1_0_0.Args[0]
				if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
					continue
				}
				v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
				if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
					continue
				}
				v.reset(OpAMD64ROLW)
				v.AddArg2(x, y)
				return true
			}
		}
		break
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			_ = v_0.Args[1]
			x := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
				continue
			}
			y := v_0_1.Args[0]
			if v_1.Op != OpAMD64ANDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if v_1_0.Op != OpAMD64SHRW {
					continue
				}
				_ = v_1_0.Args[1]
				if x != v_1_0.Args[0] {
					continue
				}
				v_1_0_1 := v_1_0.Args[1]
				if v_1_0_1.Op != OpAMD64NEGL {
					continue
				}
				v_1_0_1_0 := v_1_0_1.Args[0]
				if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
					continue
				}
				v_1_0_1_0_0 := v_1_0_1_0.Args[0]
				if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
					continue
				}
				v_1_1_0 := v_1_1.Args[0]
				if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
					continue
				}
				v_1_1_0_0 := v_1_1_0.Args[0]
				if v_1_1_0_0.Op != OpAMD64NEGL {
					continue
				}
				v_1_1_0_0_0 := v_1_1_0_0.Args[0]
				if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
					continue
				}
				v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
				if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
					continue
				}
				v.reset(OpAMD64ROLW)
				v.AddArg2(x, y)
				return true
			}
		}
		break
	}
	// match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRW {
				continue
			}
			_ = v_0.Args[1]
			x := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
				continue
			}
			y := v_0_1.Args[0]
			if v_1.Op != OpAMD64SHLL {
				continue
			}
			_ = v_1.Args[1]
			if x != v_1.Args[0] {
				continue
			}
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ {
				continue
			}
			v_1_1_0 := v_1_1.Args[0]
			if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
				continue
			}
			v_1_1_0_0 := v_1_1_0.Args[0]
			if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
				continue
			}
			v.reset(OpAMD64RORW)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRW {
				continue
			}
			_ = v_0.Args[1]
			x := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
				continue
			}
			y := v_0_1.Args[0]
			if v_1.Op != OpAMD64SHLL {
				continue
			}
			_ = v_1.Args[1]
			if x != v_1.Args[0] {
				continue
			}
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGL {
				continue
			}
			v_1_1_0 := v_1_1.Args[0]
			if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
				continue
			}
			v_1_1_0_0 := v_1_1_0.Args[0]
			if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
				continue
			}
			v.reset(OpAMD64RORW)
			v.AddArg2(x, y)
			return true
		}
		break
	}
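	// 16-bit variable rotates follow the same scheme with mask 15: the
	// complementary shift amount (16-y)&15 appears as
	// NEG(ADDconst(ANDconst y [15]) [-16]), and the carry-mask guards the
	// y&15 == 0 case of the ROLW forms. The RORW forms carry no mask
	// term, since shifting the value left by a full 16 and truncating to
	// 16 bits already yields 0 there.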
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			_ = v_0.Args[1]
			x := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
				continue
			}
			y := v_0_1.Args[0]
			if v_1.Op != OpAMD64ANDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if v_1_0.Op != OpAMD64SHRB {
					continue
				}
				_ = v_1_0.Args[1]
				if x != v_1_0.Args[0] {
					continue
				}
				v_1_0_1 := v_1_0.Args[1]
				if v_1_0_1.Op != OpAMD64NEGQ {
					continue
				}
				v_1_0_1_0 := v_1_0_1.Args[0]
				if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
					continue
				}
				v_1_0_1_0_0 := v_1_0_1_0.Args[0]
				if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
					continue
				}
				v_1_1_0 := v_1_1.Args[0]
				if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
					continue
				}
				v_1_1_0_0 := v_1_1_0.Args[0]
				if v_1_1_0_0.Op != OpAMD64NEGQ {
					continue
				}
				v_1_1_0_0_0 := v_1_1_0_0.Args[0]
				if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
					continue
				}
				v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
				if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
					continue
				}
				v.reset(OpAMD64ROLB)
				v.AddArg2(x, y)
				return true
			}
		}
		break
	}
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			_ = v_0.Args[1]
			x := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
				continue
			}
			y := v_0_1.Args[0]
			if v_1.Op != OpAMD64ANDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if v_1_0.Op != OpAMD64SHRB {
					continue
				}
				_ = v_1_0.Args[1]
				if x != v_1_0.Args[0] {
					continue
				}
				v_1_0_1 := v_1_0.Args[1]
				if v_1_0_1.Op != OpAMD64NEGL {
					continue
				}
				v_1_0_1_0 := v_1_0_1.Args[0]
				if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
					continue
				}
				v_1_0_1_0_0 := v_1_0_1_0.Args[0]
				if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
					continue
				}
				v_1_1_0 := v_1_1.Args[0]
				if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
					continue
				}
				v_1_1_0_0 := v_1_1_0.Args[0]
				if v_1_1_0_0.Op != OpAMD64NEGL {
					continue
				}
				v_1_1_0_0_0 := v_1_1_0_0.Args[0]
				if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
					continue
				}
				v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
				if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
					continue
				}
				v.reset(OpAMD64ROLB)
				v.AddArg2(x, y)
				return true
			}
		}
		break
	}
	// match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRB {
				continue
			}
			_ = v_0.Args[1]
			x := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
				continue
			}
			y := v_0_1.Args[0]
			if v_1.Op != OpAMD64SHLL {
				continue
			}
			_ = v_1.Args[1]
			if x != v_1.Args[0] {
				continue
			}
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ {
				continue
			}
			v_1_1_0 := v_1_1.Args[0]
			if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
				continue
			}
			v_1_1_0_0 := v_1_1_0.Args[0]
			if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
				continue
			}
			v.reset(OpAMD64RORB)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRB {
				continue
			}
			_ = v_0.Args[1]
			x := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
				continue
			}
			y := v_0_1.Args[0]
			if v_1.Op != OpAMD64SHLL {
				continue
			}
			_ = v_1.Args[1]
			if x != v_1.Args[0] {
				continue
			}
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGL {
				continue
			}
			v_1_1_0 := v_1_1.Args[0]
			if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
				continue
			}
			v_1_1_0_0 := v_1_1_0.Args[0]
			if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
				continue
			}
			v.reset(OpAMD64RORB)
			v.AddArg2(x, y)
			return true
		}
		break
	}
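	// The 8-bit rules mirror the 16-bit ones: mask 7, width 8, and a
	// v.Type.Size() == 1 guard, producing ROLB/RORB.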
	// match: (ORL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
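	// x | x == x: when both operands are the same value the OR is a
	// no-op, so v is replaced outright via copyOf.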
	// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			if x0.Op != OpAMD64MOVBload {
				continue
			}
			i0 := auxIntToInt32(x0.AuxInt)
			s := auxToSym(x0.Aux)
			mem := x0.Args[1]
			p := x0.Args[0]
			sh := v_1
			if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
				continue
			}
			x1 := sh.Args[0]
			if x1.Op != OpAMD64MOVBload {
				continue
			}
			i1 := auxIntToInt32(x1.AuxInt)
			if auxToSym(x1.Aux) != s {
				continue
			}
			_ = x1.Args[1]
			if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
				continue
			}
			b = mergePoint(b, x0, x1)
			v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
			v.copyOf(v0)
			v0.AuxInt = int32ToAuxInt(i0)
			v0.Aux = symToAux(s)
			v0.AddArg2(p, mem)
			return true
		}
		break
	}
	// match: (ORL x0:(MOVBload [i] {s} p0 mem) sh:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
	// cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			if x0.Op != OpAMD64MOVBload {
				continue
			}
			i := auxIntToInt32(x0.AuxInt)
			s := auxToSym(x0.Aux)
			mem := x0.Args[1]
			p0 := x0.Args[0]
			sh := v_1
			if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
				continue
			}
			x1 := sh.Args[0]
			if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
				continue
			}
			_ = x1.Args[1]
			p1 := x1.Args[0]
			if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
				continue
			}
			b = mergePoint(b, x0, x1)
			v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
			v.copyOf(v0)
			v0.AuxInt = int32ToAuxInt(i)
			v0.Aux = symToAux(s)
			v0.AddArg2(p0, mem)
			return true
		}
		break
	}
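	// The two rules above merge adjacent byte loads joined by OR+shift
	// into one 16-bit load, relying on amd64 being little-endian. A
	// hedged sketch of matching source (names illustrative):
	//
	//	u := uint16(p[0]) | uint16(p[1])<<8 // one MOVWload
	//
	// The first variant proves adjacency via constant offsets i0 and
	// i0+1 off one pointer; the second via sequentialAddresses on two
	// pointers. The Uses == 1 and clobber conditions ensure the partial
	// loads have no other consumers, and mergePoint picks a block where
	// the combined load can live.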
	// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			if x0.Op != OpAMD64MOVWload {
				continue
			}
			i0 := auxIntToInt32(x0.AuxInt)
			s := auxToSym(x0.Aux)
			mem := x0.Args[1]
			p := x0.Args[0]
			sh := v_1
			if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
				continue
			}
			x1 := sh.Args[0]
			if x1.Op != OpAMD64MOVWload {
				continue
			}
			i1 := auxIntToInt32(x1.AuxInt)
			if auxToSym(x1.Aux) != s {
				continue
			}
			_ = x1.Args[1]
			if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
				continue
			}
			b = mergePoint(b, x0, x1)
			v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
			v.copyOf(v0)
			v0.AuxInt = int32ToAuxInt(i0)
			v0.Aux = symToAux(s)
			v0.AddArg2(p, mem)
			return true
		}
		break
	}
	// match: (ORL x0:(MOVWload [i] {s} p0 mem) sh:(SHLLconst [16] x1:(MOVWload [i] {s} p1 mem)))
	// cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			if x0.Op != OpAMD64MOVWload {
				continue
			}
			i := auxIntToInt32(x0.AuxInt)
			s := auxToSym(x0.Aux)
			mem := x0.Args[1]
			p0 := x0.Args[0]
			sh := v_1
			if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
				continue
			}
			x1 := sh.Args[0]
			if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
				continue
			}
			_ = x1.Args[1]
			p1 := x1.Args[0]
			if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
				continue
			}
			b = mergePoint(b, x0, x1)
			v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
			v.copyOf(v0)
			v0.AuxInt = int32ToAuxInt(i)
			v0.Aux = symToAux(s)
			v0.AddArg2(p0, mem)
			return true
		}
		break
	}
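	// The same merge one level up: two adjacent 16-bit loads, the upper
	// shifted left by 16, become a single MOVLload. Applied after the
	// byte-level rules, this is how binary.LittleEndian.Uint32-style
	// code collapses to one 32-bit load.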
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			s1 := v_0
			if s1.Op != OpAMD64SHLLconst {
				continue
			}
			j1 := auxIntToInt8(s1.AuxInt)
			x1 := s1.Args[0]
			if x1.Op != OpAMD64MOVBload {
				continue
			}
			i1 := auxIntToInt32(x1.AuxInt)
			s := auxToSym(x1.Aux)
			mem := x1.Args[1]
			p := x1.Args[0]
			or := v_1
			if or.Op != OpAMD64ORL {
				continue
			}
			_ = or.Args[1]
			or_0 := or.Args[0]
			or_1 := or.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
				s0 := or_0
				if s0.Op != OpAMD64SHLLconst {
					continue
				}
				j0 := auxIntToInt8(s0.AuxInt)
				x0 := s0.Args[0]
				if x0.Op != OpAMD64MOVBload {
					continue
				}
				i0 := auxIntToInt32(x0.AuxInt)
				if auxToSym(x0.Aux) != s {
					continue
				}
				_ = x0.Args[1]
				if p != x0.Args[0] || mem != x0.Args[1] {
					continue
				}
				y := or_1
				if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
					continue
				}
				b = mergePoint(b, x0, x1, y)
				v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
				v.copyOf(v0)
				v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
				v1.AuxInt = int8ToAuxInt(j0)
				v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
				v2.AuxInt = int32ToAuxInt(i0)
				v2.Aux = symToAux(s)
				v2.AddArg2(p, mem)
				v1.AddArg(v2)
				v0.AddArg2(v1, y)
				return true
			}
		}
		break
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) y))
	// cond: j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			s1 := v_0
			if s1.Op != OpAMD64SHLLconst {
				continue
			}
			j1 := auxIntToInt8(s1.AuxInt)
			x1 := s1.Args[0]
			if x1.Op != OpAMD64MOVBload {
				continue
			}
			i := auxIntToInt32(x1.AuxInt)
			s := auxToSym(x1.Aux)
			mem := x1.Args[1]
			p1 := x1.Args[0]
			or := v_1
			if or.Op != OpAMD64ORL {
				continue
			}
			_ = or.Args[1]
			or_0 := or.Args[0]
			or_1 := or.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
				s0 := or_0
				if s0.Op != OpAMD64SHLLconst {
					continue
				}
				j0 := auxIntToInt8(s0.AuxInt)
				x0 := s0.Args[0]
				if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
					continue
				}
				_ = x0.Args[1]
				p0 := x0.Args[0]
				if mem != x0.Args[1] {
					continue
				}
				y := or_1
				if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
					continue
				}
				b = mergePoint(b, x0, x1, y)
				v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
				v.copyOf(v0)
				v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
				v1.AuxInt = int8ToAuxInt(j0)
				v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
				v2.AuxInt = int32ToAuxInt(i)
				v2.Aux = symToAux(s)
				v2.AddArg2(p0, mem)
				v1.AddArg(v2)
				v0.AddArg2(v1, y)
				return true
			}
		}
		break
	}
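	// These two rules perform the byte-load merge inside a longer OR
	// chain: (s1 | (s0 | y)) with s1/s0 shifted adjacent byte loads is
	// rebuilt as ((MOVWload << j0) | y), so repeated application keeps
	// growing the merged load while carrying the rest of the chain y
	// along.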
	// match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x1 := v_0
			if x1.Op != OpAMD64MOVBload {
				continue
			}
			i1 := auxIntToInt32(x1.AuxInt)
			s := auxToSym(x1.Aux)
			mem := x1.Args[1]
			p := x1.Args[0]
			sh := v_1
			if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
				continue
			}
			x0 := sh.Args[0]
			if x0.Op != OpAMD64MOVBload {
				continue
			}
			i0 := auxIntToInt32(x0.AuxInt)
			if auxToSym(x0.Aux) != s {
				continue
			}
			_ = x0.Args[1]
			if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
				continue
			}
			b = mergePoint(b, x0, x1)
			v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
			v.copyOf(v0)
			v0.AuxInt = int8ToAuxInt(8)
			v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
			v1.AuxInt = int32ToAuxInt(i0)
			v1.Aux = symToAux(s)
			v1.AddArg2(p, mem)
			v0.AddArg(v1)
			return true
		}
		break
	}
	// match: (ORL x1:(MOVBload [i] {s} p1 mem) sh:(SHLLconst [8] x0:(MOVBload [i] {s} p0 mem)))
	// cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x1 := v_0
			if x1.Op != OpAMD64MOVBload {
				continue
			}
			i := auxIntToInt32(x1.AuxInt)
			s := auxToSym(x1.Aux)
			mem := x1.Args[1]
			p1 := x1.Args[0]
			sh := v_1
			if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
				continue
			}
			x0 := sh.Args[0]
			if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
				continue
			}
			_ = x0.Args[1]
			p0 := x0.Args[0]
			if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
				continue
			}
			b = mergePoint(b, x0, x1)
			v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
			v.copyOf(v0)
			v0.AuxInt = int8ToAuxInt(8)
			v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
			v1.AuxInt = int32ToAuxInt(i)
			v1.Aux = symToAux(s)
			v1.AddArg2(p0, mem)
			v0.AddArg(v1)
			return true
		}
		break
	}
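	// Big-endian byte order: here the lower-addressed byte (i0) is the
	// one shifted into the high half, so the pair is a byte-swapped
	// 16-bit read. It becomes MOVWload plus ROLWconst [8], a rotate by 8
	// being a 16-bit byte swap. Hedged sketch of matching source:
	//
	//	u := uint16(p[1]) | uint16(p[0])<<8 // big-endian Uint16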
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			r1 := v_0
			if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
				continue
			}
			x1 := r1.Args[0]
			if x1.Op != OpAMD64MOVWload {
				continue
			}
			i1 := auxIntToInt32(x1.AuxInt)
			s := auxToSym(x1.Aux)
			mem := x1.Args[1]
			p := x1.Args[0]
			sh := v_1
			if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
				continue
			}
			r0 := sh.Args[0]
			if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
				continue
			}
			x0 := r0.Args[0]
			if x0.Op != OpAMD64MOVWload {
				continue
			}
			i0 := auxIntToInt32(x0.AuxInt)
			if auxToSym(x0.Aux) != s {
				continue
			}
			_ = x0.Args[1]
			if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
				continue
			}
			b = mergePoint(b, x0, x1)
			v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
			v.copyOf(v0)
			v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
			v1.AuxInt = int32ToAuxInt(i0)
			v1.Aux = symToAux(s)
			v1.AddArg2(p, mem)
			v0.AddArg(v1)
			return true
		}
		break
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			r1 := v_0
			if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
				continue
			}
			x1 := r1.Args[0]
			if x1.Op != OpAMD64MOVWload {
				continue
			}
			i := auxIntToInt32(x1.AuxInt)
			s := auxToSym(x1.Aux)
			mem := x1.Args[1]
			p1 := x1.Args[0]
			sh := v_1
			if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
				continue
			}
			r0 := sh.Args[0]
			if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
				continue
			}
			x0 := r0.Args[0]
			if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
				continue
			}
			_ = x0.Args[1]
			p0 := x0.Args[0]
			if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
				continue
			}
			b = mergePoint(b, x0, x1)
			v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
			v.copyOf(v0)
			v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
			v1.AuxInt = int32ToAuxInt(i)
			v1.Aux = symToAux(s)
			v1.AddArg2(p0, mem)
			v0.AddArg(v1)
			return true
		}
		break
	}
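	// Two byte-swapped 16-bit halves with the lower-addressed half in
	// the high bits form a big-endian 32-bit read: one MOVLload followed
	// by BSWAPL. This is how binary.BigEndian.Uint32-style code becomes
	// a plain load plus a single byte-swap instruction.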
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			s0 := v_0
			if s0.Op != OpAMD64SHLLconst {
				continue
			}
			j0 := auxIntToInt8(s0.AuxInt)
			x0 := s0.Args[0]
			if x0.Op != OpAMD64MOVBload {
				continue
			}
			i0 := auxIntToInt32(x0.AuxInt)
			s := auxToSym(x0.Aux)
			mem := x0.Args[1]
			p := x0.Args[0]
			or := v_1
			if or.Op != OpAMD64ORL {
				continue
			}
			_ = or.Args[1]
			or_0 := or.Args[0]
			or_1 := or.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
				s1 := or_0
				if s1.Op != OpAMD64SHLLconst {
					continue
				}
				j1 := auxIntToInt8(s1.AuxInt)
				x1 := s1.Args[0]
				if x1.Op != OpAMD64MOVBload {
					continue
				}
				i1 := auxIntToInt32(x1.AuxInt)
				if auxToSym(x1.Aux) != s {
					continue
				}
				_ = x1.Args[1]
				if p != x1.Args[0] || mem != x1.Args[1] {
					continue
				}
				y := or_1
				if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
					continue
				}
				b = mergePoint(b, x0, x1, y)
				v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
				v.copyOf(v0)
				v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
				v1.AuxInt = int8ToAuxInt(j1)
				v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
				v2.AuxInt = int8ToAuxInt(8)
				v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
				v3.AuxInt = int32ToAuxInt(i0)
				v3.Aux = symToAux(s)
				v3.AddArg2(p, mem)
				v2.AddArg(v3)
				v1.AddArg(v2)
				v0.AddArg2(v1, y)
				return true
			}
		}
		break
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) y))
	// cond: j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			s0 := v_0
			if s0.Op != OpAMD64SHLLconst {
				continue
			}
			j0 := auxIntToInt8(s0.AuxInt)
			x0 := s0.Args[0]
			if x0.Op != OpAMD64MOVBload {
				continue
			}
			i := auxIntToInt32(x0.AuxInt)
			s := auxToSym(x0.Aux)
			mem := x0.Args[1]
			p0 := x0.Args[0]
			or := v_1
			if or.Op != OpAMD64ORL {
				continue
			}
			_ = or.Args[1]
			or_0 := or.Args[0]
			or_1 := or.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
				s1 := or_0
				if s1.Op != OpAMD64SHLLconst {
					continue
				}
				j1 := auxIntToInt8(s1.AuxInt)
				x1 := s1.Args[0]
				if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
					continue
				}
				_ = x1.Args[1]
				p1 := x1.Args[0]
				if mem != x1.Args[1] {
					continue
				}
				y := or_1
				if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
					continue
				}
				b = mergePoint(b, x0, x1, y)
				v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
				v.copyOf(v0)
				v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
				v1.AuxInt = int8ToAuxInt(j1)
				v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
				v2.AuxInt = int8ToAuxInt(8)
				v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
				v3.AuxInt = int32ToAuxInt(i)
				v3.Aux = symToAux(s)
				v3.AddArg2(p0, mem)
				v2.AddArg(v3)
				v1.AddArg(v2)
				v0.AddArg2(v1, y)
				return true
			}
		}
		break
	}
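	// As in the little-endian case, these two rules do the byte-swapped
	// merge inside a longer OR chain, rebuilding the pair as
	// ((ROLWconst [8] (MOVWload ...)) << j1) | y so the chain can keep
	// collapsing.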
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ORLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
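	// Finally, a 32-bit load feeding the OR can fold into a memory
	// operand: "x | *p" becomes a single ORLload instead of MOVLload
	// followed by ORL. canMergeLoadClobber checks that the load can
	// legally move to v's position and that x's register may be
	// overwritten by the combined instruction.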
	return false
}