// Excerpt: src/cmd/compile/internal/ssa/rewriteAMD64.go [9957:11147]
func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
// cond: y.Uses == 1
// result: (SETLstore [off] {sym} ptr x mem)
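// This rule and the nine that follow cover the whole SETcc family
// (SETL/SETLE/SETG/SETGE/SETEQ/SETNE/SETB/SETBE/SETA/SETAE): when the only
// use of a flag-materializing SETcc is a byte store, the pair is fused into a
// single SETcc-to-memory op. An illustrative source shape (an assumption
// about typical input, not taken from this file):
//
//	func f(p *bool, a, b int64) { *p = a < b } // SETL + MOVBstore -> SETLstore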
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SETL {
break
}
x := y.Args[0]
mem := v_2
if !(y.Uses == 1) {
break
}
v.reset(OpAMD64SETLstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
// cond: y.Uses == 1
// result: (SETLEstore [off] {sym} ptr x mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SETLE {
break
}
x := y.Args[0]
mem := v_2
if !(y.Uses == 1) {
break
}
v.reset(OpAMD64SETLEstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
// cond: y.Uses == 1
// result: (SETGstore [off] {sym} ptr x mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SETG {
break
}
x := y.Args[0]
mem := v_2
if !(y.Uses == 1) {
break
}
v.reset(OpAMD64SETGstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
// cond: y.Uses == 1
// result: (SETGEstore [off] {sym} ptr x mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SETGE {
break
}
x := y.Args[0]
mem := v_2
if !(y.Uses == 1) {
break
}
v.reset(OpAMD64SETGEstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
// cond: y.Uses == 1
// result: (SETEQstore [off] {sym} ptr x mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SETEQ {
break
}
x := y.Args[0]
mem := v_2
if !(y.Uses == 1) {
break
}
v.reset(OpAMD64SETEQstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
// cond: y.Uses == 1
// result: (SETNEstore [off] {sym} ptr x mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SETNE {
break
}
x := y.Args[0]
mem := v_2
if !(y.Uses == 1) {
break
}
v.reset(OpAMD64SETNEstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
// cond: y.Uses == 1
// result: (SETBstore [off] {sym} ptr x mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SETB {
break
}
x := y.Args[0]
mem := v_2
if !(y.Uses == 1) {
break
}
v.reset(OpAMD64SETBstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
// cond: y.Uses == 1
// result: (SETBEstore [off] {sym} ptr x mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SETBE {
break
}
x := y.Args[0]
mem := v_2
if !(y.Uses == 1) {
break
}
v.reset(OpAMD64SETBEstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
// cond: y.Uses == 1
// result: (SETAstore [off] {sym} ptr x mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SETA {
break
}
x := y.Args[0]
mem := v_2
if !(y.Uses == 1) {
break
}
v.reset(OpAMD64SETAstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
// cond: y.Uses == 1
// result: (SETAEstore [off] {sym} ptr x mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
y := v_1
if y.Op != OpAMD64SETAE {
break
}
x := y.Args[0]
mem := v_2
if !(y.Uses == 1) {
break
}
v.reset(OpAMD64SETAEstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
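// A byte store only writes the low 8 bits of its value, so a sign extension
// (or, in the next rule, a zero extension) feeding the store is redundant and
// is dropped. A hypothetical shape this covers:
//
//	func f(p *byte, x int8) { *p = byte(x) } // x may reach the store sign-extended; the extension is skipped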
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVBQSX {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpAMD64MOVBstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVBQZX {
break
}
x := v_1.Args[0]
mem := v_2
v.reset(OpAMD64MOVBstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBstore [off1+off2] {sym} ptr val mem)
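// Folds a constant pointer adjustment into the store's displacement, as long
// as the combined offset still fits in a signed 32-bit displacement. Rough
// effect (illustrative assembly, not emitted by this excerpt):
//
//	ADDQ $8, AX
//	MOVB BL, (AX)
//
// becomes
//
//	MOVB BL, 8(AX)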
for {
off1 := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpAMD64ADDQconst {
break
}
off2 := auxIntToInt32(v_0.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
if !(is32Bit(int64(off1) + int64(off2))) {
break
}
v.reset(OpAMD64MOVBstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
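// This rule and the MOVQconst rule below turn a store of a constant value
// into a store-immediate. Only the low byte of the constant reaches memory,
// so it is truncated via int8 and packed together with the offset into the
// ValAndOff aux. Illustrative effect (assumed source shape):
//
//	*p = 0x12 // MOVB $0x12, (AX) instead of materializing the constant in a register first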
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVLconst {
break
}
c := auxIntToInt32(v_1.AuxInt)
mem := v_2
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVQconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
mem := v_2
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
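// Folds a LEAQ-computed address (base + symbol + constant) into the store's
// own sym/offset, provided the combined offset fits in 32 bits and at most
// one of the two symbols is non-nil (canMergeSym). Rough effect (illustrative
// assembly, not emitted by this excerpt):
//
//	LEAQ sym+4(SB), AX
//	MOVB BL, 8(AX)
//
// becomes
//
//	MOVB BL, sym+12(SB)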
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
if v_0.Op != OpAMD64LEAQ {
break
}
off2 := auxIntToInt32(v_0.AuxInt)
sym2 := auxToSym(v_0.Aux)
base := v_0.Args[0]
val := v_1
mem := v_2
if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
v.reset(OpAMD64MOVBstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
// cond: x0.Uses == 1 && clobber(x0)
// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
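// Two single-byte stores of w>>8 (at i-1) and w (at i) lay down a byte-swapped
// 16-bit value, so they are replaced by ROLW $8 (swap the two bytes of w)
// plus one 16-bit store at i-1. The next rule is the same combine for stores
// whose pointers are distinct values proven one byte apart by
// sequentialAddresses. A typical source shape (illustrative assumption):
//
//	binary.BigEndian.PutUint16(b, v) // b[0] = byte(v >> 8); b[1] = byte(v)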
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
if p != x0.Args[0] {
break
}
x0_1 := x0.Args[1]
if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
v0.AuxInt = int8ToAuxInt(8)
v0.AddArg(w)
v.AddArg3(p, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p1 w x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
// cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)
// result: (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p1 := v_0
w := v_1
x0 := v_2
if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
p0 := x0.Args[0]
x0_1 := x0.Args[1]
if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
v0.AuxInt = int8ToAuxInt(8)
v0.AddArg(w)
v.AddArg3(p0, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
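// Four byte stores of w, w>>8, w>>16 and w>>24 at descending offsets i..i-3
// write a 32-bit value in big-endian order, so the chain collapses into
// BSWAPL plus one 32-bit store at the lowest offset. The following rule
// handles the same shape with four sequential pointers instead of one base.
// A typical source shape (illustrative assumption):
//
//	binary.BigEndian.PutUint32(b, v)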
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
w := v_1
x2 := v_2
if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
if p != x2.Args[0] {
break
}
x2_1 := x2.Args[1]
if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
break
}
x1 := x2.Args[2]
if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
if p != x1.Args[0] {
break
}
x1_1 := x1.Args[1]
if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x0 := x1.Args[2]
if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
if p != x0.Args[0] {
break
}
x0_1 := x0.Args[1]
if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
break
}
v.reset(OpAMD64MOVLstore)
v.AuxInt = int32ToAuxInt(i - 3)
v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
v0.AddArg(w)
v.AddArg3(p, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p3 w x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)
// result: (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p3 := v_0
w := v_1
x2 := v_2
if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
p2 := x2.Args[0]
x2_1 := x2.Args[1]
if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
break
}
x1 := x2.Args[2]
if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
p1 := x1.Args[0]
x1_1 := x1.Args[1]
if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x0 := x1.Args[2]
if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
p0 := x0.Args[0]
x0_1 := x0.Args[1]
if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
break
}
v.reset(OpAMD64MOVLstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
v0.AddArg(w)
v.AddArg3(p0, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
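// The 64-bit analogue of the BSWAPL rule above: eight descending byte stores
// of w, w>>8, ..., w>>56 become BSWAPQ plus one 8-byte store at the lowest
// offset, and the next rule covers eight provably sequential pointers. A
// typical source shape (illustrative assumption):
//
//	binary.BigEndian.PutUint64(b, v)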
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
w := v_1
x6 := v_2
if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s {
break
}
_ = x6.Args[2]
if p != x6.Args[0] {
break
}
x6_1 := x6.Args[1]
if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
break
}
x5 := x6.Args[2]
if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s {
break
}
_ = x5.Args[2]
if p != x5.Args[0] {
break
}
x5_1 := x5.Args[1]
if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
break
}
x4 := x5.Args[2]
if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[2]
if p != x4.Args[0] {
break
}
x4_1 := x4.Args[1]
if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
break
}
x3 := x4.Args[2]
if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[2]
if p != x3.Args[0] {
break
}
x3_1 := x3.Args[1]
if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
break
}
x2 := x3.Args[2]
if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
if p != x2.Args[0] {
break
}
x2_1 := x2.Args[1]
if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
break
}
x1 := x2.Args[2]
if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
if p != x1.Args[0] {
break
}
x1_1 := x1.Args[1]
if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
break
}
x0 := x1.Args[2]
if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
if p != x0.Args[0] {
break
}
x0_1 := x0.Args[1]
if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
break
}
v.reset(OpAMD64MOVQstore)
v.AuxInt = int32ToAuxInt(i - 7)
v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
v0.AddArg(w)
v.AddArg3(p, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p7 w x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w) x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w) x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w) x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w) x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w) x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w) x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p7 := v_0
w := v_1
x6 := v_2
if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s {
break
}
_ = x6.Args[2]
p6 := x6.Args[0]
x6_1 := x6.Args[1]
if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
break
}
x5 := x6.Args[2]
if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s {
break
}
_ = x5.Args[2]
p5 := x5.Args[0]
x5_1 := x5.Args[1]
if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
break
}
x4 := x5.Args[2]
if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[2]
p4 := x4.Args[0]
x4_1 := x4.Args[1]
if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
break
}
x3 := x4.Args[2]
if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[2]
p3 := x3.Args[0]
x3_1 := x3.Args[1]
if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
break
}
x2 := x3.Args[2]
if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
p2 := x2.Args[0]
x2_1 := x2.Args[1]
if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
break
}
x1 := x2.Args[2]
if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
p1 := x1.Args[0]
x1_1 := x1.Args[1]
if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
break
}
x0 := x1.Args[2]
if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
break
}
mem := x0.Args[2]
p0 := x0.Args[0]
x0_1 := x0.Args[1]
if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
break
}
v.reset(OpAMD64MOVQstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
v0.AddArg(w)
v.AddArg3(p0, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)
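// Here the high byte w>>8 is stored at i and w itself at i-1, which is already
// little-endian order, so no byte swap is needed: the pair becomes a plain
// 16-bit store of w at i-1. The two rules after this one are the same combine
// with the shift coming from SHRL/SHRQ instead of SHRW. A typical source
// shape (illustrative assumption):
//
//	binary.LittleEndian.PutUint16(b, v) // b[0] = byte(v); b[1] = byte(v >> 8)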
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)
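// The same little-endian pair as above, matched from the other end: the
// current store writes the low byte w at i and the chained store wrote w>>8
// at i+1, so the result is again one 16-bit store of w at i (SHRL and SHRQ
// variants follow). Illustrative source shape, as before:
//
//	b[0] = byte(v)
//	b[1] = byte(v >> 8)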
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
w := v_1
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] {
break
}
x_1 := x.Args[1]
if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
w := v_1
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] {
break
}
x_1 := x.Args[1]
if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
w := v_1
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] {
break
}
x_1 := x.Args[1]
if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p, w, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w0 mem)
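// Merges two adjacent byte stores taken from the middle of the same value:
// byte j of w at offset i and byte j-8 at offset i-1 together hold bits
// j-8..j+7 of w, so they become one 16-bit store of w0 = w>>(j-8) at i-1.
// A shape that can produce this (illustrative assumption):
//
//	b[1] = byte(v >> 8)
//	b[2] = byte(v >> 16) // two interior bytes of a wider value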
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64SHRLconst {
break
}
j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] {
break
}
w0 := x.Args[1]
if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
v.AddArg3(p, w0, mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w0 mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
if v_1.Op != OpAMD64SHRQconst {
break
}
j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if p != x.Args[0] {
break
}
w0 := x.Args[1]
if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
v.AddArg3(p, w0, mem)
return true
}
// match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p1 := v_0
if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p1 := v_0
if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// match: (MOVBstore [i] {s} p1 (SHRQconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p1 := v_0
if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRWconst [8] w) mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p0 := v_0
w := v_1
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p1 := x.Args[0]
x_1 := x.Args[1]
if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRLconst [8] w) mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p0 := v_0
w := v_1
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p1 := x.Args[0]
x_1 := x.Args[1]
if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRQconst [8] w) mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p0 := v_0
w := v_1
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p1 := x.Args[0]
x_1 := x.Args[1]
if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p0, w, mem)
return true
}
// match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w0 mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p1 := v_0
if v_1.Op != OpAMD64SHRLconst {
break
}
j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
w0 := x.Args[1]
if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p0, w0, mem)
return true
}
// match: (MOVBstore [i] {s} p1 (SHRQconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRQconst [j-8] w) mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w0 mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p1 := v_0
if v_1.Op != OpAMD64SHRQconst {
break
}
j := auxIntToInt8(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
p0 := x.Args[0]
w0 := x.Args[1]
if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
v.AddArg3(p0, w0, mem)
return true
}
// match: (MOVBstore [7] {s} p1 (SHRQconst [56] w) x1:(MOVWstore [5] {s} p1 (SHRQconst [40] w) x2:(MOVLstore [1] {s} p1 (SHRQconst [8] w) x3:(MOVBstore [0] {s} p1 w mem))))
// cond: x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)
// result: (MOVQstore {s} p1 w mem)
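// Stitches an unevenly split little-endian 64-bit write back together: the
// byte at offset 0 (w), the four bytes at offset 1 (w>>8), the two bytes at
// offset 5 (w>>40) and the byte at offset 7 (w>>56) cover all eight bytes of
// w in order, so the chain becomes a single MOVQstore at offset 0. Assumed
// layout (offsets relative to p1):
//
//	0: w   1..4: w>>8   5..6: w>>40   7: w>>56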
for {
if auxIntToInt32(v.AuxInt) != 7 {
break
}
s := auxToSym(v.Aux)
p1 := v_0
if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 56 {
break
}
w := v_1.Args[0]
x1 := v_2
if x1.Op != OpAMD64MOVWstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
if p1 != x1.Args[0] {
break
}
x1_1 := x1.Args[1]
if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 40 || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
if x2.Op != OpAMD64MOVLstore || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
if p1 != x2.Args[0] {
break
}
x2_1 := x2.Args[1]
if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
break
}
x3 := x2.Args[2]
if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != 0 || auxToSym(x3.Aux) != s {
break
}
mem := x3.Args[2]
if p1 != x3.Args[0] || w != x3.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)) {
break
}
v.reset(OpAMD64MOVQstore)
v.Aux = symToAux(s)
v.AddArg3(p1, w, mem)
return true
}
// match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
// result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
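// Combines a byte-by-byte copy: two MOVBload/MOVBstore pairs at adjacent
// offsets, with both loads reading the same memory state, become one MOVWload
// feeding one MOVWstore. One shape that can produce this, when the compiler
// can see that the loads and stores do not interfere (illustrative
// assumption):
//
//	dst[0] = src[0]
//	dst[1] = src[1]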
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
p := v_0
x1 := v_1
if x1.Op != OpAMD64MOVBload {
break
}
j := auxIntToInt32(x1.AuxInt)
s2 := auxToSym(x1.Aux)
mem := x1.Args[1]
p2 := x1.Args[0]
mem2 := v_2
if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
break
}
_ = mem2.Args[2]
if p != mem2.Args[0] {
break
}
x2 := mem2.Args[1]
if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
break
}
_ = x2.Args[1]
if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
v0.AuxInt = int32ToAuxInt(j - 1)
v0.Aux = symToAux(s2)
v0.AddArg2(p2, mem)
v.AddArg3(p, v0, mem)
return true
}
return false
}